import findspark
findspark.init()
from pyspark.sql import SparkSession
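# Create (or reuse) a SparkSession as the entry point for DataFrame operations.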
spark = SparkSession \
    .builder \
    .appName("Analyzing soccer players") \
    .getOrCreate()
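# Load the player data; "header" treats the first CSV row as column names.
# Without inferSchema, every column is read as a string.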
players = spark.read \
    .format("csv") \
    .option("header", "true") \
    .load("../datasets/player.csv")
players.printSchema()
players.show(5)
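# Load the per-date player ratings from Player_Attributes.csv.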
player_attributes = spark.read \
    .format("csv") \
    .option("header", "true") \
    .load("../datasets/Player_Attributes.csv")
player_attributes.printSchema()
players.count(), player_attributes.count()
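# Each player has multiple attribute rows (one per assessment date),
# so count the distinct player ids to see how many unique players are covered.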
player_attributes.select('player_api_id') \
    .distinct() \
    .count()
players = players.drop('id', 'player_fifa_api_id')
players.columns
According to our requirements, there are certain traits we are not going to use anywhere in this analysis.
So it is better to drop those columns and keep the dataset lean.
player_attributes = player_attributes.drop(
    'id',
    'player_fifa_api_id',
    'preferred_foot',
    'attacking_work_rate',
    'defensive_work_rate',
    'crossing',
    'jumping',
    'sprint_speed',
    'balance',
    'aggression',
    'short_passing',
    'potential'
)
player_attributes.columns
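# Drop rows containing null values before aggregating.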
player_attributes = player_attributes.dropna()
players = players.dropna()
players.count(), player_attributes.count()
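# The 'date' column is a string starting with the year (e.g. '2016-...'),
# so a simple UDF that splits on '-' is enough to extract the year.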
from pyspark.sql.functions import udf
year_extract_udf = udf(lambda date: date.split('-')[0])
player_attributes = player_attributes.withColumn(
    "year",
    year_extract_udf(player_attributes.date)
)
player_attributes = player_attributes.drop('date')
player_attributes.columns
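# Keep only the ratings recorded in 2016.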
pa_2016 = player_attributes.filter(player_attributes.year == 2016)
pa_2016.count()
pa_2016.select(pa_2016.player_api_id) \
    .distinct() \
    .count()
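# Average the striker-relevant attributes per player across all of their 2016 ratings.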
pa_striker_2016 = pa_2016.groupBy('player_api_id') \
    .agg({
        'finishing': "avg",
        "shot_power": "avg",
        "acceleration": "avg"
    })
pa_striker_2016.count()
pa_striker_2016.show(5)
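# agg() names the output columns avg(finishing), avg(shot_power), ...; rename them back.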
pa_striker_2016 = pa_striker_2016.withColumnRenamed("avg(finishing)", "finishing") \
    .withColumnRenamed("avg(shot_power)", "shot_power") \
    .withColumnRenamed("avg(acceleration)", "acceleration")
weight_finishing = 1
weight_shot_power = 2
weight_acceleration = 1
total_weight = weight_finishing + weight_shot_power + weight_acceleration
strikers = pa_striker_2016.withColumn(
    "striker_grade",
    (pa_striker_2016.finishing * weight_finishing +
     pa_striker_2016.shot_power * weight_shot_power +
     pa_striker_2016.acceleration * weight_acceleration) / total_weight)
strikers = strikers.drop('finishing',
                         'acceleration',
                         'shot_power')
strikers = strikers.filter(strikers.striker_grade > 70) \
    .sort(strikers.striker_grade.desc())
strikers.show(10)
strikers.count(), players.count()
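# Join back to players to recover the player names. Joining on an equality
# expression keeps both player_api_id columns; joining on the column name
# list below keeps just one.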
striker_details = players.join(strikers, players.player_api_id == strikers.player_api_id)
striker_details.columns
striker_details.count()
striker_details = players.join(strikers, ['player_api_id'])
striker_details.show(5)
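# strikers is small, so broadcast it to every executor and avoid shuffling
# the larger players DataFrame during the join.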
from pyspark.sql.functions import broadcast
striker_details = players.select(
    "player_api_id",
    "player_name"
).join(
    broadcast(strikers),
    ['player_api_id'],
    'inner'
)
striker_details = striker_details.sort(striker_details.striker_grade.desc())
striker_details.show(5)
players.count(), player_attributes.count()
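# Pair each rating row with its player; here players is the smaller side of the
# join (each player has many attribute rows), so broadcast that side instead.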
players_heading_acc = player_attributes.select('player_api_id',
                                               'heading_accuracy') \
    .join(broadcast(players),
          player_attributes.player_api_id == players.player_api_id)
players_heading_acc.count()
players_heading_acc.columns
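# Accumulators are counters that tasks on the executors can only add to;
# their values are read back on the driver. Count players in four height buckets.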
short_count = spark.sparkContext.accumulator(0)
medium_low_count = spark.sparkContext.accumulator(0)
medium_high_count = spark.sparkContext.accumulator(0)
tall_count = spark.sparkContext.accumulator(0)
def count_players_by_height(row):
    height = float(row.height)
    if height <= 175:
        short_count.add(1)
    elif height <= 183 and height > 175:
        medium_low_count.add(1)
    elif height <= 195 and height > 183:
        medium_high_count.add(1)
    elif height > 195:
        tall_count.add(1)
players_heading_acc.foreach(lambda x: count_players_by_height(x))
all_players = [short_count.value,
               medium_low_count.value,
               medium_high_count.value,
               tall_count.value]
all_players
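# Repeat the bucket counts, but only for rows whose heading accuracy is above a threshold.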
short_ha_count = spark.sparkContext.accumulator(0)
medium_low_ha_count = spark.sparkContext.accumulator(0)
medium_high_ha_count = spark.sparkContext.accumulator(0)
tall_ha_count = spark.sparkContext.accumulator(0)
def count_players_by_height_and_heading_accuracy(row, threshold_score):
    height = float(row.height)
    ha = float(row.heading_accuracy)
    if ha <= threshold_score:
        return
    if height <= 175:
        short_ha_count.add(1)
    elif height <= 183 and height > 175:
        medium_low_ha_count.add(1)
    elif height <= 195 and height > 183:
        medium_high_ha_count.add(1)
    elif height > 195:
        tall_ha_count.add(1)
players_heading_acc.foreach(lambda x: count_players_by_height_and_heading_accuracy(x, 60))
all_players_above_threshold = [short_ha_count.value,
                               medium_low_ha_count.value,
                               medium_high_ha_count.value,
                               tall_ha_count.value]
all_players_above_threshold
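# Share of players in each height bucket whose heading accuracy clears the threshold.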
percentage_values = [short_ha_count.value / short_count.value * 100,
                     medium_low_ha_count.value / medium_low_count.value * 100,
                     medium_high_ha_count.value / medium_high_count.value * 100,
                     tall_ha_count.value / tall_count.value * 100]
percentage_values
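# Built-in accumulators handle single numbers. To accumulate a list/vector,
# supply an AccumulatorParam that defines the zero element and how to merge
# two partial values.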
from pyspark.accumulators import AccumulatorParam
class VectorAccumulatorParam(AccumulatorParam):
    def zero(self, value):
        return [0.0] * len(value)

    def addInPlace(self, v1, v2):
        for i in range(len(v1)):
            v1[i] += v2[i]
        return v1
import pyspark
from pyspark import SparkContext
sc = SparkContext.getOrCreate()
vector_accum = sc.accumulator([10.0, 20.0, 30.0], VectorAccumulatorParam())
vector_accum.value
vector_accum += [1, 2, 3]
vector_accum.value
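# A minimal sketch: the same VectorAccumulatorParam could replace the four scalar
# height counters above, accumulating every bucket in a single vector per pass.
# It assumes the players_heading_acc DataFrame and the height cut-offs used earlier.
height_buckets = sc.accumulator([0.0, 0.0, 0.0, 0.0], VectorAccumulatorParam())

def bucket_by_height(row):
    height = float(row.height)
    if height <= 175:
        height_buckets.add([1.0, 0.0, 0.0, 0.0])
    elif height <= 183:
        height_buckets.add([0.0, 1.0, 0.0, 0.0])
    elif height <= 195:
        height_buckets.add([0.0, 0.0, 1.0, 0.0])
    else:
        height_buckets.add([0.0, 0.0, 0.0, 1.0])

players_heading_acc.foreach(bucket_by_height)
height_buckets.value  # [short, medium_low, medium_high, tall]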
pa_2016.columns
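# The csv/json writers create a directory of part files named after the given path;
# coalesce(1) collapses the result to a single partition so only one part file is written.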
pa_2016.select("player_api_id", "overall_rating")\
.coalesce(1)\
.write\
.option("header", "true")\
.csv("players_overall.csv")
pa_2016.select("player_api_id", "overall_rating")\
.write\
.json("players_overall.json")