Spark data modeling preparation
Deduplication
# Initialize Spark
from pyspark.sql import SparkSession
spark = SparkSession.builder.master("local[*]").appName("shuangyu").getOrCreate()
df = spark.createDataFrame([
    (1, 144.5, 5.9, 33, 'M'),
    (2, 167.2, 5.4, 45, 'M'),
    (3, 124.1, 5.2, 23, 'F'),
    (4, 144.5, 5.9, 33, 'M'),
    (5, 133.2, 5.7, 54, 'F'),
    (3, 124.1, 5.2, 23, 'F'),
    (5, 129.2, 5.3, 42, 'M')
], ["id", "weight", "height", "age", "gender"])
# Compare the total row count with the count of distinct rows
print("count of rows: {}".format(df.count()))
print("count of distinct rows: {}".format(df.distinct().count()))
count of rows: 7
count of distinct rows: 6
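To see which rows are actually duplicated, not just how many, a quick check (a sketch, not part of the original example) is to group on every column and keep the groups that occur more than once:
import pyspark.sql.functions as fn
# Show the fully duplicated rows by grouping on all columns and filtering on the group size
(df
 .groupBy(df.columns)
 .count()
 .filter(fn.col("count") > 1)
 .show())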
# Drop fully duplicated rows
df = df.dropDuplicates()
df.show()
+---+------+------+---+------+
| id|weight|height|age|gender|
+---+------+------+---+------+
| 5| 133.2| 5.7| 54| F|
| 5| 129.2| 5.3| 42| M|
| 1| 144.5| 5.9| 33| M|
| 4| 144.5| 5.9| 33| M|
| 2| 167.2| 5.4| 45| M|
| 3| 124.1| 5.2| 23| F|
+---+------+------+---+------+
# Check whether any rows are duplicated once the id column is excluded
print("count of rows: {}".format(df.count()))
print("count of distinct rows (ignoring id): {}".format(
    df.select([c for c in df.columns if c != "id"]).distinct().count()))
count of rows: 6
count of distinct rows (ignoring id): 5
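The same comparison can also be done in a single aggregation pass. A sketch using pyspark.sql.functions (the alias names "rows" and "distinct_rows_without_id" are chosen here for illustration):
import pyspark.sql.functions as fn
# Count all rows and distinct id-excluded rows in one aggregation
df.agg(
    fn.count("*").alias("rows"),
    fn.countDistinct(*[c for c in df.columns if c != "id"]).alias("distinct_rows_without_id")
).show()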
# Two rows are identical except for their id, so drop one of them
df = df.dropDuplicates(subset = [c for c in df.columns if c != "id"])
df.show()
+---+------+------+---+------+
| id|weight|height|age|gender|
+---+------+------+---+------+
| 5| 133.2| 5.7| 54| F|
| 1| 144.5| 5.9| 33| M|
| 2| 167.2| 5.4| 45| M|
| 3| 124.1| 5.2| 23| F|
| 5| 129.2| 5.3| 42| M|
+---+------+------+---+------+
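After this step two rows still share id 5, so the id column is no longer a unique key. If a unique identifier is needed downstream, one option (a sketch; the "new_id" column name is an assumption, not from the original) is to generate a surrogate key with monotonically_increasing_id():
import pyspark.sql.functions as fn
# Add a globally unique (but not consecutive) surrogate key column
df = df.withColumn("new_id", fn.monotonically_increasing_id())
df.show()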