References:
https://blog.youkuaiyun.com/sinat_26917383/article/details/80500349
https://www.analyticsvidhya.com/blog/2016/10/spark-dataframe-and-operations/

Set difference, intersection, and union on PySpark DataFrames
import findspark
findspark.init()
import pyspark
from pyspark.sql import SparkSession
spark = SparkSession.builder \
    .appName("example") \
    .config("spark.config.option", "some-value") \
    .getOrCreate()
sc = spark.sparkContext
sentenceDataFrame = spark.createDataFrame((
    (1, "asf"),
    (2, "2143"),
    (3, "rfds")
)).toDF("label", "sentence")
sentenceDataFrame.show()
sentenceDataFrame1 = spark.createDataFrame((
    (1, "asf"),
    (2, "2143"),
    (4, "f8934y")
)).toDF("label", "sentence")
sentenceDataFrame1.show()
'''
+-----+--------+
|label|sentence|
+-----+--------+
| 1| asf|
| 2| 2143|
| 3| rfds|
+-----+--------+
+-----+--------+
|label|sentence|
+-----+--------+
| 1| asf|
| 2| 2143|
| 4| f8934y|
+-----+--------+
'''
# Set difference: subtract
newDF = sentenceDataFrame.select("sentence").subtract(sentenceDataFrame1.select("sentence"))
newDF.show()
'''
+--------+
|sentence|
+--------+
| rfds|
+--------+
'''
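subtract behaves like SQL EXCEPT DISTINCT, so duplicate rows on the left side are collapsed. If duplicates must be preserved, Spark 2.4+ also offers exceptAll; a minimal sketch reusing the DataFrames above (with these duplicate-free inputs the result is the same as subtract):

# Set difference that keeps duplicates (Spark 2.4+)
newDF_exceptAll = sentenceDataFrame.select("sentence").exceptAll(
    sentenceDataFrame1.select("sentence"))
newDF_exceptAll.show()
'''
+--------+
|sentence|
+--------+
|    rfds|
+--------+
'''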
# Intersection: intersect
newDF_intersect = sentenceDataFrame1.select("sentence").intersect(sentenceDataFrame.select("sentence"))
newDF_intersect.show()
'''
+--------+
|sentence|
+--------+
| asf|
| 2143|
+--------+
'''
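Similarly, intersect deduplicates its result (SQL INTERSECT DISTINCT). Spark 2.4+ provides intersectAll to keep duplicate matches; a sketch on the same data (row order in the output may differ):

# Intersection that keeps duplicate matches (Spark 2.4+)
newDF_intersectAll = sentenceDataFrame1.select("sentence").intersectAll(
    sentenceDataFrame.select("sentence"))
newDF_intersectAll.show()
'''
+--------+
|sentence|
+--------+
|     asf|
|    2143|
+--------+
'''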
# Union: union
newDF_union = sentenceDataFrame.select("sentence").union(sentenceDataFrame1.select("sentence"))
newDF_union.show()
'''
+--------+
|sentence|
+--------+
| asf|
| 2143|
| rfds|
| asf|
| 2143|
| f8934y|
+--------+
'''
# Union + deduplication (distinct)
newDF_union = sentenceDataFrame.select("sentence").union(sentenceDataFrame1.select("sentence")).distinct()
newDF_union.show()
'''
+--------+
|sentence|
+--------+
| rfds|
| asf|
| 2143|
| f8934y|
+--------+
'''
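union matches columns purely by position and, like SQL UNION ALL, keeps duplicates. When both DataFrames share column names but possibly in a different order, unionByName (Spark 2.3+) is the safer choice; a sketch using the full DataFrames:

# Union by column name instead of position (Spark 2.3+)
newDF_byname = sentenceDataFrame.unionByName(sentenceDataFrame1)
newDF_byname.show()
'''
+-----+--------+
|label|sentence|
+-----+--------+
|    1|     asf|
|    2|    2143|
|    3|    rfds|
|    1|     asf|
|    2|    2143|
|    4|  f8934y|
+-----+--------+
'''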
from pyspark.sql.functions import lit
df = spark.createDataFrame(
    [(1, "a", 23.0), (3, "B", -23.0)], ("x1", "x2", "x3"))
df.show()
'''
+---+---+-----+
| x1| x2| x3|
+---+---+-----+
| 1| a| 23.0|
| 3| B|-23.0|
+---+---+-----+
'''
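The lit import above is not used in the snippet as written; presumably it was meant for adding a constant column. A minimal sketch (the column name "x4" and the value 0 are arbitrary examples):

# Add a constant column with lit
df_const = df.withColumn("x4", lit(0))
df_const.show()
'''
+---+---+-----+---+
| x1| x2|   x3| x4|
+---+---+-----+---+
|  1|  a| 23.0|  0|
|  3|  B|-23.0|  0|
+---+---+-----+---+
'''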
from pyspark.sql.functions import monotonically_increasing_id
df = df.withColumn("id", monotonically_increasing_id())
df.show()
'''
+---+---+-----+-----------+
| x1| x2| x3| id|
+---+---+-----+-----------+
| 1| a| 23.0| 8589934592|
| 3| B|-23.0|25769803776|
+---+---+-----+-----------+
'''
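monotonically_increasing_id guarantees unique, monotonically increasing ids, but not consecutive ones: the 64-bit value encodes the partition id in its upper bits, which is why the output above jumps straight to 8589934592. If consecutive ids are required, one option is row_number over a Window; a sketch (ordering by "x1" is an assumed choice here, use whatever column defines your order):

from pyspark.sql import Window
from pyspark.sql.functions import row_number

# Consecutive 1-based ids. Note: Window.orderBy without partitionBy pulls
# all rows into a single partition, so only do this on small DataFrames.
df_seq = df.withColumn("seq_id", row_number().over(Window.orderBy("x1")))
df_seq.show()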
from pyspark.sql import Row
l = ['jerry', 'tom']
row = Row("pid", "name")
# The original sc.parallelize(...).toDF() call reportedly raised an error here;
# building the DataFrame directly with spark.createDataFrame avoids it.
new_df = spark.createDataFrame([row(i, l[i]) for i in range(len(l))])
new_df.show()
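'''
Expected output:
+---+-----+
|pid| name|
+---+-----+
|  0|jerry|
|  1|  tom|
+---+-----+
'''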