```python
# -*- coding: utf-8 -*-
from __future__ import print_function
from pyspark.sql import SparkSession
from pyspark.sql import Row

if __name__ == "__main__":
    # Initialize the SparkSession
    spark = SparkSession \
        .builder \
        .appName("RDD_and_DataFrame") \
        .config("spark.some.config.option", "some-value") \
        .getOrCreate()
    sc = spark.sparkContext

    # Read the text file and parse each line into a Row
    lines = sc.textFile("employee.txt")
    parts = lines.map(lambda l: l.split(","))
    employee = parts.map(lambda p: Row(name=p[0], salary=int(p[1])))

    # Convert the RDD into a DataFrame
    employee_temp = spark.createDataFrame(employee)

    # Show the DataFrame contents
    employee_temp.show()

    # Register a temporary view
    employee_temp.createOrReplaceTempView("employee")

    # Filter the data with SQL
    employee_result = spark.sql(
        "SELECT name,salary FROM employee WHERE salary >= 14000 AND salary <= 20000")

    # Convert the DataFrame back into an RDD
    result = employee_result.rdd.map(
        lambda p: "name: " + p.name + " salary: " + str(p.salary)).collect()

    # Print the RDD contents
    for n in result:
        print(n)
```
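As a variant, here is a minimal sketch that supplies an explicit `StructType` schema instead of letting Spark infer the types from `Row` objects. It assumes the same hypothetical `employee.txt` layout of one `name,salary` pair per line; the app name and variable names are illustrative.

```python
from pyspark.sql import SparkSession
from pyspark.sql.types import StructType, StructField, StringType, IntegerType

spark = SparkSession.builder.appName("RDD_and_DataFrame_schema").getOrCreate()

# Explicit schema: avoids relying on Row-based type inference
schema = StructType([
    StructField("name", StringType(), True),
    StructField("salary", IntegerType(), True),
])

rows = (spark.sparkContext
        .textFile("employee.txt")          # assumed format: name,salary
        .map(lambda l: l.split(","))
        .map(lambda p: (p[0], int(p[1]))))

employee_df = spark.createDataFrame(rows, schema)
employee_df.createOrReplaceTempView("employee")
spark.sql("SELECT name, salary FROM employee "
          "WHERE salary BETWEEN 14000 AND 20000").show()
```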
Converting a DataFrame to an RDD

Calling `df.rdd` converts a DataFrame directly into an RDD of `Row` objects:

```python
result = employee_result.rdd.map(lambda p: "name: " + p.name + " salary: " + str(p.salary)).collect()
```
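A minimal sketch of what `df.rdd` yields, using a hypothetical in-memory DataFrame in place of `employee_result`: each element is a `pyspark.sql.Row`, whose fields can be read by attribute, by key, or by position.

```python
from pyspark.sql import SparkSession

spark = SparkSession.builder.appName("df_rdd_demo").getOrCreate()

# Hypothetical two-row DataFrame standing in for employee_result
df = spark.createDataFrame([("Alice", 15000), ("Bob", 18000)], ["name", "salary"])

rdd = df.rdd                     # RDD[Row]; nothing is computed until an action runs
first = rdd.first()              # Row(name='Alice', salary=15000)
print(first.name, first["salary"], first[1])   # attribute, key, and positional access
```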