#!/usr/bin/env python3
from __future__ import print_function
import sys
from pyspark import SparkContext
from pyspark.streaming import StreamingContext
import pymysql
if __name__ == "__main__":
    if len(sys.argv) != 3:
        print("Usage: NetworkWordCountStateful.py <hostname> <port>", file=sys.stderr)
        sys.exit(-1)
    sc = SparkContext(appName="PythonStreamingStatefulNetworkWordCount")
    ssc = StreamingContext(sc, 1)  # batch interval: 1 second
    # Set a checkpoint directory (required by updateStateByKey)
    ssc.checkpoint("file:///usr/local/spark/mycode/streaming/stateful/")
    # Seed the state with two initial (word, count) pairs
    initialStateRDD = sc.parallelize([(u'hello', 1), (u'world', 1)])
    # Accumulate counts across batches
    def updateFunc(new_values, last_num):
        return sum(new_values) + (last_num or 0)
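    # For example: if this batch contains ("hello", 1) twice and the saved
    # state for "hello" is 3, new_values is [1, 1] and last_num is 3, so the
    # updated state is 5. last_num is None the first time a key appears.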
    # Create a socket text stream
    lines = ssc.socketTextStream(sys.argv[1], int(sys.argv[2]))
    # Split each line into words and map every word to a (word, 1) pair
    running_counts = lines.flatMap(lambda line: line.split(" ")) \
        .map(lambda word: (word, 1)) \
        .updateStateByKey(updateFunc, initialRDD=initialStateRDD)
    # Optionally save the counts to text files or print them to the console
    # running_counts.saveAsTextFiles("file:///usr/local/spark/mycode/streaming/stateful/")
    # running_counts.pprint()
    # Write one partition of records to MySQL over a single connection
    def dbfunc(records):
        db = pymysql.connect(host="localhost", user="root",
                             password="123456", database="spark")
        cursor = db.cursor()

        def doinsert(p):
            # Parameterized query avoids SQL injection and quoting bugs
            sql = "insert into wordcount(word, count) values (%s, %s)"
            try:
                cursor.execute(sql, (str(p[0]), int(p[1])))
                db.commit()
            except Exception:
                db.rollback()

        for item in records:
            doinsert(item)
        db.close()
    # Repartition to control write parallelism, then open one database
    # connection per partition (connections cannot be serialized to workers)
    def func(rdd):
        repartitionedRDD = rdd.repartition(3)
        repartitionedRDD.foreachPartition(dbfunc)

    running_counts.foreachRDD(func)
    ssc.start()
    ssc.awaitTermination()
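Before running the job, the wordcount table must exist in the spark database. Below is a minimal one-time setup sketch, assuming the same MySQL credentials as in the script; the column types are illustrative assumptions, not taken from the original:

import pymysql

# One-time setup: create the table the streaming job writes to.
# varchar(64)/int are assumed column types; adjust them to your data.
db = pymysql.connect(host="localhost", user="root",
                     password="123456", database="spark")
try:
    with db.cursor() as cursor:
        cursor.execute(
            "create table if not exists wordcount (word varchar(64), count int)"
        )
    db.commit()
finally:
    db.close()

With the table in place, start a text source on the chosen port (for example, nc -lk 9999 in another terminal) and submit the script with that hostname and port, e.g. spark-submit NetworkWordCountStateful.py localhost 9999.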
Course link: https://study.163.com/course/courseLearn.htm?courseId=1209408816#/learn/text?lessonId=1279273524&courseId=1209408816
This article shows how to do real-time word counting with Spark Streaming: setting up the SparkContext and StreamingContext, accumulating word counts across batches with updateStateByKey, and saving the results to a MySQL database.