Counting Words in HDFS Files with Spark Streaming
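The program below uses textFileStream to monitor an HDFS directory, splits each line of newly arrived files into words, sums the counts with reduceByKey, and prints the result of every 5-second batch.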
Code
package test01

import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.DStream
import org.apache.spark.streaming.{Seconds, StreamingContext}

object HDFSInputStreamDemo extends App {
  // Run locally with all available cores and a 5-second batch interval
  private val sparkConf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("hdfsdemo")
  private val ssc = new StreamingContext(sparkConf, Seconds(5))

  // Monitor the HDFS directory for files added after the stream starts
  private val lines: DStream[String] = ssc.textFileStream("hdfs://hadoop07:9000/data/")

  // Split each line into words, pair each word with 1, and sum the counts per batch
  val words = lines.flatMap(_.split(" "))
  val wordCounts = words.map(x => (x, 1)).reduceByKey(_ + _)
  wordCounts.print()

  ssc.start()
  ssc.awaitTermination()
}
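Note: textFileStream only picks up files that appear in the monitored directory after the streaming context has started; files already sitting in hdfs://hadoop07:9000/data/ are ignored, and new files should be moved or renamed into the directory rather than written there in place, so a half-written file is never read. The following is a minimal sketch of how one might feed the job a test file from another process; the object name PushTestFileDemo, the local path /tmp/words.txt, and the HDFS staging directory /upload are assumptions for illustration, not part of the original code.

package test01

import java.net.URI
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}

object PushTestFileDemo extends App {
  // Connect to the same namenode the streaming job reads from
  val fs = FileSystem.get(new URI("hdfs://hadoop07:9000"), new Configuration())

  // Stage the local test file (hypothetical path) outside the monitored directory first
  fs.mkdirs(new Path("/upload"))
  fs.copyFromLocalFile(new Path("/tmp/words.txt"), new Path("/upload/words.txt"))

  // Rename into /data/ so the file appears atomically and is detected as a new file
  fs.rename(new Path("/upload/words.txt"), new Path("/data/words.txt"))
  fs.close()
}

Run the streaming job first, then run the sketch above (or an equivalent hdfs dfs command) while the job is up; the word counts for the new file should appear in the next 5-second batch.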