package com.spark.streaming
import java.net.InetSocketAddress
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.dstream.{DStream, ReceiverInputDStream}
import org.apache.spark.streaming.flume.{FlumeUtils, SparkFlumeEvent}
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.{SparkConf, SparkContext}
/**
 * Spark Streaming integration with Flume in push mode:
 * Flume pushes events via an Avro sink to the receiver started by this application.
 */
object SparkStreamingFlumePush {
  // Point Hadoop at a local winutils installation when running on Windows
  System.setProperty("hadoop.home.dir", "d://soft//hadoop//hadoop-2.7.3")

  // newValues: all the 1s produced for the same word by the (word, 1) pairs of the current batch
  // runningCount: the accumulated count of this word over all previous batches
  def updateFunction(newValues: Seq[Int], runningCount: Option[Int]): Option[Int] = {
    val newCount = runningCount.getOrElse(0) + newValues.sum
    Some(newCount)
  }
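  // Example: if the current batch contains "spark" three times, newValues == Seq(1, 1, 1);
  // with a previous total of Some(5), the updated state becomes Some(8).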
  def main(args: Array[String]): Unit = {
    // Configure SparkConf
    val sparkConf = new SparkConf().setAppName("SparkStreaming_Flume_Push").setMaster("local[2]")
    // Build the SparkContext
    val sc = new SparkContext(sparkConf)
    // Build the StreamingContext with the batch interval
    val scc = new StreamingContext(sc, Seconds(5))
    // Set the log level
    sc.setLogLevel("WARN")
    // Set the checkpoint directory (required by updateStateByKey to persist state between batches)
    scc.checkpoint("./")
    // Flume pushes data to this receiver.
    // The host and port are where this application runs and must match the Avro sink
    // in the Flume configuration (see the agent config sketch after the object).
    val flumeStream = FlumeUtils.createStream(scc, "192.168.1.159", 8888, StorageLevel.MEMORY_AND_DISK)
    // The payload of each Flume event is in its body; convert it to a String
    val lineStream = flumeStream.map(x => new String(x.event.getBody.array()))
    // Word count: split lines into words and keep a running total per word
    val result = lineStream.flatMap(_.split(" ")).map((_, 1)).updateStateByKey(updateFunction)
    result.print(30)
    scc.start()
    scc.awaitTermination()
  }
}
// Test and configuration details: https://blog.youkuaiyun.com/star5610/article/details/106522455
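
// A minimal sketch of the Flume agent configuration this push-mode job expects.
// The agent/component names (a1, r1, c1, k1), the spooldir source, and its path are
// assumptions for illustration; only the avro sink type, hostname and port must match
// the FlumeUtils.createStream call above.
//
//   a1.sources = r1
//   a1.channels = c1
//   a1.sinks = k1
//
//   a1.sources.r1.type = spooldir
//   a1.sources.r1.spoolDir = /root/data
//   a1.sources.r1.channels = c1
//
//   a1.channels.c1.type = memory
//
//   a1.sinks.k1.type = avro
//   a1.sinks.k1.hostname = 192.168.1.159
//   a1.sinks.k1.port = 8888
//   a1.sinks.k1.channel = c1
//
// In push mode, start this Spark application first so the receiver is listening before
// the Flume agent's avro sink tries to connect. When running outside the IDE, the
// spark-streaming-flume artifact has to be on the classpath, e.g. (version is an
// assumption, pick the one matching your Spark build):
//   spark-submit --packages org.apache.spark:spark-streaming-flume_2.11:2.2.0 ...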