import org.apache.spark.SparkConf
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.dstream.{DStream, ReceiverInputDStream}
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.log4j.{Level, Logger}
object WordBlackList {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("transformation").setMaster("local[2]")
    val ssc: StreamingContext = new StreamingContext(conf, Seconds(1))
    Logger.getLogger("org.apache.spark").setLevel(Level.WARN)

    // Checkpointing is required by updateStateByKey below
    val checkpointDirectory = "hdfs://hadoop02:9000/values"
    ssc.checkpoint(checkpointDirectory)

    val inputDStream: ReceiverInputDStream[String] = ssc.socketTextStream("hadoop01", 9090)
    val wordOneDStream: DStream[(String, Int)] = inputDStream.flatMap(_.split(",")).map((_, 1))

    // Use a few symbols to simulate the blacklist
    val fileRDD: RDD[String] = ssc.sparkContext.parallelize(List("!", "&", "*", "^", "#"))
    val reFileRDD: RDD[(String, Boolean)] = fileRDD.map((_, true))
    val broadcast: Broadcast[Array[(String, Boolean)]] = ssc.sparkContext.broadcast(reFileRDD.collect())
    val broadcastRDD: RDD[(String, Boolean)] = ssc.sparkContext.parallelize(broadcast.value)

    val resultDStream: DStream[(String, Int)] = wordOneDStream.transform(rdd => {
      // Blacklisted words come back from the left outer join with Some(true); keep only the rest
      val joinRDD: RDD[(String, (Int, Option[Boolean]))] = rdd.leftOuterJoin(broadcastRDD)
      val filterRDD: RDD[(String, (Int, Option[Boolean]))] = joinRDD.filter(tuple => tuple._2._2.isEmpty)
      val resultRDD: RDD[(String, Int)] = filterRDD.map(tuple => (tuple._1, tuple._2._1))
      resultRDD
    })

    // Running word count across batches, adding each batch's counts to the stored state
    val allCountsDStream: DStream[(String, Int)] = resultDStream.updateStateByKey(
      (newValues: Seq[Int], valueCount: Option[Int]) => Some(newValues.sum + valueCount.getOrElse(0))
    )
    allCountsDStream.print()

    ssc.start()
    ssc.awaitTermination()
    ssc.stop()
  }
}
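
// Note: collecting the blacklist, broadcasting it, and then re-parallelizing it into
// broadcastRDD only to join against it is one way to do this. A leaner variant
// (a sketch, assuming the same ssc and wordOneDStream as above) filters against the
// broadcast value directly inside transform, with no join:
//
//   val blackSet: Broadcast[Set[String]] =
//     ssc.sparkContext.broadcast(Set("!", "&", "*", "^", "#"))
//   val filteredDStream: DStream[(String, Int)] = wordOneDStream.transform { rdd =>
//     rdd.filter { case (word, _) => !blackSet.value.contains(word) }
//   }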