SparkStreaming---SparkSQL

如何在SparkStreaming中编写 SparkSQL
【目的:当业务逻辑用 RDD 处理很麻烦时,可以改用 SparkSQL 来解决】

package window

import org.apache.kafka.clients.consumer.{ConsumerConfig, ConsumerRecord}
import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Row, SparkSession}
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils, LocationStrategies}

/*
* 在 SparkStreaming 中编写 SparkSQL
* */

object SparkStreamingSQL {
  def main(args: Array[String]): Unit = {
    // Local-mode streaming context with a 2-second micro-batch interval.
    val sparkConf = new SparkConf().setMaster("local[*]").setAppName("Windows")
    val ssc = new StreamingContext(sparkConf, Seconds(2))

    // Checkpoint directory (required by stateful/windowed operations).
    ssc.checkpoint("checkpoint")

    // Kafka consumer settings: keys and values arrive as plain strings.
    val kafkaParams = Map(
      ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG -> "192.168.XXX.100:9092",
      ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG -> "org.apache.kafka.common.serialization.StringDeserializer",
      ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG -> "org.apache.kafka.common.serialization.StringDeserializer",
      //      (ConsumerConfig.AUTO_OFFSET_RESET_CONFIG->"earliest"),
      ConsumerConfig.GROUP_ID_CONFIG -> "kafkaGroup4"
    )

    // Direct stream subscribed to the "sparkKafka" topic.
    val messages: InputDStream[ConsumerRecord[String, String]] =
      KafkaUtils.createDirectStream(
        ssc,
        LocationStrategies.PreferConsistent,
        ConsumerStrategies.Subscribe(Set("sparkKafka"), kafkaParams)
      )

    // Run Spark SQL inside the streaming job: transform() hands us every
    // micro-batch as an RDD, which we turn into a DataFrame, query with SQL,
    // and convert back to an RDD[Row] (transform must return an RDD).
    val resultStream: DStream[Row] = messages.transform { batchRdd =>
      // Lazily-created singleton SparkSession, reused across batches.
      val spark: SparkSession = SparkSessionSingleton.getInstance(batchRdd.sparkContext.getConf)

      // Split every record value on whitespace and pair each word with 1.
      val pairs: RDD[(String, Int)] =
        batchRdd.flatMap(record => record.value().split("\\s+").map((_, 1)))

      // Don't forget to import the implicits needed for toDF().
      import spark.implicits._
      pairs.toDF("word", "number").createOrReplaceTempView("wc")

      // Aggregate word counts via SQL, then hand back the underlying RDD.
      spark.sql("select word,sum(number) from wc group by word").rdd
    }

    resultStream.print()

    ssc.start()
    ssc.awaitTermination()
  }
}
/**
 * Lazily-initialised, process-wide holder for a single SparkSession, so code
 * inside DStream operations can reuse one session instead of rebuilding it
 * for every micro-batch.
 */
object SparkSessionSingleton {
  // @transient so the session is never captured into a serialised closure.
  @transient private var instance: SparkSession = _

  /**
   * Returns the shared SparkSession, creating it on first use.
   *
   * Fix: the original ignored the `sparkConf` argument and called
   * `builder().getOrCreate()` with no configuration; the conf must be passed
   * to the builder (as in Spark's official streaming + DataFrames example)
   * so the session picks up the streaming application's settings.
   *
   * @param sparkConf configuration of the enclosing Spark application
   * @return the lazily-created singleton SparkSession
   */
  def getInstance(sparkConf: SparkConf): SparkSession = {
    if (null == instance) {
      instance = SparkSession.builder().config(sparkConf).getOrCreate()
    }
    instance
  }
}
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值