Kafka 0.8 vs Kafka 1.0 vs Spark 2.2.0

This post walks through how Spark Streaming integrates with Kafka 0.8 and Kafka 1.0: the required dependencies, consuming messages with the direct (receiver-less) approach, manually writing offsets back to ZooKeeper, and committing offsets back to Kafka itself.

Kafka 0.8 vs Spark 2.2.0

<!-- Spark Streaming / Kafka integration dependency, 0-8_2.11 -->
<dependency>
    <groupId>org.apache.spark</groupId>
    <artifactId>spark-streaming-kafka-0-8_2.11</artifactId>
    <version>${spark.version}</version>
</dependency>

<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka_2.10</artifactId>
    <version>0.8.2.1</version>
    <!--<version>0.10.0.0</version>-->
</dependency>
package com.xp.cn.streaming

import kafka.common.TopicAndPartition
import kafka.message.MessageAndMetadata
import kafka.serializer.StringDecoder
import kafka.utils.{ZKGroupTopicDirs, ZkUtils}
import org.I0Itec.zkclient.ZkClient
import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka.{HasOffsetRanges, KafkaUtils, OffsetRange}
import org.apache.spark.streaming.{Duration, StreamingContext}

/**
  * Created by zx on 2017/7/31.
  */
object KafkaDirectWordCountV2 {

  def main(args: Array[String]): Unit = {

    //Specify the consumer group name
    val group = "g001"

    //Create the SparkConf
    val conf = new SparkConf().setAppName("KafkaDirectWordCount").setMaster("local[2]")

    //Create the StreamingContext and set the batch interval
    val ssc = new StreamingContext(conf, Duration(5000))

    //Specify the topic to consume
    val topic = "wwcc"

    //Specify the Kafka broker list (with the direct approach the Spark Streaming tasks connect straight to the Kafka partitions and use the lower-level consumer API, which is more efficient)
    val brokerList = "xupan001:9092,xupan002:9092,xupan003:9092"

    //Specify the ZooKeeper quorum, used later when updating the consumed offsets (Redis or MySQL could be used instead to record offsets; see the JDBC sketch after this listing)
    val zkQuorum = "xupan001:2181,xupan002:2181,xupan003:2181"

    //The set of topic names used when creating the stream; Spark Streaming can consume several topics at once
    val topics: Set[String] = Set(topic)

    //Create a ZKGroupTopicDirs object; it describes the ZooKeeper directory used to save offsets for this group and topic
    val topicDirs = new ZKGroupTopicDirs(group, topic)
    //The resulting ZooKeeper path, e.g. "/g001/offsets/wwcc/"
    val zkTopicPath = s"${topicDirs.consumerOffsetDir}"

    //Prepare the Kafka parameters
    val kafkaParams = Map(
      "metadata.broker.list" -> brokerList,
      "group.id" -> group,
      //Start reading from the earliest available offset
      "auto.offset.reset" -> kafka.api.OffsetRequest.SmallestTimeString
    )

    //Create a ZooKeeper client from the quorum string
    //It reads the saved offsets from ZooKeeper and is used to update them later
    val zkClient = new ZkClient(zkQuorum)

    //Check whether this path already has child nodes (child nodes exist if we previously saved an offset for each partition), e.g.
    // /g001/offsets/wwcc/0/10001
    // /g001/offsets/wwcc/1/30001
    // /g001/offsets/wwcc/2/10001
    //zkTopicPath  -> /g001/offsets/wwcc/
    val children = zkClient.countChildren(zkTopicPath)

    var kafkaStream: InputDStream[(String, String)] = null

    //If ZooKeeper already holds saved offsets, use them as the starting positions of the kafkaStream
    var fromOffsets: Map[TopicAndPartition, Long] = Map()

    //If offsets have been saved before
    if (children > 0) {
      for (i <- 0 until children) {
        // e.g. /g001/offsets/wwcc/0/10001

        //Read the offset stored under /g001/offsets/wwcc/${i}
        val partitionOffset = zkClient.readData[String](s"$zkTopicPath/${i}")
        //The (topic, partition) pair, e.g. wwcc/0
        val tp = TopicAndPartition(topic, i)
        //Add each partition's offset to fromOffsets
        // e.g. wwcc/0 -> 10001
        fromOffsets += (tp -> partitionOffset.toLong)
      }
      //Key: the Kafka message key, value: e.g. "hello tom hello jerry"
      //The handler transforms every Kafka message into a (key, message) tuple
      val messageHandler = (mmd: MessageAndMetadata[String, String]) => (mmd.key(), mmd.message())
   
      //Create the direct DStream through KafkaUtils (the fromOffsets argument makes it resume from the offsets computed above)
      //Type parameters: [key type, value type, key decoder, value decoder, record type (String, String)]
      kafkaStream = KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder, (String, String)](ssc, kafkaParams, fromOffsets, messageHandler)
    } else {
      //If no offsets were saved, start from the latest (largest) or earliest (smallest) offset according to kafkaParams
      kafkaStream = KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder](ssc, kafkaParams, topics)
    }

    //The offset ranges of the current batch
    var offsetRanges = Array[OffsetRange]()

    //With the direct approach, offsets are only available on the RDDs inside the KafkaDStream, so we must not apply other DStream transformations first
    //Instead, call foreachRDD on kafkaStream to grab each RDD's offset ranges, and then operate on that RDD
    //This iterates over the KafkaRDDs contained in the KafkaDStream, one batch at a time
    //The function passed to kafkaStream.foreachRDD is invoked on the Driver
    //rdd.foreach, in contrast, runs on the Executors
    kafkaStream.foreachRDD { kafkaRDD =>
      //Only a KafkaRDD can be cast to HasOffsetRanges to obtain its offset ranges
      offsetRanges = kafkaRDD.asInstanceOf[HasOffsetRanges].offsetRanges
      val lines: RDD[String] = kafkaRDD.map(_._2)

      //Operate on the RDD and trigger an action
      //foreachPartition runs on the Executors
      lines.foreachPartition(partition =>
        partition.foreach(x => {
          println(x)
        })
      )

      for (o <- offsetRanges) {
        //  /g001/offsets/wwcc/0
        val zkPath = s"${topicDirs.consumerOffsetDir}/${o.partition}"
        //Save this partition's untilOffset to ZooKeeper
        //  e.g. /g001/offsets/wwcc/0/20000
        ZkUtils.updatePersistentPath(zkClient, zkPath, o.untilOffset.toString)
      }
    }

    ssc.start()
    ssc.awaitTermination()

  }

}
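
The comment on zkQuorum above notes that Redis or MySQL could record the offsets instead of ZooKeeper. The following is only a rough sketch of the MySQL variant, not part of the original post: the kafka_offsets table, its columns, and the JDBC connection settings are made-up assumptions, and a MySQL JDBC driver is assumed to be on the classpath. The ZkUtils.updatePersistentPath loop above could then be replaced by a call such as MysqlOffsetStore.saveOffsets(group, offsetRanges).

import java.sql.DriverManager

import org.apache.spark.streaming.kafka.OffsetRange

object MysqlOffsetStore {

  //Assumed connection settings; adjust to the actual environment
  val url = "jdbc:mysql://xupan001:3306/streaming"
  val user = "root"
  val password = "root"

  /** Upsert each partition's untilOffset into a (group_id, topic, partition, untilOffset) table. */
  def saveOffsets(group: String, offsetRanges: Array[OffsetRange]): Unit = {
    val conn = DriverManager.getConnection(url, user, password)
    try {
      val stmt = conn.prepareStatement(
        "REPLACE INTO kafka_offsets (group_id, topic, `partition`, untilOffset) VALUES (?, ?, ?, ?)")
      for (o <- offsetRanges) {
        stmt.setString(1, group)
        stmt.setString(2, o.topic)
        stmt.setInt(3, o.partition)
        stmt.setLong(4, o.untilOffset)
        stmt.executeUpdate()
      }
      stmt.close()
    } finally {
      conn.close()
    }
  }
}

On startup the saved offsets would be read back from the same table to build the fromOffsets map, exactly as the ZooKeeper version does with zkClient.readData.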

 

 

 

 

Kafka 1.0 vs Spark 2.2.0
 

<!-- kafka -->
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka_2.10</artifactId>
    <!--<version>0.8.2.1</version>-->
    <version>0.10.0.0</version>
</dependency>

<!-- Spark Streaming / Kafka integration dependency, 0-10_2.11 -->
<dependency>
    <groupId>org.apache.spark</groupId>
    <artifactId>spark-streaming-kafka-0-10_2.11</artifactId>
    <version>${spark.version}</version>
</dependency>

 

 

package com.xp.cn.streaming

import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.log4j.{Level, Logger}
import org.apache.spark.streaming.kafka010.{HasOffsetRanges, CanCommitOffsets, KafkaUtils}
import org.apache.spark.streaming.kafka010.LocationStrategies.PreferConsistent
import org.apache.spark.streaming.kafka010.ConsumerStrategies.Subscribe
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.SparkConf

/**
 * Created by xupan on 2017/12/18.
 * Spark Streaming with kafka_2.10-0.10.2.1
 * Unlike the 0.8 integration, there is no need to manually write offsets back to ZooKeeper:
 * by default the 0.10/1.0 integration commits offsets back to Kafka itself
 */
object KafkaStreamingV2 {


  def main(args: Array[String]) {
    Logger.getLogger("org.apache.spark").setLevel(Level.ERROR)

    //Create the conf; Spark Streaming needs at least two threads: one to receive data and one to process it
    val conf = new SparkConf().setAppName("KafkaStreamingV2").setMaster("local[4]")

    //Create the StreamingContext with a 10-second batch interval
    val ssc = new StreamingContext(conf, Seconds(10))

    val group = "v2group"
    val topic = "v2topic"

    //Configure the Kafka parameters
    val kafkaParams = Map[String,Object](
      "bootstrap.servers" -> "xupan001:9092,xupan002:9092,xupan003:9092",
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> group,
      "auto.offset.reset" -> "earliest", //if Kafka holds no committed offset for this group, start from the beginning; otherwise resume from the committed offset
      "enable.auto.commit" -> (false: java.lang.Boolean) //do not auto-commit; offsets are committed manually after processing
    )

    //Several topics can be consumed at once
    val topics = Array(topic)

    //Read from Kafka with the direct approach; offsets are read from Kafka itself
    val stream = KafkaUtils.createDirectStream[String,String](
      ssc,
      PreferConsistent, //location strategy (when the Kafka brokers and the Spark executors run on the same machines, data is read from the optimal, i.e. local, partitions)
      Subscribe[String, String](topics, kafkaParams) //subscription strategy (topics can also be matched by a regex such as topic-*; see the SubscribePattern sketch after this listing)
    )

    stream.foreachRDD(rdd => {

      if (!rdd.isEmpty()) {
        val offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges


        //==================== business logic goes below ====================
        rdd.foreachPartition(part => {
          part.foreach(line => {
            val value = line.value()
            val key = line.key()
            println("key : " + key + "  value : " + value)
          })
        })
        //==================== business logic goes above ====================


        //Asynchronously commit the processed offsets back to Kafka: commitAsync(offsetRanges: Array[OffsetRange])
        stream.asInstanceOf[CanCommitOffsets].commitAsync(offsetRanges)
      }


    })

    ssc.start()
    ssc.awaitTermination()
  }

}
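
The subscription-strategy comment above mentions that topics can also be matched by a regular expression. The following is only a minimal sketch of that variant, not part of the original post: the topic pattern v2topic-.* and the object name are hypothetical, and everything else (brokers, group, parameters) simply mirrors the listing above.

package com.xp.cn.streaming

import java.util.regex.Pattern

import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.SparkConf
import org.apache.spark.streaming.kafka010.ConsumerStrategies.SubscribePattern
import org.apache.spark.streaming.kafka010.LocationStrategies.PreferConsistent
import org.apache.spark.streaming.kafka010.KafkaUtils
import org.apache.spark.streaming.{Seconds, StreamingContext}

object KafkaStreamingPatternSketch {

  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("KafkaStreamingPatternSketch").setMaster("local[4]")
    val ssc = new StreamingContext(conf, Seconds(10))

    val kafkaParams = Map[String, Object](
      "bootstrap.servers" -> "xupan001:9092,xupan002:9092,xupan003:9092",
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> "v2group",
      "auto.offset.reset" -> "earliest",
      "enable.auto.commit" -> (false: java.lang.Boolean)
    )

    //Subscribe to every topic whose name matches the regex instead of a fixed topic list
    val stream = KafkaUtils.createDirectStream[String, String](
      ssc,
      PreferConsistent,
      SubscribePattern[String, String](Pattern.compile("v2topic-.*"), kafkaParams)
    )

    //Print each record; offset handling would be the same as in the listing above
    stream.foreachRDD(rdd => rdd.foreach(record => println(record.key() + " -> " + record.value())))

    ssc.start()
    ssc.awaitTermination()
  }
}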

Reposted from: https://my.oschina.net/u/2253438/blog/1591652
