Reading Kafka Data with Spark

Version: Spark 3.0
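Both examples below assume the spark-streaming-kafka-0-10 integration is on the classpath. A rough sbt sketch, assuming Scala 2.12 as typically used with Spark 3.0 (versions are an assumption, adjust to your cluster):

// Assumed build coordinates for Spark 3.0 / Scala 2.12
libraryDependencies ++= Seq(
  "org.apache.spark" %% "spark-streaming" % "3.0.0" % "provided",
  "org.apache.spark" %% "spark-streaming-kafka-0-10" % "3.0.0"
)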

Approach 1: Spark obtains a Kafka connection
import java.util.Properties

import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils, LocationStrategies}

object MyKafkaUtil {

  // 1. Load the configuration (PropertiesUtil is a project-local helper that reads config.properties from the classpath)
  private val properties: Properties = PropertiesUtil.load("config.properties")

  // 2. Broker list used to initialize the connection to the cluster
  val broker_list: String = properties.getProperty("kafka.broker.list")

  // 3. Kafka consumer configuration
  val kafkaParam: Map[String, Object] = Map(
    "bootstrap.servers" -> broker_list,
    "key.deserializer" -> classOf[StringDeserializer],
    "value.deserializer" -> classOf[StringDeserializer],
    // Consumer group
    "group.id" -> "commerce-consumer-group",
    // Used when there is no initial offset, or the current offset no longer exists on any server;
    // "latest" resets the offset to the newest available record
    "auto.offset.reset" -> "latest",
    // If true, the consumer's offsets are committed automatically in the background,
    // but data can be lost if Kafka goes down; if false, offsets must be maintained manually
    "enable.auto.commit" -> (true: java.lang.Boolean)
  )

  // Create a DStream that returns the received input data.
  // LocationStrategies: create consumers for the given topics and cluster addresses.
  // LocationStrategies.PreferConsistent: distribute partitions evenly across all executors.
  // ConsumerStrategies: choose how Kafka consumers are created and configured on the driver and executors.
  // ConsumerStrategies.Subscribe: subscribe to a collection of topics.
  def getKafkaStream(topic: String, ssc: StreamingContext): InputDStream[ConsumerRecord[String, String]] = {
    val dStream: InputDStream[ConsumerRecord[String, String]] = KafkaUtils.createDirectStream[String, String](
      ssc,
      LocationStrategies.PreferConsistent,
      ConsumerStrategies.Subscribe[String, String](Array(topic), kafkaParam)
    )
    dStream
  }
}
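
For reference, a minimal sketch of how this utility could be driven from a streaming application; the app name, the local master, the 5-second batch interval, and the topic name "my-topic" are illustrative assumptions, not part of the original code.

import org.apache.spark.SparkConf
import org.apache.spark.streaming.{Seconds, StreamingContext}

object MyKafkaUtilDemo {
  def main(args: Array[String]): Unit = {
    // Local master and a 5-second batch interval, chosen only for illustration
    val conf = new SparkConf().setAppName("MyKafkaUtilDemo").setMaster("local[*]")
    val ssc = new StreamingContext(conf, Seconds(5))

    // "my-topic" is a placeholder topic name
    val stream = MyKafkaUtil.getKafkaStream("my-topic", ssc)

    // Print the value of each received record
    stream.map(_.value()).print()

    ssc.start()
    ssc.awaitTermination()
  }
}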
    
Approach 2: a self-contained Spark Streaming consumer
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.SparkConf
import org.apache.spark.streaming._
import org.apache.spark.streaming.kafka010._

object SparkKafkaDemo {
  def main(args: Array[String]) {
    if (args.length < 3) {
      System.err.println(s"""
                            |Usage: SparkKafkaDemo <brokers> <topics> <interval>
                            |  <brokers> is a list of one or more Kafka brokers
                            |  <topics> is a list of one or more kafka topics to consume from
                            |  <interval> is the batch interval in seconds
        """.stripMargin)
      System.exit(1)
    }
    val Array(brokers, topics, interval) = args

    val sparkConf = new SparkConf().setAppName("E-MapReduce Demo 9: Spark Kafka Demo (Scala)")
    val ssc = new StreamingContext(sparkConf, Seconds(interval.toInt))

    val kafkaParams = Map[String, Object](
      "bootstrap.servers" -> brokers,
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> "mugen1",
      "auto.offset.reset" -> "earliest",
      "enable.auto.commit" -> (false: java.lang.Boolean),
      "security.protocol" -> "SASL_PLAINTEXT",
      "sasl.mechanism" -> "GSSAPI",
      "sasl.kerberos.service.name" -> "kafka"
    )

    val messages = KafkaUtils.createDirectStream[String, String](
      ssc,
      LocationStrategies.PreferConsistent,
      ConsumerStrategies.Subscribe[String, String](topics.split(","), kafkaParams)
    )

    // Get the lines, split them into words, count the words and print
    val lines = messages.map(_.value)
    val words = lines.flatMap(_.split(" "))
    val wordCounts = words.map(x => (x, 1L)).reduceByKey(_ + _)
    wordCounts.print()

    // Start the computation
    ssc.start()
    ssc.awaitTermination()
  }
}
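
Note that this example sets enable.auto.commit to false but never commits offsets, so each run starts from wherever auto.offset.reset points. One common option is to commit offsets back to Kafka after each batch via the HasOffsetRanges and CanCommitOffsets interfaces from spark-streaming-kafka-0-10. A minimal sketch, assuming the per-batch processing is moved inside foreachRDD and the block is added before ssc.start():

import org.apache.spark.streaming.kafka010.{CanCommitOffsets, HasOffsetRanges}

messages.foreachRDD { rdd =>
  // Capture the offset ranges of this batch before processing it
  val offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges

  // ... process the RDD here (e.g. the word count above) ...

  // Commit asynchronously once the batch has been handled; this gives
  // at-least-once semantics, so the processing should be idempotent
  messages.asInstanceOf[CanCommitOffsets].commitAsync(offsetRanges)
}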