Streaming-kafka-mysql (scalikejdbc)

This article shows how to use Scala with ScalikeJDBC to process real-time Kafka data in Spark Streaming and persist the results to MySQL. The same MySQL database also stores the consumer offsets of each batch, so the job combines real-time processing with durable, resumable state.
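The code stores offsets in a MySQL table named streaming_offset. The original post does not include the table's DDL, so the sketch below is an assumed schema: the four columns (topic, groupid, offset, partition) are inferred from the order in which the REPLACE INTO statement later binds its parameters, and a composite primary key is added so that REPLACE INTO behaves as an upsert. The connection URL and credentials are placeholders.

import scalikejdbc.{ConnectionPool, DB, SQL}

object CreateOffsetTable {
  def main(args: Array[String]): Unit = {
    // Placeholder connection settings; the real job reads db.url / db.user / db.password
    // from application.conf via Typesafe Config (see OffsetManager below).
    ConnectionPool.singleton("jdbc:mysql://hadoop01:3306/streaming?useSSL=false", "root", "123456")

    DB.autoCommit { implicit session =>
      // `offset` and `partition` are quoted because they collide with reserved
      // words in newer MySQL versions.
      SQL("""
        CREATE TABLE IF NOT EXISTS streaming_offset (
          `topic`     VARCHAR(255) NOT NULL,
          `groupid`   VARCHAR(255) NOT NULL,
          `offset`    BIGINT       NOT NULL,
          `partition` INT          NOT NULL,
          PRIMARY KEY (`topic`, `groupid`, `partition`)
        )
      """).execute().apply()
    }
  }
}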


package kafka

import java.sql.DriverManager

import com.typesafe.config.ConfigFactory
import org.apache.kafka.common.TopicPartition
import org.apache.spark.streaming.kafka010.OffsetRange
import scalikejdbc.{ConnectionPool, DB, SQL}

import scala.collection.mutable.Map

object OffsetManager {

  val config = ConfigFactory.load()

  // DB.readOnly / DB.localTx below go through ScalikeJDBC's default connection pool,
  // so it must be initialized once before the first query. A modern MySQL connector
  // registers its driver automatically; otherwise load it with Class.forName first.
  ConnectionPool.singleton(config.getString("db.url"),
    config.getString("db.user"),
    config.getString("db.password"))

  def getConn = {
    DriverManager.getConnection(config.getString("db.url"),
      config.getString("db.user"),
      config.getString("db.password"))
  }

  /**
    * Fetches the stored offsets for the given consumer group and topic.
    */
  def apply(groupid: String, topic: String) = {
/*    val conn = getConn
    val pstmt = conn.prepareStatement("SELECT * FROM streaming_offset WHERE groupid=? AND topic=?")
    pstmt.setString(1,groupid)
    pstmt.setString(2,topic)
    val rs = pstmt.executeQuery()

    val offsetRange = Map[TopicPartition,Long]()
    while (rs.next()){
      offsetRange += new TopicPartition(rs.getString("topic"),rs.getInt("partition")) -> rs.getLong("offset")
    }
    rs.close()
    pstmt.close()
    conn.close()
    offsetRange*/

    /**
      * The same query rewritten with ScalikeJDBC.
      */
    DB.readOnly { implicit session =>
      SQL("SELECT * FROM streaming_offset WHERE groupid=? AND topic=?").bind(groupid, topic).map(rs => {
        // offset must be read as a Long so the result can be handed to
        // ConsumerStrategies.Subscribe as a Map[TopicPartition, Long]
        new TopicPartition(rs.string("topic"), rs.int("partition")) -> rs.long("offset")
      }).list().apply()
    }.toMap
  }

  /**
    * Saves the end offsets of the current batch.
    * @param groupid     consumer group id
    * @param offsetRange offset ranges of the batch that has just been processed
    */
  def saveCurrentBatchOffset(groupid: String, offsetRange: Array[OffsetRange]) = {
/*    val conn = getConn
    val pstmt = conn.prepareStatement("replace into streaming_offset values(?,?,?,?)")
    for (o <- offsetRange){
      pstmt.setString(1,o.topic)
      pstmt.setString(2,groupid)
      pstmt.setLong(3,o.untilOffset)
      pstmt.setInt(4,o.partition)
      pstmt.executeUpdate()
    }
    pstmt.close()
    conn.close()*/

    DB.localTx { implicit session =>
      for (o <- offsetRange) {
        // REPLACE INTO upserts one row per (topic, groupid, partition)
        SQL("replace into streaming_offset values(?,?,?,?)").bind(o.topic, groupid, o.untilOffset, o.partition).update().apply()
      }
    }
  }
}
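OffsetManager loads its connection settings through Typesafe Config, so an application.conf with the three keys read above must be on the classpath. The snippet below is only a quick way to see those keys in one place; the URL and credentials are placeholders, and in the real job they belong in application.conf rather than in code.

import com.typesafe.config.{Config, ConfigFactory}

object OffsetManagerConfigCheck {
  def main(args: Array[String]): Unit = {
    // The same HOCON keys that OffsetManager reads; values here are placeholders.
    val sample: Config = ConfigFactory.parseString(
      """
        |db.url = "jdbc:mysql://hadoop01:3306/streaming?useSSL=false"
        |db.user = "root"
        |db.password = "123456"
      """.stripMargin)

    println(sample.getString("db.url"))
  }
}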
The streaming job below (a second file in the same package) uses OffsetManager to resume from the offsets stored in MySQL and to save the end offsets of every batch back after processing:

package kafka

import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka010._
import org.apache.spark.streaming.{Seconds, StreamingContext}

import scala.collection.mutable.Map

object SSCDirectKafka010_MySql_Offset {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("SSCDirectKafka010_MySql_Offset").setMaster("local[*]")
    // Limits how much data is pulled from Kafka per batch. A value of 2 does not mean
    // 2 records per batch; the per-batch maximum is 2 * number of partitions * batch interval (seconds).
    conf.set("spark.streaming.kafka.maxRatePerPartition","2")

    // Stop the StreamingContext gracefully on shutdown; without this, stopping the job may lose data.
    conf.set("spark.streaming.stopGracefullyOnShutdown","true")

    val ssc = new StreamingContext(conf,Seconds(3))

    // consumer group id
    val groupId = "day_04"
    // Kafka consumer parameters
    val kafkaParams: Map[String, Object] = Map[String, Object](
      "bootstrap.servers" -> "hadoop01:9092,hadoop02:9092,hadoop03:9092",
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> groupId,
      "auto.offset.reset" -> "earliest",
      //"auto.commit.interval.ms"-> "1000",设置为1秒提交一次offset,默认是5秒
      "enable.auto.commit" -> (false: java.lang.Boolean)  //是否自动递交偏移量
    )
    // topic(s) to subscribe to
    val topic = "user"
    val topics = Array(topic)

    val offsetManage = OffsetManager(groupId,topic)

    val stream: InputDStream[ConsumerRecord[String, String]] = if (offsetManage.size > 0) {
      // Offsets exist in MySQL, so this group has consumed before: resume from them.
      KafkaUtils.createDirectStream[String, String](
        ssc,
        LocationStrategies.PreferConsistent,
        ConsumerStrategies.Subscribe[String, String](topics, kafkaParams, offsetManage)
      )
    } else {
      // No stored offsets: first run for this group, start according to auto.offset.reset.
      KafkaUtils.createDirectStream[String, String](
        ssc,
        LocationStrategies.PreferConsistent,
        ConsumerStrategies.Subscribe[String, String](topics, kafkaParams)
      )
    }

    stream.foreachRDD(rdd => {
      // Offset ranges of the current batch.
      val offsetRanges: Array[OffsetRange] = rdd.asInstanceOf[HasOffsetRanges].offsetRanges

      // "Process" the data; here each record is simply printed.
      rdd.foreach(println(_))

      // Save the offsets of this batch to MySQL (runs on the driver, after the batch is processed).
      OffsetManager.saveCurrentBatchOffset(groupId, offsetRanges)
    })
    ssc.start()
    ssc.awaitTermination()
  }
}
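The intro promises that the processed data itself also ends up in MySQL, while the job above only prints each record. The sketch below is one possible (not the post's) way to do that, written as a drop-in replacement for the stream.foreachRDD block in main above: it assumes a hypothetical table user_events(line VARCHAR) exists, writes each record's value on the executors via plain JDBC inside foreachPartition, and only then saves the offsets, so the stored offsets are never ahead of the written data. URL, credentials and the table name are placeholders.

    stream.foreachRDD(rdd => {
      val offsetRanges: Array[OffsetRange] = rdd.asInstanceOf[HasOffsetRanges].offsetRanges

      // Write the records on the executors; a connection is opened per partition because
      // neither a java.sql.Connection nor the driver-side ScalikeJDBC pool is serializable.
      rdd.foreachPartition(records => {
        val conn = java.sql.DriverManager.getConnection(
          "jdbc:mysql://hadoop01:3306/streaming?useSSL=false", "root", "123456") // placeholders
        val pstmt = conn.prepareStatement("INSERT INTO user_events(line) VALUES(?)")
        try {
          records.foreach(record => {
            pstmt.setString(1, record.value())
            pstmt.executeUpdate()
          })
        } finally {
          pstmt.close()
          conn.close()
        }
      })

      // Only after the batch has been written are its offsets persisted.
      OffsetManager.saveCurrentBatchOffset(groupId, offsetRanges)
    })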

 
