Data format
{"name":"Michael", "age":25}
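Each Kafka message carries one JSON object in this shape. For reference, here is a minimal producer sketch for pushing such a test record into the topic. It assumes the new producer API from the kafka-clients library (pulled in transitively by the Kafka dependency below); the topic name newjson and broker address hd-3:9092 are taken from the streaming job further down, and the object name JsonProducerSketch is just for illustration:

import java.util.Properties
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}

// hypothetical helper for producing a test record; not part of the original job
object JsonProducerSketch {
  def main(args: Array[String]): Unit = {
    val props = new Properties()
    // broker address, taken from the streaming job below
    props.put("bootstrap.servers", "hd-3:9092")
    props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer")
    props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer")
    val producer = new KafkaProducer[String, String](props)
    // one JSON object per message, matching the format shown above
    producer.send(new ProducerRecord[String, String]("newjson", """{"name":"Michael", "age":25}"""))
    producer.close()
  }
}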
pom.xml dependencies
<properties>
<maven.compiler.source>1.8</maven.compiler.source>
<maven.compiler.target>1.8</maven.compiler.target>
<scala.version>2.11.8</scala.version>
<spark.version>2.2.0</spark.version>
<hadoop.version>2.7.3</hadoop.version>
</properties>
<dependencies>
<!-- Scala library dependency -->
<dependency>
<groupId>org.scala-lang</groupId>
<artifactId>scala-library</artifactId>
<version>${scala.version}</version>
</dependency>
<!-- Spark core dependency -->
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-core_2.11</artifactId>
<version>${spark.version}</version>
</dependency>
<!-- pin the version of the hadoop-client API -->
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-client</artifactId>
<version>${hadoop.version}</version>
</dependency>
<!-- Spark SQL dependency -->
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-sql_2.11</artifactId>
<version>${spark.version}</version>
</dependency>
<!-- Hive support, required if Spark is to integrate with Hive -->
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-hive_2.11</artifactId>
<version>${spark.version}</version>
</dependency>
<!-- Spark Streaming dependency -->
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-streaming_2.11</artifactId>
<version>${spark.version}</version>
</dependency>
<!-- Spark Streaming integration with Kafka (0.8 direct API) -->
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-streaming-kafka-0-8_2.11</artifactId>
<version>${spark.version}</version>
</dependency>
<!-- MySQL JDBC driver dependency -->
<dependency>
<groupId>mysql</groupId>
<artifactId>mysql-connector-java</artifactId>
<version>5.1.38</version>
</dependency>
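<!-- Redis client dependency -->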
<dependency>
<groupId>redis.clients</groupId>
<artifactId>jedis</artifactId>
<version>2.9.0</version>
</dependency>
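<!-- HBase dependencies (client, server, common) -->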
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-client</artifactId>
<version>1.1.2</version>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-server</artifactId>
<version>1.1.2</version>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-common</artifactId>
<version>1.1.2</version>
</dependency>
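<!-- test dependencies -->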
<dependency>
<groupId>org.testng</groupId>
<artifactId>testng</artifactId>
<version>7.0.0-beta1</version>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>4.12</version>
<scope>compile</scope>
</dependency>
</dependencies>
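Note that the snippet above only declares properties and dependencies; to compile the Scala sources with Maven you also need a Scala compiler plugin (for example, scala-maven-plugin) in the <build> section, which is omitted here.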
Code implementation
import kafka.common.TopicAndPartition
import kafka.message.MessageAndMetadata
import kafka.serializer.StringDecoder
import kafka.utils.{ZKGroupTopicDirs, ZkUtils}
import org.I0Itec.zkclient.ZkClient
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.client.Put
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapred.TableOutputFormat
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.mapred.JobConf
import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka.{HasOffsetRanges, KafkaUtils, OffsetRange}
import org.apache.spark.streaming.{Duration, StreamingContext}
/**
* Created by zx on 2017/7/31.
*/
object demo {
def main(args: Array[String]): Unit = {
// consumer group name
val group = "g1"
// create the SparkConf
val conf = new SparkConf().setAppName("OrderCount").setMaster("local[4]")
// create the StreamingContext with a 5-second batch interval
val ssc = new StreamingContext(conf, Duration(5000))
// name of the topic to consume
val topic = "newjson"
// Kafka broker list (the direct approach connects Spark tasks straight to the Kafka partitions and consumes through the lower-level API, which is more efficient)
val brokerList = "hd-3:9092"
// ZooKeeper quorum, used later to persist the consumed offsets (Redis or MySQL could be used for offset storage instead)
val zkQuorum = "hd-2:2181,hd-3:2181,hd-4:2181"
// set of topic names used when creating the stream; Spark Streaming can consume several topics at once
val topics: Set[String] = Set(topic)
// ZKGroupTopicDirs points at the ZooKeeper directory where this group's offsets for the topic are stored
val topicDirs = new ZKGroupTopicDirs(group, topic)
// the resulting ZooKeeper path, here "/consumers/g1/offsets/newjson"
val zkTopicPath = s"${topicDirs.consumerOffsetDir}"
val hbaseConf = HBaseConfiguration.create()
hbaseConf.set("hbase.zookeeper.quorum", "hd-2,hd-3,hd-4")
hbaseConf.set("hbase.zookeeper.property.clientPort", "2181")
val tableName = "circle"
val jobConf = new JobConf(hbaseConf)
jobConf.setOutputFormat(classOf[TableOutputFormat])
jobConf.set(TableOutputFormat.OUTPUT_TABLE, tableName)
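// note: saveAsHadoopDataset (used below) takes the old mapred-style JobConf,
// which is why org.apache.hadoop.hbase.mapred.TableOutputFormat is imported rather than the mapreduce variant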
// Kafka consumer parameters
val kafkaParams = Map(
//"key.deserializer" -> classOf[StringDeserializer],
//"value.deserializer" -> classOf[StringDeserializer],
//"deserializer.encoding" -> "GB2312", // encoding used when reading data from Kafka
"metadata.broker.list" -> brokerList,
"group.id" -> group,
// if no offset is stored, start reading from the earliest ("smallest") offset
"auto.offset.reset" -> kafka.api.OffsetRequest.SmallestTimeString
)
// create a ZooKeeper client from the quorum address;
// it reads saved offsets from ZooKeeper and writes updated offsets back
val zkClient = new ZkClient(zkQuorum)
// number of partition nodes saved under the offset path
val children = zkClient.countChildren(zkTopicPath)
var kafkaStream: InputDStream[(String, String)] = null
// if ZooKeeper already holds saved offsets, they are used as the starting position of the kafkaStream
var fromOffsets: Map[TopicAndPartition, Long] = Map()
// offsets were saved before
// note: this offset lookup runs on the Driver
if (children > 0) {
for (i <- 0 until children) {
// each partition's offset is stored at a path like /consumers/g1/offsets/newjson/0
val partitionOffset = zkClient.readData[String](s"$zkTopicPath/${i}")
val tp = TopicAndPartition(topic, i)
// add every partition's offset to fromOffsets, e.g. partition 0 -> 10001
fromOffsets += (tp -> partitionOffset.toLong)
}
// key: the Kafka message key; value: the payload, here a JSON string such as {"name":"Michael", "age":25}
// this transforms every Kafka message into a (kafka key, message) tuple
val messageHandler = (mmd: MessageAndMetadata[String, String]) => (mmd.key(), mmd.message())
// create a direct DStream through KafkaUtils (the fromOffsets argument makes consumption resume from the offsets recovered above)
// type parameters: [key type, value type, key decoder, value decoder, result type]
kafkaStream = KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder, (String, String)](ssc, kafkaParams, fromOffsets, messageHandler)
} else {
// if no offsets were saved, start from the latest ("largest") or earliest ("smallest") offset, as configured in kafkaParams
kafkaStream = KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder](ssc, kafkaParams, topics)
}
// offset ranges of the current batch
var offsetRanges = Array[OffsetRange]()
// the body of foreachRDD runs on the Driver; the RDD operations inside it are executed on the executors
kafkaStream.foreachRDD { kafkaRDD =>
// process the batch only if the RDD actually contains data
if(!kafkaRDD.isEmpty()) {
// only a KafkaRDD can be cast to HasOffsetRanges to obtain the offset ranges
offsetRanges = kafkaRDD.asInstanceOf[HasOffsetRanges].offsetRanges
val lines: RDD[String] = kafkaRDD.map(_._2)
// use Spark SQL to parse the JSON records
val spark1 = SparkSession.builder().config(kafkaRDD.sparkContext.getConf).getOrCreate()
val df = spark1.read.json(lines)
df.createOrReplaceTempView("temp")
// age is inferred as LongType by the JSON reader, so it must be read as Long, not String
val ans = spark1.sql("select name, age from temp").rdd.map(x => {
(x.getString(0), x.getLong(1).toString)
})
// write the results to HBase: row key = name, column cf:keyno = age
ans.map(line => {
val put = new Put(Bytes.toBytes(line._1))
put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("keyno"), Bytes.toBytes(line._2))
(new ImmutableBytesWritable, put)
}).saveAsHadoopDataset(jobConf)
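// saveAsHadoopDataset runs as a Spark job: the Put objects above are created on the executors
// and written to the "circle" table through the TableOutputFormat configured in jobConf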
// update the offsets in ZooKeeper (this loop runs on the Driver)
for (o <- offsetRanges) {
// this partition's offset node, e.g. /consumers/g1/offsets/newjson/0
val zkPath = s"${topicDirs.consumerOffsetDir}/${o.partition}"
// persist the end offset of the processed batch for this partition,
// e.g. /consumers/g1/offsets/newjson/0 -> 20000
ZkUtils.updatePersistentPath(zkClient, zkPath, o.untilOffset.toString)
}
}
}
ssc.start()
ssc.awaitTermination()
}
}
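Since the master is set to local[4], the job can be started directly from the IDE. After a few batches have been processed, the writes can be checked against the circle table. Below is a minimal verification sketch, assuming the hbase-client 1.1.2 API declared in the pom; the object name VerifyHBaseWrite and the row key Michael (taken from the sample record above) are just for illustration:

import org.apache.hadoop.hbase.{HBaseConfiguration, TableName}
import org.apache.hadoop.hbase.client.{ConnectionFactory, Get}
import org.apache.hadoop.hbase.util.Bytes

// hypothetical helper for checking the output table; not part of the original job
object VerifyHBaseWrite {
  def main(args: Array[String]): Unit = {
    val conf = HBaseConfiguration.create()
    // same quorum settings as the streaming job
    conf.set("hbase.zookeeper.quorum", "hd-2,hd-3,hd-4")
    conf.set("hbase.zookeeper.property.clientPort", "2181")
    val conn = ConnectionFactory.createConnection(conf)
    try {
      val table = conn.getTable(TableName.valueOf("circle"))
      // the streaming job uses the name field as the row key
      val result = table.get(new Get(Bytes.toBytes("Michael")))
      val age = Option(result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("keyno")))
      println(s"Michael -> ${age.map(v => Bytes.toString(v)).getOrElse("<no row>")}")
      table.close()
    } finally {
      conn.close()
    }
  }
}

If the pipeline works end to end, this prints the age written by the streaming job, e.g. Michael -> 25.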