package com.uplooking.bigdata.streaming.p2;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.storage.StorageLevel;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaPairDStream;
import org.apache.spark.streaming.api.java.JavaPairReceiverInputDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.kafka.KafkaUtils;
import scala.Tuple2;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
/**
* Java version of Spark Streaming integrated with Kafka via the Receiver mechanism.
* Requires the Spark/Kafka Maven dependency: spark-streaming-kafka_2.10 (see the dependency sketch below).
*
* Consumes the Kafka topic: spark-kafka
*/
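/*
 * A minimal Maven dependency sketch for the receiver-based integration.
 * The version below is an assumption; use the release matching your Spark build:
 *
 *   <dependency>
 *     <groupId>org.apache.spark</groupId>
 *     <artifactId>spark-streaming-kafka_2.10</artifactId>
 *     <version>1.6.3</version>
 *   </dependency>
 */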
public class JavaSparkKafkaReceiverOps {
public static void main(String[] args) {
SparkConf conf = new SparkConf();
conf.setMaster("local[2]");
conf.setAppName(JavaSparkKafkaReceiverOps.class.getSimpleName());
JavaSparkContext sc = new JavaSparkContext(conf);
JavaStreamingContext jssc = new JavaStreamingContext(sc, Durations.seconds(2));
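// Reliability note: the receiver-based approach can lose buffered data on
// executor failure. A hedged mitigation sketch (the write-ahead-log flag must
// be set on the SparkConf before the contexts are created; the checkpoint
// path is a placeholder assumption):
// conf.set("spark.streaming.receiver.writeAheadLog.enable", "true");
// jssc.checkpoint("hdfs:///tmp/spark-checkpoint");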
/**
* Read data from Kafka via the StreamingContext (jssc).
* Create an input stream that pulls messages from Kafka Brokers.
* @param ssc StreamingContext object
* @param zkQuorum Zookeeper quorum (hostname:port,hostname:port,..)
* @param groupId The group id for this consumer
* @param topics Map of (topic_name -> numPartitions) to consume. Each partition is consumed
* in its own thread
* @param storageLevel Storage level to use for storing the received objects
* (default: StorageLevel.MEMORY_AND_DISK_SER_2)
* @return DStream of (Kafka message key, Kafka message value)
*/
String zkQuorum = "master:2181,slave01:2181,slave02:2181";
String groupId = "spark-group-01";
Map<String, Integer> topics = new HashMap<>();
topics.put("spark-kafka", 1);
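// Note: the map value is the number of receiver threads for the topic, not
// the number of Kafka partitions. A sketch with more consumer threads
// (the thread count here is an illustrative assumption):
// topics.put("spark-kafka", 3);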
/**
 * The first element of each returned pair is the key of the Kafka message;
 * the second element is the value associated with that key.
 */
JavaPairReceiverInputDStream<String, String> inputDStream = KafkaUtils.createStream(jssc,
zkQuorum,
groupId,
topics
);
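// A hedged alternative using the createStream overload that takes an explicit
// storage level (matching the documented default above):
// JavaPairReceiverInputDStream<String, String> inputDStream =
//         KafkaUtils.createStream(jssc, zkQuorum, groupId, topics,
//                 StorageLevel.MEMORY_AND_DISK_SER_2());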
// Split each message value (t._2()) into words; with spark-streaming-kafka_2.10
// (Spark 1.x) flatMap accepts a function returning an Iterable.
JavaDStream<String> wordsDStream = inputDStream.flatMap(t -> Arrays.asList(t._2().split(" ")));
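// Note: on Spark 2.x the FlatMapFunction contract changes to return an
// Iterator, so the equivalent call would be (a sketch, not needed here):
// inputDStream.flatMap(t -> Arrays.asList(t._2().split(" ")).iterator());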
// Map each word to a (word, 1) pair
JavaPairDStream<String, Integer> pairDStream = wordsDStream.mapToPair(word -> new Tuple2<>(word, 1));
// Sum the counts for each word within the current batch
JavaPairDStream<String, Integer> retDS = pairDStream.reduceByKey((v1, v2) -> v1 + v2);
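// A hedged variant: word counts over a sliding window instead of per batch
// (window and slide durations are illustrative assumptions):
// JavaPairDStream<String, Integer> windowedDS = pairDStream.reduceByKeyAndWindow(
//         (v1, v2) -> v1 + v2, Durations.seconds(10), Durations.seconds(2));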
retDS.print();
jssc.start();
jssc.awaitTermination();
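// To shut down cleanly from another thread (or a shutdown hook), stop the
// context gracefully so in-flight batches finish; a sketch, where the flags
// mean (stop the SparkContext too, stop gracefully):
// jssc.stop(true, true);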
}
}