Java开发Kafka生产者&消费者demo
前提:
虚拟机开启zookeeper,kafka
查看是否开启:
ps -ef|grep zookeeper
jps
开启命令:
kafka:【123】# kafka-server-start.sh /opt/software/kafka_2.12-0.11.0.3/config/server.properties
查看所有主题:
kafka-topics.sh --zookeeper node1:2181 --list
查看主题详情信息
kafka-topics.sh --zookeeper node1:2181,node2:2181,node3:2181 --describe --topic userlog
创建主题
kafka-topics.sh --zookeeper node1:2181 --create --replication-factor 2 --partitions 3 --topic userlog
创建生产者
kafka-console-producer.sh --broker-list node1:9092,node2:9092,node3:9092 --topic userlog
创建消费者
kafka-console-consumer.sh --zookeeper node1:2181,node2:2181,node3:2181 --from-beginning --topic userlog
pom文件:
依赖中的 version 必须和服务器上安装的 Kafka 版本保持一致(本例为 0.11.0.3),否则客户端与 broker 可能出现协议不兼容问题
<!-- Kafka dependencies: both versions must match the broker's Kafka version (0.11.0.3 here). -->
<dependencies>
<!-- Server-side/Scala artifact (kafka_2.12 = Kafka built for Scala 2.12). -->
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka_2.12</artifactId>
<version>0.11.0.3</version>
</dependency>
<!-- Java client API: KafkaProducer / KafkaConsumer classes used in the demo below. -->
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka-clients</artifactId>
<version>0.11.0.3</version>
</dependency>
</dependencies>
生产者
生产者配置常量类:
/**
 * Property-key constants for configuring a Kafka producer.
 *
 * <p>Each value is the official configuration name understood by the
 * {@code kafka-clients} producer (see {@code ProducerConfig}). Unknown keys are
 * silently ignored by the client, so the spelling here matters.
 */
public final class ConstantProducer {

    public static final String BOOTSTRAP_SERVERS = "bootstrap.servers";
    public static final String ACKS = "acks";
    public static final String RETRIES = "retries";
    public static final String BATCH_SIZE = "batch.size";
    public static final String LINGER_MS = "linger.ms";
    // FIX: the real config key is "buffer.memory" (dot, not dash). The previous
    // value "buffer-memory" was an unknown key, so the intended 32 MB buffer
    // setting in KafkaProducer was silently dropped.
    public static final String BUFFER_MEMORY = "buffer.memory";
    public static final String KEY_SERIALIZER_CLASS = "key.serializer";
    public static final String VALUE_SERIALIZER_CLASS = "value.serializer";

    // Constants-only holder; not meant to be instantiated.
    private ConstantProducer() {
    }
}
生产者类:
/**
 * Demo producer that sends ten string records to the {@code JavaKafka} topic.
 *
 * <p>Note: this class shadows {@code org.apache.kafka.clients.producer.KafkaProducer},
 * so the client type is referenced by its fully qualified name.
 */
public class KafkaProducer extends Thread {

    /** Underlying Kafka client; parameterized instead of the previous raw type. */
    private final org.apache.kafka.clients.producer.KafkaProducer<String, String> producer;

    /** Topic the demo publishes to. */
    private static final String TOPIC = "JavaKafka";

    /** Builds the client with a minimal string-serializing configuration. */
    public KafkaProducer() {
        Properties properties = new Properties();
        properties.put(ConstantProducer.BOOTSTRAP_SERVERS, "192.168.110.101:9092");
        properties.put(ConstantProducer.ACKS, "all");
        properties.put(ConstantProducer.RETRIES, 0);
        // FIX: 16384 (16 KB) is the intended/standard default batch size; 16385 was a typo.
        properties.put(ConstantProducer.BATCH_SIZE, 16384);
        properties.put(ConstantProducer.LINGER_MS, 1);
        properties.put(ConstantProducer.BUFFER_MEMORY, 33554432);
        properties.put(ConstantProducer.KEY_SERIALIZER_CLASS, "org.apache.kafka.common.serialization.StringSerializer");
        properties.put(ConstantProducer.VALUE_SERIALIZER_CLASS, "org.apache.kafka.common.serialization.StringSerializer");
        producer = new org.apache.kafka.clients.producer.KafkaProducer<>(properties);
    }

    /**
     * Sends ten "hello message i" records keyed by their index, then closes the producer.
     * The producer is now closed in a finally block so it is released even if a send fails.
     */
    public void sendProducer() {
        try {
            for (int i = 0; i < 10; i++) {
                String key = String.valueOf(i);
                String data = "hello message " + key;
                try {
                    // FIX: send() is asynchronous; the old code printed SUCCESS~ before any
                    // acknowledgement. Blocking on the returned Future reports real success.
                    producer.send(new ProducerRecord<>(TOPIC, key, data)).get();
                    System.out.println("SUCCESS~");
                } catch (InterruptedException e) {
                    // Restore the interrupt flag and stop sending.
                    Thread.currentThread().interrupt();
                    break;
                } catch (java.util.concurrent.ExecutionException e) {
                    System.err.println("send failed: " + e.getCause());
                }
            }
        } finally {
            producer.close();
        }
    }

    public static void main(String[] args) {
        new KafkaProducer().sendProducer();
    }
}
消费者
消费者配置常量类:
/**
 * Property-key constants for configuring a Kafka (new, 0.9+) consumer.
 *
 * <p>Each value is the official configuration name understood by the
 * {@code kafka-clients} consumer (see {@code ConsumerConfig}).
 */
public final class ConstantConsumer {

    public static final String BOOTSTRAP_SERVERS = "bootstrap.servers";

    /**
     * Uniquely identifies the group this consumer process belongs to; processes
     * sharing the same group.id form one consumer group.
     */
    public static final String GROUP_ID = "group.id";

    /** If "true", offsets are committed automatically in the background. */
    public static final String ENABLE_AUTO_COMMIT = "enable.auto.commit";

    /**
     * How often offsets are auto-committed, in milliseconds (not seconds).
     * With the new consumer API used here, offsets are committed to Kafka
     * itself, not to ZooKeeper.
     */
    public static final String AUTO_COMMIT_INTERVAL_MS = "auto.commit.interval.ms";

    /**
     * Session timeout in milliseconds: if no heartbeat reaches the group
     * coordinator within this window, the consumer is considered dead and a
     * rebalance is triggered.
     */
    public static final String SESSION_TIMEOUT_MS = "session.timeout.ms";

    /**
     * What to do when there is no committed offset. Valid values for the new
     * consumer are:
     * earliest - reset to the earliest available offset
     * latest   - reset to the latest offset
     * none     - throw an exception to the consumer
     * (The old "smallest"/"largest" names belong to the legacy consumer.)
     */
    public static final String AUTO_OFFSET_RESET = "auto.offset.reset";

    public static final String KEY_DESERIALIZER = "key.deserializer";
    public static final String VALUE_DESERIALIZER = "value.deserializer";

    // Constants-only holder; not meant to be instantiated.
    private ConstantConsumer() {
    }
}
消费者类:
/**
 * Demo consumer that subscribes to the {@code JavaKafka} topic and prints every
 * record it receives, forever.
 */
public class KafkaConsumers extends Thread {

    /** Underlying Kafka client; instance field instead of a static assigned in the ctor. */
    private final KafkaConsumer<String, String> consumer;

    /** Topic the demo reads from. */
    private static final String TOPIC = "JavaKafka";

    /** Builds the client with a minimal string-deserializing configuration. */
    public KafkaConsumers() {
        Properties properties = new Properties();
        // FIX: the consumer previously pulled this key from ConstantProducer; use the
        // consumer's own constants class for consistency.
        properties.put(ConstantConsumer.BOOTSTRAP_SERVERS, "192.168.110.102:9092");
        // The group.id must match group.id in the broker's kafka/config/consumer.properties
        // (test-consumer-group), otherwise this consumer will not receive messages.
        properties.put(ConstantConsumer.GROUP_ID, "test-consumer-group");
        properties.put(ConstantConsumer.ENABLE_AUTO_COMMIT, "true");
        properties.put(ConstantConsumer.AUTO_COMMIT_INTERVAL_MS, 1000);
        properties.put(ConstantConsumer.SESSION_TIMEOUT_MS, 30000);
        // Start from the earliest offset when the group has no committed position.
        properties.put(ConstantConsumer.AUTO_OFFSET_RESET, "earliest");
        properties.put(ConstantConsumer.KEY_DESERIALIZER, "org.apache.kafka.common.serialization.StringDeserializer");
        properties.put(ConstantConsumer.VALUE_DESERIALIZER, "org.apache.kafka.common.serialization.StringDeserializer");
        consumer = new KafkaConsumer<>(properties);
    }

    /**
     * Polls indefinitely and prints key/value/topic for every record.
     * This method never returns; stop the process to end consumption.
     */
    public void getConsumers() {
        consumer.subscribe(Arrays.asList(TOPIC));
        while (true) {
            // Block up to 1000 ms waiting for new records.
            ConsumerRecords<String, String> records = consumer.poll(1000);
            for (ConsumerRecord<String, String> record : records) {
                System.out.println("key:" + record.key() + ", value: " + record.value() + ", topic: " + record.topic());
            }
        }
    }

    public static void main(String[] args) {
        new KafkaConsumers().getConsumers();
    }
}