1. Producer
1.1 Message Send Flow
Kafka's Producer sends messages asynchronously. Two threads take part in a send: the main thread and the Sender thread, plus a shared buffer, the RecordAccumulator.
The main thread writes messages into the RecordAccumulator; the Sender thread continuously pulls messages from the RecordAccumulator and sends them to the Kafka broker.
- batch.size: the Sender only sends data once this many bytes have accumulated in a batch.
- linger.ms: if a batch has not reached batch.size, the Sender sends it anyway after waiting linger.ms.
- If both parameters are configured, whichever condition is met first triggers the send (see the sketch below).
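These two settings trade latency for throughput. A minimal sketch of how they are set on the producer (the values here are illustrative, not recommendations):
Properties props = new Properties();
props.put("batch.size", 16384); // send once a batch reaches 16 KB
props.put("linger.ms", 5);      // or after 5 ms, whichever comes first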
1.2 Asynchronous Send API
1. pom.xml
<dependencies>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka-clients</artifactId>
<version>0.11.0.0</version>
</dependency>
</dependencies>
2. Classes used
- KafkaProducer: the producer object used to send data
- ProducerConfig: provides the configuration parameter keys
- ProducerRecord: every message to send must be wrapped in a ProducerRecord object
Without a callback
package top.midworld;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import java.util.Properties;
import java.util.concurrent.ExecutionException;
public class ProducerTest {
public static void main(String[] args) throws ExecutionException, InterruptedException {
// Wrap the configuration in Properties; the keys can also be taken from constants on the ProducerConfig class
Properties props = new Properties();
props.put("bootstrap.servers", "hadoop1:9092");
props.put("acks", "all");
props.put("retries", 1); // 重试次数
props.put("batch.size", 16384); // 批次大小
props.put("linger.ms", 1); // 等待时间
props.put("buffer.memory", 33554432); // 缓冲区大小
props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
String topic = "first";
KafkaProducer<String, String> producer = new KafkaProducer<>(props);
for (int i = 0; i <= 10; i++) {
// ProducerRecord(String topic, K key, V value)
producer.send(new ProducerRecord<>(topic, "kafka_test", "kafka test: " + Integer.toString(i)));
}
producer.close();
}
}
Each message to be sent is wrapped in a ProducerRecord; the constructor used here takes three arguments: topic, key, and value.
Consumer output:
[hadoop@hadoop2 kafka]$ ./bin/kafka-console-consumer.sh --bootstrap-server hadoop2:9092 --from-beginning --topic first
first
second
kafka test: 0
kafka test: 1
kafka test: 2
kafka test: 3
kafka test: 4
kafka test: 5
kafka test: 6
kafka test: 7
kafka test: 8
kafka test: 9
kafka test: 10
With a callback
producer.send() can take a callback, which is invoked asynchronously once the Producer receives the ack. It has two parameters, RecordMetadata and Exception: if Exception is null the send succeeded, otherwise it failed. Failed sends are retried automatically (up to the configured retries), so there is no need to retry manually.
package top.midworld;
import org.apache.kafka.clients.producer.*;
import java.util.Properties;
import java.util.concurrent.ExecutionException;
public class ProducerAsyncCallback {
public static void main(String[] args) throws ExecutionException, InterruptedException {
Properties props = new Properties();
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "hadoop1:9092");
props.put(ProducerConfig.ACKS_CONFIG, "all");
props.put(ProducerConfig.RETRIES_CONFIG, 1); // number of retries
props.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384); // batch size in bytes
props.put(ProducerConfig.LINGER_MS_CONFIG, 1); // max wait before sending a partial batch, in ms
props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432); // total buffer memory in bytes
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
String topic = "first";
KafkaProducer<String, String> producer = new KafkaProducer<>(props);
for (int i = 0; i <= 10; i++) {
// ProducerRecord(String topic, K key, V value)
producer.send(new ProducerRecord<String, String>(topic, "kafka_test", "kafka_sender_callback: " + Integer.toString(i)), new Callback() {
@Override
public void onCompletion(RecordMetadata recordMetadata, Exception e) {
// callback invoked when the send is acknowledged
if (e == null) {
System.out.println("success->" + recordMetadata.offset());
} else {
System.out.println("failed");
e.printStackTrace();
}
}
});
}
producer.close();
}
}
Consumer output:
[hadoop@hadoop2 kafka]$ ./bin/kafka-console-consumer.sh --bootstrap-server hadoop2:9092 --from-beginning --topic first
first
second
kafka test: 0
kafka test: 1
kafka test: 2
kafka test: 3
kafka test: 4
kafka test: 5
kafka test: 6
kafka test: 7
kafka test: 8
kafka test: 9
kafka test: 10
kafka_sender_callback: 0
kafka_sender_callback: 1
kafka_sender_callback: 2
kafka_sender_callback: 3
kafka_sender_callback: 4
kafka_sender_callback: 5
kafka_sender_callback: 6
kafka_sender_callback: 7
kafka_sender_callback: 8
kafka_sender_callback: 9
kafka_sender_callback: 10
1.3 Synchronous Send
A synchronous send simply calls get() on the Future<RecordMetadata> returned by send(), blocking the current thread until the broker acknowledges the record:
package top.midworld;
import org.apache.kafka.clients.producer.*;
import java.util.Properties;
import java.util.concurrent.ExecutionException;
public class ProducerSync {
public static void main(String[] args) throws ExecutionException, InterruptedException {
Properties props = new Properties();
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "hadoop1:9092");
props.put(ProducerConfig.ACKS_CONFIG, "all");
props.put(ProducerConfig.RETRIES_CONFIG, 1); // number of retries
props.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384); // batch size in bytes
props.put(ProducerConfig.LINGER_MS_CONFIG, 1); // max wait before sending a partial batch, in ms
props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432); // total buffer memory in bytes
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
String topic = "first";
KafkaProducer<String, String> producer = new KafkaProducer<>(props);
for (int i = 0; i <= 10; i++) {
// ProducerRecord(String topic, K key, V value)
producer.send(
new ProducerRecord<>("first", "kafka_sync_test", "kafka_sync_test: " + Integer.toString(i))
).get(); // get() blocks until the send completes
}
producer.close();
}
}
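Because send() returns a Future<RecordMetadata>, the synchronous variant can also inspect where the record was written; a small sketch, where record stands for the ProducerRecord built above:
RecordMetadata meta = producer.send(record).get();
// topic(), partition() and offset() describe where the record landed
System.out.printf("topic=%s, partition=%d, offset=%d%n", meta.topic(), meta.partition(), meta.offset());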
1.4 Sending to a Specific Partition
// Send to partition 1 of topic "first": ProducerRecord(topic, partition, key, value)
producer.send(new ProducerRecord<String, String>("first", 1, "kafka_test", "kafka_sender_callback: " + Integer.toString(i)),
        (metadata, e) -> { if (e != null) e.printStackTrace(); });
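For reference, the main ProducerRecord constructor overloads in the 0.11 client are shown schematically below; when no partition is given, the partitioner picks one (hash of the key, or round-robin across partitions when the key is null):
new ProducerRecord<>(topic, partition, timestamp, key, value);
new ProducerRecord<>(topic, partition, key, value);
new ProducerRecord<>(topic, key, value); // partition derived from hash(key)
new ProducerRecord<>(topic, value);      // no key: partitions chosen round-robin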
2. Consumer
2.1 Automatic Offset Commit
package top.midworld;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import java.util.Arrays;
import java.util.Properties;
/*
Automatic offset commit
*/
public class ConsumerAutoOffset {
public static void main(String[] args) {
Properties props = new Properties();
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "hadoop1:9092");
props.put(ConsumerConfig.GROUP_ID_CONFIG, "test1");
props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true"); // commit offsets automatically
props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "10000"); // commit interval in ms
props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); // read from the beginning, like --from-beginning
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(props);
consumer.subscribe(Arrays.asList("first"));
while (true) {
// poll a batch of messages from the broker
ConsumerRecords<String, String> records = consumer.poll(100);
for (ConsumerRecord<String, String> record : records) {
System.out.printf("offset = %d, key = %s, value = %s%n", record.offset(), record.key(), record.value());
}
}
}
}
2.2 Manual Offset Commit
Automatic commit is purely time-based, which makes it hard to control exactly when offsets are committed, so Kafka also provides an API for committing offsets manually:
- Synchronous commit (commitSync): blocks the current thread until the commit succeeds, and retries on failure
- Asynchronous commit (commitAsync): has no retry mechanism, so the commit may fail
What both have in common is that they commit the highest offset of the batch returned by the current poll.
Duplicate consumption and missed messages
Whether offsets are committed synchronously or asynchronously, data may be missed or consumed more than once: committing the offset before processing the data can cause missed messages, while processing the data before committing the offset can cause duplicate consumption, as the sketch below illustrates.
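In code terms, the ordering decides the delivery guarantee; a minimal sketch (process is a hypothetical handler, records is the batch returned by poll):
// Commit first, then process: at-most-once (a crash after the commit loses the batch)
consumer.commitSync();
process(records);
// Process first, then commit: at-least-once (a crash before the commit re-delivers the batch)
process(records);
consumer.commitSync();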
2.2.1 Synchronous Commit
package top.midworld;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import java.util.Arrays;
import java.util.Properties;
public class ConsumerAutoSyncOffset {
public static void main(String[] args) {
Properties props = new Properties();
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "hadoop1:9092");
props.put(ConsumerConfig.GROUP_ID_CONFIG, "test1");
props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false"); // disable automatic offset commit
// props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "10000"); // commit interval in ms (unused here)
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(props);
consumer.subscribe(Arrays.asList("first"));
while (true) {
// poll a batch of messages
ConsumerRecords<String, String> records = consumer.poll(100);
for (ConsumerRecord<String, String> record : records) {
System.out.printf("offset = %d, key = %s, value = %s%n", record.offset(), record.key(), record.value());
}
// synchronous commit: the current thread blocks until the offset commit succeeds
consumer.commitSync();
}
}
}
2.2.2 Asynchronous Commit
Synchronous commits hurt throughput, so in practice asynchronous commits are used more often:
package top.midworld;
import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.TopicPartition;
import java.util.Arrays;
import java.util.Map;
import java.util.Properties;
public class ConsumerAutoAsyncOffset {
public static void main(String[] args) {
Properties props = new Properties();
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "hadoop1:9092");
props.put(ConsumerConfig.GROUP_ID_CONFIG, "test1");
props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false"); // disable automatic offset commit
// props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "10000"); // commit interval in ms (unused here)
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(props);
consumer.subscribe(Arrays.asList("first"));
while (true) {
// poll a batch of messages
ConsumerRecords<String, String> records = consumer.poll(100);
for (ConsumerRecord<String, String> record : records) {
System.out.printf("offset = %d, key = %s, value = %s%n", record.offset(), record.key(), record.value());
}
// asynchronous commit: does not block; the callback fires when the commit completes
consumer.commitAsync(new OffsetCommitCallback() {
@Override
public void onComplete(Map<TopicPartition, OffsetAndMetadata> offsets, Exception e) {
if (e != null) {
System.out.println("Commit offset failed for: " + offsets);
}
}
});
}
}
}
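A common pattern not shown above is to combine the two: commitAsync inside the loop for throughput, plus one final commitSync on shutdown so the last offsets are committed reliably. A sketch, assuming a running flag and a hypothetical process handler:
try {
    while (running) {
        ConsumerRecords<String, String> records = consumer.poll(100);
        for (ConsumerRecord<String, String> record : records) {
            process(record); // hypothetical handler
        }
        consumer.commitAsync(); // fast, fire-and-forget, no retry
    }
} finally {
    try {
        consumer.commitSync(); // retries until it succeeds, before closing
    } finally {
        consumer.close();
    }
}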
2.2.3 Example: Persisting Consumed Data to a Database
package top.midworld;
import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.TopicPartition;
import java.util.*;
public class ConsumerBatchToDb {
public static void main(String[] args) {
Properties props = new Properties();
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "hadoop1:9092");
props.put(ConsumerConfig.GROUP_ID_CONFIG, "test4");
props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false"); // disable automatic offset commit
props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "10000"); // commit interval in ms (ignored when auto commit is off)
// props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); // read from the beginning, like --from-beginning
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(props);
consumer.subscribe(Arrays.asList("first"));
final int minBatchSize = 26;
List<ConsumerRecord<String, String>> buffer = new ArrayList<>();
while (true) {
ConsumerRecords<String, String> records = consumer.poll(1000);
for (ConsumerRecord<String, String> record : records) {
buffer.add(record);
// once the batch is large enough, write it to the db and commit the offset synchronously
if (buffer.size() >= minBatchSize) {
System.out.println("*********************");
insertIntoDb(buffer);
consumer.commitSync();
buffer.clear();
}
}
}
}
// write the batch to the db (simulated here by printing)
public static void insertIntoDb(List<ConsumerRecord<String, String>> consumerRecords) {
System.out.println("正在插入 DB 中...");
for (ConsumerRecord<String, String> consumerRecord : consumerRecords) {
System.out.println("offset: " + consumerRecord.offset() + " key: " + consumerRecord.key() + " value: " + consumerRecord.value());
}
}
}
2.3 Consuming from a Specific Partition
package top.midworld;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import java.util.Arrays;
import java.util.Properties;
public class ConsumerPartition {
public static void main(String[] args) {
Properties props = new Properties();
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "hadoop1:9092");
props.put(ConsumerConfig.GROUP_ID_CONFIG, "test4");
props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false"); // 是否自动提交 offset
props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "10000"); // 提交间隔,毫秒
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(props);
// consume only partition 1 of topic first
TopicPartition p = new TopicPartition("first", 1);
consumer.assign(Arrays.asList(p));
while (true) {
ConsumerRecords<String, String> records = consumer.poll(1000);
for (ConsumerRecord<String, String> record : records) {
System.out.printf("offset = %d, key = %s, value = %s%n", record.offset(), record.key(), record.value());
}
consumer.commitSync();
}
}
}
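assign() can be combined with seek() to start reading a partition from a specific offset; a small sketch (the offset value 42 is illustrative):
TopicPartition p = new TopicPartition("first", 1);
consumer.assign(Arrays.asList(p));
consumer.seek(p, 42L); // start reading partition 1 at offset 42
After the seek, poll() proceeds from that offset as usual.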