Maven dependency
<dependency>
    <groupId>org.springframework.kafka</groupId>
    <artifactId>spring-kafka</artifactId>
    <version>2.2.2.RELEASE</version>
</dependency>
Configuration file
Configure the producer and consumer properties separately.
spring:
  kafka:
    # Cluster addresses
    bootstrap-servers: node1:6667,node2:6667,node3:6667
    # Producer configuration
    producer:
      # Number of retries
      retries: 3
      # Acknowledgement level
      # acks=0   the producer does not wait for any acknowledgement; the send is considered successful as soon as the message is handed off
      # acks=1   the send is considered successful once the leader partition has written the message to its local log
      # acks=all the send is considered successful only after the leader's follower replicas have also replicated the message
      acks: 1
      # Maximum size of a batch, in bytes
      batch-size: 65536
      # Total memory (bytes) available to the producer for buffering records waiting to be sent;
      # a batch is handed to Kafka once it reaches batch-size or linger.ms expires
      buffer-memory: 524288
      # Key serializer class
      key-serializer: org.apache.kafka.common.serialization.StringSerializer
      # Value serializer class
      value-serializer: org.apache.kafka.common.serialization.StringSerializer
    # Consumer configuration
    consumer:
      # Default consumer group
      group-id: consumer1
      # Auto-commit offsets, default true
      enable-auto-commit: true
      # Auto-commit interval, in ms
      auto-commit-interval: 100
      # Maximum number of records returned by a single poll
      max-poll-records: 100
      # Key deserializer class
      key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      # Value deserializer class
      value-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      # What to do when Kafka has no initial offset for the group or the offset is out of range
      # earliest: reset to the smallest offset in the partition
      # latest: reset to the latest offset in the partition (only newly produced records are consumed)
      # none: throw an exception if any partition has no committed offset
      auto-offset-reset: latest
Producer
@Component
@Slf4j
public class KafkaSender<T> {

    @Resource
    private KafkaTemplate<String, String> kafkaTemplate;

    private final ObjectMapper objectMapper = new ObjectMapper();

    public void send(String topic, T msg) {
        try {
            // Serialize the payload to JSON and send it asynchronously
            ListenableFuture<SendResult<String, String>> future =
                    this.kafkaTemplate.send(topic, objectMapper.writeValueAsString(msg));
            future.addCallback(
                    success -> {},
                    failure -> log.error("Failed to send message:", failure)
            );
        } catch (JsonProcessingException e) {
            log.error("JSON serialization error", e);
        }
    }
}
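A minimal way to use this sender is to inject it into another Spring bean. The controller and endpoint below are only a hypothetical sketch; the topic name matches the listener shown next.

@RestController
public class DemoController {

    @Resource
    private KafkaSender<Map<String, Object>> kafkaSender;

    // Hypothetical endpoint that publishes a small JSON payload to the "kafka-topic" topic
    @GetMapping("/send")
    public String send() {
        Map<String, Object> msg = new HashMap<>();
        msg.put("id", 1);
        msg.put("content", "hello kafka");
        kafkaSender.send("kafka-topic", msg);
        return "ok";
    }
}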
Consumer
@Component
@Slf4j
public class KafkaConsumer {

    @KafkaListener(topics = {"kafka-topic"})
    public void listen(ConsumerRecord<?, ?> record) {
        Optional<?> kafkaMessage = Optional.ofNullable(record.value());
        if (kafkaMessage.isPresent()) {
            Object message = kafkaMessage.get();
            log.info("------------------ message = {}", message);
        }
    }
}
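Because the producer writes the payload as a JSON string, the listener usually needs to map it back to a domain type. A minimal sketch, assuming a hypothetical MessageDto POJO that matches the JSON written by KafkaSender:

@Component
@Slf4j
public class KafkaJsonConsumer {

    private final ObjectMapper objectMapper = new ObjectMapper();

    @KafkaListener(topics = {"kafka-topic"})
    public void listen(ConsumerRecord<String, String> record) {
        try {
            // MessageDto is a hypothetical POJO matching the JSON produced by KafkaSender
            MessageDto dto = objectMapper.readValue(record.value(), MessageDto.class);
            log.info("received: {}", dto);
        } catch (JsonProcessingException e) {
            log.error("Failed to deserialize message", e);
        }
    }
}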
Pushing messages to the front end via WebSocket
Manually configuring a Kafka producer
Properties props = new Properties();
props.put("bootstrap.servers", "node1:9092,node2:9092,node3:9092");
props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
props.put("acks", "1");
props.put("retries", 0);
props.put("batch.size", 65536);
props.put("linger.ms", 1);
props.put("buffer.memory", 524288);
KafkaProducer<String, String> producer = new KafkaProducer<>(props);
ProducerRecord<String, String> record = new ProducerRecord<>("topic-01", "XX");
try {
    Future<RecordMetadata> future = producer.send(record);
    future.get(); // block until the send completes, so failures surface here
} catch (Exception e) {
    // Connection errors and "no leader" errors can be resolved by retrying;
    // errors such as "message too large" are not retried and are thrown immediately
    log.error("Failed to send message:", e);
} finally {
    // The producer must be closed to flush buffers and release resources
    producer.close();
}
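future.get() blocks the calling thread until the broker responds. When throughput matters, the same producer and record from the block above can be sent asynchronously by passing a Callback to send(); a minimal sketch:

producer.send(record, (metadata, exception) -> {
    if (exception != null) {
        // The broker rejected the record or the send timed out
        log.error("Asynchronous send failed:", exception);
    } else {
        log.info("Sent to partition {} at offset {}", metadata.partition(), metadata.offset());
    }
});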
Manually configuring a Kafka consumer
public class KafkaUtil {

    public static KafkaConsumer<String, String> createKafkaConsumer(String kafkaGroupid, String kafkaServer, String topic) {
        Properties props = new Properties();
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        // Idle connection timeout in ms
        props.put("connections.max.idle.ms", 24 * 60 * 60 * 1000);
        // Isolation level: only read records from committed transactions
        props.put("isolation.level", "read_committed");
        // Commit offsets automatically
        props.put("enable.auto.commit", true);
        // How long the consumer can go without contacting the broker before it is considered dead
        props.put("session.timeout.ms", 40000);
        // Interval between heartbeats sent to the group coordinator (must be smaller than session.timeout.ms)
        props.put("heartbeat.interval.ms", 30000);
        // Maximum time the client waits for a broker response before failing the request
        props.put("request.timeout.ms", 31000);
        // Maximum number of records returned by a single poll() call, default 500
        props.put("max.poll.records", 1000);
        props.put("auto.offset.reset", "latest");
        // Consumer group id
        props.put("group.id", kafkaGroupid);
        // Kafka servers
        props.put("bootstrap.servers", kafkaServer);
        // Create the consumer
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        // Subscribe to the topic
        consumer.subscribe(Collections.singletonList(topic));
        return consumer;
    }
}
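A minimal usage sketch for this factory method, polling in a loop (the group id, server list, and topic below are placeholder values; the consumer thread shown later in the article does essentially the same thing for the WebSocket case):

KafkaConsumer<String, String> consumer =
        KafkaUtil.createKafkaConsumer("group-demo", "node1:9092,node2:9092,node3:9092", "topic-01");
try {
    while (true) {
        // Poll for up to 100 ms and print whatever came back
        ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
        for (ConsumerRecord<String, String> record : records) {
            System.out.println(record.key() + " -> " + record.value());
        }
    }
} finally {
    consumer.close();
}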
WebSocket business class
Starting the consumer thread
@OnOpen
public void onOpen(Session session, @PathParam("userId") String userId) {
    // Create the consumer
    KafkaConsumer<String, String> consumer = KafkaUtil.createKafkaConsumer(
            DateUtil.getDateToStr2(new Date()), KafkaConstant.KAFKASERVER, KafkaConstant.TOPIC);
    // Start a consumer thread for this connection
    log.info("User ({}) starts a consumer thread", userId);
    ConsumerRunnerVehicleRtd runner = new ConsumerRunnerVehicleRtd(consumer, onlineCount, this, userId);
    new Thread(runner).start();
}
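The consumer loop below keeps running only while service.isExist(userId) is true, so the WebSocket close handler should remove the user from whatever session registry WSServer keeps. A minimal sketch, assuming a hypothetical removeUser method on the same class:

@OnClose
public void onClose(@PathParam("userId") String userId) {
    // removeUser is hypothetical: removing the user makes isExist(userId) return false,
    // which lets the consumer thread leave its poll loop and close its consumer
    removeUser(userId);
    log.info("User ({}) disconnected", userId);
}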
The consumer thread
@Slf4j
public class ConsumerRunnerVehicleRtd implements Runnable {

    private KafkaConsumer<String, String> consumer;
    private ObjectMapper objectMapper = new ObjectMapper();
    private WSServer service;
    private String userId;
    private int consumerNo;

    public ConsumerRunnerVehicleRtd(KafkaConsumer<String, String> consumer, int consumerNo, WSServer service, String userId) {
        this.consumer = consumer;
        this.consumerNo = consumerNo;
        this.service = service;
        this.userId = userId;
    }

    @Override
    public void run() {
        try {
            // Keep polling while the user's WebSocket session is still open
            while (service.isExist(userId)) {
                // Poll for up to 100 ms
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
                if (records.count() > 0) {
                    log.info("-- received {} records", records.count());
                    // Iterate over the records
                    for (ConsumerRecord<String, String> record : records) {
                        String message = record.value();
                        try {
                            VehicleRtdWsBody vehicleRtdWsBody = objectMapper.readValue(message, VehicleRtdWsBody.class);
                            // The parsed body is then pushed to the front end through the user's WebSocket session (omitted here)
                        } catch (IOException e) {
                            log.error("Conversion error", e);
                        }
                    }
                }
            }
        } finally {
            consumer.close();
            log.info("User ({}) consumer -- connection closed", userId);
        }
    }
}
Ordered consumption in Kafka
1. The producer sends messages
Data written to a single partition is ordered, so when sending, the producer either specifies a key (messages with the same key go to the same partition) or targets a fixed partition; see the sketch after this list.
2. The consumer receives messages
Because the producer sends the messages in order, a single-threaded consumer processes them in order.
With multi-threaded consumption, the consumer hands the fetched messages to several threads: the messages arrive in order, but the threads run at different speeds, so ordering is lost. The fix is to create N in-memory queues that mimic Kafka partitions. After fetching a message and before processing it, the consumer hashes the key modulo N and puts the message into the corresponding queue, so all messages with the same key land in the same queue and stay ordered; N worker threads then each consume exactly one queue, which preserves the ordering.
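The controller below demonstrates the key-based approach (the user id is passed as the record key). For the fixed-partition alternative, KafkaTemplate has an overload that takes an explicit partition index; a minimal sketch, where the topic name, partition index, and key are only illustrative values:

// Send every message for this business key to partition 0 of "topic_query_2",
// so their relative order is preserved regardless of the key's hash
kafkaTemplate.send("topic_query_2", 0, "user-1", "insert user-1");
kafkaTemplate.send("topic_query_2", 0, "user-1", "update user-1");
kafkaTemplate.send("topic_query_2", 0, "user-1", "delete user-1");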
@RestController
@Slf4j
public class ShunXuConsumerMoreThread implements ApplicationRunner {

    @Resource
    private KafkaTemplate<String, String> kafkaTemplate;

    // Number of in-memory queues
    final int queueLength = 2;
    // The two in-memory queues
    Queue<Map> queueA = new ConcurrentLinkedQueue<>();
    Queue<Map> queueB = new ConcurrentLinkedQueue<>();

    /**
     * Send ordered messages: the user id is used as the record key,
     * so all messages with the same key go to the same partition.
     */
    @RequestMapping("/kafka2")
    public String testKafka2() {
        for (int userId = 0; userId < 300; userId++) {
            kafkaTemplate.send("topic_query_2", userId + "", "insert" + userId);
            kafkaTemplate.send("topic_query_2", userId + "", "update" + userId);
            kafkaTemplate.send("topic_query_2", userId + "", "delete" + userId);
        }
        return null;
    }

    /**
     * Topic consumer: put records with the same key into the same in-memory queue.
     */
    @KafkaListener(topics = "topic_query_2", groupId = "ConsumerGroupId1")
    public void p3r2ConsumerGroupId0(ConsumerRecord<?, ?> consumer) {
        // 1. Wrap the record into a map
        Map param = new HashMap();
        param.put("topic", consumer.topic());
        param.put("key", consumer.key());
        param.put("value", consumer.value());
        param.put("p", consumer.partition());
        // 2. Route records with the same key to the same queue
        //    (floorMod avoids a negative index when hashCode() is negative)
        int queueHash = Math.floorMod(consumer.key().hashCode(), queueLength);
        if (queueHash == 0) {
            queueA.add(param);
        }
        if (queueHash == 1) {
            queueB.add(param);
        }
    }

    // Start two threads that drain the in-memory queues.
    // ApplicationRunner is commonly used to run logic right after the application
    // has started, i.e. once SpringApplication.run() has finished.
    @Override
    public void run(ApplicationArguments args) {
        new Thread(() -> {
            while (true) {
                if (queueA.size() > 0) {
                    Map poll = queueA.poll();
                    // Business logic
                    System.out.println("Thread-Id: " + Thread.currentThread().getId() +
                            " topic:" + poll.get("topic") +
                            " key:" + poll.get("key") +
                            " value:" + poll.get("value") +
                            " partition:" + poll.get("p"));
                    try {
                        Thread.sleep(10);
                    } catch (InterruptedException e) {
                        e.printStackTrace();
                    }
                }
            }
        }).start();
        new Thread(() -> {
            while (true) {
                if (queueB.size() > 0) {
                    Map poll = queueB.poll();
                    // Business logic
                    System.out.println("Thread-Id: " + Thread.currentThread().getId() +
                            " topic:" + poll.get("topic") +
                            " key:" + poll.get("key") +
                            " value:" + poll.get("value") +
                            " partition:" + poll.get("p"));
                    try {
                        Thread.sleep(10);
                    } catch (InterruptedException e) {
                        e.printStackTrace();
                    }
                }
            }
        }).start();
    }
}

This article walked through configuring Spring Kafka producers and consumers, including parameters such as retries and acknowledgement levels, and showed how to push messages to the front end over WebSocket. It also discussed ordered consumption in Kafka, proposing in-memory queues that mimic partitions as a way to preserve message order.