1 监听代码
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.kafka.listener.AcknowledgingMessageListener;
import org.springframework.kafka.support.Acknowledgment;
public class KafkaLisstener implements AcknowledgingMessageListener<String, String> {

    // Single logger for this class. The original declared two identical static
    // loggers ("logger" and "log"); consolidated into one final field.
    private static final Logger log = LoggerFactory.getLogger(KafkaLisstener.class);

    // Injected by the Spring XML bean definition via <property name="gateInfoService">.
    private IGateInfoService gateInfoService;

    /**
     * Setter required for Spring XML property injection. Without it the
     * {@code <property name="gateInfoService" .../>} entry in the bean
     * definition fails at container startup.
     *
     * @param gateInfoService the gate-info service collaborator to inject
     */
    public void setGateInfoService(IGateInfoService gateInfoService) {
        this.gateInfoService = gateInfoService;
    }

    /**
     * Consumes one Kafka record and manually commits its offset.
     *
     * @param data           the consumed record; key and value are both Strings
     * @param acknowledgment handle used to commit the offset; non-null only when
     *                       the container's ackMode is a MANUAL variant
     */
    @Override
    public void onMessage(ConsumerRecord<String, String> data, final Acknowledgment acknowledgment) {
        String message = data.value();
        log.info("offset={},partition={},timestamp={}", data.offset(), data.partition(), data.timestamp());
        log.info("message={}", message);
        // Commit the offset for this record. Requires ackMode=MANUAL_IMMEDIATE
        // in ContainerProperties; otherwise acknowledgment arrives as null.
        acknowledgment.acknowledge();
    }
}
2 kafka配置
<?xml version="1.0" encoding="UTF-8"?>
<beans xmlns="http://www.springframework.org/schema/beans"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.springframework.org/schema/beans
http://www.springframework.org/schema/beans/spring-beans-3.0.xsd">
<!-- 1. Consumer properties. enable.auto.commit must resolve to false for
     manual offset commits to take effect (see note below the config). -->
<bean id="consumerProperties" class="java.util.HashMap">
<constructor-arg>
<map>
<entry key="bootstrap.servers" value="${kafka.server.ip}"/>
<entry key="group.id" value="0"/>
<entry key="enable.auto.commit" value="${enable.auto.commit}"/>
<entry key="session.timeout.ms" value="${session.timeout.ms}"/>
<entry key="key.deserializer"
value="org.apache.kafka.common.serialization.StringDeserializer"/>
<entry key="value.deserializer"
value="org.apache.kafka.common.serialization.StringDeserializer"/>
</map>
</constructor-arg>
</bean>
<!-- 2. Consumer factory built from the properties map above. -->
<bean id="consumerFactory"
class="org.springframework.kafka.core.DefaultKafkaConsumerFactory">
<constructor-arg>
<ref bean="consumerProperties"/>
</constructor-arg>
</bean>
<!-- 3. The message listener implementation. NOTE(review): setter injection of
     gateInfoService requires a setGateInfoService(...) method on the class. -->
<bean id="kafkaConsumerService" class="com.ylz.meta.service.impl.KafkaLisstener">
<property name="gateInfoService" ref="gateInfoService"/>
</bean>
<!-- 4. Container properties: topic, ack mode and listener. ackMode must be a
     MANUAL variant, otherwise the Acknowledgment passed to onMessage is null. -->
<bean id="containerProperties" class="org.springframework.kafka.listener.config.ContainerProperties">
<constructor-arg value="${kafka.receive.topic}"/>
<property name="ackMode" value="MANUAL_IMMEDIATE"/>
<property name="messageListener" ref="kafkaConsumerService"/>
</bean>
<!-- 5. Concurrent listener container; init-method starts polling on startup. -->
<bean id="messageListenerContainer" class="org.springframework.kafka.listener.ConcurrentMessageListenerContainer"
init-method="doStart">
<constructor-arg ref="consumerFactory"/>
<constructor-arg ref="containerProperties"/>
<property name="concurrency" value="1"/>
</bean>
<!-- NOTE(review): this bean creates a second, raw KafkaConsumer that the
     listener container does not use (the container builds its own consumers
     via consumerFactory). It looks redundant — confirm nothing else injects
     "kafkaConsumer" before removing it. -->
<bean id="kafkaConsumer" class="org.apache.kafka.clients.consumer.KafkaConsumer">
<constructor-arg>
<ref bean="consumerProperties"/>
</constructor-arg>
</bean>
</beans>
enable.auto.commit=false
注:配置消费者容器配置信息时,配置ackMode(忘记配置会出现Acknowledgment acknowledgment=null的情况)
- RECORD:每处理完一条消息就commit一次
- BATCH(默认):每次poll时批量提交一次,提交频率取决于poll的调用频率
- TIME:每间隔ackTime的时间commit一次
- COUNT:累积达到ackCount次ack后commit一次
- COUNT_TIME:ackTime或ackCount哪个条件先满足,就commit
- MANUAL:由listener负责ack,但背后仍是批量提交
- MANUAL_IMMEDIATE:由listener负责ack,每调用一次acknowledge()就立即commit
本文介绍了Kafka消费者如何手动提交offset,包括监听代码的实现和不同提交策略的详细解析,如RECORD、BATCH、TIME、COUNT、COUNT_TIME、MANUAL以及MANUAL_IMMEDIATE。手动提交允许更精细地控制消息确认,防止数据丢失。
1254

被折叠的 条评论
为什么被折叠?



