基于@KafkaListener注解的kafka监听代码可以手动指定要消费的kafka集群,这对于需要访问多套kafka集群的程序来说,是有效的解决方案。 这里需要注意的是,此时的消费者配置信息需使用原生kafka的配置信息格式(如:拉取消息数量上限为 ConsumerConfig.MAX_POLL_RECORDS_CONFIG = "max.poll.records"),与自动装载KafkaConsumer时的配置信息格式不同(如:拉取消息数量上限为 spring.kafka.consumer.max-poll-records),虽然 DefaultKafkaConsumerFactory(java.util.Map&lt;java.lang.String,java.lang.Object&gt; configs)来自spring-kafka。详情如下:
配置文件
配置参数及其含义,参见 @KafkaListener和KafkaTemplate 的配置使用
依赖项
依赖项的引入,参见 @KafkaListener和KafkaTemplate 的配置使用
特别说明下,其实spring-kafka已包含了kafka-clients
<!-- spring-kafka -->
<dependency>
<groupId>org.springframework.kafka</groupId>
<artifactId>spring-kafka</artifactId>
<version>2.6.0</version>
</dependency>
<!-- kafka-clients -->
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka-clients</artifactId>
<version>2.6.0</version>
</dependency>
KafkaListenerContainerFactory、KafkaTemplate 配置类
其中可配置多个kafka集群,每个kafka集群生成一个KafkaListenerContainerFactory实例
@Data
@Slf4j
@Configuration
public class KafkaConfig {

    @Resource
    Environment environment;

    /**
     * Raw Kafka consumer properties.
     * <p>
     * Mirrors the {@code spring.kafka.consumer.*} entries of the application config,
     * but expressed in native kafka-clients {@link ConsumerConfig} key format (e.g.
     * {@code max.poll.records}), which is what {@code DefaultKafkaConsumerFactory}
     * expects — NOT the Spring Boot relaxed property names.
     *
     * @return mutable map of native consumer config keys to values
     */
    @Bean
    public Map<String, Object> consumerProperties() {
        String servers = environment.getProperty("spring.kafka.consumer.bootstrap-servers");
        String groupId = environment.getProperty("spring.kafka.consumer.group-id");
        String maxPollRecords = environment.getProperty("spring.kafka.consumer.max-poll-records");
        String maxPollInterval = environment.getProperty("spring.kafka.properties.max.poll.interval.ms");
        String sessionTimeout = environment.getProperty("spring.kafka.properties.session.timeout.ms");
        String jaasConfig = environment.getProperty("spring.kafka.consumer.properties.sasl.jaas.config");

        // Keys below come from the native kafka-clients ConsumerConfig, not Spring Boot.
        Map<String, Object> properties = new HashMap<>();
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, servers);
        // Deserializers are fixed to String here; the spring.kafka.consumer.*-deserializer
        // config entries are intentionally not consulted (the original read them into
        // locals but never used them — those dead reads have been removed).
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        properties.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        properties.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords);
        properties.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, maxPollInterval);
        // Offsets are committed manually (see AckMode.MANUAL_IMMEDIATE in containerFactory()).
        properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
        /// props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
        properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
        properties.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimeout);
        properties.put(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, 180000);
        // SASL/SCRAM authentication; the JAAS line itself comes from the config file.
        properties.put("security.protocol", "SASL_PLAINTEXT");
        properties.put("sasl.mechanism", "SCRAM-SHA-512");
        properties.put("sasl.jaas.config", jaasConfig);
        log.info("\n---------------------------------------------------\n " +
                "消费者属性:{}" +
                "\n---------------------------------------------------", properties);
        return properties;
    }

    /**
     * Listener container factory for this Kafka cluster. Multiple clusters can be
     * supported by defining one such factory bean per cluster and referencing it
     * from {@code @KafkaListener(containerFactory = "...")}.
     *
     * @return a batch-mode, manually-acknowledged concurrent container factory
     */
    @Bean
    public KafkaListenerContainerFactory<?> containerFactory() {
        Integer concurrency = environment.getProperty("spring.kafka.listener.concurrency", Integer.class, 1);
        Integer pollTimeout = environment.getProperty("spring.kafka.listener.poll-timeout", Integer.class, 3000);
        ConcurrentKafkaListenerContainerFactory<String, String> containerFactory = new ConcurrentKafkaListenerContainerFactory<>();
        containerFactory.setConsumerFactory(new DefaultKafkaConsumerFactory<>(this.consumerProperties()));
        /// spring.kafka.listener properties
        // Deliver records to the listener in batches (List<ConsumerRecord>).
        containerFactory.setBatchListener(true);
        // Number of concurrent consumer threads.
        containerFactory.setConcurrency(concurrency);
        // Poll timeout, i.e. how long to wait for records on each poll.
        containerFactory.getContainerProperties().setPollTimeout(pollTimeout);
        // Manual immediate offset commit; other modes would make an Acknowledgment
        // parameter on the listener method misbehave.
        containerFactory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE);
        log.info("容器工厂创建完毕:{}", containerFactory.getContainerProperties());
        return containerFactory;
    }

    /**
     * Raw Kafka producer properties, in native kafka-clients {@link ProducerConfig}
     * key format.
     *
     * @return mutable map of native producer config keys to values
     */
    @Bean
    public Map<String, Object> producerProperties() {
        Map<String, Object> properties = new HashMap<>();
        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, environment.getProperty("spring.kafka.producer.bootstrap-servers"));
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        // Wait for the full ISR to acknowledge each send (strongest durability).
        properties.put(ProducerConfig.ACKS_CONFIG, "all");
        properties.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432);
        properties.put(ProducerConfig.RETRIES_CONFIG, 0);
        properties.put(ProducerConfig.RETRY_BACKOFF_MS_CONFIG, 300);
        properties.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);
        properties.put("security.protocol", "SASL_PLAINTEXT");
        properties.put("sasl.mechanism", "SCRAM-SHA-512");
        properties.put("sasl.jaas.config", environment.getProperty("spring.kafka.producer.properties.sasl.jaas.config"));
        return properties;
    }

    /**
     * Kafka producer factory backed by {@link #producerProperties()}.
     */
    @Bean
    public ProducerFactory<String, String> producerFactory() {
        return new DefaultKafkaProducerFactory<>(this.producerProperties());
    }

    /**
     * Kafka producer client (template) for this cluster.
     */
    @Bean
    public KafkaTemplate<String, String> kafkaTemplate(ProducerFactory<String, String> producerFactory) {
        return new KafkaTemplate<>(producerFactory);
    }
}
消费者代码
@KafkaListener注解的containerFactory参数引用上述配置类中定义的KafkaListenerContainerFactory实例(bean),也就指定了对应的kafka集群
@Slf4j
@Component
// NOTE(review): this class both implements BatchMessageListener AND registers the
// method via @KafkaListener. Only the annotation path appears to be intended (it
// names the containerFactory); the interface implementation looks redundant — confirm.
public class KafkaConsumerListen implements BatchMessageListener<String, String> {
@Autowired
private Environment environment;
@Autowired
private KafkaMsgHandleService msgHandleService;
// Thread pool used to decouple message handling from the Kafka consumer thread.
@Autowired
private ThreadPoolTaskExecutor taskExecutor;
/************************
 * Receive messages (batch).
 *
 * NOTE(review): the container factory is configured with AckMode.MANUAL_IMMEDIATE,
 * but this method takes no Acknowledgment parameter and never commits offsets —
 * presumably offsets are therefore never acknowledged; verify against the runtime
 * behavior before relying on at-least-once semantics.
 ************************/
@Override
@KafkaListener( containerFactory = "containerFactory",
groupId = "${kafka.groupId}",
topics = "#{'${kafka.topics}'.split(',')}",
concurrency = "${kafka.concurrency}")
public void onMessage(List<ConsumerRecord<String, String>> records) {
try {
// Extract just the payloads; keys/headers/offsets are not used downstream.
final List<String> msgs = records.stream().map(ConsumerRecord::value).collect(Collectors.toList());
log.info("收到消息体:size={} content:{}", msgs.size(), JSON.toJSONString(msgs));
/// Hand each payload off for asynchronous processing.
msgs.forEach(this::processRecord);
} catch (Exception e) {
// Swallow to keep the consumer thread alive; failed batches are only logged.
log.error("KafkaListener_kafka_consume_error.", e);
}
}
/************************
 * Process one message on the task executor.
 ************************/
private void processRecord(String msg) {
taskExecutor.submit(() -> {
// Kill switch: when kafka1.switch=false, drop the message instead of handling it.
// Note the check runs inside the submitted task, i.e. after submission.
if (!environment.getProperty("kafka1.switch", Boolean.class,true)) {
log.warn("KafkaListener_turn_off_drop_message.");
return;
}
msgHandleService.handle(msg);
});
}
}
生产者代码
@Component
@Slf4j
public class KafKaProducer {
@Autowired
private KafkaTemplate kafkaTemplate;
public void sendMessage(String topic, Object object) {
/*
* 这里的 ListenableFuture 类是 spring 对 java 原生 Future 的扩展增强,是一个泛型接口,用于监听异步方法的回调 而对于
* kafka send 方法返回值而言,这里的泛型所代表的实际类型就是 SendResult<K, V>,而这里 K,V 的泛型实际上 被用于
* ProducerRecord<K, V> producerRecord,即生产者发送消息的 key,value 类型
*/
ListenableFuture<SendResult<String, Object>> future = kafkaTemplate.send(topic, object);
future.addCallback(new ListenableFutureCallback<SendResult<String, Object>>() {
@Override
public void onFailure(Throwable throwable) {
log.error("发送消息失败:" + throwable.getMessage());
}
@Override
public void onSuccess(SendResult<String, Object> sendResult){
// log.info("发送消息成功:" + sendResult.toString());
}
});
}
}