Spring Boot: Integrating Multiple Kafka Instances

This article shows how to configure and use multiple Kafka instances (two in this example) in a single Spring Boot application. The application.yml file defines the settings for both instances, including bootstrap-servers, group-id, and enable-auto-commit. Producer and consumer configuration classes then give each instance its own KafkaTemplate and KafkaListenerContainerFactory. The producer section sends messages to both Kafka instances, and the consumer section defines two listener methods that consume the same topic from each instance and log the messages they receive.


1. Configuration file

application.yml

spring:
  kafka:
    one:
      bootstrap-servers: IP:PORT
      consumer:
        group-id: YOUR_GROUP_ID
        enable-auto-commit: true
    two:
      bootstrap-servers: IP:PORT
      consumer:
        group-id: YOUR_GROUP_ID
        enable-auto-commit: true
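Note that spring.kafka.one and spring.kafka.two are custom prefixes: Spring Boot's Kafka auto-configuration only binds the standard spring.kafka.* keys and ignores these, so the values are read by hand with @Value in the configuration classes below. If you prefer type-safe binding, they could also be bound with @ConfigurationProperties. The class below is only a sketch of that alternative; the class name and structure are assumptions, not part of the original setup.

import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.stereotype.Component;

// Hypothetical alternative to the @Value fields used below: bind the custom
// "spring.kafka.one" block into a properties holder (relaxed binding maps
// bootstrap-servers -> bootstrapServers, group-id -> groupId, and so on).
@Component
@ConfigurationProperties(prefix = "spring.kafka.one")
public class KafkaOneProperties {

    private String bootstrapServers;
    private final Consumer consumer = new Consumer();

    public String getBootstrapServers() { return bootstrapServers; }

    public void setBootstrapServers(String bootstrapServers) { this.bootstrapServers = bootstrapServers; }

    public Consumer getConsumer() { return consumer; }

    public static class Consumer {

        private String groupId;
        private boolean enableAutoCommit;

        public String getGroupId() { return groupId; }

        public void setGroupId(String groupId) { this.groupId = groupId; }

        public boolean isEnableAutoCommit() { return enableAutoCommit; }

        public void setEnableAutoCommit(boolean enableAutoCommit) { this.enableAutoCommit = enableAutoCommit; }
    }
}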

2. Producer and consumer configuration

2.1 The first Kafka instance

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerContainerFactory;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.core.ProducerFactory;
import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;

@EnableKafka
@Configuration
public class KafkaOneConfig {

    @Value("${spring.kafka.one.bootstrap-servers}")
    private String bootstrapServers;

    @Value("${spring.kafka.one.consumer.group-id}")
    private String groupId;

    @Value("${spring.kafka.one.consumer.enable-auto-commit}")
    private boolean enableAutoCommit;

    @Bean
    public KafkaTemplate<String, String> kafkaOneTemplate() {
        return new KafkaTemplate<>(producerFactory());
    }

    @Bean
    public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> kafkaOneContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        factory.setConcurrency(3);
        factory.getContainerProperties().setPollTimeout(3000);
        return factory;
    }

    // Plain methods rather than @Bean methods, so their names do not clash with
    // the equivalent methods in KafkaTwoConfig.
    private ProducerFactory<String, String> producerFactory() {
        return new DefaultKafkaProducerFactory<>(producerConfigs());
    }

    private ConsumerFactory<String, String> consumerFactory() {
        return new DefaultKafkaConsumerFactory<>(consumerConfigs());
    }

    private Map<String, Object> producerConfigs() {
        Map<String, Object> props = new HashMap<>();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        props.put(ProducerConfig.RETRIES_CONFIG, 0);
        props.put(ProducerConfig.ACKS_CONFIG, "1"); // acks must be the String "1", not the int 1
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        return props;
    }

    private Map<String, Object> consumerConfigs() {
        Map<String, Object> props = new HashMap<>();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit);
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        return props;
    }
}

2.2 The second Kafka instance

// Imports are identical to KafkaOneConfig.
@Configuration
public class KafkaTwoConfig {

    @Value("${spring.kafka.two.bootstrap-servers}")
    private String bootstrapServers;

    @Value("${spring.kafka.two.consumer.group-id}")
    private String groupId;

    @Value("${spring.kafka.two.consumer.enable-auto-commit}")
    private boolean enableAutoCommit;

    @Bean
    public KafkaTemplate<String, String> kafkaTwoTemplate() {
        return new KafkaTemplate<>(producerFactory());
    }

    @Bean
    public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> kafkaTwoContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        factory.setConcurrency(3);
        factory.getContainerProperties().setPollTimeout(3000);
        return factory;
    }

    private ProducerFactory<String, String> producerFactory() {
        return new DefaultKafkaProducerFactory<>(producerConfigs());
    }

    private ConsumerFactory<String, String> consumerFactory() {
        return new DefaultKafkaConsumerFactory<>(consumerConfigs());
    }

    private Map<String, Object> producerConfigs() {
        Map<String, Object> props = new HashMap<>();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        props.put(ProducerConfig.RETRIES_CONFIG, 0);
        props.put(ProducerConfig.ACKS_CONFIG, "1");
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        return props;
    }

    private Map<String, Object> consumerConfigs() {
        Map<String, Object> props = new HashMap<>();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit);
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        return props;
    }
}
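KafkaOneConfig and KafkaTwoConfig differ only in the property prefix and bean names. The duplication is harmless, but if you want to trim it, one possibility (purely a sketch; KafkaPropsBuilder and its methods are not part of the original article) is to move the map-building into a small shared helper that both configuration classes call:

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;

// Hypothetical helper that builds the per-instance producer/consumer property maps;
// producerConfigs() and consumerConfigs() in both config classes could delegate to it.
public final class KafkaPropsBuilder {

    private KafkaPropsBuilder() {
    }

    public static Map<String, Object> producerProps(String bootstrapServers) {
        Map<String, Object> props = new HashMap<>();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        props.put(ProducerConfig.RETRIES_CONFIG, 0);
        props.put(ProducerConfig.ACKS_CONFIG, "1");
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        return props;
    }

    public static Map<String, Object> consumerProps(String bootstrapServers, String groupId, boolean enableAutoCommit) {
        Map<String, Object> props = new HashMap<>();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit);
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        return props;
    }
}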

3. Producer

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.stereotype.Controller;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.ResponseBody;

@Controller
public class TestController {

    // The field names match the @Bean method names above, so Spring injects the
    // correct template even though two KafkaTemplate beans exist.
    @Autowired
    private KafkaTemplate<String, String> kafkaOneTemplate;

    @Autowired
    private KafkaTemplate<String, String> kafkaTwoTemplate;

    @RequestMapping("/send")
    @ResponseBody
    public String send() {
        final String TOPIC = "TOPIC_1";
        kafkaOneTemplate.send(TOPIC, "kafka one");
        kafkaTwoTemplate.send(TOPIC, "kafka two");
        return "success";
    }
}
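KafkaTemplate.send() is asynchronous, so "success" here only means the sends were handed off, not that the brokers acknowledged them. If you want to confirm delivery before responding, one option (a sketch, not from the original article; the exact future type returned by send() depends on the Spring Kafka version) is to block on the returned future:

// Hypothetical variant of send() in TestController that waits for broker acknowledgement.
// send() returns a future (ListenableFuture in Spring Kafka 2.x, CompletableFuture in 3.x);
// get() works on both. Additional imports assumed: java.util.concurrent.ExecutionException,
// java.util.concurrent.TimeUnit, java.util.concurrent.TimeoutException.
@RequestMapping("/sendAndConfirm")
@ResponseBody
public String sendAndConfirm() {
    final String topic = "TOPIC_1";
    try {
        kafkaOneTemplate.send(topic, "kafka one").get(10, TimeUnit.SECONDS);
        kafkaTwoTemplate.send(topic, "kafka two").get(10, TimeUnit.SECONDS);
        return "success";
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        return "interrupted";
    } catch (ExecutionException | TimeoutException e) {
        return "failed: " + e.getMessage();
    }
}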

4. Consumer

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.stereotype.Component;

@Component
public class KafkaConsumer {

    private static final Logger LOGGER = LoggerFactory.getLogger(KafkaConsumer.class);

    private static final String TOPIC = "TOPIC_1";

    // containerFactory must match the bean name of the KafkaListenerContainerFactory
    // defined in the corresponding configuration class.
    @KafkaListener(topics = {TOPIC}, containerFactory = "kafkaOneContainerFactory")
    public void listenerOne(ConsumerRecord<?, ?> record) {
        LOGGER.info("kafka one received message: {}", record.value());
    }

    @KafkaListener(topics = {TOPIC}, containerFactory = "kafkaTwoContainerFactory")
    public void listenerTwo(ConsumerRecord<?, ?> record) {
        LOGGER.info("kafka two received message: {}", record.value());
    }
}
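With enable-auto-commit: true, offsets are committed automatically on the consumer's auto-commit interval. If you later need explicit, at-least-once commits, a common approach (a sketch, not part of the original setup) is to set enable-auto-commit to false, switch the container factory to manual acknowledgement mode, and acknowledge in the listener:

// In the configuration class (assumes enable-auto-commit is set to false in application.yml):
// factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE);
// (ContainerProperties is in org.springframework.kafka.listener in Spring Kafka 2.x+.)

// In the listener, take an Acknowledgment (org.springframework.kafka.support.Acknowledgment)
// parameter and commit once processing succeeds.
@KafkaListener(topics = {TOPIC}, containerFactory = "kafkaOneContainerFactory")
public void listenerOneManualAck(ConsumerRecord<?, ?> record, Acknowledgment ack) {
    LOGGER.info("kafka one received message: {}", record.value());
    ack.acknowledge(); // commits the offset for this record
}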

Output

c.k.s.consumer.KafkaConsumer : kafka one received message: kafka one
c.k.s.consumer.KafkaConsumer : kafka two received message: kafka two
