【Spring Boot 使用记录】kafka自动配置和自定义配置

1 自动配置

自动配置实现在 org.springframework.boot.autoconfigure.kafka.KafkaAutoConfiguration

配置类为:

// Binds every property under the "spring.kafka" prefix; this is the properties
// holder consumed by KafkaAutoConfiguration (body elided in this excerpt).
@ConfigurationProperties(prefix = "spring.kafka")
public class KafkaProperties {

......

}

所以使用spring boot 默认的自动配置,kafka的配置都以spring.kafka开头,如:

#kafka默认消费者配置
spring.kafka.consumer.bootstrap-servers=192.168.0.15:9092
spring.kafka.consumer.enable-auto-commit=false
spring.kafka.consumer.auto-offset-reset=earliest
#kafka默认生产者配置
spring.kafka.producer.bootstrap-servers=192.168.0.15:9092
spring.kafka.producer.acks=-1
spring.kafka.producer.client-id=kafka-producer
spring.kafka.producer.batch-size=5

 

2 自定义配置

配置类org.springframework.boot.autoconfigure.kafka.KafkaProperties中并没有涵盖所有的org.apache.kafka.clients.producer.ProducerConfig和org.apache.kafka.clients.consumer.ConsumerConfig中的配置,这就导致某些特殊配置不能依赖spring boot自动创建,需要我们手动创建Producer和Consumer。

生产端自定义配置例子:

package cn.ztuo.bitrade.config;

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.core.ProducerFactory;

@Configuration
@EnableKafka
public class KafkaProducerConfiguration {

	@Value("${spring.kafka.bootstrap-servers}")
	private String servers;
	@Value("${spring.kafka.producer.retries}")
	private int retries;
	@Value("${spring.kafka.producer.batch.size}")
	private int batchSize;
	@Value("${spring.kafka.producer.linger}")
	private int linger;
	@Value("${spring.kafka.producer.buffer.memory}")
	private int bufferMemory;

	/**
	 * Assembles the raw producer configuration map by hand, because
	 * {@link ProducerConfig} exposes more tunables than Spring Boot's
	 * auto-configuration covers.
	 *
	 * @return producer settings keyed by Kafka config names
	 */
	public Map<String, Object> producerConfigs() {
		Map<String, Object> config = new HashMap<>();
		config.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, servers);
		config.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
		config.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
		config.put(ProducerConfig.RETRIES_CONFIG, retries);
		config.put(ProducerConfig.BATCH_SIZE_CONFIG, batchSize);
		config.put(ProducerConfig.LINGER_MS_CONFIG, linger);
		config.put(ProducerConfig.BUFFER_MEMORY_CONFIG, bufferMemory);
		// Optional custom partitioner, left disabled:
		// config.put(ProducerConfig.PARTITIONER_CLASS_CONFIG, "cn.ztuo.bitrade.kafka.kafkaPartitioner");
		return config;
	}

	/**
	 * Builds the producer factory manually instead of relying on the
	 * DefaultKafkaProducerFactory created by Spring Boot's KafkaAutoConfiguration.
	 *
	 * @return factory backed by {@link #producerConfigs()}
	 */
	public ProducerFactory<String, String> producerFactory() {
		return new DefaultKafkaProducerFactory<>(producerConfigs());
	}

	/**
	 * Replaces the KafkaTemplate that Spring Boot's KafkaAutoConfiguration
	 * would otherwise register.
	 *
	 * @return template wired to the custom producer factory
	 */
	@Bean
	public KafkaTemplate<String, String> kafkaTemplate() {
		ProducerFactory<String, String> factory = producerFactory();
		return new KafkaTemplate<>(factory);
	}

}

消费端自定义配置例子:

package cn.ztuo.bitrade.config;

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerContainerFactory;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;
// Once beans of these types are defined here, the matching bean definitions in
// org.springframework.boot.autoconfigure.kafka.KafkaAutoConfiguration back off.
@Configuration
@EnableKafka
public class KafkaConsumerConfiguration {

	@Value("${spring.kafka.bootstrap-servers}")
	private String servers;
	@Value("${spring.kafka.consumer.enable.auto.commit}")
	private boolean enableAutoCommit;
	@Value("${spring.kafka.consumer.session.timeout}")
	private String sessionTimeout;
	@Value("${spring.kafka.consumer.auto.commit.interval}")
	private String autoCommitInterval;
	@Value("${spring.kafka.consumer.group.id}")
	private String groupId;
	@Value("${spring.kafka.consumer.auto.offset.reset}")
	private String autoOffsetReset;
	@Value("${spring.kafka.consumer.concurrency}")
	private int concurrency;
	@Value("${spring.kafka.consumer.maxPollRecordsConfig}")
	private int maxPollRecordsConfig;

	/**
	 * Assembles the raw consumer configuration map by hand, because
	 * {@link ConsumerConfig} exposes more tunables than Spring Boot's
	 * auto-configuration covers.
	 *
	 * @return consumer settings keyed by Kafka config names
	 */
	public Map<String, Object> consumerConfigs() {
		Map<String, Object> config = new HashMap<>();
		config.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, servers);
		config.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
		config.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
		config.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
		config.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit);
		config.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, autoCommitInterval);
		config.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimeout);
		config.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset);
		// Upper bound on records returned per poll() batch.
		config.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecordsConfig);
		return config;
	}

	/**
	 * Builds the consumer factory manually instead of relying on the
	 * DefaultKafkaConsumerFactory created by Spring Boot's auto-configuration.
	 *
	 * @return factory backed by {@link #consumerConfigs()}
	 */
	public ConsumerFactory<String, String> consumerFactory() {
		return new DefaultKafkaConsumerFactory<>(consumerConfigs());
	}

	/**
	 * Listener container factory used by {@code @KafkaListener} methods;
	 * configured as a batch listener with the concurrency taken from properties.
	 *
	 * @return concurrent listener container factory for String records
	 */
	@Bean
	public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> kafkaListenerContainerFactory() {
		ConcurrentKafkaListenerContainerFactory<String, String> factory =
				new ConcurrentKafkaListenerContainerFactory<>();
		factory.setConsumerFactory(consumerFactory());
		factory.setBatchListener(true);
		factory.setConcurrency(concurrency);
		// Do not fail startup when a subscribed topic does not exist yet.
		factory.setMissingTopicsFatal(false);
		factory.getContainerProperties().setPollTimeout(1500);
		return factory;
	}

}

 

<think> 我们需要在Spring Boot应用中配置多个Kafka集群(即多个Kafka连接配置)。这通常意味着我们需要配置多个KafkaTemplate多个KafkaListenerContainerFactory,以便与不同的Kafka集群进行交互。 解决方案: 1. 定义多个Kafka配置类,每个配置类对应一个Kafka集群。 2. 在每个配置类中,使用@Bean注解创建KafkaTemplateKafkaListenerContainerFactory,并为它们指定不同的名称。 3. 在生产者或消费者中使用相应的KafkaTemplate或KafkaListenerContainerFactory。 注意:为了避免配置冲突,我们需要为每个配置类指定不同的配置前缀,并在配置文件中分别配置。 下面是一个示例,假设我们有两个Kafka集群,分别是kafka1kafka2。 步骤1:在application.yml(或application.properties)中配置两个Kafka集群的属性。 步骤2:创建两个Kafka配置类,分别读取不同的配置前缀。 步骤3:在每个配置类中定义所需的Bean。 示例配置文件(application.yml): ```yaml kafka: cluster1: bootstrap-servers: localhost:9092 producer: key-serializer: org.apache.kafka.common.serialization.StringSerializer value-serializer: org.apache.kafka.common.serialization.StringSerializer consumer: group-id: group1 key-deserializer: org.apache.kafka.common.serialization.StringDeserializer value-deserializer: org.apache.kafka.common.serialization.StringDeserializer auto-offset-reset: earliest cluster2: bootstrap-servers: localhost:9093 producer: key-serializer: org.apache.kafka.common.serialization.StringSerializer value-serializer: org.apache.kafka.common.serialization.StringSerializer consumer: group-id: group2 key-deserializer: org.apache.kafka.common.serialization.StringDeserializer value-deserializer: org.apache.kafka.common.serialization.StringDeserializer auto-offset-reset: earliest ``` 然后,我们创建两个配置类:KafkaCluster1ConfigKafkaCluster2Config。 注意:Spring Boot默认使用一个Kafka配置spring.kafka.*),我们需要自定义配置前缀。我们可以使用@ConfigurationProperties注解来绑定配置。 但是,由于我们有两个集群,我们需要分别为它们创建配置属性类。不过,Spring Kafka提供了ConcurrentKafkaListenerContainerFactoryKafkaTemplate的创建方式,我们可以通过不同的ProducerFactoryConsumerFactory来创建。 具体实现: 配置类1(KafkaCluster1Config): </think> ### Spring Boot配置多个Kafka集群 在Spring Boot配置多个Kafka集群需要创建不同的生产者/消费者工厂KafkaTemplate实例。以下是完整实现: #### 1. 
添加Maven依赖 ```xml <dependencies> <dependency> <groupId>org.springframework.boot</groupId> <artifactId>spring-boot-starter</artifactId> </dependency> <dependency> <groupId>org.springframework.kafka</groupId> <artifactId>spring-kafka</artifactId> </dependency> </dependencies> ``` #### 2. 配置文件(`application.yml`) ```yaml kafka: cluster1: bootstrap-servers: kafka1-server:9092 producer: key-serializer: org.apache.kafka.common.serialization.StringSerializer value-serializer: org.apache.kafka.common.serialization.StringSerializer consumer: group-id: group1 key-deserializer: org.apache.kafka.common.serialization.StringDeserializer value-deserializer: org.apache.kafka.common.serialization.StringDeserializer auto-offset-reset: earliest cluster2: bootstrap-servers: kafka2-server:9093 producer: key-serializer: org.apache.kafka.common.serialization.StringSerializer value-serializer: org.apache.kafka.common.serialization.ByteArraySerializer consumer: group-id: group2 key-deserializer: org.apache.kafka.common.serialization.StringDeserializer value-deserializer: org.apache.kafka.common.serialization.ByteArrayDeserializer auto-offset-reset: latest ``` #### 3. 
Java配置类 ```java import org.apache.kafka.clients.producer.ProducerConfig; import org.apache.kafka.common.serialization.StringSerializer; import org.springframework.beans.factory.annotation.Value; import org.springframework.context.annotation.Bean; import org.springframework.context.annotation.Configuration; import org.springframework.kafka.core.*; import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory; import java.util.HashMap; import java.util.Map; @Configuration public class MultiKafkaConfig { // Cluster1 配置 @Bean public ProducerFactory<String, String> cluster1ProducerFactory( @Value("${kafka.cluster1.bootstrap-servers}") String servers) { Map<String, Object> config = new HashMap<>(); config.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, servers); config.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class); config.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class); return new DefaultKafkaProducerFactory<>(config); } @Bean public KafkaTemplate<String, String> cluster1KafkaTemplate() { return new KafkaTemplate<>(cluster1ProducerFactory(null)); } @Bean public ConsumerFactory<String, String> cluster1ConsumerFactory( @Value("${kafka.cluster1.bootstrap-servers}") String servers, @Value("${kafka.cluster1.consumer.group-id}") String groupId) { Map<String, Object> config = new HashMap<>(); config.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, servers); config.put("group.id", groupId); config.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer"); config.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer"); return new DefaultKafkaConsumerFactory<>(config); } @Bean public ConcurrentKafkaListenerContainerFactory<String, String> cluster1ListenerFactory() { ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>(); factory.setConsumerFactory(cluster1ConsumerFactory(null, null)); return factory; } // 
Cluster2 配置配置方式相同,使用不同参数) @Bean public ProducerFactory<String, byte[]> cluster2ProducerFactory( @Value("${kafka.cluster2.bootstrap-servers}") String servers) { Map<String, Object> config = new HashMap<>(); config.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, servers); config.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class); config.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArraySerializer"); return new DefaultKafkaProducerFactory<>(config); } @Bean public KafkaTemplate<String, byte[]> cluster2KafkaTemplate() { return new KafkaTemplate<>(cluster2ProducerFactory(null)); } } ``` #### 4. 生产者使用示例 ```java @Service public class KafkaProducerService { @Autowired @Qualifier("cluster1KafkaTemplate") private KafkaTemplate<String, String> template1; @Autowired @Qualifier("cluster2KafkaTemplate") private KafkaTemplate<String, byte[]> template2; public void sendToCluster1(String topic, String message) { template1.send(topic, message); } public void sendToCluster2(String topic, byte[] data) { template2.send(topic, data); } } ``` #### 5. 消费者使用示例 ```java @Service public class KafkaConsumerService { // 监听集群1的topic @KafkaListener( topics = "topic1", containerFactory = "cluster1ListenerFactory") public void listenCluster1(String message) { System.out.println("Received from Cluster1: " + message); } // 监听集群2的topic(需配置cluster2ListenerFactory) @KafkaListener( topics = "topic2", containerFactory = "cluster2ListenerFactory") public void listenCluster2(byte[] data) { System.out.println("Received from Cluster2: " + new String(data)); } } ``` ### 关键点解释 1. **配置隔离**:每个Kafka集群需要独立的: - `ProducerFactory`/`ConsumerFactory`(连接配置- `KafkaTemplate`(生产者操作) - `ConcurrentKafkaListenerContainerFactory`(消费者监听) 2. **Bean命名**:使用`@Qualifier`注入指定Bean,避免自动装配冲突 3. **序列化差异**:不同集群可使用不同的序列化方式(如示例中Cluster2使用Byte数组) 4. **配置管理**:通过`application.yml`集中管理配置使用`@Value`注入参数 5. 
**消费者组隔离**:不同集群的消费者组需分开配置(`group.id`) ### 常见问题原因 - **配置冲突**:未正确隔离不同集群的Bean导致注入错误 - **序列化不匹配**:生产者/消费者使用的序列化方式不一致 - **资源泄漏**:未正确关闭Kafka连接(Spring Boot自动管理) - **线程阻塞**:消费者线程阻塞导致其他集群消息积压 ### 最佳实践 1. 为每个集群创建独立的配置类 2. 使用配置文件管理连接参数 3. 不同集群使用不同的消费者组ID 4. 监控各集群的连接状态消息积压 ---
评论 1
成就一亿技术人!
拼手气红包6.0元
还能输入1000个字符
 
红包 添加红包
表情包 插入表情
 条评论被折叠 查看
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值