POM dependencies

The starter's own coordinates, followed by its dependencies:
<groupId>com.zf</groupId>
<artifactId>kafka-spring-boot-starter</artifactId>
<version>1.0.1</version>

<dependencies>
    <dependency>
        <groupId>org.springframework.kafka</groupId>
        <artifactId>spring-kafka</artifactId>
        <version>2.7.3</version>
    </dependency>
    <dependency>
        <groupId>org.projectlombok</groupId>
        <artifactId>lombok</artifactId>
        <version>1.18.16</version>
    </dependency>
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-autoconfigure</artifactId>
        <version>2.5.2</version>
    </dependency>
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-configuration-processor</artifactId>
        <!-- aligned with spring-boot-autoconfigure -->
        <version>2.5.2</version>
        <optional>true</optional>
    </dependency>
    <dependency>
        <groupId>org.apache.commons</groupId>
        <artifactId>commons-lang3</artifactId>
        <version>3.12.0</version>
    </dependency>
</dependencies>

<build>
    <plugins>
        <plugin>
            <groupId>org.apache.maven.plugins</groupId>
            <artifactId>maven-compiler-plugin</artifactId>
            <configuration>
                <source>8</source>
                <target>8</target>
            </configuration>
        </plugin>
    </plugins>
</build>
Directory structure

Implementation
import com.zf.mq.config.KafkaAutoConfiguration;
import org.springframework.context.annotation.Import;
import java.lang.annotation.Documented;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;

/**
 * Switches the starter on by importing {@link KafkaAutoConfiguration}.
 * @author xg
 */
@Target({ElementType.TYPE})
@Retention(RetentionPolicy.RUNTIME)
@Documented
@Import({KafkaAutoConfiguration.class})
public @interface EnableMcKafka {
}
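Usage in a consuming application is then a single annotation. A minimal sketch (the application class name is hypothetical):

import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;

// Hypothetical entry point: @EnableMcKafka imports KafkaAutoConfiguration,
// so the starter's beans are registered without scanning com.zf.mq.config.
@SpringBootApplication
@EnableMcKafka
public class DemoApplication {
    public static void main(String[] args) {
        SpringApplication.run(DemoApplication.class, args);
    }
}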
package com.zf.mq.config;
import com.zf.mq.models.KafkaListernerPro;
import com.zf.mq.models.McKafkaBasePro;
import com.zf.mq.models.McKafkaConsumer;
import com.zf.mq.models.McKafkaProducer;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerContainerFactory;
import org.springframework.kafka.core.*;
import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;
import org.springframework.kafka.listener.ContainerProperties;
import java.util.HashMap;
import java.util.Map;
/**
 * Auto-configuration that builds the Kafka producer, consumer and
 * listener-container beans from the mc.kafka.* properties.
 * @author xg
 */
@Configuration
@Slf4j
@EnableConfigurationProperties({McKafkaConsumer.class, McKafkaProducer.class, KafkaListernerPro.class, McKafkaBasePro.class})
public class KafkaAutoConfiguration {

    @Autowired
    private McKafkaConsumer kafkaConsumerProp;
    @Autowired
    private McKafkaProducer kafkaProducer;
    @Autowired
    private KafkaListernerPro kafkaListenerProp;
    @Autowired
    private McKafkaBasePro kafkaBasePro;

    @Bean
    public KafkaTemplate<String, String> mKafkaTemplate() {
        return new KafkaTemplate<>(producerFactory());
    }
    /**
     * Producer factory
     */
    @Bean
    public ProducerFactory<String, String> producerFactory() {
        return new DefaultKafkaProducerFactory<>(producerConfigs());
    }
    /**
     * Producer configuration
     */
    @Bean
    public Map<String, Object> producerConfigs() {
        // HashMap rather than ConcurrentHashMap: the latter rejects null values,
        // which would throw if an optional property is left unset
        Map<String, Object> props = new HashMap<>();
        // Broker addresses
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaBasePro.getBootStrapServers());
        // Limit in-flight requests per connection to preserve ordering across retries
        props.put(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, kafkaProducer.getMaxInFlightRequestsPerConnection());
        // Idempotence is only guaranteed within a single partition
        props.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, kafkaProducer.getEnableIdempotence());
        /*
         * acks = 0: the producer does not wait for any acknowledgement; the record is added to the
         *   socket buffer and considered sent. There is no guarantee the server received it, retries
         *   have no effect, and the offset returned for each record is always -1.
         * acks = 1: the leader writes the record to its local log and responds without waiting for
         *   the followers. If the leader fails right after acknowledging but before the record is
         *   replicated, the record is lost.
         * acks = all: the leader waits for the full set of in-sync replicas to acknowledge. The
         *   record survives as long as at least one in-sync replica stays alive; this is the
         *   strongest guarantee and is equivalent to acks = -1.
         */
        props.put(ProducerConfig.ACKS_CONFIG, kafkaProducer.getAcks());
        props.put(ProducerConfig.BATCH_SIZE_CONFIG, kafkaProducer.getBatchSize());
        props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, kafkaProducer.getBufferMemory());
        // How long send() blocks when the buffer is full; default 60s
        props.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, kafkaProducer.getMaxBlockMs());
        // Batching delay: a batch that has not filled within linger.ms is sent anyway
        props.put(ProducerConfig.LINGER_MS_CONFIG, kafkaProducer.getLingerMs());
        // Maximum message size accepted by send(); default 1048576 (1 MB)
        props.put(ProducerConfig.MAX_REQUEST_SIZE_CONFIG, kafkaProducer.getMaxRequestSize());
        // Compression: none, lz4, gzip or snappy; default none.
        // Consumers decompress automatically, so this is set only on the producer.
        props.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, kafkaProducer.getCompressionType());
        // Number of retries on send failure (default Integer.MAX_VALUE)
        props.put(ProducerConfig.RETRIES_CONFIG, kafkaProducer.getRetries());
        // Back-off between retries (ms)
        props.put(ProducerConfig.RETRY_BACKOFF_MS_CONFIG, kafkaProducer.getRetryBackoffMs());
        // Key/value serializers
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, kafkaProducer.getKeySerializer());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, kafkaProducer.getValueSerializer());
        return props;
    }
    /**
     * Kafka consumer factory
     */
    @Bean
    public ConsumerFactory<Object, Object> consumerFactory() {
        return new DefaultKafkaConsumerFactory<>(consumerConfigs());
    }
    @Bean
    public Map<String, Object> consumerConfigs() {
        Map<String, Object> props = new HashMap<>();
        // Broker addresses
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaBasePro.getBootStrapServers());
        // Key/value deserializers
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, kafkaConsumerProp.getKeyDeserializer());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, kafkaConsumerProp.getValueDeserializer());
        // Consumer group
        props.put(ConsumerConfig.GROUP_ID_CONFIG, kafkaConsumerProp.getGroupId());
        // Whether offsets are committed automatically
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, kafkaConsumerProp.getEnableAutoCommit());
        // Where to start when no committed offset exists
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, kafkaConsumerProp.getAutoOffsetReset());
        // Maximum time the broker may wait before answering a fetch (ms)
        props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, kafkaConsumerProp.getFetchMaxWait());
        // Maximum number of records returned per poll
        props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, kafkaConsumerProp.getMaxPollRecords());
        return props;
    }
    /**
     * Listener container factory
     */
    @Bean
    KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<Object, Object>> kafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<Object, Object> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        // Number of consumer threads
        factory.setConcurrency(kafkaListenerProp.getConcurrency());
        // Manual acknowledgement (hard-coded; the ackMode property is currently not consulted)
        factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL);
        // Enable batch consumption
        factory.setBatchListener(kafkaListenerProp.getBatchListener());
        factory.getContainerProperties().setPollTimeout(kafkaListenerProp.getPollTimeout());
        return factory;
    }
}
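A minimal listener sketch against this factory; the topic and class names are assumptions. With batch consumption enabled the payload arrives as a list, and AckMode.MANUAL means offsets are committed only when acknowledge() is called:

import java.util.List;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.stereotype.Component;

// Hypothetical consumer: "demo-topic" is an illustrative name.
@Component
public class DemoListener {
    @KafkaListener(topics = "demo-topic", containerFactory = "kafkaListenerContainerFactory")
    public void onMessages(List<ConsumerRecord<Object, Object>> records, Acknowledgment ack) {
        records.forEach(record -> System.out.println(record.value()));
        // AckMode.MANUAL: offsets are committed only after this call
        ack.acknowledge();
    }
}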
package com.zf.mq.constant;

/**
 * Kafka-related bean-name constants.
 * @author xg
 */
public class KafkaConstant {
    public static final String KAFKA_TEMPLATE = "KafkaTemplate";
    public static final String KAFKA_ADMIN = "KafkaAdmin";
    public static final String KAFKA_TOPIC = "KafkaTopic";
    public static final String KAFKA_CONSUMER = "KafkaConsumer";
}
package com.zf.mq.models;

import lombok.Data;
import org.springframework.boot.context.properties.ConfigurationProperties;

/**
 * Kafka listener-container properties. Registered through
 * @EnableConfigurationProperties, so no @Component is needed.
 * @author xg
 */
@Data
@ConfigurationProperties(prefix = "mc.kafka.listener")
public class KafkaListernerPro {
    /**
     * Number of listener threads
     */
    private Integer concurrency;
    /**
     * Acknowledgement mode
     */
    private String ackMode;
    /**
     * Poll timeout (ms)
     */
    private Long pollTimeout;
    /**
     * Whether to consume in batches
     */
    private Boolean batchListener;
}
package com.zf.mq.models;

import lombok.Data;
import org.springframework.boot.context.properties.ConfigurationProperties;

/**
 * Base Kafka properties.
 * @author xg
 */
@Data
@ConfigurationProperties(prefix = "mc.kafka")
public class McKafkaBasePro {
    /**
     * Broker addresses (bootstrap servers)
     */
    private String bootStrapServers;
}
package com.zf.mq.models;

import lombok.Data;
import org.springframework.boot.context.properties.ConfigurationProperties;

/**
 * Kafka consumer properties.
 * @author xg
 */
@Data
@ConfigurationProperties(prefix = "mc.kafka.consumer")
public class McKafkaConsumer {
    /**
     * Default consumer group id
     */
    private String groupId;
    /**
     * earliest: with a committed offset, resume from it; without one, consume from the beginning
     * latest: with a committed offset, resume from it; without one, consume only newly produced records
     * none: resume from the committed offsets when every partition has one; throw if any partition lacks one
     */
    private String autoOffsetReset;
    /**
     * Whether offsets are committed automatically
     */
    private Boolean enableAutoCommit;
    /**
     * Key deserializer
     */
    private String keyDeserializer;
    /**
     * Value deserializer
     */
    private String valueDeserializer;
    /**
     * Maximum time the broker may wait before answering a fetch (ms)
     */
    private String fetchMaxWait;
    /**
     * Maximum number of records returned per poll (batch consumption)
     */
    private String maxPollRecords;
    /**
     * Reserved flag; not consulted by the auto-configuration
     */
    private Boolean flag = Boolean.FALSE;
}
package com.zf.mq.models;

import lombok.Data;
import org.springframework.boot.context.properties.ConfigurationProperties;

/**
 * Kafka producer properties.
 * @author xg
 */
@Data
@ConfigurationProperties(prefix = "mc.kafka.producer")
public class McKafkaProducer {
    /**
     * Batching delay in ms; a batch that has not filled within this window is sent anyway
     */
    private String lingerMs;
    /**
     * Limit on in-flight requests per connection; preserves ordering across retries at the cost of throughput
     */
    private String maxInFlightRequestsPerConnection;
    /**
     * acks = 0: no acknowledgement is awaited; the record is buffered and considered sent, retries
     *   have no effect, and the returned offset is always -1.
     * acks = 1: the leader writes to its local log and responds without waiting for followers; a
     *   leader failure before replication loses the record.
     * acks = all: the leader waits for all in-sync replicas; the record survives while at least one
     *   in-sync replica stays alive. Equivalent to acks = -1.
     */
    private String acks;
    /**
     * Batch size in bytes; tune under load (too large risks OOM or delayed sends, too small hurts throughput)
     */
    private String batchSize;
    /**
     * Producer buffer size
     */
    private String bufferMemory;
    /**
     * Whether to enable idempotent sends (enable.idempotence)
     */
    private Boolean enableIdempotence;
    /**
     * How long send() blocks when the buffer is full
     */
    private String maxBlockMs;
    /**
     * Compression type
     */
    private String compressionType;
    /**
     * Number of retries on send failure
     */
    private String retries;
    /**
     * Back-off between retries (ms)
     */
    private String retryBackoffMs;
    /**
     * Key/value serializers
     */
    private String keySerializer;
    private String valueSerializer;
    /**
     * Maximum request size
     */
    private String maxRequestSize;
}
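For reference, a sketch of a matching application.yml; every value here is illustrative, not a default shipped by the starter:

mc:
  kafka:
    boot-strap-servers: localhost:9092   # assumption: local broker
    producer:
      acks: all
      retries: 3
      retry-backoff-ms: 1000
      linger-ms: 30
      batch-size: 16384
      buffer-memory: 33554432
      max-block-ms: 60000
      max-request-size: 1048576
      max-in-flight-requests-per-connection: 1
      enable-idempotence: true
      compression-type: none
      key-serializer: org.apache.kafka.common.serialization.StringSerializer
      value-serializer: org.apache.kafka.common.serialization.StringSerializer
    consumer:
      group-id: demo-group
      auto-offset-reset: earliest
      enable-auto-commit: false
      fetch-max-wait: 500
      max-poll-records: 100
      key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      value-deserializer: org.apache.kafka.common.serialization.StringDeserializer
    listener:
      concurrency: 3
      poll-timeout: 3000
      batch-listener: true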
package com.zf.mq.models;

import lombok.Data;

/**
 * Topic definition.
 * @author xg
 */
@Data
public class McKafkaTopic {
    /**
     * Topic name
     */
    private String name;
    /**
     * Number of partitions
     */
    private int partition;
    /**
     * Replication factor
     */
    private short replication;
}
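The auto-configuration above does not yet consume McKafkaTopic (KafkaConstant hints at planned KafkaAdmin/KafkaTopic wiring). A minimal sketch of how it could be used, assuming a KafkaAdmin bean is available so declared topics get created; the bean method, topic name, and values are all assumptions:

import org.apache.kafka.clients.admin.NewTopic;
import org.springframework.context.annotation.Bean;

// Hypothetical bean method (e.g. inside KafkaAutoConfiguration): a NewTopic
// bean is picked up by KafkaAdmin, which creates the topic if it is missing.
@Bean
public NewTopic demoTopic() {
    McKafkaTopic topic = new McKafkaTopic();   // in practice, bound from properties
    topic.setName("demo-topic");               // illustrative name
    topic.setPartition(3);
    topic.setReplication((short) 1);
    return new NewTopic(topic.getName(), topic.getPartition(), topic.getReplication());
}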
package com.zf.mq.util;

import org.springframework.beans.factory.support.DefaultListableBeanFactory;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;

/**
 * @author xg
 */
public class BeanFactoryUtils {
    /**
     * Destroys all singleton beans of the given type and returns them.
     *
     * @param beanFactory the bean factory
     * @param cls         the bean type
     * @param <T>         the bean type
     * @return the beans that were removed
     */
    public static <T> List<T> destroyOfType(DefaultListableBeanFactory beanFactory, Class<T> cls) {
        List<T> result = new ArrayList<>();
        Set<String> beanNames = beanFactory.getBeansOfType(cls).keySet();
        beanNames.forEach(beanName -> {
            result.add(beanFactory.getBean(beanName, cls));
            beanFactory.destroySingleton(beanName);
        });
        return result;
    }
}
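A hedged usage sketch: one plausible purpose is dropping stale KafkaTemplate singletons before re-registering them with new settings. The helper class and method are hypothetical:

import com.zf.mq.util.BeanFactoryUtils;
import java.util.List;
import org.springframework.beans.factory.support.DefaultListableBeanFactory;
import org.springframework.context.ConfigurableApplicationContext;
import org.springframework.kafka.core.KafkaTemplate;

// Hypothetical helper; the cast succeeds for typical application contexts.
public class KafkaBeanRefresher {
    public static void refreshTemplates(ConfigurableApplicationContext context) {
        DefaultListableBeanFactory beanFactory =
                (DefaultListableBeanFactory) context.getBeanFactory();
        // Collect and destroy every KafkaTemplate singleton
        List<KafkaTemplate> removed =
                BeanFactoryUtils.destroyOfType(beanFactory, KafkaTemplate.class);
        // Flush the old instances so buffered records are not lost
        removed.forEach(KafkaTemplate::flush);
    }
}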
package com.zf.mq;

import lombok.Data;
import lombok.extern.slf4j.Slf4j;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.stereotype.Component;

/**
 * Thin wrapper exposing the starter's KafkaTemplate.
 * @author xg
 */
@Component
@Slf4j
@Data
public class KafkaClient {

    // @Data already generates the getter and setter
    private KafkaTemplate<String, String> kafkaTemplate;

    public KafkaClient(KafkaTemplate<String, String> kafkaTemplate) {
        this.kafkaTemplate = kafkaTemplate;
    }
}
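Finally, a minimal sending sketch through the client. The service class and topic name are illustrative, and the example assumes KafkaClient has been registered (e.g. the com.zf.mq package is scanned):

import com.zf.mq.KafkaClient;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;

// Hypothetical caller; "demo-topic" is an assumption.
@Service
public class DemoProducerService {

    @Autowired
    private KafkaClient kafkaClient;

    public void send(String message) {
        kafkaClient.getKafkaTemplate()
                .send("demo-topic", message)
                // spring-kafka 2.7 returns a ListenableFuture; report the outcome
                .addCallback(
                        result -> System.out.println("sent: " + message),
                        ex -> System.err.println("send failed: " + ex.getMessage()));
    }
}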