1. Business scenario
When a Spring Boot project starts, the Kafka middleware may not be up yet, and in that case the application crashes immediately. The business requirement is to wait until Kafka has started successfully before connecting to it.
2. Project configuration
Add the spring-kafka dependency to the pom file:
<dependency>
    <groupId>org.springframework.kafka</groupId>
    <artifactId>spring-kafka</artifactId>
    <version>2.8.6</version>
</dependency>
Kafka connection configured via JavaConfig:
package com.hero.kafka.config;

import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.DependsOn;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.core.*;
import org.springframework.kafka.listener.ContainerProperties;

import java.util.HashMap;
import java.util.Map;

@Configuration
@DependsOn(value = "springContextHolder")
public class KafkaConfig {

    private String kafkaServers = "192.168.1.1:2283,192.168.1.2:2283,192.168.1.3:2283";

    @Value("${myConfig.kafka.producer.retries:3}")
    private int retries;

    @Value("${myConfig.kafka.producer.batch-size:16384}")
    private int batchSize;

    @Value("${myConfig.kafka.producer.buffer-memory:33554432}")
    private int bufferMemory;

    @Value("${myConfig.kafka.producer.acks:1}")
    private String acks;

    @Value("${myConfig.kafka.consumer.group-id:myKafkaGroup}")
    private String consumerGroupId;

    @Value("${myConfig.kafka.consumer.enable-auto-commit:false}")
    private String consumerEnableAutoCommit;

    @Value("${myConfig.kafka.consumer.auto-offset-reset:earliest}")
    private String consumerAutoOffsetReset;

    private String consumerKeyDeserializer = "org.apache.kafka.common.serialization.StringDeserializer";
    private String consumerValueDeserializer = "org.apache.kafka.common.serialization.StringDeserializer";

    @Bean
    public ProducerFactory<String, String> producerFactory() {
        Map<String, Object> configProps = new HashMap<>();
        configProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaServers);
        configProps.put(ProducerConfig.RETRIES_CONFIG, retries);
        configProps.put(ProducerConfig.BATCH_SIZE_CONFIG, batchSize);
        configProps.put(ProducerConfig.BUFFER_MEMORY_CONFIG, bufferMemory);
        configProps.put(ProducerConfig.ACKS_CONFIG, acks);
        configProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        configProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        return new DefaultKafkaProducerFactory<>(configProps);
    }

    @Bean
    public KafkaTemplate<String, String> kafkaTemplate() {
        return new KafkaTemplate<>(producerFactory());
    }

    @Bean
    public ConsumerFactory<String, String> kafkaConsumerFactory() {
        Map<String, Object> configProps = new HashMap<>();
        // ConsumerConfig (not ProducerConfig) is the correct constant holder here,
        // even though both resolve to "bootstrap.servers".
        configProps.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaServers);
        configProps.put(ConsumerConfig.GROUP_ID_CONFIG, consumerGroupId);
        configProps.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, consumerEnableAutoCommit);
        configProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, consumerAutoOffsetReset);
        configProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, consumerKeyDeserializer);
        configProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, consumerValueDeserializer);
        return new DefaultKafkaConsumerFactory<>(configProps);
    }

    @Bean
    public ConcurrentKafkaListenerContainerFactory<String, String> kafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(kafkaConsumerFactory());
        // Offsets are committed manually, immediately after each acknowledge() call
        factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE);
        return factory;
    }

    /**
     * Admin client used for Kafka administration operations.
     *
     * @return the KafkaAdmin bean
     */
    @Bean
    public KafkaAdmin kafkaAdmin() {
        Map<String, Object> props = new HashMap<>();
        // Bootstrap address of the Kafka cluster
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaServers);
        return new KafkaAdmin(props);
    }
}
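Because kafkaListenerContainerFactory uses AckMode.MANUAL_IMMEDIATE, any @KafkaListener wired to it must acknowledge records explicitly. A minimal sketch of such a listener (the class name DemoKafkaListener and the topic demo-topic are made up for illustration):

package com.hero.kafka.listener;

import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.stereotype.Component;

@Slf4j
@Component
public class DemoKafkaListener {

    // "demo-topic" is a placeholder. With MANUAL_IMMEDIATE, the offset is
    // committed only when acknowledge() is called.
    @KafkaListener(topics = "demo-topic", containerFactory = "kafkaListenerContainerFactory")
    public void onMessage(ConsumerRecord<String, String> record, Acknowledgment ack) {
        log.info("received key={} value={}", record.key(), record.value());
        ack.acknowledge();
    }
}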
3. Adding a Kafka connectivity check at startup
Check logic: if the Kafka connection fails, keep retrying until it succeeds. The code is as follows:
3.1. Abstract class exposing the Spring application context
See the AbstractValidateItem abstract class in the companion article "springboot 启动时校验redis是否连接成功" (checking the Redis connection when Spring Boot starts); a minimal sketch of it is shown below.
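Since the real class lives in the referenced article, this is a hypothetical reconstruction: the subclass below only needs an abstract validate() hook and a getContext() accessor it can close on fatal failure. The original reportedly obtains the context through springContextHolder; for self-containment this sketch uses ApplicationContextAware instead.

package com.hero.validate;

import org.springframework.beans.BeansException;
import org.springframework.context.ApplicationContext;
import org.springframework.context.ApplicationContextAware;
import org.springframework.context.ConfigurableApplicationContext;

// Hypothetical reconstruction of the abstract class from the referenced article.
public abstract class AbstractValidateItem implements ApplicationContextAware {

    private ConfigurableApplicationContext context;

    @Override
    public void setApplicationContext(ApplicationContext applicationContext) throws BeansException {
        this.context = (ConfigurableApplicationContext) applicationContext;
    }

    // Subclasses call getContext().close() when validation fails permanently.
    protected ConfigurableApplicationContext getContext() {
        return context;
    }

    // Each validation item implements its own check here.
    public abstract void validate();
}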
3.2. Concrete validation implementation
package com.hero.validate;

import com.hero.nms.basis.framework.clusterip.ClusteripAddrManage;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.DescribeClusterResult;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.Node;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.DependsOn;
import org.springframework.stereotype.Component;

import java.util.Collection;
import java.util.Properties;
import java.util.concurrent.atomic.AtomicInteger;

@Slf4j
@Component
@DependsOn(value = "springContextHolder")
public class ValidateMessageBroker extends AbstractValidateItem {

    @Value("${myConfig.kafka.bootstrap-servers}")
    private String kafkaServers;

    @Override
    public void validate() {
        log.info(">>> ValidateMessageBroker, validate");
        kafkaValid();
    }

    private void kafkaValid() {
        for (int i = 1; i <= 1000000; i++) {
            try {
                Thread.sleep(backoff(i));
            } catch (InterruptedException e) {
                // Restore the interrupt flag and stop retrying
                Thread.currentThread().interrupt();
                return;
            }
            try {
                int kafkaConnection = getKafkaConnection();
                if (kafkaConnection == 1) {
                    log.info(" check connect kafka ok");
                    return;
                } else {
                    log.warn(" retry connect kafka addr {} fail", i);
                }
            } catch (Exception e) {
                if (i == 1) {
                    // Log the full stack trace only on the first failure
                    log.warn(" first kafka connection attempt failed", e);
                }
                log.warn(" retry connect kafka {} fail", i);
                if (i == 1000000) {
                    log.error(" nmsop will be restarted for retry connect kafka {} fail, error:", i, e);
                    getContext().close();
                }
            }
        }
    }

    public int getKafkaConnection() throws Exception {
        // AdminClient configuration
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaServers);
        AtomicInteger flag = new AtomicInteger(-1);
        // try-with-resources so each probe releases its connections
        try (AdminClient adminClient = AdminClient.create(props)) {
            // Query the Kafka cluster metadata
            DescribeClusterResult clusterResult = adminClient.describeCluster();
            KafkaFuture<Collection<Node>> future = clusterResult.nodes().whenComplete((nodes, throwable) -> {
                if (throwable == null) {
                    flag.set(1);
                    ClusteripAddrManage.getSingleTon().setKafkaServer(kafkaServers);
                    log.info("Kafka broker connection OK");
                } else {
                    flag.set(-1);
                    log.error("Kafka broker connection failed: {}", throwable.getMessage());
                }
            });
            // Block until the request completes (throws if it completed exceptionally)
            future.get();
        }
        return flag.get();
    }

    public static int backoff(int n) {
        // Linear backoff capped at 10 seconds: 2s, 4s, 6s, 8s, then 10s thereafter
        if (n > 5) {
            n = 5;
        }
        return 2000 * n;
    }
}
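How validate() gets triggered is not shown in this excerpt. One possible wiring, purely an assumption (the original series may do this differently), is a runner that collects every AbstractValidateItem bean and executes it at startup:

package com.hero.validate;

import java.util.List;
import org.springframework.boot.ApplicationArguments;
import org.springframework.boot.ApplicationRunner;
import org.springframework.core.annotation.Order;
import org.springframework.stereotype.Component;

// Hypothetical wiring: runs every AbstractValidateItem bean once the
// application context is ready, blocking startup until each check passes.
@Component
@Order(1)
public class StartupValidateRunner implements ApplicationRunner {

    private final List<AbstractValidateItem> validateItems;

    public StartupValidateRunner(List<AbstractValidateItem> validateItems) {
        this.validateItems = validateItems;
    }

    @Override
    public void run(ApplicationArguments args) {
        validateItems.forEach(AbstractValidateItem::validate);
    }
}

Note that an ApplicationRunner fires after the context has refreshed; if the check must complete before the Kafka listener containers start, an earlier hook such as a SmartInitializingSingleton would be needed.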
With this in place, when the Spring Boot project starts it verifies that Kafka is reachable; if the connection fails, it keeps retrying until it succeeds.
To summarize: this article showed how to keep a Spring Boot project from crashing at startup because the Kafka service is not yet up, covering the Kafka connection configuration, managing producers and consumers with JavaConfig, and adding a startup check that retries the Kafka connection until it succeeds.