Configuring a Kafka consumer for multiple topics

Our project integrates Kafka with Spring Boot; the configuration in application.yml looks like this:

```yaml
# Kafka configuration
kafka:
  consumer:
    bootstrap:
    maxPollRecords: 1000
    enableAutoCommit: true
    autoCommitIntervalMs: 100
    sessionTimeoutMs: 15000
    keyDeserializer: org.apache.kafka.common.serialization.StringDeserializer
    valueDeserializer: org.apache.kafka.common.serialization.StringDeserializer
    maxPollIntervalMs: 500000
    log:
      group: test
      topic: test_log
    method:
      group: test1
      topic: test_method
  listen:
    pullTimeOut: 5000
    concurrency: 3
```
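Note that these properties live under a custom `kafka.*` prefix rather than Spring Boot's `spring.kafka.*`, so auto-configuration does not pick them up; the project must define its own listener container factory, which is not shown in the original. For the batch signature used below (`List<ConsumerRecord<?, ?>>`), that factory must also enable batch listening. A minimal sketch, assuming the property names from the YAML above:

```java
import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;

@Configuration
@EnableKafka
public class KafkaConsumerConfig {

    @Value("${kafka.consumer.bootstrap}")
    private String bootstrapServers;

    @Value("${kafka.consumer.maxPollRecords}")
    private int maxPollRecords;

    @Value("${kafka.consumer.keyDeserializer}")
    private String keyDeserializer;

    @Value("${kafka.consumer.valueDeserializer}")
    private String valueDeserializer;

    @Value("${kafka.listen.concurrency}")
    private int concurrency;

    @Value("${kafka.listen.pullTimeOut}")
    private long pollTimeout;

    @Bean
    public ConsumerFactory<String, String> consumerFactory() {
        Map<String, Object> props = new HashMap<>();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords);
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, true);
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializer);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializer);
        return new DefaultKafkaConsumerFactory<>(props);
    }

    @Bean
    public ConcurrentKafkaListenerContainerFactory<String, String> kafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory =
                new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        factory.setConcurrency(concurrency);
        // Required for listener methods that take List<ConsumerRecord<?, ?>>.
        factory.setBatchListener(true);
        factory.getContainerProperties().setPollTimeout(pollTimeout);
        return factory;
    }
}
```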

The consumer class:

```java
package consumer.consumer;

import common.utils.JsonUtils;
import consumer.method.MethodIndexOperator;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.client.transport.TransportClient;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.stereotype.Service;

import java.util.List;
import java.util.Map;

@Service
@Slf4j
public class MethodMonitorConsumer {

    @Autowired
    private MethodIndexOperator methodIndexOperator;

    @Autowired
    private TransportClient transportClient;

    // Batch listener: receives up to maxPollRecords records per poll.
    // Requires a container factory with batch listening enabled (see the sketch above).
    @KafkaListener(id = "${kafka.consumer.method.topic}",
            topics = "${kafka.consumer.method.topic}",
            groupId = "${kafka.consumer.method.group}")
    public void methodBatchMessage(List<ConsumerRecord<?, ?>> records) {
        log.info("Consumed {} records.", records.size());
        BulkRequestBuilder bulkRequestBuilder = transportClient.prepareBulk();
        try {
            for (ConsumerRecord<?, ?> record : records) {
                Map<String, Object> recordMap = JsonUtils.fromJson(record.value().toString());
                methodIndexOperator.handleMessages(bulkRequestBuilder, recordMap);
                log.debug("Consumed record: {}", recordMap);
            }
        } catch (Exception exception) {
            log.error("methodBatchMessage failed", exception);
        }

        // Bulk-write the accumulated index requests to Elasticsearch.
        methodIndexOperator.executeBuilder(bulkRequestBuilder);
    }
}
```
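The YAML above also defines a `log` group/topic pair, but its consumer is not shown in the original. A minimal counterpart sketch (the class name and body are assumptions):

```java
package consumer.consumer;

import java.util.List;

import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.stereotype.Service;

@Service
@Slf4j
public class LogMonitorConsumer {

    // Same batch-listener pattern as above, bound to the log topic and group.
    @KafkaListener(id = "${kafka.consumer.log.topic}",
            topics = "${kafka.consumer.log.topic}",
            groupId = "${kafka.consumer.log.group}")
    public void logBatchMessage(List<ConsumerRecord<?, ?>> records) {
        log.info("Consumed {} log records.", records.size());
        // Process log records here.
    }
}
```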

The other classes are as follows:

```java
import common.constans.ESConstans;
import common.utils.DateFormatUtil;
import lombok.extern.slf4j.Slf4j;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.client.transport.TransportClient;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;

import java.util.Date;
import java.util.Map;

@Slf4j
@Component
public class EsIndexHandler {

    private final String indexPrefixName = "method_monitor";
    private final String defaultIndexType = "monitor_method";
    private final String pattern = "yyyy_MM_dd";

    @Autowired
    private TransportClient transportClient;

    public void handle(BulkRequestBuilder bulkRequestBuilder, Map<String, Object> methodIndexMap) {

        String indexId = String.valueOf(methodIndexMap.get(ESConstans.ID));

        // Build a date-suffixed index name from the record's invoke time.
        long invokeTime = Long.parseLong(String.valueOf(methodIndexMap.get(ESConstans.INVOKE_TIME)));
        String time = DateFormatUtil.format(new Date(invokeTime), pattern);
        String indexName = indexPrefixName + time;

        // The document id is carried separately, so drop it from the source map.
        methodIndexMap.remove(ESConstans.ID);

        IndexRequestBuilder indexRequestBuilder = transportClient
                .prepareIndex(indexName, defaultIndexType, indexId)
                .setSource(methodIndexMap);
        bulkRequestBuilder.add(indexRequestBuilder);
        log.debug("-------- {}", indexRequestBuilder);
    }
}
```
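As a worked example of the naming logic (the timestamp value here is hypothetical): if `ESConstans.INVOKE_TIME` holds an epoch-millisecond value such as `1715000000000` (2024-05-06 UTC), the formatted date is `2024_05_06` and the resulting index name is `method_monitor2024_05_06`. Note that the prefix and the date are concatenated with no separator, which is the original behavior.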

And one more class:

```java
import lombok.extern.slf4j.Slf4j;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.bulk.BulkResponse;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;

import java.util.Map;

@Slf4j
@Component
public class MethodIndexOperator {

    @Autowired
    private EsIndexHandler esIndexHandler;

    public void handleMessages(BulkRequestBuilder bulkRequestBuilder, Map<String, Object> methodIndexMap) {
        esIndexHandler.handle(bulkRequestBuilder, methodIndexMap);
    }

    public void executeBuilder(BulkRequestBuilder bulkRequestBuilder) {
        // Skip the round trip when the batch produced no index requests.
        if (bulkRequestBuilder.numberOfActions() == 0) {
            return;
        }
        BulkResponse response = bulkRequestBuilder.execute().actionGet();
        log.debug("======== {}", response);
    }
}
```
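`JsonUtils.fromJson` is a project-local utility whose source is not shown. A minimal Jackson-based equivalent, purely as an assumption about its behavior:

```java
import java.util.Map;

import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;

public final class JsonUtils {

    private static final ObjectMapper MAPPER = new ObjectMapper();

    private JsonUtils() {
    }

    // Parse a JSON object string into a Map; the real project utility may differ.
    public static Map<String, Object> fromJson(String json) throws Exception {
        return MAPPER.readValue(json, new TypeReference<Map<String, Object>>() {});
    }
}
```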

 

Consuming multiple Kafka topics from a single Spring Boot application is a very common scenario, especially in microservice architectures. With the right configuration, one consumer can subscribe to and process messages from several topics at once. The steps are as follows:

### Step 1: Add the dependency

First, add the `spring-kafka` dependency to the Spring Boot project. With Maven, put the following in pom.xml:

```xml
<dependency>
    <groupId>org.springframework.kafka</groupId>
    <artifactId>spring-kafka</artifactId>
</dependency>
```

### Step 2: Kafka configuration

Edit application.properties (or application.yml) to set the basic properties such as the broker address and group id, and list the topics to subscribe to. For example:

**application.properties example**

```properties
# Kafka broker address
spring.kafka.bootstrap-servers=127.0.0.1:9092
# Consumer group id
spring.kafka.consumer.group-id=my-group-name
# Auto-commit offsets to Kafka
spring.kafka.consumer.enable-auto-commit=true
# Key deserializer
spring.kafka.consumer.key-deserializer=org.apache.kafka.common.serialization.StringDeserializer
# Value deserializer
spring.kafka.consumer.value-deserializer=org.apache.kafka.common.serialization.StringDeserializer
# Topics to subscribe to (comma-separated; a custom property read via SpEL below)
spring.kafka.listener.topics=topic1,topic2,topic3
```

The same settings can be expressed in YAML in a similar way.

### Step 3: Create the message listener

Write a service that listens on these topics and processes the incoming data. A method annotated with `@KafkaListener` automatically becomes a message handler.

Example:

```java
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.stereotype.Service;

@Service
public class MultiTopicConsumer {

    private static final Logger logger = LoggerFactory.getLogger(MultiTopicConsumer.class);

    /**
     * @KafkaListener declares this method as a Kafka consumer callback.
     * The "topics" attribute reads the comma-separated topic list from the
     * configuration file via a SpEL expression.
     */
    @KafkaListener(topics = "#{'${spring.kafka.listener.topics}'.split(',')}")
    public void listen(String message) {
        // Business logic goes here.
        logger.info("Received Message from Kafka -> {}", message);
        if (message.contains("error")) {
            throw new RuntimeException(); // Deliberately fail to simulate an error
        }
    }
}
```

The example above resolves the target topics dynamically from configuration; they can also be hard-coded in the annotation, e.g. `topics = {"foo", "bar"}`.

Also note that when a listener throws, you can plug in a custom recovery strategy as needed, such as manual acknowledgment or retry; a sketch follows the notes below.

---

#### Notes and recommendations:

- Make sure each topic has enough partitions to balance the load under high concurrency;
- If the data formats of the topics differ significantly, consider generic parsing, or set up a dedicated listener per topic for cleaner, more maintainable code;
- Keep any state updates inside the listener asynchronous and non-blocking so they do not hurt overall throughput.
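As a sketch of the error-recovery hook mentioned above (bean names are assumptions; `DefaultErrorHandler` is the spring-kafka 2.8+ API, older versions use `SeekToCurrentErrorHandler` instead):

```java
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.listener.DefaultErrorHandler;
import org.springframework.util.backoff.FixedBackOff;

@Configuration
public class KafkaErrorHandlingConfig {

    @Bean
    public ConcurrentKafkaListenerContainerFactory<String, String> retryingListenerFactory(
            ConsumerFactory<String, String> consumerFactory) {
        ConcurrentKafkaListenerContainerFactory<String, String> factory =
                new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory);
        // Retry each failed record 3 times, 1 second apart, then log and skip it.
        factory.setCommonErrorHandler(new DefaultErrorHandler(new FixedBackOff(1000L, 3L)));
        return factory;
    }
}
```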