Kafka Consumer

  • Add the Kafka dependencies
    <dependencies>  
        <dependency>  
            <groupId>org.apache.kafka</groupId>  
            <artifactId>kafka_2.11</artifactId>  
            <version>0.8.2.1</version>  
        </dependency>  
        <dependency>  
            <groupId>org.apache.kafka</groupId>  
            <artifactId>kafka-clients</artifactId>  
            <version>0.8.2.1</version>  
        </dependency>  
    </dependencies>  
  • Kafka consumer configuration
zookeeper.connect=192.168.27.129:2181
zookeeper.session.timeout.ms=1000
zookeeper.sync.time.ms=500
auto.commit.interval.ms=500
auto.commit.enable=false
auto.offset.reset=smallest

#group.id=ecsgroup
group.id=test-group-1

#topic=ecps-test-05
topic=test

#topic=ecps-test2
deserializer.class=com.ai.iis.logger.kafka.MessageDeserialize
# must equal the number of partitions of the Kafka topic
thread=1
#partitioner.class=
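
The config names a custom decoder under deserializer.class. Note that the consumer code below constructs StringDecoder directly and never reads this key; still, for reference, a custom decoder in the 0.8.x high-level consumer API implements kafka.serializer.Decoder<T>, and the built-in decoders take a VerifiableProperties constructor argument. A minimal sketch of such a class (the implementation is an assumption; only the class name comes from the config):

package com.ai.iis.logger.kafka;

import java.io.UnsupportedEncodingException;

import kafka.serializer.Decoder;
import kafka.utils.VerifiableProperties;

// Hypothetical sketch: decodes raw message bytes into a UTF-8 string
public class MessageDeserialize implements Decoder<String> {

    public MessageDeserialize(VerifiableProperties props) {
        // no decoder-specific properties are needed in this sketch
    }

    @Override
    public String fromBytes(byte[] bytes) {
        try {
            return new String(bytes, "UTF-8");
        } catch (UnsupportedEncodingException e) {
            return new String(bytes); // fall back to the platform default charset
        }
    }
}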

The Kafka consumer itself is an abstract class: its abstract method doBusiness is the business-processing hook, and it receives each Kafka message as its parameter. (A usage sketch follows the class below.)

package com.ai.ecsite.kafka.consumer.interfaces;

import com.ai.ecsite.util.common.PropertiesLoader;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.message.MessageAndMetadata;
import kafka.serializer.StringDecoder;
import kafka.utils.VerifiableProperties;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

/**
 * Base Kafka consumer: handles connecting to Kafka, starting consumption, and decoding
 * messages, and defines the contract for business processing.
 *
 * Created by huxin on 2016/12/20.
 */
public abstract class AbstractKafkaConsumer {
    // Logger
    private final static Logger logger = LoggerFactory.getLogger(AbstractKafkaConsumer.class);
    // Kafka consumer connector (instance-scoped so each consumer owns its own connection)
    private ConsumerConnector consumer;
    // Thread pool for the consumer threads
    private ExecutorService executor;
    // Loaded configuration
    private PropertiesLoader propertiesLoader = null;

    /**
     * Initializes the Kafka consumer connector.
     *
     * @param file path to the consumer properties file
     * @return the consumer connector
     */
    public ConsumerConnector getConsumerConnector(String file) {
        consumer = kafka.consumer.Consumer.createJavaConsumerConnector(createConsumerConfig(file));
        return consumer;
    }

    /**
     * Starts consuming Kafka messages.
     */
    public void run(String file) {
        // Initialize the consumer connector (this also loads the properties file)
        consumer = getConsumerConnector(file);
        // Number of consumer threads; it must not exceed the topic's partition count,
        // otherwise the surplus threads receive no messages
        int thread = Integer.valueOf(propertiesLoader.getProperty("thread") != null ? propertiesLoader.getProperty("thread") : "1");
        String topic = propertiesLoader.getProperty("topic");

        // Decode both keys and values as strings
        StringDecoder keyDecoder = new StringDecoder(new VerifiableProperties());
        StringDecoder valueDecoder = new StringDecoder(new VerifiableProperties());

        Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
        topicCountMap.put(topic, thread);
        Map<String, List<KafkaStream<String, String>>> consumerMap = consumer.createMessageStreams(topicCountMap, keyDecoder, valueDecoder);
        List<KafkaStream<String, String>> streams = consumerMap.get(topic);

        try {
            executor = Executors.newFixedThreadPool(thread);
            int threadNumber = 0;
            // Each stream is backed by a blocking iterator: hasNext() waits until a message arrives
            for (final KafkaStream<String, String> kafkaStream : streams) {
                logger.info("creating thread {}", threadNumber);
                executor.submit(new Runnable() {
                    @Override
                    public void run() {
                        ConsumerIterator<String, String> it = kafkaStream.iterator();
                        while (it.hasNext()) {
                            MessageAndMetadata<String, String> data = it.next();
                            String msg = data.message();
                            try {
                                if (null != msg && msg.trim().length() > 0) {
                                    // Business processing implemented by the subclass
                                    doBusiness(msg);
                                }
                                // auto.commit.enable is false, so commit offsets manually after processing
                                consumer.commitOffsets();
                            } catch (Exception e) {
                                logger.error("processing message failed! message: " + msg, e);
                            }
                        }
                    }
                });

                logger.debug("started thread {}", threadNumber);
                threadNumber++;
            }

            // Block the calling thread so the consumer threads keep running
            System.in.read();
            logger.warn("exit kafka consumer");
        } catch (Exception e) {
            logger.warn("failed to start consumer threads: {}", e.getMessage());
        }
    }

    /**
     * Business processing for each consumed message; subclasses must implement this.
     */
    public abstract void doBusiness(String msg);

    /**
     * Shuts down the consumer connector and the thread pool.
     */
    public void shutdown() {
        // Close the consumer connector
        if (consumer != null) {
            consumer.shutdown();
        }
        if (executor != null) {
            executor.shutdown();
        }
    }

    /**
     * Builds the Kafka ConsumerConfig from the given properties file.
     *
     * @param file path to the consumer properties file
     * @return the consumer configuration
     * @author huxin
     * @create 2016/11/24
     * @version V1.0.0
     */
    private ConsumerConfig createConsumerConfig(String file) {
        // Load the properties file
        propertiesLoader = new PropertiesLoader(file);
        Properties props = new Properties();
        props.put("zookeeper.connect", propertiesLoader.getProperty("zookeeper.connect"));
        props.put("group.id", propertiesLoader.getProperty("group.id"));
        props.put("zookeeper.session.timeout.ms", propertiesLoader.getProperty("zookeeper.session.timeout.ms"));
        props.put("zookeeper.sync.time.ms", propertiesLoader.getProperty("zookeeper.sync.time.ms"));
        props.put("auto.commit.interval.ms", propertiesLoader.getProperty("auto.commit.interval.ms"));
        props.put("auto.commit.enable", propertiesLoader.getProperty("auto.commit.enable"));
        props.put("auto.offset.reset", propertiesLoader.getProperty("auto.offset.reset"));

        return new ConsumerConfig(props);
    }
}
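
To use the class, extend it and implement doBusiness. A minimal usage sketch (the subclass name and properties file path are placeholders, not from the original post):

package com.ai.ecsite.kafka.consumer;

import com.ai.ecsite.kafka.consumer.interfaces.AbstractKafkaConsumer;

// Hypothetical subclass showing the intended usage
public class EchoConsumer extends AbstractKafkaConsumer {

    @Override
    public void doBusiness(String msg) {
        // Business logic for each consumed message; here we simply print it
        System.out.println("received: " + msg);
    }

    public static void main(String[] args) {
        EchoConsumer consumer = new EchoConsumer();
        // run() starts the consumer threads and then blocks on System.in.read()
        consumer.run("kafka-consumer.properties");
        // Reached once run() returns (after input on stdin)
        consumer.shutdown();
    }
}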

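The code also depends on the project-internal PropertiesLoader, which the post does not show. Only its String constructor and getProperty(String) are used; a minimal stand-in under those assumptions:

package com.ai.ecsite.util.common;

import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.Properties;

// Hypothetical stand-in: loads a .properties file from the given path
public class PropertiesLoader {

    private final Properties props = new Properties();

    public PropertiesLoader(String file) {
        try (InputStream in = new FileInputStream(file)) {
            props.load(in);
        } catch (IOException e) {
            throw new RuntimeException("cannot load properties file: " + file, e);
        }
    }

    public String getProperty(String key) {
        return props.getProperty(key);
    }
}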