package iie.rocketmq;
import com.alibaba.rocketmq.client.consumer.DefaultMQPushConsumer;
import com.alibaba.rocketmq.client.consumer.listener.ConsumeConcurrentlyContext;
import com.alibaba.rocketmq.client.consumer.listener.ConsumeConcurrentlyStatus;
import com.alibaba.rocketmq.client.consumer.listener.MessageListenerConcurrently;
import com.alibaba.rocketmq.common.consumer.ConsumeFromWhere;
import com.alibaba.rocketmq.common.message.MessageExt;

import kafka.javaapi.producer.Producer;
import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;

import java.io.File;
import java.io.FileInputStream;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.atomic.AtomicInteger;
/**
 * Bridges messages from a RocketMQ topic into Kafka: a push consumer receives
 * batches of {@link MessageExt} and republishes each body to a Kafka topic of
 * the same name, spreading messages round-robin over the configured number of
 * partitions.
 */
public class RocketmqToKafka0 {
    // Round-robin partition counter. AtomicInteger because consumeMessage()
    // runs on up to 5 concurrent RocketMQ consumer threads (see
    // setConsumeThreadMax below); the original boxed-Integer "currentPart++"
    // was a non-atomic read-modify-write and could lose updates.
    private final AtomicInteger currentPart = new AtomicInteger(0);
    // Number of Kafka partitions to spread over; overwritten from the
    // "partitionnum" config property in RocketmqToKafka(). volatile so the
    // consumer threads see the value written by the setup thread.
    private volatile int partNum = 1;
    // Kafka producer configuration, populated in RocketmqToKafka() and read
    // by the anonymous listener when it constructs its Producer.
    private final Properties propsKafka = new Properties();

    /**
     * Loads the bridge configuration, subscribes to the RocketMQ topic and
     * starts forwarding messages to Kafka. Does not return an error; the
     * consumer keeps running on its own threads after start().
     *
     * @param args args[0] is the path of a properties file with keys:
     *             brokers (Kafka broker list), mqAddr (RocketMQ name server),
     *             topicName, partitionnum (Kafka partition count),
     *             mqConsumer (RocketMQ consumer group).
     * @throws Exception on config-load, parse or client-startup failure.
     */
    public void RocketmqToKafka(String[] args) throws Exception {
        // Load the configuration file; try-with-resources closes the stream
        // (the original leaked the FileInputStream).
        Properties properties = new Properties();
        try (FileInputStream in = new FileInputStream(new File(args[0]))) {
            properties.load(in);
        }
        final String brokers = properties.getProperty("brokers");
        String mqAddr = properties.getProperty("mqAddr");
        String topicName = properties.getProperty("topicName");
        final int partitionNum = Integer.parseInt(properties.getProperty("partitionnum"));
        String mqConsumer = properties.getProperty("mqConsumer");
        // BUG FIX: the configured partition count was parsed but never
        // applied, so partNum stayed 1 and every message went to partition 0.
        this.partNum = partitionNum;

        // RocketMQ push-consumer settings.
        DefaultMQPushConsumer consumer = new DefaultMQPushConsumer(mqConsumer);
        consumer.setNamesrvAddr(mqAddr);
        consumer.setConsumeThreadMax(5);
        consumer.setConsumeThreadMin(1);
        // Start from the earliest available offset when this consumer group
        // has no committed offset yet (the original comment wrongly claimed
        // "resume from the last subscribed position").
        consumer.setConsumeFromWhere(ConsumeFromWhere.CONSUME_FROM_FIRST_OFFSET);
        // Subscribe to every tag ("*") of the configured topic.
        consumer.subscribe(topicName, "*");

        // Kafka 0.8 producer settings ("metadata.broker.list" is the old
        // producer API's broker endpoint list).
        propsKafka.put("metadata.broker.list", brokers);
        // BUG FIX: values are raw byte[] message bodies, so they need the
        // pass-through DefaultEncoder; the original StringEncoder would throw
        // ClassCastException on the first send.
        propsKafka.put("serializer.class", "kafka.serializer.DefaultEncoder");
        // Keys are always null (partitioning uses the explicit partKey), so
        // this encoder is never invoked; kept for config completeness.
        propsKafka.put("key.serializer.class", "kafka.serializer.StringEncoder");

        consumer.registerMessageListener(new MessageListenerConcurrently() {
            // One producer shared by all consumer threads.
            private final Producer<Integer, byte[]> producer =
                    new Producer<Integer, byte[]>(new ProducerConfig(propsKafka));

            public ConsumeConcurrentlyStatus consumeMessage(List<MessageExt> msgs,
                                                            ConsumeConcurrentlyContext context) {
                producer.send(getKeyedMsg(msgs));
                return ConsumeConcurrentlyStatus.CONSUME_SUCCESS;
            }
        });
        consumer.start();
    }

    /**
     * Converts a batch of RocketMQ messages into Kafka messages. Each Kafka
     * message reuses the RocketMQ topic name, carries a null key, a
     * round-robin partition key from {@link #getPartKey()}, and the raw
     * message body as its payload.
     *
     * @param msgs RocketMQ message batch.
     * @return one KeyedMessage per input message, in order.
     */
    public List<KeyedMessage<Integer, byte[]>> getKeyedMsg(List<MessageExt> msgs) {
        List<KeyedMessage<Integer, byte[]>> kafkaMsgList =
                new ArrayList<KeyedMessage<Integer, byte[]>>(msgs.size());
        for (MessageExt msg : msgs) {
            kafkaMsgList.add(new KeyedMessage<Integer, byte[]>(
                    msg.getTopic(), null, getPartKey(), msg.getBody()));
        }
        return kafkaMsgList;
    }

    /**
     * Returns the next round-robin partition index in [0, partNum).
     * Thread-safe: called concurrently by the consumer threads.
     */
    private int getPartKey() {
        // floorMod keeps the result non-negative even after the counter
        // eventually wraps past Integer.MAX_VALUE.
        return Math.floorMod(currentPart.getAndIncrement(), partNum);
    }
}
// Example: receiving data from RocketMQ and forwarding it into Kafka (Java).
// (Scraped page footer, original publish date: 2025-02-21 16:40:04.)