Put records with the same brand into one partition, then write a consumer that consumes only that partition

Task: route each of the following records (format: name,brand,amount) so that records with the same brand land in the same partition, then write a consumer that reads only that partition to verify the result.
tom,puma,400
jim,nike,1000
susan,nike,1200
lele,adidas,800
hua,nike,1300
jim,puma,500
lele,nike,500
tom,puma,600
lele,nike,700
hua,adidas,1200
Partitioner

import org.apache.kafka.clients.producer.Partitioner;
import org.apache.kafka.common.Cluster;

import java.util.ArrayList;
import java.util.Map;

public class MyPartition implements Partitioner {
    // Brands seen so far; a brand's index in this list is used as its partition
    // number. The first occurrence of each brand claims the next free index, so
    // with the sample data: puma -> 0, nike -> 1, adidas -> 2.
    private final ArrayList<String> brands = new ArrayList<>();

    @Override
    public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) {
        // Each record value has the form name,brand,amount; the brand is field 1.
        String brand = value.toString().split(",")[1];
        if (!brands.contains(brand)) {
            brands.add(brand);
        }
        return brands.indexOf(brand);
    }

    @Override
    public void close() {
        // No resources to release.
    }

    @Override
    public void configure(Map<String, ?> configs) {
        // No custom configuration needed.
    }
}
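With the sample data the partitioner uses partitions 0 through 2, so the topic must exist with at least three partitions before producing. A minimal sketch that pre-creates the topic with Kafka's AdminClient, assuming the same broker address b02master:9092 and topic name test10 used below (replication factor 1 for a test setup):

import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.NewTopic;

import java.util.Collections;
import java.util.Properties;

public class CreateTopic {
    public static void main(String[] args) throws Exception {
        Properties properties = new Properties();
        properties.setProperty(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "b02master:9092");
        try (AdminClient admin = AdminClient.create(properties)) {
            // Three partitions (one per brand), replication factor 1.
            NewTopic topic = new NewTopic("test10", 3, (short) 1);
            admin.createTopics(Collections.singletonList(topic)).all().get();
        }
    }
}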

Producer


import org.apache.kafka.clients.producer.*;
import org.apache.kafka.common.serialization.StringSerializer;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.util.Properties;

public class ProducerTest {
    public static void main(String[] args) {
        Properties properties = new Properties();
        properties.setProperty(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        properties.setProperty(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        properties.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "b02master:9092");
        // Plug in the custom partitioner so records are routed by brand.
        properties.setProperty(ProducerConfig.PARTITIONER_CLASS_CONFIG, MyPartition.class.getName());

        KafkaProducer<String, String> producer = new KafkaProducer<String, String>(properties);

        // Read the data file and send each line as one record.
        try (BufferedReader bufferedReader = new BufferedReader(new FileReader(new File("D:\\djz\\data.txt")))) {
            String strLine;
            while (null != (strLine = bufferedReader.readLine())) {
                ProducerRecord<String, String> record = new ProducerRecord<String, String>("test10", strLine);
                producer.send(record, new Callback() {
                    @Override
                    public void onCompletion(RecordMetadata metadata, Exception exception) {
                        if (exception == null) {
                            // Print the partition and offset each record landed on.
                            System.out.println("partition:" + metadata.partition() + "********" + "offset:" + metadata.offset());
                        } else {
                            exception.printStackTrace();
                        }
                    }
                });
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
        producer.close();
    }
}
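For comparison: if the exact brand-to-partition mapping does not matter, a custom partitioner is not strictly necessary. Keying each record by brand lets the default partitioner hash the key, so records with the same brand always share a partition (though two different brands can hash to the same one). A minimal sketch under those assumptions; the class name KeyedProducerTest and the single hard-coded sample line are illustrative:

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

import java.util.Properties;

public class KeyedProducerTest {
    public static void main(String[] args) {
        Properties properties = new Properties();
        properties.setProperty(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        properties.setProperty(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        properties.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "b02master:9092");

        try (KafkaProducer<String, String> producer = new KafkaProducer<String, String>(properties)) {
            String line = "tom,puma,400";
            // Use the brand (second field) as the record key; the default
            // partitioner hashes it, so equal brands share a partition.
            String brand = line.split(",")[1];
            producer.send(new ProducerRecord<String, String>("test10", brand, line));
        }
    }
}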

Consumer


import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.time.Duration;
import java.util.Arrays;
import java.util.Properties;

public class ConsumerTest {
    public static String broker_list = "b02master:9092";
    public static String topic = "test10";

    public static void main(String[] args) {
        Properties properties = new Properties();
        properties.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        properties.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        properties.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, broker_list);
        // Set the consumer group id.
        properties.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "B02_2");

        KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(properties);
        // assign() takes a collection of TopicPartitions; here we read only
        // partition 1, which holds the nike records under MyPartition.
        TopicPartition partition = new TopicPartition(topic, 1);
        consumer.assign(Arrays.asList(partition));

        while (true) {
            // Poll for up to one second per iteration.
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
            for (ConsumerRecord<String, String> record : records) {
                System.out.println(record.value());
            }
        }
    }
}
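One verification gotcha: an assign()-based consumer with the default auto.offset.reset of latest may print nothing if it starts after the producer has already finished. Two standard ways to make it read the partition from the beginning, shown as small additions to ConsumerTest above:

// Option 1: before creating the consumer, fall back to the earliest offset
// when the group has no committed position for this partition.
properties.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");

// Option 2: after assign(), explicitly rewind the assigned partition.
consumer.seekToBeginning(Arrays.asList(partition));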