Consuming Kafka Data with Java Code

First, create a Maven project and add the required dependencies:

<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka-clients</artifactId>
    <version>3.0.0</version>
</dependency>

<dependency>
    <groupId>org.slf4j</groupId>
    <artifactId>slf4j-log4j12</artifactId>
    <version>1.7.25</version>
</dependency>
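Note that slf4j-log4j12 expects a log4j 1.x configuration file on the classpath; without one you get a "no appenders" warning and lose the client logs. A minimal sketch of src/main/resources/log4j.properties (the file name and location follow the log4j 1.x convention; this exact config is an illustrative assumption, not something the project above ships):

# Minimal log4j 1.x config: send INFO and above to the console
log4j.rootLogger=INFO, stdout
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=%d{HH:mm:ss} %-5p %c - %m%n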

Example 1: Consuming all data from a Kafka topic with Java code

package com.bigdata.day03;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.time.Duration;
import java.util.ArrayList;
import java.util.Properties;

public class Demo01 {

    public static void main(String[] args) {
        Properties properties = new Properties();
        // Connect to Kafka
        properties.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "bigdata01:9092");
        /**
         * Kafka's default partitioning rules (these apply to the producer, and
         * explain the test result described below):
         * 1. If a partition is specified, the message goes to that partition.
         * 2. If no partition is specified but a key is, the partition is hash(key) % number of partitions.
         * 3. If neither is specified, the sticky partitioner is used.
         */
        // Deserializer classes for record keys and values
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
                StringDeserializer.class.getName());
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
                StringDeserializer.class.getName());
        // A consumer must provide a group ID, otherwise it fails with:
        // "you must provide a valid group.id in the consumer configuration"
        properties.put(ConsumerConfig.GROUP_ID_CONFIG, "test");
        KafkaConsumer<String, String> kafkaConsumer = new KafkaConsumer<String, String>(properties);
        // Subscribe to the topic(s) to consume
        ArrayList<String> list = new ArrayList<>();
        list.add("topicA");
        kafkaConsumer.subscribe(list);
        while (true) {
            // Poll for up to 1 second, then print each record and its value
            ConsumerRecords<String, String> records = kafkaConsumer.poll(Duration.ofSeconds(1));
            for (ConsumerRecord<String, String> record : records) {
                System.out.println(record);
                System.out.println(record.value());
            }
        }
    }
}
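The poll loop above runs forever, so the consumer is never closed cleanly. A common shutdown pattern is sketched below as a drop-in replacement for the loop in Demo01's main() (the shutdown hook is an illustrative addition, not part of the original example): kafkaConsumer.wakeup() is the thread-safe way to interrupt a blocked poll(), which then throws WakeupException.

        // Sketch: graceful shutdown via wakeup() from a JVM shutdown hook
        final Thread mainThread = Thread.currentThread();
        Runtime.getRuntime().addShutdownHook(new Thread(() -> {
            kafkaConsumer.wakeup();          // makes the blocked poll() throw WakeupException
            try {
                mainThread.join();           // wait for the poll loop to finish cleaning up
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        }));

        try {
            while (true) {
                ConsumerRecords<String, String> records = kafkaConsumer.poll(Duration.ofSeconds(1));
                for (ConsumerRecord<String, String> record : records) {
                    System.out.println(record.value());
                }
            }
        } catch (org.apache.kafka.common.errors.WakeupException e) {
            // expected on shutdown; fall through to close()
        } finally {
            kafkaConsumer.close();           // leave the group promptly and release resources
        }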

To test this, use the following Kafka producer code. Run the consumer code above first, then run the producer code below:

package com.bigdata;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;

import java.util.Properties;

public class Demo01 {

    public static void main(String[] args) throws InterruptedException {
        Properties properties = new Properties();
        // At a minimum, the producer needs to know which brokers to connect to
        // and how to serialize record keys and values
        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "bigdata01:9092");
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringSerializer");
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringSerializer");
        KafkaProducer<String, String> kafkaProducer = new KafkaProducer<String, String>(properties);

        for (int i = 0; i < 100; i++) {
            // No partition and no key: the sticky partitioner chooses the partition
            ProducerRecord<String, String> producerRecord =
                    new ProducerRecord<String, String>("topicA", "It's Double 11 today, have you bought anything?");
            kafkaProducer.send(producerRecord);
            // Thread.sleep(2);
        }

        kafkaProducer.close();
    }
}

The observed result is that all the messages the consumer receives come from a single partition. Because the records carry no partition and no key, the producer's sticky partitioner keeps writing to the same partition until the current batch fills up, and 100 small messages sent in a tight loop typically all land in one batch.
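You can verify this from the producer side with a send callback. A minimal sketch, replacing the send loop in the producer above (the callback and its output are illustrative additions, not part of the original example):

        // Sketch: print which partition each record actually landed in
        for (int i = 0; i < 100; i++) {
            ProducerRecord<String, String> record =
                    new ProducerRecord<>("topicA", "message-" + i);
            kafkaProducer.send(record, (metadata, exception) -> {
                if (exception == null) {
                    System.out.println("sent to partition " + metadata.partition()
                            + " at offset " + metadata.offset());
                } else {
                    exception.printStackTrace();
                }
            });
        }
        kafkaProducer.flush();

With no key, every callback should report the same partition number; add a key (or an explicit partition) and the output spreads across partitions.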

Example 2: Consuming data from specific Kafka partitions with Java code

package com.bigdata.day03;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.time.Duration;
import java.util.ArrayList;
import java.util.Properties;

public class Demo02 {

    public static void main(String[] args) {
        Properties properties = new Properties();
        // Connect to Kafka
        properties.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "bigdata01:9092");
        // Deserializer classes for record keys and values
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
                StringDeserializer.class.getName());
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
                StringDeserializer.class.getName());
        // A consumer must provide a group ID, otherwise it fails with:
        // "you must provide a valid group.id in the consumer configuration"
        properties.put(ConsumerConfig.GROUP_ID_CONFIG, "test1");
        KafkaConsumer<String, String> kafkaConsumer = new KafkaConsumer<String, String>(properties);

        // Consume specific partitions of a topic via assign()
        // (no consumer group subscription or rebalancing is involved)
        ArrayList<TopicPartition> partitions = new ArrayList<>();
        partitions.add(new TopicPartition("topicA", 0));
        partitions.add(new TopicPartition("topicA", 1));
        kafkaConsumer.assign(partitions);

        while (true) {
            // Poll and print each record, its value, and the partition it came from
            ConsumerRecords<String, String> records = kafkaConsumer.poll(Duration.ofSeconds(1));
            for (ConsumerRecord<String, String> record : records) {
                System.out.println(record);
                System.out.println(record.value());
                System.out.println(record.partition());
            }
        }
    }
}

Pair the consumer code above with the producer code below, which sends every record to a specific partition:

package com.bigdata;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;

import java.util.Properties;

public class Demo01 {

    public static void main(String[] args) throws InterruptedException {
        Properties properties = new Properties();
        // At a minimum: which brokers to connect to,
        // and how to serialize record keys and values
        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "bigdata01:9092");
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringSerializer");
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringSerializer");
        KafkaProducer<String, String> kafkaProducer = new KafkaProducer<String, String>(properties);

        for (int i = 0; i < 100; i++) {
            // Explicit partition 1 with key "abc": every record goes to partition 1
            ProducerRecord<String, String> producerRecord =
                    new ProducerRecord<String, String>("topicA", 1, "abc", "It's Double 11 today, have you bought anything?");
            kafkaProducer.send(producerRecord);
            // Thread.sleep(2);
        }

        kafkaProducer.close();
    }
}

Because the producer specifies partition 1, only partition 1 receives data. The consumer, assigned to partitions 0 and 1, will therefore only print records from partition 1; the other partitions stay empty.
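One caveat worth noting: assign() does not reset offsets, so with an existing group.id (or with auto.offset.reset left at its default of latest for a new group) you may not see records produced before the consumer started. A minimal sketch of re-reading the assigned partitions from the beginning, as a drop-in change to Demo02's main() (the seekToBeginning() call is an illustrative addition, not part of the original example):

        // Sketch: after assign(), rewind the assigned partitions to the earliest offset
        kafkaConsumer.assign(partitions);
        kafkaConsumer.seekToBeginning(partitions);   // next poll() starts from offset 0

        while (true) {
            ConsumerRecords<String, String> records = kafkaConsumer.poll(Duration.ofSeconds(1));
            for (ConsumerRecord<String, String> record : records) {
                System.out.println("partition " + record.partition()
                        + ", offset " + record.offset() + ": " + record.value());
            }
        }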

When connecting to a Kafka data source, you can use the Java client library that Kafka provides. Here is a simple example:

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

public class KafkaConnector {

    public static void main(String[] args) {
        // Kafka configuration
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "my-consumer-group");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringDeserializer");
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringDeserializer");

        // Create the Kafka consumer
        Consumer<String, String> consumer = new KafkaConsumer<>(props);

        // Subscribe to the topic
        consumer.subscribe(Collections.singletonList("my-topic"));

        // Consume messages in a loop
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
            for (ConsumerRecord<String, String> record : records) {
                System.out.println("Received message: " + record.value());
            }
        }
    }
}

The code above connects to the Kafka data source by creating a consumer and subscribing to the topic "my-topic", then pulls messages from the topic in a loop and processes them. Note that bootstrap.servers must be set to the actual address and port of your Kafka service, and key.deserializer / value.deserializer should likewise be configured for your data.
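All the consumers in this article rely on the default enable.auto.commit=true, which commits offsets in the background every auto.commit.interval.ms. If you need at-least-once processing, commit manually after the records have been handled. A minimal sketch, as a drop-in change to the KafkaConnector example above (the property change and commitSync() call are illustrative additions, not part of the original examples):

        // Sketch: disable auto-commit and commit offsets only after processing succeeds
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");

        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
            for (ConsumerRecord<String, String> record : records) {
                System.out.println("processing: " + record.value());
            }
            if (!records.isEmpty()) {
                consumer.commitSync();   // commit the offsets returned by the last poll()
            }
        }

With this pattern, a crash between poll() and commitSync() means the batch is redelivered rather than lost, which is the usual at-least-once trade-off.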