1. Add the pom dependencies: the Kafka client and Alibaba's fastjson (JSON library)
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka-clients</artifactId>
    <version>2.8.0</version>
</dependency>
<dependency>
    <groupId>com.alibaba</groupId>
    <artifactId>fastjson</artifactId>
    <version>1.2.41</version>
</dependency>
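The examples below write to and read from a topic named user_topic. If the broker is not configured to auto-create topics, the topic can be created up front with the AdminClient that ships in kafka-clients. A minimal sketch follows; the class name and the partition/replication counts (3 and 1) are illustrative assumptions, not values from the original setup.

package com.ztesoft.kafka;
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.NewTopic;
/**
 * One-off helper (hypothetical) that creates the demo topic.
 */
public class CreateUserTopic {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "172.21.72.166:9092");
        try (AdminClient admin = AdminClient.create(props)) {
            // 3 partitions, replication factor 1: assumed values for a single-broker demo
            NewTopic topic = new NewTopic("user_topic", 3, (short) 1);
            admin.createTopics(Collections.singletonList(topic)).all().get();
        }
    }
}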
2. Implementing the Kafka producer
2.1 Producer implementation with asynchronous send
package com.ztesoft.kafka;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import com.alibaba.fastjson.JSON;
/**
 * Kafka producer: asynchronous send with a result callback.
 */
public class MyKafkaProducer {

    public static void main(String[] args) throws InterruptedException {
        Properties props = new Properties();
        // Kafka cluster broker list
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "172.21.72.166:9092");
        // Wait for all in-sync replicas to acknowledge each record
        props.put(ProducerConfig.ACKS_CONFIG, "all");
        // Number of retries on transient send failures
        props.put(ProducerConfig.RETRIES_CONFIG, 1);
        // Batch size in bytes (16 KB)
        props.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);
        // How long to wait for a batch to fill before sending, in ms
        props.put(ProducerConfig.LINGER_MS_CONFIG, 1);
        // RecordAccumulator buffer size in bytes (32 MB)
        props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432);
        // Key/value serializers
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
        Producer<String, String> producer = new KafkaProducer<String, String>(props);
        for (int i = 100; i < 200; i++) {
            Map<String, Object> recordMap = new HashMap<String, Object>(20);
            recordMap.put("seq", i);
            recordMap.put("name", "test" + i);
            recordMap.put("age", i % 20);
            ProducerRecord<String, String> producerRecord = new ProducerRecord<String, String>("user_topic",
                    String.valueOf(i), JSON.toJSONString(recordMap));
            producer.send(producerRecord, new Callback() {
                // Callback invoked asynchronously once the producer receives the ack
                public void onCompletion(RecordMetadata metadata, Exception e) {
                    if (e != null) {
                        System.out.printf("send failed: topic=%s, error=%s%n", metadata.topic(), e.getMessage());
                    } else {
                        System.out.printf("send succeeded: topic=%s, partition=%d, offset=%d%n",
                                metadata.topic(), metadata.partition(), metadata.offset());
                    }
                }
            });
            // Slow down the demo so the callback output is easy to follow
            Thread.sleep(2 * 1000);
        }
        producer.close();
    }
}
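The callback runs on the producer's I/O thread, so it should stay lightweight. For reference, on Java 8+ the same asynchronous send can be written more compactly with a lambda, since Callback has a single method; this is only a stylistic variant of the code above:

// Same asynchronous send expressed as a lambda; identical semantics
producer.send(producerRecord, (metadata, e) -> {
    if (e != null) {
        System.out.printf("send failed: %s%n", e.getMessage());
    } else {
        System.out.printf("send succeeded: topic=%s, partition=%d, offset=%d%n",
                metadata.topic(), metadata.partition(), metadata.offset());
    }
});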
2.2 Producer implementation with synchronous send
package com.ztesoft.kafka;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ExecutionException;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import com.alibaba.fastjson.JSON;
/**
 * Kafka producer: synchronous send.
 */
public class MyKafkaProducer {

    public static void main(String[] args) throws InterruptedException, ExecutionException {
        Properties props = new Properties();
        // Kafka cluster broker list
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "172.21.72.166:9092");
        // Wait for all in-sync replicas to acknowledge each record
        props.put(ProducerConfig.ACKS_CONFIG, "all");
        // Number of retries on transient send failures
        props.put(ProducerConfig.RETRIES_CONFIG, 1);
        // Batch size in bytes (16 KB)
        props.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);
        // How long to wait for a batch to fill before sending, in ms
        props.put(ProducerConfig.LINGER_MS_CONFIG, 1);
        // RecordAccumulator buffer size in bytes (32 MB)
        props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432);
        // Key/value serializers
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
        Producer<String, String> producer = new KafkaProducer<String, String>(props);
        for (int i = 100; i < 200; i++) {
            Map<String, Object> recordMap = new HashMap<String, Object>(20);
            recordMap.put("seq", i);
            recordMap.put("name", "test" + i);
            recordMap.put("age", i % 20);
            ProducerRecord<String, String> producerRecord = new ProducerRecord<String, String>("user_topic",
                    String.valueOf(i), JSON.toJSONString(recordMap));
            // Calling get() blocks until the ack arrives, making the send synchronous
            producer.send(producerRecord).get();
            Thread.sleep(2 * 1000);
        }
        producer.close();
    }
}
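A failed synchronous send surfaces as an ExecutionException whose cause is the underlying Kafka error, and get() also returns the RecordMetadata on success. A minimal sketch of a more defensive loop body, assuming the same producer and producerRecord as above:

// Requires: import java.util.concurrent.ExecutionException;
//           import org.apache.kafka.clients.producer.RecordMetadata;
try {
    RecordMetadata metadata = producer.send(producerRecord).get();
    System.out.printf("sent: partition=%d, offset=%d%n", metadata.partition(), metadata.offset());
} catch (ExecutionException ex) {
    // ex.getCause() is the real broker-side error, e.g. a TimeoutException
    System.err.println("send failed: " + ex.getCause());
}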
3. Implementing the Kafka consumer
3.1 Consumer with automatic offset commit
package com.ztesoft.kafka;
import java.time.Duration;
import java.util.Arrays;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
public class MyKafkaConsumer {

    public static void main(String[] args) {
        Properties props = new Properties();
        // Kafka broker list
        props.put("bootstrap.servers", "172.21.72.166:9092");
        // Consumer group id
        props.put("group.id", "bigdata");
        // Commit offsets automatically
        props.put("enable.auto.commit", "true");
        // Auto-commit interval in ms
        props.put("auto.commit.interval.ms", "1000");
        // Where to start when there is no committed offset: usually earliest or latest
        props.put("auto.offset.reset", "earliest");
        // Key/value deserializers
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(props);
        consumer.subscribe(Arrays.asList("user_topic"));
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
            for (ConsumerRecord<String, String> record : records) {
                int partition = record.partition();
                long offset = record.offset();
                String key = record.key();
                String value = record.value();
                // Print the consumed record
                System.out.printf("partition = %d, offset = %d, key = %s, value = %s%n", partition, offset, key, value);
            }
            // No manual commit here: offsets are committed automatically every auto.commit.interval.ms
        }
    }
}
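The loop above never exits, so consumer.close() is never reached. A common shutdown pattern, not part of the original code, is to call wakeup() from a JVM shutdown hook; the blocked poll() then throws a WakeupException that the loop catches before closing:

// Requires: import org.apache.kafka.common.errors.WakeupException;
final Thread mainThread = Thread.currentThread();
Runtime.getRuntime().addShutdownHook(new Thread(() -> {
    consumer.wakeup();     // makes the blocked poll() throw WakeupException
    try {
        mainThread.join(); // wait until the poll loop has finished cleanly
    } catch (InterruptedException ignored) {
    }
}));
try {
    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
        // ... process records as above ...
    }
} catch (WakeupException e) {
    // expected during shutdown
} finally {
    consumer.close();      // leave the group and release resources
}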
3.2 Consumer with manual offset commit
package com.ztesoft.kafka;
import java.time.Duration;
import java.util.Arrays;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
public class MyKafkaConsumer {

    public static void main(String[] args) {
        Properties props = new Properties();
        // Kafka broker list
        props.put("bootstrap.servers", "172.21.72.166:9092");
        // Consumer group id
        props.put("group.id", "bigdata");
        // Disable auto commit; offsets are committed manually below
        // (auto.commit.interval.ms is ignored in this mode, so it is omitted)
        props.put("enable.auto.commit", "false");
        // Where to start when there is no committed offset: usually earliest or latest
        props.put("auto.offset.reset", "earliest");
        // Key/value deserializers
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(props);
        consumer.subscribe(Arrays.asList("user_topic"));
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
            for (ConsumerRecord<String, String> record : records) {
                int partition = record.partition();
                long offset = record.offset();
                String key = record.key();
                String value = record.value();
                // Print the consumed record
                System.out.printf("partition = %d, offset = %d, key = %s, value = %s%n", partition, offset, key, value);
            }
            // Commit the offsets of the batch asynchronously after processing it
            consumer.commitAsync();
        }
    }
}
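The no-argument commitAsync() commits the latest polled offset for every assigned partition. When each record (or partition) must be committed individually, an explicit offset map can be passed instead; a minimal sketch using the standard TopicPartition and OffsetAndMetadata types, where the +1 is needed because the committed value is the offset of the next record to read:

// Requires: import java.util.Collections;
//           import org.apache.kafka.clients.consumer.OffsetAndMetadata;
//           import org.apache.kafka.common.TopicPartition;
// ... inside the for-loop, after processing one record:
TopicPartition tp = new TopicPartition(record.topic(), record.partition());
consumer.commitSync(Collections.singletonMap(tp, new OffsetAndMetadata(record.offset() + 1)));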