Important Kafka APIs
Create a Maven project and import the Kafka client dependency:
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka-clients</artifactId>
    <version>0.11.0.2</version>
</dependency>
Producer API
Code design approach
- Create a Properties object and configure the connection parameters
- Set the serializers for the message key and value
- Set the acknowledgment mode. With acks set to 1, once the leader has written the message the producer sent, it immediately tells the producer the message was received; it does not wait for the followers to replicate the message from the leader first.
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

import java.util.Properties;

public class MyProducer {
    public static void main(String[] args) {
        Properties prop = new Properties();
        prop.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.222.115:9092");
        // Serializers for the message key and value
        prop.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        prop.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        // Acknowledgment mode: the leader replies as soon as it has written the message itself
        prop.put(ProducerConfig.ACKS_CONFIG, "1");
        // Create the producer
        KafkaProducer<String, String> producer = new KafkaProducer<>(prop);
        // Build 20 messages in a loop and send them
        for (int i = 0; i < 20; i++) {
            ProducerRecord<String, String> producerRecord =
                    new ProducerRecord<>("kb07demo", Integer.toString(i), "hello,world22" + i);
            producer.send(producerRecord);
            try {
                // Sleep briefly so the messages are easier to observe while testing
                Thread.sleep(100);
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
        }
        // Flush any buffered records and release resources
        producer.close();
    }
}
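Note that send() is asynchronous, and the loop above never checks whether a message actually reached the broker. As a minimal sketch, a Callback can be attached to each send to observe the acknowledged partition and offset once the broker has confirmed the record under the configured acks mode. This reuses the producer and kb07demo topic from the example above; the demo-key/demo-value strings are placeholders, and it additionally needs imports for org.apache.kafka.clients.producer.Callback and RecordMetadata:

producer.send(new ProducerRecord<>("kb07demo", "demo-key", "demo-value"),
        new Callback() {
            @Override
            public void onCompletion(RecordMetadata metadata, Exception exception) {
                if (exception != null) {
                    // The send failed after any retries
                    exception.printStackTrace();
                } else {
                    // Fired once the leader has acknowledged the record (acks=1)
                    System.out.printf("acked: partition=%d, offset=%d%n",
                            metadata.partition(), metadata.offset());
                }
            }
        });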
Before starting the program, monitor the topic: it contains only the existing "hello" records. Once the program runs, it produces 20 messages, and 20 new records appear on the Kafka side.
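One way to watch the topic is the console consumer that ships with Kafka, pointed at the same broker (the script path below is an assumption and varies by installation):

bin/kafka-console-consumer.sh --bootstrap-server 192.168.222.115:9092 --topic kb07demo --from-beginning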
Consumer API
Design approach
- Create a Properties object and configure the connection parameters
- Set the deserializers for the message key and value
- Set the session timeout
- Disable automatic offset commits (offsets then have to be committed manually; see the sketch after the code)
- Set the auto-commit interval, which only takes effect when auto-commit is enabled
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.util.Collections;
import java.util.Properties;

public class MyConsumer {
    public static void main(String[] args) {
        Properties prop = new Properties();
        prop.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.222.115:9092");
        prop.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        prop.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        prop.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "30000");
        // Disable automatic offset commits
        prop.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
        // Milliseconds between automatic commits; ignored while auto-commit is disabled
        prop.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
        // earliest: resume from the committed offset if a partition has one; otherwise consume from the beginning
        prop.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        prop.put(ConsumerConfig.GROUP_ID_CONFIG, "GROUP_1");
        // Start three consumer threads in the same group, one KafkaConsumer per thread
        for (int i = 0; i < 3; i++) {
            new Thread(new Runnable() {
                @Override
                public void run() {
                    KafkaConsumer<String, String> consumer = new KafkaConsumer<>(prop);
                    consumer.subscribe(Collections.singletonList("ktest"));
                    while (true) {
                        ConsumerRecords<String, String> records = consumer.poll(100);
                        for (ConsumerRecord<String, String> record : records) {
                            System.out.printf("offset: %d, key: %s, value: %s, thread: %s%n",
                                    record.offset(), record.key(), record.value(),
                                    Thread.currentThread().getName());
                        }
                    }
                }
            }).start();
        }
    }
}
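With enable.auto.commit set to false, the code above never actually commits its offsets, so after a restart the group re-reads from its last committed position (or from earliest if there is none). Below is a minimal sketch of a polling loop with manual commits, meant to replace the body of run() above; it assumes the same prop configuration and ktest topic:

KafkaConsumer<String, String> consumer = new KafkaConsumer<>(prop);
consumer.subscribe(Collections.singletonList("ktest"));
while (true) {
    ConsumerRecords<String, String> records = consumer.poll(100);
    for (ConsumerRecord<String, String> record : records) {
        // Process the record before recording progress
        System.out.printf("offset: %d, value: %s%n", record.offset(), record.value());
    }
    if (!records.isEmpty()) {
        // Synchronously commit the offsets returned by the last poll()
        consumer.commitSync();
    }
}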