1. Import the required Maven dependencies
<!-- https://mvnrepository.com/artifact/org.apache.kafka/kafka-clients -->
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka-clients</artifactId>
    <version>2.6.0</version>
</dependency>
<!-- https://mvnrepository.com/artifact/com.fasterxml.jackson.core/jackson-databind -->
<dependency>
    <groupId>com.fasterxml.jackson.core</groupId>
    <artifactId>jackson-databind</artifactId>
    <version>2.11.3</version>
</dependency>
<!-- https://mvnrepository.com/artifact/org.slf4j/slf4j-api -->
<dependency>
    <groupId>org.slf4j</groupId>
    <artifactId>slf4j-api</artifactId>
    <version>1.8.0-beta4</version>
</dependency>
<!-- Note: keep only ONE SLF4J binding on the classpath. slf4j-simple, slf4j-nop
     and slf4j-log4j12 all bind SLF4J; including several at once triggers a
     "multiple bindings" warning and only one of them takes effect. This article
     uses slf4j-log4j12 together with log4j; use slf4j-simple instead for plain
     console logging, or slf4j-nop to silence logging entirely. -->
<!-- https://mvnrepository.com/artifact/org.slf4j/slf4j-log4j12 -->
<dependency>
    <groupId>org.slf4j</groupId>
    <artifactId>slf4j-log4j12</artifactId>
    <version>1.8.0-beta4</version>
</dependency>
<!-- https://mvnrepository.com/artifact/log4j/log4j -->
<dependency>
    <groupId>log4j</groupId>
    <artifactId>log4j</artifactId>
    <version>1.2.17</version>
</dependency>
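Since slf4j-log4j12 routes the client's SLF4J logging to log4j, a log4j.properties file on the classpath controls the log output. A minimal sketch of such a file (the appender name, levels and pattern are just illustrative values), placed at src/main/resources/log4j.properties:

# Minimal log4j configuration for the Kafka client logs
log4j.rootLogger=INFO, stdout
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c - %m%n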
2. Create a Producer class
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.internals.DefaultPartitioner;
import org.apache.kafka.common.serialization.StringSerializer;

import java.util.Properties;

public class Producer {
    /**
     * Kafka broker address list; separate multiple addresses with commas
     */
    public static final String brokerList = "192.168.38.128:9092";
    /**
     * Topic name
     */
    public static final String topic = "topic-demo";

    public static void main(String[] args) {
        Properties props = new Properties();
        // Key serializer
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        // Value serializer
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        // Broker address list of the Kafka cluster (separate multiple addresses with commas)
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList);
        /*
         * "1"      (default) the producer receives a success response from the server as soon
         *          as the partition's leader replica has written the message; otherwise it
         *          receives an error response
         * "0"      the producer does not wait for any response from the server
         *          (failures go unnoticed)
         * "-1"/"all" the producer waits until all replicas in the ISR have written the
         *          message before it receives a success response
         */
        props.put(ProducerConfig.ACKS_CONFIG, "1");
        // Upper bound on the size of a request the producer can send (default: 1048576 B, i.e. 1 MB)
        props.put(ProducerConfig.MAX_REQUEST_SIZE_CONFIG, "1048576");
        // Number of retries after a failed send (default: 0 in older clients;
        // Integer.MAX_VALUE since Kafka 2.1)
        props.put(ProducerConfig.RETRIES_CONFIG, "1");
        // Interval between two retries (default: 100 ms)
        props.put(ProducerConfig.RETRY_BACKOFF_MS_CONFIG, "1000");
        // Maximum time the producer waits for a response (default: 30000 ms); it should be
        // larger than the broker-side replica.lag.time.max.ms to reduce the chance of
        // duplicate messages caused by client retries
        props.put(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG, "30000");
        // Size of the buffer the producer uses to cache messages (default: 33554432 B, i.e. 32 MB)
        props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, "33554432");
        // Compression type (default: none, i.e. no compression); gzip, snappy, lz4
        // and (since Kafka 2.1) zstd are also available
        props.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, "gzip");
        // How long before an idle connection is closed (default: 540000 ms, i.e. 9 minutes)
        props.put(ProducerConfig.CONNECTIONS_MAX_IDLE_MS_CONFIG, "60000");
        // Client id of the producer; if unset, one is generated as "producer-" + a number
        props.put(ProducerConfig.CLIENT_ID_CONFIG, "producer-1");
        // How long send() and partitionsFor() may block, e.g. when the send buffer is full
        // or no metadata is available (default: 60000 ms)
        props.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, "60000");
        // Partitioner (computes the partition number for a message); must implement
        // org.apache.kafka.clients.producer.Partitioner
        props.put(ProducerConfig.PARTITIONER_CLASS_CONFIG, DefaultPartitioner.class.getName());
        // Interceptors; must implement org.apache.kafka.clients.producer.ProducerInterceptor
        // (separate multiple classes with commas)
        //props.put(ProducerConfig.INTERCEPTOR_CLASSES_CONFIG, **********.class.getName());
        // Maximum number of unacknowledged requests per connection (default: 5). With retries
        // enabled and a value greater than 1, a retried request can overtake an in-flight one
        // and break message order within a partition, so set this to 1 (or enable idempotence)
        // when strict ordering matters
        props.put(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, "5");
        // Metadata (broker addresses, ports, etc.) is force-refreshed if it has not been
        // updated within this time (default: 300000 ms, i.e. 5 minutes)
        props.put(ProducerConfig.METADATA_MAX_AGE_CONFIG, "300000");
        // Whether to enable idempotence (prevents duplicate writes caused by producer retries)
        props.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "false");
        // Transactional id, must be unique (default: null). Idempotence cannot span multiple
        // partitions; transactions fill that gap by making writes to several partitions atomic.
        // Note: setting transactional.id requires enable.idempotence=true and the
        // initTransactions()/beginTransaction()/commitTransaction() API, so it stays
        // commented out in this simple demo.
        //props.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "transactional-id-demo");
        // How long the producer waits for more messages (ProducerRecord) to join a
        // ProducerBatch before sending the batch (default: 0)
        props.put(ProducerConfig.LINGER_MS_CONFIG, "1");
        // Memory size a ProducerBatch can reuse (default: 16384 B, i.e. 16 KB)
        props.put(ProducerConfig.BATCH_SIZE_CONFIG, "16384");
        // Size of the socket receive buffer (SO_RCVBUF) (default: 32768 B, i.e. 32 KB);
        // -1 means use the operating system default
        props.put(ProducerConfig.RECEIVE_BUFFER_CONFIG, "-1");
        // Size of the socket send buffer (SO_SNDBUF) (default: 131072 B, i.e. 128 KB);
        // -1 means use the operating system default
        props.put(ProducerConfig.SEND_BUFFER_CONFIG, "-1");

        // Producer instance
        KafkaProducer<String, String> producer = new KafkaProducer<>(props);
        // Message to send
        ProducerRecord<String, String> record = new ProducerRecord<>(topic, "hello,kafka!");
        // Send (fire-and-forget; see the callback sketch after this class)
        producer.send(record);
        // Close the producer
        producer.close();
    }
}
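The send(record) call above is fire-and-forget: once retries are exhausted, failures are silently dropped. send() also accepts a callback that is invoked when the broker responds, which is the usual way to observe the result without blocking. A minimal sketch, reusing the brokerList and topic constants from the class above:

Properties props = new Properties();
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList);
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());

try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
    ProducerRecord<String, String> record = new ProducerRecord<>(topic, "hello,kafka!");
    // The callback fires with either non-null metadata (success) or a non-null exception (failure)
    producer.send(record, (metadata, exception) -> {
        if (exception != null) {
            exception.printStackTrace();
        } else {
            System.out.printf("sent to partition %d, offset %d%n",
                    metadata.partition(), metadata.offset());
        }
    });
    // Make sure the send completes (and the callback runs) before the producer is closed
    producer.flush();
}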
3. Create a Consumer class
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.time.Duration;
import java.util.Arrays;
import java.util.Properties;

public class Consumer {
    /**
     * Kafka broker address list; separate multiple addresses with commas
     */
    public static final String brokerList = "192.168.38.128:9092";
    /**
     * Topic name
     */
    public static final String topic = "topic-demo";
    /**
     * Consumer group name.
     * Each group tracks its offsets independently, so the same messages can be consumed
     * again under a different group name. For example, if group A has already consumed
     * 1000 messages from Kafka and you want to read those 1000 messages again without
     * reproducing them, just switch to a new group name; note that the new group also
     * needs auto.offset.reset=earliest to start from the beginning of the log
     * (see the sketch after this class).
     */
    public static final String groupId = "group.demo";

    public static void main(String[] args) {
        Properties props = new Properties();
        // Key deserializer
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        // Value deserializer
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        // Kafka broker address list
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList);
        // Consumer group name
        props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);

        // Consumer instance
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        // Subscribe to the topic
        consumer.subscribe(Arrays.asList(topic));
        // Poll messages in a loop
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000L));
            for (ConsumerRecord<String, String> record : records) {
                System.out.println(record.value());
            }
        }
    }
}
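As noted in the groupId comment, a brand-new group has no committed offsets, and the consumer's auto.offset.reset setting decides where it starts; the default is "latest", which would skip everything already in the log. A minimal sketch of replaying a topic from the beginning under a fresh group (the group name "group.demo-replay" is just an illustrative value), reusing the brokerList and topic constants above:

Properties props = new Properties();
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList);
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
// A group id the cluster has not seen before, so no committed offsets exist for it
props.put(ConsumerConfig.GROUP_ID_CONFIG, "group.demo-replay");
// With no committed offset, start from the earliest message instead of the latest
props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");

KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
consumer.subscribe(Arrays.asList(topic));
// Poll as in the class above; the new group now reads the topic from offset 0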
Corrections are welcome if you spot any mistakes; if anything here infringes a copyright, please contact the author.