1. 下载解压kafka安装包
tar -xvzf kafka_2.12-2.2.1.tgz -C /software/kafka/
2. 启动zookeeper
bin/zookeeper-server-start.sh config/zookeeper.properties &
3. 修改kafka配置文件server.properties
broker.id=0
port=9092
host.name=阿里云内网地址
advertised.host.name=阿里云外网地址
（注意：port、host.name、advertised.host.name 在 Kafka 2.x 中已是遗留配置，官方推荐改用
listeners=PLAINTEXT://内网ip:9092 和 advertised.listeners=PLAINTEXT://外网ip:9092）
4. 启动kafka服务
./kafka-server-start.sh ../config/server.properties &
5. 创建topic
./kafka-topics.sh --create --zookeeper 内网ip:2181 --replication-factor 1 --partitions 1 --topic HelloWorld
6. 查看topic
./kafka-topics.sh --list --zookeeper 内网ip:2181
7. 生产者
./kafka-console-producer.sh --broker-list 内网ip:9092 --topic HelloWorld
8. 消费者
./kafka-console-consumer.sh --bootstrap-server 内网ip:9092 --topic HelloWorld --from-beginning
9. Java生产者
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import java.util.Properties;
/**
* @author Ryan Feng
* @version 1.0
* @date 2019-07-09 18:19
*/
/**
 * Demo producer: sends one million string messages to the "HelloWorld" topic
 * and prints the total elapsed time in seconds.
 *
 * <p>Fixes over the original: the original called {@code producer.close()} in a
 * {@code finally} block, which throws NullPointerException when the
 * KafkaProducer constructor itself fails (producer is still null). KafkaProducer
 * implements Closeable, so try-with-resources closes it safely and also flushes
 * any buffered records. The elapsed-time division now uses floating point so
 * sub-second runs do not report 0.
 */
public class ProducerDemo {
    public static void main(String[] args) {
        Properties properties = new Properties();
        // Replace with the broker's public address before running.
        properties.put("bootstrap.servers", "外网ip:9092");
        properties.put("acks", "all");               // wait for full ISR acknowledgment
        properties.put("retries", 0);
        properties.put("batch.size", 16384);
        properties.put("linger.ms", 1);
        properties.put("buffer.memory", 33554432);
        properties.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        properties.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        long start = System.currentTimeMillis();
        // try-with-resources guarantees close() (and an implicit flush) even on error,
        // and avoids the NPE the original had when construction failed.
        try (KafkaProducer<String, String> producer = new KafkaProducer<>(properties)) {
            for (int i = 0; i < 1000000; i++) {
                String msg = "New Message: " + i;
                // send() is asynchronous; records are batched per batch.size/linger.ms.
                producer.send(new ProducerRecord<>("HelloWorld", msg));
                System.out.println("Sent:" + msg);
            }
            long end = System.currentTimeMillis();
            // Floating-point division: integer division printed 0 for runs under a second.
            System.out.println("Time for 1 million message:" + (end - start) / 1000.0);
        } catch (Exception e) {
            // Demo-level handling: print and exit (matches original behavior).
            e.printStackTrace();
        }
    }
}
10. Java消费者
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

import java.time.Duration;
import java.util.Arrays;
import java.util.Properties;
import java.util.UUID;
/**
* @author Ryan Feng
* @version 1.0
* @date 2019-07-09 21:22
*/
/**
 * Demo consumer: subscribes to the "HelloWorld" topic and prints every record's
 * offset and value forever.
 *
 * <p>Fix over the original: {@code poll(long)} is deprecated since Kafka 2.0
 * (this tutorial uses 2.2.1) — the {@code poll(Duration)} overload is the
 * supported API and bounds the total blocking time, including metadata fetches.
 *
 * <p>NOTE(review): the random group.id means every run re-reads the topic from
 * the beginning (auto.offset.reset=earliest) — intentional for a demo, but not
 * what you want in production. The consumer is never closed because the loop is
 * infinite; a real application should close it on shutdown (e.g. via wakeup()).
 */
public class ConsumerDemo {
    public static void main(String[] args) {
        Properties properties = new Properties();
        // Replace with the broker's public address before running.
        properties.put("bootstrap.servers", "外网ip:9092");
        // Fresh group each run => no prior committed offsets => start from earliest.
        properties.put("group.id", UUID.randomUUID().toString());
        properties.put("enable.auto.commit", "true");
        properties.put("auto.commit.interval.ms", "1000");
        properties.put("auto.offset.reset", "earliest");
        properties.put("session.timeout.ms", "30000");
        properties.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        properties.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        KafkaConsumer<String, String> kafkaConsumer = new KafkaConsumer<>(properties);
        kafkaConsumer.subscribe(Arrays.asList("HelloWorld"));
        while (true) {
            // poll(Duration) replaces the deprecated poll(long) overload.
            ConsumerRecords<String, String> records = kafkaConsumer.poll(Duration.ofMillis(100));
            for (ConsumerRecord<String, String> record : records) {
                System.out.printf("offset = %d, value = %s", record.offset(), record.value());
                System.out.println();
            }
        }
    }
}
本文详细介绍了在阿里云上安装和使用Kafka的步骤。包括下载解压Kafka安装包、启动Zookeeper、修改Kafka配置文件、启动Kafka服务,还介绍了创建和查看topic,以及生产者、消费者的操作,最后提及Java生产者和消费者。
619

被折叠的 条评论
为什么被折叠?



