因为Kafka集群是把状态信息保存在Zookeeper中的,并且Kafka的动态扩容是通过Zookeeper来实现的,所以需要优先搭建Zookeeper集群,建立分布式状态管理。开始准备环境,搭建集群:
zookeeper是基于Java环境开发的,所以需要先安装Java。这里使用的zookeeper安装包版本为zookeeper-3.4.14,Kafka的安装包版本为kafka_2.11-2.2.0。
实验环境:(两台虚拟主机:2核2G)
server1:192.168.136.253
server2:192.168.136.131
基本要求:
关闭防火墙、selinux:
[root@localhost ~]# systemctl stop firewalld
[root@localhost ~]# setenforce 0
同步时间:
[root@localhost ~]# yum -y install ntp
[root@localhost ~]# ntpdate pool.ntp.org
下载安装包:
[root@localhost ~]# wget http://mirrors.cnnic.cn/apache/zookeeper/zookeeper-3.4.14/zookeeper-3.4.14.tar.gz
[root@localhost ~]# wget http://mirrors.tuna.tsinghua.edu.cn/apache/kafka/2.2.0/kafka_2.11-2.2.0.tgz
[root@localhost ~]# yum install java-1.8.0-openjdk* -y
修改主机名(两台):
[root@localhost ~]# yum -y install vim
[root@localhost ~]# vim /etc/hosts
[root@localhost ~]# cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.136.253 kafka01
192.168.136.131 kafka02
编译安装zookeeper并开启(两台同步):
[root@localhost ~]# tar zxf zookeeper-3.4.14.tar.gz
[root@localhost ~]# mv zookeeper-3.4.14 /usr/local/zookeeper
[root@localhost ~]# cd /usr/local/zookeeper/conf/
[root@localhost conf]# mv zoo_sample.cfg zoo.cfg
[root@localhost conf]# vim zoo.cfg
-------------末行添加----------------
server.1=192.168.136.253:2888:3888
server.2=192.168.136.131:2888:3888
------------------------------------
#创建路径(两台同步):另一台执行 echo '2' > /tmp/zookeeper/myid
[root@localhost ~]# mkdir /tmp/zookeeper
[root@localhost ~]# echo '1' > /tmp/zookeeper/myid
#检测:
[root@localhost ~]# /usr/local/zookeeper/bin/zkServer.sh stop
[root@localhost ~]# /usr/local/zookeeper/bin/zkServer.sh start
[root@localhost ~]# yum -y install net-tools
[root@localhost ~]# netstat -anpt |grep 2181
编译安装kafka并开启(两台同步):
[root@localhost ~]# tar zxf kafka_2.11-2.2.0.tgz
[root@localhost ~]# mv kafka_2.11-2.2.0 /usr/local/kafka
[root@localhost ~]# vim /usr/local/kafka/config/server.properties
---------------找到三行并修改-----------------
# The id of the broker. This must be set to a unique integer for each broker.
broker.id=0
delete.topic.enable=true
advertised.listeners=PLAINTEXT://kafka01:9092
zookeeper.connect=192.168.136.253:2181,192.168.136.131:2181
-------------------------------------------
[root@localhost ~]# /usr/local/kafka/bin/kafka-server-start.sh -daemon /usr/local/kafka/config/server.properties
[root@localhost ~]# netstat -anpt |grep 9092
测试:
创建topic:
[root@localhost ~]# /usr/local/kafka/bin/kafka-topics.sh --create --zookeeper 192.168.136.253:2181 --replication-factor 2 --partitions 3 --topic yl001
查看topic:
[root@localhost ~]# /usr/local/kafka/bin/kafka-topics.sh --list --zookeeper 192.168.136.253:2181
模拟一个producer:
[root@localhost ~]# /usr/local/kafka/bin/kafka-console-producer.sh --broker-list 192.168.136.253:9092 --topic yl001
模拟一个consumer:
[root@localhost ~]# /usr/local/kafka/bin/kafka-console-consumer.sh --bootstrap-server 192.168.136.253:9092 --topic yl001 --from-beginning
删除topic:
[root@localhost ~]# /usr/local/kafka/bin/kafka-topics.sh --delete --zookeeper 192.168.136.253:2181 --topic yl001