OS: CentOS 7.4; multi-instance deployment (all ZooKeeper and Kafka instances on one host, 192.168.1.123)
yum -y install java
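#optional sanity check that the yum-installed OpenJDK runtime is on the PATH:
java -version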
---zk
wget https://mirrors.tuna.tsinghua.edu.cn/apache/zookeeper/zookeeper-3.4.10/zookeeper-3.4.10.tar.gz
mkdir -p /usr/local/zookeeper/zkdata/{zk1,zk2,zk3}
echo '1' > /usr/local/zookeeper/zkdata/zk1/myid
echo '2' > /usr/local/zookeeper/zkdata/zk2/myid
echo '3' > /usr/local/zookeeper/zkdata/zk3/myid
mkdir -p /usr/local/zookeeper/zkdatalog/{zk1,zk2,zk3}
tar zxf zookeeper-3.4.10.tar.gz -C /usr/local/zookeeper
cd /usr/local/zookeeper/zookeeper-3.4.10/conf
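The three per-instance configs used below (zk1.cfg, zk2.cfg, zk3.cfg) do not exist yet. One minimal way to create them, assuming the zoo_sample.cfg shipped in the 3.4.10 tarball as a starting point, is to copy it once per instance and then edit each copy to match the listings below. Note that because all three instances share one IP, each server.N line must use its own peer/election port pair (2888/3888, 2889/3889, 2890/3890).
cp zoo_sample.cfg zk1.cfg
cp zoo_sample.cfg zk2.cfg
cp zoo_sample.cfg zk3.cfg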
egrep -v "^#|^$" zk1.cfg
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/usr/local/zookeeper/zkdata/zk1
dataLogDir=/usr/local/zookeeper/zkdatalog/zk1
clientPort=2181
server.1=192.168.1.123:2888:3888
server.2=192.168.1.123:2889:3889
server.3=192.168.1.123:2890:3890
egrep -v "^#|^$" zk2.cfg
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/usr/local/zookeeper/zkdata/zk2
dataLogDir=/usr/local/zookeeper/zkdatalog/zk2
clientPort=2182
server.1=192.168.1.123:2888:3888
server.2=192.168.1.123:2889:3889
server.3=192.168.1.123:2890:3890
egrep -v "^#|^$" zk3.cfg
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/usr/local/zookeeper/zkdata/zk3
dataLogDir=/usr/local/zookeeper/zkdatalog/zk3
clientPort=2183
server.1=192.168.1.123:2888:3888
server.2=192.168.1.123:2889:3889
server.3=192.168.1.123:2890:3890
cd /usr/local/zookeeper/zookeeper-3.4.10/bin
./zkServer.sh start /usr/local/zookeeper/zookeeper-3.4.10/conf/zk1.cfg
./zkServer.sh start /usr/local/zookeeper/zookeeper-3.4.10/conf/zk2.cfg
./zkServer.sh start /usr/local/zookeeper/zookeeper-3.4.10/conf/zk3.cfg
netstat -tunlp|grep -E "218[1-3]"
netstat -tunlp|grep -E "28(88|89|90)"
netstat -tunlp|grep -E "38(88|89|90)"
#normally only the current leader listens on its 28xx peer port, so the second command may show a single line
./zkServer.sh status /usr/local/zookeeper/zookeeper-3.4.10/conf/zk1.cfg
./zkServer.sh status /usr/local/zookeeper/zookeeper-3.4.10/conf/zk2.cfg
./zkServer.sh status /usr/local/zookeeper/zookeeper-3.4.10/conf/zk3.cfg
###normally one instance is elected leader and the other two are followers; if the status output does not say so, check the hosts file, firewall rules, and the like first
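Another quick way to see each instance's role, assuming the stat four-letter command is reachable (on some builds it may need to be whitelisted via 4lw.commands.whitelist) and nc is installed (yum -y install nmap-ncat):
echo stat | nc 127.0.0.1 2181 | grep Mode
echo stat | nc 127.0.0.1 2182 | grep Mode
echo stat | nc 127.0.0.1 2183 | grep Mode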
---kafka
wget https://archive.apache.org/dist/kafka/0.10.2.1/kafka_2.10-0.10.2.1.tgz
mkdir -p /usr/local/kafka/kafkalogs/{ka1,ka2,ka3}
tar zxf kafka_2.10-0.10.2.1.tgz -C /usr/local/kafka/
cd /usr/local/kafka/kafka_2.10-0.10.2.1/config
cp server.properties ka1.properties
cp server.properties ka2.properties
cp server.properties ka3.properties
#the three copies differ only in broker.id, the listener port, and log.dirs
egrep -v "^#|^$" ka1.properties
broker.id=0
listeners=PLAINTEXT://192.168.1.123:9091
host.name=192.168.1.123
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/usr/local/kafka/kafkalogs/ka1
num.partitions=1
num.recovery.threads.per.data.dir=1
log.retention.hours=168
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
zookeeper.connect=192.168.1.123:2181,192.168.1.123:2182,192.168.1.123:2183
zookeeper.connection.timeout.ms=6000
egrep -v "^#|^$" ka2.properties
broker.id=1
listeners=PLAINTEXT://192.168.1.123:9092
host.name=192.168.1.123
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/usr/local/kafka/kafkalogs/ka2
num.partitions=1
num.recovery.threads.per.data.dir=1
log.retention.hours=168
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
zookeeper.connect=192.168.1.123:2181,192.168.1.123:2182,192.168.1.123:2183
zookeeper.connection.timeout.ms=6000
egrep -v "^#|^$" ka3.properties
broker.id=2
listeners=PLAINTEXT://192.168.1.123:9093
host.name=192.168.1.123
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/usr/local/kafka/kafkalogs/ka3
num.partitions=1
num.recovery.threads.per.data.dir=1
log.retention.hours=168
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
zookeeper.connect=192.168.1.123:2181,192.168.1.123:2182,192.168.1.123:2183
zookeeper.connection.timeout.ms=6000
cd /usr/local/kafka/kafka_2.10-0.10.2.1/bin
./kafka-server-start.sh -daemon /usr/local/kafka/kafka_2.10-0.10.2.1/config/ka1.properties
./kafka-server-start.sh -daemon /usr/local/kafka/kafka_2.10-0.10.2.1/config/ka2.properties
./kafka-server-start.sh -daemon /usr/local/kafka/kafka_2.10-0.10.2.1/config/ka3.properties
netstat -tunlp|grep 909
###these ports can take a little while to show up after the brokers start
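To confirm that all three brokers registered themselves in ZooKeeper, the zkCli.sh shipped with ZooKeeper can be used; a quick sketch (the /brokers/ids znode should list the configured broker.ids 0, 1 and 2):
/usr/local/zookeeper/zookeeper-3.4.10/bin/zkCli.sh -server 192.168.1.123:2181
#at the zkCli prompt:
ls /brokers/ids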
---cluster test
#open two terminal windows for the test
-producer window
cd /usr/local/kafka/kafka_2.10-0.10.2.1/bin
./kafka-topics.sh --create --zookeeper 192.168.1.123:2182 --replication-factor 2 --partitions 1 --topic test
./kafka-console-producer.sh --broker-list 192.168.1.123:9092 --topic test
#type a few arbitrary messages; each line entered is sent as one message
-consumer window
cd /usr/local/kafka/kafka_2.10-0.10.2.1/bin
./kafka-console-consumer.sh --zookeeper localhost:2181,localhost:2182,localhost:2183 --topic test --from-beginning
#check that the messages typed in the producer window arrive here
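On 0.10.x the console consumer can also use the new consumer API and read directly from the brokers instead of going through ZooKeeper; a sketch:
./kafka-console-consumer.sh --bootstrap-server 192.168.1.123:9091,192.168.1.123:9092,192.168.1.123:9093 --topic test --from-beginning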
---kafka-manager
#pre-built quick version, no need to compile from source (frankly, I never managed to compile it anyway)
#download link: https://pan.baidu.com/s/1qYifoa4  password: el4o
unzip kafka-manager-1.3.3.7.zip -d /usr/local/kafka/
cd /usr/local/kafka/conf/   ##if the zip extracts into a kafka-manager-1.3.3.7/ subdirectory, cd into that directory's conf/ instead
Edit the config file application.conf:
#kafka-manager.zkhosts="localhost:2181" ##comment out this line and add the line below
kafka-manager.zkhosts="192.168.1.123:2181,192.168.1.123:2182,192.168.1.123:2183"
cd ..
nohup bin/kafka-manager -Dconfig.file=conf/application.conf -Dhttp.port=9999 &
#kafka-manager listens on port 9000 by default; the port and config file can be overridden with -Dhttp.port and -Dconfig.file as above
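A quick check that kafka-manager actually came up (nohup writes its output to nohup.out in the current directory):
netstat -tunlp|grep 9999
tail nohup.out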
yum -y install nginx httpd-tools
mkdir -p /etc/nginx/password
htpasswd -c /etc/nginx/password/kafka-manager.user kafka
(enter the same password twice when prompted)
vim /etc/nginx/conf.d/ops.kafka-manager.com.conf   ##the filename must end in .conf for nginx's default conf.d include to load it
server {
    listen 80;
    server_name ops.kafka-manager.com;
    auth_basic "Restricted Access";
    auth_basic_user_file /etc/nginx/password/kafka-manager.user;

    location / {
        proxy_pass http://localhost:9999;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection 'upgrade';
        proxy_set_header Host $host;
        proxy_cache_bypass $http_upgrade;
    }
}
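The new server block is not applied until nginx is (re)started; validate the config first:
nginx -t
systemctl enable nginx
systemctl restart nginx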
Point the domain at the server's IP (via DNS, or a hosts entry on the client machine), then open the domain in a browser.
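For a quick test without DNS, a hosts entry on the machine running the browser is enough (using the server IP from this setup):
echo "192.168.1.123 ops.kafka-manager.com" >> /etc/hosts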
The rest is done in the web UI: add the cluster, after which you can monitor topics and so on; see the link below for details.
Usually only three fields need to be filled in after logging in and adding a cluster:
1. Cluster name
2. ZooKeeper hosts (host:port list)
3. Kafka version
Everything else can normally be left at the defaults.
#for a more detailed kafka-manager walkthrough, see:
#http://www.orchome.com/55
---topic operations
#increase PartitionCount (partitions) and ReplicationFactor (replicas),
#reassign partitions, and so on
#these operations can also be done from the kafka-manager UI, but running them on the server command line is more reliable
#the scripts under the json directory are worth a look; they hold the assignment rules for six partitions with three replicas
#note!!! partitions cannot be removed once added; for a three-node cluster, three partitions is a good fit (no particular reason, that is just how I like it!)
Example:
cd /usr/local/kafka/kafka_2.10-0.10.2.1/
bin/kafka-topics.sh --zookeeper localhost:2181,localhost:2182,localhost:2183 --describe --topic test
Topic:test PartitionCount:1 ReplicationFactor:1 Configs:
Topic:test Partition: 0 Leader: 0 Replicas: 0 Isr: 0
#show the current state of the given topic (test)
bin/kafka-topics.sh --alter --zookeeper localhost:2181,localhost:2182,localhost:2183 --topic test --partitions 3
#increase the partition count to 3
mkdir json
vim json/test_ReplicationFactor.json
{"version":1,
"partitions":[{"topic":"test","partition":0,"replicas":[0,1,2]},
{"topic":"test","partition":1,"replicas":[1,2,0]},
{"topic":"test","partition":2,"replicas":[2,0,1]}]}
#the JSON file describes the target replica assignment for each partition
bin/kafka-reassign-partitions.sh --zookeeper localhost:2181,localhost:2182,localhost:2183 --reassignment-json-file json/test_ReplicationFactor.json --execute
#run the reassignment using the JSON file
bin/kafka-topics.sh --zookeeper localhost:2181,localhost:2182,localhost:2183 --describe --topic test
Topic:test PartitionCount:3 ReplicationFactor:3 Configs:
Topic: test Partition: 0 Leader: 2 Replicas: 0,1,2 Isr: 2,0,1
Topic: test Partition: 1 Leader: 0 Replicas: 1,2,0 Isr: 0,1,2
Topic: test Partition: 2 Leader: 1 Replicas: 2,0,1 Isr: 1,2,0
bin/kafka-reassign-partitions.sh --zookeeper localhost:2181,localhost:2182,localhost:2183 --reassignment-json-file json/test_ReplicationFactor.json --verify
Status of partition reassignment:
Reassignment of partition [test,0] completed successfully
Reassignment of partition [test,1] completed successfully
Reassignment of partition [test,2] completed successfully
#check the topic state again; --describe and --verify above are two ways to confirm the result
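Instead of writing the assignment JSON by hand, kafka-reassign-partitions.sh can also propose one; a sketch (the file name topics-to-move.json is just an example):
cat > json/topics-to-move.json <<'EOF'
{"version":1,"topics":[{"topic":"test"}]}
EOF
bin/kafka-reassign-partitions.sh --zookeeper localhost:2181,localhost:2182,localhost:2183 --topics-to-move-json-file json/topics-to-move.json --broker-list "0,1,2" --generate
#prints the current assignment plus a proposed one; save the proposed JSON to a file and pass it to --execute as shown above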