关闭防火墙 selinux
systemctl stop firewalld # 如需重启后仍保持关闭，再执行 systemctl disable firewalld
setenforce 0 # 临时关闭selinux，重启后失效；永久关闭需修改/etc/selinux/config中SELINUX=disabled
时间同步
yum -y install ntpdate
ntpdate pool.ntp.org
上传软件安装包到/usr/local/src下
[root@bogon src]# ll
总用量 265296
-rw-r--r--. 1 root root 169983496 8月 20 16:53 jdk-8u131-linux-x64_.rpm
-rw-r--r--. 1 root root 63999924 8月 20 16:53 kafka_2.11-2.2.0.tgz
-rw-r--r--. 1 root root 37676320 8月 20 16:52 zookeeper-3.4.14.tar.gz
修改主机名称
hostnamectl set-hostname kafka01
hostnamectl set-hostname kafka02
hostnamectl set-hostname kafka03
修改hosts文件
// vim /etc/hosts
192.168.248.101 kafka01
192.168.248.102 kafka02
192.168.248.103 kafka03
安装jdk
// rpm -ivh jdk-8u131-linux-x64_.rpm
验证:
// java -version
java version "1.8.0_131"
Java(TM) SE Runtime Environment (build 1.8.0_131-b11)
Java HotSpot(TM) 64-Bit Server VM (build 25.131-b11, mixed mode)
安装zookeeper
tar zxvf zookeeper-3.4.14.tar.gz
mv zookeeper-3.4.14 /usr/local/zookeeper
编辑 zoo.cfg
// cd /usr/local/zookeeper/conf
// mv zoo_sample.cfg zoo.cfg
// vim zoo.cfg
参数详解:
tickTime=2000 # zk服务器之间的心跳时间（毫秒）
initLimit=10 # follower初次连接leader时允许的最大心跳数（即10*tickTime内必须完成连接与同步）
syncLimit=5 # leader与follower之间请求和应答允许的最大心跳数（5*tickTime）
dataDir=/tmp/zookeeper # zk的数据目录（注意:/tmp重启后会被清空，生产环境应改为持久化目录）
clientPort=2181 # zk的监听端口号
server.1=192.168.248.101:2888:3888 # 服务器编号,2888:通信端口 3888: 选举端口
server.2=192.168.248.102:2888:3888
server.3=192.168.248.103:2888:3888
创建myid文件
// mkdir /tmp/zookeeper
kafka01:
echo "1" > /tmp/zookeeper/myid
kafka02:
echo "2" > /tmp/zookeeper/myid
kafka03:
echo "3" > /tmp/zookeeper/myid
开启zk服务
// /usr/local/zookeeper/bin/zkServer.sh start
验证服务状态
// /usr/local/zookeeper/bin/zkServer.sh status
Mode: follower
Mode: leader
// netstat -lptnu | grep java
tcp6 0 0 :::2181 :::* LISTEN 3372/java
tcp6 0 0 192.168.248.101:2888 :::* LISTEN 3372/java
tcp6 0 0 192.168.248.101:3888 :::* LISTEN 3372/java
部署kafka
// tar zxvf kafka_2.11-2.2.0.tgz
// mv kafka_2.11-2.2.0 /usr/local/kafka
编辑主配置文件
// vim /usr/local/kafka/config/server.properties
kafka01:
broker.id=0
advertised.listeners=PLAINTEXT://kafka01:9092
zookeeper.connect=192.168.248.101:2181,192.168.248.102:2181,192.168.248.103:2181
kafka02:
broker.id=1
advertised.listeners=PLAINTEXT://kafka02:9092
zookeeper.connect=192.168.248.101:2181,192.168.248.102:2181,192.168.248.103:2181
kafka03:
broker.id=2
advertised.listeners=PLAINTEXT://kafka03:9092
zookeeper.connect=192.168.248.101:2181,192.168.248.102:2181,192.168.248.103:2181
开启kafka 服务
// /usr/local/kafka/bin/kafka-server-start.sh -daemon /usr/local/kafka/config/server.properties
验证:
// netstat -lptnu | grep 9092
tcp6 0 0 :::9092 :::* LISTEN 14930/java
测试kafka
创建topic
// /usr/local/kafka/bin/kafka-topics.sh --create --zookeeper 192.168.248.101:2181 --replication-factor 2 --partitions 3 --topic wg007
查看当前的topic
// /usr/local/kafka/bin/kafka-topics.sh --list --zookeeper 192.168.248.101:2181
模拟生产者
// /usr/local/kafka/bin/kafka-console-producer.sh --broker-list 192.168.248.101:9092 --topic wg007
模拟消费者
// /usr/local/kafka/bin/kafka-console-consumer.sh --bootstrap-server 192.168.248.101:9092 --topic wg007 --from-beginning
部署filebeat
配置yum源
// cd /etc/yum.repos.d/
// vim filebeat.repo
[filebeat-6.x]
name=Elasticsearch repository for 6.x packages
baseurl=https://artifacts.elastic.co/packages/6.x/yum
gpgcheck=1
gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch
enabled=1
autorefresh=1
type=rpm-md
// yum -y install filebeat
编辑 filebeat.yml
// vim /etc/filebeat/filebeat.yml
filebeat.inputs:
- type: log
enabled: true
paths:
- /var/log/messages
output.kafka:
enabled: true
hosts: ["192.168.248.101:9092","192.168.248.102:9092","192.168.248.103:9092"]
topic: messages
启动filebeat服务
// systemctl enable filebeat
// systemctl start filebeat
安装elasticsearch
// rpm -ivh elasticsearch-6.6.2.rpm
编辑es的配置文件
// vim /etc/elasticsearch/elasticsearch.yml
// cat /etc/elasticsearch/elasticsearch.yml |grep -v "^#"
cluster.name: wg007
node.name: node-1
path.data: /var/lib/elasticsearch
path.logs: /var/log/elasticsearch
network.host: 192.168.248.101
http.port: 9200
启动es服务
// systemctl enable elasticsearch
// systemctl start elasticsearch
验证:
// netstat -lptnu|egrep "9200|9300"
tcp6 0 0 192.168.248.101:9200 :::* LISTEN 80284/java
tcp6 0 0 192.168.248.101:9300 :::* LISTEN 80284/java
部署logstash
上传rpm包
// rpm -ivh logstash-6.6.0.rpm
编辑 /etc/logstash/conf.d/messages.conf
[root@kafka02 conf.d]# cat messages.conf
input {
kafka {
bootstrap_servers => ["192.168.248.101:9092,192.168.248.102:9092,192.168.248.103:9092"]
group_id => "logstash"
topics => "messages"
consumer_threads => 5
}
}
output {
elasticsearch {
hosts => "192.168.248.101:9200"
index => "msg-log-%{+YYYY.MM.dd}"
}
}
启动logstash服务
// systemctl enable logstash
// systemctl start logstash
验证:
// netstat -lptnu|grep 9600
tcp6 0 0 127.0.0.1:9600 :::* LISTEN 77046/java
部署kibana
// rpm -ivh kibana-6.6.2-x86_64.rpm
编辑kibana的主配置文件
// vim /etc/kibana/kibana.yml
// cat /etc/kibana/kibana.yml |grep -v "^#"|sed '/^$/d'
server.port: 5601
server.host: "192.168.248.103"
elasticsearch.hosts: ["http://192.168.248.101:9200"]
===================================
开启kibana
// systemctl enable kibana
// systemctl start kibana
验证:
// netstat -lptnu|grep 5601
tcp 0 0 192.168.248.103:5601 0.0.0.0:* LISTEN 72621/node
没有index 怎么办?
1: chmod 644 /var/log/messages （确保filebeat对日志文件有读取权限，无需777）
2: echo "test" >> /var/log/messages （写入一条新日志，触发filebeat采集并生成index）