Kafka+ELK Log Collection and Processing
Kafka+Zookeeper Cluster Deployment
- Zookeeper
[root@es_cluer1 ~]# mkdir /home/zookeeper
[root@es_cluer1 srv]# tar xf zookeeper-3.4.14.tar.gz
[root@es_cluer1 ~]# cd /srv/zookeeper-3.4.14/conf/
[root@es_cluer1 conf]# mv zoo_sample.cfg zoo.cfg
[root@es_cluer1 conf]# grep '^[a-z]' zoo.cfg
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/home/zookeeper/data
dataLogDir=/home/zookeeper/log
clientPort=2181
server.1=192.168.116.118:2888:3888
server.2=192.168.116.119:2888:3888
server.3=192.168.116.120:2888:3888
[root@es_cluer1 conf]# mkdir /home/zookeeper/data
[root@es_cluer1 conf]# mkdir /home/zookeeper/log
[root@es_cluer1 conf]# echo "1" > /home/zookeeper/data/myid //a myid file must be created on all three nodes; note that the node IDs 1~3 differ, and each must match the server.* entries configured above
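The same step applies on the other two nodes, each with its own ID (a minimal sketch; the es_cluer2/es_cluer3 host names are assumptions, matching server.2 and server.3 above):
[root@es_cluer2 conf]# echo "2" > /home/zookeeper/data/myid
[root@es_cluer3 conf]# echo "3" > /home/zookeeper/data/myid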
Start the zookeeper service on all three nodes
[root@es_cluer1 bin]# ./zkServer.sh start
ZooKeeper JMX enabled by default
Using config: /srv/zookeeper-3.4.14/bin/../conf/zoo.cfg
Starting zookeeper ... STARTED
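To confirm the ensemble formed correctly, run zkServer.sh status on each node; one node should report "Mode: leader" and the other two "Mode: follower":
[root@es_cluer1 bin]# ./zkServer.sh status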
- Kafka
[root@es_cluer1 bin]# mkdir /home/kafka
[root@es_cluer1 srv]# tar xf kafka_2.12-2.5.0.tgz
[root@es_cluer1 bin]# cd /srv/kafka_2.12-2.5.0/config/
[root@es_cluer1 config]# grep '^[a-z]' server.properties
broker.id=1 //must be unique; change on every node
delete.topic.enable=true
listeners=PLAINTEXT://192.168.116.118:9092 //change to each node's own address
num.network.threads=4
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/home/kafka/kafka-logs
num.partitions=3
num.recovery.threads.per.data.dir=1
log.retention.hours=168
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
zookeeper.connect=192.168.116.118:2181,192.168.116.119:2181,192.168.116.120:2181
zookeeper.connection.timeout.ms=6000
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
group.initial.rebalance.delay.ms=0
Start Kafka on each node
[root@es_cluer1 bin]# ./kafka-server-start.sh -daemon ../config/server.properties
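Once all three brokers are up, their registration can be checked from any Zookeeper node (a quick sanity check; the zkCli.sh path assumes the Zookeeper install above):
[root@es_cluer1 bin]# /srv/zookeeper-3.4.14/bin/zkCli.sh -server 192.168.116.118:2181
ls /brokers/ids //should list the three broker IDs [1, 2, 3]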
Create a topic for testing
./kafka-topics.sh --create --topic tg_system_log --zookeeper 192.168.116.118:2181,192.168.116.119:2181,192.168.116.120:2181 --partitions 3 --replication-factor 1
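The topic layout and end-to-end produce/consume can then be verified with the stock console tools shipped with Kafka 2.5 (type a test message into the producer and it should appear in the consumer):
./kafka-topics.sh --describe --topic tg_system_log --zookeeper 192.168.116.118:2181
./kafka-console-producer.sh --broker-list 192.168.116.118:9092 --topic tg_system_log
./kafka-console-consumer.sh --bootstrap-server 192.168.116.119:9092 --topic tg_system_log --from-beginning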
- Configure Logstash (data collection)
[root@es_cluer1 bin]# cat /etc/logstash/conf.d/system.conf
input {
  file {
    path => "/var/log/messages"
    start_position => "beginning"
    type => "system_log"
    discover_interval => 2
  }
}
output {
  kafka {
    bootstrap_servers => "192.168.116.120:9092"
    topic_id => "tg_system_log"
    compression_type => "snappy"
  }
}
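Before starting the service, the pipeline syntax can be checked (the path assumes a package-based Logstash install):
/usr/share/logstash/bin/logstash -t -f /etc/logstash/conf.d/system.conf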
[root@es_cluer1 bin]# cat /etc/logstash/conf.d/redis.conf
input {
  file {
    path => "/var/log/redis/redis.log"
    start_position => "beginning"
    type => "redis_log"
    discover_interval => 2
  }
}
output {
  kafka {
    bootstrap_servers => "192.168.116.120:9092"
    topic_id => "tg_redis_log"
    compression_type => "snappy"
  }
}
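With both pipelines in place, start Logstash (assuming a package install managed by systemd) and tail one of the topics to confirm events are flowing into Kafka:
systemctl start logstash
./kafka-console-consumer.sh --bootstrap-server 192.168.116.120:9092 --topic tg_system_log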
4. Configure Logstash -> Kafka -> Elasticsearch
[root@es_cluer1 bin]# cat /etc/logstash/conf.d/kafka_to_es.conf
input {
  kafka {
    bootstrap_servers => "192.168.116.118:9092,192.168.116.119:9092,192.168.116.120:9092"
    codec => plain
    topics => ["tg_system_log"]
    consumer_threads => 5
    decorate_events => true
    #auto_offset_reset => "earliest"
    type => "system_log"
  }
  kafka {
    bootstrap_servers => "192.168.116.118:9092,192.168.116.119:9092,192.168.116.120:9092"
    codec => plain
    topics => ["tg_redis_log"]
    consumer_threads => 5
    decorate_events => true
    #auto_offset_reset => "earliest"
    type => "redis_log"
  }
}
output {
  if [type] == "system_log" {
    elasticsearch {
      hosts => ["192.168.116.118:9200","192.168.116.119:9200","192.168.116.120:9200"]
      index => "systemlog-%{+YYYY.MM}"
    }
  }
  if [type] == "redis_log" {
    elasticsearch {
      hosts => ["192.168.116.118:9200","192.168.116.119:9200","192.168.116.120:9200"]
      index => "redislog-%{+YYYY.MM}"
    }
  }
}
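After restarting Logstash with this pipeline, the monthly indices should show up in Elasticsearch (standard _cat API):
curl -s 'http://192.168.116.118:9200/_cat/indices?v' | grep -E 'systemlog|redislog'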
5. Configure Kibana
- Create index patterns for systemlog-* and redislog-* (Management -> Index Patterns)
- View the logs in Discover
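For a scripted setup, the index pattern can also be created through Kibana's saved objects API (available in Kibana 6.x/7.x; the Kibana host and the pattern ID "systemlog" below are assumptions for illustration):
curl -X POST 'http://192.168.116.118:5601/api/saved_objects/index-pattern/systemlog' \
  -H 'kbn-xsrf: true' -H 'Content-Type: application/json' \
  -d '{"attributes":{"title":"systemlog-*","timeFieldName":"@timestamp"}}'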