Kafka + ELK for Log Collection and Processing

Kafka + ZooKeeper cluster deployment
  1. Zookeeper
[root@es_cluer1 ~]# mkdir /home/zookeeper
[root@es_cluer1 srv]# tar xf zookeeper-3.4.14.tar.gz
[root@es_cluer1 ~]# cd /srv/zookeeper-3.4.14/conf/
[root@es_cluer1 conf]# mv zoo_sample.cfg zoo.cfg
[root@es_cluer1 conf]# grep '^[a-z]' zoo.cfg 
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/home/zookeeper/data
dataLogDir=/home/zookeeper/log
clientPort=2181
server.1=192.168.116.118:2888:3888
server.2=192.168.116.119:2888:3888
server.3=192.168.116.120:2888:3888
[root@es_cluer1 conf]# mkdir /home/zookeeper/data
[root@es_cluer1 conf]# mkdir /home/zookeeper/log
[root@es_cluer1 conf]# echo "1" > /home/zookeeper/data/myid //三台机器上必须都要创建myid文件。看着点,1~3节点ID是不一样的(按上面配置server.*去每台机器做配置)
Start the ZooKeeper service on all three nodes
[root@es_cluer1 bin]# ./zkServer.sh start
ZooKeeper JMX enabled by default
Using config: /srv/zookeeper-3.4.14/bin/../conf/zoo.cfg
Starting zookeeper ... STARTED
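Once all three nodes are up, the ensemble can be verified with zkServer.sh status; one node should report Mode: leader and the other two Mode: follower:

[root@es_cluer1 bin]# ./zkServer.sh status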
  2. Kafka
[root@es_cluer1 bin]# mkdir /home/kafka
[root@es_cluer1 srv]# tar xf kafka_2.12-2.5.0.tgz
[root@es_cluer1 bin]# cd /srv/kafka_2.12-2.5.0/config/
[root@es_cluer1 config]#  grep '^[a-z]' server.properties 
broker.id=1   //must be changed on each node
delete.topic.enable=true
listeners=PLAINTEXT://192.168.116.118:9092 //must be changed on each node
num.network.threads=4
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/home/kafka/kafka-logs
num.partitions=3
num.recovery.threads.per.data.dir=1
log.retention.hours=168
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
zookeeper.connect=192.168.116.118:2181,192.168.116.119:2181,192.168.116.120:2181
zookeeper.connection.timeout.ms=6000
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
group.initial.rebalance.delay.ms=0

Start Kafka
[root@es_cluer1 bin]# ./kafka-server-start.sh -daemon ../config/server.properties
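Optionally, confirm that all three brokers registered in ZooKeeper before creating topics; a quick check with the bundled ZooKeeper CLI (path assumes the ZooKeeper install above) is:

[root@es_cluer1 bin]# /srv/zookeeper-3.4.14/bin/zkCli.sh -server 192.168.116.118:2181 ls /brokers/ids

With the broker.id values configured above, the result should list 1, 2 and 3.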

Create a test topic
./kafka-topics.sh --create --topic tg_system_log --zookeeper 192.168.116.118:2181,192.168.116.119:2181,192.168.116.120:2181 --partitions 3 --replication-factor 1
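To verify end-to-end message flow through the new topic, the console producer and consumer shipped with Kafka can be run in two separate terminals (broker addresses follow the listeners configured above); anything typed into the producer should appear in the consumer:

./kafka-console-producer.sh --broker-list 192.168.116.118:9092 --topic tg_system_log
./kafka-console-consumer.sh --bootstrap-server 192.168.116.119:9092 --topic tg_system_log --from-beginning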
  3. Configure Logstash (data collection)
[root@es_cluer1 bin]# cat /etc/logstash/conf.d/system.conf 
input {
  file {
    path => "/var/log/messages"
    start_position => "beginning"
    type => "system_log"
    discover_interval => 2
  }
}
output {
    kafka {
      bootstrap_servers => "192.168.116.120:9092"
      topic_id => "tg_system_log"
      compression_type => "snappy"
    }
}
[root@es_cluer1 bin]# cat /etc/logstash/conf.d/redis.conf 
input {
  file {
    path => "/var/log/redis/redis.log"
    start_position => "beginning"
    type => "redis_log"
    discover_interval => 2
  }
}
output {
    kafka {
      bootstrap_servers => "192.168.116.120:9092"
      topic_id => "tg_redis_log"
      compression_type => "snappy"
    }
}
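Before relying on these pipelines, each file can be syntax-checked and Logstash started as a service; the binary path below assumes the standard RPM install layout:

[root@es_cluer1 bin]# /usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/system.conf --config.test_and_exit
[root@es_cluer1 bin]# systemctl start logstash

When started as a service, Logstash loads every *.conf file under /etc/logstash/conf.d/ into a single pipeline unless pipelines.yml is configured otherwise.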

  4. Configure Logstash: Kafka → Elasticsearch

[root@es_cluer1 bin]# cat /etc/logstash/conf.d/kafka_to_es.conf 
input {
    kafka {
      #bootstrap_servers => ["192.168.116.118:9092","192.168.116.119:9092","192.168.116.120:9092"]
      codec => plain
      zk_connect => "192.168.116.118:2181,192.168.116.119:2181,192.168.116.120:2181"
      topic_id => "tg_system_log"
      consumer_threads => 5
      decorate_events => true
      #auto_offset_reset => "earliest"
      type => "system_log"
  }
    kafka {
      codec => plain
      zk_connect => "192.168.116.118:2181,192.168.116.119:2181,192.168.116.120:2181"
      topic_id => "tg_redis_log"
      consumer_threads => 5
      decorate_events => true
      #auto_offset_reset => "earliest"
      type => "redis_log"
  }
}
output {
  if [type] == "system_log"{
    elasticsearch {
      hosts => ["192.168.116.118:9200","192.168.116.119:9200","192.168.116.120:9200"]
      index => "systemlog-%{+YYY.MM}"
    }
  }
  if [type] == "redis_log"{
    elasticsearch {
      hosts => ["192.168.116.118:9200","192.168.116.119:9200","192.168.116.120:9200"]
      index => "redislog-%{+YYY.MM}"
    }
  }
}
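Note that zk_connect and topic_id belong to the older logstash-input-kafka plugin. On recent Logstash releases the Kafka input connects to the brokers directly; a rough equivalent of the system_log input above would look like the sketch below (option names per the current plugin; the group_id value is an arbitrary example):

    kafka {
      bootstrap_servers => "192.168.116.118:9092,192.168.116.119:9092,192.168.116.120:9092"
      topics => ["tg_system_log"]
      group_id => "logstash_system_log"
      codec => plain
      consumer_threads => 5
      decorate_events => true
      auto_offset_reset => "earliest"
      type => "system_log"
    }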


  5. Configure Kibana

  • Create an index pattern
  • View the logs
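Before creating the index patterns, you can confirm the indices exist in Elasticsearch via the standard _cat API (any of the three nodes will answer):

[root@es_cluer1 bin]# curl -s 'http://192.168.116.118:9200/_cat/indices?v'

The systemlog-* and redislog-* indices listed there are the patterns to enter in Kibana.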
