CentOS 7 ELFKK setup and testing

Introduction

ELFKK is simply ELK + Filebeat + Kafka.

Preparation

Server   IP             Apps
node1    172.16.10.37   els-node1, filebeat, kafka (zookeeper), nginx (for testing)
node2    172.16.10.41   els-node2, kafka (zookeeper), logstash
node3    172.16.10.42   els-node3, kafka (zookeeper), kibana
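
The nodes are addressed by hostname throughout, so each machine presumably carries name mappings like the following in /etc/hosts (an assumption; the original never shows this step):

$ cat >> /etc/hosts <<'EOF'
172.16.10.37 node1
172.16.10.41 node2
172.16.10.42 node3
EOF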

Deployment

jdk

node1,node2,node3

Version 1.8 or later

Extract

$ wget 'http://223.202.203.110/hanchaoran/elk/jdk-8u151-linux-x64.tar.gz'
$ tar zxf jdk-8u151-linux-x64.tar.gz -C /etc/      

Add to the profile file

$ vi /etc/profile 
export JAVA_HOME=/etc/jdk1.8.0_151
export CLASSPATH=./:$JAVA_HOME/lib:$JAVA_HOME/jre/lib
export PATH=$JAVA_HOME/bin:$PATH

Reload the environment variables

$ source /etc/profile 
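
A quick sanity check that the JDK is now on PATH (first line of output shown; assumed to match the 8u151 tarball above):

$ java -version
java version "1.8.0_151"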

elasticsearch

node1,node2,node3

Download from the official site

https://www.elastic.co/cn/downloads/elasticsearch

Extract

$ tar zxf elasticsearch-6.5.4.tar.gz -C /etc/
$ mv /etc/elasticsearch-6.5.4 /etc/elasticsearch

Create a user and grant ownership

$ adduser els
$ chown -R els:els /etc/elasticsearch

Raise the open-file and memory-lock limits

$ vim /etc/security/limits.conf
* soft nofile 655360
* hard nofile 655360
* soft nproc 655360
* hard nproc 655360
* soft memlock unlimited
* hard memlock unlimited
els soft memlock unlimited
els hard memlock unlimited

Raise the process/thread limits

$ vim /etc/security/limits.d/20-nproc.conf
*          soft    nproc     65536
root       soft    nproc     unlimited

$ vim /etc/sysctl.conf
vm.max_map_count=262144
fs.file-max=65536

$ sysctl -p

Edit the configuration

$ cd /etc/elasticsearch/config
$ cat elasticsearch.yml 
# Cluster name; must be the same on every node in the cluster
cluster.name: efk
# Name of this node
node.name: node1
# This node is eligible to be elected master
node.master: true
# This node stores data
node.data: true
path.data: /etc/elasticsearch/data
path.logs: /etc/elasticsearch/logs
bootstrap.memory_lock: true
# IP address to bind to; may be IPv4 or IPv6
network.bind_host: 0.0.0.0
# Address other nodes use to reach this node
network.publish_host: node1
# Shorthand that sets both bind_host and publish_host
network.host: node1
# Port for inter-node transport
transport.tcp.port: 9300
# Whether to compress inter-node TCP traffic
transport.tcp.compress: true
# Maximum size of an HTTP request body
http.max_content_length: 100mb
# Whether to expose the HTTP service
http.enabled: true
http.port: 9200
discovery.zen.ping.unicast.hosts: ["node1:9300","node2:9300", "node3:9300"]
discovery.zen.minimum_master_nodes: 2
http.cors.enabled: true
http.cors.allow-origin: "*"

# On the three machines, set node.name and the network.* hostnames to node1, node2, node3 respectively

Start Elasticsearch on all three machines

[root@node3 config]# su - els
[els@node3 ~]$ /etc/elasticsearch/bin/elasticsearch -d

Issue: on a VM with 2 GB of RAM, startup failed with an out-of-memory error; after raising the VM to 4 GB it started normally.
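
If more RAM is not available, the JVM heap can be shrunk instead. A minimal sketch, assuming the stock 6.x defaults in config/jvm.options:

$ vi /etc/elasticsearch/config/jvm.options
# stock defaults are 1g/1g; on a 2 GB VM try -Xms512m / -Xmx512m
-Xms1g
-Xmx1g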

Check the cluster health

[root@node1 elasticsearch]# curl -XGET 'http://node3:9200/_cluster/health?pretty=true'
{
  "cluster_name" : "efk",
  "status" : "green",
  "timed_out" : false,
  "number_of_nodes" : 3,
  "number_of_data_nodes" : 3,
  "active_primary_shards" : 0,
  "active_shards" : 0,
  "relocating_shards" : 0,
  "initializing_shards" : 0,
  "unassigned_shards" : 0,
  "delayed_unassigned_shards" : 0,
  "number_of_pending_tasks" : 0,
  "number_of_in_flight_fetch" : 0,
  "task_max_waiting_in_queue_millis" : 0,
  "active_shards_percent_as_number" : 100.0
}
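
For a quick per-node summary, the _cat API is also handy:

$ curl -XGET 'http://node1:9200/_cat/nodes?v'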

Install the head plugin

On Elasticsearch 2.x the head plugin could be installed as a site plugin and browsed under the HTTP port:

$path/es/bin/plugin -install mobz/elasticsearch-head

http://localhost:9200/_plugin/head/

Site plugins were removed in Elasticsearch 5.x, so this no longer works on 6.5.4; elasticsearch-head must instead run as a standalone app, which is why http.cors.enabled was set in elasticsearch.yml above.
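
A minimal sketch of the standalone setup, assuming Node.js and npm are installed (following the mobz/elasticsearch-head README):

$ git clone https://github.com/mobz/elasticsearch-head.git
$ cd elasticsearch-head
$ npm install
$ npm run start
# the UI listens on port 9100; browse to http://node1:9100 and point it at http://node1:9200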

kafka

node1,node2,node3

Extract

[root@node1 ~]# tar xf kafka_2.12-2.1.0.tar -C /etc/
[root@node1 ~]# mv /etc/kafka_2.12-2.1.0 /etc/kafka

Edit the Kafka configuration file (only broker.id and listeners differ between the three nodes)

[root@node1 config]# grep "^[a-Z]" server.properties 
broker.id=1
listeners=PLAINTEXT://node1:9092
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/etc/kafka/logs
num.partitions=1
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
log.retention.hours=24
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
zookeeper.connect=node1:2181,node2:2181,node3:2181
zookeeper.connection.timeout.ms=6000
group.initial.rebalance.delay.ms=0

[root@node2 ~]# grep "^[a-Z]"  /etc/kafka/config/server.properties                     
broker.id=2
listeners=PLAINTEXT://node2:9092
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/etc/kafka/logs
num.partitions=1
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
log.retention.hours=24
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
zookeeper.connect=node1:2181,node2:2181,node3:2181
zookeeper.connection.timeout.ms=6000
group.initial.rebalance.delay.ms=0

[root@node3 conf]# grep "^[a-Z]"  /etc/kafka/config/server.properties 
broker.id=3
listeners=PLAINTEXT://node3:9092
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/etc/kafka/logs
num.partitions=1
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
log.retention.hours=24
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
zookeeper.connect=node1:2181,node2:2181,node3:2181
zookeeper.connection.timeout.ms=6000
group.initial.rebalance.delay.ms=0

Edit the ZooKeeper configuration file

# Identical on all 3 machines
$ cat /etc/kafka/config/zookeeper.properties
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/etc/zookeeper/data
clientPort=2181
server.1=node1:2888:3888
server.2=node2:2888:3888
server.3=node3:2888:3888
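
One step the quorum settings above imply but the guide does not show: each node needs a myid file in dataDir whose number matches its server.N line, or ZooKeeper will not start in cluster mode. Run the matching command on each node:

$ mkdir -p /etc/zookeeper/data
$ echo 1 > /etc/zookeeper/data/myid   # use 2 on node2, 3 on node3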

Start ZooKeeper in daemon mode

$ cd /etc/kafka
$ ./bin/zookeeper-server-start.sh -daemon ./config/zookeeper.properties

Start Kafka in daemon mode

$ cd /etc/kafka
$ ./bin/kafka-server-start.sh -daemon config/server.properties

Check the background Java processes

$ jps
2918 Jps
2120 QuorumPeerMain
2456 Kafka
2616 Elasticsearch

# jps is a small tool for listing Java processes; if the jps command is missing, only java-1.8.0-openjdk is installed and java-1.8.0-openjdk-devel is needed as well
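
To confirm the three brokers have formed a cluster, a test topic can optionally be created and listed (in Kafka 2.1 the topic tool still talks to ZooKeeper):

$ /etc/kafka/bin/kafka-topics.sh --create --zookeeper node1:2181 --replication-factor 3 --partitions 3 --topic test
$ /etc/kafka/bin/kafka-topics.sh --list --zookeeper node1:2181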

logstash

node2

Extract

[root@node2 ~]# tar zxf logstash-6.5.4.tar.gz -C /etc
[root@node2 ~]# mv /etc/logstash-6.5.4 /etc/logstash 

Add environment variables

export logstash_HOME=/etc/logstash
export PATH=$logstash_HOME/bin:$PATH

Add a pipeline configuration file (ships Kafka data to Elasticsearch)

[root@node2 logstash]# mkdir conf.d
[root@node2 logstash]# cat conf.d/kafka2es.conf 
input{
  kafka{
    bootstrap_servers => "node1:9092,node2:9092,node3:9092"
    topics => "nginx-filebeat"
    consumer_threads => 1
    decorate_events => true
    codec => "json"
    auto_offset_reset => "latest"
  }
}
filter{
    # custom parsing for the log format goes here
}

output{
  elasticsearch {
    hosts => ["node1:9200","node2:9200","node3:9200"]
    index => "nginx-filebeat-%{+YYYY.MM.dd}"
  }
  stdout{
     codec => "rubydebug"
  }
}
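
Before starting, the pipeline syntax can optionally be checked with Logstash's built-in config test:

$ bin/logstash -f ./conf.d/kafka2es.conf --config.test_and_exit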

Start the service

[root@node2 logstash]# nohup bin/logstash -f ./conf.d/kafka2es.conf > /dev/null &

filebeat

node1

Extract

[root@node1 ~]# tar zxf filebeat-6.5.4-linux-x86_64.tar.gz -C /etc/
[root@node1 ~]# mv /etc/filebeat-6.5.4-linux-x86_64 /etc/filebeat

Edit the configuration file

[root@node1 filebeat]# egrep -v "^(#|$|  #)" filebeat.yml  
# module loading config; keep as-is
filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml
  reload.enabled: false
setup.template.settings:
  index.number_of_shards: 3
# Kibana URL
setup.kibana:
  host: "node1:5601"
processors:
  - add_host_metadata: ~
  - add_cloud_metadata: ~
# ship nginx logs to Kafka
filebeat.modules:
- module: nginx
  access:
    enabled: true
    var.paths: 
      - /var/log/nginx/access.log
filebeat.prospectors:
- type: log
  enabled: false
  paths:
    - /var/log/*.log
output.kafka:
  enabled: true
  hosts: ["node1:9092","node2:9092","node3:9092"]
  topic: "nginx-filebeat"
  partition.hash:
    reachable_only: true
  compression: gzip
  max_message_bytes: 1000000
  required_acks: 1
logging.to_files: true
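
Filebeat can validate this file before running (a standard Filebeat 6.x subcommand):

$ ./filebeat test config -c filebeat.yml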

Start the service

[root@node1 filebeat]# nohup ./filebeat -e -c filebeat.yml > /dev/null &
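
To confirm events are reaching Kafka, attach a console consumer to the topic (standard Kafka tooling; the messages are the JSON events Filebeat produces):

$ /etc/kafka/bin/kafka-console-consumer.sh --bootstrap-server node1:9092 --topic nginx-filebeat --from-beginning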

kibana

node3

Extract

[root@node3 ~]# tar zxf kibana-6.5.4-linux-x86_64.tar.gz -C /etc/
[root@node3 ~]# mv /etc/kibana-6.5.4-linux-x86_64 /etc/kibana

Edit the configuration file

[root@node3 config]# egrep -v "^(#|$)" kibana.yml 
server.port: 5601
server.host: "node3"
server.name: "dhph-kibana"
elasticsearch.url: "http://node1:9200"

# server.host must be the host Kibana runs on (node3 here); the original listed node1, which would fail to bind

Start Kibana

nohup ./bin/kibana &

Access Kibana

http://172.16.10.42:5601/app/kibana

nginx (compiled from source)

node1

Extract

$ tar xf nginx-1.15.8.tar
$ cd nginx-1.15.8

Create the user

$ useradd -s /sbin/nologin -M nginx

Install dependencies

$ yum install -y gcc* automake pcre pcre-devel zlib zlib-devel openssl openssl-devel

Compile and install

$ ./configure  --prefix=/usr/local/nginx  --sbin-path=/usr/local/nginx/sbin/nginx --conf-path=/usr/local/nginx/conf/nginx.conf --error-log-path=/var/log/nginx/error.log  --http-log-path=/var/log/nginx/access.log  --pid-path=/var/run/nginx/nginx.pid --lock-path=/var/lock/nginx.lock  --user=nginx --group=nginx --with-http_ssl_module --with-http_stub_status_module --with-http_gzip_static_module --http-client-body-temp-path=/var/tmp/nginx/client/ --http-proxy-temp-path=/var/tmp/nginx/proxy/ --http-fastcgi-temp-path=/var/tmp/nginx/fcgi/ --http-uwsgi-temp-path=/var/tmp/nginx/uwsgi --http-scgi-temp-path=/var/tmp/nginx/scgi --with-pcre
$ make
$ make install
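
One gotcha with the flags above: nginx does not create the parent of the temp paths itself, so if startup fails complaining about /var/tmp/nginx, create it first (a sketch derived from the ./configure paths):

$ mkdir -p /var/tmp/nginx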

Start nginx

$ /usr/local/nginx/sbin/nginx
# the listen port has been changed to 8088

Testing

  1. Run nginx and make some test requests (see the sketch below).
  2. In Kibana, create an index pattern: nginx-filebeat*
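
A quick way to generate access-log entries for the pipeline (port 8088 as noted above; the loop size is arbitrary):

$ for i in $(seq 1 50); do curl -s http://node1:8088/ > /dev/null; done
$ tail /var/log/nginx/access.log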