1、es
# Run Elasticsearch 6.8.6, persisting data and logs on the host.
# NOTE(review): added discovery.type=single-node — without it a standalone
# ES 6.x container trips the production bootstrap checks and exits; remove
# this env var if this node is meant to join a multi-node cluster.
docker run -d \
  -p 9200:9200 -p 9300:9300 \
  -e "discovery.type=single-node" \
  -v /home/oper/es/data:/usr/share/elasticsearch/data \
  -v /home/oper/es/logs:/usr/share/elasticsearch/logs \
  --name=elasticsearch \
  docker.elastic.co/elasticsearch/elasticsearch:6.8.6
2、filebeat
a、创建 ~/filebeat/filebeat.yml
#=========================== Filebeat inputs =============================
filebeat.inputs:
  - type: log
    # Change to true to enable this input configuration.
    enabled: true
    # Paths that should be crawled and fetched. Glob based paths.
    # /logs is the container path the host log directory is mounted to.
    paths:
      - /logs/*.log
    # Custom field used by the output `indices` condition below to route
    # these events into the "ppt-all" index.
    fields:
      type: "ppt"

#-------------------------- Elasticsearch output ------------------------------
output.elasticsearch:
  enabled: true
  # Points at the Elasticsearch instance (on host "63" per the notes).
  # Replace "elasticsearch" with the real IP/hostname of the ES node.
  # (The original used a trailing "//" comment here, which is not valid
  # YAML and would have corrupted the hosts value.)
  hosts: ["elasticsearch:9200"]
  # Ingest pipeline used to filter/transform events before indexing.
  #pipeline: "fault_detail1_pipline"
  # Optional protocol and basic auth credentials.
  #protocol: "https"
  #username: "elastic"
  #password: "changeme"
  indices:
    - index: "ppt-all"
      when.equals:
        fields.type: "ppt"
b、启动容器
# Run Filebeat; the host's /tmp (application log output directory) is
# mounted to /logs inside the container, matching the `paths` glob in
# filebeat.yml.  (The original line ended with "// /tmp为主机日志输出目录…" —
# shell has no "//" comment, so that text was passed to the container as
# its command and broke startup; it is now this comment.)
# NOTE(review): filebeat 7.4.1 talking to Elasticsearch 6.8.6 crosses a
# major version. Beats 7.x claims compatibility with ES >= 6.8, but using
# a matching 6.8.x filebeat image is safer — confirm before deploying.
docker run -d --name filebeat --user=root \
  --log-driver json-file --log-opt max-size=100m --log-opt max-file=2 \
  -v /tmp/:/logs/ \
  -v ~/filebeat/filebeat.yml:/usr/share/filebeat/filebeat.yml \
  docker.io/store/elastic/filebeat:7.4.1
3、kibana
a、创建文件 ~/kibana/config/kibana.yml
# Instance name reported by this Kibana server.
server.name: kibana
# "0" is shorthand for binding all interfaces, so the published port
# (-p 5601:5601) is reachable from outside the container.
server.host: "0"
# Elasticsearch endpoint; the "elasticsearch" hostname resolves via the
# --link alias used in the kibana run command.
# NOTE(review): `elasticsearch.hosts` was introduced in Kibana 6.6
# (older versions use `elasticsearch.url`); the run command below pulls
# kibana:6.5.4, which would ignore this key — confirm the image version.
elasticsearch.hosts: [ "http://elasticsearch:9200" ]
# Let the monitoring UI talk to the container-level Elasticsearch.
monitoring.ui.container.elasticsearch.enabled: true
# Use the Simplified Chinese UI locale.
i18n.locale: "zh-CN"
b、运行容器
# Run Kibana linked to the elasticsearch container.
# Fixes vs. the original command:
#  - the config must be mounted at /usr/share/kibana/config/kibana.yml;
#    the image does not read /config/kibana.yml, so the original mount
#    was silently ignored;
#  - image tag bumped 6.5.4 -> 6.8.6 to match the Elasticsearch version
#    (Kibana and ES should run the same version; 6.5.4 also predates the
#    `elasticsearch.hosts` setting used in kibana.yml).
docker run -d --name kibana \
  -v ~/kibana/config/kibana.yml:/usr/share/kibana/config/kibana.yml \
  --link elasticsearch:elasticsearch \
  -p 5601:5601 \
  docker.elastic.co/kibana/kibana:6.8.6