The ELK + Redis installation was covered earlier; this section only covers collecting Nginx logs without changing the log format.
1. Set the log format on the Nginx side as follows:
log_format access '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /usr/local/nginx/logs/access.log access;
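After changing the log format, validate the configuration and reload Nginx so it takes effect. A minimal sketch, assuming a source build under /usr/local/nginx as above:
/usr/local/nginx/sbin/nginx -t        # check the configuration syntax
/usr/local/nginx/sbin/nginx -s reload # reload so the new access log format is used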
2. Configure the logstash agent on the Nginx side as follows:
[root@localhost conf]# cat logstash_agent.conf
input {
  file {
    path => [ "/usr/local/nginx/logs/access.log" ]
    type => "nginx_access"
  }
}
output {
  redis {
    data_type => "list"
    key => "nginx_access_log"
    host => "192.168.100.70"
    port => "6379"
  }
}
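To confirm the agent is actually shipping events, start it and check that the Redis list grows. A rough sketch; the logstash install path and conf location are assumptions based on the prompt above, and redis-cli must be available:
/usr/local/logstash/bin/logstash -f /usr/local/logstash/conf/logstash_agent.conf &   # start the agent
curl -s http://127.0.0.1/ > /dev/null                                                # generate one hit in access.log
redis-cli -h 192.168.100.70 -p 6379 llen nginx_access_log                            # the list length should increase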
3. Configure the logstash indexer as follows:
[root@elk-node1 conf]# cat logstash_indexer.conf
input {
  redis {
    data_type => "list"
    key => "nginx_access_log"
    host => "192.168.100.70"
    port => "6379"
  }
}
filter {
  grok {
    patterns_dir => "./patterns"
    match => { "message" => "%{NGINXACCESS}" }
  }
  geoip {
    source => "clientip"
    target => "geoip"
    #database => "/usr/local/logstash/GeoLite2-City.mmdb"
    database => "/usr/local/src/GeoLiteCity.dat"
    add_field => [ "[geoip][coordinates]", "%{[geoip][longitude]}" ]
    add_field => [ "[geoip][coordinates]", "%{[geoip][latitude]}" ]
  }
  mutate {
    convert => [ "[geoip][coordinates]", "float" ]
    convert => [ "response", "integer" ]
    convert => [ "bytes", "integer" ]
  }
  mutate { remove_field => ["message"] }
  date {
    match => [ "timestamp", "dd/MMM/yyyy:HH:mm:ss Z" ]
  }
  mutate {
    remove_field => "timestamp"
  }
}
output {
  #stdout { codec => rubydebug }
  elasticsearch {
    hosts => "192.168.100.71"
    #protocol => "http"
    index => "logstash-nginx-access-log-%{+YYYY.MM.dd}"
  }
}
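Run the indexer and confirm the daily index appears in Elasticsearch. A sketch, with the conf path assumed; uncommenting the stdout rubydebug output above is also a quick way to eyeball parsed events:
/usr/local/logstash/bin/logstash -f /usr/local/logstash/conf/logstash_indexer.conf &   # start the indexer
curl -s '192.168.100.71:9200/_cat/indices?v' | grep nginx-access                       # logstash-nginx-access-log-* should be listed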
4. Create the pattern file that Logstash uses to parse the Nginx log.
mkdir -pv /usr/local/logstash/patterns
[root@elk-node1 ]# vim /usr/local/logstash/patterns/nginx
NGUSERNAME [a-zA-Z\.\@\-\+_%]+
NGUSER %{NGUSERNAME}
NGINXACCESS %{IPORHOST:clientip} - %{NOTSPACE:remote_user} \[%{HTTPDATE:timestamp}\] \"(?:%{WORD:verb} %{NOTSPACE:request}(?: HTTP/%{NUMBER:httpversion})?|%{DATA:rawrequest})\" %{NUMBER:response} (?:%{NUMBER:bytes}|-) %{QS:referrer} %{QS:agent} %{NOTSPACE:http_x_forwarded_for}
#This grok pattern must stay consistent with Nginx's log_format above.
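If the pattern does not match, events come through tagged _grokparsefailure, so it helps to test the pattern interactively before wiring it into the indexer. A minimal test config (the file name grok_test.conf is just illustrative) that reads one pasted log line from stdin and prints the parsed fields:
input { stdin {} }
filter {
  grok {
    patterns_dir => "/usr/local/logstash/patterns"
    match => { "message" => "%{NGINXACCESS}" }
  }
}
output { stdout { codec => rubydebug } }
Run it with /usr/local/logstash/bin/logstash -f grok_test.conf and paste a line from access.log; every field in the pattern should appear in the rubydebug output.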
What if we also want the Nginx response time in the log? Add $request_time to the log format so the extra field is generated:
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for" $request_time';
Then add one more field to the Nginx grok pattern:
[root@elk-node1 patterns]# cat nginx
NGUSERNAME [a-zA-Z\.\@\-\+_%]+
NGUSER %{NGUSERNAME}
NGINXACCESS %{IPORHOST:clientip} - %{NGUSER:remote_user} \[%{HTTPDATE:timestamp}\] \"(?:%{WORD:verb} %{NOTSPACE:request}(?: HTTP/%{NUMBER:httpversion})?|%{DATA:rawrequest})\" %{NUMBER:response} (?:%{NUMBER:bytes:float}|-) %{QS:referrer} %{QS:agent} %{NOTSPACE:http_x_forwarded_for} %{NUMBER:request_time:float}
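The :float suffix in %{NUMBER:request_time:float} makes grok cast the field directly, so request_time reaches Elasticsearch as a number and can be aggregated. As an illustration, a rough latency-percentile query against the index created above (host and index name taken from this setup):
curl -s '192.168.100.71:9200/logstash-nginx-access-log-*/_search?pretty' -H 'Content-Type: application/json' -d '{
  "size": 0,
  "aggs": { "request_time_pct": { "percentiles": { "field": "request_time" } } }
}'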
Attached is a logstash.conf example from my own production environment at the time (a conf file for logstash-5.2.2):


input {
  redis {
    data_type => "list"
    key => "uc01-nginx-access-logs"
    host => "192.168.100.71"
    port => "6379"
    db => "1"
    password => "juzi1@#$%QW"
  }
  redis {
    data_type => "list"
    key => "uc02-nginx-access-logs"
    host => "192.168.100.71"
    port => "6379"
    db => "1"
    password => "juzi1@#$%QW"
  }
  redis {
    data_type => "list"
    key => "p-nginx-access-logs"
    host => "192.168.100.71"
    port => "6379"
    db => "1"
    password => "juzi1@#$%QW"
  }
  redis {
    data_type => "list"
    key => "https-nginx-access-logs"
    host => "192.168.100.71"
    port => "6379"
    db => "1"
    password => "juzi1@#$%QW"
  }
  redis {
    data_type => "list"
    key => "rms01-nginx-access-logs"
    host => "192.168.100.71"
    port => "6379"
    db => "1"
    password => "juzi1@#$%QW"
  }
  redis {
    data_type => "list"
    key => "rms02-nginx-access-logs"
    host => "192.168.100.71"
    port => "6379"
    db => "1"
    password => "juzi1@#$%QW"
  }
}
filter {
  if [path] =~ "nginx" {
    grok {
      patterns_dir => "./patterns"
      match => { "message" => "%{NGINXACCESS}" }
    }
    mutate { remove_field => ["message"] }
    date {
      match => [ "timestamp", "dd/MMM/yyyy:HH:mm:ss Z" ]
    }
    mutate { remove_field => "timestamp" }
    geoip {
      source => "clientip"
      target => "geoip"
      database => "/usr/local/GeoLite2-City.mmdb"
      add_field => [ "[geoip][coordinates]", "%{[geoip][longitude]}" ]
      add_field => [ "[geoip][coordinates]", "%{[geoip][latitude]}" ]
    }
    mutate { convert => [ "[geoip][coordinates]", "float" ] }
  } else {
    drop {}
  }
}
output {
  if [type] == "uc01-nginx-access" {
    elasticsearch {
      hosts => [ "192.168.100.70:9200", "192.168.100.71:9200" ]
      index => "logstash-uc01-log-%{+YYYY.MM.dd}"
      user => "logstash_internal"
      password => "changeme"
    }
  }
  if [type] == "uc02-nginx-access" {
    elasticsearch {
      hosts => [ "192.168.100.70:9200", "192.168.100.71:9200" ]
      index => "logstash-uc02-log-%{+YYYY.MM.dd}"
      user => "logstash_internal"
      password => "changeme"
    }
  }
  if [type] == "p-nginx-access" {
    elasticsearch {
      hosts => [ "192.168.100.70:9200", "192.168.100.71:9200" ]
      index => "logstash-p-log-%{+YYYY.MM.dd}"
      user => "logstash_internal"
      password => "changeme"
    }
  }
  if [type] == "https-nginx-access" {
    elasticsearch {
      hosts => [ "192.168.100.70:9200", "192.168.100.71:9200" ]
      index => "logstash-api-log-%{+YYYY.MM.dd}"
      user => "logstash_internal"
      password => "changeme"
    }
  }
  if [type] == "rms01-nginx-access" {
    elasticsearch {
      hosts => [ "192.168.100.70:9200", "192.168.100.71:9200" ]
      index => "logstash-rms01-log-%{+YYYY.MM.dd}"
      user => "logstash_internal"
      password => "changeme"
    }
  }
  if [type] == "rms02-nginx-access" {
    elasticsearch {
      hosts => [ "192.168.100.70:9200", "192.168.100.71:9200" ]
      index => "logstash-rms02-log-%{+YYYY.MM.dd}"
      user => "logstash_internal"
      password => "changeme"
    }
  }
}
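Because this filter points at a relative patterns_dir => "./patterns", the indexer needs to be launched from the Logstash install directory (or the path changed to an absolute one). A sketch of how it might be started with logstash-5.2.2; the conf file name and its location under etc/ are assumptions matching the layout shown below:
cd /usr/local/logstash-5.2.2
bin/logstash -f etc/logstash_indexer.conf --config.test_and_exit    # syntax-check first (5.x flag)
nohup bin/logstash -f etc/logstash_indexer.conf > /dev/null 2>&1 &  # then run it in the background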


And an agent-side configuration example from the same setup (logstash-5.2.2):
[root@localhost ~]$ cd /usr/local/logstash-5.2.2/etc
[root@localhost etc]$ cat logstash_agentd.conf
input {
  file {
    type => "web-nginx-access"
    path => "/usr/local/nginx/logs/access.log"
  }
}
output {
  #file {
  #  path => "/tmp/%{+YYYY-MM-dd}.messages.gz"
  #  gzip => true
  #}
  redis {
    data_type => "list"
    key => "web01-nginx-access-logs"
    host => "192.168.100.71"
    port => "6379"
    db => "1"
    password => "@#$%QW"
  }
}
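To verify this agent is pushing into Redis DB 1, check the list length with redis-cli, using the host, db, password, and key from the config above:
redis-cli -h 192.168.100.71 -p 6379 -a '@#$%QW' -n 1 llen web01-nginx-access-logs
# A growing number means the agent is shipping; 0 can also mean the indexer has already drained the list.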