Filebeat and Logstash Configuration

```yaml
###################### Filebeat Configuration Example #########################

# This file is an example configuration file highlighting only the most common
# options. The filebeat.reference.yml file from the same directory contains all the
# supported options with more comments. You can use it as a reference.
#
# You can find the full configuration reference here:
# https://www.elastic.co/guide/en/beats/filebeat/index.html

# For more available modules and options, please see the filebeat.reference.yml sample
# configuration file.

#=========================== Filebeat inputs =============================

filebeat.inputs:

# Each - is an input. Most options can be set at the input level, so
# you can use different inputs for various configurations.
# Below are the input specific configurations.

- type: log

  # Change to true to enable this input configuration.
  enabled: true
  encoding: GB2312

  # Paths that should be crawled and fetched. Glob based paths.
  paths:
    #- /var/log/*.log
    - E:\nginx-1.14.2\logs\access.log
  fields:
    log_topic: nginx_access_logs
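    # log_topic is a custom field; the Logstash pipeline later in this post
    # branches on [fields][log_topic] to pick per-source filters and the
    # Elasticsearch index.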

- type: log
  enabled: true
  encoding: GB2312
  paths:
    - E:\CIRMS\logs\localhost_access.*.log
  fields:
    log_topic: tomcat_access_logs
    
- type: log
  enabled: true
  encoding: GB2312
  paths:
    - E:\ELK\logs\catalina.*.log
  fields:
    log_topic: tomcat_catalina_logs
    
  # Exclude lines. A list of regular expressions to match. It drops the lines that are
  # matching any regular expression from the list.
  #exclude_lines: ['^DBG']

  # Include lines. A list of regular expressions to match. It exports the lines that are
  # matching any regular expression from the list.
  #include_lines: ['^ERR', '^WARN']

  # Exclude files. A list of regular expressions to match. Filebeat drops the files that
  # are matching any regular expression from the list. By default, no files are dropped.
  #exclude_files: ['.gz$']

  # Optional additional fields. These fields can be freely picked
  # to add additional information to the crawled log files for filtering
  #fields:
  #  level: debug
  #  review: 1

  ### Multiline options

  # Multiline can be used for log messages spanning multiple lines. This is common
  # for Java Stack Traces or C-Line Continuation

  # The regexp Pattern that has to be matched. The example pattern matches all lines starting with [
  #multiline.pattern: ^\[

  # Defines if the pattern set under pattern should be negated or not. Default is false.
  #multiline.negate: false

  # Match can be set to "after" or "before". It is used to define if lines should be append to a pattern
  # that was (not) matched before or after or as long as a pattern is not matched based on negate.
  # Note: After is the equivalent to previous and before is the equivalent to next in Logstash
  #multiline.match: after
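
  # A hedged sketch for the catalina input above: Java stack traces span
  # multiple lines, and the Logstash grok used later expects each event to
  # start with an ISO8601 timestamp, so lines without one could be folded
  # into the previous event (this pattern is an assumption, not part of the
  # original config):
  #multiline.pattern: '^\d{4}-\d{2}-\d{2}'
  #multiline.negate: true
  #multiline.match: after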


#============================= Filebeat modules ===============================

filebeat.config.modules:
  # Glob pattern for configuration loading
  path: ${path.config}/modules.d/*.yml

  # Set to true to enable config reloading
  reload.enabled: false

  # Period on which files under path should be checked for changes
  #reload.period: 10s

#==================== Elasticsearch template setting ==========================

setup.template.settings:
  index.number_of_shards: 3
  #index.codec: best_compression
  #_source.enabled: false

#================================ General =====================================

# The name of the shipper that publishes the network data. It can be used to group
# all the transactions sent by a single shipper in the web interface.
#name:

# The tags of the shipper are included in their own field with each
# transaction published.
#tags: ["service-X", "web-tier"]

# Optional fields that you can specify to add additional information to the
# output.
#fields:
#  env: staging


#============================== Dashboards =====================================
# These settings control loading the sample dashboards to the Kibana index. Loading
# the dashboards is disabled by default and can be enabled either by setting the
# options here, or by using the `-setup` CLI flag or the `setup` command.
#setup.dashboards.enabled: false

# The URL from where to download the dashboards archive. By default this URL
# has a value which is computed based on the Beat name and version. For released
# versions, this URL points to the dashboard archive on the artifacts.elastic.co
# website.
#setup.dashboards.url:

#============================== Kibana =====================================

# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
# This requires a Kibana endpoint configuration.
setup.kibana:

  # Kibana Host
  # Scheme and port can be left out and will be set to the default (http and 5601)
  # In case you specify an additional path, the scheme is required: http://localhost:5601/path
  # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
  #host: "localhost:5601"

  # Kibana Space ID
  # ID of the Kibana Space into which the dashboards should be loaded. By default,
  # the Default Space will be used.
  #space.id:

#============================= Elastic Cloud ==================================

# These settings simplify using filebeat with the Elastic Cloud (https://cloud.elastic.co/).

# The cloud.id setting overwrites the `output.elasticsearch.hosts` and
# `setup.kibana.host` options.
# You can find the `cloud.id` in the Elastic Cloud web UI.
#cloud.id:

# The cloud.auth setting overwrites the `output.elasticsearch.username` and
# `output.elasticsearch.password` settings. The format is `<user>:<pass>`.
#cloud.auth:

#================================ Outputs =====================================

# Configure what output to use when sending the data collected by the beat.

#-------------------------- Elasticsearch output ------------------------------
#output.elasticsearch:
  # Array of hosts to connect to.
  #hosts: ["localhost:9200"]

  # Enable ILM (beta) to use index lifecycle management instead of daily indices.
  #ilm.enabled: false

  # Optional protocol and basic auth credentials.
  #protocol: "https"
  #username: "elastic"
  #password: "changeme"

#----------------------------- Logstash output --------------------------------
output.logstash:
  # The Logstash hosts
  hosts: ["localhost:5044"]
  topic: '%{[fields.log_topic]}'
  # Optional SSL. By default is off.
  # List of root certificates for HTTPS server verifications
  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]

  # Certificate for SSL client authentication
  #ssl.certificate: "/etc/pki/client/cert.pem"

  # Client Certificate Key
  #ssl.key: "/etc/pki/client/cert.key"

#================================ Processors =====================================

# Configure processors to enhance or manipulate events generated by the beat.

processors:
  #- add_host_metadata: ~
  #- add_cloud_metadata: ~
  # Drop Filebeat metadata fields that are not needed downstream.
  - drop_fields:
      fields: ["beat", "input", "source", "offset", "prospector"]

#================================ Logging =====================================

# Sets log level. The default log level is info.
# Available log levels are: error, warning, info, debug
#logging.level: debug

# At debug level, you can selectively enable logging only for some components.
# To enable all selectors use ["*"]. Examples of other selectors are "beat",
# "publish", "service".
#logging.selectors: ["*"]

#============================== Xpack Monitoring ===============================
# filebeat can export internal metrics to a central Elasticsearch monitoring
# cluster.  This requires xpack monitoring to be enabled in Elasticsearch.  The
# reporting is disabled by default.

# Set to true to enable the monitoring reporter.
#xpack.monitoring.enabled: false

# Uncomment to send the metrics to Elasticsearch. Most settings from the
# Elasticsearch output are accepted here as well. Any setting that is not set is
# automatically inherited from the Elasticsearch output configuration, so if you
# have the Elasticsearch output configured, you can simply uncomment the
# following line.
#xpack.monitoring.elasticsearch:
```

Logstash Pipeline Configuration

```conf
# Sample Logstash configuration for creating a simple
# Beats -> Logstash -> Elasticsearch pipeline.

# Input
input {
    # Filebeat input
    beats {
        port => 5044
    }
    # JDBC input; MySQL is used here
    jdbc {
        # Tag these events so they can be selected by type in the output below
        type => "api_call_logs"
        # MySQL connection string; API_ROUTER_LOGS is the database name
        jdbc_connection_string => "jdbc:mysql://10.128.18.47:3306/API_ROUTER_LOGS"
        # Username and password
        jdbc_user => "root"
        jdbc_password => "root"
        # JDBC driver jar
        jdbc_driver_library => "E:/ELK/logstash-6.6.1/mysql/mysql-connector-java-5.1.45.jar"
        # Driver class name
        jdbc_driver_class => "com.mysql.jdbc.Driver"
        # Enable paged queries
        jdbc_paging_enabled => "true"
        # 50000 rows per page
        jdbc_page_size => "50000"
        # Path and name of the SQL file to execute
        statement_filepath => "E:/ELK/logstash-6.6.1/mysql/mysql.sql"
        # Polling schedule in cron syntax; the five fields are (left to right)
        # minute, hour, day of month, month, day of week; all * means every minute
        schedule => "* * * * *"
        # Track a column value instead of the last run time
        use_column_value => true
        # The column to track: id
        tracking_column => "id"
        # Remember the last id value, so MySQL data is fetched incrementally
        record_last_run => true
        # Where the last id value is stored
        last_run_metadata_path => "E:/ELK/logstash-6.6.1/mysql/record.txt"
    }
}
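
# The mysql.sql file referenced above is not shown in this post. With
# use_column_value and tracking_column => "id", an incremental query would
# typically filter on :sql_last_value, e.g. (table and column names are
# assumptions):
#   SELECT * FROM api_call_log WHERE id > :sql_last_value ORDER BY id ASC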

# Filter
filter {
    if [fields][log_topic] == "nginx_access_logs" {
        # Pattern matching
        grok {
            match => { "message" => "%{IPORHOST:remote_ip} - %{DATA:user_name} \[%{HTTPDATE:access_time}\] \"%{WORD:http_method} %{DATA:request_url} HTTP/%{NUMBER:http_version}\" %{NUMBER:response_code} %{NUMBER:body_sent_bytes} \"%{DATA:referrer}\" \"%{DATA:user_agent}\" \"%{DATA:forwarded_for}\"" }
        }
        # access_time is captured as HTTPDATE (e.g. 19/Apr/2019:10:23:45 +0800),
        # so parse it with the matching joda pattern
        date {
            match => [ "access_time", "dd/MMM/yyyy:HH:mm:ss Z" ]
        }
    }
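    # For reference, the grok above matches nginx's default "combined" log
    # format plus a trailing X-Forwarded-For field; a sample line (values are
    # made up):
    # 192.168.1.10 - - [19/Apr/2019:10:23:45 +0800] "GET /index.html HTTP/1.1" 200 612 "-" "Mozilla/5.0" "-"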
	if [fields][log_topic] == "tomcat_access_logs"{
		# 由于访问日志文件已经是json格式,所以这里解码出来即可
		json {
			source => "message"     
		}
		# 时间字段转换,然后赋值给@timestamp字段
		date {
			match => [ "access time" , "[dd/MMM/yyyy:HH:mm:ss Z]" ] 
		}
		# 删除不需要的字段
		mutate {
			remove_field => "@version"      
			remove_field => "message" 
		}
	}
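    # The JSON-formatted access log assumed above is typically produced by an
    # AccessLogValve in Tomcat's server.xml; a hedged sketch (the exact pattern
    # is an assumption; only the "access time" field name and the bracketed %t
    # date format are implied by the json/date filters above):
    # <Valve className="org.apache.catalina.valves.AccessLogValve"
    #        directory="logs" prefix="localhost_access." suffix=".log"
    #        pattern='{"access time":"%t","method":"%m","url":"%U","status":"%s"}'/>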
    # Branch on log_topic so each log type gets its own filtering and parsing;
    # this branch handles the catalina.out file
    if [fields][log_topic] == "tomcat_catalina_logs" {
        grok {
            match => { "message" => "%{TIMESTAMP_ISO8601:access_time}\s+\[(?<loglevel>[\s\S]*)\]\s+\[%{DATA:exception_info}\](?<tomcatcontent>[\s\S]*)" }
        }
        # access_time is captured as TIMESTAMP_ISO8601
        date {
            match => [ "access_time", "ISO8601" ]
        }
        mutate {
            remove_field => "@version"
            remove_field => "message"
            remove_field => "access_time"
        }
    }
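    # A catalina log line matching the grok above would look like (sample
    # values are made up):
    # 2019-04-19 10:23:45,123 [ERROR] [org.apache.catalina.core.StandardWrapperValve] Servlet.service() threw exception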
}

# Output to ES
output {
    if [fields][log_topic] == "nginx_access_logs" {
        elasticsearch {
            hosts => ["http://localhost:9200"]
            index => "nginx_access_logs-%{+YYYY.MM.dd}"
        }
    }
    if [fields][log_topic] == "tomcat_access_logs" {
        elasticsearch {
            hosts => ["http://localhost:9200"]
            index => "tomcat_access_logs-%{+YYYY.MM.dd}"
        }
    }
    if [fields][log_topic] == "tomcat_catalina_logs" {
        elasticsearch {
            hosts => ["http://localhost:9200"]
            index => "tomcat_catalina_logs-%{+YYYY.MM.dd}"
        }
    }

    if [type] == "api_call_logs" {
        elasticsearch {
            # ES address and port
            hosts => ["http://localhost:9200"]
            # Index name
            index => "es_test_final8"
            document_type => "logger"
            # Use the tracked id column as the document id so re-runs update
            # documents instead of duplicating them
            document_id => "%{id}"
        }
    }
}
```

# ELK Quick Start Guide

#### ELK Overview

ELK is the combined acronym of three products from Elastic: [ElasticSearch](https://www.elastic.co/products/elasticsearch), [Logstash](https://www.elastic.co/products/logstash), and [Kibana](https://www.elastic.co/products/kibana).

[ElasticSearch](https://www.elastic.co/products/elasticsearch) is an open-source, distributed, RESTful search engine built on [Lucene](http://lucene.apache.org/core/documentation.html).

[Logstash](https://www.elastic.co/products/logstash) transports and processes your logs, transactions, and other data.

[Kibana](https://www.elastic.co/products/kibana) analyzes Elasticsearch data and renders it as visual reports.

Together, the ELK stack delivers log collection, log search, and log analysis in a single solution.

#### Installing ELK on Windows

This project uses the ELK 6.6.1 packages. Extract the prepared ELK archive to a drive on the target server; this guide assumes ELK was extracted to the **ELK folder on drive E**, i.e. **E:/ELK**. The components are started from **cmd**.

##### Elasticsearch startup command

```bash
E:\>cd ELK
E:\ELK>cd elasticsearch-6.6.1
E:\ELK\elasticsearch-6.6.1>cd bin
E:\ELK\elasticsearch-6.6.1\bin>elasticsearch
```

##### Logstash startup command

```bash
E:\>cd ELK
E:\ELK>cd logstash-6.6.1
E:\ELK\logstash-6.6.1>cd bin
E:\ELK\logstash-6.6.1\bin>logstash -f ../mysql/mysql.conf
```

##### Kibana startup command

```bash
E:\>cd ELK
E:\ELK>cd kibana-6.6.1-windows-x86_64
E:\ELK\kibana-6.6.1-windows-x86_64>cd bin
E:\ELK\kibana-6.6.1-windows-x86_64\bin>kibana.bat
```

##### Filebeat startup command

```bash
filebeat -e -c filebeat.yml
```

The mysql.conf pipeline passed to Logstash above is the full Logstash configuration shown earlier in this post.
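
Once Elasticsearch, Logstash, Kibana, and Filebeat are all running, you can sanity-check the pipeline from another cmd window. A minimal sketch, assuming curl is available on the machine (the index names are the ones configured in the Logstash output above):

```bash
curl http://localhost:9200
curl "http://localhost:9200/_cat/indices?v"
```

The nginx_access_logs-*, tomcat_access_logs-*, and tomcat_catalina_logs-* indices should appear once the first events arrive.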

