How to Build a Complete Backend Management System (Tutorial and Source Code Included)

This article walks through building a complete backend management system on Linux: installing and configuring ELK (Elasticsearch, Logstash, Kibana), Kafka, and Tomcat; collecting logs with Filebeat; proxying with Nginx; visualizing in Kibana; and creating automated monitoring scripts. It also covers the data flow, index management, and a command reference.


I. Environment Preparation

1. Install the Java environment:

yum install java-1.8.0-openjdk* -y

2. Add an elk user to run the services:

groupadd -g 77 elk
useradd -u 77 -g elk -d /home/elk -s /bin/bash elk

3. Append the following to /etc/security/limits.conf:

elk soft memlock unlimited
elk hard memlock unlimited

* soft nofile 65536
* hard nofile 131072

4. Apply the changes

The limits.conf entries take effect on the next login session. If kernel parameters in /etc/sysctl.conf were also changed, reload them with:

sysctl -p
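Two follow-ups worth noting: you can verify the limits for the elk user after a fresh login, and Elasticsearch 5.x documents a minimum vm.max_map_count of 262144 (enforced as a bootstrap check when binding to a non-loopback address; this guide binds to 127.0.0.1, so setting it is precautionary). A minimal sketch; the sysctl addition is an assumption, not part of the original guide:

# verify the elk user's limits after a fresh login
su - elk -c "ulimit -l -n"
# precautionary kernel setting documented by Elasticsearch (assumed addition)
echo "vm.max_map_count=262144" >> /etc/sysctl.conf
sysctl -p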

5. Configure the hostname

hostnamectl set-hostname monitor-elk
echo "10.135.3.135 monitor-elk" >> /etc/hosts

II. Service Deployment

1. Server side:

1) Download the ELK-related packages:

wget "https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-5.2.2.tar.gz"
wget "https://artifacts.elastic.co/downloads/logstash/logstash-5.2.2.tar.gz"
wget "https://artifacts.elastic.co/downloads/kibana/kibana-5.2.2-linux-x86_64.tar.gz"
wget "http://mirror.bit.edu.cn/apache/kafka/0.10.2.0/kafka_2.12-0.10.2.0.tgz"
wget "http://mirror.bit.edu.cn/apache/zookeeper/zookeeper-3.4.9/zookeeper-3.4.9.tar.gz"
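The move step in the next section also expects a filebeat directory, whose tarball is missing from this list; presumably it was fetched the same way (the URL follows the standard Elastic artifacts layout for 5.2.2):

wget "https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-5.2.2-linux-x86_64.tar.gz"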

2) Create the elk directory and extract the above packages into it:

mkdir /usr/local/elk
mkdir -p /data/elasticsearch/
chown -R elk.elk /data/elasticsearch/
mkdir -p /data/{kafka,zookeeper}
mv logstash-5.2.2 logstash && mv kibana-5.2.2-linux-x86_64 kibana && mv elasticsearch-5.2.2 elasticsearch && mv filebeat-5.2.2-linux-x86_64 filebeat && mv kafka_2.12-0.10.2.0 kafka && mv zookeeper-3.4.9 zookeeper
chown -R elk.elk /usr/local/elk/
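Note that the mv commands above assume the tarballs have already been extracted into /usr/local/elk; the extraction itself is not shown. A minimal sketch, assuming the tarballs were downloaded to root's home directory, to run before the mv step:

cd /usr/local/elk
tar xzf ~/elasticsearch-5.2.2.tar.gz
tar xzf ~/logstash-5.2.2.tar.gz
tar xzf ~/kibana-5.2.2-linux-x86_64.tar.gz
tar xzf ~/filebeat-5.2.2-linux-x86_64.tar.gz
tar xzf ~/kafka_2.12-0.10.2.0.tgz
tar xzf ~/zookeeper-3.4.9.tar.gz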

The resulting program directory layout:

/usr/local/elk/
├── elasticsearch
├── filebeat
├── kafka
├── kibana
├── logstash
└── zookeeper

3) Modify the configuration files of the following programs

①kibana:

[root@monitor-elk ~]# cat /usr/local/elk/kibana/config/kibana.yml | grep -Ev "^#|^$"
server.host: "localhost"
elasticsearch.url: "http://localhost:9200"
elasticsearch.requestTimeout: 30000
logging.dest: /data/elk/logs/kibana.log
[root@monitor-elk ~]#
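Both the Kibana and Elasticsearch configurations write their logs under /data/elk/logs, which none of the earlier steps create; presumably it needs to exist and be writable by the elk user:

mkdir -p /data/elk/logs
chown -R elk.elk /data/elk/logs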

②elasticsearch:

[root@monitor-elk ~]# cat /usr/local/elk/elasticsearch/config/elasticsearch.yml | grep -Ev "^#|^$"
node.name: node01
path.data: /data/elasticsearch/data
path.logs: /data/elk/logs/elasticsearch
bootstrap.memory_lock: true
network.host: 127.0.0.1
http.port: 9200
[root@monitor-elk ~]# vim /usr/local/elk/elasticsearch/config/jvm.options
# modify the following parameters
-Xms1g
-Xmx1g
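bootstrap.memory_lock depends on the memlock limits configured in part I; once Elasticsearch is running, the standard _nodes API can confirm the lock actually took effect (filter_path just trims the response):

curl -s "http://127.0.0.1:9200/_nodes?filter_path=**.mlockall"
# a healthy node reports "mlockall" : true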

③logstash:

[root@monitor-elk ~]# cat /usr/local/elk/logstash/config/logs.yml

input {
    # use Kafka as the log data source
    kafka {
        bootstrap_servers => ["127.0.0.1:9092"]
        topics => ["beats"]
        codec => json
    }
}

filter {
    # drop any event whose message contains this IP address
    if [message] =~ "123.151.4.10" {
        drop {}
    }

    # URL-decode all fields so encoded content (e.g. Chinese characters) becomes readable
    urldecode {
        all_fields => true
    }

    # nginx access

    # route on the type field to decide how to parse the incoming log
    if [type] == "hongbao-nginx-access" or [type] == "pano-nginx-access" or [type] == "logstash-nginx-access" {
        grok {
            # directory holding the custom grok patterns
            patterns_dir => "./patterns"
            # parse the log line into separate fields with the custom pattern
            match => { "message" => "%{NGINXACCESS}" }
            # drop the raw message field once it has been parsed
            remove_field => ["message"]
        }
        # resolve the client IP address against the geoip database
        geoip {
            # field to use as the lookup source
            source => "clientip"
            fields => ["country_name", "ip", "region_name"]
        }
        date {
            # match the timestamp inside the log line, e.g. 05/Jun/2017:03:54:01 +0800
            match => ["timestamp", "dd/MMM/yyyy:HH:mm:ss Z"]
            # assign the matched time to the @timestamp field
            target => "@timestamp"
            remove_field => ["timestamp"]
        }
    }

    # tomcat access

    if [type] == "hongbao-tomcat-access" or [type] == "ljq-tomcat-access" {
        grok {
            patterns_dir => "./patterns"
            match => { "message" => "%{TOMCATACCESS}" }
            remove_field => ["message"]
        }
        geoip {
            source => "clientip"
            fields => ["country_name", "ip", "region_name"]
        }
        date {
            match => ["timestamp", "dd/MMM/yyyy:HH:mm:ss Z"]
            target => "@timestamp"
            remove_field => ["timestamp"]
        }
    }

    # tomcat catalina

    if [type] == "hongbao-tomcat-catalina" {
        grok {
            match => {
                "message" => "^(?<log_time>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3}) (?<log_level>\w*) (?<log_data>.+)"
            }
            remove_field => ["message"]
        }
        date {
            match => ["log_time", "yyyy-MM-dd HH:mm:ss,SSS"]
            target => "@timestamp"
            remove_field => ["log_time"]
        }
    }

}

output {

# write records that failed grok parsing to a dedicated file
if "_grokparsefailure" in [tags] {
    file {
         path => "/data/elk/logs/grokparsefailure-%{[type]}-%{+YYYY.MM}.log"
    }
}

# nginx access

# route on the type field so each log type goes to its own elasticsearch index
if [type] == "hongbao-nginx-access" {
        # ship the processed result to elasticsearch
    elasticsearch {
        hosts => ["127.0.0.1:9200"]
        # write to the index for the current day
        index => "hongbao-nginx-access-%{+YYYY.MM.dd}"
    }
}

if [type] == "pano-nginx-access" {
    elasticsearch {
        hosts => ["127.0.0.1:9200"]
        index => "pano-nginx-access-%{+YYYY.MM.dd}"
    }
}

if [type] == "logstash-nginx-access" {
    elasticsearch {
        hosts => ["127.0.0.1:9200"]
        index => "logstash-nginx-access-%{+YYYY.MM.dd}"
    }
}

# tomcat access

if [type] == "hongbao-tomcat-access" {
    elasticsearch {
        hosts => ["127.0.0.1:9200"]
        index => "hongbao-tomcat-access-%{+YYYY.MM.dd}"
    }
}

if [type] == "ljq-tomcat-access" {
    elasticsearch {
        hosts => ["127.0.0.1:9200"]
        index => "ljq-tomcat-access-%{+YYYY.MM.dd}"
    }
}

# tomcat catalina

if [type] == "hongbao-tomcat-catalina" {
    elasticsearch {
        hosts => ["127.0.0.1:9200"]
        index => "hongbao-tomcat-catalina-%{+YYYY.MM.dd}"
    }
}

}

[root@monitor-elk ~]#
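Before running Logstash against live traffic, it is worth validating the pipeline syntax; Logstash 5.x provides a test-and-exit flag for exactly this:

/usr/local/elk/logstash/bin/logstash -f /usr/local/elk/logstash/config/logs.yml --config.test_and_exit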

Configure the custom grok patterns
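The filter section above references %{NGINXACCESS} and %{TOMCATACCESS} from patterns_dir => "./patterns", but the patterns file itself is cut off here. As a reference only, a minimal ./patterns entry for nginx's stock combined log format might look like the line below; the clientip and timestamp field names match what the geoip and date filters expect, and TOMCATACCESS would be defined along the same lines for the Tomcat access-log format in use. Adapt the pattern to your actual log format:

NGINXACCESS %{IPORHOST:clientip} - %{USERNAME:remote_user} \[%{HTTPDATE:timestamp}\] "%{WORD:verb} %{URIPATHPARAM:request} HTTP/%{NUMBER:httpversion}" %{NUMBER:response} %{NUMBER:bytes} "%{GREEDYDATA:referrer}" "%{GREEDYDATA:agent}"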