Problem to solve: configure Elasticsearch in the ELK logging stack for external (public network) access, so that Logstash instances running on different servers can push the logs they collect into the same Elasticsearch on one server.
Additional ELK configuration
1. Configure external access for Elasticsearch:
Node 1:
# ================= Elasticsearch Configuration ===================
# Cluster name; it must be identical on every node in the cluster.
cluster.name: my-esCluster
# Node name; it must be unique within the cluster.
node.name: node0
# Whether this node may act as a master node: true = yes, false = no
node.master: true
# Whether this node stores data: true = yes, false = no
node.data: true
# Location of the index data
#path.data: /opt/elasticsearch/data
# Location of the log files
#path.logs: /opt/elasticsearch/logs
# Lock physical memory (prevent swapping): true = yes, false = no
#bootstrap.memory_lock: true
# Bind/listen address used to access this ES node; 0.0.0.0 makes it reachable from other hosts
network.host: 0.0.0.0
# HTTP port exposed by Elasticsearch, default 9200
http.port: 9200
# Transport (TCP) port, default 9300
transport.tcp.port: 9300
# Ensures each node can see this many other master-eligible nodes. Default is 1; for larger clusters use a higher value (2-4)
discovery.zen.minimum_master_nodes: 2
# New in ES 7.x: addresses of the master-eligible candidate nodes; they can be elected master once the service is started
discovery.seed_hosts: ["0.0.0.0:9300", "0.0.0.0:9301", "0.0.0.0:9302"]
discovery.zen.fd.ping_timeout: 1m
discovery.zen.fd.ping_retries: 5
# New in ES 7.x: required to elect a master when bootstrapping a new cluster
cluster.initial_master_nodes: ["node0", "node1", "node2"]
# Enable CORS (true); required when using the elasticsearch-head plugin
http.cors.enabled: true
# "*" allows all origins
http.cors.allow-origin: "*"
Node 2:
# ================= Elasticsearch Configuration ===================
# Cluster name; it must be identical on every node in the cluster.
cluster.name: my-esCluster
# Node name; it must be unique within the cluster.
node.name: node1
# Whether this node may act as a master node: true = yes, false = no
node.master: true
# Whether this node stores data: true = yes, false = no
node.data: true
# Location of the index data
#path.data: /opt/elasticsearch/data
# Location of the log files
#path.logs: /opt/elasticsearch/logs
# Lock physical memory (prevent swapping): true = yes, false = no
#bootstrap.memory_lock: true
# Bind/listen address used to access this ES node; 0.0.0.0 makes it reachable from other hosts
network.host: 0.0.0.0
# HTTP port exposed by Elasticsearch, default 9200
http.port: 9201
# Transport (TCP) port, default 9300
transport.tcp.port: 9301
# Ensures each node can see this many other master-eligible nodes. Default is 1; for larger clusters use a higher value (2-4)
discovery.zen.minimum_master_nodes: 2
# New in ES 7.x: addresses of the master-eligible candidate nodes; they can be elected master once the service is started
discovery.seed_hosts: ["0.0.0.0:9300", "0.0.0.0:9301", "0.0.0.0:9302"]
discovery.zen.fd.ping_timeout: 1m
discovery.zen.fd.ping_retries: 5
# New in ES 7.x: required to elect a master when bootstrapping a new cluster
cluster.initial_master_nodes: ["node0", "node1", "node2"]
# Enable CORS (true); required when using the elasticsearch-head plugin
http.cors.enabled: true
# "*" allows all origins
http.cors.allow-origin: "*"
Node 3:
# ================= Elasticsearch Configuration ===================
# Cluster name; it must be identical on every node in the cluster.
cluster.name: my-esCluster
# Node name; it must be unique within the cluster.
node.name: node2
# Whether this node may act as a master node: true = yes, false = no
node.master: true
# Whether this node stores data: true = yes, false = no
node.data: true
# Location of the index data
#path.data: /opt/elasticsearch/data
# Location of the log files
#path.logs: /opt/elasticsearch/logs
# Lock physical memory (prevent swapping): true = yes, false = no
#bootstrap.memory_lock: true
# Bind/listen address used to access this ES node; 0.0.0.0 makes it reachable from other hosts
network.host: 0.0.0.0
# HTTP port exposed by Elasticsearch, default 9200
http.port: 9202
# Transport (TCP) port, default 9300
transport.tcp.port: 9302
# Ensures each node can see this many other master-eligible nodes. Default is 1; for larger clusters use a higher value (2-4)
discovery.zen.minimum_master_nodes: 2
# New in ES 7.x: addresses of the master-eligible candidate nodes; they can be elected master once the service is started
discovery.seed_hosts: ["0.0.0.0:9300", "0.0.0.0:9301", "0.0.0.0:9302"]
discovery.zen.fd.ping_timeout: 1m
discovery.zen.fd.ping_retries: 5
# New in ES 7.x: required to elect a master when bootstrapping a new cluster
cluster.initial_master_nodes: ["node0", "node1", "node2"]
# Enable CORS (true); required when using the elasticsearch-head plugin
http.cors.enabled: true
# "*" allows all origins
http.cors.allow-origin: "*"
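To confirm that a node really is reachable from another server (assuming port 9200 is open in the firewall / security group), a minimal check can be made over plain HTTP against the standard _cluster/health endpoint. The sketch below is only illustrative; xx.xx.xx.xx is a placeholder for the Elasticsearch server's public IP.
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

public class EsHealthCheck {
    public static void main(String[] args) throws Exception {
        // xx.xx.xx.xx stands for the public IP of the Elasticsearch server (placeholder)
        URL url = new URL("http://xx.xx.xx.xx:9200/_cluster/health");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setConnectTimeout(5000);
        conn.setReadTimeout(5000);
        try (BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream()))) {
            String line;
            while ((line = reader.readLine()) != null) {
                // a healthy cluster answers with JSON containing cluster_name "my-esCluster" and a status field
                System.out.println(line);
            }
        }
    }
}
If the request times out from the remote machine but works locally, the problem is usually the firewall or security-group rules rather than the Elasticsearch configuration above.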
2. Configure the Elasticsearch address that Logstash pushes to
Relevant configuration:
input {
  # stdin { }
  tcp {
    # host:port is the destination configured in the logback appender;
    # Logstash acts as the server here, listening on port 9250 for the messages logback sends
    # this must be set to an IP of the local machine, otherwise Logstash will not start
    host => "127.0.0.1"
    # port number
    port => 9250
    mode => "server"
    tags => ["tags"]
    # read the incoming log events as JSON
    codec => json_lines
  }
}
filter {
  # among INFO-level logs, keep only those whose message contains "logInfo切面日志"; everything else is dropped
  if ([logLevel] =~ "INFO") {
    if ("logInfo切面日志" in [message]) {
    } else {
      drop {}
    }
  }
  grok {
    match => [
      "message", "%{NOTSPACE:tag}[T ]%{NOTSPACE:method}[T ]%{NOTSPACE:api}[T ]%{NOTSPACE:params}",
      "message", "%{NOTSPACE:tag}[T ]%{NOTSPACE:author}[T ]%{NOTSPACE:msg}"
    ]
  }
}
output {
  elasticsearch {
    hosts => ["xx.xx.xx.xx:9200"]
    index => "logstash-test-%{+YYYY.MM.dd}"
    action => "index"
    template => "C:/install/elk/logstash-7.10.2-windows-x86_64/logstash-7.10.2/config/logstash-test-.json"
    template_name => "logstash-test-"
    manage_template => true
    template_overwrite => true
  }
  stdout { codec => rubydebug }
}
3. Configure log filtering in Logstash
① Keep only the logs that meet certain conditions
② Use grok patterns to extract useful fields from the logs:
3.2.1 Grok regex capture
Grok is a very powerful Logstash filter plugin. It parses arbitrary text with regular expressions and turns unstructured log data into a structured, easily queryable form. It is currently the best way to parse unstructured log data in Logstash.
The grok syntax rule is:
%{SYNTAX:SEMANTIC}
"SYNTAX" is the name of the matching pattern: for example the NUMBER pattern matches digits, and the IP pattern matches an IP address such as 127.0.0.1. "SEMANTIC" is the field name the matched text is stored under.
An example:
In our case the message is the LogOperation.toString() string logged by the aspect in section 4 below (see the Javadoc above toString() in LogOperation.java); the grok patterns in the filter above split it into the tag, method, api and params fields. A rough Java analogue of this kind of named capture is sketched below.
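Under the hood a grok pattern is essentially a named regular-expression capture, so the %{NOTSPACE:field} idea can be illustrated with plain Java named groups. This is only an illustration; the input line and field names below are made up and are not the real aspect log format.
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class GrokLikeCapture {
    public static void main(String[] args) {
        // %{NOTSPACE:tag} in grok is roughly equivalent to the named group (?<tag>\S+)
        String message = "logInfoTag POST /api/user {\"id\":42}";
        Pattern p = Pattern.compile("(?<tag>\\S+)[T ](?<method>\\S+)[T ](?<api>\\S+)[T ](?<params>\\S+)");
        Matcher m = p.matcher(message);
        if (m.matches()) {
            System.out.println("tag="    + m.group("tag"));
            System.out.println("method=" + m.group("method"));
            System.out.println("api="    + m.group("api"));
            System.out.println("params=" + m.group("params"));
        }
    }
}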
4. Files in the application that use an aspect to capture the user's operations:
① LogAspect.java
package com.sinoccdc.devops;
import com.alibaba.fastjson.JSON;
import com.sinoccdc.devops.annotation.OperationLog;
import com.sinoccdc.devops.domain.model.common.BeanDiff;
import com.sinoccdc.devops.domain.model.common.OperationLogTypeEnum;
import com.sinoccdc.devops.domain.model.security.user.User;
import com.sinoccdc.devops.utils.common.BeanCompareUtils;
import com.sinoccdc.devops.web.apis.common.SecurityController;
import lombok.extern.slf4j.Slf4j;
import io.swagger.annotations.Api;
import io.swagger.annotations.ApiImplicitParam;
import io.swagger.annotations.ApiOperation;
import org.apache.commons.lang3.StringUtils;
import org.aspectj.lang.ProceedingJoinPoint;
import org.aspectj.lang.annotation.Around;
import org.aspectj.lang.annotation.Aspect;
import org.aspectj.lang.reflect.MethodSignature;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
import org.springframework.web.context.request.RequestContextHolder;
import org.springframework.web.context.request.ServletRequestAttributes;
import org.springframework.web.multipart.MultipartFile;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.servlet.http.HttpSession;
import java.io.IOException;
import java.lang.reflect.Method;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
/**
 * @author syl
 * AOP implementation that records the operating user, the request, the response and related information
 */
@Aspect
@Component
@Slf4j
public class LogAspect {
@ApiOperation(value = "AOP实现,记录操作用户信息、请求返回等信息", notes = "AOP实现,记录操作用户信息、请求返回等信息")
@Around("execution(* com.sinoccdc.devops.web.apis..*(..))")
public Object insertOperationLog(ProceedingJoinPoint joinPoint) throws Throwable {
HttpServletRequest request = ((ServletRequestAttributes) RequestContextHolder.getRequestAttributes()).getRequest();
long start = System.currentTimeMillis();
MethodSignature signature = (MethodSignature) joinPoint.getSignature();
Method method = signature.getMethod();
//OperationLog opLog = method.getAnnotation(OperationLog.class);
LogOperation operationLog = new LogOperation();
Date now = new Date(); // create a Date object holding the current time
// specify the formatting pattern
SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
operationLog.setCreateTime(now);
operationLog.setOpCreateTime(sdf.format(now));
//look up the user's name and ID
User loginUser = SecurityController.getLoginUser();
if (loginUser != null && loginUser.getLoginId() != null) {
operationLog.setOpUserAccount(loginUser.getLoginId());
operationLog.setOpUserName(loginUser.getUsername());
}
HttpSession session = request.getSession();
//get the user's IP address
HttpServletRequest request1 = ((ServletRequestAttributes) RequestContextHolder.getRequestAttributes())
.getRequest();
String ipAddress = getIPAddress1(request1);
operationLog.setOpIp(ipAddress);
//---------------- read the Swagger annotations and fill in the description of the intercepted method ---------------------
try {
//ApiOperation is the annotation that describes the method
ApiOperation apiOperation = method.getAnnotation(ApiOperation.class);
//ApiImplicitParam apiImplicitParam = method.getAnnotation(ApiImplicitParam.class);
if (StringUtils.isNotEmpty(apiOperation.value())) {
String value = apiOperation.value();
operationLog.setOpSubCategory(value);
}
if (StringUtils.isNotEmpty(apiOperation.notes())) {
String notes = apiOperation.notes();
operationLog.setOpDesc(notes);
}
}catch (Exception e){
}
try {
//the Api annotation holds the description used on the controller
Api api = method.getAnnotation(Api.class);
// Class<?> aClass = apiImplicitParam.dataTypeClass();
if (StringUtils.isNotEmpty(api.tags().toString())) {
operationLog.setOpCategory(api.tags().toString());
}
}catch (Exception e){
}
//---------------- read the Swagger annotations and fill in the description of the intercepted method ---------------------
//parameters from the Swagger annotations
// String type = apiImplicitParam.dataType();
// String opCatetory = opLog.category();
// String opSubcategory = opLog.subcategory();
// String opDesc = opLog.desc();
// OperationLogTypeEnum opType = opLog.type();
// operationLog.setOpCategory(opCatetory);
// operationLog.setOpSubCategory(opSubcategory);
// operationLog.setOpDesc(opDesc);
// operationLog.setOpType(opType.getType());
// operationLog.setTypeStr(opType.getDesc());
String className = joinPoint.getTarget().getClass().getName();
String methodName = method.getName();
methodName = className + "." + methodName;
operationLog.setOpMethod(methodName);
List<Object> objList = new ArrayList<>();
Object[] args = joinPoint.getArgs();
for (Object arg : args) {
if (arg instanceof HttpServletRequest
|| arg instanceof HttpServletResponse
|| arg instanceof MultipartFile) {
continue;
}
objList.add(arg);
}
String params = JSON.toJSONString(objList);
operationLog.setOpParam(params);
Object result;
Logger logger = LoggerFactory.getLogger(OperationLog.class);
try {
result = joinPoint.proceed();
operationLog.setOpResult(JSON.toJSONString(result));
} catch (Exception exception) {
operationLog.setOpResult(exception.getMessage());
//logger.error("错误日志aaa"+exception);
logger.error("错误日志"+exception.toString()+","+operationLog.toString());
//logger.error("错误日志堆栈信息ccc"+exception,exception);
String TraceString = exception.toString() +"\r\n";
logger.error("错误日志堆栈信息:"+TraceString+ExcpUtil.getStackTraceString(exception));
exception.printStackTrace();
throw exception;
} finally {
long end = System.currentTimeMillis();
long cost = (end - start);
operationLog.setOpCost(cost);
// record data changes
BeanDiff beanDiff = BeanCompareUtils.getBeanDiff();
if (beanDiff != null) {
operationLog.setOpType(OperationLogTypeEnum.UPDATE.getType());
operationLog.setOpExtend(JSON.toJSONString(beanDiff));
}
//save the data to the database
// operationLogService.insert(operationLog);
//here we only need to print the object / write it to the log
String log = operationLog.toString();
System.out.println("bbb测试日志的内容"+log);
//logger.info("logTestInfo_测试切面日志"+JacksonUtil.obj2json(operationLog));
logger.info("logInfo切面日志"+operationLog.toString());
}
return result;
}
/**
 * Get the requesting host's IP address; if the request came through a proxy, obtain the real IP address behind it.
 *
 * @param request
 * @return
 * @throws IOException
 */
public final static String getIpAddress(HttpServletRequest request)
throws IOException {
// get the requesting host's IP address; if it came through a proxy, obtain the real IP address behind the firewall
String ip = request.getHeader("X-Forwarded-For");
if (ip == null || ip.length() == 0 || "unknown".equalsIgnoreCase(ip)) {
if (ip == null || ip.length() == 0
|| "unknown".equalsIgnoreCase(ip)) {
ip = request.getHeader("Proxy-Client-IP");
}
if (ip == null || ip.length() == 0
|| "unknown".equalsIgnoreCase(ip)) {
ip = request.getHeader("WL-Proxy-Client-IP");
}
if (ip == null || ip.length() == 0
|| "unknown".equalsIgnoreCase(ip)) {
ip = request.getHeader("HTTP_CLIENT_IP");
}
if (ip == null || ip.length() == 0
|| "unknown".equalsIgnoreCase(ip)) {
ip = request.getHeader("HTTP_X_FORWARDED_FOR");
}
if (ip == null || ip.length() == 0
|| "unknown".equalsIgnoreCase(ip)) {
ip = request.getRemoteAddr();
}
} else if (ip.length() > 15) {
String[] ips = ip.split(",");
for (int index = 0; index < ips.length; index++) {
String strIp = ips[index];
if (!("unknown".equalsIgnoreCase(strIp))) {
ip = strIp;
break;
}
}
}
return ip;
}
public final static String getIPAddress1(HttpServletRequest request) {
String ip = null;
//X-Forwarded-For: Squid proxy
String ipAddresses = request.getHeader("X-Forwarded-For");
if (ipAddresses == null || ipAddresses.length() == 0 || "unknown".equalsIgnoreCase(ipAddresses)) {
//Proxy-Client-IP: Apache proxy
ipAddresses = request.getHeader("Proxy-Client-IP");
}
if (ipAddresses == null || ipAddresses.length() == 0 || "unknown".equalsIgnoreCase(ipAddresses)) {
//WL-Proxy-Client-IP: WebLogic proxy
ipAddresses = request.getHeader("WL-Proxy-Client-IP");
}
if (ipAddresses == null || ipAddresses.length() == 0 || "unknown".equalsIgnoreCase(ipAddresses)) {
//HTTP_CLIENT_IP: set by some proxy servers
ipAddresses = request.getHeader("HTTP_CLIENT_IP");
}
if (ipAddresses == null || ipAddresses.length() == 0 || "unknown".equalsIgnoreCase(ipAddresses)) {
//X-Real-IP: nginx proxy
ipAddresses = request.getHeader("X-Real-IP");
}
//with several layers of proxies there may be multiple IPs separated by commas; the first one is the client's real IP
if (ipAddresses != null && ipAddresses.length() != 0) {
ip = ipAddresses.split(",")[0];
}
//if the IP still cannot be determined, fall back to request.getRemoteAddr()
if (ip == null || ip.length() == 0 || "unknown".equalsIgnoreCase(ipAddresses)) {
ip = request.getRemoteAddr();
}
return ip.equals("0:0:0:0:0:0:0:1")?"127.0.0.1":ip;
}
}
② Utility class ExcpUtil.java, which prints the stack trace contained in an exception
package com.sinoccdc.devops;
public class ExcpUtil {
//print the exception stack trace
public static String getStackTraceString(Throwable ex){//(Exception ex) {
StackTraceElement[] traceElements = ex.getStackTrace();
StringBuilder traceBuilder = new StringBuilder();
if (traceElements != null && traceElements.length > 0) {
for (StackTraceElement traceElement : traceElements) {
traceBuilder.append(traceElement.toString());
traceBuilder.append("\n");
}
}
return traceBuilder.toString();
}
//build an error message including the stack trace
public static String buildErrorMessage(Exception ex) {
String result;
String stackTrace = getStackTraceString(ex);
String exceptionType = ex.toString();
String exceptionMessage = ex.getMessage();
result = String.format("%s : %s \r\n %s", exceptionType, exceptionMessage, stackTrace);
return result;
}
}
③ LogOperation.java, which stores the various fields of an operation record
package com.sinoccdc.devops;
import lombok.AllArgsConstructor;
import lombok.Getter;
import lombok.NoArgsConstructor;
import lombok.Setter;
import java.util.Date;
@Getter
@Setter
@AllArgsConstructor
@NoArgsConstructor
public class LogOperation {
//user name
String opUserName;
//backend method that was invoked
String opMethod;
//request path
String opUrl;
//operation parameters
String opParam;
//user account
String opUserAccount;
//operation category
String opCategory;
//operation sub-category
String opSubCategory;
//operation type: create, update, delete, query
Integer opType;
//textual description of the operation type
String typeStr;
//description
String opDesc;
//operation result
String opResult;
//time taken by the operation
Long opCost;
//request IP
String opIp;
//extended request field
String opExtend;
//creation time
Date createTime;
//formatted operation time
String opCreateTime;
/**
* Because the toString() method produces output in this format, the filter section of the Logstash config should be written as:
* grok {
* match => [
* "message","%{NOTSPACE:tag}[T ]%{NOTSPACE:method}[\n]%{NOTSPACE:api}[\n]%{NOTSPACE:params}",
* "message","%{NOTSPACE:tag}[T ]%{NOTSPACE:author}[T ]%{NOTSPACE:msg}"
* ]
* }
* @return
*/
@Override
public String toString() {
return "{" +
"用户名为:'" + opUserName + '\'' +
", 调用方法为:'" + opMethod + '\'' +
",\n请求路径为:'" + opUrl + '\'' +
",\n参数为:'" + opParam + '\'' +
",\n用户账户为:'" + opUserAccount + '\'' +
",\n请求的模块为:'" + opCategory + '\'' +
",\n请求的子模块为:'" + opSubCategory + '\'' +
// ", opType=" + opType +
// ", typeStr='" + typeStr + '\'' +
",\n请求描述为:'" + opDesc + '\'' +
",\n请求结果为:'" + opResult + '\'' +
",\n操作耗时为:" + opCost +
",\n请求IP为:'" + opIp + '\'' +
// ", opExtend='" + opExtend + '\'' +
// ",\n 请求时间为:" + createTime +
",\n请求时间为:'" + opCreateTime + '\'' +
'}';
}
}