Nginx configuration file: nginx.conf
cd /usr/local/openresty/nginx/conf
vi nginx.conf
worker_processes 1;

events {
    worker_connections 1024;
}

http {
    include mime.types;
    default_type application/octet-stream;
    sendfile on;
    keepalive_timeout 65;

    # Shared dictionary: 10M of memory shared by all nginx workers,
    # consumed by controller.lua as a cross-worker counter store.
    lua_shared_dict shared_data 10m;

    # Local DNS resolver for name lookups issued from nginx/Lua.
    resolver 127.0.0.1;

    server {
        listen 80;
        server_name localhost;

        #charset koi8-r;
        #access_log logs/host.access.log main;

        location / {
            #root html;
            #index index.html index.htm;

            # Enable nginx basic status monitoring.
            stub_status on;

            # Hand every request on this location to the Lua collector.
            default_type text/html;
            content_by_lua_file /usr/local/openresty/nginx/conf/controller.lua;
        }

        error_page 500 502 503 504 /50x.html;
        location = /50x.html {
            root html;
        }
    }
}
vi controller.lua
-- Collection threshold: if the active-connection count exceeds this
-- value, the Lua collector skips the request (overload protection).
local DEFAULT_THRESHOLD=100000
-- Number of Kafka partitions for the target topic.
local PARTITION_NUM=6
-- Kafka topic name.
local TOPIC='B2CDATA_COLLECTION3'
-- Shared-dictionary key for the round-robin partition counter.
local POLLING_KEY="POLLING_KEY"
-- Custom Kafka partitioner: the message key is already the target
-- partition id (computed below as counter % PARTITION_NUM), so simply
-- convert it to a number.
-- @param key string  numeric partition id as a string
-- @param num number  total partition count (unused)
-- @param correlation_id number  request correlation id (unused)
-- @return number  partition id
local function partitioner(key, num, correlation_id)
    return tonumber(key)
end
-- Kafka broker list (each host must match the broker's host.name
-- setting). NOTE: the original listing had the third host string split
-- across two lines, embedding a raw newline — a Lua syntax error.
local BROKER_LIST = {
    { host = "192.168.72.110", port = 9092 },
    { host = "192.168.72.120", port = 9092 },
    { host = "192.168.72.130", port = 9092 },
}

-- Producer options: async batching, 30s socket timeout, 10s flush
-- interval, 20s request timeout, and the key-based partitioner above.
local CONNECT_PARAMS = {
    producer_type = "async",
    socket_timeout = 30000,
    flush_time = 10000,
    request_timeout = 20000,
    partitioner = partitioner,
}
-- Cross-worker counter (shared dict) used to round-robin messages over
-- the Kafka partitions. Initialize it on first use.
local shared_data = ngx.shared.shared_data
local pollingVal = shared_data:get(POLLING_KEY)
if not pollingVal then
    pollingVal = 1
    shared_data:set(POLLING_KEY, pollingVal)
end

-- Pick this message's partition as counter % PARTITION_NUM (as a string
-- key for the partitioner), then advance the counter.
-- NOTE(review): get + incr is not atomic across workers, so two requests
-- may occasionally land on the same partition — acceptable for balancing.
-- In the original listing these two statements were fused into the
-- comment line above them, so `partitions` was never actually defined.
local partitions = '' .. (tonumber(pollingVal) % PARTITION_NUM)
shared_data:incr(POLLING_KEY, 1)
-- Overload protection: if the number of currently active connections
-- exceeds the threshold, disable collection for this request.
-- The original `localisGone=true` created a stray GLOBAL named
-- `localisGone`, leaving `isGone` undefined — fixed to a proper local.
local isGone = true
if tonumber(ngx.var.connections_active) > tonumber(DEFAULT_THRESHOLD) then
    isGone = false
end
-- Data collection: capture request attributes and ship them to Kafka.
if isGone then
    -- Read an nginx variable, defaulting to "" when it is absent, so the
    -- "#CS#"-joined record always has every field position filled.
    local function var_or_empty(name)
        local v = ngx.var[name]
        if v == nil then
            return ""
        end
        return v
    end

    local time_local = var_or_empty("time_local")
    local request = var_or_empty("request")
    local request_method = var_or_empty("request_method")
    local content_type = var_or_empty("content_type")

    -- The request body is only populated after it has been read.
    ngx.req.read_body()
    local request_body = var_or_empty("request_body")

    local http_referer = var_or_empty("http_referer")
    local remote_addr = var_or_empty("remote_addr")
    local http_user_agent = var_or_empty("http_user_agent")
    local time_iso8601 = var_or_empty("time_iso8601")
    local server_addr = var_or_empty("server_addr")
    local http_cookie = var_or_empty("http_cookie")

    -- Assemble the record: fields joined by the "#CS#" separator that the
    -- downstream consumer expects, in this fixed order.
    local message = table.concat({
        time_local, request, request_method, content_type, request_body,
        http_referer, remote_addr, http_user_agent, time_iso8601,
        server_addr, http_cookie,
    }, "#CS#")

    -- Create a producer and send asynchronously to the chosen partition
    -- (`partitions` is the round-robin partition id computed above).
    local producer = require "resty.kafka.producer"
    local bp = producer:new(BROKER_LIST, CONNECT_PARAMS)
    local ok, err = bp:send(TOPIC, partitions, message)
    if not ok then
        -- Log and bail out; the request itself is not failed.
        ngx.log(ngx.ERR, "kafka send err:", err)
        return
    end
end
3469

被折叠的 条评论
为什么被折叠?



