worker进程的入口是ngx_worker_process_cycle函数,它是worker进程循环执行的主体函数。
ngx_worker_process_cycle函数首先需要初始化进程,调用函数 ngx_worker_process_init(cycle, 1);
经过初始化后进入for(;;)循环,在for循环中,worker进程接受master进程发来的各种信号,并且调用事件收发处理函数
ngx_process_events_and_timers();
下面分析一下ngx_worker_process_init(cycle,1)函数
for (i = 0; ngx_modules[i]; i++) {
if (ngx_modules[i]->init_process) {
//进程初始化, 调用每个模块的init_process,用它做模块开发的时候,使用得挺少的
//这里要特别看的是event模块:
//nginx的event模块包含一个init_process,也就是ngx_event_process_init(ngx_event.c).
//这个函数就是nginx的驱动器,他初始化事件驱动器,连接池,定时器,以及挂在listen 句柄的回调函数
if (ngx_modules[i]->init_process(cycle) == NGX_ERROR) {
/* fatal */
exit(2);
}
}
}
// 这个循环用于关闭其他worker进程的无用channel资源
for (n = 0; n < ngx_last_process; n++) {
if (ngx_processes[n].pid == -1) { //n位置的进程不存在,这里是预防性的代码
continue;
}
//ngx_process_slot是创建worker进程的时候,从master进程复制过来的,此处就是指本worker进程在数组中的索引位置
if (n == ngx_process_slot) {
continue;
}
if (ngx_processes[n].channel[1] == -1) { // channel不存在,跳过
continue;
}
// 创建worker进程时,会将master的资源复制过来,因此需要关闭无用的channel -- 其他worker进程的读端描述符
if (close(ngx_processes[n].channel[1]) == -1) {
ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno,
"close() channel failed");
}
}
// 关闭本worker进程的channel的写端描述符。
if (close(ngx_processes[ngx_process_slot].channel[0]) == -1) {
ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno,
"close() channel failed");
}
#if 0
ngx_last_process = 0;
#endif
// 将channel放入nginx关心的集合中,同时关注channel上的读事件。
if (ngx_add_channel_event(cycle, ngx_channel, NGX_READ_EVENT,
ngx_channel_handler)
== NGX_ERROR)
{
/* fatal */
exit(2);
}
}
函数中要调用ngx_event_core_module中定义的init_process函数,下面是该模块的ngx_module_t结构体
/*
 * Module descriptor of the event core module.  Only two life-cycle hooks
 * are set: init_module (ngx_event_module_init, runs in the master after
 * configuration is parsed) and init_process (ngx_event_process_init, runs
 * in every worker after fork) -- the latter is the hook invoked from the
 * worker's per-module init_process loop.
 */
ngx_module_t ngx_event_core_module = {
NGX_MODULE_V1,
&ngx_event_core_module_ctx, /* module context */
ngx_event_core_commands, /* module directives */
NGX_EVENT_MODULE, /* module type */
NULL, /* init master */
ngx_event_module_init, /* init module */
ngx_event_process_init, /* init process */
NULL, /* init thread */
NULL, /* exit thread */
NULL, /* exit process */
NULL, /* exit master */
NGX_MODULE_V1_PADDING
};
分析一下ngx_event_process_init函数
/*
 * Per-worker initialization hook of the event core module (init_process).
 *
 * - decides whether the accept mutex is used (master mode, more than one
 *   worker, accept_mutex on) for thundering-herd avoidance and accept
 *   load balancing, and sets the related globals;
 * - initializes the timer red-black tree;
 * - runs actions.init of the event module selected by the "use"
 *   directive (e.g. the epoll module);
 * - allocates the per-worker connection pool and the parallel read/write
 *   event arrays, threading the connections into a free list;
 * - binds every listening socket to a connection whose read handler is
 *   ngx_event_accept and, unless the accept mutex defers this to
 *   ngx_trylock_accept_mutex(), registers it with the event mechanism.
 *
 * Returns NGX_OK, or NGX_ERROR on allocation/initialization failure;
 * exits the worker outright if the chosen event module fails to init.
 *
 * NOTE(review): the original excerpt contained an orphan #endif (the
 * matching "#if (NGX_WIN32)" branch had been elided); it is removed here
 * so the function is self-consistent.
 */
static ngx_int_t
ngx_event_process_init(ngx_cycle_t *cycle)
{
    ngx_uint_t           m, i;
    ngx_event_t         *rev, *wev;
    ngx_listening_t     *ls;
    ngx_connection_t    *c, *next;
    ngx_core_conf_t     *ccf;
    ngx_event_conf_t    *ecf;
    ngx_event_module_t  *module;

    /* fetch the core and event-core configuration structures */
    ccf = (ngx_core_conf_t *) ngx_get_conf(cycle->conf_ctx, ngx_core_module);
    ecf = ngx_event_get_conf(cycle->conf_ctx, ngx_event_core_module);

    /*
     * The accept mutex only makes sense in master/worker mode with more
     * than one worker; a single process needs no load balancing.
     */
    if (ccf->master && ccf->worker_processes > 1 && ecf->accept_mutex) {
        ngx_use_accept_mutex = 1;
        ngx_accept_mutex_held = 0;

        /* how long to wait before trying for the mutex again after losing it */
        ngx_accept_mutex_delay = ecf->accept_mutex_delay;

    } else {
        ngx_use_accept_mutex = 0;
    }

    /* initialize the timer red-black tree */
    if (ngx_event_timer_init(cycle->log) == NGX_ERROR) {
        return NGX_ERROR;
    }

    /* run actions.init of the event module chosen by "use" (e.g. epoll) */
    for (m = 0; ngx_modules[m]; m++) {
        if (ngx_modules[m]->type != NGX_EVENT_MODULE) {
            continue;
        }

        if (ngx_modules[m]->ctx_index != ecf->use) {
            continue;
        }

        module = ngx_modules[m]->ctx;

        if (module->actions.init(cycle, ngx_timer_resolution) != NGX_OK) {
            /* fatal */
            exit(2);
        }

        break;
    }

    /* connection pool: each worker owns its own connections array */
    cycle->connections =
        ngx_alloc(sizeof(ngx_connection_t) * cycle->connection_n, cycle->log);
    if (cycle->connections == NULL) {
        return NGX_ERROR;
    }

    c = cycle->connections;

    /* read events, one slot per connection */
    cycle->read_events = ngx_alloc(sizeof(ngx_event_t) * cycle->connection_n,
                                   cycle->log);
    if (cycle->read_events == NULL) {
        return NGX_ERROR;
    }

    rev = cycle->read_events;
    for (i = 0; i < cycle->connection_n; i++) {
        rev[i].closed = 1;
        rev[i].instance = 1;    /* guards against stale events */
    }

    /* write events, one slot per connection */
    cycle->write_events = ngx_alloc(sizeof(ngx_event_t) * cycle->connection_n,
                                    cycle->log);
    if (cycle->write_events == NULL) {
        return NGX_ERROR;
    }

    wev = cycle->write_events;
    for (i = 0; i < cycle->connection_n; i++) {
        wev[i].closed = 1;
    }

    i = cycle->connection_n;
    next = NULL;

    /*
     * Thread the pool into a singly linked free list through c[i].data,
     * pairing each connection with its read/write event slots.
     */
    do {
        i--;

        c[i].data = next;
        c[i].read = &cycle->read_events[i];
        c[i].write = &cycle->write_events[i];
        c[i].fd = (ngx_socket_t) -1;

        next = &c[i];
    } while (i);

    cycle->free_connections = next;
    cycle->free_connection_n = cycle->connection_n;

    /* for each listening socket: attach a connection and the accept handler */
    ls = cycle->listening.elts;
    for (i = 0; i < cycle->listening.nelts; i++) {

        c = ngx_get_connection(ls[i].fd, cycle->log);
        if (c == NULL) {
            return NGX_ERROR;
        }

        c->log = &ls[i].log;
        c->listening = &ls[i];
        ls[i].connection = c;

        rev = c->read;
        rev->log = c->log;
        rev->accept = 1;

        /* first link of the event chain: accepts and dispatches connections */
        rev->handler = ngx_event_accept;

        /*
         * With the accept mutex, the listening fd is registered with the
         * event mechanism only by the current mutex holder (see
         * ngx_trylock_accept_mutex), so skip registration here.
         */
        if (ngx_use_accept_mutex) {
            continue;
        }

        if (ngx_event_flags & NGX_USE_RTSIG_EVENT) {
            if (ngx_add_conn(c) == NGX_ERROR) {
                return NGX_ERROR;
            }

        } else {
            /* no accept mutex: add the listening read event right away */
            if (ngx_add_event(rev, NGX_READ_EVENT, 0) == NGX_ERROR) {
                return NGX_ERROR;
            }
        }
    }

    return NGX_OK;
}
函数中主要是
1.判断是否使用防止惊群和负载均衡用的accept_mutex锁,并设置标志位
2.调用相应的IO复用模块(ngx_epoll_module)的init初始化动作
ngx_event_module_t ngx_epoll_module_ctx = {
&epoll_name,
ngx_epoll_create_conf, /* create configuration */
ngx_epoll_init_conf, /* init configuration */
{
ngx_epoll_add_event, /* add an event */
ngx_epoll_del_event, /* delete an event */
ngx_epoll_add_event, /* enable an event */
ngx_epoll_del_event, /* disable an event */
ngx_epoll_add_connection, /* add an connection */
ngx_epoll_del_connection, /* delete an connection */
NULL, /* process the changes */
ngx_epoll_process_events, /* process the events */
ngx_epoll_init, /* init the events */
ngx_epoll_done, /* done the events */
3.创建连接池和读写事件链表,并且将读写事件与每一个连接ngx_connection_t结构体建立联系
4.为每一个监听端口(ngx_listening_t)分配一个ngx_connection_t结构体
5.将每一个连接(监听端口)对应的读事件设置为ngx_event_accept,如果没有使用accept_mutex锁将事件加入epoll事件驱动模块
等待事件到来
下面分析worker进程核心——事件收集分发函数ngx_process_events_and_timers
//是否使用accept互斥体。accept mutex的作用就是避免惊群,同时实现负载均衡
if (ngx_use_accept_mutex) {
if (ngx_accept_disabled > 0) { //大于0说明该进程接收的连接过多,放弃一次争抢accept mutex的机会
//变量ngx_accept_disabled用于负载均衡,当它大于0时,说明该进程已接受的连接数超过可接受最大连接数的7/8
ngx_accept_disabled--;
} else {
if (ngx_trylock_accept_mutex(cycle) == NGX_ERROR) { //尝试获取锁
return;
}
if (ngx_accept_mutex_held) {
flags |= NGX_POST_EVENTS; //这个标志是将所有产生的事件放入到一个队列中。等释放锁以后再慢慢来处理事件。
} else {
if (timer == NGX_TIMER_INFINITE
|| timer > ngx_accept_mutex_delay) //设置最长延迟多久,再次去争抢锁
{
timer = ngx_accept_mutex_delay;
}
}
}
}
delta = ngx_current_msec;
//epoll开始wait事件
(void) ngx_process_events(cycle, timer, flags);
delta = ngx_current_msec - delta;
ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
"timer delta: %M", delta);
//ngx_posted_accept_events暂存epoll从监听套接字接口wait到的accept事件
if (ngx_posted_accept_events) {
ngx_event_process_posted(cycle, &ngx_posted_accept_events);
}
if (ngx_accept_mutex_held) { //所有accept事件处理完成了,如果拥有锁的话,赶紧释放了,优先处理accept事件
ngx_shmtx_unlock(&ngx_accept_mutex);
}
//delta是上文对epoll wait事件的耗时统计,存在毫秒级的耗时就对所有事件的timer进行检查,
if (delta) {
ngx_event_expire_timers();
}
ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
"posted events %p", ngx_posted_events);
//处理普通事件(连接上获得的读写事件)
if (ngx_posted_events) {
if (ngx_threaded) {
ngx_wakeup_worker_thread(cycle);
} else {
ngx_event_process_posted(cycle, &ngx_posted_events);
}
}
}
上面函数中调用了函数ngx_process_events。这个函数也是ngx_event_module_t中定义的10个action之一
下面分析一下函数代码
/*
 * ngx_epoll_process_events -- the process_events action of the epoll
 * module.  Waits in epoll_wait() for at most `timer` milliseconds, then
 * for each returned event either invokes the handler directly or, when
 * NGX_POST_EVENTS is set in `flags` (i.e. the accept mutex is held),
 * queues it into ngx_posted_accept_events / ngx_posted_events for
 * deferred processing after the mutex is released.
 *
 * Returns NGX_OK, or NGX_ERROR on a non-EINTR epoll_wait() failure or a
 * spurious zero-event return with an infinite timeout.
 */
static ngx_int_t
ngx_epoll_process_events(ngx_cycle_t *cycle, ngx_msec_t timer, ngx_uint_t flags)
{
int events;
uint32_t revents;
ngx_int_t instance, i;
ngx_uint_t level;
ngx_err_t err;
ngx_event_t *rev, *wev, **queue;
ngx_connection_t *c;
/* NGX_TIMER_INFINITE == INFTIM */
ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
"epoll timer: %M", timer);
// Block waiting for events, at most `timer` msec; nginx keeps its timers
// in a dedicated red-black tree, which supplies this timeout.
events = epoll_wait(ep, event_list, (int) nevents, timer);
err = (events == -1) ? ngx_errno : 0;
if (flags & NGX_UPDATE_TIME || ngx_event_timer_alarm) {
ngx_time_update(); // refresh the cached time; nginx caches it in globals for cheap access
}
// handle epoll_wait() errors
if (err) {
if (err == NGX_EINTR) {
if (ngx_event_timer_alarm) {
ngx_event_timer_alarm = 0;
return NGX_OK;
}
level = NGX_LOG_INFO;
} else {
level = NGX_LOG_ALERT;
}
ngx_log_error(level, cycle->log, err, "epoll_wait() failed");
return NGX_ERROR;
}
// Zero events: normal on timeout; with an infinite timer it is an error.
if (events == 0) {
if (timer != NGX_TIMER_INFINITE) {
return NGX_OK;
}
ngx_log_error(NGX_LOG_ALERT, cycle->log, 0,
"epoll_wait() returned no events without timeout");
return NGX_ERROR;
}
ngx_mutex_lock(ngx_posted_events_mutex);
// iterate over all returned events
for (i = 0; i < events; i++) {
c = event_list[i].data.ptr;
// The low bit of the stored pointer carries the event "instance" flag;
// mask it off to recover the connection pointer.
instance = (uintptr_t) c & 1;
c = (ngx_connection_t *) ((uintptr_t) c & (uintptr_t) ~1);
rev = c->read;
if (c->fd == -1 || rev->instance != instance) {
/*
 * the stale event from a file descriptor
 * that was just closed in this iteration
 */
ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
"epoll: stale event %p", c);
continue;
}
// the event bits reported for this descriptor
revents = event_list[i].events;
ngx_log_debug3(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
"epoll: fd:%d ev:%04XD d:%p",
c->fd, revents, event_list[i].data.ptr);
// log error/hangup conditions reported by epoll_wait()
if (revents & (EPOLLERR|EPOLLHUP)) {
ngx_log_debug2(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
"epoll_wait() error on fd:%d ev:%04XD",
c->fd, revents);
}
#if 0
if (revents & ~(EPOLLIN|EPOLLOUT|EPOLLERR|EPOLLHUP)) {
ngx_log_error(NGX_LOG_ALERT, cycle->log, 0,
"strange epoll_wait() events fd:%d ev:%04XD",
c->fd, revents);
}
#endif
// An error/hangup with neither EPOLLIN nor EPOLLOUT set would otherwise
// be dropped; force both bits so at least one active handler sees it.
if ((revents & (EPOLLERR|EPOLLHUP))
&& (revents & (EPOLLIN|EPOLLOUT)) == 0)
{
/*
 * if the error events were returned without EPOLLIN or EPOLLOUT,
 * then add these flags to handle the events at least in one
 * active handler
 */
revents |= EPOLLIN|EPOLLOUT;
}
// a read event, and the connection's read event is active
if ((revents & EPOLLIN) && rev->active) {
if ((flags & NGX_POST_THREAD_EVENTS) && !rev->accept) {
rev->posted_ready = 1;
} else {
rev->ready = 1;
}
// post the event to the appropriate queue
if (flags & NGX_POST_EVENTS) {
queue = (ngx_event_t **) (rev->accept ?
&ngx_posted_accept_events : &ngx_posted_events);// accept events go to their own queue so they run first and the mutex is released sooner
ngx_locked_post_event(rev, queue);
} else {
rev->handler(rev);// not posting (no accept mutex held): invoke the handler directly
}
}
wev = c->write;
if ((revents & EPOLLOUT) && wev->active) {
if (c->fd == -1 || wev->instance != instance) {
/*
 * the stale event from a file descriptor
 * that was just closed in this iteration
 */
ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
"epoll: stale event %p", c);
continue;
}
if (flags & NGX_POST_THREAD_EVENTS) {
wev->posted_ready = 1;
} else {
wev->ready = 1;
}
if (flags & NGX_POST_EVENTS) {
ngx_locked_post_event(wev, &ngx_posted_events);
} else {
wev->handler(wev);
}
}
}
ngx_mutex_unlock(ngx_posted_events_mutex);
return NGX_OK;
}
此文章中包含一部分事件模块内容,另有一些事件模块内容放在单独一篇文章