tcp 0 0 *:7777 *:* LISTEN 32686/PC_***
tcp 0 0 linux:7777 linux:38318 ESTABLISHED 32686/PC_***
tcp 0 0 linux:34833 linux:7777 CLOSE_WAIT 28132/java
tcp 0 0 linux:58603 linux:7777 CLOSE_WAIT 28132/java
tcp 0 0 linux:38318 linux:7777 ESTABLISHED 28132/java
tcp 0 0 linux:49424 linux:7777 CLOSE_WAIT 28132/java
Ugh, why so many CLOSE_WAIT sockets? Time to review the TCP state transitions again.
With that information in hand, reading the code quickly pinpointed the bug.
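Quick reminder: a socket sits in CLOSE_WAIT when the peer has sent its FIN (and we have ACKed it) but our application has not yet called close(). In the netstat output above all the CLOSE_WAIT entries belong to the java process (28132), so it is that side which received the FIN and never closed its end. The classic shape of such a bug is a read handler that ignores EOF; a minimal sketch (handle_read is a hypothetical name, not the project's callback):

#include <unistd.h>

static void handle_read(int fd)
{
    char buf[4096];
    ssize_t n = read(fd, buf, sizeof(buf));

    if (n == 0) {
        /* Peer sent FIN: the kernel has already moved this socket to
         * CLOSE_WAIT. Forgetting this close() leaves it there forever,
         * which is exactly what piles up in the netstat output above. */
        close(fd);   /* our FIN goes out, the state moves on to LAST_ACK */
        return;
    }
    if (n < 0)
        return;      /* EAGAIN or a real error; handle elsewhere */

    /* process n bytes ... */
}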
While I am at it, let me also summarize the network event classes, so that when this area breaks in some future project I will not have to stare at it for half a day. In project X we put a second layer of wrapping around libevent's network events:
a. two queues, one recording the registered read/write events and one recording active events, plus one tree recording the registered timeouts
b. a state field ev_flags with 4 flags
Take the timeout path as an example.
1. Add
event_queue_insert(base, ev, EVLIST_TIMEOUT);
{
    ......
    ev->ev_flags |= queue;
    ......
    case EVLIST_TIMEOUT:
    {
        wt_rbtree_insert(&base->timetree, &ev->ev_timeout_node);
        break;
    }
    ......
}
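For reference, a plausible full shape of event_queue_insert under the structures described in the FAQ below (the ev_event_node and ev_active_node link names are assumptions; only ev_timeout_node appears in the code above):

static void
event_queue_insert(struct event_base *base, struct event *ev, int queue)
{
    if (ev->ev_flags & queue)               /* already on that list */
        return;

    ev->ev_flags |= queue;                  /* remember where the event lives */

    switch (queue) {
    case EVLIST_INSERTED:                   /* registered read/write events */
        ngx_queue_insert_tail(&base->eventqueue, &ev->ev_event_node);
        break;
    case EVLIST_ACTIVE:                     /* events ready to fire */
        base->event_count_active++;
        ngx_queue_insert_tail(&base->activequeue, &ev->ev_active_node);
        break;
    case EVLIST_TIMEOUT:                    /* timers live in the red-black tree */
        wt_rbtree_insert(&base->timetree, &ev->ev_timeout_node);
        break;
    }
}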
2. Loop
......
if (!base->event_count_active && !(flags & EVLOOP_NONBLOCK))
    timeout_next(base, &tv);
else
    timerclear(&tv);

/* If we have no events, we just exit */
if (!event_haveevents(base))
    return (1);

res = epoll_dispatch(base, evbase, &tv);
if (res == -1)
    return (-1);

timeout_process(base);
......
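timeout_next only has to look at the leftmost node of the timer tree to decide how long epoll may block. A sketch, assuming a wt_rbtree_min() helper, an absolute ev_timeout field on struct event, and a container_of-style macro (none of these names are confirmed by the code above):

#include <stddef.h>
#include <sys/time.h>

#define wt_container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

static int
timeout_next(struct event_base *base, struct timeval *tv)
{
    struct timeval now;
    struct event *ev;
    struct wt_rbtree_node *node = wt_rbtree_min(&base->timetree);

    if (node == NULL) {                    /* no timers: pick a default wait */
        tv->tv_sec  = 60;
        tv->tv_usec = 0;
        return (0);
    }

    ev = wt_container_of(node, struct event, ev_timeout_node);
    gettimeofday(&now, NULL);

    if (!timercmp(&ev->ev_timeout, &now, >)) {
        timerclear(tv);                    /* already expired: do not block */
        return (0);
    }
    timersub(&ev->ev_timeout, &now, tv);   /* sleep until the earliest timer */
    return (0);
}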
void
timeout_process(struct event_base *base)
{
    ......
    /* traverse the red-black tree of timers */
    event_queue_remove(base, ev, EVLIST_TIMEOUT);
    /*
    {
    case EVLIST_TIMEOUT:
        wt_rbtree_delete(&base->timetree, &ev->ev_timeout_node);
        break;
    }
    */
    event_active(ev, EV_TIMEOUT, 1);
    ......
}
Inside event_active we again run into our familiar
event_queue_insert(ev->ev_base, ev, EVLIST_ACTIVE);
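Putting the fragments together, timeout_process probably looks roughly like this (same assumed wt_rbtree_min / wt_container_of helpers and ev_timeout field as in the timeout_next sketch above):

void
timeout_process(struct event_base *base)
{
    struct timeval now;
    struct event *ev;
    struct wt_rbtree_node *node;

    gettimeofday(&now, NULL);

    /* keep taking the leftmost (earliest) timer while it has expired */
    while ((node = wt_rbtree_min(&base->timetree)) != NULL) {
        ev = wt_container_of(node, struct event, ev_timeout_node);

        if (timercmp(&ev->ev_timeout, &now, >))
            break;                          /* earliest timer not due yet */

        /* drop it from the tree (clears EVLIST_TIMEOUT in ev_flags) ... */
        event_queue_remove(base, ev, EVLIST_TIMEOUT);

        /* ... and push it onto the active queue via event_active */
        event_active(ev, EV_TIMEOUT, 1);
    }
}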
3. Process
static void
event_process_active(struct event_base *base)
{
    /* walk the active queue */
    if (ev->ev_events & EV_PERSIST)
        event_queue_remove(base, ev, EVLIST_ACTIVE);
    else
        event_del(ev);
    /* invoke the callback ev_ncalls times */
}
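A sketch of the whole function, using nginx's ngx_queue helpers; the ev_active_node, ev_ncalls, ev_callback, ev_fd, ev_res and ev_arg field names follow stock libevent conventions and are assumptions here:

static void
event_process_active(struct event_base *base)
{
    struct event *ev;
    ngx_queue_t *q;

    while (!ngx_queue_empty(&base->activequeue)) {
        q  = ngx_queue_head(&base->activequeue);
        ev = ngx_queue_data(q, struct event, ev_active_node);

        if (ev->ev_events & EV_PERSIST)
            event_queue_remove(base, ev, EVLIST_ACTIVE);  /* stays registered */
        else
            event_del(ev);                  /* one-shot: unregister completely */

        /* invoke the callback ev_ncalls times */
        while (ev->ev_ncalls) {
            ev->ev_ncalls--;
            (*ev->ev_callback)(ev->ev_fd, ev->ev_res, ev->ev_arg);
        }
    }
}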
FAQ
1. A close look at libevent's key structures and functions
No matter what the scenario is, it is enough to handle read and write events (out-of-band data can be ignored).
Key structures
a. The event
struct event
{
    ....
    int ev_flags;
};
Event states
#define EVLIST_INIT     0x80
#define EVLIST_INSERTED 0x02
#define EVLIST_ACTIVE   0x08
#define EVLIST_TIMEOUT  0x01
b. The epoll framework
struct event_base {
    ngx_queue_t  activequeue;
    ngx_queue_t  eventqueue;
    ngx_rbtree_t timetree;
};
c. Inside the epoll backend
struct evepoll
{
    struct event *evread;
    struct event *evwrite;
};
struct epollop
{
    struct evepoll *fds;   /* indexed by fd; kept so epoll_add/epoll_dispatch can find the events registered on an fd (see below) */
    int nfds;
    struct epoll_event *events;
    int nevents;
    int epfd;
    sigset_t evsigmask;
} epollop;
Functions
a. event_init
ngx_queue_init(&current_base->eventqueue);
ngx_queue_init(&current_base->activequeue);
ngx_rbtree_init(&current_base->timetree, &current_base->sentinel, ngx_rbtree_insert_value);
Its main job is to initialize these three structures.
b. event_set
ev->ev_flags = EVLIST_INIT;
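Presumably event_set also records the fd, the interest mask and the callback, along the lines of stock libevent 1.x (these field names come from stock libevent and are assumptions for this wrapper):

void
event_set(struct event *ev, int fd, short events,
          void (*callback)(int, short, void *), void *arg)
{
    ev->ev_callback = callback;
    ev->ev_arg      = arg;
    ev->ev_fd       = fd;
    ev->ev_events   = events;         /* EV_READ / EV_WRITE / EV_PERSIST ... */
    ev->ev_res      = 0;
    ev->ev_ncalls   = 0;
    ev->ev_flags    = EVLIST_INIT;    /* not on any queue or tree yet */
}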
c. event_add
// for read/write events
if ((ev->ev_events & (EV_READ | EV_WRITE)) &&
    !(ev->ev_flags & (EVLIST_INSERTED | EVLIST_ACTIVE)))
{
    event_queue_insert(base, ev, EVLIST_INSERTED);
    return (epoll_add(evbase, ev));
}
epoll_add
if (evep->evread != NULL)
{
    events |= EPOLLIN;
    op = EPOLL_CTL_MOD;
}
if (evep->evwrite != NULL)
{
    events |= EPOLLOUT;
    op = EPOLL_CTL_MOD;
}
if (ev->ev_events & EV_READ)
    events |= EPOLLIN;
if (ev->ev_events & EV_WRITE)
    events |= EPOLLOUT;
if (ev->ev_events & EV_READ)
    evep->evread = ev;
if (ev->ev_events & EV_WRITE)
    evep->evwrite = ev;
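A fuller sketch of epoll_add that shows where those checks sit: because fds[] is indexed by file descriptor, finding an evread or evwrite already registered on the same fd means the fd is already in the epoll set, so we must use EPOLL_CTL_MOD and keep its interest bits rather than EPOLL_CTL_ADD. The surrounding scaffolding, including the bounds check, is an assumption:

#include <sys/epoll.h>

static int
epoll_add(void *arg, struct event *ev)
{
    struct epollop *epollop = arg;
    struct epoll_event epev = { 0, { 0 } };
    struct evepoll *evep;
    int fd = ev->ev_fd;
    int op = EPOLL_CTL_ADD, events = 0;

    if (fd >= epollop->nfds)
        return (-1);                  /* real code would grow fds[] here */
    evep = &epollop->fds[fd];

    /* keep the interest of whatever is already registered on this fd */
    if (evep->evread != NULL)  { events |= EPOLLIN;  op = EPOLL_CTL_MOD; }
    if (evep->evwrite != NULL) { events |= EPOLLOUT; op = EPOLL_CTL_MOD; }

    /* add what this event asks for */
    if (ev->ev_events & EV_READ)  events |= EPOLLIN;
    if (ev->ev_events & EV_WRITE) events |= EPOLLOUT;

    epev.data.fd = fd;
    epev.events  = events;
    if (epoll_ctl(epollop->epfd, op, fd, &epev) == -1)
        return (-1);

    /* remember which event owns each side of the fd */
    if (ev->ev_events & EV_READ)  evep->evread  = ev;
    if (ev->ev_events & EV_WRITE) evep->evwrite = ev;
    return (0);
}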
d. event_base_loop
res = epoll_dispatch(base, evbase, &tv);
if (res == -1)
    return (-1);
timeout_process(base);
if (base->event_count_active)
{
    event_process_active(base);   // process the events on the active queue
    if (!base->event_count_active && (flags & EVLOOP_ONCE))
        done = 1;
}
epoll_dispatch
if (evread != NULL && !(evread->ev_events & EV_PERSIST))
    event_del(evread);
if (evwrite != NULL && evwrite != evread &&
    !(evwrite->ev_events & EV_PERSIST))
    event_del(evwrite);
if (evread != NULL)
    event_active(evread, EV_READ, 1);
if (evwrite != NULL)
    event_active(evwrite, EV_WRITE, 1);
// An event is deleted once it has fired; this differs from the usual epoll pattern, which is why EV_PERSIST must be set to keep an event registered.
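For completeness, a sketch of the wait loop in epoll_dispatch that leads up to the fragment above: convert the timeout, call epoll_wait, then map each ready fd back to its events through fds[] (error handling and the exact field names are assumptions):

#include <errno.h>
#include <sys/epoll.h>

static int
epoll_dispatch(struct event_base *base, void *arg, struct timeval *tv)
{
    struct epollop *epollop = arg;
    struct epoll_event *events = epollop->events;
    int i, res, timeout;

    timeout = tv->tv_sec * 1000 + (tv->tv_usec + 999) / 1000;
    res = epoll_wait(epollop->epfd, events, epollop->nevents, timeout);
    if (res == -1)
        return (errno == EINTR ? 0 : -1);   /* a signal is not an error */

    for (i = 0; i < res; i++) {
        int what = events[i].events;
        struct evepoll *evep = &epollop->fds[events[i].data.fd];
        struct event *evread = NULL, *evwrite = NULL;

        if (what & (EPOLLHUP | EPOLLERR)) { /* error: wake both directions */
            evread  = evep->evread;
            evwrite = evep->evwrite;
        } else {
            if (what & EPOLLIN)  evread  = evep->evread;
            if (what & EPOLLOUT) evwrite = evep->evwrite;
        }

        /* ... followed by the non-EV_PERSIST event_del calls and the
         * event_active calls shown in the fragment above ... */
    }
    return (0);
}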
event_queue_remove
(besides the queue/tree bookkeeping itself, each also updates the flags)
ev->ev_flags &= ~queue;
event_queue_insert
ev->ev_flags |= queue;
2. nginx graceful reload: nginx -s reload
3. When a connection is torn down ungracefully, we can set the SO_KEEPALIVE option so that roughly two hours later we find out whether the peer's TCP connection still exists.
keepAlive = 1;
setsockopt(listenfd, SOL_SOCKET, SO_KEEPALIVE, (void *)&keepAlive, sizeof(keepAlive));
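The two hours come from the kernel default (tcp_keepalive_time). If waiting that long is too slow, Linux lets you tune the probing per socket; a sketch (the 60 s / 10 s / 3 probes values are arbitrary):

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static int
enable_keepalive(int fd)
{
    int on = 1, idle = 60, intvl = 10, cnt = 3;

    if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on)) == -1)
        return (-1);
    /* start probing after 60 s of idleness instead of the 2-hour default */
    if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle)) == -1)
        return (-1);
    /* send a probe every 10 s, give up after 3 unanswered probes */
    if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl)) == -1)
        return (-1);
    if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt)) == -1)
        return (-1);
    return (0);
}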
4. Code complexity and SourceMonitor
References:
1. epoll event types: http://blog.youkuaiyun.com/huangjm_13/article/details/17676591
2. TCP states and the socket API: http://blog.163.com/xychenbaihu@yeah/blog/static/13222965520118139252103/
3. CLOSE_WAIT: http://blog.chinaunix.net/uid-20357359-id-1963662.html