libeventIO是单线程的,将所有监控的IO句柄加入到队列里面,只要对应的IO发生事件,就会触发对应的回调函数。对于一个程序既要监控所有IO句柄,又要处理所有IO事件,必然会影响效率。利用一个主线程监控监听套接字是否有事件发生,创建部分子线程监控已连接的套接字是否有事件发生,并进行处理;主线程监听到套接字后,随机选一个子线程来处理后面的请求。
由于libevent回调函数必须是静态成员函数,不能直接访问非静态成员变量(比如,可以将主线程监控到的套接字连接请求加入到队列里面,子线程负责从队列里面取出请求,进行后续处理。但是,由于静态回调无法直接使用成员变量,因此这种方法行不通)。那么主线程和子线程之间可以通过pipe管道来传递信息。
服务器头文件tcp_server_threadpoolevent.h
#include <iostream>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/epoll.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <fcntl.h>
#include <thread>
#include <pthread.h>
#include <string>
#include <list>
#include <vector>
#include <mutex>
#include <event2/event.h>
#include <event.h>
#include "tcp_server.h"
//using namespace std;
#define MAXLINE 4096
#define MAXEPOLL 10000
// Per-worker-thread context: each worker owns its own event_base and
// receives accepted connection fds from the main thread over a pipe.
class threadTask{
public:
struct event_base *thread_base;          // this worker's private libevent loop
std::shared_ptr<std::thread> m_thread;   // the worker thread itself
struct event notify_event;               // fires when notify_recvfd becomes readable
int notify_recvfd;                       // pipe read end (worker side)
int notify_sendfd;                       // pipe write end (main-thread side)
};
// Thread-pool libevent server.
// The main thread runs an event loop watching only the listening socket;
// each accepted connection fd is handed to one of `threadnum` worker
// threads (chosen at random) through that worker's notify pipe.
class poolEventServer : public Server{
public:
// num: number of worker threads spawned by start() (default 10).
poolEventServer(string ip, int port, int num = 10):Server(ip, port),threadnum(num){
}
// Spawns the workers, then binds/listens and runs the accept loop; blocks.
int start();
// Main-thread callback: accepts a connection and forwards the fd to a worker.
// libevent callbacks must be static/free functions; `this` travels in args.
static void accept_socket_cb(int fd, short ev, void *args);
// Worker-thread callback: drains data from a connected socket.
static void read_socket_cb(int fd, short ev, void *args);
int get_thread_num();
// Creates a worker's event_base and notify pipe, and registers the pipe event.
void threadSetup(class threadTask *me);
// Worker thread entry point: runs that worker's event loop.
void threadStart(class threadTask *me);
// Worker callback for the notify pipe: reads a connection fd, watches it.
static void threadProcess(int fd, short which, void *args);
private:
int threadnum;                               // number of worker threads
std::vector<class threadTask*> thread_tasks; // one context per worker
//std::list<int> connfd_list;
//std::mutex m_connfd_mutex;
};
服务器tcp_server_threadpoolevent.cpp
#include <iostream>
#include <errno.h>
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/epoll.h>
#include <sys/types.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <arpa/inet.h>
#include <fcntl.h>
#include <thread>
#include <pthread.h>
#include <string>
#include <memory>
#include <mutex>
#include <sys/wait.h>
#include "tcp_server_threadpoolevent.h"
int poolEventServer::start(){
int sockfd,connfd;
struct sockaddr_in cliaddr;
socklen_t clilen;
//thread process start
for(int i=0; i< threadnum; i++){
class threadTask *ptr = new threadTask();
//std::shared_ptr<class threadTask> ptr = std::make_shared<class threadTask>(threadptr);
//thread_tasks.emplace_back(ptr);
threadSetup(ptr);
ptr->m_thread = std::make_shared<std::thread>(&poolEventServer::threadStart,this,ptr);
thread_tasks.emplace_back(ptr);
}
//thread process end
if((sockfd = socket(AF_INET, SOCK_STREAM, 0)) < 0){
m_log->outputLog("socket create failed: error %s, errno:%d",strerror(errno),errno);
return 0;
}
m_log->outputLog("socket create %d", sockfd);
int ret = ::bind(sockfd, (struct sockaddr*) &addr, sizeof(addr));
if(ret == -1){
m_log->outputLog("bind socket error: %s, errno:%d",strerror(errno),errno);
return 0;
}
if((listen(sockfd,10)) == -1){
m_log->outputLog("listen error: %s, errno:%d",strerror(errno),errno);
return 0;
}
//set nonblocking
evutil_make_socket_nonblocking(sockfd);
struct event_base* base = event_base_new();
struct event* ev_listen = event_new(base, sockfd, EV_READ | EV_PERSIST, accept_socket_cb, (void*)this);
event_add(ev_listen, NULL);
event_base_dispatch(base);
/*clilen = sizeof(cliaddr);
while(true){
if((connfd = accept(sockfd, (struct sockaddr*)&cliaddr, &clilen)) == -1){
m_log->outputLog("listenfd: %d, accept error: %s, errno:%d",sockfd,strerror(errno),errno);
return -1;
}
m_log->outputLog("listenfd: %d, acceptfd :%d",sockfd,connfd);
evutil_make_socket_nonblocking(connfd);
//select a thread
int num = rand() % threadnum;
m_log->outputLog("thread select:%d",num);
int sendfd = thread_tasks[num]->notify_sendfd;
write(sendfd,&connfd,sizeof(evutil_socket_t));
}*/
return 0;
}
void poolEventServer::accept_socket_cb(int fd, short events, void *args){
int connfd;
struct sockaddr_in cliaddr;
socklen_t clilen = sizeof(cliaddr);
if((connfd = accept(fd, (struct sockaddr*)&cliaddr, &clilen)) == -1){
m_log->outputLog("listenfd: %d, accept error: %s, errno:%d",fd,strerror(errno),errno);
return;
}
m_log->outputLog("listenfd: %d, acceptfd :%d",fd,connfd);
evutil_make_socket_nonblocking(connfd);
class poolEventServer *server = (class poolEventServer*) args;
int num = rand() % server->threadnum;
m_log->outputLog("thread select:%d",num);
int sendfd = server->thread_tasks[num]->notify_sendfd;
write(sendfd,&connfd,sizeof(evutil_socket_t));
//struct event_base *base = (struct event_base*)args;
//struct event* ev = event_new(NULL, -1, 0, NULL, NULL);
//event_assign(ev, base, connfd, EV_READ | EV_PERSIST, read_socket_cb, (void*)ev);
//event_add(ev,NULL);
}
void poolEventServer::read_socket_cb(int fd, short events, void *args){
char buff[MAXLINE];
//tcp info check socket info
struct tcp_info info;
int len = sizeof(info);
struct event* ev = (struct event*)args;
while(1){
int n = recv(fd,buff,MAXLINE,MSG_DONTWAIT);
if(n>0){
buff[n] = '\0';
m_log->outputLog("thread:%d,connfd:%d,recv msg:%s",std::this_thread::get_id(),fd,buff);
}else{
//if(errno == EINTR || errno == EAGAIN){
getsockopt(fd,IPPROTO_TCP,TCP_INFO,&info,(socklen_t *)&len);
if(info.tcpi_state == TCP_ESTABLISHED){
//m_log->outputLog("established recv error: %s, errno:%d",strerror(errno),errno);
//break;
}else{
//del connfd and close
event_free(ev);
m_log->outputLog("close connfd: %d error: %s, errno:%d",fd,strerror(errno),errno);
close(fd);
}
m_log->outputLog("connfd:%d,recv ok",fd);
break;
}
}
}
// Worker thread entry point: run this worker's event loop.
// Blocks until the loop is broken or the base is torn down.
void poolEventServer::threadStart(class threadTask *me){
    struct event_base *worker_base = me->thread_base;
    event_base_dispatch(worker_base);
}
void poolEventServer::threadProcess(int fd, short which, void *args){
class threadTask *me = (class threadTask *)args;
evutil_socket_t connfd;
read(me->notify_recvfd,&connfd,sizeof(evutil_socket_t));
struct event* ev = event_new(NULL, -1, 0, NULL, NULL);
event_assign(ev, me->thread_base, connfd, EV_READ | EV_PERSIST, read_socket_cb, (void*)ev);
event_add(ev,NULL);
//struct event* ev_listen = event_new(me->thread_base, connfd, EV_READ | EV_PERSIST, read_socket_cb, (void*)me->thread_base);
//event_add(ev_listen, NULL);
}
void poolEventServer::threadSetup(class threadTask * me){
me->thread_base = event_base_new();
if(me->thread_base == nullptr){
return;
}
//create pipe
int fds[2];
if(pipe(fds)){
return;
}
me->notify_recvfd = fds[0];
me->notify_sendfd = fds[1];
event_set(&me->notify_event, me->notify_recvfd, EV_READ | EV_PERSIST, threadProcess, me);
event_base_set(me->thread_base,&me->notify_event);
event_add(&me->notify_event,0);
}
test_server.cpp
#include "logger.h"
#include "tcp_server.h"
#include "tcp_server_thread.h"
#include "tcp_server_process.h"
#include "tcp_server_epoll.h"
#include "tcp_server_libevent.h"
#include "tcp_server_threadpoolevent.h"
#include <signal.h>
#include <sys/wait.h>
using namespace std;
logger *m_log = logger::get_instance();
int main(){
//signal(SIGCHLD, &func_waitpid);
//logger *m_log = logger::get_instance();
m_log->openLogFile("./","test_server.log",true);
//Server *m_server = new Server("111.206.73.111",12345);
//Server *m_server = new ThreadServer("111.206.73.111",12345);
//Server *m_server = new ProcessServer("111.206.73.111",12345);
//Server *m_server = new EpollServer("111.206.73.111",12345);
Server *m_server = new poolEventServer("111.206.73.111",12345);
//Server *m_server = new libeventServer("111.206.73.111",12345);
m_server->init(m_log);
m_server->start();
}
本文介绍了一种使用libevent库实现的线程池服务器设计方案。通过主线程监控监听套接字,子线程处理已连接套接字的IO事件,解决了单线程libevent在高并发场景下的效率问题。文章详细描述了服务器的类结构和关键函数,包括如何通过管道在主线程和子线程间传递信息。
878

被折叠的 条评论
为什么被折叠?



