Compared with version 1, this version adds a Socket_ wrapper class, wraps the event handlers in a function_ALL class, and introduces a thread pool.
#include <bits/stdc++.h>
#include <errno.h>
#include <sys/epoll.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <fcntl.h>
#include <sys/types.h>
#include <arpa/inet.h>
#include <unistd.h>
using namespace std;
using callback = void (*)(void *);
constexpr int NUMBER = 2;
#define PORT 9876
void Perror(const char *msg) // print the error and terminate
{
perror(msg);
exit(1);
}
struct accept_arg // argument bundle for the function_ALL callbacks: epfd, lfd, and the data fd
{
int epfd;
int lfd;
int fd;
};
void fcntl_NOBLOCK(int fd) // switch a descriptor to non-blocking mode
{
int flags = fcntl(fd, F_GETFL);
fcntl(fd, F_SETFL, flags | O_NONBLOCK);
}
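// Both the listening socket and every accepted connection are registered with
// EPOLLET below. Edge-triggered epoll reports a descriptor only once per burst
// of new connections/data, so the handlers must loop until accept()/recv()
// returns EAGAIN; non-blocking mode keeps those loops from stalling a thread.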
class function_ALL // wrappers around the different task callbacks
{
public:
static void accept_(void *arg) // accept handler
{
accept_arg *temp = (accept_arg *)arg;
while (1) // lfd is edge-triggered, so keep accepting until the backlog is drained
{
sockaddr_in client_addr;
socklen_t clit_len = sizeof(client_addr);
char buf[INET_ADDRSTRLEN]; // holds the dotted-decimal client address
int cfd = accept(temp->lfd, (sockaddr *)&client_addr, &clit_len);
if (cfd == -1)
{
if (errno == EAGAIN || errno == EWOULDBLOCK)
break; // no more pending connections
Perror("accept error");
}
fcntl_NOBLOCK(cfd); // make the new connection non-blocking
epoll_event event;
event.data.fd = cfd;
event.events = EPOLLIN | EPOLLET;
epoll_ctl(temp->epfd, EPOLL_CTL_ADD, cfd, &event);
const char *result = inet_ntop(AF_INET, &client_addr.sin_addr, buf, sizeof(buf));
if (result == NULL)
Perror("inet_ntop error");
else
cout << "client connected, ip = " << buf << endl;
}
}
static void read_(void *arg) // read handler: echo back whatever arrives
{
accept_arg *temp = (accept_arg *)arg;
char buf[1024];
while (1) // the connection is edge-triggered, so drain it until EAGAIN
{
int len = recv(temp->fd, buf, sizeof(buf), 0);
if (len > 0)
{
send(temp->fd, buf, len, 0); // echo exactly the bytes received, not sizeof(buf)
}
else if (len == -1)
{
if (errno == EAGAIN || errno == EWOULDBLOCK)
{
cout << "read buffer drained" << endl;
break;
}
Perror("recv error");
}
else // len == 0: the peer closed the connection
{
cout << "client disconnected" << endl;
epoll_ctl(temp->epfd, EPOLL_CTL_DEL, temp->fd, NULL);
close(temp->fd);
break;
}
}
}
};
class Socket_ // wraps creation and setup of the listening socket
{
public:
Socket_(int port) // store the port; the socket itself is set up in socket_init()
{
this->port = port;
}
void socket_init() // create, configure, bind, and listen
{
int ret;
sockaddr_in server_addr; // server address structure
memset(&server_addr, 0, sizeof(server_addr));
server_addr.sin_addr.s_addr = htonl(INADDR_ANY);
server_addr.sin_family = AF_INET;
server_addr.sin_port = htons(this->port);
this->lfd = socket(AF_INET, SOCK_STREAM, 0);
if (lfd == -1)
Perror("socket error");
fcntl_NOBLOCK(this->lfd); // make the listening descriptor non-blocking
int opt = 1; // enable port reuse
ret = setsockopt(this->lfd, SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(opt));
if (ret == -1)
Perror("setsockopt error");
ret = bind(this->lfd, (sockaddr *)&server_addr, sizeof(server_addr));
if (ret == -1)
Perror("bind error");
ret = listen(this->lfd, 256);
if (ret == -1)
Perror("listen error");
}
~Socket_() // close the listening socket
{
close(this->lfd);
}
int get_lfd() // expose the listening descriptor
{
return this->lfd;
}
private:
int lfd;
int port;
};
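// A typical use of the wrapper (the port value is illustrative):
//
//     Socket_ s(9876);
//     s.socket_init();
//     int lfd = s.get_lfd(); // ready to register with epoll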
class Task // a unit of work: a callback plus its argument
{
public:
callback function;
void *arg;
Task(callback function, void *arg)
{
this->function = function;
this->arg = arg;
}
};
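// NOTE: once a Task is queued, the pool owns Task::arg. The worker calls
// free() on it after the callback runs, so every argument handed to
// Threadpool::Add must come from malloc(), never point to the stack.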
class Threadpool // thread pool
{
private:
// task-queue state
queue<Task> task_queue;
condition_variable cond; // signaled when the queue becomes non-empty, or when idle threads should retire
// pool state
vector<thread> threadIDs;
thread managerID;
mutex mutexPool;
atomic<bool> shutdown; // written by the destructor, read by manager and workers
int busy_num;
int min_num;
int max_num;
int living_num;
int exit_num;
private:
static void manager(void *arg) // manager thread: grows and shrinks the pool
{
Threadpool *pool = static_cast<Threadpool *>(arg); // void* is a generic, typeless pointer, so the cast just reinterprets it as a Threadpool*
while (!pool->shutdown)
{
std::this_thread::sleep_for(std::chrono::seconds(3)); // re-evaluate the pool every 3 seconds
int busy_num = pool->Busynum();
int living_num = pool->Livenum();
unique_lock<mutex> lock(pool->mutexPool);
int queue_size = pool->task_queue.size();
lock.unlock();
if (queue_size > living_num && living_num < pool->max_num) // backlog exceeds threads: grow
{
lock.lock();
int count = 0;
for (int i = 0; i < pool->max_num && pool->living_num < pool->max_num && count < NUMBER; i++)
{
if (!pool->threadIDs[i].joinable()) // a free slot: never used, or detached when its worker retired
{
pool->threadIDs[i] = thread(worker, pool);
count++;
pool->living_num++;
}
}
lock.unlock();
}
if (busy_num * 2 < living_num && living_num > pool->min_num)
// shrink: busy threads are less than half of the living ones
{
// unlike growing, shrinking does not destroy threads directly: it "tricks"
// idle threads out of their wait loop so they leave while(1) and clean up themselves
lock.lock();
pool->exit_num = NUMBER; // reset each round so stale exit requests cannot pile up
lock.unlock();
for (int i = 0; pool->living_num > pool->min_num && i < NUMBER; i++)
{
pool->cond.notify_all(); // wake the idle threads; up to exit_num of them will retire
}
}
}
}
static void worker(void *arg) // worker thread
{
Threadpool *pool = static_cast<Threadpool *>(arg);
// static_cast converts the void* back to a Threadpool*
while (1)
{
std::unique_lock<mutex> lock(pool->mutexPool); // lock before touching shared state
while (pool->task_queue.empty() && !pool->shutdown)
// producer/consumer wait, plus the "trick" exit path: park here while the queue is empty
{
pool->cond.wait(lock); // woken either because a task was really added, or because the manager wants idle threads to retire; exit_num > 0 tells which
if (pool->exit_num > 0)
{
pool->exit_num--;
if (pool->living_num > pool->min_num)
{
pool->living_num--;
cout << "Thread_ID :" << std::this_thread::get_id() << " exit..... " << endl;
for (auto &t : pool->threadIDs) // detach our own slot: a finished thread stays joinable, and move-assigning over a joinable std::thread calls std::terminate
{
if (t.get_id() == std::this_thread::get_id())
{
t.detach();
break;
}
}
lock.unlock(); // the lock would be released on return anyway, but unlocking as soon as we are done with it is a good habit
return;
}
}
}
if (pool->shutdown) // the pool is being torn down
{
cout << "Thread_ID :" << std::this_thread::get_id() << " exit..... " << endl;
return;
}
Task task = pool->task_queue.front();
pool->task_queue.pop(); // FIFO queue
pool->busy_num++;
lock.unlock();
cout << "Thread_ID :" << std::this_thread::get_id() << " starting work..... " << endl;
task.function(task.arg);
free(task.arg); // arguments are heap-allocated by the producer and released here
task.arg = nullptr;
pool->mutexPool.lock(); // re-lock just for the bookkeeping
cout << "Thread_ID :" << std::this_thread::get_id() << " ending work..... " << endl;
pool->busy_num--;
pool->mutexPool.unlock();
}
}
public:
Threadpool(int min, int max)
{
// the pool is an assembly line for handling tasks; the socket code still
// greets the customers: it accepts connections and hands each request to
// the pool (the kitchen) to be processed
this->min_num = min;
this->max_num = max;
this->busy_num = 0;
this->living_num = min;
this->exit_num = 0;
shutdown = false;
threadIDs.resize(max);
for (int i = 0; i < min; i++)
{
threadIDs[i] = thread(worker, this);
}
managerID = thread(manager, this); // start the manager after the slots exist
}
void Add(Task task)
{
unique_lock<mutex> lock(mutexPool);
if (shutdown)
{
return;
}
task_queue.push(task);
cond.notify_all();
}
void Add(callback function, void *arg)
{
unique_lock<mutex> lock(mutexPool);
if (shutdown)
{
return;
}
task_queue.push(Task(function, arg));
cond.notify_all();
}
~Threadpool()
{
shutdown = true;
if (managerID.joinable()) // join blocks the calling thread until the manager finishes
managerID.join();
cond.notify_all(); // wake every waiting worker so it can observe shutdown and return
for (int i = 0; i < max_num; i++)
{
if (threadIDs[i].joinable())
threadIDs[i].join();
}
}
int Busynum()
{
unique_lock<mutex> lock(mutexPool);
return this->busy_num;
}
int Livenum()
{
unique_lock<mutex> lock(mutexPool);
return this->living_num;
}
};
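// A minimal usage sketch of the pool on its own (hello and its argument are
// hypothetical, not part of the server below):
//
//     void hello(void *arg) { cout << *(int *)arg << endl; }
//     Threadpool pool(3, 10);
//     int *n = (int *)malloc(sizeof(int));
//     *n = 42;
//     pool.Add(hello, n); // a worker runs hello(n), then frees n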
int main(int argc, char *argv[])
{
int ret;
if (argc < 2)
{
cout << "please input port" << endl;
return 0;
}
Socket_ server_socket(atoi(argv[1])); // take the port from the command line
server_socket.socket_init(); // create and initialize the listening socket
int lfd = server_socket.get_lfd();
Threadpool pool(3, 10); // min/max sizes are illustrative; ready events are handed to the pool below
int epfd = epoll_create(300);
if (epfd == -1)
Perror("epoll_create error");
epoll_event server_event;
server_event.data.fd = lfd;
server_event.events = EPOLLIN | EPOLLET; // edge-triggered listen event
ret = epoll_ctl(epfd, EPOLL_CTL_ADD, lfd, &server_event);
if (ret == -1)
Perror("epoll_ctl error");
epoll_event events[256]; // array epoll_wait fills with ready events
memset(events, 0, sizeof(epoll_event) * 256);
while (1) // event loop
{
int len = epoll_wait(epfd, events, 256, -1); // maxevents is the element count, not sizeof(events)
for (int i = 0; i < len; i++)
{
if (events[i].data.fd == lfd) // new connection(s) on the listening socket
{
accept_arg *temp = (accept_arg *)malloc(sizeof(accept_arg)); // heap-allocated: the worker frees it after the task runs
temp->epfd = epfd;
temp->lfd = lfd;
pool.Add(function_ALL::accept_, temp);
}
else if (events[i].events & EPOLLIN) // readable; EPOLLET is a behavior flag and never appears in the returned mask
{
accept_arg *temp = (accept_arg *)malloc(sizeof(accept_arg));
temp->epfd = epfd;
temp->fd = events[i].data.fd;
pool.Add(function_ALL::read_, temp);
}
else if (events[i].events & EPOLLOUT) // writable: not handled yet
{
;
}
}
}
return 0;
}
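To smoke-test the server, build it with g++ -std=c++11 -pthread and start it with a port number; any TCP client that sends a line and reads the echo will do. Below is a minimal client sketch for that purpose; the file name test_client.cpp, the loopback address, and the message are illustrative assumptions, not part of the server code above.
// test_client.cpp -- minimal echo-test client (illustrative)
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h>
#include <cstdio>
#include <cstdlib>
#include <cstring>
int main(int argc, char *argv[])
{
if (argc < 2)
{
printf("usage: %s port\n", argv[0]);
return 0;
}
int fd = socket(AF_INET, SOCK_STREAM, 0);
sockaddr_in addr;
memset(&addr, 0, sizeof(addr));
addr.sin_family = AF_INET;
addr.sin_port = htons(atoi(argv[1]));
inet_pton(AF_INET, "127.0.0.1", &addr.sin_addr); // assumes the server runs locally
if (connect(fd, (sockaddr *)&addr, sizeof(addr)) == -1)
{
perror("connect error");
return 1;
}
const char *msg = "hello epoll\n";
send(fd, msg, strlen(msg), 0); // the server should echo these bytes back
char buf[1024] = {0};
int len = recv(fd, buf, sizeof(buf) - 1, 0);
if (len > 0)
printf("echoed back: %s", buf);
close(fd);
return 0;
}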