线程池的作用
避免大量的线程的创建和销毁,节约系统资源
线程池的分类
线程池的实现分为两类:
1)半同步半异步的线程池
2)领导者和追随者模式
参考:
半同步半异步的线程池
共分为3层:第一层是同步服务层,处理来自上层的任务请求;第二层是同步队列,缓存待处理的任务;第三层是异步服务层,由线程池中的线程取出任务并执行。
同步队列
功能:存储待处理的任务,交给线程池来处理。
实现:锁保证线程同步,条件变量实现线程通信。同步队列空时消费线程等待,不空时通知一个线程去处理;同步队列满时生产线程等待,不满时通知上层去添加新的任务。
#pragma once
#include <condition_variable>
#include <iostream>
#include <list>
#include <mutex>
#include <thread>
template<typename T>
class SyncQueue {
public:
	// Bounded, thread-safe FIFO queue (the "sync queue" layer of the pool).
	// Producers block while the queue is full, consumers block while it is
	// empty; Stop() wakes every waiter so threads can shut down cleanly.
	SyncQueue(const int maxSize) :m_maxSize(maxSize), m_needStop(false) {}

	// Precondition: caller holds m_mutex (this is used inside wait predicates).
	bool notFull() {
		// cast avoids a signed/unsigned comparison warning (m_maxSize is int)
		bool full = m_queue.size() >= static_cast<size_t>(m_maxSize);
		if (full)
			std::cout << "缓冲区满了,不能添加了" << std::endl;
		return !full;
	}

	// Precondition: caller holds m_mutex.
	bool notEmpty() {
		bool empty = m_queue.empty();
		return !empty;  // fixed: original wrote `!empty()`, calling a bool as a function
	}

	// Blocks until there is room (or Stop was requested), then enqueues x.
	// F&& is a forwarding reference, so lvalues are copied and rvalues moved.
	template<typename F>
	void Add(F&& x) {
		std::unique_lock<std::mutex> locker(m_mutex);
		m_notFull.wait(locker, [this] {return m_needStop || notFull(); });
		if (m_needStop)
			return;
		m_queue.push_back(std::forward<F>(x));  // fixed: std::forward requires <F>
		m_notEmpty.notify_one();
	}

	// Enqueue by rvalue (moves the element in).
	void put(T&& x) {
		Add(std::move(x));  // fixed: original `std::forward(x)` did not compile
	}

	// Enqueue by const lvalue (copies the element in). Backward-compatible
	// overload: callers such as ThreadPool::AddTask(const Task&) pass a
	// const reference, which the T&& overload alone cannot bind.
	void put(const T& x) {
		Add(x);
	}

	// Blocks until the queue is non-empty (or Stop), then moves ALL queued
	// elements into `list` in one shot. On Stop, `list` is left untouched.
	void Take(std::list<T>& list) {
		std::unique_lock<std::mutex> locker(m_mutex);
		m_notEmpty.wait(locker, [this] {return m_needStop || notEmpty(); });
		if (m_needStop) {
			return;
		}
		list = std::move(m_queue);
		m_notFull.notify_one();
	}

	// Blocks until the queue is non-empty (or Stop), then pops the front
	// element into `t`. On Stop, `t` is left untouched.
	void Take(T& t) {
		std::unique_lock<std::mutex> locker(m_mutex);
		m_notEmpty.wait(locker, [this] {return m_needStop || notEmpty(); });
		if (m_needStop) {
			return;
		}
		t = std::move(m_queue.front());
		m_queue.pop_front();
		m_notFull.notify_one();
	}

	// Requests shutdown: sets the flag under the lock, then wakes every
	// waiter on both condition variables so blocked threads can exit.
	void Stop() {
		{
			std::lock_guard<std::mutex> locker(m_mutex);
			m_needStop = true;
		}
		m_notFull.notify_all();
		m_notEmpty.notify_all();
	}

	bool Empty() {
		std::lock_guard<std::mutex> locker(m_mutex);
		return m_queue.empty();
	}

	bool Full() {
		std::lock_guard<std::mutex> locker(m_mutex);
		return m_queue.size() == static_cast<size_t>(m_maxSize);
	}

	size_t Size() {
		std::lock_guard<std::mutex> locker(m_mutex);
		return m_queue.size();
	}

	// Alias of Size(), kept for interface compatibility.
	size_t Count() {
		std::lock_guard<std::mutex> locker(m_mutex);
		return m_queue.size();
	}
private:
	std::list<T> m_queue;               // guarded by m_mutex
	std::mutex m_mutex;
	std::condition_variable m_notEmpty; // signaled when an element is added
	std::condition_variable m_notFull;  // signaled when an element is removed
	int m_maxSize;                      // capacity bound
	bool m_needStop;                    // shutdown flag, guarded by m_mutex
};
线程池实现
#pragma once
#include"syncQueue.h"
#include<functional>
#include<atomic>
// Upper bound on queued (not yet executing) tasks.
const int MaxTaskCount = 10;

// Half-sync/half-async thread pool: AddTask() is the synchronous service
// layer, SyncQueue<Task> is the queuing layer, and the worker threads form
// the asynchronous service layer.
class ThreadPool {
public:
	using Task = std::function<void()>;

	// Starts `numThreads` workers (defaults to the hardware concurrency).
	ThreadPool(int numThreads = std::thread::hardware_concurrency()) :m_queue(MaxTaskCount) {
		Start(numThreads);
	}

	~ThreadPool()
	{
		Stop();
	}

	// Idempotent shutdown (call_once): drains queued tasks, then joins workers.
	void Stop() {
		std::call_once(m_flag, [this] {StopThreadGroup(); });
	}

	void AddTask(Task&& task) {
		m_queue.put(std::forward<Task>(task));
	}

	void AddTask(const Task& task) {
		m_queue.put(task);
	}
private:
	void Start(int numThreads) {
		m_runing = true;
		for (int i = 0; i < numThreads; i++) {
			m_threadgroup.push_back(std::make_shared<std::thread>(&ThreadPool::RunInThread, this));
		}
	}

	void StopThreadGroup() {
		// Drain: wait until the workers have consumed every queued task.
		// Fixed: the original spun on the unlocked notEmpty(); Empty() takes
		// the queue mutex, and yield() avoids burning a core while waiting.
		while (!m_queue.Empty())
			std::this_thread::yield();
		// Fixed ordering: clear the run flag BEFORE waking the workers, so a
		// thread woken out of Take() by Stop() observes m_runing == false and
		// never invokes an empty/stale std::function.
		m_runing = false;
		m_queue.Stop();
		for (auto& thread : m_threadgroup) {  // by reference: no shared_ptr refcount churn
			if (thread && thread->joinable())
				thread->join();
		}
		m_threadgroup.clear();
	}

	// Worker loop: take one task at a time and execute it until shutdown.
	void RunInThread() {
		while (m_runing) {
			Task task;  // fresh each iteration so a stale task can never re-run
			m_queue.Take(task);
			if (!m_runing)
				return;
			if (task)   // Take() leaves `task` empty when the queue was stopped
				task();
		}
	}

	std::list<std::shared_ptr<std::thread>> m_threadgroup;  // worker threads
	SyncQueue<Task> m_queue;                                // pending tasks
	std::atomic_bool m_runing;                              // worker run flag
	std::once_flag m_flag;                                  // guards StopThreadGroup
};
830

被折叠的 条评论
为什么被折叠?



