Thread Pool (ThreadPool)
A half-sync/half-async thread pool (simplified version), implemented in C++11, with a detailed walkthrough
The synchronous queue
SynchronousQueue.hpp
#pragma once
#include <list>
#include <mutex>
#include <thread>
#include <condition_variable>
#include <iostream>
using namespace std;

template<typename T>
class SyncQueue
{
private:
    std::list<T> m_queue;                  // buffer holding the queued tasks
    std::mutex m_mutex;                    // protects m_queue and m_needStop
    std::condition_variable m_notEmpty;    // signaled when the queue becomes non-empty
    std::condition_variable m_notFull;     // signaled when the queue is no longer full
    int m_maxSize;                         // upper bound on the number of queued tasks
    bool m_needStop;                       // stop flag
public:
    SyncQueue(int maxSize) : m_maxSize(maxSize), m_needStop(false) {}

    void Put(const T& x)
    {
        Add(x);
    }

    void Put(T&& x)
    {
        Add(std::forward<T>(x));
    }

    // Take everything currently in the queue in one go.
    void Take(std::list<T>& list)
    {
        std::unique_lock<std::mutex> locker(m_mutex);
        m_notEmpty.wait(locker, [this] { return m_needStop || NotEmpty(); });
        if (m_needStop)
        {
            return;
        }
        list = std::move(m_queue);
        m_notFull.notify_one();    // the queue is now empty, wake a waiting producer
    }

    // Take a single element from the front of the queue.
    void Take(T& t)
    {
        std::unique_lock<std::mutex> locker(m_mutex);
        m_notEmpty.wait(locker, [this] { return m_needStop || NotEmpty(); });
        if (m_needStop)
            return;
        t = m_queue.front();
        m_queue.pop_front();
        m_notFull.notify_one();
    }

    void Stop()
    {
        {
            std::lock_guard<std::mutex> locker(m_mutex);
            m_needStop = true;
        }
        m_notFull.notify_all();
        m_notEmpty.notify_all();
    }

    bool Empty()
    {
        std::lock_guard<std::mutex> locker(m_mutex);
        return m_queue.empty();
    }

    bool Full()
    {
        std::lock_guard<std::mutex> locker(m_mutex);
        return m_queue.size() == m_maxSize;
    }

    size_t Size()
    {
        std::lock_guard<std::mutex> locker(m_mutex);
        return m_queue.size();
    }

    int Count()
    {
        std::lock_guard<std::mutex> locker(m_mutex);
        return m_queue.size();
    }
private:
    bool NotFull() const
    {
        bool full = (m_queue.size() >= m_maxSize);
        if (full)
        {
            cout << "the buffer is full, need to wait" << endl;
        }
        return !full;
    }

    bool NotEmpty() const
    {
        bool empty = m_queue.empty();
        if (empty)
            cout << "the buffer is empty, need to wait, thread id: " << std::this_thread::get_id() << endl;
        return !empty;
    }

    template<typename F>
    void Add(F&& x)
    {
        std::unique_lock<std::mutex> locker(m_mutex);
        m_notFull.wait(locker, [this] { return m_needStop || NotFull(); });
        if (m_needStop)
        {
            return;
        }
        m_queue.push_back(std::forward<F>(x));
        m_notEmpty.notify_one();
    }
};
The Take function:
void Take(std::list<T>& list)
{
    std::unique_lock<std::mutex> locker(m_mutex);
    m_notEmpty.wait(locker, [this] { return m_needStop || NotEmpty(); });
    if (m_needStop)
    {
        return;
    }
    list = std::move(m_queue);
    m_notFull.notify_one();
}
The function first creates a std::unique_lock, which acquires m_mutex, and then waits on m_notEmpty with the predicate [this] { return m_needStop || NotEmpty(); }.
The predicate has two parts: the stop flag and the not-empty condition.
- When neither condition holds, the condition variable releases the mutex and the thread blocks, waiting until another thread calls notify_one() or notify_all().
- When m_needStop is set, the predicate is satisfied and the function simply returns.
- When NotEmpty() is satisfied, all tasks waiting in the queue are moved into the caller's list in one go; the lock is then released and one thread waiting to add tasks is woken.
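For reference, the predicate overload of condition_variable::wait behaves like the loop below. This is only a sketch of the standard-specified behavior, not code from this implementation; pred stands for the lambda passed to wait. The mutex is released atomically while blocking and re-acquired before the predicate is re-checked, which also makes the wait robust against spurious wakeups.

// Equivalent behavior of m_notEmpty.wait(locker, pred), sketch only.
while (!pred())                // pred is [this] { return m_needStop || NotEmpty(); }
{
    m_notEmpty.wait(locker);   // atomically unlocks m_mutex and blocks;
                               // re-locks m_mutex before returning
}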
The Add function:
template<typename F>
void Add(F&& x)
{
    std::unique_lock<std::mutex> locker(m_mutex);
    m_notFull.wait(locker, [this] { return m_needStop || NotFull(); });
    if (m_needStop)
    {
        return;
    }
    m_queue.push_back(std::forward<F>(x));
    m_notEmpty.notify_one();
}
This mirrors Take: the thread waits on m_notFull until either the stop flag is set or the queue is not full.
- Once the predicate is satisfied, the function either returns (stop requested) or pushes the new element and then wakes one thread that is waiting to take a task. A standalone usage sketch of the queue follows below.
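To make the interplay of Put and Take concrete, here is a minimal, self-contained usage sketch of SyncQueue on its own (not part of the thread pool; the capacity of 2 and the element type int are arbitrary choices made for the demo). One producer thread fills the queue and blocks when it is full, while a consumer drains it and wakes the producer again.

#include "SynchronousQueue.hpp"

int main()
{
    SyncQueue<int> queue(2);            // capacity 2, chosen only for the demo

    std::thread producer([&queue] {
        for (int i = 0; i < 5; ++i)
            queue.Put(i);               // blocks in Add() while the queue is full
    });

    std::thread consumer([&queue] {
        for (int i = 0; i < 5; ++i)
        {
            int value = 0;
            queue.Take(value);          // blocks in Take() while the queue is empty
            std::cout << "took " << value << std::endl;
        }
    });

    producer.join();
    consumer.join();
}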
The Stop() function:
void Stop()
{
    {
        std::lock_guard<std::mutex> locker(m_mutex);
        m_needStop = true;
    }
    m_notFull.notify_all();
    m_notEmpty.notify_all();
}
Stop first acquires the mutex, sets the stop flag to true, and only then wakes all waiting threads. Note that for thread safety the lock must be taken before m_needStop is set: a waiter holds m_mutex from the moment it evaluates the wait predicate until it atomically releases the lock inside wait(), so setting the flag under the same lock guarantees that no thread can check the predicate, miss the flag, and then block after the notification has already been delivered.
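To see why the lock matters, compare with the following hypothetical, incorrect variant (not part of the implementation above). Writing the flag without the lock lets the write and the notification slip into the window between a waiter's predicate check and its call to wait(), so the wakeup is lost.

// Hypothetical, incorrect variant for comparison only: setting the flag
// without holding m_mutex races with the predicate check inside wait().
void StopUnsafe()
{
    m_needStop = true;         // data race: a waiter may be reading the flag right now
    m_notFull.notify_all();    // can fire after a waiter saw m_needStop == false
    m_notEmpty.notify_all();   // but before it blocked, so the wakeup is lost
}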
The thread pool
A complete thread pool consists of three layers:
- a synchronous service layer
- a queuing layer
- an asynchronous service layer
This is a producer-consumer arrangement. The synchronous layer is the producer: it keeps dropping new tasks into the queuing layer, so the pool must expose an interface through which producers can add tasks.
The asynchronous layer is the consumer: threads created in advance by the pool process the tasks held in the synchronous queue.
The queuing layer is the synchronous queue itself. It makes the producers' and consumers' access to tasks safe, and it also bounds the number of pending tasks so that an unlimited backlog cannot build up.
In addition, the user must be able to start and stop the thread pool, so it can be switched on and off when needed.
The implementation:
ThreadPool.hpp
#pragma once
#include <list>
#include <thread>
#include <functional>
#include <memory>
#include <atomic>
#include <mutex>
#include "SynchronousQueue.hpp"

constexpr int MaxTaskCount = 100;

class ThreadPool
{
public:
    using Task = std::function<void()>;

    ThreadPool(int numThreads = std::thread::hardware_concurrency()) : m_queue(MaxTaskCount)
    {
        Start(numThreads);
    }

    ~ThreadPool()
    {
        // Stop the pool if it has not been stopped explicitly.
        Stop();
    }

    void Stop()
    {
        std::call_once(m_flag, [this] { StopThreadGroup(); });
    }

    void AddTask(Task&& task)
    {
        m_queue.Put(std::forward<Task>(task));
    }

    void AddTask(const Task& task)
    {
        m_queue.Put(task);
    }

    void Start(int numThreads)
    {
        m_running = true;
        for (int i = 0; i < numThreads; i++)
        {
            m_threadgroup.push_back(std::make_shared<std::thread>(&ThreadPool::RunInThread, this));
        }
    }

    void RunInThread()
    {
        while (m_running)
        {
            std::list<Task> list;
            m_queue.Take(list);
            for (auto& task : list)
            {
                if (!m_running)
                {
                    return;
                }
                task();
            }
        }
    }

    void StopThreadGroup()
    {
        m_queue.Stop();        // wake every thread blocked in the queue
        m_running = false;
        for (auto& thread : m_threadgroup)
        {
            if (thread)
                thread->join();
        }
        m_threadgroup.clear();
    }
private:
    std::list<std::shared_ptr<std::thread>> m_threadgroup; // the worker threads that process tasks
    SyncQueue<Task> m_queue;                               // the synchronous queue
    std::atomic<bool> m_running;                           // whether workers should keep running
    std::once_flag m_flag;                                 // ensures StopThreadGroup runs only once
};
ThreadPool(int numThreads = std::thread::hardware_concurrency())
The constructor defaults the thread count to the number of threads the hardware supports. It then calls Start(numThreads), which sets the m_running flag to true and puts numThreads thread pointers into the pool.
AddTask is what the test program uses to add tasks (functions) to the synchronous queue, and RunInThread() takes tasks out of the queue and executes them.
The test program
main.cpp
#include "ThreadPool.hpp"
#include <chrono>
#include <cstdio>

void TestThdPool()
{
    ThreadPool pool;    // the constructor already starts hardware_concurrency() workers
    pool.Start(2);      // this adds two more workers on top of those

    std::thread thd1([&pool]
    {
        for (int i = 0; i < 10; ++i)
        {
            auto thdId = this_thread::get_id();
            pool.AddTask([thdId] {
                cout << "thread id of synchronous-layer thread 1: " << thdId << endl;
            });
        }
    });

    std::thread thd2([&pool]
    {
        for (int i = 0; i < 10; ++i)
        {
            auto thdId = this_thread::get_id();
            pool.AddTask([thdId]
            {
                cout << "thread id of synchronous-layer thread 2: " << thdId << endl;
            });
        }
    });

    this_thread::sleep_for(std::chrono::seconds(2));
    getchar();
    pool.Stop();
    thd1.join();
    thd2.join();
}

int main()
{
    TestThdPool();
}
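Because Task is std::function<void()>, work that produces a value has to be adapted into a void() callable before it can be handed to AddTask. One way, sketched below under the assumption that it is compiled together with ThreadPool.hpp above in place of the test program, is to capture a std::promise in the task and wait on the matching std::future in the caller.

#include "ThreadPool.hpp"
#include <future>

int main()
{
    ThreadPool pool(2);                       // two worker threads, arbitrary choice

    std::promise<int> result;
    auto future = result.get_future();

    // Wrap a value-producing computation into the pool's void() task type.
    pool.AddTask([&result] { result.set_value(1 + 2); });

    std::cout << "result = " << future.get() << std::endl;   // blocks until the task ran, prints 3
    pool.Stop();
}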