eis/third_party/dsf/include/drsdkBK/thread_pool.h

/* Filename: thread_pool.h
 * Copyright: Shanghai Baosight Software Co., Ltd.
 *
 * Description: thread pool for async tasks
 *
 * @author: wuzheqiang
 * @version: 09/10/2024 wuzheqiang Initial Version
 **************************************************************/
#ifndef DRSDK_THREAD_POOL_H
#define DRSDK_THREAD_POOL_H
#include <pthread.h> // pthread_setname_np (Linux-specific, not declared by any standard C++ header)

#include <atomic>
#include <condition_variable>
#include <cstdint>
#include <functional>
#include <future>
#include <memory>
#include <mutex>
#include <queue>
#include <stdexcept>
#include <string>
#include <thread>
#include <vector>

namespace dsfapi
{
class ThreadPool
{
  public:
    /**
     * @brief the constructor launches the requested number of worker threads
     * @param [in] nThreads number of worker threads; 0 or anything above 16 falls back to 16
     * @version 09/10/2024 wuzheqiang Initial Version
     */
    ThreadPool(uint32_t nThreads);

    /**
     * @brief the destructor joins all threads
     * @version 09/10/2024 wuzheqiang Initial Version
     */
    ~ThreadPool();

    /**
     * @brief add a new work item to the pool
     * @param [in] assign key used to select a worker queue (taken modulo the thread count)
     * @param [in] f function to be executed
     * @param [in] args function arguments
     * @return a future for the task result on success; a default-constructed future if the pool has stopped
     * @version 09/10/2024 wuzheqiang Initial Version
     */
    template <class F, class... Args>
    auto Enqueue(const int assign, F &&f, Args &&...args) -> std::future<typename std::result_of<F(Args...)>::type>;

    /**
     * @brief check whether the queue selected by `assign` has reached its fixed capacity of 100 pending tasks
     */
    bool IsFull(const int assign)
    {
        static const uint32_t MAX_SIZE = 100;
        std::unique_lock<std::mutex> lock(m_QueueMutex[assign % m_nThreads]);
        return m_QueTasks[assign % m_nThreads].size() >= MAX_SIZE;
    }

  private:
    // need to keep track of threads so we can join them
    uint32_t m_nThreads = 0;
    std::vector<std::thread> m_vecWorkers;
    // one task queue per worker thread
    std::unique_ptr<std::queue<std::function<void()>>[]> m_QueTasks;
    // synchronization: one mutex and one condition variable per queue
    std::unique_ptr<std::mutex[]> m_QueueMutex;
    std::unique_ptr<std::condition_variable[]> m_QueueCv;
    std::atomic_bool m_bStop = {false};
};
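
/* Usage sketch (illustrative only, not part of the shipped SDK): tasks that
 * share an `assign` key land on the same worker queue; the lambda and values
 * below are hypothetical.
 *
 *     dsfapi::ThreadPool pool(4);
 *     if (!pool.IsFull(0))
 *     {
 *         auto fut = pool.Enqueue(0, [](int a, int b) { return a + b; }, 1, 2);
 *         if (fut.valid())
 *         {
 *             int sum = fut.get(); // 3
 *         }
 *     }
 */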

inline ThreadPool::ThreadPool(uint32_t nThreads) : m_nThreads(nThreads)
{
    // clamp the worker count: 0 (or anything above 16) falls back to 16
    if (m_nThreads == 0 || m_nThreads > 16)
    {
        m_nThreads = 16;
    }
    m_bStop.store(false);
    m_QueTasks =
        std::unique_ptr<std::queue<std::function<void()>>[]>(new std::queue<std::function<void()>>[m_nThreads]);
    m_QueueMutex = std::unique_ptr<std::mutex[]>(new std::mutex[m_nThreads]);
    m_QueueCv = std::unique_ptr<std::condition_variable[]>(new std::condition_variable[m_nThreads]);
    for (uint32_t i = 0; i < m_nThreads; ++i)
    {
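        // each worker services only its own queue and exits once the pool is
        // stopped and that queue has been drained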
        std::thread td([this, i] {
            // loop until stopped and drained; the check below is the sole exit
            while (true)
            {
                std::function<void()> task;
                {
                    std::unique_lock<std::mutex> lock(this->m_QueueMutex[i]);
                    this->m_QueueCv[i].wait(lock,
                                            [this, i] { return this->m_bStop || !this->m_QueTasks[i].empty(); });
                    if (this->m_bStop && this->m_QueTasks[i].empty())
                        return;
                    task = std::move(this->m_QueTasks[i].front());
                    this->m_QueTasks[i].pop();
                }
                try
                {
                    task(); // run outside the lock so producers can keep enqueueing
                }
                catch (...)
                {
                    // swallow task exceptions so a worker thread never dies
                }
            }
        });
        // name the thread for easier debugging (Linux truncates names to 15 characters)
        std::string name = "pool_" + std::to_string(i);
        pthread_setname_np(td.native_handle(), name.c_str());
        m_vecWorkers.emplace_back(std::move(td));
    }
}

inline ThreadPool::~ThreadPool()
{
    if (m_bStop.exchange(true))
    {
        return;
    }
    for (uint32_t i = 0; i < m_nThreads; ++i)
    {
        // take the queue lock before notifying so a worker cannot check the
        // wait predicate, miss the stop flag, and then sleep forever
        std::lock_guard<std::mutex> lock(m_QueueMutex[i]);
        m_QueueCv[i].notify_all();
    }
    for (std::thread &worker : m_vecWorkers)
    {
        worker.join();
    }
}

template <class F, class... Args>
auto ThreadPool::Enqueue(const int assign, F &&f, Args &&...args)
    -> std::future<typename std::result_of<F(Args...)>::type>
{
    using return_type = typename std::result_of<F(Args...)>::type;
    // wrap the bound call in a packaged_task so the caller gets a future
    auto task = std::make_shared<std::packaged_task<return_type()>>(
        std::bind(std::forward<F>(f), std::forward<Args>(args)...));
    auto work_id = assign % m_nThreads;
    std::future<return_type> res = task->get_future();
    {
        std::unique_lock<std::mutex> lock(m_QueueMutex[work_id]);
        // don't allow enqueueing after stopping the pool; callers can detect
        // this through the invalid (default-constructed) future
        if (m_bStop)
            return std::future<return_type>();
        m_QueTasks[work_id].emplace([task]() { (*task)(); });
    }
    m_QueueCv[work_id].notify_one();
    return res;
}
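
/* Ordering sketch (illustrative): two tasks enqueued with the same key map to
 * the same queue and worker, so they run in submission order:
 *
 *     pool.Enqueue(42, [] { step1(); });
 *     pool.Enqueue(42, [] { step2(); }); // runs only after step1 finishes
 *
 * `pool`, `step1`, and `step2` are hypothetical names used only here.
 */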
} // namespace dsfapi
#endif // DRSDK_THREAD_POOL_H