/* Filename: thread_pool.h
 * Copyright: Shanghai Baosight Software Co., Ltd.
 *
 * Description: thread pool for async tasks
 *
 * @author: wuzheqiang
 * @version: 09/10/2024 wuzheqiang Initial Version
 **************************************************************/
#ifndef DRSDK_THREAD_POOL_H
#define DRSDK_THREAD_POOL_H

#include <atomic>
#include <condition_variable>
#include <cstdint>
#include <functional>
#include <future>
#include <memory>
#include <mutex>
#include <pthread.h>
#include <queue>
#include <string>
#include <thread>
#include <vector>

namespace dsfapi
{
class ThreadPool
{
  public:
    /**
     * @brief the constructor just launches some amount of workers
     * @param [in] nThreads threads number
     * @version 09/10/2024 wuzheqiang Initial Version
     */
    ThreadPool(uint32_t nThreads);

    /**
     * @brief the destructor joins all threads
     * @version 09/10/2024 wuzheqiang Initial Version
     */
    ~ThreadPool();

    /**
     * @brief add new work item to the pool
     * @param [in] assign assign the thread (tasks with the same value go to the same worker)
     * @param [in] f function to be executed
     * @param [in] args function arguments
     * @return future for the task result if enqueued successfully
     * @version 09/10/2024 wuzheqiang Initial Version
     */
    template <class F, class... Args>
    auto Enqueue(const int assign, F &&f, Args &&...args) -> std::future<typename std::result_of<F(Args...)>::type>;

    /**
     * @brief check whether the queue assigned to this key already holds MAX_SIZE tasks
     * @param [in] assign assign the thread
     */
    bool IsFull(const int assign)
    {
        static const uint32_t MAX_SIZE = 100;
        std::unique_lock<std::mutex> lock(m_QueueMutex[assign % m_nThreads]);
        return m_QueTasks[assign % m_nThreads].size() >= MAX_SIZE;
    }

  private:
    // need to keep track of threads so we can join them
    uint32_t m_nThreads = 0;
    std::vector<std::thread> m_vecWorkers;
    // the task queues, one per worker
    std::unique_ptr<std::queue<std::function<void()>>[]> m_QueTasks;
    // synchronization, one mutex and condition variable per worker
    std::unique_ptr<std::mutex[]> m_QueueMutex;
    std::unique_ptr<std::condition_variable[]> m_QueueCv;
    std::atomic_bool m_bStop = {false};
};

inline ThreadPool::ThreadPool(uint32_t nThreads) : m_nThreads(nThreads)
{
    if (m_nThreads == 0 || m_nThreads > 16)
    {
        m_nThreads = 16;
    }
    m_bStop.store(false);
    m_QueTasks =
        std::unique_ptr<std::queue<std::function<void()>>[]>(new std::queue<std::function<void()>>[m_nThreads]);
    m_QueueMutex = std::unique_ptr<std::mutex[]>(new std::mutex[m_nThreads]);
    m_QueueCv = std::unique_ptr<std::condition_variable[]>(new std::condition_variable[m_nThreads]);

    for (uint32_t i = 0; i < m_nThreads; ++i)
    {
        // worker loop: wait on this worker's own queue and run tasks until stop is requested
        std::thread td([this, i] {
            while (!m_bStop)
            {
                std::function<void()> task;
                {
                    std::unique_lock<std::mutex> lock(this->m_QueueMutex[i]);
                    this->m_QueueCv[i].wait(lock,
                                            [this, i] { return this->m_bStop || !this->m_QueTasks[i].empty(); });
                    // drain remaining tasks before exiting on stop
                    if (this->m_bStop && this->m_QueTasks[i].empty())
                        return;
                    task = std::move(this->m_QueTasks[i].front());
                    this->m_QueTasks[i].pop();
                }
                try
                {
                    task();
                }
                catch (...)
                {
                    // swallow exceptions so a failing task does not kill the worker
                }
            }
        });
        std::string name = "pool_" + std::to_string(i);
        pthread_setname_np(td.native_handle(), name.c_str());
        m_vecWorkers.emplace_back(std::move(td));
    }
}

inline ThreadPool::~ThreadPool()
{
    if (m_bStop.exchange(true))
    {
        return;
    }
    for (uint32_t i = 0; i < m_nThreads; ++i)
    {
        m_QueueCv[i].notify_all();
    }
    for (std::thread &worker : m_vecWorkers)
    {
        worker.join();
    }
}

template <class F, class... Args>
auto ThreadPool::Enqueue(const int assign, F &&f, Args &&...args)
    -> std::future<typename std::result_of<F(Args...)>::type>
{
    using return_type = typename std::result_of<F(Args...)>::type;

    auto task = std::make_shared<std::packaged_task<return_type()>>(
        std::bind(std::forward<F>(f), std::forward<Args>(args)...));
    auto work_id = assign % m_nThreads;
    std::future<return_type> res = task->get_future();
    {
        std::unique_lock<std::mutex> lock(m_QueueMutex[work_id]);

        // don't allow enqueueing after stopping the pool
        if (m_bStop)
            return std::future<return_type>();

        m_QueTasks[work_id].emplace([task]() { (*task)(); });
    }
    m_QueueCv[work_id].notify_one();
    return res;
}

} // namespace dsfapi

#endif // DRSDK_THREAD_POOL_H
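
/* Usage sketch (illustrative only, not part of the header): a minimal caller
 * showing how Enqueue returns a std::future and how the `assign` key pins
 * related tasks to one worker. The pool size (4) and the key value (7) are
 * arbitrary example choices, not values mandated by this API.
 *
 *   #include "thread_pool.h"
 *   #include <iostream>
 *
 *   int main()
 *   {
 *       dsfapi::ThreadPool pool(4);
 *
 *       // Tasks sharing the same `assign` value map to the same worker
 *       // (assign % nThreads), so they execute in submission order.
 *       std::future<int> f = pool.Enqueue(7, [](int a, int b) { return a + b; }, 1, 2);
 *
 *       // Enqueue returns an empty future if the pool was already stopped.
 *       if (f.valid())
 *       {
 *           std::cout << f.get() << std::endl; // prints 3
 *       }
 *       return 0;
 *   }
 */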