The mutex_t mechanism in lk

Besides event, lk implements a mechanism for threads to protect shared resources: mutex_t. It is mainly used to guard global data, whereas event is mainly used for synchronization between multiple threads.


The whole mechanism is implemented with just five functions (mutex_init, mutex_acquire, mutex_acquire_timeout, mutex_release, mutex_destroy); let's walk through them one by one.
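Before looking at the implementation, here is a minimal usage sketch showing the intended pattern of guarding a global variable. This is hypothetical code: the header path and the shared counter are assumptions; only mutex_init/mutex_acquire/mutex_release come from the lk API discussed below.

#include <kernel/mutex.h>

static mutex_t counter_lock;    /* protects shared_counter */
static int shared_counter;      /* global data touched by several threads */

void counter_init(void)
{
	mutex_init(&counter_lock);
}

void counter_add(int delta)     /* may be called from multiple threads */
{
	mutex_acquire(&counter_lock);   /* blocks while another thread holds the lock */
	shared_counter += delta;
	mutex_release(&counter_lock);   /* wakes one waiter, if any */
}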


Like event, the mutex_t initialization function gives each mutex_t its own wait queue:
void mutex_init(mutex_t *m)
{
#if MUTEX_CHECK
//	ASSERT(m->magic != MUTEX_MAGIC);
#endif

	m->magic = MUTEX_MAGIC;
	m->count = 0;
	m->holder = 0;
	wait_queue_init(&m->wait);
}
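For completeness, wait_queue_init just sets up an empty queue. This is a sketch based on my understanding of the wait_queue_t fields used elsewhere in this article (magic, list, count), not a verbatim copy of the lk source:

void wait_queue_init(wait_queue_t *wait)
{
	wait->magic = WAIT_QUEUE_MAGIC;   /* marker checked by THREAD_CHECKS asserts */
	list_initialize(&wait->list);     /* no threads are blocked yet */
	wait->count = 0;
}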


The acquire function is shown below. When there is no contention (m->count is 1 after the increment) the wait queue is not used at all, and the mutex also records which thread currently holds it:
status_t mutex_acquire(mutex_t *m)
{
	status_t ret = NO_ERROR;

	if (current_thread == m->holder)
		panic("mutex_acquire: thread %p (%s) tried to acquire mutex %p it already owns.\n",
		      current_thread, current_thread->name, m);

	enter_critical_section();

	m->count++;
	if (unlikely(m->count > 1)) {
		/* someone else already holds the mutex: block on its wait queue */
		ret = wait_queue_block(&m->wait, INFINITE_TIME);
		if (ret < 0)
			goto err;
	}
	m->holder = current_thread;

err:
	exit_critical_section();

	return ret;
}


mutex_acquire_timeout works the same way as mutex_acquire; the only difference is that the blocking wait is given the caller-supplied timeout instead of INFINITE_TIME, roughly as sketched below.
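For reference, here is a simplified sketch of mutex_acquire_timeout, reconstructed from the mutex_acquire listing above rather than copied from the lk source; the ERR_TIMED_OUT branch reflects the assumption that a waiter which times out has to back out its count increment:

status_t mutex_acquire_timeout(mutex_t *m, time_t timeout)
{
	status_t ret = NO_ERROR;

	if (current_thread == m->holder)
		panic("mutex_acquire_timeout: thread %p (%s) tried to acquire mutex %p it already owns.\n",
		      current_thread, current_thread->name, m);

	enter_critical_section();

	m->count++;
	if (unlikely(m->count > 1)) {
		/* block, but only for as long as the caller allows */
		ret = wait_queue_block(&m->wait, timeout);
		if (ret < 0) {
			if (ret == ERR_TIMED_OUT)
				m->count--;   /* timed out: undo the acquire */
			goto err;
		}
	}
	m->holder = current_thread;

err:
	exit_critical_section();

	return ret;
}

The release path is mutex_release: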
status_t mutex_release(mutex_t *m)
{
	if (current_thread != m->holder)
		panic("mutex_release: thread %p (%s) tried to release mutex %p it doesn't own. owned by %p (%s)\n",
		      current_thread, current_thread->name, m, m->holder, m->holder ? m->holder->name : "none");

	enter_critical_section();

	m->holder = 0;
	m->count--;
	if (unlikely(m->count >= 1)) {
		/* release a thread */
//		dprintf("releasing thread\n");
		wait_queue_wake_one(&m->wait, true, NO_ERROR);
	}

	exit_critical_section();

	return NO_ERROR;
}
The implementation of mutex_release is also simple: it decrements m->count, which in the common (uncontended) case drops back to 0, so the if (unlikely(m->count >= 1)) branch is normally not taken. Only when other threads are still blocked on the mutex does it call wait_queue_wake_one to wake one of them.
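wait_queue_wake_one itself is not listed in this article. The following is a simplified sketch of what it does, based on my reading of the lk wait-queue code rather than a verbatim copy: pop one thread off the wait list, mark it THREAD_READY, hand it the wake-up status, and optionally reschedule so it can run right away.

int wait_queue_wake_one(wait_queue_t *wait, bool reschedule, status_t wait_queue_error)
{
	thread_t *t;
	int ret = 0;

	t = list_remove_head_type(&wait->list, thread_t, queue_node);
	if (t) {
		wait->count--;

		t->state = THREAD_READY;
		t->wait_queue_block_ret = wait_queue_error;   /* value wait_queue_block() will return */
		t->blocking_wait_queue = NULL;

		if (reschedule) {
			/* park the current thread so the woken one can compete for the CPU immediately */
			current_thread->state = THREAD_READY;
			insert_in_run_queue_head(current_thread);
		}
		insert_in_run_queue_head(t);
		if (reschedule)
			thread_resched();
		ret = 1;
	}

	return ret;
}

The last function is mutex_destroy: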




void mutex_destroy(mutex_t *m)
{
	enter_critical_section();

#if MUTEX_CHECK
	ASSERT(m->magic == MUTEX_MAGIC);
#endif

//	if (m->holder != 0 && current_thread != m->holder)
//		panic("mutex_destroy: thread %p (%s) tried to release mutex %p it doesn't own. owned by %p (%s)\n",
//		      current_thread, current_thread->name, m, m->holder, m->holder ? m->holder->name : "none");

	m->magic = 0;
	m->count = 0;
	wait_queue_destroy(&m->wait, true);
	exit_critical_section();
}


mutex_destroy destroys the mutex_t. Personally, I feel the function should first check whether m->count is 0; if it is, no thread can be waiting and the wait_queue_destroy call could be skipped.
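A minimal sketch of that suggestion (hypothetical; this is not how the lk source is written):

void mutex_destroy(mutex_t *m)
{
	enter_critical_section();

	m->magic = 0;
	/* only wake/destroy the wait queue if a thread could actually be blocked on it */
	if (m->count > 0)
		wait_queue_destroy(&m->wait, true);
	else
		m->wait.magic = 0;   /* still invalidate the queue */
	m->count = 0;

	exit_critical_section();
}

The wait_queue_destroy that mutex_destroy actually calls is short: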


void wait_queue_destroy(wait_queue_t *wait, bool reschedule)
{
#if THREAD_CHECKS
	ASSERT(wait->magic == WAIT_QUEUE_MAGIC);
	ASSERT(in_critical_section());
#endif
	wait_queue_wake_all(wait, reschedule, ERR_OBJECT_DESTROYED);
	wait->magic = 0;
}


It in turn calls wait_queue_wake_all to wake up every thread still waiting on this mutex_t:


int wait_queue_wake_all(wait_queue_t *wait, bool reschedule, status_t wait_queue_error)
{
	thread_t *t;
	int ret = 0;

	if (reschedule && wait->count > 0) {
		current_thread->state = THREAD_READY;
		insert_in_run_queue_head(current_thread);
	}

	/* pop all the threads off the wait queue into the run queue */
	while ((t = list_remove_head_type(&wait->list, thread_t, queue_node))) {
		wait->count--;

		t->state = THREAD_READY;
		t->wait_queue_block_ret = wait_queue_error;
		t->blocking_wait_queue = NULL;

		insert_in_run_queue_head(t);
		ret++;
	}

	if (reschedule && ret > 0)
		thread_resched();

	return ret;
}


If wait->count > 0 and reschedule is true, other threads are still blocked on this mutex_t. The current thread is then marked THREAD_READY and inserted at the head of the run queue; after that every thread on the wait list is popped off and also inserted at the head of the run queue, and finally thread_resched() is called. The net effect is that the threads that were on the wait list get to run first.