我觉得很多讲Future模式的文章并没有深刻理解Future模式,其实Future模式只是生产者-消费者模型的扩展。经典“生产者-消费者”模型中消息的生产者不关心消费者何时处理完该条消息,也不关心处理结果。Future模式则可以让消息的生产者等待直到消息处理结束,如果需要的话还可以取得处理结果。
但是Future模式有个重大缺陷:当消费者工作得不够快的时候,它会阻塞住生产者线程,从而可能导致系统吞吐量的下降。所以不建议在高性能的服务端使用。
下面有两段代码,由于写的时间差比较久,风格略有不一致。my_msg_queue.h里实现了一个阻塞的消息队列。test.cpp中示范了生产者如何等待处理结束。Job类既是Command,又是FutureData,这样简化了模型,更突出了Future模型的本质。顺便抱怨下,设计模式的书总是喜欢故弄玄虚,把本来很简单的东西搞得很复杂的样子。
======================test.cpp==============================
#include "my_msg_queue.h"  // NOTE(review): fixed include typo (was "my_msq_queue.h"; the header is named my_msg_queue.h)
#include <windows.h>
#include <iostream>
#include <stdexcept>       // std::exception / std::runtime_error
#include <string>
#include <boost/shared_ptr.hpp>
#include <boost/thread.hpp>
// A Job is simultaneously the Command (work()) and the FutureData
// (getResult()) of the Future pattern: the producer blocks in getResult()
// until a consumer thread has executed work() and published the result.
class Job
{
public:
    // cmd_ : the "command" string; the demo work is just measuring its length.
    // FIX: initializer list now matches declaration order (cmd, result, ready);
    // the original listed ready before result, which triggers -Wreorder and
    // misleads readers about the actual initialization sequence.
    explicit Job(const std::string& cmd_)
        : cmd(cmd_)
        , result(0)
        , ready(false)
    {
    }

    // Consumer side: compute and publish the result exactly once, then wake
    // every producer blocked in getResult().
    void work()
    {
        boost::mutex::scoped_lock lock(mtx);
        if (!ready)
        {
            result = cmd.length();
            ready = true;
            cond.notify_all();  // wake waiters in getResult()
        }
    }

    // Producer side: block until work() has published the result.
    int getResult()
    {
        boost::mutex::scoped_lock lock(mtx);
        while (!ready)  // loop guards against spurious wakeups
        {
            cond.wait(lock);
        }
        return result;
    }

private:
    std::string cmd;   // the command to "execute"
    int result;        // written by work(), read by getResult(); guarded by mtx
    bool ready;        // true once result is valid; guarded by mtx
    boost::mutex mtx;
    boost::condition_variable cond;
};
// Jobs are shared between the producer and the consumer, hence ref-counted.
typedef boost::shared_ptr<Job>
MyJob;
// Blocking queue ferrying jobs from the producer thread to the consumer thread.
MsgQueue<MyJob> jobs;
// NOTE(review): these flags are plain bools written by main() while the worker
// threads read them concurrently -- a data race in strict C++. In practice the
// demo works, but boost::atomic (or a mutex) would be the correct tool; verify
// before reusing this pattern.
bool producer_running_flag = true;
bool consumer_running_flag = true;
// Written only by the producer thread; main() reads them after thr1.join(),
// which provides the necessary synchronization.
int job_cnt = 0;
int done_cnt = 0;
// Producer thread: keeps submitting jobs until main() clears the flag.
// Demonstrates the Future pattern: after pushing a job it blocks in
// getResult() until the consumer has executed it.
void producer()
{
    for (;;)
    {
        if (!producer_running_flag)
        {
            break;
        }
        ++job_cnt;
        MyJob job(new Job("123456"));
        jobs.push(job);
        // Future-style rendezvous: wait for the consumer to finish this job.
        const int len = job->getResult();
        if (len == 6)
        {
            ++done_cnt;
        }
    }
}
void consumer()
{
while
(consumer_running_flag){
MyJob job =
jobs.pop();
job->work();
Sleep(100);
}
}
int main()
{
boost::thread thr1(producer);
boost::thread thr2(consumer);
Sleep(1000);
producer_running_flag = false;
thr1.join();
consumer_running_flag = false;
thr2.join();
std::cout
<< job_cnt
<< std::endl;
std::cout
<< done_cnt
<< std::endl;
return
0;
}
======================my_msg_queue.h==============================
#include
<boost/shared_array.hpp>
#include <boost/thread.hpp>
#include <stdexcept>
#include <queue>
template<class Msg>
class MsgQueue
{
public:
MsgQueue(std::size_t max_size_
= (size_t)-1)
: max_size(max_size_)
, close_flag(false)
{
if (max_size < 1)
{
throw std::runtime_error("MsgQueue::MsgQueue : max_size
< 1");
}
}
~MsgQueue()
{
close();
}
void
push(Msg msg)
{
boost::mutex::scoped_lock lock(mtx);
while (msgs.size() >= max_size
&& !close_flag)
{
full_cond.wait(lock);
}
if (close_flag)
{
throw std::runtime_error("MsgQueue::push : msg queue has been
shutdown.");
}
msgs.push(msg);
if(msgs.size() == 1)
{
empty_cond.notify_all();
}
}
Msg
pop()
{
boost::mutex::scoped_lock lock(mtx);
while (msgs.empty() &&
!close_flag)
{
empty_cond.wait(lock);
}
if (close_flag)
{
throw std::runtime_error("MsgQueue::pop : msg queue has been
shutdown.");
}
Msg msg = msgs.front();
msgs.pop();
if(msgs.size() == max_size - 1)
{
full_cond.notify_all();
}
return msg;
}
void
close()
{
close_flag = true;
{
boost::mutex::scoped_lock lock(mtx);
full_cond.notify_all();
empty_cond.notify_all();
}
}
std::size_t
getMaxSize()
{
return max_size;
}
std::size_t
getSize() // the result may be not newest due to other
threads
{
boost::mutex::scoped_lock lock(mtx);
return msgs.size();
}
private:
boost::mutex
mtx;
boost::condition_variable_any full_cond;
boost::condition_variable_any empty_cond;
std::queue<Msg>
msgs;
std::size_t
const max_size;
bool
close_flag;
};