1. Throughput Calculation Example
The section on timers in 《Linux多线程服务端编程：使用muduo C++网络库》 introduces two examples:
1. Boost.Asio Timer
2. Java Netty
The second example mimics Java Netty's traffic statistics: it measures throughput (bytes per second) and the number of messages received per second, driven by the fixed-interval timer EventLoop::runEvery. Let's look at the traffic-statistics server first.
Note: this measures one-way throughput, so the server simply discards every message it receives; an echo client/server pair would measure two-way throughput instead.
muduo/examples/netty/discard/server.cc:
#include <muduo/net/TcpServer.h>

#include <muduo/base/Atomic.h>
#include <muduo/base/Logging.h>
#include <muduo/base/Thread.h>
#include <muduo/net/EventLoop.h>
#include <muduo/net/InetAddress.h>

#include <boost/bind.hpp>

#include <utility>

#include <stdio.h>
#include <unistd.h>

using namespace muduo;
using namespace muduo::net;

int numThreads = 0;

class DiscardServer
{
 public:
  DiscardServer(EventLoop* loop, const InetAddress& listenAddr)
    : server_(loop, listenAddr, "DiscardServer"),
      oldCounter_(0),
      startTime_(Timestamp::now())
  {
    server_.setConnectionCallback(
        boost::bind(&DiscardServer::onConnection, this, _1));
    server_.setMessageCallback(
        boost::bind(&DiscardServer::onMessage, this, _1, _2, _3));
    server_.setThreadNum(numThreads);
    /* print the throughput statistics every 3 seconds */
    loop->runEvery(3.0, boost::bind(&DiscardServer::printThroughput, this));
  }

  void start()
  {
    LOG_INFO << "starting " << numThreads << " threads.";
    server_.start();
  }

 private:
  /* connection established/closed callback */
  void onConnection(const TcpConnectionPtr& conn)
  {
    LOG_TRACE << conn->peerAddress().toIpPort() << " -> "
              << conn->localAddress().toIpPort() << " is "
              << (conn->connected() ? "UP" : "DOWN");
  }

  /* message-received callback */
  void onMessage(const TcpConnectionPtr& conn, Buffer* buf, Timestamp)
  {
    size_t len = buf->readableBytes();
    /* add len to the byte counter */
    transferred_.add(len);
    /* add 1 to the message counter */
    receivedMessages_.incrementAndGet();
    buf->retrieveAll();
  }

  /* timer callback that computes the throughput */
  void printThroughput()
  {
    Timestamp endTime = Timestamp::now();
    int64_t newCounter = transferred_.get();
    int64_t bytes = newCounter - oldCounter_;
    int64_t msgs = receivedMessages_.getAndSet(0);
    double time = timeDifference(endTime, startTime_);
    /* print the traffic statistics */
    printf("%4.3f MiB/s %4.3f Ki Msgs/s %6.2f bytes per msg\n",
           static_cast<double>(bytes)/time/1024/1024,
           static_cast<double>(msgs)/time/1024,
           static_cast<double>(bytes)/static_cast<double>(msgs));
    oldCounter_ = newCounter;
    startTime_ = endTime;
  }

  TcpServer server_;
  /* total number of bytes received */
  AtomicInt64 transferred_;
  /* number of messages received in the current interval */
  AtomicInt64 receivedMessages_;
  /* byte counter value at the previous sample */
  int64_t oldCounter_;
  /* timestamp of the previous sample */
  Timestamp startTime_;
};

int main(int argc, char* argv[])
{
  LOG_INFO << "pid = " << getpid() << ", tid = " << CurrentThread::tid();
  if (argc > 1)
  {
    numThreads = atoi(argv[1]);
  }
  EventLoop loop;
  InetAddress listenAddr(2009);
  DiscardServer server(&loop, listenAddr);
  server.start();
  loop.loop();
}
Run the server together with a traffic-generating client and it prints one line of statistics every three seconds. For example, if 314572800 bytes (300 MiB) arrive in 307200 messages during one 3-second window, printThroughput() reports 100.000 MiB/s, 100.000 Ki Msgs/s and 1024.00 bytes per msg.
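muduo's examples ship a matching discard client as well; what follows is only a minimal sketch of such a traffic generator, not the book's code (the class name DiscardClient, the 1 KiB message size and the hard-coded 127.0.0.1:2009 address are illustrative choices). It sends one fixed-size message as soon as the connection comes up and queues the next one from the write-complete callback, so the connection stays saturated:
#include <muduo/net/TcpClient.h>
#include <muduo/net/EventLoop.h>
#include <muduo/net/InetAddress.h>

#include <boost/bind.hpp>

using namespace muduo;
using namespace muduo::net;

class DiscardClient
{
 public:
  DiscardClient(EventLoop* loop, const InetAddress& serverAddr, int size)
    : client_(loop, serverAddr, "DiscardClient"),
      message_(size, 'H')
  {
    client_.setConnectionCallback(
        boost::bind(&DiscardClient::onConnection, this, _1));
    client_.setWriteCompleteCallback(
        boost::bind(&DiscardClient::onWriteComplete, this, _1));
  }

  void connect() { client_.connect(); }

 private:
  void onConnection(const TcpConnectionPtr& conn)
  {
    if (conn->connected())
    {
      /* push the first message once the connection is up */
      conn->send(message_);
    }
  }

  void onWriteComplete(const TcpConnectionPtr& conn)
  {
    /* the previous message reached the kernel buffer; queue the next one */
    conn->send(message_);
  }

  TcpClient client_;
  string message_;
};

int main()
{
  EventLoop loop;
  InetAddress serverAddr("127.0.0.1", 2009);
  DiscardClient client(&loop, serverAddr, 1024);  /* 1 KiB per message */
  client.connect();
  loop.loop();
}
Running a client like this against the discard server above produces the one-way throughput measurement described at the start of the section.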
2. Atomic Design
The server class counts bytes and messages with the AtomicInt64 data structure. Let's look at its source:
muduo/base/Atomic.h
#ifndef MUDUO_BASE_ATOMIC_H
#define MUDUO_BASE_ATOMIC_H

#include <boost/noncopyable.hpp>
#include <stdint.h>

namespace muduo
{

namespace detail
{
template<typename T>
class AtomicIntegerT : boost::noncopyable
{
 public:
  AtomicIntegerT()
    : value_(0)
  {
  }

  T get()
  {
    // in gcc >= 4.7: __atomic_load_n(&value_, __ATOMIC_SEQ_CST)
    /* GCC atomic builtin: a CAS with (0, 0) acts as an atomic read */
    return __sync_val_compare_and_swap(&value_, 0, 0);
  }

  T getAndAdd(T x)
  {
    // in gcc >= 4.7: __atomic_fetch_add(&value_, x, __ATOMIC_SEQ_CST)
    /* GCC atomic builtin: atomically add x, return the old value */
    return __sync_fetch_and_add(&value_, x);
  }

  T addAndGet(T x)
  {
    return getAndAdd(x) + x;
  }

  T incrementAndGet()
  {
    return addAndGet(1);
  }

  T decrementAndGet()
  {
    return addAndGet(-1);
  }

  void add(T x)
  {
    getAndAdd(x);
  }

  void increment()
  {
    incrementAndGet();
  }

  void decrement()
  {
    decrementAndGet();
  }

  T getAndSet(T newValue)
  {
    // in gcc >= 4.7: __atomic_store_n(&value, newValue, __ATOMIC_SEQ_CST)
    return __sync_lock_test_and_set(&value_, newValue);
  }

 private:
  volatile T value_;
};
}

typedef detail::AtomicIntegerT<int32_t> AtomicInt32;
typedef detail::AtomicIntegerT<int64_t> AtomicInt64;
}

#endif  // MUDUO_BASE_ATOMIC_H
The class is built on GCC's atomic builtins: since version 4.1.2, gcc has provided the __sync_* family of built-in functions, which perform additions, subtractions and logical operations atomically. The muduo author uses them here so that the counters stay correct when they are updated from multiple threads.
With the source of Atomic.h in hand, transferred_.add(len) in server.cc is simply a wrapper around __sync_fetch_and_add, and receivedMessages_.incrementAndGet() behaves like receivedMessages_.add(1) except that it also returns the incremented value, since both end up in getAndAdd().
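To make the mapping concrete, here is a small standalone program, not muduo code (the variable name and the constant 100 are made up for illustration), that exercises the same builtins directly; the gcc >= 4.7 __atomic_* equivalents mentioned in the comments of Atomic.h are noted alongside:
#include <stdint.h>
#include <stdio.h>

int main()
{
  int64_t value = 0;

  /* add(x) / getAndAdd(x): atomically add x, return the old value      */
  /* gcc >= 4.7: __atomic_fetch_add(&value, 100, __ATOMIC_SEQ_CST)      */
  int64_t old = __sync_fetch_and_add(&value, 100);          /* old == 0, value == 100 */

  /* get(): a compare-and-swap with (0, 0) acts as an atomic read       */
  /* gcc >= 4.7: __atomic_load_n(&value, __ATOMIC_SEQ_CST)              */
  int64_t cur = __sync_val_compare_and_swap(&value, 0, 0);  /* cur == 100 */

  /* getAndSet(0): swap in a new value and return the previous one,     */
  /* which is what printThroughput() uses to reset receivedMessages_;   */
  /* the gcc >= 4.7 builtin that returns the old value is               */
  /* __atomic_exchange_n(&value, 0, __ATOMIC_SEQ_CST)                   */
  int64_t snapshot = __sync_lock_test_and_set(&value, 0);   /* snapshot == 100, value == 0 */

  printf("old=%lld cur=%lld snapshot=%lld value=%lld\n",
         (long long)old, (long long)cur, (long long)snapshot, (long long)value);
  return 0;
}
On x86-64 this compiles with plain gcc/g++; the program prints old=0 cur=100 snapshot=100 value=0.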
3. References
1. 《Linux多线程服务端编程：使用muduo C++网络库》
2. http://www.cnblogs.com/FrankTan/archive/2010/12/11/1903377.html