brpc中有两种分配资源的池,分别是object_pool和resource_pool,object_pool是resource_pool的简化版,所以在这里主要介绍resource_pool的原理。
可以对照我画的架构图看更加清楚明了
源码分析
从获取一个对象的方法开始
// Convenience free function: obtains a T object from the process-wide
// ResourcePool<T> singleton. On success the object's resource id is
// written to *id and a pointer to the object is returned; see
// ResourcePool<T>::get_resource for the failure (NULL) case.
template <typename T> inline T* get_resource(ResourceId<T>* id) {
return ResourcePool<T>::singleton()->get_resource(id);
}
创建(或复用)一个T对象,函数返回值为对象指针,对应的资源id通过出参id返回
继续
// Fetch a T from the calling thread's LocalPool, writing its resource
// id into *id. Returns NULL only when the thread-local pool could not
// be created (allocation failure in get_or_new_local_pool).
inline T* get_resource(ResourceId<T>* id) {
    LocalPool* const pool = get_or_new_local_pool();
    // Pool creation fails only on out-of-memory; hint the branch as unlikely.
    if (__builtin_expect(pool == NULL, 0)) {
        return NULL;
    }
    return pool->get(id);
}
__builtin_expect的作用类似于linux内核中的likely/unlikely宏,用于给编译器提供分支预测的提示以优化热路径,然后看get_or_new_local_pool的实现
// Returns this thread's LocalPool, lazily creating and registering it on
// the thread's first call. Returns NULL only if the allocation fails.
inline LocalPool* get_or_new_local_pool() {
LocalPool* lp = _local_pool;
// Fast path: the thread-local pool already exists.
if (lp != NULL) {
return lp;
}
// First call on this thread: create the pool; nothrow new returns NULL
// instead of throwing on out-of-memory.
lp = new(std::nothrow) LocalPool(this);
if (NULL == lp) {
return NULL;
}
BAIDU_SCOPED_LOCK(_change_thread_mutex); //avoid race with clear()
_local_pool = lp;
// Register a destructor hook so the pool is torn down at thread exit.
butil::thread_atexit(LocalPool::delete_local_pool, lp);
// Count of live thread-local pools; relaxed ordering suffices for a
// plain statistics counter.
_nlocal.fetch_add(1, butil::memory_order_relaxed);
return lp;
}
// Out-of-class definition of the per-thread pool pointer: one instance
// per thread per ResourcePool<T> instantiation, initially NULL.
template <typename T>
BAIDU_THREAD_LOCAL typename ResourcePool<T>::LocalPool*
ResourcePool<T>::_local_pool = NULL;
resource_pool通过thread_local的pool进行对象的分配,避免了多线程之间的data race,同时提升了内存访问的locality。因为使用thread local,所以该路径上不存在多线程竞争问题;_change_thread_mutex据我观察仅用于单元测试(避免与clear()竞争),因此这里不展开。
在继续介绍实现之前,先介绍相关的数据结构
ResourcePool* _pool;       // back-pointer to the owning ResourcePool
Block* _cur_block;         // block currently being carved up for new objects
size_t _cur_block_index;   // presumably the global index of _cur_block — TODO confirm against add_block()
FreeChunk _cur_free;       // thread-local stash of freed resources — NOTE(review): verify against return_resource()
// Fixed-size slab of raw storage holding up to BLOCK_NITEM objects of T.
// Aligned to a cache line (presumably to avoid false sharing between
// threads each filling their own block).
struct BAIDU_CACHELINE_ALIGNMENT Block {
char items[sizeof(T) * BLOCK_NITEM]; // raw, uninitialized storage for the objects
size_t nitem;                        // how many items have been handed out of this block
Block() : nitem(0) {}
};
typedef ResourcePoolFreeChunk<T, FREE_CHUNK_NITEM> FreeChunk;
// Objects per Block, computed from the size/item caps below.
static const size_t BLOCK_NITEM = ResourcePoolBlockItemNum<T>::value;
// A FreeChunk caches as many freed entries as one Block holds.
static const size_t FREE_CHUNK_NITEM = BLOCK_NITEM;
// Computes how many T objects one Block stores: as many as fit into
// ResourcePoolBlockMaxSize<T> bytes, clamped to the inclusive range
// [1, ResourcePoolBlockMaxItem<T>::value].
template <typename T>
class ResourcePoolBlockItemNum {
    // How many T fit in the byte budget (may be 0 for very large T).
    static const size_t kFitCount = ResourcePoolBlockMaxSize<T>::value / sizeof(T);
    // Guarantee at least one object per block.
    static const size_t kAtLeastOne = (kFitCount < 1 ? 1 : kFitCount);
public:
    static const size_t value = (kAtLeastOne > ResourcePoolBlockMaxItem<T>::value ?
                                 ResourcePoolBlockMaxItem<T>::value : kAtLeastOne);
};
// Default upper bound on one Block's raw storage; users can specialize
// this trait for a particular T to tune the block size.
template <typename T> struct ResourcePoolBlockMaxSize {
static const size_t value = 64 * 1024; // bytes
};
template <typename T> struct ResourcePoolBlockMaxItem {
static const size_t value = 25