// Depending on size, this can instantiate multiple memory pools, one per block size
template<unsigned size, unsigned align_> union freeblock
{
typedef typename boost::type_with_alignment<align_>::type aligner_type;
aligner_type aligner;
// Using a union here is a really neat trick
char bytes[size];
freeblock * next;
};
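// A small sanity sketch (illustrative only, not part of the original header):
// the union lets the same bytes act as object storage while a block is in use
// and as the free-list link once it has been released, while aligner_type
// forces alignment suitable for the stored type.  'widget' is a hypothetical
// payload type, and the two includes are needed only for this sketch.
#include <boost/static_assert.hpp>
#include <boost/type_traits/alignment_of.hpp>

struct widget { double d; int i; };

typedef freeblock< sizeof(widget),
                   boost::alignment_of<widget>::value > widget_block;

// A block is never smaller than its payload or a pointer, so a released block
// can always store the pointer to the next free block.
BOOST_STATIC_ASSERT( sizeof(widget_block) >= sizeof(widget) );
BOOST_STATIC_ASSERT( sizeof(widget_block) >= sizeof(widget_block *) );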
template<unsigned size, unsigned align_> struct allocator_impl
{
typedef freeblock<size, align_> block;
// It may seem odd to use such small pages.
//
// However, on a typical Windows implementation that uses
// the OS allocator, "normal size" pages interact with the
// "ordinary" operator new, slowing it down dramatically.
//
// 512 byte pages are handled by the small object allocator,
// and don't interfere with ::new.
//
// The other alternative is to use much bigger pages (1M.)
//
// It is surprisingly easy to hit pathological behavior by
// varying the page size. g++ 2.96 on Red Hat Linux 7.2,
// for example, passionately dislikes 496. 512 seems OK.
#if defined(BOOST_QA_PAGE_SIZE)
enum { items_per_page = BOOST_QA_PAGE_SIZE / size };
#else
enum { items_per_page = 512 / size }; // 1048560 / size
#endif
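// Worked example: with 32-byte blocks this gives 512 / 32 == 16 items per
// page, with 40-byte blocks 512 / 40 == 12 (integer division, so a page may
// total slightly less than 512 bytes).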
#ifdef BOOST_HAS_THREADS
static lightweight_mutex & mutex()
{
static freeblock< sizeof( lightweight_mutex ), boost::alignment_of< lightweight_mutex >::value > fbm;
static lightweight_mutex * pm = new( &fbm ) lightweight_mutex;
return *pm;
}
static lightweight_mutex * mutex_init;
#endif
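// Note on the two members above: mutex() builds the lightweight_mutex with
// placement new inside a function-local static freeblock, so the mutex is
// created on first use and deliberately never destroyed -- the pool can still
// lock it from destructors of other static objects running at program exit.
// mutex_init exists only to force a call to mutex() during static
// initialization, before any threads can race on the lazily initialized local
// statics.  The same "leaky singleton" idiom in isolation might look roughly
// like this (hypothetical names, std::mutex standing in for lightweight_mutex,
// <new> and <mutex> assumed):
//
//     inline std::mutex & pool_mutex()
//     {
//         alignas(std::mutex) static unsigned char storage[ sizeof(std::mutex) ];
//         static std::mutex * pm = new( storage ) std::mutex;  // never deleted
//         return *pm;
//     }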
static block * free;
static block * page;
static unsigned last;
static inline void * alloc()
{
#ifdef BOOST_HAS_THREADS
lightweight_mutex::scoped_lock lock( mutex() );
#endif
if(block * x = free)
{
free = x->next;
return x;
}
else
{
if(last == items_per_page)
{
// "Listen to me carefully: there is no memory leak"
// -- Scott Meyers, Eff C++ 2nd Ed Item 10
page = ::new block[items_per_page];
last = 0;
}
return &page[last++];
}
}
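// Allocation is two-tiered: first reuse a block from the free list (filled by
// dealloc() below), otherwise hand out the next slot of the current page,
// new-ing a fresh page of items_per_page blocks once the previous one is
// exhausted.  Pages are never freed; every block either stays in use or goes
// back on the free list for reuse, which is why the Meyers quote above
// applies: retained, not leaked.  A single-threaded round trip might look like
// this ('widget' again being a hypothetical payload type):
//
//     typedef allocator_impl< sizeof(widget),
//                             boost::alignment_of<widget>::value > pool;
//     void * p = pool::alloc();
//     pool::dealloc( p );        // pushes the block onto the free list
//     void * q = pool::alloc();  // q == p: freed blocks are reused first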
static inline void * alloc(std::size_t n)
{
if(n != size) // class-specific new called for a derived object
{
return ::operator new(n);
}
else
{
#ifdef BOOST_HAS_THREADS
lightweight_mutex::scoped_lock lock( mutex() );
#endif
if(block * x = free)
{
free = x->next;
return x;
}
else
{
if(last == items_per_page)
{
page = ::new block[items_per_page];
last = 0;
}
return &page[last++];
}
}
}
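// The sized overload matters for class-specific operator new: a derived class
// inherits its base's operator new, and when the derived object is larger
// than 'size' the request is forwarded to the global operator new instead of
// the pool.  A sketch of a class routing allocation through the pool
// (hypothetical 'node'; boost's own convenience wrapper for this pattern is
// quick_allocator<T>):
//
//     class node
//     {
//     public:
//         void * operator new( std::size_t n )
//         {
//             return allocator_impl< sizeof(node),
//                 boost::alignment_of<node>::value >::alloc( n );
//         }
//         void operator delete( void * p, std::size_t n )
//         {
//             allocator_impl< sizeof(node),
//                 boost::alignment_of<node>::value >::dealloc( p, n );
//         }
//     };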
static inline void dealloc(void * pv)
{
if(pv != 0) // 18.4.1.1/13: deleting a null pointer does nothing
{
#ifdef BOOST_HAS_THREADS
lightweight_mutex::scoped_lock lock( mutex() );
#endif
block * pb = static_cast<block *>(pv);
pb->next = free;
free = pb;
}
}
static inline void dealloc(void * pv, std::size_t n)
{
if(n != size) // class-specific delete called for a derived object
{
::operator delete(pv);
}
else if(pv != 0) // 18.4.1.1/13: deleting a null pointer does nothing
{
#ifdef BOOST_HAS_THREADS
lightweight_mutex::scoped_lock lock( mutex() );
#endif
block * pb = static_cast<block *>(pv);
pb->next = free;
free = pb;
}
}
};
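// The struct above only declares its static data members.  In the full header
// (boost/detail/quick_allocator.hpp) they are defined out of class, together
// with a thin quick_allocator<T> wrapper that maps a type to the matching
// pool.  The sketch below reconstructs those pieces from the declarations
// above rather than quoting the header verbatim: 'free' and 'page' start out
// null, and 'last' starts at items_per_page so the very first alloc() has to
// grab a page.

#ifdef BOOST_HAS_THREADS

template<unsigned size, unsigned align_>
  lightweight_mutex * allocator_impl<size, align_>::mutex_init =
      &allocator_impl<size, align_>::mutex();

#endif

template<unsigned size, unsigned align_>
  freeblock<size, align_> * allocator_impl<size, align_>::free = 0;

template<unsigned size, unsigned align_>
  freeblock<size, align_> * allocator_impl<size, align_>::page = 0;

template<unsigned size, unsigned align_>
  unsigned allocator_impl<size, align_>::last = items_per_page;

// One pool per (size, alignment) pair: all types with the same layout share it.
template<class T>
struct quick_allocator
    : public allocator_impl< sizeof(T), boost::alignment_of<T>::value >
{
};

// Typical use: a class's operator new/delete forward to
// quick_allocator<X>::alloc() / dealloc(), as sketched after alloc(n) above.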