The previous article analyzed how the heap is initialized and how allocation and freeing work when RHINO_CONFIG_MM_BLK / RHINO_CONFIG_MM_TLF_BLK_SIZE are not enabled. This time we look at what happens with these two macros turned on. Note that I set RHINO_CONFIG_MM_TLF_BLK_SIZE to 1024 here.
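For reference, here is a sketch of the relevant configuration (the concrete values are inferred from the traces below, e.g. blk_size = 0x20; your k_config.h may differ):
/* hypothetical k_config.h excerpt, matching the values assumed in this article */
#define RHINO_CONFIG_MM_BLK          1     /* enable the fixed-size block pool */
#define RHINO_CONFIG_MM_BLK_SIZE     32    /* each fixed block is 32 bytes (0x20) */
#define RHINO_CONFIG_MM_TLF_BLK_SIZE 1024  /* pool size: 1024 bytes = 32 blocks */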
We start again from krhino_init_mm_head (omitting the parts analyzed in the previous article):
kstat_t krhino_init_mm_head(k_mm_head **ppmmhead, void *addr, size_t len )
{
/* after init_mm_region, k_mm_free(pmmhead, nextblk->mbinfo.buffer); etc.,
allocation from pmmhead is already usable
*/
...
#if (RHINO_CONFIG_MM_BLK > 0)
/* note: stats_addsize inside */
/* directly allocate a region of RHINO_CONFIG_MM_TLF_BLK_SIZE + sizeof(mblk_pool_t) bytes to serve as the memory pool */
mmblk_pool = k_mm_alloc(pmmhead, RHINO_CONFIG_MM_TLF_BLK_SIZE + MM_ALIGN_UP(sizeof(mblk_pool_t)));
if (mmblk_pool) {
/* on success, initialize the memory pool */
stat = krhino_mblk_pool_init(mmblk_pool, "fixed_mm_blk",
(void *)((size_t)mmblk_pool + MM_ALIGN_UP(sizeof(mblk_pool_t))),
RHINO_CONFIG_MM_BLK_SIZE, RHINO_CONFIG_MM_TLF_BLK_SIZE);
if (stat == RHINO_SUCCESS) {
/* after successful init, hand mmblk_pool over to pmmhead for unified management */
pmmhead->fix_pool = mmblk_pool;
#if (K_MM_STATISTIC > 0)
stats_removesize(pmmhead, RHINO_CONFIG_MM_TLF_BLK_SIZE);
#endif
} else {
/* if pool initialization failed, free the memory allocated above */
/* note: stats_removesize inside */
k_mm_free(pmmhead, mmblk_pool);
}
#if (K_MM_STATISTIC > 0)
pmmhead->maxused_size = pmmhead->used_size;
#endif
}
#endif
return RHINO_SUCCESS;
}
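With the values traced below, the region returned by k_mm_alloc is laid out as follows (a sketch; the 0x28-byte header size is inferred from pool_start - pool = MM_ALIGN_UP(sizeof(mblk_pool_t))):
/*
 * 0x200018B0 +---------------------+ <- mmblk_pool (mblk_pool_t header, 0x28 bytes)
 * 0x200018D8 +---------------------+ <- pool_start
 *            | blk 0   (32 bytes)  |
 *            | ...     (32 blocks) |
 *            | blk 31  (32 bytes)  |
 * 0x20001CD8 +---------------------+ <- pool_end = pool_start + 0x400
 */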
Next we analyze the pool initialization, krhino_mblk_pool_init. To better understand the allocation process, the actual value of each argument is annotated:
/* pool=0x200018B0, pool_start=0x200018D8, blk_size=0x20, pool_size=0x400 */
kstat_t krhino_mblk_pool_init(mblk_pool_t *pool, const name_t *name,
void *pool_start,
size_t blk_size, size_t pool_size)
{
uint32_t blks; /* max blocks mem pool offers */
uint8_t *blk_cur; /* block pointer for traversing */
uint8_t *blk_next; /* next block pointer for traversing */
uint8_t *pool_end; /* mem pool end */
uint8_t addr_align_mask; /* address alignment */
NULL_PARA_CHK(pool);
NULL_PARA_CHK(name);
NULL_PARA_CHK(pool_start);
/* over one block at least */
if (pool_size < (blk_size << 1u)) {
return RHINO_BLK_POOL_SIZE_ERR;
}
/* check address & size alignment */
/*addr_align_mask = 0x3*/
addr_align_mask = sizeof(void *) - 1u;
if (((size_t)pool_start & addr_align_mask) > 0u) {
return RHINO_INV_ALIGN;
}
if ((blk_size & addr_align_mask) > 0u) {
return RHINO_INV_ALIGN;
}
if ((pool_size & addr_align_mask) > 0u) {
return RHINO_INV_ALIGN;
}
krhino_spin_lock_init(&pool->blk_lock);
/* pool_end=0x20001CD8, blk_cur=0x200018D8, blk_next=0x200018F8 */
pool_end = (uint8_t *)pool_start + pool_size;
blks = 0u;
blk_cur = (uint8_t *)pool_start;
blk_next = blk_cur + blk_size;
while (blk_next < pool_end) {
blks++;
/* use initial 4 byte point to next block */
*(uint8_t **)blk_cur = blk_next;
blk_cur = blk_next;
blk_next = blk_cur + blk_size;
}
if (blk_next == pool_end) {
blks++;
}
/* the last one */
*((uint8_t **)blk_cur) = NULL;
pool->pool_name = name;
pool->pool_start = pool_start;
pool->pool_end = pool_end;
pool->blk_whole = blks;
pool->blk_avail = blks;
pool->blk_size = blk_size;
pool->avail_list = (uint8_t *)pool_start;
/* this trace macro is not implemented (expands to nothing) */
TRACE_MBLK_POOL_CREATE(krhino_cur_task_get(), pool);
return RHINO_SUCCESS;
}
The loop starting at while (blk_next < pool_end) splits the whole memory pool into blk_size-sized blocks and chains them into a free list, as sketched below:
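With our values, blks = 0x400 / 0x20 = 32, and the first pointer-sized bytes of every free block hold the address of the next one. A sketch of the resulting singly linked free list:
/*
 * avail_list
 *     |
 *     v
 * 0x200018D8 -> 0x200018F8 -> 0x20001918 -> ... -> 0x20001CB8 -> NULL
 *   (blk 0)       (blk 1)       (blk 2)              (blk 31)
 */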
After the pool is initialized, stats_removesize is called to add this space back to mmhead->free_size, because the pool's blocks still count as available space.
Then, as before, we do another malloc(1) to see how the space is allocated.
The call chain is the same as last time: a user-space malloc(size) -> aos_malloc(size) -> krhino_mm_alloc(size) -> k_mm_alloc(g_kmm_head, size). size stays 1 throughout, so we analyze k_mm_alloc(g_kmm_head, 1) directly:
void *k_mm_alloc(k_mm_head *mmhead, size_t size)
{
void *retptr;
k_mm_list_t *get_b, *new_b, *next_b;
int32_t level;
size_t left_size;
size_t req_size = size;
#if (RHINO_CONFIG_MM_BLK > 0)
mblk_pool_t *mm_pool;
#endif
cpu_cpsr_t flags_cpsr;
(void)flags_cpsr;
(void)req_size;
if (!mmhead) {
return NULL;
}
if (size == 0) {
return NULL;
}
MM_CRITICAL_ENTER(mmhead, flags_cpsr);
#if (RHINO_CONFIG_MM_BLK > 0)
/* little blk, try to get from mm_pool */
if (mmhead->fix_pool != NULL) {
mm_pool = (mblk_pool_t *)mmhead->fix_pool;
if (size <= RHINO_CONFIG_MM_BLK_SIZE && mm_pool->blk_avail > 0) {
retptr = k_mm_smallblk_alloc(mmhead, size);
if (retptr) {
MM_CRITICAL_EXIT(mmhead, flags_cpsr);
return retptr;
}
}
}
#endif
...
/* the code below is not reached in this case, so it is not analyzed */
}
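Note the fallback built into this path: if the pool is exhausted (blk_avail == 0), or k_mm_smallblk_alloc returns NULL, execution simply falls through to the regular allocator analyzed in the previous article, so small allocations never fail just because the pool is full. A hypothetical illustration (assuming no other pool users):
/* 32 blocks in the pool (0x400 / 0x20); the 33rd small allocation falls
   through to the regular heap and costs 40 bytes there, as before */
void *p[33];
for (int i = 0; i < 33; i++) {
    p[i] = krhino_mm_alloc(1);  /* p[0..31] from the pool, p[32] from the heap */
}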
static void *k_mm_smallblk_alloc(k_mm_head *mmhead, size_t size)
{
kstat_t sta;
void *tmp;
if (!mmhead) {
return NULL;
}
sta = krhino_mblk_alloc((mblk_pool_t *)mmhead->fix_pool, &tmp);
if (sta != RHINO_SUCCESS) {
return NULL;
}
// stats_addsize: account a whole RHINO_CONFIG_MM_BLK_SIZE block as used, regardless of the requested size
stats_addsize(mmhead, RHINO_CONFIG_MM_BLK_SIZE, 0);
return tmp;
}
kstat_t krhino_mblk_alloc(mblk_pool_t *pool, void **blk)
{
kstat_t status;
uint8_t *avail_blk;
cpu_cpsr_t flags_cpsr;
NULL_PARA_CHK(pool);
NULL_PARA_CHK(blk);
krhino_spin_lock_irq_save(&pool->blk_lock, flags_cpsr);
if (pool->blk_avail > 0u) {
avail_blk = pool->avail_list;
/* the current blk's address is stored through the blk out-parameter and handed out */
*((uint8_t **)blk) = avail_blk;
/* the first 4 byte is the pointer for next block */
/* pool->avail_list now points at the address stored inside avail_blk itself, i.e. the next free blk */
pool->avail_list = *(uint8_t **)(avail_blk);
/* one fewer available block in the pool */
pool->blk_avail--;
status = RHINO_SUCCESS;
} else {
*((uint8_t **)blk) = NULL;
status = RHINO_NO_MEM;
}
krhino_spin_unlock_irq_restore(&pool->blk_lock, flags_cpsr);
return status;
}
Since the requested size = 1 < RHINO_CONFIG_MM_BLK_SIZE, the allocation is served from the memory pool instead of carving out a new blk as in the previous article. The key is krhino_mblk_alloc: the current pool->avail_list address is stored through the blk out-parameter and handed out, and pool->avail_list is advanced to the address of the next free blk. That completes the allocation; the returned address fronts a 32-byte free area, which certainly satisfies the requested size.
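Stripped of locking and parameter checks, the pop operation amounts to this sketch (pop_block is a hypothetical name, not a kernel function):
static void *pop_block(mblk_pool_t *pool)
{
    uint8_t *blk = pool->avail_list;     /* head of the free list */
    if (blk == NULL) {
        return NULL;                     /* pool exhausted */
    }
    pool->avail_list = *(uint8_t **)blk; /* first word of the block holds the next free block */
    pool->blk_avail--;
    return blk;
}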
Now let's look at what happens when this malloc(1) is freed:
free() -> aos_free() -> krhino_mm_free(mem) -> k_mm_free(g_kmm_head, ptr):
void k_mm_free(k_mm_head *mmhead, void *ptr)
{
k_mm_list_t *free_b, *next_b, *prev_b;
cpu_cpsr_t flags_cpsr;
(void)flags_cpsr;
if (!ptr || !mmhead) {
return;
}
MM_CRITICAL_ENTER(mmhead, flags_cpsr);
#if (RHINO_CONFIG_MM_BLK > 0)
/* fix blk, free to mm_pool */
if (krhino_mblk_check(mmhead->fix_pool, ptr)) {
/*it's fixed size memory block*/
k_mm_smallblk_free(mmhead, ptr);
MM_CRITICAL_EXIT(mmhead, flags_cpsr);
return;
}
#endif
...
/* the code below is not reached in this case, so it is not analyzed */
}
#define krhino_mblk_check(pool, blk) \
((pool) != NULL \
&& ((void *)(blk) >= ((mblk_pool_t*)(pool))->pool_start) \
&& ((void *)(blk) < ((mblk_pool_t*)(pool))->pool_end))
krhino_mblk_check determines whether the address being freed falls within the memory pool's address range; if it does, the pool's free path, k_mm_smallblk_free, is taken.
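With the addresses traced earlier, every block the pool hands out lies in [pool_start, pool_end) = [0x200018D8, 0x20001CD8), so the check routes it back to the pool. A hypothetical illustration:
void *p = krhino_mm_alloc(1);                     /* e.g. returns 0x200018D8 */
if (krhino_mblk_check(g_kmm_head->fix_pool, p)) {
    /* p is pool memory: k_mm_free() takes the k_mm_smallblk_free() path */
}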
static void k_mm_smallblk_free(k_mm_head *mmhead, void *ptr)
{
kstat_t sta;
if (!mmhead || !ptr) {
return;
}
sta = krhino_mblk_free((mblk_pool_t *)mmhead->fix_pool, ptr);
if (sta != RHINO_SUCCESS) {
k_err_proc(RHINO_SYS_FATAL_ERR);
}
stats_removesize(mmhead, RHINO_CONFIG_MM_BLK_SIZE);
}
kstat_t krhino_mblk_free(mblk_pool_t *pool, void *blk)
{
cpu_cpsr_t flags_cpsr;
NULL_PARA_CHK(pool);
NULL_PARA_CHK(blk);
krhino_spin_lock_irq_save(&pool->blk_lock, flags_cpsr);
/* use the first 4 byte of the free block point to head of avail list */
*((uint8_t **)blk) = pool->avail_list;
pool->avail_list = blk;
pool->blk_avail++;
krhino_spin_unlock_irq_restore(&pool->blk_lock, flags_cpsr);
return RHINO_SUCCESS;
}
Finally krhino_mblk_free runs. The operation is simple: the first pointer-sized bytes of the blk being freed are set to point at pool->avail_list, then pool->avail_list is pointed directly at the freed blk, and pool->blk_avail is incremented. That completes the free; in other words, the newly freed blk is placed at the head of the memory pool's free list. Throughout allocation and freeing of pool blks, pool_start and pool_end in mmhead->fix_pool never change; only avail_list (pointing at the first free blk) and blk_avail (the count of free blks) do.
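Because the free list is LIFO, the block freed most recently is the first one handed out again. A hypothetical demo (assuming every request is served by the fixed pool):
void *a = krhino_mm_alloc(1);  /* one 32-byte pool block */
void *b = krhino_mm_alloc(1);
krhino_mm_free(b);
krhino_mm_free(a);
void *c = krhino_mm_alloc(1);  /* c == a: the last block freed comes back first */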
Compared with the previous article, where malloc(1) cost 40 bytes, only 8 bytes are saved here (each pool block is 32 bytes). But under frequent allocations of varying small sizes, the fixed pool also avoids a degree of memory fragmentation, and the allocation path itself is simpler.