cma_create_area()会调用cma_activate_area(),cma_activate_area()函数则会针对每个page调用:
init_cma_reserved_pageblock(pfn_to_page(base_pfn));
这个函数则会通过set_pageblock_migratetype(page, MIGRATE_CMA)将页设置为MIGRATE_CMA类型的:
#ifdef CONFIG_CMA
/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
void __init init_cma_reserved_pageblock(struct page *page)
{
	unsigned i = pageblock_nr_pages;
	struct page *p = page;

	/* Clear the bootmem "reserved" marking and drop the refcount on
	 * every page of the pageblock so it can enter the buddy system. */
	do {
		__ClearPageReserved(p);
		set_page_count(p, 0);
	} while (++p, --i);

	/* Head page needs a refcount of 1 for the __free_pages() below. */
	set_page_refcounted(page);
	set_pageblock_migratetype(page, MIGRATE_CMA);
	__free_pages(page, pageblock_order);
	totalram_pages += pageblock_nr_pages;
}
#endif
同时，其中调用的__free_pages(page, pageblock_order)最终会调用到__free_one_page(page, zone, order, migratetype)，
相关的page会被加到MIGRATE_CMA的free_list上面去:
list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
申请连续内存
申请连续内存仍然使用标准的arch/arm/mm/dma-mapping.c中定义的dma_alloc_coherent()和dma_alloc_writecombine(),这二者会间接调用drivers/base/dma-contiguous.c中的
structpage *dma_alloc_from_contiguous(structdevice *dev,intcount,
unsignedintalign)
->
structpage *dma_alloc_from_contiguous(structdevice *dev,intcount,
unsignedintalign)
{
...
for(;;) {
pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
start, count, mask);
if(pageno >= cma->count) {
ret = -ENOMEM;
gotoerror;
}
pfn = cma->base_pfn + pageno;
ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
if(ret == 0) {
bitmap_set(cma->bitmap, pageno, count);
break;
}elseif(ret != -EBUSY) {
gotoerror;
}
pr_debug("%s(): memory range at %p is busy, retrying\n",
__func__, pfn_to_page(pfn));
/* try again with a bit different memory target */
start = pageno + mask + 1;
}
...
}->
int alloc_contig_range(unsigned long start, unsigned long end, unsigned migratetype)
需要隔离page,隔离page的作用通过代码的注释可以体现:
/*
* What we do here is we mark all pageblocks in range as
* MIGRATE_ISOLATE. Because of the way page allocator work, we
* align the range to MAX_ORDER pages so that page allocator
* won't try to merge buddies from different pageblocks and
* change MIGRATE_ISOLATE to some other migration type.
*
* Once the pageblocks are marked as MIGRATE_ISOLATE, we
* migrate the pages from an unaligned range (ie. pages that
* we are interested in). This will put all the pages in
* range back to page allocator as MIGRATE_ISOLATE.
*
* When this is done, we take the pages in range from page
* allocator removing them from the buddy system. This way
* page allocator will never consider using them.
*
* This lets us mark the pageblocks back as
* MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
* MAX_ORDER aligned range but not in the unaligned, original
* range are put back to page allocator so that buddy can use
* them.
*/
ret = start_isolate_page_range(pfn_align_to_maxpage_down(start),
                               pfn_align_to_maxpage_up(end),
                               migratetype);
简单地说，就是把相关的page标记为MIGRATE_ISOLATE，这样buddy系统就不会再使用它们。
/*
* start_isolate_page_range() -- make page-allocation-type of range of pages
* to be MIGRATE_ISOLATE.
* @start_pfn: The lower PFN of the range to be isolated.
* @end_pfn: The upper PFN of the range to be isolated.
* @migratetype: migrate type to set in error recovery.
*
* Making page-allocation-type to be MIGRATE_ISOLATE means free pages in
* the range will never be allocated. Any free pages and pages freed in the
* future will not be allocated again.
*
* start_pfn/end_pfn must be aligned to pageblock_order.
* Returns 0 on success and -EBUSY if any part of range cannot be isolated.
*/
intstart_isolate_page_range(unsignedlongstart_pfn, unsignedlongend_pfn,
unsigned migratetype)
{
unsignedlongpfn;
unsignedlongundo_pfn;
structpage *page;
BUG_ON((start_pfn) & (pageblock_nr_pages - 1));
BUG_ON((end_pfn) & (pageblock_nr_pages - 1));
for(pfn = start_pfn;
pfn
pfn += pageblock_nr_pages) {
page = __first_valid_page(pfn, pageblock_nr_pages);
if(page && set_migratetype_isolate(page)) {
undo_pfn = pfn;
gotoundo;
}
}
return0;
undo:
for(pfn = start_pfn;
pfn
pfn += pageblock_nr_pages)
unset_migratetype_isolate(pfn_to_page(pfn), migratetype);
return-EBUSY;
}
接下来调用__alloc_contig_migrate_range()进行页面隔离和迁移:
staticint__alloc_contig_migrate_range(unsignedlongstart, unsignedlongend)
{
/* This function is based on compact_zone() from compaction.c. */
unsignedlongpfn = start;
unsignedinttries = 0;
intret = 0;
structcompact_control cc = {
.nr_migratepages = 0,
.order = -1,
.zone = page_zone(pfn_to_page(start)),
.sync =true,
};
INIT_LIST_HEAD(&cc.migratepages);
migrate_prep_local();
while(pfn
if(fatal_signal_pending(current)) {
ret = -EINTR;
break;
}
if(list_empty(&cc.migratepages)) {
cc.nr_migratepages = 0;
pfn = isolate_migratepages_range(cc.zone, &cc,
pfn, end);
if(!pfn) {
ret = -EINTR;
break;
}
tries = 0;
}elseif(++tries == 5) {
ret = ret
break;
}
ret = migrate_pages(&cc.migratepages,
__alloc_contig_migrate_alloc,
0,false,true);
}
putback_lru_pages(&cc.migratepages);
returnret > 0 ? 0 : ret;
}
本文详细解析了CMA(Contiguous Memory Allocator)内存分配机制的工作原理。从cma_create_area()函数开始,介绍如何初始化预留的内存页,并将其设置为MIGRATE_CMA类型。进一步解释了如何通过dma_alloc_from_contiguous()申请连续内存,以及如何隔离和迁移页面来确保连续内存块的可用性。
1700

被折叠的 条评论
为什么被折叠?



