Note: the code analyzed here is taken from NuttX version 12.8.0.
/****************************************************************************
 * Name: mm_memalign
 *
 * Description:
 *   memalign requests more than enough space from malloc, finds a region
 *   within that chunk that meets the alignment request and then frees any
 *   leading or trailing space.
 *
 *   The alignment argument must be a power of two. 16-byte alignment is
 *   guaranteed by normal malloc calls.
 *
 ****************************************************************************/

FAR void *mm_memalign(FAR struct mm_heap_s *heap, size_t alignment,
                      size_t size)
{
  FAR struct mm_allocnode_s *node;
  uintptr_t rawchunk;
  uintptr_t alignedchunk;
  size_t mask;
  size_t allocsize;
  size_t newsize;

  /* Make sure that alignment is less than half max size_t */

  if (alignment >= (SIZE_MAX / 2))
    {
      return NULL;
    }

  /* Make sure that alignment is a power of 2 */

  if ((alignment & -alignment) != alignment)
    {
      return NULL;
    }

#ifdef CONFIG_MM_HEAP_MEMPOOL
  if (heap->mm_mpool)
    {
      node = mempool_multiple_memalign(heap->mm_mpool, alignment, size);
      if (node != NULL)
        {
          return node;
        }
    }
#endif

  /* If this requested alinement's less than or equal to the natural
   * alignment of malloc, then just let malloc do the work.
   */

  if (alignment <= MM_ALIGN)
    {
      FAR void *ptr = mm_malloc(heap, size);
      DEBUGASSERT(ptr == NULL || ((uintptr_t)ptr) % alignment == 0);
      return ptr;
    }
  else if (alignment < MM_MIN_CHUNK)
    {
      alignment = MM_MIN_CHUNK;
    }

  mask = alignment - 1;

  /* Adjust the size to account for (1) the size of the allocated node and
   * (2) to make sure that it is aligned with MM_ALIGN and its size is at
   * least MM_MIN_CHUNK.
   *
   * Notice that we increase the allocation size by twice the requested
   * alignment. We do this so that there will be at least two valid
   * alignment points within the allocated memory.
   *
   * NOTE: These are sizes given to malloc and not chunk sizes. They do
   * not include MM_SIZEOF_ALLOCNODE.
   */

  if (size < MM_MIN_CHUNK - MM_ALLOCNODE_OVERHEAD)
    {
      size = MM_MIN_CHUNK - MM_ALLOCNODE_OVERHEAD;
    }

  newsize = MM_ALIGN_UP(size);          /* Make multiples of our granule size */
  allocsize = newsize + 2 * alignment;  /* Add double full alignment size */

  if (newsize < size || allocsize < newsize)
    {
      /* Integer overflow */

      return NULL;
    }

  /* Then malloc that size */

  rawchunk = (uintptr_t)mm_malloc(heap, allocsize);
  if (rawchunk == 0)
    {
      return NULL;
    }

  kasan_poison((FAR void *)rawchunk,
               mm_malloc_size(heap, (FAR void *)rawchunk));
  rawchunk = (uintptr_t)kasan_reset_tag((FAR void *)rawchunk);

  /* We need to hold the MM mutex while we muck with the chunks and
   * nodelist.
   */

  DEBUGVERIFY(mm_lock(heap));

  /* Get the node associated with the allocation and the next node after
   * the allocation.
   */

  node = (FAR struct mm_allocnode_s *)(rawchunk - MM_SIZEOF_ALLOCNODE);
  heap->mm_curused -= MM_SIZEOF_NODE(node);

  /* Find the aligned subregion */

  alignedchunk = (rawchunk + mask) & ~mask;

  /* Check if there is free space at the beginning of the aligned chunk */

  if (alignedchunk != rawchunk)
    {
      FAR struct mm_allocnode_s *newnode;
      FAR struct mm_allocnode_s *next;
      size_t precedingsize;
      size_t newnodesize;

      /* Get the node the next node after the allocation. */

      next = (FAR struct mm_allocnode_s *)
             ((FAR char *)node + MM_SIZEOF_NODE(node));

      newnode = (FAR struct mm_allocnode_s *)
                (alignedchunk - MM_SIZEOF_ALLOCNODE);

      /* Preceding size is full size of the new 'node,' including
       * MM_SIZEOF_ALLOCNODE
       */

      precedingsize = (uintptr_t)newnode - (uintptr_t)node;

      /* If we were unlucky, then the alignedchunk can lie in such a position
       * that precedingsize < SIZEOF_NODE_FREENODE. We can't let that happen
       * because we are going to cast 'node' to struct mm_freenode_s below.
       * This is why we allocated memory large enough to support two
       * alignment points. In this case, we will simply use the second
       * alignment point.
       */

      if (precedingsize < MM_MIN_CHUNK)
        {
          alignedchunk += alignment;
          newnode = (FAR struct mm_allocnode_s *)
                    (alignedchunk - MM_SIZEOF_ALLOCNODE);
          precedingsize = (uintptr_t)newnode - (uintptr_t)node;
        }

      /* If the previous node is free, merge node and previous node, then
       * set up the node size.
       */

      if (MM_PREVNODE_IS_FREE(node))
        {
          FAR struct mm_freenode_s *prev =
            (FAR struct mm_freenode_s *)((FAR char *)node - node->preceding);

          /* Remove the node. There must be a predecessor, but there may
           * not be a successor node.
           */

          DEBUGASSERT(prev->blink);
          prev->blink->flink = prev->flink;
          if (prev->flink)
            {
              prev->flink->blink = prev->blink;
            }

          precedingsize += MM_SIZEOF_NODE(prev);
          node = (FAR struct mm_allocnode_s *)prev;
        }

      node->size = precedingsize;

      /* Set up the size of the new node */

      newnodesize = (uintptr_t)next - (uintptr_t)newnode;
      newnode->size = newnodesize | MM_ALLOC_BIT | MM_PREVFREE_BIT;
      newnode->preceding = precedingsize;

      /* Clear the previous free bit of the next node */

      next->size &= ~MM_PREVFREE_BIT;

      /* Convert the newnode chunk size back into malloc-compatible size by
       * subtracting the header size MM_ALLOCNODE_OVERHEAD.
       */

      allocsize = newnodesize - MM_ALLOCNODE_OVERHEAD;

      /* Add the original, newly freed node to the free nodelist */

      mm_addfreechunk(heap, (FAR struct mm_freenode_s *)node);

      /* Replace the original node with the newlay realloaced,
       * aligned node
       */

      node = newnode;
    }

  /* Check if there is free space at the end of the aligned chunk. Convert
   * malloc-compatible chunk size to include MM_ALLOCNODE_OVERHEAD as needed
   * for mm_shrinkchunk.
   */

  size = MM_ALIGN_UP(size + MM_ALLOCNODE_OVERHEAD);
  if (allocsize > size)
    {
      /* Shrink the chunk by that much -- remember, mm_shrinkchunk wants
       * internal chunk sizes that include MM_ALLOCNODE_OVERHEAD.
       */

      mm_shrinkchunk(heap, node, size);
    }

  /* Update heap statistics */

  size = MM_SIZEOF_NODE(node);
  heap->mm_curused += size;
  if (heap->mm_curused > heap->mm_maxused)
    {
      heap->mm_maxused = heap->mm_curused;
    }

  sched_note_heap(NOTE_HEAP_ALLOC, heap, (FAR void *)alignedchunk, size,
                  heap->mm_curused);
  mm_unlock(heap);

  MM_ADD_BACKTRACE(heap, node);

  alignedchunk = (uintptr_t)kasan_unpoison((FAR const void *)alignedchunk,
                                           size - MM_ALLOCNODE_OVERHEAD);

  DEBUGASSERT(alignedchunk % alignment == 0);

  minfo("Aligned %"PRIxPTR" to %"PRIxPTR", size %zu\n",
        rawchunk, alignedchunk, size);

  return (FAR void *)alignedchunk;
}
Clearly, the purpose of this function is to allocate a block of size bytes from heap, aligned to alignment.
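As a quick usage sketch (illustrative only: heap is assumed to be a valid struct mm_heap_s handle, and application code normally reaches this path through the standard memalign()/posix_memalign() wrappers rather than by calling mm_memalign() directly):

FAR void *buf = mm_memalign(heap, 64, 200); /* 200 bytes at a 64-byte boundary */
if (buf != NULL)
  {
    DEBUGASSERT(((uintptr_t)buf & 63) == 0); /* returned address is 64-byte aligned */
    mm_free(heap, buf);
  }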
While validating alignment, the following code appears:
if (alignment <= MM_ALIGN)
  {
    FAR void *ptr = mm_malloc(heap, size);
    DEBUGASSERT(ptr == NULL || ((uintptr_t)ptr) % alignment == 0);
    return ptr;
  }
MM_ALIGN is indeed the default alignment granularity of mm_malloc; it is also the minimum alignment granularity of any allocation.
FAR void *mm_malloc(FAR struct mm_heap_s *heap, size_t size)
{
  ......
  DEBUGASSERT(alignsize >= MM_ALIGN);
  ......
}
When alignment is smaller than MM_ALIGN, the allocation is forced to be MM_ALIGN-aligned, while the user only asked for alignment-aligned memory. There is obviously a difference between the two, which at first sight does not match what the user requested. So the question is: leaving aside any wasted space, is memory that is forced to MM_ALIGN alignment guaranteed to also be alignment-aligned? The answer is yes. First, we have already established that alignment is a power of two:
if ((alignment & -alignment) != alignment)
  {
    return NULL;
  }
Since alignment < MM_ALIGN and both are powers of two, MM_ALIGN / alignment is some integer n ≠ 0 and MM_ALIGN % alignment = 0. In other words, MM_ALIGN is a multiple of alignment, so an MM_ALIGN-aligned address is necessarily alignment-aligned.
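A small standalone sketch (plain C, using an assumed value of 16 for MM_ALIGN; the real value is configuration dependent) checks both facts:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define MY_MM_ALIGN ((size_t)16) /* stand-in for MM_ALIGN; any power of two works */

int main(void)
{
  uintptr_t addr = 0x20000000u + 3 * MY_MM_ALIGN; /* some MM_ALIGN-aligned address */
  size_t alignment;

  for (alignment = 1; alignment <= MY_MM_ALIGN; alignment <<= 1)
    {
      /* The power-of-two check used by mm_memalign */

      assert((alignment & -alignment) == alignment);

      /* MM_ALIGN is a multiple of every smaller power of two, so any
       * MM_ALIGN-aligned address is also alignment-aligned.
       */

      assert(addr % alignment == 0);
    }

  printf("MM_ALIGN-aligned addresses satisfy all smaller power-of-two alignments\n");
  return 0;
}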
Then what is going on with the code below?
else if (alignment < MM_MIN_CHUNK)
  {
    alignment = MM_MIN_CHUNK;
  }
Some will say that MM_MIN_CHUNK is the smallest unit in the mm_nodelist. True, but what is the reason behind this? We showed above that when alignment is smaller than MM_ALIGN, an MM_ALIGN-aligned address is always alignment-aligned; the converse does not hold, though: an alignment-aligned address is not necessarily MM_ALIGN-aligned. Now, when alignment < MM_MIN_CHUNK, if we allocated at an alignment boundary and the block is MM_MIN_CHUNK in size, then after that block is freed there are cases where it can no longer serve a request with a larger alignment. Over time this accumulates into fragmentation, to the point where plenty of memory is free yet mm_memalign still fails. If we instead force alignment = MM_MIN_CHUNK whenever alignment < MM_MIN_CHUNK, this situation cannot arise, and the chance that such a freed block can be reused goes up.
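A numeric illustration of the scenario above (values assumed purely for illustration: MM_ALIGN = 16, MM_MIN_CHUNK = 32; the real values depend on the configuration):

uintptr_t freed_start = 0x20000030;                          /* 16-aligned, but not 32-aligned      */
uintptr_t aligned32   = (freed_start + 31) & ~(uintptr_t)31; /* 0x20000040: first 32-byte boundary  */
size_t    leading_gap = aligned32 - freed_start;             /* 16 bytes, smaller than MM_MIN_CHUNK */

/* The 16-byte gap in front of the 32-byte boundary is too small to become a
 * free node, so this freed block cannot serve a later 32-byte-aligned request;
 * clamping alignment up to MM_MIN_CHUNK keeps such awkward chunks from being
 * produced in the first place.
 */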
Next comes setting up the size of the requested allocation node.
/* Notice that we increase the allocation size by twice the requested
 * alignment. We do this so that there will be at least two valid
 * alignment points within the allocated memory.
 */

allocsize = newsize + 2 * alignment; /* Add double full alignment size */
What exactly does this mean?
rawchunk = (uintptr_t)mm_malloc(heap, allocsize);
rawchunk is only guaranteed to be MM_ALIGN-aligned, while at this point alignment >= MM_MIN_CHUNK > MM_ALIGN, so rawchunk is not necessarily alignment-aligned. If rawchunk does happen to be alignment-aligned, the leftover space is simply handed to mm_shrinkchunk. If rawchunk is not alignment-aligned, we have to round it up to an alignment boundary ourselves:
alignedchunk = (rawchunk + mask) & ~mask;
At this point alignedchunk is the first alignment point. When the space in front of it, precedingsize = newnode - node, is smaller than MM_MIN_CHUNK, we have to fall back to the second alignment point:
alignedchunk += alignment;
So, would allocsize = newsize + alignment have been enough?
If rawchunk happens to be alignment-aligned, there is no problem. But what if it is not? After the align-up step, the leading gap can come out smaller than MM_MIN_CHUNK, forming a fragment that cannot be used; the only way out is to jump to the second alignment point, and with only one extra alignment in the allocation the space remaining after that second point is smaller than newsize. The user region would then run past the end of what was actually allocated, that is, the user would be using memory out of bounds, and the out-of-bounds detection would cause this allocation to fail.
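The following worked example makes this concrete (all values assumed for illustration: MM_ALIGN = 16, MM_SIZEOF_ALLOCNODE = 16, MM_MIN_CHUNK = 32, alignment = 64, newsize = 128):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
  const size_t    newsize   = 128;
  const size_t    alignment = 64;
  const uintptr_t rawchunk  = 0x20000070; /* MM_ALIGN-aligned, 16 bytes below a 64-byte boundary */
  uintptr_t alignedchunk;
  size_t leading;
  size_t left_1x;
  size_t left_2x;

  /* First alignment point; the gap in front of it equals newnode - node */

  alignedchunk = (rawchunk + alignment - 1) & ~(uintptr_t)(alignment - 1);
  leading = alignedchunk - rawchunk; /* 16 bytes, smaller than MM_MIN_CHUNK */
  assert(leading < 32);

  /* Too small for a free node, so fall back to the second alignment point */

  alignedchunk += alignment; /* now 0x200000c0 */

  left_1x = (rawchunk + newsize + 1 * alignment) - alignedchunk; /* 112 bytes: overrun for 128 */
  left_2x = (rawchunk + newsize + 2 * alignment) - alignedchunk; /* 176 bytes: always enough   */

  printf("newsize + alignment     leaves %zu bytes for a %zu-byte request\n",
         left_1x, newsize);
  printf("newsize + 2 * alignment leaves %zu bytes for a %zu-byte request\n",
         left_2x, newsize);
  return 0;
}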