mm_memalign in NuttX

Note: the code analyzed here is taken from NuttX version 12.8.0.

/****************************************************************************
 * Name: mm_memalign
 *
 * Description:
 *   memalign requests more than enough space from malloc, finds a region
 *   within that chunk that meets the alignment request and then frees any
 *   leading or trailing space.
 *
 *   The alignment argument must be a power of two. 16-byte alignment is
 *   guaranteed by normal malloc calls.
 *
 ****************************************************************************/

FAR void *mm_memalign(FAR struct mm_heap_s *heap, size_t alignment,
                      size_t size)
{
  FAR struct mm_allocnode_s *node;
  uintptr_t rawchunk;
  uintptr_t alignedchunk;
  size_t mask;
  size_t allocsize;
  size_t newsize;

  /* Make sure that alignment is less than half max size_t */

  if (alignment >= (SIZE_MAX / 2))
    {
      return NULL;
    }

  /* Make sure that alignment is a power of 2 */

  if ((alignment & -alignment) != alignment)
    {
      return NULL;
    }

#ifdef CONFIG_MM_HEAP_MEMPOOL
  if (heap->mm_mpool)
    {
      node = mempool_multiple_memalign(heap->mm_mpool, alignment, size);
      if (node != NULL)
        {
          return node;
        }
    }
#endif

  /* If this requested alinement's less than or equal to the natural
   * alignment of malloc, then just let malloc do the work.
   */

  if (alignment <= MM_ALIGN)
    {
      FAR void *ptr = mm_malloc(heap, size);
      DEBUGASSERT(ptr == NULL || ((uintptr_t)ptr) % alignment == 0);
      return ptr;
    }
  else if (alignment < MM_MIN_CHUNK)
    {
      alignment = MM_MIN_CHUNK;
    }

  mask = alignment - 1;

  /* Adjust the size to account for (1) the size of the allocated node and
   * (2) to make sure that it is aligned with MM_ALIGN and its size is at
   * least MM_MIN_CHUNK.
   *
   * Notice that we increase the allocation size by twice the requested
   * alignment.  We do this so that there will be at least two valid
   * alignment points within the allocated memory.
   *
   * NOTE:  These are sizes given to malloc and not chunk sizes.  They do
   * not include MM_SIZEOF_ALLOCNODE.
   */

  if (size < MM_MIN_CHUNK - MM_ALLOCNODE_OVERHEAD)
    {
      size = MM_MIN_CHUNK - MM_ALLOCNODE_OVERHEAD;
    }

  newsize   = MM_ALIGN_UP(size);       /* Make multiples of our granule size */
  allocsize = newsize + 2 * alignment; /* Add double full alignment size */

  if (newsize < size || allocsize < newsize)
    {
      /* Integer overflow */

      return NULL;
    }

  /* Then malloc that size */

  rawchunk = (uintptr_t)mm_malloc(heap, allocsize);
  if (rawchunk == 0)
    {
      return NULL;
    }

  kasan_poison((FAR void *)rawchunk,
               mm_malloc_size(heap, (FAR void *)rawchunk));

  rawchunk = (uintptr_t)kasan_reset_tag((FAR void *)rawchunk);

  /* We need to hold the MM mutex while we muck with the chunks and
   * nodelist.
   */

  DEBUGVERIFY(mm_lock(heap));

  /* Get the node associated with the allocation and the next node after
   * the allocation.
   */

  node = (FAR struct mm_allocnode_s *)(rawchunk - MM_SIZEOF_ALLOCNODE);
  heap->mm_curused -= MM_SIZEOF_NODE(node);

  /* Find the aligned subregion */

  alignedchunk = (rawchunk + mask) & ~mask;

  /* Check if there is free space at the beginning of the aligned chunk */

  if (alignedchunk != rawchunk)
    {
      FAR struct mm_allocnode_s *newnode;
      FAR struct mm_allocnode_s *next;
      size_t precedingsize;
      size_t newnodesize;

      /* Get the node the next node after the allocation. */

      next = (FAR struct mm_allocnode_s *)
             ((FAR char *)node + MM_SIZEOF_NODE(node));

      newnode = (FAR struct mm_allocnode_s *)
                (alignedchunk - MM_SIZEOF_ALLOCNODE);

      /* Preceding size is full size of the new 'node,' including
       * MM_SIZEOF_ALLOCNODE
       */

      precedingsize = (uintptr_t)newnode - (uintptr_t)node;

      /* If we were unlucky, then the alignedchunk can lie in such a position
       * that precedingsize < SIZEOF_NODE_FREENODE.  We can't let that happen
       * because we are going to cast 'node' to struct mm_freenode_s below.
       * This is why we allocated memory large enough to support two
       * alignment points.  In this case, we will simply use the second
       * alignment point.
       */

      if (precedingsize < MM_MIN_CHUNK)
        {
          alignedchunk += alignment;
          newnode = (FAR struct mm_allocnode_s *)
                    (alignedchunk - MM_SIZEOF_ALLOCNODE);
          precedingsize = (uintptr_t)newnode - (uintptr_t)node;
        }

      /* If the previous node is free, merge node and previous node, then
       * set up the node size.
       */

      if (MM_PREVNODE_IS_FREE(node))
        {
          FAR struct mm_freenode_s *prev =
            (FAR struct mm_freenode_s *)((FAR char *)node - node->preceding);

          /* Remove the node.  There must be a predecessor, but there may
           * not be a successor node.
           */

          DEBUGASSERT(prev->blink);
          prev->blink->flink = prev->flink;
          if (prev->flink)
            {
              prev->flink->blink = prev->blink;
            }

          precedingsize += MM_SIZEOF_NODE(prev);
          node = (FAR struct mm_allocnode_s *)prev;
        }

      node->size = precedingsize;

      /* Set up the size of the new node */

      newnodesize = (uintptr_t)next - (uintptr_t)newnode;
      newnode->size = newnodesize | MM_ALLOC_BIT | MM_PREVFREE_BIT;
      newnode->preceding = precedingsize;

      /* Clear the previous free bit of the next node */

      next->size &= ~MM_PREVFREE_BIT;

      /* Convert the newnode chunk size back into malloc-compatible size by
       * subtracting the header size MM_ALLOCNODE_OVERHEAD.
       */

      allocsize = newnodesize - MM_ALLOCNODE_OVERHEAD;

      /* Add the original, newly freed node to the free nodelist */

      mm_addfreechunk(heap, (FAR struct mm_freenode_s *)node);

      /* Replace the original node with the newlay realloaced,
       * aligned node
       */

      node = newnode;
    }

  /* Check if there is free space at the end of the aligned chunk.  Convert
   * malloc-compatible chunk size to include MM_ALLOCNODE_OVERHEAD as needed
   * for mm_shrinkchunk.
   */

  size = MM_ALIGN_UP(size + MM_ALLOCNODE_OVERHEAD);

  if (allocsize > size)
    {
      /* Shrink the chunk by that much -- remember, mm_shrinkchunk wants
       * internal chunk sizes that include MM_ALLOCNODE_OVERHEAD.
       */

      mm_shrinkchunk(heap, node, size);
    }

  /* Update heap statistics */

  size = MM_SIZEOF_NODE(node);
  heap->mm_curused += size;
  if (heap->mm_curused > heap->mm_maxused)
    {
      heap->mm_maxused = heap->mm_curused;
    }

  sched_note_heap(NOTE_HEAP_ALLOC, heap, (FAR void *)alignedchunk, size,
                  heap->mm_curused);

  mm_unlock(heap);

  MM_ADD_BACKTRACE(heap, node);

  alignedchunk = (uintptr_t)kasan_unpoison((FAR const void *)alignedchunk,
                                           size - MM_ALLOCNODE_OVERHEAD);
  DEBUGASSERT(alignedchunk % alignment == 0);
  minfo("Aligned %" PRIxPTR " to %" PRIxPTR ", size %zu\n",
        rawchunk, alignedchunk, size);
  return (FAR void *)alignedchunk;
}

Clearly, the purpose of this function is to allocate a block of size bytes from heap, aligned to alignment.
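
For orientation, here is a minimal application-level sketch (not taken from the NuttX sources) that requests aligned memory through the standard posix_memalign() interface; on NuttX such a request is ultimately served by the heap allocator code discussed here, with the exact call chain depending on the build configuration:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

int main(void)
{
  void *p = NULL;

  /* Ask for 100 bytes starting on a 64-byte boundary.  posix_memalign()
   * requires the alignment to be a power of two and a multiple of
   * sizeof(void *).
   */

  if (posix_memalign(&p, 64, 100) == 0)
    {
      printf("p = %p, p %% 64 = %zu\n", p, (size_t)((uintptr_t)p % 64));
      free(p);
    }

  return 0;
}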

Within the validation of alignment, the following code appears:

if (alignment <= MM_ALIGN)
    {
      FAR void *ptr = mm_malloc(heap, size);
      DEBUGASSERT(ptr == NULL || ((uintptr_t)ptr) % alignment == 0);
      return ptr;
    }

MM_ALIGN is indeed the default alignment granularity of mm_malloc; it is also the smallest alignment granularity used when allocating space:

FAR void *mm_malloc(FAR struct mm_heap_s *heap, size_t size)
{
    ......

    DEBUGASSERT(alignsize >= MM_ALIGN);

    ......
}

When alignment < MM_ALIGN, the allocated space is forced to be MM_ALIGN-aligned, yet the user actually asked for alignment-aligned memory. There is clearly a mismatch, which on the face of it does not satisfy the user's request. So the question arises (leaving aside any space that might be wasted): is memory that has been forced to MM_ALIGN alignment guaranteed to be alignment-aligned as well? The answer is yes. First, we have already established that alignment is a power of two:

  if ((alignment & -alignment) != alignment)
    {
      return NULL;
    }
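
As an aside, alignment & -alignment isolates the lowest set bit of alignment, so it equals alignment exactly when a single bit is set, i.e. when alignment is a non-zero power of two. A small standalone illustration of the same bit trick (not NuttX code):

#include <assert.h>
#include <stddef.h>

/* Returns true when x is a non-zero power of two: x & -x keeps only the
 * lowest set bit, which equals x itself only if x has a single bit set.
 */

static int is_power_of_two(size_t x)
{
  return x != 0 && (x & -x) == x;
}

int main(void)
{
  assert(is_power_of_two(1));
  assert(is_power_of_two(64));
  assert(!is_power_of_two(0));
  assert(!is_power_of_two(24));
  return 0;
}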

Given that alignment and MM_ALIGN are both powers of two and alignment < MM_ALIGN, we have MM_ALIGN / alignment = 2^n (n >= 1) and MM_ALIGN % alignment == 0. In other words, MM_ALIGN is a multiple of alignment, so any MM_ALIGN-aligned address is necessarily alignment-aligned.
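
This implication is easy to sanity-check in isolation; in the sketch below, 16 is only a stand-in for MM_ALIGN (the header comment of mm_memalign promises 16-byte alignment for normal malloc calls, but the real value is configuration dependent):

#include <assert.h>
#include <stdint.h>

int main(void)
{
  const uintptr_t big   = 16;   /* stand-in for MM_ALIGN */
  const uintptr_t small = 4;    /* a smaller power-of-two alignment */

  /* Because big is a multiple of small, every address that is a multiple
   * of big is automatically a multiple of small as well.
   */

  for (uintptr_t addr = 0; addr < 1024; addr += big)
    {
      assert(addr % small == 0);
    }

  return 0;
}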

So what is the code below about?

  else if (alignment < MM_MIN_CHUNK)
    {
      alignment = MM_MIN_CHUNK;
    }

Some will point out that MM_MIN_CHUNK is the smallest unit in the mm_nodelist. True, but what is the reason behind this branch? We showed above that when alignment < MM_ALIGN, an MM_ALIGN-aligned address is guaranteed to be alignment-aligned; the converse does not hold: an address that is only alignment-aligned is not necessarily aligned to the larger granularity. Now suppose alignment < MM_MIN_CHUNK and a chunk of MM_MIN_CHUNK bytes is allocated aligned only to that smaller alignment. After the chunk is freed, there are situations in which it cannot be reused by a later request with a larger alignment. Over time this accumulates into fragmentation, to the point where plenty of memory is free and yet mm_memalign still fails. If we instead force alignment = MM_MIN_CHUNK whenever alignment < MM_MIN_CHUNK, this situation does not arise, and the probability that such a freed chunk can be reused increases.
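
The direction that matters for this branch is the converse one: an address that is only aligned to a smaller power of two is not necessarily aligned to a larger one. A tiny illustration, where 16 and 32 are purely hypothetical stand-ins for the requested alignment and MM_MIN_CHUNK:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
  const uintptr_t small = 16;   /* hypothetical requested alignment */
  const uintptr_t big   = 32;   /* hypothetical MM_MIN_CHUNK */

  /* Half of the addresses that are multiples of 16 are not multiples of
   * 32, so a freed chunk that only sits on a 16-byte boundary may be
   * useless to a later 32-byte-aligned request.
   */

  for (uintptr_t addr = 0; addr < 256; addr += small)
    {
      printf("%3lu: %s\n", (unsigned long)addr,
             (addr % big == 0) ? "also 32-aligned" : "NOT 32-aligned");
    }

  return 0;
}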

Next, the size of the requested node is set up:

/* Notice that we increase the allocation size by twice the requested
 * alignment.  We do this so that there will be at least two valid
 * alignment points within the allocated memory.
 */
  allocsize = newsize + 2 * alignment; /* Add double full alignment size */

What exactly does this mean?

  rawchunk = (uintptr_t)mm_malloc(heap, allocsize);

rawchunk is only guaranteed to be MM_ALIGN-aligned, but at this point alignment >= MM_MIN_CHUNK > MM_ALIGN, so rawchunk is not necessarily alignment-aligned. If rawchunk happens to be alignment-aligned, the leftover space is handled by mm_shrinkchunk. And if rawchunk is not alignment-aligned? Then we must round it up to the requested alignment:

  alignedchunk = (rawchunk + mask) & ~mask;

At this point alignedchunk is the first alignment point. When it lands so close to the start of the raw chunk that precedingsize = newnode - node < MM_MIN_CHUNK, we have to fall back to the second alignment point:

          alignedchunk += alignment;

So, would allocsize = newsize + alignment have been enough?

If rawchunk is already alignment-aligned, there is no problem. But what if it is not? After the alignment step the leading gap alignedchunk - node may be smaller than MM_MIN_CHUNK, forming a fragment that cannot be used; and in the least favorable placement that gap approaches a full alignment (roughly alignment - 1 bytes). With only a single alignment of slack, the user's region could then run past the end of the allocated chunk, and the resulting out-of-bounds condition would make this allocation fail. Doubling the slack to 2 * alignment guarantees that a second valid alignment point always lies inside the chunk.
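
To make the two-alignment-point mechanism concrete, here is a small standalone sketch that mimics only the address arithmetic (not the real node bookkeeping), using purely hypothetical values MM_SIZEOF_ALLOCNODE = 8, MM_MIN_CHUNK = 16 and alignment = 64; the real values depend on the NuttX configuration:

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define SIZEOF_ALLOCNODE 8u    /* hypothetical node header size */
#define MIN_CHUNK        16u   /* hypothetical minimum chunk size */

int main(void)
{
  uintptr_t alignment = 64;
  uintptr_t mask      = alignment - 1;

  /* Pretend mm_malloc returned a payload at this (8-byte aligned) address;
   * the allocation node sits immediately in front of the payload.
   */

  uintptr_t rawchunk = 0x200003f8;
  uintptr_t node     = rawchunk - SIZEOF_ALLOCNODE;

  /* First alignment point and the node that would describe it */

  uintptr_t alignedchunk = (rawchunk + mask) & ~mask;   /* 0x20000400 */
  uintptr_t newnode      = alignedchunk - SIZEOF_ALLOCNODE;

  /* The gap in front of newnode is only 8 bytes here -- too small to be
   * turned into a free node -- so fall back to the second alignment point,
   * which exists only because mm_memalign over-allocated by 2 * alignment.
   */

  if (newnode - node < MIN_CHUNK)
    {
      alignedchunk += alignment;                        /* 0x20000440 */
      newnode       = alignedchunk - SIZEOF_ALLOCNODE;
    }

  assert(alignedchunk % alignment == 0);
  assert(newnode - node >= MIN_CHUNK);
  printf("raw %" PRIxPTR " -> aligned %" PRIxPTR ", leading gap %" PRIuPTR
         " bytes\n", rawchunk, alignedchunk, newnode - node);
  return 0;
}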
