Skip to content

Commit

Permalink
mm: replace mutex with spinlock in the implementation of memory alloc…
Browse files Browse the repository at this point in the history
…ation.

1. Since memory allocation operations are all non-blocking, we can replace the mutex with a spinlock.
2. A spinlock is significantly faster than a mutex, improving the real-time capability and performance of the system.
3. This allows memory to be allocated in IRQ context.
4. In many cases, memory allocation happens within critical sections, and a mutex may cause context switches there, leading to unpredictable behavior.
5. A mutex also prevents memory allocation from being used inside spinlock-protected scopes; a spinlock removes that restriction.

Signed-off-by: hujun5 <hujun5@xiaomi.com>
  • Loading branch information
hujun260 committed Sep 6, 2024
1 parent 659448a commit 35c46e1
Show file tree
Hide file tree
Showing 12 changed files with 91 additions and 262 deletions.
1 change: 0 additions & 1 deletion mm/mm_heap/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,6 @@ if(CONFIG_MM_DEFAULT_MANAGER)

set(SRCS
mm_initialize.c
mm_lock.c
mm_addfreechunk.c
mm_size2ndx.c
mm_malloc_size.c
Expand Down
2 changes: 1 addition & 1 deletion mm/mm_heap/Make.defs
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@

ifeq ($(CONFIG_MM_DEFAULT_MANAGER),y)

CSRCS += mm_initialize.c mm_lock.c mm_addfreechunk.c mm_size2ndx.c
CSRCS += mm_initialize.c mm_addfreechunk.c mm_size2ndx.c
CSRCS += mm_malloc_size.c mm_shrinkchunk.c mm_brkaddr.c mm_calloc.c
CSRCS += mm_extend.c mm_free.c mm_mallinfo.c mm_malloc.c mm_foreach.c
CSRCS += mm_memalign.c mm_realloc.c mm_zalloc.c mm_heapmember.c mm_memdump.c
Expand Down
9 changes: 2 additions & 7 deletions mm/mm_heap/mm.h
Original file line number Diff line number Diff line change
Expand Up @@ -219,9 +219,9 @@ struct mm_delaynode_s

struct mm_heap_s
{
/* Mutex for controling access to this heap */
/* Spinlock for controling access to this heap */

mutex_t mm_lock;
spinlock_t mm_lock;

/* This is the size of the heap provided to mm */

Expand Down Expand Up @@ -280,11 +280,6 @@ typedef CODE void (*mm_node_handler_t)(FAR struct mm_allocnode_s *node,
* Public Function Prototypes
****************************************************************************/

/* Functions contained in mm_lock.c *****************************************/

int mm_lock(FAR struct mm_heap_s *heap);
void mm_unlock(FAR struct mm_heap_s *heap);

/* Functions contained in mm_shrinkchunk.c **********************************/

void mm_shrinkchunk(FAR struct mm_heap_s *heap,
Expand Down
5 changes: 3 additions & 2 deletions mm/mm_heap/mm_extend.c
Original file line number Diff line number Diff line change
Expand Up @@ -56,6 +56,7 @@ void mm_extend(FAR struct mm_heap_s *heap, FAR void *mem, size_t size,
FAR struct mm_allocnode_s *newnode;
uintptr_t blockstart;
uintptr_t blockend;
irqstate_t flags;

/* Make sure that we were passed valid parameters */

Expand All @@ -77,7 +78,7 @@ void mm_extend(FAR struct mm_heap_s *heap, FAR void *mem, size_t size,

/* Take the memory manager mutex */

DEBUGVERIFY(mm_lock(heap));
flags = spin_lock_irqsave(&heap->mm_lock);

/* Get the terminal node in the old heap. The block to extend must
* immediately follow this node.
Expand Down Expand Up @@ -109,7 +110,7 @@ void mm_extend(FAR struct mm_heap_s *heap, FAR void *mem, size_t size,
/* Finally, increase the total heap size accordingly */

heap->mm_heapsize += size;
mm_unlock(heap);
spin_unlock_irqrestore(&heap->mm_lock, flags);

/* Finally "free" the new block of memory where the old terminal node was
* located.
Expand Down
7 changes: 5 additions & 2 deletions mm/mm_heap/mm_foreach.c
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,9 @@ void mm_foreach(FAR struct mm_heap_s *heap, mm_node_handler_t handler,
{
FAR struct mm_allocnode_s *node;
FAR struct mm_allocnode_s *prev;
irqstate_t flags;
size_t nodesize;

#if CONFIG_MM_REGIONS > 1
int region;
#else
Expand All @@ -69,11 +71,12 @@ void mm_foreach(FAR struct mm_heap_s *heap, mm_node_handler_t handler,
* Retake the mutex for each region to reduce latencies
*/

if (mm_lock(heap) < 0)
if (_SCHED_GETTID() < 0)
{
return;
}

flags = spin_lock_irqsave(&heap->mm_lock);
for (node = heap->mm_heapstart[region];
node < heap->mm_heapend[region];
node = (FAR struct mm_allocnode_s *)((FAR char *)node + nodesize))
Expand All @@ -96,7 +99,7 @@ void mm_foreach(FAR struct mm_heap_s *heap, mm_node_handler_t handler,
DEBUGASSERT(node == heap->mm_heapend[region]);
handler(node, arg);

mm_unlock(heap);
spin_unlock_irqrestore(&heap->mm_lock, flags);
}
#undef region
}
8 changes: 5 additions & 3 deletions mm/mm_heap/mm_free.c
Original file line number Diff line number Diff line change
Expand Up @@ -83,10 +83,11 @@ void mm_delayfree(FAR struct mm_heap_s *heap, FAR void *mem, bool delay)
FAR struct mm_freenode_s *node;
FAR struct mm_freenode_s *prev;
FAR struct mm_freenode_s *next;
irqstate_t flags;
size_t nodesize;
size_t prevsize;

if (mm_lock(heap) < 0)
if (_SCHED_GETTID() < 0)
{
/* Meet -ESRCH return, which means we are in situations
* during context switching(See mm_lock() & gettid()).
Expand All @@ -97,6 +98,7 @@ void mm_delayfree(FAR struct mm_heap_s *heap, FAR void *mem, bool delay)
return;
}

flags = spin_lock_irqsave(&heap->mm_lock);
#ifdef CONFIG_MM_FILL_ALLOCATIONS
memset(mem, MM_FREE_MAGIC, mm_malloc_size(heap, mem));
#endif
Expand All @@ -105,7 +107,7 @@ void mm_delayfree(FAR struct mm_heap_s *heap, FAR void *mem, bool delay)

if (delay)
{
mm_unlock(heap);
spin_unlock_irqrestore(&heap->mm_lock, flags);
add_delaylist(heap, mem);
return;
}
Expand Down Expand Up @@ -200,7 +202,7 @@ void mm_delayfree(FAR struct mm_heap_s *heap, FAR void *mem, bool delay)
/* Add the merged node to the nodelist */

mm_addfreechunk(heap, node);
mm_unlock(heap);
spin_unlock_irqrestore(&heap->mm_lock, flags);
}

/****************************************************************************
Expand Down
10 changes: 5 additions & 5 deletions mm/mm_heap/mm_initialize.c
Original file line number Diff line number Diff line change
Expand Up @@ -103,6 +103,7 @@ void mm_addregion(FAR struct mm_heap_s *heap, FAR void *heapstart,
FAR struct mm_freenode_s *node;
uintptr_t heapbase;
uintptr_t heapend;
irqstate_t flags;
#if CONFIG_MM_REGIONS > 1
int IDX;

Expand Down Expand Up @@ -139,7 +140,7 @@ void mm_addregion(FAR struct mm_heap_s *heap, FAR void *heapstart,

kasan_register(heapstart, &heapsize);

DEBUGVERIFY(mm_lock(heap));
flags = spin_lock_irqsave(&heap->mm_lock);

/* Adjust the provided heap start and size.
*
Expand Down Expand Up @@ -198,7 +199,7 @@ void mm_addregion(FAR struct mm_heap_s *heap, FAR void *heapstart,

mm_addfreechunk(heap, node);
heap->mm_curused += 2 * MM_SIZEOF_ALLOCNODE;
mm_unlock(heap);
spin_unlock_irqrestore(&heap->mm_lock, flags);
}

/****************************************************************************
Expand Down Expand Up @@ -255,11 +256,11 @@ FAR struct mm_heap_s *mm_initialize(FAR const char *name,
heap->mm_nodelist[i].blink = &heap->mm_nodelist[i - 1];
}

/* Initialize the malloc mutex to one (to support one-at-
/* Initialize the malloc spinlock to one (to support one-at-
* a-time access to private data sets).
*/

nxmutex_init(&heap->mm_lock);
spin_initialize(&heap->mm_lock, SP_UNLOCKED);

#if defined(CONFIG_FS_PROCFS) && !defined(CONFIG_FS_PROCFS_EXCLUDE_MEMINFO)
# if defined(CONFIG_BUILD_FLAT) || defined(__KERNEL__)
Expand Down Expand Up @@ -364,5 +365,4 @@ void mm_uninitialize(FAR struct mm_heap_s *heap)
procfs_unregister_meminfo(&heap->mm_procfs);
# endif
#endif
nxmutex_destroy(&heap->mm_lock);
}
117 changes: 0 additions & 117 deletions mm/mm_heap/mm_lock.c

This file was deleted.

5 changes: 3 additions & 2 deletions mm/mm_heap/mm_malloc.c
Original file line number Diff line number Diff line change
Expand Up @@ -167,6 +167,7 @@ void mm_free_delaylist(FAR struct mm_heap_s *heap)
FAR void *mm_malloc(FAR struct mm_heap_s *heap, size_t size)
{
FAR struct mm_freenode_s *node;
irqstate_t flags;
size_t alignsize;
size_t nodesize;
FAR void *ret = NULL;
Expand Down Expand Up @@ -209,7 +210,7 @@ FAR void *mm_malloc(FAR struct mm_heap_s *heap, size_t size)

/* We need to hold the MM mutex while we muck with the nodelist. */

DEBUGVERIFY(mm_lock(heap));
flags = spin_lock_irqsave(&heap->mm_lock);

/* Convert the request size into a nodelist index */

Expand Down Expand Up @@ -317,7 +318,7 @@ FAR void *mm_malloc(FAR struct mm_heap_s *heap, size_t size)
}

DEBUGASSERT(ret == NULL || mm_heapmember(heap, ret));
mm_unlock(heap);
spin_unlock_irqrestore(&heap->mm_lock, flags);

if (ret)
{
Expand Down
5 changes: 3 additions & 2 deletions mm/mm_heap/mm_memalign.c
Original file line number Diff line number Diff line change
Expand Up @@ -54,6 +54,7 @@ FAR void *mm_memalign(FAR struct mm_heap_s *heap, size_t alignment,
FAR struct mm_allocnode_s *node;
uintptr_t rawchunk;
uintptr_t alignedchunk;
irqstate_t flags;
size_t mask;
size_t allocsize;
size_t newsize;
Expand Down Expand Up @@ -142,7 +143,7 @@ FAR void *mm_memalign(FAR struct mm_heap_s *heap, size_t alignment,
* nodelist.
*/

DEBUGVERIFY(mm_lock(heap));
flags = spin_lock_irqsave(&heap->mm_lock);

/* Get the node associated with the allocation and the next node after
* the allocation.
Expand Down Expand Up @@ -271,7 +272,7 @@ FAR void *mm_memalign(FAR struct mm_heap_s *heap, size_t alignment,
heap->mm_maxused = heap->mm_curused;
}

mm_unlock(heap);
spin_unlock_irqrestore(&heap->mm_lock, flags);

MM_ADD_BACKTRACE(heap, node);

Expand Down
9 changes: 5 additions & 4 deletions mm/mm_heap/mm_realloc.c
Original file line number Diff line number Diff line change
Expand Up @@ -68,6 +68,7 @@ FAR void *mm_realloc(FAR struct mm_heap_s *heap, FAR void *oldmem,
FAR struct mm_allocnode_s *oldnode;
FAR struct mm_freenode_s *prev = NULL;
FAR struct mm_freenode_s *next;
irqstate_t flags;
size_t newsize;
size_t oldsize;
size_t prevsize = 0;
Expand Down Expand Up @@ -133,7 +134,7 @@ FAR void *mm_realloc(FAR struct mm_heap_s *heap, FAR void *oldmem,

/* We need to hold the MM mutex while we muck with the nodelist. */

DEBUGVERIFY(mm_lock(heap));
flags = spin_lock_irqsave(&heap->mm_lock);
DEBUGASSERT(MM_NODE_IS_ALLOC(oldnode));

/* Check if this is a request to reduce the size of the allocation. */
Expand All @@ -155,7 +156,7 @@ FAR void *mm_realloc(FAR struct mm_heap_s *heap, FAR void *oldmem,

/* Then return the original address */

mm_unlock(heap);
spin_unlock_irqrestore(&heap->mm_lock, flags);
MM_ADD_BACKTRACE(heap, oldnode);

return oldmem;
Expand Down Expand Up @@ -380,7 +381,7 @@ FAR void *mm_realloc(FAR struct mm_heap_s *heap, FAR void *oldmem,
heap->mm_maxused = heap->mm_curused;
}

mm_unlock(heap);
spin_unlock_irqrestore(&heap->mm_lock, flags);
MM_ADD_BACKTRACE(heap, (FAR char *)newmem - MM_SIZEOF_ALLOCNODE);

kasan_unpoison(newmem, mm_malloc_size(heap, newmem));
Expand All @@ -406,7 +407,7 @@ FAR void *mm_realloc(FAR struct mm_heap_s *heap, FAR void *oldmem,
* leave the original memory in place.
*/

mm_unlock(heap);
spin_unlock_irqrestore(&heap->mm_lock, flags);
newmem = mm_malloc(heap, size);
if (newmem)
{
Expand Down
Loading

0 comments on commit 35c46e1

Please sign in to comment.