patches/uClibc/0.9.30.2/270-malloc-fix-race-condition-and-other-bugs-in-the-no-m.patch
From fa476d01f1c1990a92ee49d1f1c557b83805d0e9 Mon Sep 17 00:00:00 2001
From: Freeman Wang <xwang@ubicom.com>
Date: Sat, 19 Dec 2009 13:43:00 -0800
Subject: [PATCH 09/15] malloc: fix race condition and other bugs in the no-mmu malloc

Fixes multiple race conditions on the mmb list. This was done by
making the mmb_heap_lock into a recursive lock and making the
regular heap_lock extend to cover the mmb heap handling.

Also move the new_mmb allocation up to before the mmb list is
iterated through to find the insertion point. Otherwise, if the
mmb_heap also runs out and has to be extended right after the
regular heap was extended, the mmb list could be corrupted.
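
Roughly, the self-deadlock that the recursive lock avoids looks like
the sketch below. This is a simplified illustration, not the actual
uClibc code: mmb_heap_lock and free_mmb_descriptor stand in for the
real __malloc_mmb_heap_lock and free path, where freeing an mmb
descriptor can re-enter the mmb heap while the same thread already
holds its lock. Build with -pthread; the initializer is a GNU
extension.

/* Simplified sketch, illustrative names only. */
#define _GNU_SOURCE
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mmb_heap_lock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;

static void free_mmb_descriptor (void)
{
	/* Called while the caller already holds mmb_heap_lock; a recursive
	   mutex lets the same thread take it again instead of deadlocking. */
	pthread_mutex_lock (&mmb_heap_lock);
	/* ... return the descriptor to the mmb heap ... */
	pthread_mutex_unlock (&mmb_heap_lock);
}

int main (void)
{
	pthread_mutex_lock (&mmb_heap_lock);
	free_mmb_descriptor ();	/* nested acquisition on the same thread */
	pthread_mutex_unlock (&mmb_heap_lock);
	puts ("no deadlock");
	return 0;
}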

Signed-off-by: Freeman Wang <xwang@ubicom.com>
Signed-off-by: Austin Foxley <austinf@cetoncorp.com>
---
 libc/stdlib/malloc/free.c   |    6 +++---
 libc/stdlib/malloc/malloc.c |    7 ++++---
 2 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/libc/stdlib/malloc/free.c b/libc/stdlib/malloc/free.c
index 90e18f4..741248a 100644
--- a/libc/stdlib/malloc/free.c
+++ b/libc/stdlib/malloc/free.c
@@ -179,14 +179,14 @@ __free_to_heap (void *mem, struct heap_free_area **heap
 		  /* Start searching again from the end of this block.  */
 		  start = mmb_end;
 
+		  /* Release the descriptor block we used.  */
+		  free_to_heap (mmb, &__malloc_mmb_heap, &__malloc_mmb_heap_lock);
+
 		  /* We have to unlock the heap before we recurse to free the mmb
 		     descriptor, because we might be unmapping from the mmb
 		     heap.  */
 		  __heap_unlock (heap_lock);
 
-		  /* Release the descriptor block we used.  */
-		  free_to_heap (mmb, &__malloc_mmb_heap, &__malloc_mmb_heap_lock);
-
 		  /* Do the actual munmap.  */
 		  munmap ((void *)mmb_start, mmb_end - mmb_start);
 
diff --git a/libc/stdlib/malloc/malloc.c b/libc/stdlib/malloc/malloc.c
index 71f9e58..84a6acd 100644
--- a/libc/stdlib/malloc/malloc.c
+++ b/libc/stdlib/malloc/malloc.c
@@ -48,7 +48,7 @@ struct malloc_mmb *__malloc_mmapped_blocks = 0;
 HEAP_DECLARE_STATIC_FREE_AREA (initial_mmb_fa, 48); /* enough for 3 mmbs */
 struct heap_free_area *__malloc_mmb_heap = HEAP_INIT_WITH_FA (initial_mmb_fa);
 #ifdef HEAP_USE_LOCKING
-pthread_mutex_t __malloc_mmb_heap_lock = PTHREAD_MUTEX_INITIALIZER;
+pthread_mutex_t __malloc_mmb_heap_lock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
 #endif
 #endif /* __UCLIBC_UCLINUX_BROKEN_MUNMAP__ */
 
@@ -151,19 +151,19 @@ __malloc_from_heap (size_t size, struct heap_free_area **heap
 	  /* Try again to allocate.  */
 	  mem = __heap_alloc (heap, &size);
 
-	  __heap_unlock (heap_lock);
-
 #if !defined(MALLOC_USE_SBRK) && defined(__UCLIBC_UCLINUX_BROKEN_MUNMAP__)
 	  /* Insert a record of BLOCK in sorted order into the
 	     __malloc_mmapped_blocks list.  */
 
+	  new_mmb = malloc_from_heap (sizeof *new_mmb, &__malloc_mmb_heap, &__malloc_mmb_heap_lock);
+
 	  for (prev_mmb = 0, mmb = __malloc_mmapped_blocks;
 	       mmb;
 	       prev_mmb = mmb, mmb = mmb->next)
 	    if (block < mmb->mem)
 	      break;
 
-	  new_mmb = malloc_from_heap (sizeof *new_mmb, &__malloc_mmb_heap, &__malloc_mmb_heap_lock);
 	  new_mmb->next = mmb;
 	  new_mmb->mem = block;
 	  new_mmb->size = block_size;
@@ -177,6 +177,7 @@ __malloc_from_heap (size_t size, struct heap_free_area **heap
 			    (unsigned)new_mmb->mem, block_size);
 #endif /* !MALLOC_USE_SBRK && __UCLIBC_UCLINUX_BROKEN_MUNMAP__ */
+	  __heap_unlock (heap_lock);