diff --git a/CHANGELOG b/CHANGELOG
index 09339f6e..41353daf 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -19,6 +19,12 @@ HISTORY
 
   ++ New features:
 
+  2008-06-30 Simon Goldschmidt
+  * mem.c, opt.h, stats.h: fixed bug #21433: Calling mem_free/pbuf_free from
+    interrupt context isn't safe: LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT allows
+    mem_free to run between mem_malloc iterations. Added illegal counter for
+    mem stats.
+
   2008-06-27 Simon Goldschmidt
   * stats.h/.c, some other files: patch #6483: stats module improvement:
     Added defines to display each module's statistic individually, added stats
diff --git a/src/core/mem.c b/src/core/mem.c
index 5817e29e..1e376b49 100644
--- a/src/core/mem.c
+++ b/src/core/mem.c
@@ -178,26 +178,33 @@ static struct mem *ram_end;
 /** pointer to the lowest free block, this is used for faster search */
 static struct mem *lfree;
-
-#if LWIP_USE_HEAP_FROM_INTERRUPT
-
-/* Protect the heap by disabeling interrupts */
-#define LWIP_MEM_DECL_PROTECT()  SYS_ARCH_DECL_PROTECT(lev)
-#define LWIP_MEM_PROTECT()       SYS_ARCH_PROTECT(lev)
-#define LWIP_MEM_UNPROTECT()     SYS_ARCH_UNPROTECT(lev)
-
-#else /* LWIP_USE_HEAP_FROM_INTERRUPT */
-
-/* Protect the heap using a semaphore */
-
 /** concurrent access protection */
 static sys_sem_t mem_sem;
 
-#define LWIP_MEM_DECL_PROTECT()
-#define LWIP_MEM_PROTECT()       sys_arch_sem_wait(mem_sem, 0)
-#define LWIP_MEM_UNPROTECT()     sys_sem_signal(mem_sem)
+#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
 
-#endif /* LWIP_USE_HEAP_FROM_INTERRUPT */
+static volatile u8_t mem_free_count;
+
+/* Allow mem_free from other (e.g. interrupt) context */
+#define LWIP_MEM_FREE_DECL_PROTECT()  SYS_ARCH_DECL_PROTECT(lev_free)
+#define LWIP_MEM_FREE_PROTECT()       SYS_ARCH_PROTECT(lev_free)
+#define LWIP_MEM_FREE_UNPROTECT()     SYS_ARCH_UNPROTECT(lev_free)
+#define LWIP_MEM_ALLOC_DECL_PROTECT() SYS_ARCH_DECL_PROTECT(lev_alloc)
+#define LWIP_MEM_ALLOC_PROTECT()      SYS_ARCH_PROTECT(lev_alloc)
+#define LWIP_MEM_ALLOC_UNPROTECT()    SYS_ARCH_UNPROTECT(lev_alloc)
+
+#else /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
+
+/* Protect the heap only by using a semaphore */
+#define LWIP_MEM_FREE_DECL_PROTECT()
+#define LWIP_MEM_FREE_PROTECT()    sys_arch_sem_wait(mem_sem, 0)
+#define LWIP_MEM_FREE_UNPROTECT()  sys_sem_signal(mem_sem)
+/* mem_malloc is protected using semaphore AND LWIP_MEM_ALLOC_PROTECT */
+#define LWIP_MEM_ALLOC_DECL_PROTECT()
+#define LWIP_MEM_ALLOC_PROTECT()
+#define LWIP_MEM_ALLOC_UNPROTECT()
+
+#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
 
 
 /**
@@ -270,9 +277,7 @@ mem_init(void)
   ram_end->next = MEM_SIZE_ALIGNED;
   ram_end->prev = MEM_SIZE_ALIGNED;
 
-#if !LWIP_USE_HEAP_FROM_INTERRUPT
   mem_sem = sys_sem_new(1);
-#endif /* LWIP_USE_HEAP_FROM_INTERRUPT */
 
   /* initialize the lowest-free pointer to the start of the heap */
   lfree = (struct mem *)ram;
@@ -290,7 +295,7 @@ void
 mem_free(void *rmem)
 {
   struct mem *mem;
-  LWIP_MEM_DECL_PROTECT();
+  LWIP_MEM_FREE_DECL_PROTECT();
 
   if (rmem == NULL) {
     LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_TRACE | 2, ("mem_free(p == NULL) was called.\n"));
@@ -298,18 +303,20 @@ mem_free(void *rmem)
   }
   LWIP_ASSERT("mem_free: sanity check alignment", (((mem_ptr_t)rmem) & (MEM_ALIGNMENT-1)) == 0);
 
-  /* protect the heap from concurrent access */
-  LWIP_MEM_PROTECT();
-
   LWIP_ASSERT("mem_free: legal memory", (u8_t *)rmem >= (u8_t *)ram &&
     (u8_t *)rmem < (u8_t *)ram_end);
 
   if ((u8_t *)rmem < (u8_t *)ram || (u8_t *)rmem >= (u8_t *)ram_end) {
+    SYS_ARCH_DECL_PROTECT(lev);
     LWIP_DEBUGF(MEM_DEBUG | 3, ("mem_free: illegal memory\n"));
-    MEM_STATS_INC(err);
-    LWIP_MEM_UNPROTECT();
+    /* protect mem stats from concurrent access */
+    SYS_ARCH_PROTECT(lev);
+    MEM_STATS_INC(illegal);
+    SYS_ARCH_UNPROTECT(lev);
     return;
   }
+  /* protect the heap from concurrent access */
+  LWIP_MEM_FREE_PROTECT();
   /* Get the corresponding struct mem ... */
   mem = (struct mem *)((u8_t *)rmem - SIZEOF_STRUCT_MEM);
   /* ... which has to be in a used state ... */
@@ -326,7 +333,10 @@ mem_free(void *rmem)
 
   /* finally, see if prev or next are free also */
   plug_holes(mem);
-  LWIP_MEM_UNPROTECT();
+#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
+  mem_free_count = 1;
+#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
+  LWIP_MEM_FREE_UNPROTECT();
 }
 
 /**
@@ -338,6 +348,8 @@ mem_free(void *rmem)
  * @param newsize required size after shrinking (needs to be smaller than or
  *                equal to the previous size)
  * @return for compatibility reasons: is always == rmem, at the moment
+ *         or NULL if newsize is > old size, in which case rmem is NOT touched
+ *         or freed!
  */
 void *
 mem_realloc(void *rmem, mem_size_t newsize)
@@ -345,7 +357,8 @@ mem_realloc(void *rmem, mem_size_t newsize)
   mem_size_t size;
   mem_size_t ptr, ptr2;
   struct mem *mem, *mem2;
-  LWIP_MEM_DECL_PROTECT();
+  /* use the FREE_PROTECT here: it protects with sem OR SYS_ARCH_PROTECT */
+  LWIP_MEM_FREE_DECL_PROTECT();
 
   /* Expand the size of the allocated memory region so that we can
      adjust for alignment. */
@@ -364,7 +377,12 @@ mem_realloc(void *rmem, mem_size_t newsize)
    (u8_t *)rmem < (u8_t *)ram_end);
 
   if ((u8_t *)rmem < (u8_t *)ram || (u8_t *)rmem >= (u8_t *)ram_end) {
+    SYS_ARCH_DECL_PROTECT(lev);
     LWIP_DEBUGF(MEM_DEBUG | 3, ("mem_realloc: illegal memory\n"));
+    /* protect mem stats from concurrent access */
+    SYS_ARCH_PROTECT(lev);
+    MEM_STATS_INC(illegal);
+    SYS_ARCH_UNPROTECT(lev);
     return rmem;
   }
   /* Get the corresponding struct mem ... */
@@ -384,7 +402,7 @@ mem_realloc(void *rmem, mem_size_t newsize)
   }
 
   /* protect the heap from concurrent access */
-  LWIP_MEM_PROTECT();
+  LWIP_MEM_FREE_PROTECT();
 
   MEM_STATS_DEC_USED(used, (size - newsize));
 
@@ -442,7 +460,10 @@ mem_realloc(void *rmem, mem_size_t newsize)
       -> don't do anyhting.
       -> the remaining space stays unused since it is too small
   } */
-  LWIP_MEM_UNPROTECT();
+#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
+  mem_free_count = 1;
+#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
+  LWIP_MEM_FREE_UNPROTECT();
   return rmem;
 }
 
@@ -460,7 +481,10 @@ mem_malloc(mem_size_t size)
 {
   mem_size_t ptr, ptr2;
   struct mem *mem, *mem2;
-  LWIP_MEM_DECL_PROTECT();
+#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
+  u8_t local_mem_free_count = 0;
+#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
+  LWIP_MEM_ALLOC_DECL_PROTECT();
 
   if (size == 0) {
     return NULL;
@@ -480,78 +504,103 @@ mem_malloc(mem_size_t size)
   }
 
   /* protect the heap from concurrent access */
-  LWIP_MEM_PROTECT();
+  sys_arch_sem_wait(mem_sem, 0);
+  LWIP_MEM_ALLOC_PROTECT();
+#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
+  /* run as long as a mem_free disturbed mem_malloc */
+  do {
+    local_mem_free_count = 0;
+#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
 
-  /* Scan through the heap searching for a free block that is big enough,
-   * beginning with the lowest free block.
-   */
-  for (ptr = (u8_t *)lfree - ram; ptr < MEM_SIZE_ALIGNED - size;
-       ptr = ((struct mem *)&ram[ptr])->next) {
-    mem = (struct mem *)&ram[ptr];
-
-    if ((!mem->used) &&
-        (mem->next - (ptr + SIZEOF_STRUCT_MEM)) >= size) {
-      /* mem is not used and at least perfect fit is possible:
-       * mem->next - (ptr + SIZEOF_STRUCT_MEM) gives us the 'user data size' of mem */
-
-      if (mem->next - (ptr + SIZEOF_STRUCT_MEM) >= (size + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED)) {
-        /* (in addition to the above, we test if another struct mem (SIZEOF_STRUCT_MEM) containing
-         * at least MIN_SIZE_ALIGNED of data also fits in the 'user data space' of 'mem')
-         * -> split large block, create empty remainder,
-         * remainder must be large enough to contain MIN_SIZE_ALIGNED data: if
-         * mem->next - (ptr + (2*SIZEOF_STRUCT_MEM)) == size,
-         * struct mem would fit in but no data between mem2 and mem2->next
-         * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
-         *       region that couldn't hold data, but when mem->next gets freed,
-         *       the 2 regions would be combined, resulting in more free memory
-         */
-        ptr2 = ptr + SIZEOF_STRUCT_MEM + size;
-        /* create mem2 struct */
-        mem2 = (struct mem *)&ram[ptr2];
-        mem2->used = 0;
-        mem2->next = mem->next;
-        mem2->prev = ptr;
-        /* and insert it between mem and mem->next */
-        mem->next = ptr2;
-        mem->used = 1;
-
-        if (mem2->next != MEM_SIZE_ALIGNED) {
-          ((struct mem *)&ram[mem2->next])->prev = ptr2;
-        }
-        MEM_STATS_INC_USED(used, (size + SIZEOF_STRUCT_MEM));
-      } else {
-        /* (a mem2 struct does no fit into the user data space of mem and mem->next will always
-         * be used at this point: if not we have 2 unused structs in a row, plug_holes should have
-         * take care of this).
-         * -> near fit or excact fit: do not split, no mem2 creation
-         * also can't move mem->next directly behind mem, since mem->next
-         * will always be used at this point!
-         */
-        mem->used = 1;
-        MEM_STATS_INC_USED(used, mem->next - ((u8_t *)mem - ram));
+    /* Scan through the heap searching for a free block that is big enough,
+     * beginning with the lowest free block.
+     */
+    for (ptr = (u8_t *)lfree - ram; ptr < MEM_SIZE_ALIGNED - size;
+         ptr = ((struct mem *)&ram[ptr])->next) {
+      mem = (struct mem *)&ram[ptr];
+#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
+      mem_free_count = 0;
+      LWIP_MEM_ALLOC_UNPROTECT();
+      /* allow mem_free to run */
+      LWIP_MEM_ALLOC_PROTECT();
+      if (mem_free_count != 0) {
+        local_mem_free_count = mem_free_count;
       }
+      mem_free_count = 0;
+#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
 
-      if (mem == lfree) {
-        /* Find next free block after mem and update lowest free pointer */
-        while (lfree->used && lfree != ram_end) {
-          lfree = (struct mem *)&ram[lfree->next];
+      if ((!mem->used) &&
+          (mem->next - (ptr + SIZEOF_STRUCT_MEM)) >= size) {
+        /* mem is not used and at least perfect fit is possible:
+         * mem->next - (ptr + SIZEOF_STRUCT_MEM) gives us the 'user data size' of mem */
+
+        if (mem->next - (ptr + SIZEOF_STRUCT_MEM) >= (size + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED)) {
+          /* (in addition to the above, we test if another struct mem (SIZEOF_STRUCT_MEM) containing
+           * at least MIN_SIZE_ALIGNED of data also fits in the 'user data space' of 'mem')
+           * -> split large block, create empty remainder,
+           * remainder must be large enough to contain MIN_SIZE_ALIGNED data: if
+           * mem->next - (ptr + (2*SIZEOF_STRUCT_MEM)) == size,
+           * struct mem would fit in but no data between mem2 and mem2->next
+           * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
+           *       region that couldn't hold data, but when mem->next gets freed,
+           *       the 2 regions would be combined, resulting in more free memory
+           */
+          ptr2 = ptr + SIZEOF_STRUCT_MEM + size;
+          /* create mem2 struct */
+          mem2 = (struct mem *)&ram[ptr2];
+          mem2->used = 0;
+          mem2->next = mem->next;
+          mem2->prev = ptr;
+          /* and insert it between mem and mem->next */
+          mem->next = ptr2;
+          mem->used = 1;
+
+          if (mem2->next != MEM_SIZE_ALIGNED) {
+            ((struct mem *)&ram[mem2->next])->prev = ptr2;
+          }
+          MEM_STATS_INC_USED(used, (size + SIZEOF_STRUCT_MEM));
+        } else {
+          /* (a mem2 struct does not fit into the user data space of mem and mem->next will always
+           * be used at this point: if not we have 2 unused structs in a row, plug_holes should have
+           * taken care of this).
+           * -> near fit or exact fit: do not split, no mem2 creation
+           * also can't move mem->next directly behind mem, since mem->next
+           * will always be used at this point!
+           */
+          mem->used = 1;
+          MEM_STATS_INC_USED(used, mem->next - ((u8_t *)mem - ram));
         }
-        LWIP_ASSERT("mem_malloc: !lfree->used", ((lfree == ram_end) || (!lfree->used)));
-      }
-      LWIP_MEM_UNPROTECT();
-      LWIP_ASSERT("mem_malloc: allocated memory not above ram_end.",
-       (mem_ptr_t)mem + SIZEOF_STRUCT_MEM + size <= (mem_ptr_t)ram_end);
-      LWIP_ASSERT("mem_malloc: allocated memory properly aligned.",
-       (unsigned long)((u8_t *)mem + SIZEOF_STRUCT_MEM) % MEM_ALIGNMENT == 0);
-      LWIP_ASSERT("mem_malloc: sanity check alignment",
-       (((mem_ptr_t)mem) & (MEM_ALIGNMENT-1)) == 0);
-      return (u8_t *)mem + SIZEOF_STRUCT_MEM;
+        if (mem == lfree) {
+          /* Find next free block after mem and update lowest free pointer */
+          while (lfree->used && lfree != ram_end) {
+            LWIP_MEM_ALLOC_UNPROTECT();
+            /* prevent high interrupt latency... */
+            LWIP_MEM_ALLOC_PROTECT();
+            lfree = (struct mem *)&ram[lfree->next];
+          }
+          LWIP_ASSERT("mem_malloc: !lfree->used", ((lfree == ram_end) || (!lfree->used)));
+        }
+        LWIP_MEM_ALLOC_UNPROTECT();
+        sys_sem_signal(mem_sem);
+        LWIP_ASSERT("mem_malloc: allocated memory not above ram_end.",
+         (mem_ptr_t)mem + SIZEOF_STRUCT_MEM + size <= (mem_ptr_t)ram_end);
+        LWIP_ASSERT("mem_malloc: allocated memory properly aligned.",
+         (unsigned long)((u8_t *)mem + SIZEOF_STRUCT_MEM) % MEM_ALIGNMENT == 0);
+        LWIP_ASSERT("mem_malloc: sanity check alignment",
+         (((mem_ptr_t)mem) & (MEM_ALIGNMENT-1)) == 0);
+
+        return (u8_t *)mem + SIZEOF_STRUCT_MEM;
+      }
     }
-  }
+#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
+    /* if we got interrupted by a mem_free, try again */
+  } while(local_mem_free_count != 0);
+#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
   LWIP_DEBUGF(MEM_DEBUG | 2, ("mem_malloc: could not allocate %"S16_F" bytes\n", (s16_t)size));
   MEM_STATS_INC(err);
-  LWIP_MEM_UNPROTECT();
+  LWIP_MEM_ALLOC_UNPROTECT();
+  sys_sem_signal(mem_sem);
   return NULL;
 }
diff --git a/src/include/lwip/opt.h b/src/include/lwip/opt.h
index 5b8eca9e..8fd94423 100644
--- a/src/include/lwip/opt.h
+++ b/src/include/lwip/opt.h
@@ -156,21 +156,25 @@
 #endif
 
 /**
- * This is for NO_SYS=0 only; in NO_SYS=1 configurations, the heap may not be accessed
- * from interrupt level!
+ * Set this to 1 if you want to free PBUF_RAM pbufs (or call mem_free()) from
+ * interrupt context (or another context that doesn't allow waiting for a
+ * semaphore).
+ * If set to 1, mem_malloc will be protected by a semaphore and SYS_ARCH_PROTECT,
+ * while mem_free will only use SYS_ARCH_PROTECT. mem_malloc SYS_ARCH_UNPROTECTs
+ * with each loop so that mem_free can run.
  *
- * If you want to free PBUF_RAM pbufs (or call mem_free()) from interrupt context,
- * the heap cannot be protected by a semaphore. Setting this to 1 will disable
- * interrupts while walking the heap.
+ * ATTENTION: As you can see from the above description, this leads to
+ * interrupts being disabled and re-enabled often, which can be slow! Also,
+ * when memory is low, mem_malloc can take longer to complete.
  *
- * *** USE THIS WITH CARE: Setting this to 1 can disable interrupts for a long time! ***
- *
- * If you don't want that, call
+ * If you don't want that, at least for NO_SYS=0, you can still use the following
+ * functions to enqueue a deallocation call which then runs in the tcpip_thread
+ * context:
  * - pbuf_free_callback(p);
  * - mem_free_callback(m);
  */
-#ifndef LWIP_USE_HEAP_FROM_INTERRUPT
-#define LWIP_USE_HEAP_FROM_INTERRUPT 0
+#ifndef LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
+#define LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT 0
 #endif
 
 /*
diff --git a/src/include/lwip/stats.h b/src/include/lwip/stats.h
index 1507b4f5..c44df10d 100644
--- a/src/include/lwip/stats.h
+++ b/src/include/lwip/stats.h
@@ -86,7 +86,8 @@ struct stats_mem {
   mem_size_t avail;
   mem_size_t used;
   mem_size_t max;
-  mem_size_t err;
+  STAT_COUNTER err;
+  STAT_COUNTER illegal;
 };
 
 struct stats_syselem {
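Usage sketch (not part of the patch itself): the C snippet below illustrates the two strategies described in the opt.h comment above from a driver's point of view. The interrupt handler my_tx_done_isr() and its pbuf argument are hypothetical; pbuf_free(), pbuf_free_callback() and LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT are the lwIP symbols referenced in the diff.

/* lwipopts.h -- opt in to freeing heap memory from interrupt context.
 * (Assumes the port provides SYS_ARCH_PROTECT/SYS_ARCH_UNPROTECT.) */
#define LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT 1

/* Hypothetical driver code, for illustration only. */
#include "lwip/opt.h"
#include "lwip/pbuf.h"
#include "lwip/tcpip.h"

/* Hypothetical "transmit complete" interrupt handler: the hardware is
 * done with a PBUF_RAM pbuf that the stack allocated from the heap. */
void my_tx_done_isr(struct pbuf *p)
{
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
  /* With the option enabled, mem_free() only uses SYS_ARCH_PROTECT and
   * never waits on the heap semaphore, so freeing here is allowed. */
  pbuf_free(p);
#else
  /* Default configuration (NO_SYS=0): enqueue the deallocation so that
   * it is executed later in the tcpip_thread context. */
  if (pbuf_free_callback(p) != ERR_OK) {
    /* message queue full -- keep the pbuf and retry from task context */
  }
#endif
}

With the option enabled, mem_free()/pbuf_free() rely only on SYS_ARCH_PROTECT and never block on mem_sem; mem_malloc() still takes the semaphore but briefly re-enables interrupts on every scan iteration and retries if a concurrent free modified the heap, which is the trade-off the ATTENTION note in opt.h warns about.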