diff --git a/src/core/mem.c b/src/core/mem.c
index 689d7f0e..3a434495 100644
--- a/src/core/mem.c
+++ b/src/core/mem.c
@@ -337,6 +337,17 @@ static volatile u8_t mem_free_count;
 
 #endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
 
+static struct mem *
+ptr_to_mem(mem_size_t ptr)
+{
+  return (struct mem *)(void *)&ram[ptr];
+}
+
+static mem_size_t
+mem_to_ptr(void *mem)
+{
+  return (mem_size_t)((u8_t *)mem - ram);
+}
 
 /**
  * "Plug holes" by combining adjacent empty struct mems.
@@ -362,25 +373,25 @@ plug_holes(struct mem *mem)
 
   /* plug hole forward */
   LWIP_ASSERT("plug_holes: mem->next <= MEM_SIZE_ALIGNED", mem->next <= MEM_SIZE_ALIGNED);
-  nmem = (struct mem *)(void *)&ram[mem->next];
+  nmem = ptr_to_mem(mem->next);
   if (mem != nmem && nmem->used == 0 && (u8_t *)nmem != (u8_t *)ram_end) {
     /* if mem->next is unused and not end of ram, combine mem and mem->next */
     if (lfree == nmem) {
       lfree = mem;
     }
     mem->next = nmem->next;
-    ((struct mem *)(void *)&ram[nmem->next])->prev = (mem_size_t)((u8_t *)mem - ram);
+    ptr_to_mem(nmem->next)->prev = mem_to_ptr(mem);
   }
 
   /* plug hole backward */
-  pmem = (struct mem *)(void *)&ram[mem->prev];
+  pmem = ptr_to_mem(mem->prev);
   if (pmem != mem && pmem->used == 0) {
     /* if mem->prev is unused, combine mem and mem->prev */
     if (lfree == mem) {
       lfree = pmem;
     }
     pmem->next = mem->next;
-    ((struct mem *)(void *)&ram[mem->next])->prev = (mem_size_t)((u8_t *)pmem - ram);
+    ptr_to_mem(mem->next)->prev = mem_to_ptr(pmem);
   }
 }
 
@@ -403,7 +414,7 @@ mem_init(void)
   mem->prev = 0;
   mem->used = 0;
   /* initialize the end of the heap */
-  ram_end = (struct mem *)(void *)&ram[MEM_SIZE_ALIGNED];
+  ram_end = ptr_to_mem(MEM_SIZE_ALIGNED);
   ram_end->used = 1;
   ram_end->next = MEM_SIZE_ALIGNED;
   ram_end->prev = MEM_SIZE_ALIGNED;
@@ -426,9 +437,9 @@ mem_link_valid(struct mem *mem)
 {
   struct mem *nmem, *pmem;
   mem_size_t rmem_idx;
-  rmem_idx = (mem_size_t)((u8_t *)mem - ram);
-  nmem = (struct mem *)(void *)&ram[mem->next];
-  pmem = (struct mem *)(void *)&ram[mem->prev];
+  rmem_idx = mem_to_ptr(mem);
+  nmem = ptr_to_mem(mem->next);
+  pmem = ptr_to_mem(mem->prev);
   if ((mem->next > MEM_SIZE_ALIGNED) || (mem->prev > MEM_SIZE_ALIGNED) ||
       ((mem->prev != rmem_idx) && (pmem->next != rmem_idx)) ||
       ((nmem != ram_end) && (nmem->prev != rmem_idx))) {
@@ -555,7 +566,7 @@ mem_trim(void *rmem, mem_size_t new_size)
   /* cast through void* to get rid of alignment warnings */
   mem = (struct mem *)(void *)((u8_t *)rmem - SIZEOF_STRUCT_MEM);
   /* ... and its offset pointer */
-  ptr = (mem_size_t)((u8_t *)mem - ram);
+  ptr = mem_to_ptr(mem);
 
   size = (mem_size_t)((mem_size_t)(mem->next - ptr) - SIZEOF_STRUCT_MEM);
   LWIP_ASSERT("mem_trim can only shrink memory", newsize <= size);
@@ -571,7 +582,7 @@ mem_trim(void *rmem, mem_size_t new_size)
   /* protect the heap from concurrent access */
   LWIP_MEM_FREE_PROTECT();
 
-  mem2 = (struct mem *)(void *)&ram[mem->next];
+  mem2 = ptr_to_mem(mem->next);
   if (mem2->used == 0) {
     /* The next struct is unused, we can simply move it at little */
     mem_size_t next;
@@ -580,9 +591,9 @@ mem_trim(void *rmem, mem_size_t new_size)
     /* create new struct mem which is moved directly after the shrinked mem */
     ptr2 = (mem_size_t)(ptr + SIZEOF_STRUCT_MEM + newsize);
     if (lfree == mem2) {
-      lfree = (struct mem *)(void *)&ram[ptr2];
+      lfree = ptr_to_mem(ptr2);
     }
-    mem2 = (struct mem *)(void *)&ram[ptr2];
+    mem2 = ptr_to_mem(ptr2);
     mem2->used = 0;
     /* restore the next pointer */
     mem2->next = next;
@@ -594,7 +605,7 @@ mem_trim(void *rmem, mem_size_t new_size)
      * let 'mem2->next->prev' point to mem2 again. but only if mem2->next is not
      * the end of the heap */
     if (mem2->next != MEM_SIZE_ALIGNED) {
-      ((struct mem *)(void *)&ram[mem2->next])->prev = ptr2;
+      ptr_to_mem(mem2->next)->prev = ptr2;
     }
     MEM_STATS_DEC_USED(used, (size - newsize));
     /* no need to plug holes, we've already done that */
@@ -607,7 +618,7 @@ mem_trim(void *rmem, mem_size_t new_size)
      * region that couldn't hold data, but when mem->next gets freed,
      * the 2 regions would be combined, resulting in more free memory */
     ptr2 = (mem_size_t)(ptr + SIZEOF_STRUCT_MEM + newsize);
-    mem2 = (struct mem *)(void *)&ram[ptr2];
+    mem2 = ptr_to_mem(ptr2);
     if (mem2 < lfree) {
       lfree = mem2;
     }
@@ -616,7 +627,7 @@ mem_trim(void *rmem, mem_size_t new_size)
     mem2->prev = ptr;
     mem->next = ptr2;
     if (mem2->next != MEM_SIZE_ALIGNED) {
-      ((struct mem *)(void *)&ram[mem2->next])->prev = ptr2;
+      ptr_to_mem(mem2->next)->prev = ptr2;
    }
     MEM_STATS_DEC_USED(used, (size - newsize));
     /* the original mem->next is used, so no need to plug holes! */
@@ -681,9 +692,9 @@ mem_malloc(mem_size_t size_in)
     /* Scan through the heap searching for a free block that is big enough,
      * beginning with the lowest free block.
      */
-    for (ptr = (mem_size_t)((u8_t *)lfree - ram); ptr < MEM_SIZE_ALIGNED - size;
-         ptr = ((struct mem *)(void *)&ram[ptr])->next) {
-      mem = (struct mem *)(void *)&ram[ptr];
+    for (ptr = mem_to_ptr(lfree); ptr < MEM_SIZE_ALIGNED - size;
+         ptr = ptr_to_mem(ptr)->next) {
+      mem = ptr_to_mem(ptr);
 #if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
       mem_free_count = 0;
       LWIP_MEM_ALLOC_UNPROTECT();
@@ -715,7 +726,7 @@ mem_malloc(mem_size_t size_in)
           */
         ptr2 = (mem_size_t)(ptr + SIZEOF_STRUCT_MEM + size);
         /* create mem2 struct */
-        mem2 = (struct mem *)(void *)&ram[ptr2];
+        mem2 = ptr_to_mem(ptr2);
         mem2->used = 0;
         mem2->next = mem->next;
         mem2->prev = ptr;
@@ -724,7 +735,7 @@ mem_malloc(mem_size_t size_in)
         mem->used = 1;
 
         if (mem2->next != MEM_SIZE_ALIGNED) {
-          ((struct mem *)(void *)&ram[mem2->next])->prev = ptr2;
+          ptr_to_mem(mem2->next)->prev = ptr2;
         }
         MEM_STATS_INC_USED(used, (size + SIZEOF_STRUCT_MEM));
       } else {
@@ -736,7 +747,7 @@ mem_malloc(mem_size_t size_in)
        * will always be used at this point!
        */
       mem->used = 1;
-      MEM_STATS_INC_USED(used, mem->next - (mem_size_t)((u8_t *)mem - ram));
+      MEM_STATS_INC_USED(used, mem->next - mem_to_ptr(mem));
     }
 #if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
 mem_malloc_adjust_lfree:
@@ -756,7 +767,7 @@ mem_malloc_adjust_lfree:
             goto mem_malloc_adjust_lfree;
           }
 #endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
-          cur = (struct mem *)(void *)&ram[cur->next];
+          cur = ptr_to_mem(cur->next);
         }
         lfree = cur;
         LWIP_ASSERT("mem_malloc: !lfree->used", ((lfree == ram_end) || (!lfree->used)));
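
Background for reviewers: the heap managed by this file is a byte array (ram), and each block header (struct mem) records its neighbours as byte offsets (mem->next, mem->prev) rather than as pointers. The two new helpers only centralize the conversion between those offsets and struct mem pointers, replacing the repeated (struct mem *)(void *)&ram[...] and (mem_size_t)((u8_t *)mem - ram) expressions; behaviour is unchanged. The standalone sketch below is not lwIP code; heap, struct block, offset_to_block and block_to_offset are illustrative names chosen to show the same offset/pointer pattern in isolation.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the lwIP heap: a plain byte array addressed by offsets. */
#define HEAP_SIZE 1024
static uint8_t heap[HEAP_SIZE];

/* Illustrative stand-in for struct mem: links are byte offsets into 'heap',
 * not pointers, so they fit in a small integer type. */
struct block {
  size_t next;  /* offset of the following block header */
  size_t prev;  /* offset of the preceding block header */
  uint8_t used;
};

/* Offset -> header pointer; the cast through void* lives in exactly one place
 * (mirrors ptr_to_mem() in the patch). */
static struct block *
offset_to_block(size_t offset)
{
  return (struct block *)(void *)&heap[offset];
}

/* Header pointer -> offset (mirrors mem_to_ptr() in the patch). */
static size_t
block_to_offset(void *blk)
{
  return (size_t)((uint8_t *)blk - heap);
}

int
main(void)
{
  /* Lay out two adjacent headers by offset, then navigate them via the helpers. */
  struct block *first = offset_to_block(0);
  struct block *second;

  first->next = sizeof(struct block) + 64; /* header plus 64 bytes of payload */
  first->prev = 0;
  first->used = 1;

  second = offset_to_block(first->next);
  second->prev = block_to_offset(first);
  second->next = HEAP_SIZE;
  second->used = 0;

  printf("second header at offset %zu, its prev points back to offset %zu\n",
         block_to_offset(second), second->prev);
  return 0;
}

With the offsets as the canonical representation, the alignment-silencing cast that the old code repeated at every call site now appears once per direction, which is the whole point of the refactor above.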