/**
 * @file
 * Dynamic memory manager
 *
 * This is a lightweight replacement for the standard C library malloc().
 *
 * If you want to use the standard C library malloc() instead, define
 * MEM_LIBC_MALLOC to 1 in your lwipopts.h
 *
 * To let mem_malloc() use pools (prevents fragmentation and is much faster than
 * a heap but might waste some memory), define MEM_USE_POOLS to 1, define
 * MEMP_USE_CUSTOM_POOLS to 1 and create a file "lwippools.h" that includes a list
 * of pools like this (more pools can be added between _START and _END):
 *
 * Define three pools with sizes 256, 512, and 1512 bytes
 * LWIP_MALLOC_MEMPOOL_START
 * LWIP_MALLOC_MEMPOOL(20, 256)
 * LWIP_MALLOC_MEMPOOL(10, 512)
 * LWIP_MALLOC_MEMPOOL(5, 1512)
 * LWIP_MALLOC_MEMPOOL_END
 */
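/* A configuration sketch (lwipopts.h) matching the pool list above; the values
 * are illustrative only and must be tuned for your application:
 *
 *   #define MEM_USE_POOLS           1
 *   #define MEMP_USE_CUSTOM_POOLS   1   // pulls in your project-local "lwippools.h"
 */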
/*
 * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
 * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
 * OF SUCH DAMAGE.
 *
 * This file is part of the lwIP TCP/IP stack.
 *
 * Author: Adam Dunkels <adam@sics.se>
 *
 */
#include "lwip/opt.h"

#if !MEM_LIBC_MALLOC /* don't build if not configured for use in lwipopts.h */

#include "lwip/def.h"
#include "lwip/mem.h"
#include "lwip/sys.h"
#include "lwip/stats.h"
#include "lwip/err.h"

#include <string.h>

#if MEM_USE_POOLS

/* lwIP heap implemented with different sized pools */
/**
 * Allocate memory: determine the smallest pool that is big enough
 * to contain an element of 'size' and get an element from that pool.
 *
 * @param size the size in bytes of the memory needed
 * @return a pointer to the allocated memory or NULL if the pool is empty
 */
void *
mem_malloc(mem_size_t size)
{
  struct memp_malloc_helper *element;
  memp_t poolnr;
  mem_size_t required_size = size + sizeof(struct memp_malloc_helper);

  for (poolnr = MEMP_POOL_FIRST; poolnr <= MEMP_POOL_LAST; poolnr = (memp_t)(poolnr + 1)) {
#if MEM_USE_POOLS_TRY_BIGGER_POOL
again:
#endif /* MEM_USE_POOLS_TRY_BIGGER_POOL */
    /* is this pool big enough to hold an element of the required size
       plus a struct memp_malloc_helper that saves the pool this element came from? */
    if (required_size <= memp_sizes[poolnr]) {
      break;
    }
  }
  if (poolnr > MEMP_POOL_LAST) {
    LWIP_ASSERT("mem_malloc(): no pool is that big!", 0);
    return NULL;
  }
  element = (struct memp_malloc_helper*)memp_malloc(poolnr);
  if (element == NULL) {
    /* No need to DEBUGF or ASSERT: This error is already
       taken care of in memp.c */
#if MEM_USE_POOLS_TRY_BIGGER_POOL
    /** Try a bigger pool if this one is empty! */
    if (poolnr < MEMP_POOL_LAST) {
      poolnr = (memp_t)(poolnr + 1);
      goto again;
    }
#endif /* MEM_USE_POOLS_TRY_BIGGER_POOL */
    return NULL;
  }

  /* save the pool number this element came from */
  element->poolnr = poolnr;
  /* and return a pointer to the memory directly after the struct memp_malloc_helper */
  element++;

  return element;
}
/**
 * Free memory previously allocated by mem_malloc. Loads the pool number
 * and calls memp_free with that pool number to put the element back into
 * its pool
 *
 * @param rmem the memory element to free
 */
void
mem_free(void *rmem)
{
  struct memp_malloc_helper *hmem = (struct memp_malloc_helper*)rmem;

  LWIP_ASSERT("rmem != NULL", (rmem != NULL));
  LWIP_ASSERT("rmem == MEM_ALIGN(rmem)", (rmem == LWIP_MEM_ALIGN(rmem)));

  /* get the original struct memp_malloc_helper */
  hmem--;

  LWIP_ASSERT("hmem != NULL", (hmem != NULL));
  LWIP_ASSERT("hmem == MEM_ALIGN(hmem)", (hmem == LWIP_MEM_ALIGN(hmem)));
  LWIP_ASSERT("hmem->poolnr < MEMP_MAX", (hmem->poolnr < MEMP_MAX));

  /* and put it in the pool we saved earlier */
  memp_free(hmem->poolnr, hmem);
}
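/* A usage sketch (sizes illustrative): with pools enabled the caller still sees
 * the plain heap API; the pool bookkeeping is hidden in the helper struct:
 *
 *   void *buf = mem_malloc(300);   // taken from the smallest pool that fits
 *   if (buf != NULL) {
 *     // ... use buf ...
 *     mem_free(buf);               // returned to the pool recorded by mem_malloc()
 *   }
 */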
#else /* MEM_USE_POOLS */

/* lwIP replacement for your libc malloc() */
/**
 * The heap is made up as a list of structs of this type.
 * This does not have to be aligned since for getting its size,
 * we only use the macro SIZEOF_STRUCT_MEM, which automatically aligns.
 */
struct mem {
  /** index (-> ram[next]) of the next struct */
  mem_size_t next;
  /** index (-> ram[prev]) of the previous struct */
  mem_size_t prev;
  /** 1: this area is used; 0: this area is unused */
  u8_t used;
};
/** All allocated blocks will be MIN_SIZE bytes big, at least!
 * MIN_SIZE can be overridden to suit your needs. Smaller values save space,
 * larger values could prevent too small blocks from fragmenting the RAM too much. */
#ifndef MIN_SIZE
#define MIN_SIZE             12
#endif /* MIN_SIZE */
/* some alignment macros: we define them here for better source code layout */
#define MIN_SIZE_ALIGNED     LWIP_MEM_ALIGN_SIZE(MIN_SIZE)
#define SIZEOF_STRUCT_MEM    LWIP_MEM_ALIGN_SIZE(sizeof(struct mem))
#define MEM_SIZE_ALIGNED     LWIP_MEM_ALIGN_SIZE(MEM_SIZE)
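/* Worked example (illustrative): LWIP_MEM_ALIGN_SIZE() rounds a size up to the
 * next multiple of MEM_ALIGNMENT, so with MEM_ALIGNMENT == 4 a request of 9
 * bytes becomes 12, and the default MIN_SIZE of 12 stays 12 after alignment. */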
/** If you want to relocate the heap to external memory, simply define
 * LWIP_RAM_HEAP_POINTER as a void-pointer to that location.
 * If so, make sure the memory at that location is big enough (see below on
 * how that space is calculated). */
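/* For example (a sketch only; the address is hypothetical and must match your
 * linker script / memory map):
 *
 *   #define LWIP_RAM_HEAP_POINTER ((void *)0xA0000000)
 */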
#ifndef LWIP_RAM_HEAP_POINTER

#if defined(TARGET_LPC4088) || defined(TARGET_LPC4088_DM)
#  if defined (__ICCARM__)
#     define ETHMEM_SECTION
#  elif defined(TOOLCHAIN_GCC_CR)
#     define ETHMEM_SECTION __attribute__((section(".data.$RamPeriph32")))
#  else
#     define ETHMEM_SECTION __attribute__((section("AHBSRAM1"),aligned))
#  endif
#elif defined(TARGET_LPC1768)
#  define ETHMEM_SECTION __attribute__((section("AHBSRAM0")))
#else
#  define ETHMEM_SECTION
#endif
/** the heap. we need one struct mem at the end and some room for alignment */
u8_t ram_heap[MEM_SIZE_ALIGNED + (2*SIZEOF_STRUCT_MEM) + MEM_ALIGNMENT] ETHMEM_SECTION;
#define LWIP_RAM_HEAP_POINTER ram_heap
#endif /* LWIP_RAM_HEAP_POINTER */
/** pointer to the heap (ram_heap): for alignment, ram is now a pointer instead of an array */
static u8_t *ram;
/** the last entry, always unused! */
static struct mem *ram_end;
/** pointer to the lowest free block, this is used for faster search */
static struct mem *lfree;

/** concurrent access protection */
static sys_mutex_t mem_mutex;
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT

static volatile u8_t mem_free_count;

/* Allow mem_free from other (e.g. interrupt) context */
#define LWIP_MEM_FREE_DECL_PROTECT()  SYS_ARCH_DECL_PROTECT(lev_free)
#define LWIP_MEM_FREE_PROTECT()       SYS_ARCH_PROTECT(lev_free)
#define LWIP_MEM_FREE_UNPROTECT()     SYS_ARCH_UNPROTECT(lev_free)
#define LWIP_MEM_ALLOC_DECL_PROTECT() SYS_ARCH_DECL_PROTECT(lev_alloc)
#define LWIP_MEM_ALLOC_PROTECT()      SYS_ARCH_PROTECT(lev_alloc)
#define LWIP_MEM_ALLOC_UNPROTECT()    SYS_ARCH_UNPROTECT(lev_alloc)

#else /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */

/* Protect the heap only by using a semaphore */
#define LWIP_MEM_FREE_DECL_PROTECT()
#define LWIP_MEM_FREE_PROTECT()    sys_mutex_lock(&mem_mutex)
#define LWIP_MEM_FREE_UNPROTECT()  sys_mutex_unlock(&mem_mutex)
/* mem_malloc is protected using semaphore AND LWIP_MEM_ALLOC_PROTECT */
#define LWIP_MEM_ALLOC_DECL_PROTECT()
#define LWIP_MEM_ALLOC_PROTECT()
#define LWIP_MEM_ALLOC_UNPROTECT()

#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
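/* Configuration sketch: mem_free()/mem_trim() may be called from another
 * context (e.g. an ISR) only if this is enabled in lwipopts.h (it is off by
 * default):
 *
 *   #define LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT 1
 */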
/**
 * "Plug holes" by combining adjacent empty struct mems.
 * After this function is through, there should not exist
 * one empty struct mem pointing to another empty struct mem.
 *
 * @param mem this points to a struct mem which just has been freed
 * @internal this function is only called by mem_free() and mem_trim()
 *
 * This assumes access to the heap is protected by the calling function
 * already.
 */
static void
plug_holes(struct mem *mem)
{
  struct mem *nmem;
  struct mem *pmem;

  LWIP_ASSERT("plug_holes: mem >= ram", (u8_t *)mem >= ram);
  LWIP_ASSERT("plug_holes: mem < ram_end", (u8_t *)mem < (u8_t *)ram_end);
  LWIP_ASSERT("plug_holes: mem->used == 0", mem->used == 0);

  /* plug hole forward */
  LWIP_ASSERT("plug_holes: mem->next <= MEM_SIZE_ALIGNED", mem->next <= MEM_SIZE_ALIGNED);

  nmem = (struct mem *)(void *)&ram[mem->next];
  if (mem != nmem && nmem->used == 0 && (u8_t *)nmem != (u8_t *)ram_end) {
    /* if mem->next is unused and not end of ram, combine mem and mem->next */
    if (lfree == nmem) {
      lfree = mem;
    }
    mem->next = nmem->next;
    ((struct mem *)(void *)&ram[nmem->next])->prev = (mem_size_t)((u8_t *)mem - ram);
  }

  /* plug hole backward */
  pmem = (struct mem *)(void *)&ram[mem->prev];
  if (pmem != mem && pmem->used == 0) {
    /* if mem->prev is unused, combine mem and mem->prev */
    if (lfree == mem) {
      lfree = pmem;
    }
    pmem->next = mem->next;
    ((struct mem *)(void *)&ram[mem->next])->prev = (mem_size_t)((u8_t *)pmem - ram);
  }
}
/**
 * Zero the heap and initialize start, end and lowest-free
 */
void
mem_init(void)
{
  struct mem *mem;

  LWIP_ASSERT("Sanity check alignment",
    (SIZEOF_STRUCT_MEM & (MEM_ALIGNMENT-1)) == 0);

  /* align the heap */
  ram = (u8_t *)LWIP_MEM_ALIGN(LWIP_RAM_HEAP_POINTER);
  /* initialize the start of the heap */
  mem = (struct mem *)(void *)ram;
  mem->next = MEM_SIZE_ALIGNED;
  mem->prev = 0;
  mem->used = 0;
  /* initialize the end of the heap */
  ram_end = (struct mem *)(void *)&ram[MEM_SIZE_ALIGNED];
  ram_end->used = 1;
  ram_end->next = MEM_SIZE_ALIGNED;
  ram_end->prev = MEM_SIZE_ALIGNED;

  /* initialize the lowest-free pointer to the start of the heap */
  lfree = (struct mem *)(void *)ram;

  MEM_STATS_AVAIL(avail, MEM_SIZE_ALIGNED);

  if (sys_mutex_new(&mem_mutex) != ERR_OK) {
    LWIP_ASSERT("failed to create mem_mutex", 0);
  }
}
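/* Bring-up sketch: mem_init() is normally run for you during stack
 * initialization (lwip_init() calls it), so application code rarely needs to
 * call it directly:
 *
 *   lwip_init();   // heap is ready for mem_malloc()/mem_free() afterwards
 */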
/**
 * Put a struct mem back on the heap
 *
 * @param rmem is the data portion of a struct mem as returned by a previous
 *             call to mem_malloc()
 */
void
mem_free(void *rmem)
{
  struct mem *mem;
  LWIP_MEM_FREE_DECL_PROTECT();

  if (rmem == NULL) {
    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("mem_free(p == NULL) was called.\n"));
    return;
  }
  LWIP_ASSERT("mem_free: sanity check alignment", (((mem_ptr_t)rmem) & (MEM_ALIGNMENT-1)) == 0);

  LWIP_ASSERT("mem_free: legal memory", (u8_t *)rmem >= (u8_t *)ram &&
    (u8_t *)rmem < (u8_t *)ram_end);

  if ((u8_t *)rmem < (u8_t *)ram || (u8_t *)rmem >= (u8_t *)ram_end) {
    SYS_ARCH_DECL_PROTECT(lev);
    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_free: illegal memory\n"));
    /* protect mem stats from concurrent access */
    SYS_ARCH_PROTECT(lev);
    MEM_STATS_INC(illegal);
    SYS_ARCH_UNPROTECT(lev);
    return;
  }
  /* protect the heap from concurrent access */
  LWIP_MEM_FREE_PROTECT();
  /* Get the corresponding struct mem ... */
  mem = (struct mem *)(void *)((u8_t *)rmem - SIZEOF_STRUCT_MEM);
  /* ... which has to be in a used state ... */
  LWIP_ASSERT("mem_free: mem->used", mem->used);
  /* ... and is now unused. */
  mem->used = 0;

  if (mem < lfree) {
    /* the newly freed struct is now the lowest */
    lfree = mem;
  }

  MEM_STATS_DEC_USED(used, mem->next - (mem_size_t)(((u8_t *)mem - ram)));

  /* finally, see if prev or next are free also */
  plug_holes(mem);
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
  mem_free_count = 1;
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
  LWIP_MEM_FREE_UNPROTECT();
}
/**
 * Shrink memory returned by mem_malloc().
 *
 * @param rmem pointer to memory allocated by mem_malloc that is to be shrunk
 * @param newsize required size after shrinking (needs to be smaller than or
 *                equal to the previous size)
 * @return for compatibility reasons: is always == rmem, at the moment
 *         or NULL if newsize is > old size, in which case rmem is NOT touched
 *         or freed!
 */
void *
mem_trim(void *rmem, mem_size_t newsize)
{
  mem_size_t size;
  mem_size_t ptr, ptr2;
  struct mem *mem, *mem2;
  /* use the FREE_PROTECT here: it protects with sem OR SYS_ARCH_PROTECT */
  LWIP_MEM_FREE_DECL_PROTECT();

  /* Expand the size of the allocated memory region so that we can
     adjust for alignment. */
  newsize = LWIP_MEM_ALIGN_SIZE(newsize);

  if (newsize < MIN_SIZE_ALIGNED) {
    /* every data block must be at least MIN_SIZE_ALIGNED long */
    newsize = MIN_SIZE_ALIGNED;
  }

  if (newsize > MEM_SIZE_ALIGNED) {
    return NULL;
  }

  LWIP_ASSERT("mem_trim: legal memory", (u8_t *)rmem >= (u8_t *)ram &&
    (u8_t *)rmem < (u8_t *)ram_end);

  if ((u8_t *)rmem < (u8_t *)ram || (u8_t *)rmem >= (u8_t *)ram_end) {
    SYS_ARCH_DECL_PROTECT(lev);
    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_trim: illegal memory\n"));
    /* protect mem stats from concurrent access */
    SYS_ARCH_PROTECT(lev);
    MEM_STATS_INC(illegal);
    SYS_ARCH_UNPROTECT(lev);
    return rmem;
  }
  /* Get the corresponding struct mem ... */
  mem = (struct mem *)(void *)((u8_t *)rmem - SIZEOF_STRUCT_MEM);
  /* ... and its offset pointer */
  ptr = (mem_size_t)((u8_t *)mem - ram);

  size = mem->next - ptr - SIZEOF_STRUCT_MEM;
  LWIP_ASSERT("mem_trim can only shrink memory", newsize <= size);
  if (newsize > size) {
    /* not supported */
    return NULL;
  }
  if (newsize == size) {
    /* No change in size, simply return */
    return rmem;
  }

  /* protect the heap from concurrent access */
  LWIP_MEM_FREE_PROTECT();

  mem2 = (struct mem *)(void *)&ram[mem->next];
  if (mem2->used == 0) {
    /* The next struct is unused, we can simply move it a little */
    mem_size_t next;
    /* remember the old next pointer */
    next = mem2->next;
    /* create new struct mem which is moved directly after the shrinked mem */
    ptr2 = ptr + SIZEOF_STRUCT_MEM + newsize;
    if (lfree == mem2) {
      lfree = (struct mem *)(void *)&ram[ptr2];
    }
    mem2 = (struct mem *)(void *)&ram[ptr2];
    mem2->used = 0;
    /* restore the next pointer */
    mem2->next = next;
    /* link it back to mem */
    mem2->prev = ptr;
    /* link mem to it */
    mem->next = ptr2;
    /* last thing to restore linked list: as we have moved mem2,
     * let 'mem2->next->prev' point to mem2 again. but only if mem2->next is not
     * the end of the heap */
    if (mem2->next != MEM_SIZE_ALIGNED) {
      ((struct mem *)(void *)&ram[mem2->next])->prev = ptr2;
    }
    MEM_STATS_DEC_USED(used, (size - newsize));
    /* no need to plug holes, we've already done that */
  } else if (newsize + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED <= size) {
    /* Next struct is used but there's room for another struct mem with
     * at least MIN_SIZE_ALIGNED of data.
     * Old size ('size') must be big enough to contain at least 'newsize' plus a struct mem
     * ('SIZEOF_STRUCT_MEM') with some data ('MIN_SIZE_ALIGNED').
     * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
     *       region that couldn't hold data, but when mem->next gets freed,
     *       the 2 regions would be combined, resulting in more free memory */
    ptr2 = ptr + SIZEOF_STRUCT_MEM + newsize;
    mem2 = (struct mem *)(void *)&ram[ptr2];
    if (mem2 < lfree) {
      lfree = mem2;
    }
    mem2->used = 0;
    mem2->next = mem->next;
    mem2->prev = ptr;
    mem->next = ptr2;
    if (mem2->next != MEM_SIZE_ALIGNED) {
      ((struct mem *)(void *)&ram[mem2->next])->prev = ptr2;
    }
    MEM_STATS_DEC_USED(used, (size - newsize));
    /* the original mem->next is used, so no need to plug holes! */
  }
  /* else {
    next struct mem is used but size between mem and mem2 is not big enough
    to create another struct mem
    -> don't do anything.
    -> the remaining space stays unused since it is too small
  } */
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
  mem_free_count = 1;
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
  LWIP_MEM_FREE_UNPROTECT();
  return rmem;
}
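/* Usage sketch (the sizes and the helper are hypothetical): allocate
 * generously, then hand back the unused tail once the real length is known:
 *
 *   char *p = (char *)mem_malloc(1500);
 *   if (p != NULL) {
 *     mem_size_t used_len = build_frame(p);   // hypothetical helper, returns <= 1500
 *     p = (char *)mem_trim(p, used_len);      // shrinks in place; returns rmem
 *   }
 */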
/**
 * Adam's mem_malloc() plus solution for bug #17922
 * Allocate a block of memory with a minimum of 'size' bytes.
 *
 * @param size is the minimum size of the requested block in bytes.
 * @return pointer to allocated memory or NULL if no free memory was found.
 *
 * Note that the returned value will always be aligned (as defined by MEM_ALIGNMENT).
 */
void *
mem_malloc(mem_size_t size)
{
  mem_size_t ptr, ptr2;
  struct mem *mem, *mem2;
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
  u8_t local_mem_free_count = 0;
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
  LWIP_MEM_ALLOC_DECL_PROTECT();

  if (size == 0) {
    return NULL;
  }

  /* Expand the size of the allocated memory region so that we can
     adjust for alignment. */
  size = LWIP_MEM_ALIGN_SIZE(size);

  if (size < MIN_SIZE_ALIGNED) {
    /* every data block must be at least MIN_SIZE_ALIGNED long */
    size = MIN_SIZE_ALIGNED;
  }

  if (size > MEM_SIZE_ALIGNED) {
    return NULL;
  }

  /* protect the heap from concurrent access */
  sys_mutex_lock(&mem_mutex);
  LWIP_MEM_ALLOC_PROTECT();
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
  /* run as long as a mem_free disturbed mem_malloc */
  do {
    local_mem_free_count = 0;
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */

    /* Scan through the heap searching for a free block that is big enough,
     * beginning with the lowest free block.
     */
    for (ptr = (mem_size_t)((u8_t *)lfree - ram); ptr < MEM_SIZE_ALIGNED - size;
         ptr = ((struct mem *)(void *)&ram[ptr])->next) {
      mem = (struct mem *)(void *)&ram[ptr];
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
      mem_free_count = 0;
      LWIP_MEM_ALLOC_UNPROTECT();
      /* allow mem_free to run */
      LWIP_MEM_ALLOC_PROTECT();
      if (mem_free_count != 0) {
        local_mem_free_count = mem_free_count;
        break;
      }
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */

      if ((!mem->used) &&
          (mem->next - (ptr + SIZEOF_STRUCT_MEM)) >= size) {
        /* mem is not used and at least perfect fit is possible:
         * mem->next - (ptr + SIZEOF_STRUCT_MEM) gives us the 'user data size' of mem */

        if (mem->next - (ptr + SIZEOF_STRUCT_MEM) >= (size + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED)) {
          /* (in addition to the above, we test if another struct mem (SIZEOF_STRUCT_MEM) containing
           * at least MIN_SIZE_ALIGNED of data also fits in the 'user data space' of 'mem')
           * -> split large block, create empty remainder,
           * remainder must be large enough to contain MIN_SIZE_ALIGNED data: if
           * mem->next - (ptr + (2*SIZEOF_STRUCT_MEM)) == size,
           * struct mem would fit in but no data between mem2 and mem2->next
           * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
           *       region that couldn't hold data, but when mem->next gets freed,
           *       the 2 regions would be combined, resulting in more free memory
           */
          ptr2 = ptr + SIZEOF_STRUCT_MEM + size;
          /* create mem2 struct */
          mem2 = (struct mem *)(void *)&ram[ptr2];
          mem2->used = 0;
          mem2->next = mem->next;
          mem2->prev = ptr;
          /* and insert it between mem and mem->next */
          mem->next = ptr2;
          mem->used = 1;

          if (mem2->next != MEM_SIZE_ALIGNED) {
            ((struct mem *)(void *)&ram[mem2->next])->prev = ptr2;
          }
          MEM_STATS_INC_USED(used, (size + SIZEOF_STRUCT_MEM));
        } else {
          /* (a mem2 struct does not fit into the user data space of mem and mem->next will always
           * be used at this point: if not we have 2 unused structs in a row, plug_holes should have
           * taken care of this).
           * -> near fit or exact fit: do not split, no mem2 creation
           * also can't move mem->next directly behind mem, since mem->next
           * will always be used at this point!
           */
          mem->used = 1;
          MEM_STATS_INC_USED(used, mem->next - (mem_size_t)((u8_t *)mem - ram));
        }

        if (mem == lfree) {
          /* Find next free block after mem and update lowest free pointer */
          while (lfree->used && lfree != ram_end) {
            LWIP_MEM_ALLOC_UNPROTECT();
            /* prevent high interrupt latency... */
            LWIP_MEM_ALLOC_PROTECT();
            lfree = (struct mem *)(void *)&ram[lfree->next];
          }
          LWIP_ASSERT("mem_malloc: !lfree->used", ((lfree == ram_end) || (!lfree->used)));
        }
        LWIP_MEM_ALLOC_UNPROTECT();
        sys_mutex_unlock(&mem_mutex);
        LWIP_ASSERT("mem_malloc: allocated memory not above ram_end.",
          (mem_ptr_t)mem + SIZEOF_STRUCT_MEM + size <= (mem_ptr_t)ram_end);
        LWIP_ASSERT("mem_malloc: allocated memory properly aligned.",
          ((mem_ptr_t)mem + SIZEOF_STRUCT_MEM) % MEM_ALIGNMENT == 0);
        LWIP_ASSERT("mem_malloc: sanity check alignment",
          (((mem_ptr_t)mem) & (MEM_ALIGNMENT-1)) == 0);

        return (u8_t *)mem + SIZEOF_STRUCT_MEM;
      }
    }
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
    /* if we got interrupted by a mem_free, try again */
  } while (local_mem_free_count != 0);
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
  LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("mem_malloc: could not allocate %"S16_F" bytes\n", (s16_t)size));
  MEM_STATS_INC(err);
  LWIP_MEM_ALLOC_UNPROTECT();
  sys_mutex_unlock(&mem_mutex);
  return NULL;
}
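/* Sizing note (sketch): the largest single mem_malloc() that can ever succeed
 * is bounded by MEM_SIZE (after alignment), set in lwipopts.h, e.g.:
 *
 *   #define MEM_SIZE 16000   // illustrative value, tune for your traffic
 */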
#endif /* MEM_USE_POOLS */
/**
 * Contiguously allocates enough space for count objects that are size bytes
 * of memory each and returns a pointer to the allocated memory.
 *
 * The allocated memory is filled with bytes of value zero.
 *
 * @param count number of objects to allocate
 * @param size size of the objects to allocate
 * @return pointer to allocated memory / NULL pointer if there is an error
 */
void *mem_calloc(mem_size_t count, mem_size_t size)
{
  void *p;

  /* allocate 'count' objects of size 'size' */
  p = mem_malloc(count * size);
  if (p) {
    /* zero the memory */
    memset(p, 0, count * size);
  }
  return p;
}
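/* Usage sketch (the record type is hypothetical): allocate and zero a small
 * table in one call:
 *
 *   my_record_t *tbl = (my_record_t *)mem_calloc(4, sizeof(my_record_t));
 *   if (tbl != NULL) {
 *     // all 4 records start zero-initialized
 *   }
 */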
#endif /* !MEM_LIBC_MALLOC */