iPXE
malloc.c
Go to the documentation of this file.
1/*
2 * Copyright (C) 2006 Michael Brown <mbrown@fensystems.co.uk>.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License as
6 * published by the Free Software Foundation; either version 2 of the
7 * License, or any later version.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
17 * 02110-1301, USA.
18 *
19 * You can also choose to distribute this program under the terms of
20 * the Unmodified Binary Distribution Licence (as given in the file
21 * COPYING.UBDL), provided that you have satisfied its requirements.
22 */
23
24FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
25FILE_SECBOOT ( PERMITTED );
26
27#include <stddef.h>
28#include <stdint.h>
29#include <string.h>
30#include <ipxe/io.h>
31#include <ipxe/list.h>
32#include <ipxe/init.h>
33#include <ipxe/refcnt.h>
34#include <ipxe/malloc.h>
35#include <valgrind/memcheck.h>
36
37/** @file
38 *
39 * Dynamic memory allocation
40 *
41 */
42
43/** A free block of memory */
45 /** Size of this block */
46 size_t size;
47 /** Padding
48 *
49 * This padding exists to cover the "count" field of a
50 * reference counter, in the common case where a reference
51 * counter is the first element of a dynamically-allocated
52 * object. It avoids clobbering the "count" field as soon as
53 * the memory is freed, and so allows for the possibility of
54 * detecting reference counting errors.
55 */
56 char pad[ offsetof ( struct refcnt, count ) +
57 sizeof ( ( ( struct refcnt * ) NULL )->count ) ];
58 /** List of free blocks */
60};
61
62/** Physical address alignment maintained for free blocks of memory
63 *
64 * We keep memory blocks aligned on a power of two that is at least
65 * large enough to hold a @c struct @c memory_block.
66 */
67#define MIN_MEMBLOCK_ALIGN ( 4 * sizeof ( void * ) )
68
/** A block of allocated memory complete with size information */
struct autosized_block {
	/** Size of this block */
	size_t size;
	/** Remaining data
	 *
	 * Zero-length array: the caller-visible data region starts
	 * immediately after the size field.
	 */
	char data[0];
};
76
77/**
78 * Heap area size
79 *
80 * Currently fixed at 4MB.
81 */
82#define HEAP_SIZE ( 4096 * 1024 )
83
84/** Heap area alignment */
85#define HEAP_ALIGN MIN_MEMBLOCK_ALIGN
86
87/** The heap area */
88static char __attribute__ (( aligned ( HEAP_ALIGN ) )) heap_area[HEAP_SIZE];
89
90/**
91 * Mark all blocks in free list as defined
92 *
93 * @v heap Heap
94 */
95static inline void valgrind_make_blocks_defined ( struct heap *heap ) {
96 struct memory_block *block;
97
98 /* Do nothing unless running under Valgrind */
99 if ( RUNNING_ON_VALGRIND <= 0 )
100 return;
101
102 /* Traverse free block list, marking each block structure as
103 * defined. Some contortions are necessary to avoid errors
104 * from list_check().
105 */
106
107 /* Mark block list itself as defined */
109
110 /* Mark areas accessed by list_check() as defined */
112 sizeof ( heap->blocks.prev->next ) );
114 sizeof ( *heap->blocks.next ) );
116 sizeof ( heap->blocks.next->next->prev ) );
117
118 /* Mark each block in list as defined */
120
121 /* Mark block as defined */
122 VALGRIND_MAKE_MEM_DEFINED ( block, sizeof ( *block ) );
123
124 /* Mark areas accessed by list_check() as defined */
125 VALGRIND_MAKE_MEM_DEFINED ( block->list.next,
126 sizeof ( *block->list.next ) );
127 VALGRIND_MAKE_MEM_DEFINED ( &block->list.next->next->prev,
128 sizeof ( block->list.next->next->prev ) );
129 }
130}
131
132/**
133 * Mark all blocks in free list as inaccessible
134 *
135 * @v heap Heap
136 */
137static inline void valgrind_make_blocks_noaccess ( struct heap *heap ) {
138 struct memory_block *block;
139 struct memory_block *prev = NULL;
140
141 /* Do nothing unless running under Valgrind */
142 if ( RUNNING_ON_VALGRIND <= 0 )
143 return;
144
145 /* Traverse free block list, marking each block structure as
146 * inaccessible. Some contortions are necessary to avoid
147 * errors from list_check().
148 */
149
150 /* Mark each block in list as inaccessible */
152
153 /* Mark previous block (if any) as inaccessible. (Current
154 * block will be accessed by list_check().)
155 */
156 if ( prev )
157 VALGRIND_MAKE_MEM_NOACCESS ( prev, sizeof ( *prev ) );
158 prev = block;
159
160 /* At the end of the list, list_check() will end up
161 * accessing the first list item. Temporarily mark
162 * this area as defined.
163 */
165 sizeof ( heap->blocks.next->prev ));
166 }
167 /* Mark last block (if any) as inaccessible */
168 if ( prev )
169 VALGRIND_MAKE_MEM_NOACCESS ( prev, sizeof ( *prev ) );
170
171 /* Mark as inaccessible the area that was temporarily marked
172 * as defined to avoid errors from list_check().
173 */
175 sizeof ( heap->blocks.next->prev ) );
176
177 /* Mark block list itself as inaccessible */
179}
180
181/**
182 * Check integrity of the blocks in the free list
183 *
184 * @v heap Heap
185 */
186static inline void check_blocks ( struct heap *heap ) {
187 struct memory_block *block;
188 struct memory_block *prev = NULL;
189
190 if ( ! ASSERTING )
191 return;
192
194
195 /* Check alignment */
196 assert ( ( virt_to_phys ( block ) &
197 ( heap->align - 1 ) ) == 0 );
198
199 /* Check that list structure is intact */
200 list_check ( &block->list );
201
202 /* Check that block size is not too small */
203 assert ( block->size >= sizeof ( *block ) );
204 assert ( block->size >= heap->align );
205
206 /* Check that block does not wrap beyond end of address space */
207 assert ( ( ( void * ) block + block->size ) >
208 ( ( void * ) block ) );
209
210 /* Check that blocks remain in ascending order, and
211 * that adjacent blocks have been merged.
212 */
213 if ( prev ) {
214 assert ( ( ( void * ) block ) > ( ( void * ) prev ) );
215 assert ( ( ( void * ) block ) >
216 ( ( ( void * ) prev ) + prev->size ) );
217 }
218 prev = block;
219 }
220}
221
222/**
223 * Discard some cached data
224 *
225 * @v size Failed allocation size
226 * @ret discarded Number of cached items discarded
227 */
228static unsigned int discard_cache ( size_t size __unused ) {
229 struct cache_discarder *discarder;
230 unsigned int discarded;
231
233 discarded = discarder->discard();
234 if ( discarded )
235 return discarded;
236 }
237 return 0;
238}
239
/**
 * Discard all cached data
 *
 * Repeatedly invokes discard_cache() until no discarder is able to
 * release anything further.
 */
static void discard_all_cache ( void ) {

	/* Keep discarding until nothing more can be discarded */
	while ( discard_cache ( 0 ) ) {
		/* Nothing to do */
	}
}
251
252/**
253 * Allocate a memory block
254 *
255 * @v heap Heap
256 * @v size Requested size
257 * @v align Physical alignment
258 * @v offset Offset from physical alignment
259 * @ret ptr Memory block, or NULL
260 *
261 * Allocates a memory block @b physically aligned as requested. No
262 * guarantees are provided for the alignment of the virtual address.
263 *
264 * @c align must be a power of two. @c size may not be zero.
265 */
266static void * heap_alloc_block ( struct heap *heap, size_t size, size_t align,
267 size_t offset ) {
268 struct memory_block *block;
269 size_t actual_offset;
270 size_t align_mask;
271 size_t actual_size;
272 size_t pre_size;
273 size_t post_size;
274 struct memory_block *pre;
275 struct memory_block *post;
276 unsigned int grown;
277 void *ptr;
278
279 /* Sanity checks */
280 assert ( size != 0 );
281 assert ( ( align == 0 ) || ( ( align & ( align - 1 ) ) == 0 ) );
283 check_blocks ( heap );
284
285 /* Limit offset to requested alignment */
286 offset &= ( align ? ( align - 1 ) : 0 );
287
288 /* Calculate offset of memory block */
289 actual_offset = ( offset & ~( heap->align - 1 ) );
290 assert ( actual_offset <= offset );
291
292 /* Calculate size of memory block */
293 actual_size = ( ( size + offset - actual_offset + heap->align - 1 )
294 & ~( heap->align - 1 ) );
295 if ( ! actual_size ) {
296 /* The requested size is not permitted to be zero. A
297 * zero result at this point indicates that either the
298 * original requested size was zero, or that unsigned
299 * integer overflow has occurred.
300 */
301 ptr = NULL;
302 goto done;
303 }
304 assert ( actual_size >= size );
305
306 /* Calculate alignment mask */
307 align_mask = ( ( align - 1 ) | ( heap->align - 1 ) );
308
309 DBGC2 ( heap, "HEAP allocating %#zx (aligned %#zx+%#zx)\n",
310 size, align, offset );
311 while ( 1 ) {
312 /* Search through blocks for the first one with enough space */
314 pre_size = ( ( actual_offset - virt_to_phys ( block ) )
315 & align_mask );
316 if ( ( block->size < pre_size ) ||
317 ( ( block->size - pre_size ) < actual_size ) )
318 continue;
319 post_size = ( block->size - pre_size - actual_size );
320 /* Split block into pre-block, block, and
321 * post-block. After this split, the "pre"
322 * block is the one currently linked into the
323 * free list.
324 */
325 pre = block;
326 block = ( ( ( void * ) pre ) + pre_size );
327 post = ( ( ( void * ) block ) + actual_size );
328 DBGC2 ( heap, "HEAP splitting [%p,%p) -> [%p,%p) "
329 "+ [%p,%p)\n", pre,
330 ( ( ( void * ) pre ) + pre->size ), pre, block,
331 post, ( ( ( void * ) pre ) + pre->size ) );
332 /* If there is a "post" block, add it in to
333 * the free list.
334 */
335 if ( post_size ) {
336 assert ( post_size >= sizeof ( *block ) );
337 assert ( ( post_size &
338 ( heap->align - 1 ) ) == 0 );
340 sizeof ( *post ));
341 post->size = post_size;
342 list_add ( &post->list, &pre->list );
343 }
344 /* Shrink "pre" block, leaving the main block
345 * isolated and no longer part of the free
346 * list.
347 */
348 pre->size = pre_size;
349 /* If there is no "pre" block, remove it from
350 * the list.
351 */
352 if ( ! pre_size ) {
353 list_del ( &pre->list );
355 sizeof ( *pre ) );
356 } else {
357 assert ( pre_size >= sizeof ( *block ) );
358 assert ( ( pre_size &
359 ( heap->align - 1 ) ) == 0 );
360 }
361 /* Update memory usage statistics */
362 heap->freemem -= actual_size;
363 heap->usedmem += actual_size;
364 if ( heap->usedmem > heap->maxusedmem )
366 /* Return allocated block */
367 ptr = ( ( ( void * ) block ) + offset - actual_offset );
368 DBGC2 ( heap, "HEAP allocated [%p,%p) within "
369 "[%p,%p)\n", ptr, ( ptr + size ), block,
370 ( ( ( void * ) block ) + actual_size ) );
372 goto done;
373 }
374
375 /* Attempt to grow heap to satisfy allocation */
376 DBGC ( heap, "HEAP attempting to grow for %#zx (aligned "
377 "%#zx+%zx), used %zdkB\n", size, align, offset,
378 ( heap->usedmem >> 10 ) );
380 grown = ( heap->grow ? heap->grow ( actual_size ) : 0 );
382 check_blocks ( heap );
383 if ( ! grown ) {
384 /* Heap did not grow: fail allocation */
385 DBGC ( heap, "HEAP failed to allocate %#zx (aligned "
386 "%#zx)\n", size, align );
387 ptr = NULL;
388 goto done;
389 }
390 }
391
392 done:
393 check_blocks ( heap );
395 return ptr;
396}
397
398/**
399 * Free a memory block
400 *
401 * @v heap Heap
402 * @v ptr Memory allocated by heap_alloc_block(), or NULL
403 * @v size Size of the memory
404 *
405 * If @c ptr is NULL, no action is taken.
406 */
407static void heap_free_block ( struct heap *heap, void *ptr, size_t size ) {
408 struct memory_block *freeing;
409 struct memory_block *block;
410 struct memory_block *tmp;
411 size_t sub_offset;
412 size_t actual_size;
413 ssize_t gap_before;
414 ssize_t gap_after = -1;
415
416 /* Allow for ptr==NULL */
417 if ( ! ptr )
418 return;
420
421 /* Sanity checks */
423 check_blocks ( heap );
424
425 /* Round up to match actual block that heap_alloc_block() would
426 * have allocated.
427 */
428 assert ( size != 0 );
429 sub_offset = ( virt_to_phys ( ptr ) & ( heap->align - 1 ) );
430 freeing = ( ptr - sub_offset );
431 actual_size = ( ( size + sub_offset + heap->align - 1 ) &
432 ~( heap->align - 1 ) );
433 DBGC2 ( heap, "HEAP freeing [%p,%p) within [%p,%p)\n",
434 ptr, ( ptr + size ), freeing,
435 ( ( ( void * ) freeing ) + actual_size ) );
436 VALGRIND_MAKE_MEM_UNDEFINED ( freeing, sizeof ( *freeing ) );
437
438 /* Check that this block does not overlap the free list */
439 if ( ASSERTING ) {
441 if ( ( ( ( void * ) block ) <
442 ( ( void * ) freeing + actual_size ) ) &&
443 ( ( void * ) freeing <
444 ( ( void * ) block + block->size ) ) ) {
445 assert ( 0 );
446 DBGC ( heap, "HEAP double free of [%p,%p) "
447 "overlapping [%p,%p) detected from %p\n",
448 freeing,
449 ( ( ( void * ) freeing ) + size ), block,
450 ( ( void * ) block + block->size ),
451 __builtin_return_address ( 0 ) );
452 }
453 }
454 }
455
456 /* Insert/merge into free list */
457 freeing->size = actual_size;
459 /* Calculate gaps before and after the "freeing" block */
460 gap_before = ( ( ( void * ) freeing ) -
461 ( ( ( void * ) block ) + block->size ) );
462 gap_after = ( ( ( void * ) block ) -
463 ( ( ( void * ) freeing ) + freeing->size ) );
464 /* Merge with immediately preceding block, if possible */
465 if ( gap_before == 0 ) {
466 DBGC2 ( heap, "HEAP merging [%p,%p) + [%p,%p) -> "
467 "[%p,%p)\n", block,
468 ( ( ( void * ) block ) + block->size ), freeing,
469 ( ( ( void * ) freeing ) + freeing->size ),
470 block,
471 ( ( ( void * ) freeing ) + freeing->size ) );
472 block->size += actual_size;
473 list_del ( &block->list );
475 sizeof ( *freeing ) );
476 freeing = block;
477 }
478 /* Stop processing as soon as we reach a following block */
479 if ( gap_after >= 0 )
480 break;
481 }
482
483 /* Insert before the immediately following block. If
484 * possible, merge the following block into the "freeing"
485 * block.
486 */
487 DBGC2 ( heap, "HEAP freed [%p,%p)\n",
488 freeing, ( ( ( void * ) freeing ) + freeing->size ) );
489 list_add_tail ( &freeing->list, &block->list );
490 if ( gap_after == 0 ) {
491 DBGC2 ( heap, "HEAP merging [%p,%p) + [%p,%p) -> [%p,%p)\n",
492 freeing, ( ( ( void * ) freeing ) + freeing->size ),
493 block, ( ( ( void * ) block ) + block->size ), freeing,
494 ( ( ( void * ) block ) + block->size ) );
495 freeing->size += block->size;
496 list_del ( &block->list );
497 VALGRIND_MAKE_MEM_NOACCESS ( block, sizeof ( *block ) );
498 }
499
500 /* Update memory usage statistics */
501 heap->freemem += actual_size;
502 heap->usedmem -= actual_size;
503
504 /* Allow heap to shrink */
505 if ( heap->shrink && heap->shrink ( freeing, freeing->size ) ) {
506 list_del ( &freeing->list );
507 heap->freemem -= freeing->size;
508 VALGRIND_MAKE_MEM_UNDEFINED ( freeing, freeing->size );
509 }
510
511 /* Sanity checks */
512 check_blocks ( heap );
514}
515
/**
 * Reallocate memory
 *
 * @v heap		Heap
 * @v old_ptr		Memory previously allocated by heap_realloc(), or NULL
 * @v new_size		Requested size
 * @ret new_ptr		Allocated memory, or NULL
 *
 * Allocates memory with no particular alignment requirement.  @c
 * new_ptr will be aligned to at least a multiple of sizeof(void*).
 * If @c old_ptr is non-NULL, then the contents of the newly allocated
 * memory will be the same as the contents of the previously allocated
 * memory, up to the minimum of the old and new sizes.  The old memory
 * will be freed.
 *
 * If allocation fails the previously allocated block is left
 * untouched and NULL is returned.
 *
 * Calling heap_realloc() with a new size of zero is a valid way to
 * free a memory block.
 */
void * heap_realloc ( struct heap *heap, void *old_ptr, size_t new_size ) {
	struct autosized_block *old_block;
	struct autosized_block *new_block;
	size_t old_total_size;
	size_t new_total_size;
	size_t old_size;
	/* Size of the hidden size-tracking header prepended to each block */
	size_t offset = offsetof ( struct autosized_block, data );
	/* NOWHERE is the sentinel returned for successful zero-length
	 * allocations, distinguishing them from allocation failure (NULL).
	 */
	void *new_ptr = NOWHERE;

	/* Allocate new memory if necessary.  If allocation fails,
	 * return without touching the old block.
	 */
	if ( new_size ) {
		new_total_size = ( new_size + offset );
		/* Detect unsigned overflow when adding the header size */
		if ( new_total_size < new_size )
			return NULL;
		/* Negative offset aligns the *data* region (rather than
		 * the header) to ptr_align.
		 */
		new_block = heap_alloc_block ( heap, new_total_size,
					       heap->ptr_align, -offset );
		if ( ! new_block )
			return NULL;
		new_block->size = new_total_size;
		/* Poison the size header so that buffer overruns backwards
		 * into it are caught under Valgrind.
		 */
		VALGRIND_MAKE_MEM_NOACCESS ( &new_block->size,
					     sizeof ( new_block->size ) );
		new_ptr = &new_block->data;
		VALGRIND_MALLOCLIKE_BLOCK ( new_ptr, new_size, 0, 0 );
		assert ( ( ( ( intptr_t ) new_ptr ) &
			   ( heap->ptr_align - 1 ) ) == 0 );
	}

	/* Copy across relevant part of the old data region (if any),
	 * then free it.  Note that at this point either (a) new_ptr
	 * is valid, or (b) new_size is 0; either way, the memcpy() is
	 * valid.
	 */
	if ( old_ptr && ( old_ptr != NOWHERE ) ) {
		old_block = container_of ( old_ptr, struct autosized_block,
					   data );
		/* Temporarily unpoison the old size header so we can read it */
		VALGRIND_MAKE_MEM_DEFINED ( &old_block->size,
					    sizeof ( old_block->size ) );
		old_total_size = old_block->size;
		assert ( old_total_size != 0 );
		old_size = ( old_total_size - offset );
		memcpy ( new_ptr, old_ptr,
			 ( ( old_size < new_size ) ? old_size : new_size ) );
		VALGRIND_FREELIKE_BLOCK ( old_ptr, 0 );
		heap_free_block ( heap, old_block, old_total_size );
	}

	/* Report if any assertion fired during the heap manipulation above */
	if ( ASSERTED ) {
		DBGC ( heap, "HEAP detected possible memory corruption "
		       "from %p\n", __builtin_return_address ( 0 ) );
	}
	return new_ptr;
}
591
592/** The global heap */
593static struct heap heap = {
594 .blocks = LIST_HEAD_INIT ( heap.blocks ),
595 .align = MIN_MEMBLOCK_ALIGN,
596 .ptr_align = sizeof ( void * ),
598};
599
/**
 * Reallocate memory
 *
 * @v old_ptr		Memory previously allocated by malloc(), or NULL
 * @v new_size		Requested size
 * @ret new_ptr		Allocated memory, or NULL
 *
 * Thin wrapper that delegates to heap_realloc() on the global heap;
 * see heap_realloc() for the full contract.
 */
void * realloc ( void *old_ptr, size_t new_size ) {

	return heap_realloc ( &heap, old_ptr, new_size );
}
611
612/**
613 * Allocate memory
614 *
615 * @v size Requested size
616 * @ret ptr Memory, or NULL
617 *
618 * Allocates memory with no particular alignment requirement. @c ptr
619 * will be aligned to at least a multiple of sizeof(void*).
620 */
621void * malloc ( size_t size ) {
622 void *ptr;
623
624 ptr = realloc ( NULL, size );
625 if ( ASSERTED ) {
626 DBGC ( &heap, "HEAP detected possible memory corruption "
627 "from %p\n", __builtin_return_address ( 0 ) );
628 }
629 return ptr;
630}
631
632/**
633 * Free memory
634 *
635 * @v ptr Memory allocated by malloc(), or NULL
636 *
637 * Memory allocated with malloc_phys() cannot be freed with free(); it
638 * must be freed with free_phys() instead.
639 *
640 * If @c ptr is NULL, no action is taken.
641 */
642void free ( void *ptr ) {
643
644 realloc ( ptr, 0 );
645 if ( ASSERTED ) {
646 DBGC ( &heap, "HEAP detected possible memory corruption "
647 "from %p\n", __builtin_return_address ( 0 ) );
648 }
649}
650
651/**
652 * Allocate cleared memory
653 *
654 * @v size Requested size
655 * @ret ptr Allocated memory
656 *
657 * Allocate memory as per malloc(), and zero it.
658 *
659 * This function name is non-standard, but pretty intuitive.
660 * zalloc(size) is always equivalent to calloc(1,size)
661 */
662void * zalloc ( size_t size ) {
663 void *data;
664
665 data = malloc ( size );
666 if ( data )
667 memset ( data, 0, size );
668 if ( ASSERTED ) {
669 DBGC ( &heap, "HEAP detected possible memory corruption "
670 "from %p\n", __builtin_return_address ( 0 ) );
671 }
672 return data;
673}
674
675/**
676 * Allocate memory with specified physical alignment and offset
677 *
678 * @v size Requested size
679 * @v align Physical alignment
680 * @v offset Offset from physical alignment
681 * @ret ptr Memory, or NULL
682 *
683 * @c align must be a power of two. @c size may not be zero.
684 */
685void * malloc_phys_offset ( size_t size, size_t phys_align, size_t offset ) {
686 void * ptr;
687
688 ptr = heap_alloc_block ( &heap, size, phys_align, offset );
689 if ( ptr && size ) {
690 assert ( ( phys_align == 0 ) ||
691 ( ( ( virt_to_phys ( ptr ) ^ offset ) &
692 ( phys_align - 1 ) ) == 0 ) );
693 VALGRIND_MALLOCLIKE_BLOCK ( ptr, size, 0, 0 );
694 }
695 return ptr;
696}
697
/**
 * Allocate memory with specified physical alignment
 *
 * @v size		Requested size
 * @v phys_align	Physical alignment
 * @ret ptr		Memory, or NULL
 *
 * Convenience wrapper around malloc_phys_offset() with a zero offset.
 *
 * @c phys_align must be a power of two.  @c size may not be zero.
 */
void * malloc_phys ( size_t size, size_t phys_align ) {

	return malloc_phys_offset ( size, phys_align, 0 );
}
711
/**
 * Free memory allocated with malloc_phys()
 *
 * @v ptr		Memory allocated by malloc_phys(), or NULL
 * @v size		Size of memory, as passed to malloc_phys()
 *
 * Memory allocated with malloc_phys() can only be freed with
 * free_phys(); it cannot be freed with the standard free().
 *
 * If @c ptr is NULL, no action is taken.
 */
void free_phys ( void *ptr, size_t size ) {

	/* Tell Valgrind the block is gone before the allocator reuses it */
	VALGRIND_FREELIKE_BLOCK ( ptr, 0 );
	heap_free_block ( &heap, ptr, size );
}
728
729/**
730 * Add memory to allocation pool
731 *
732 * @v heap Heap
733 * @v start Start address
734 * @v len Length of memory
735 *
736 * Adds a block of memory to the allocation pool. The memory must be
737 * aligned to the heap's required free memory block alignment.
738 */
739void heap_populate ( struct heap *heap, void *start, size_t len ) {
740
741 /* Sanity checks */
742 assert ( ( virt_to_phys ( start ) & ( heap->align - 1 ) ) == 0 );
743 assert ( ( len & ( heap->align - 1 ) ) == 0 );
744
745 /* Add to allocation pool */
747
748 /* Fix up memory usage statistics */
749 heap->usedmem += len;
750}
751
/**
 * Initialise the heap
 *
 * Seeds the global heap with the statically-allocated heap area.
 */
static void init_heap ( void ) {

	/* Sanity check: a free block descriptor must fit within the
	 * minimum block alignment.
	 */
	build_assert ( MIN_MEMBLOCK_ALIGN >= sizeof ( struct memory_block ) );

	/* Populate heap */
	/* NOTE(review): upstream may perform additional Valgrind
	 * poisoning of heap_area here before populating — confirm
	 * against the canonical source.
	 */
	heap_populate ( &heap, heap_area, sizeof ( heap_area ) );
}
766
/** Memory allocator initialisation function
 *
 * Registered at INIT_EARLY so that dynamic allocation is available to
 * later initialisation functions.
 */
struct init_fn heap_init_fn __init_fn ( INIT_EARLY ) = {
	.name = "heap",
	.initialise = init_heap,
};
772
773/**
774 * Discard all cached data on shutdown
775 *
776 */
777static void shutdown_cache ( int booting __unused ) {
779 DBGC ( &heap, "HEAP maximum usage %zdkB\n",
780 ( heap.maxusedmem >> 10 ) );
781}
782
/** Memory allocator shutdown function
 *
 * Registered at STARTUP_EARLY; only the shutdown hook is provided.
 */
struct startup_fn heap_startup_fn __startup_fn ( STARTUP_EARLY ) = {
	.name = "heap",
	.shutdown = shutdown_cache,
};
788
789/**
790 * Dump free block list (for debugging)
791 *
792 */
793void heap_dump ( struct heap *heap ) {
794 struct memory_block *block;
795
796 dbg_printf ( "HEAP free block list:\n" );
798 dbg_printf ( "...[%p,%p] (size %#zx)\n", block,
799 ( ( ( void * ) block ) + block->size ),
800 block->size );
801 }
802}
#define NULL
NULL pointer (VOID *)
Definition Base.h:322
unsigned long intptr_t
Definition stdint.h:21
signed long ssize_t
Definition stdint.h:7
#define build_assert(condition)
Assert a condition at build time (after dead code elimination)
Definition assert.h:77
#define ASSERTED
Definition assert.h:26
#define ASSERTING
Definition assert.h:20
#define assert(condition)
Assert a condition at run-time.
Definition assert.h:50
struct bofm_section_header done
Definition bofm_test.c:46
uint16_t offset
Offset to command line.
Definition bzimage.h:3
ring len
Length.
Definition dwmac.h:226
uint8_t data[48]
Additional event data.
Definition ena.h:11
#define __unused
Declare a variable or data structure as unused.
Definition compiler.h:573
#define DBGC2(...)
Definition compiler.h:522
#define DBGC(...)
Definition compiler.h:505
void dbg_printf(const char *fmt,...)
Print debug message.
Definition debug.c:39
#define INIT_EARLY
Early initialisation.
Definition init.h:30
uint32_t start
Starting offset.
Definition netvsc.h:1
uint16_t size
Buffer size.
Definition dwmac.h:3
static unsigned int count
Number of entries.
Definition dwmac.h:220
#define FILE_LICENCE(_licence)
Declare a particular licence as applying to a file.
Definition compiler.h:896
#define FILE_SECBOOT(_status)
Declare a file's UEFI Secure Boot permission status.
Definition compiler.h:926
#define STARTUP_EARLY
Early startup.
Definition init.h:64
#define __attribute__(x)
Definition compiler.h:10
iPXE I/O API
String functions.
void * memcpy(void *dest, const void *src, size_t len) __nonnull
void * memset(void *dest, int character, size_t len) __nonnull
#define __init_fn(init_order)
Declare an initialisation functon.
Definition init.h:24
#define __startup_fn(startup_order)
Declare a startup/shutdown function.
Definition init.h:53
unsigned long tmp
Definition linux_pci.h:65
Linked lists.
#define LIST_HEAD_INIT(list)
Initialise a static list head.
Definition list.h:31
#define list_for_each_entry_safe(pos, tmp, head, member)
Iterate over entries in a list, safe against deletion of the current entry.
Definition list.h:459
#define list_add_tail(new, head)
Add a new entry to the tail of a list.
Definition list.h:94
#define list_for_each_entry(pos, head, member)
Iterate over entries in a list.
Definition list.h:432
#define list_del(list)
Delete an entry from a list.
Definition list.h:120
#define list_check(list)
Check a list entry or list head is valid.
Definition list.h:56
#define list_add(new, head)
Add a new entry to the head of a list.
Definition list.h:70
void * heap_realloc(struct heap *heap, void *old_ptr, size_t new_size)
Reallocate memory.
Definition malloc.c:537
static char heap_area[HEAP_SIZE]
The heap area.
Definition malloc.c:88
#define HEAP_SIZE
Heap area size.
Definition malloc.c:82
static unsigned int discard_cache(size_t size __unused)
Discard some cached data.
Definition malloc.c:228
void * realloc(void *old_ptr, size_t new_size)
Reallocate memory.
Definition malloc.c:607
void * zalloc(size_t size)
Allocate cleared memory.
Definition malloc.c:662
void * malloc_phys(size_t size, size_t phys_align)
Allocate memory with specified physical alignment.
Definition malloc.c:707
static void init_heap(void)
Initialise the heap.
Definition malloc.c:756
void heap_dump(struct heap *heap)
Dump free block list (for debugging)
Definition malloc.c:793
static void shutdown_cache(int booting __unused)
Discard all cached data on shutdown.
Definition malloc.c:777
static void valgrind_make_blocks_defined(struct heap *heap)
Mark all blocks in free list as defined.
Definition malloc.c:95
void * malloc(size_t size)
Allocate memory.
Definition malloc.c:621
static void discard_all_cache(void)
Discard all cached data.
Definition malloc.c:244
void * malloc_phys_offset(size_t size, size_t phys_align, size_t offset)
Allocate memory with specified physical alignment and offset.
Definition malloc.c:685
#define MIN_MEMBLOCK_ALIGN
Physical address alignment maintained for free blocks of memory.
Definition malloc.c:67
#define HEAP_ALIGN
Heap area alignment.
Definition malloc.c:85
void free_phys(void *ptr, size_t size)
Free memory allocated with malloc_phys()
Definition malloc.c:723
static void check_blocks(struct heap *heap)
Check integrity of the blocks in the free list.
Definition malloc.c:186
static void valgrind_make_blocks_noaccess(struct heap *heap)
Mark all blocks in free list as inaccessible.
Definition malloc.c:137
void heap_populate(struct heap *heap, void *start, size_t len)
Add memory to allocation pool.
Definition malloc.c:739
static void * heap_alloc_block(struct heap *heap, size_t size, size_t align, size_t offset)
Allocate a memory block.
Definition malloc.c:266
static void heap_free_block(struct heap *heap, void *ptr, size_t size)
Free a memory block.
Definition malloc.c:407
Dynamic memory allocation.
#define CACHE_DISCARDERS
Cache discarder table.
Definition malloc.h:103
#define NOWHERE
Address for zero-length memory blocks.
Definition malloc.h:42
#define VALGRIND_MAKE_MEM_NOACCESS(_qzz_addr, _qzz_len)
Definition memcheck.h:112
#define VALGRIND_MAKE_MEM_DEFINED(_qzz_addr, _qzz_len)
Definition memcheck.h:132
#define VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr, _qzz_len)
Definition memcheck.h:122
uint8_t block[3][8]
DES-encrypted blocks.
Definition mschapv2.h:1
Reference counting.
static void(* free)(struct refcnt *refcnt))
Definition refcnt.h:55
#define offsetof(type, field)
Get offset of a field within a structure.
Definition stddef.h:25
#define container_of(ptr, type, field)
Get containing structure.
Definition stddef.h:36
A block of allocated memory complete with size information.
Definition malloc.c:70
size_t size
Size of this block.
Definition malloc.c:72
char data[0]
Remaining data.
Definition malloc.c:74
A cache discarder.
Definition malloc.h:93
unsigned int(* discard)(void)
Discard some cached data.
Definition malloc.h:99
A heap.
Definition malloc.h:45
size_t usedmem
Total amount of used memory.
Definition malloc.h:57
size_t freemem
Total amount of free memory.
Definition malloc.h:55
size_t ptr_align
Alignment for size-tracked allocations.
Definition malloc.h:52
unsigned int(* grow)(size_t size)
Attempt to grow heap (optional)
Definition malloc.h:67
struct list_head blocks
List of free memory blocks.
Definition malloc.h:47
size_t maxusedmem
Maximum amount of used memory.
Definition malloc.h:59
size_t align
Alignment for free memory blocks.
Definition malloc.h:50
unsigned int(* shrink)(void *ptr, size_t size)
Allow heap to shrink (optional)
Definition malloc.h:79
An initialisation function.
Definition init.h:15
A doubly-linked list entry (or list head)
Definition list.h:19
struct list_head * next
Next list entry.
Definition list.h:21
struct list_head * prev
Previous list entry.
Definition list.h:23
A free block of memory.
Definition malloc.c:44
size_t size
Size of this block.
Definition malloc.c:46
struct list_head list
List of free blocks.
Definition malloc.c:59
char pad[offsetof(struct refcnt, count)+sizeof(((struct refcnt *) NULL) ->count)]
Padding.
Definition malloc.c:57
A reference counter.
Definition refcnt.h:27
A startup/shutdown function.
Definition init.h:43
#define for_each_table_entry(pointer, table)
Iterate through all entries within a linker table.
Definition tables.h:386
#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed)
Definition valgrind.h:4412
#define RUNNING_ON_VALGRIND
Definition valgrind.h:4176
#define VALGRIND_FREELIKE_BLOCK(addr, rzB)
Definition valgrind.h:4422