iPXE
malloc.c
Go to the documentation of this file.
1 /*
2  * Copyright (C) 2006 Michael Brown <mbrown@fensystems.co.uk>.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License as
6  * published by the Free Software Foundation; either version 2 of the
7  * License, or any later version.
8  *
9  * This program is distributed in the hope that it will be useful, but
10  * WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12  * General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
17  * 02110-1301, USA.
18  *
19  * You can also choose to distribute this program under the terms of
20  * the Unmodified Binary Distribution Licence (as given in the file
21  * COPYING.UBDL), provided that you have satisfied its requirements.
22  */
23 
24 FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
25 
26 #include <stddef.h>
27 #include <stdint.h>
28 #include <string.h>
29 #include <strings.h>
30 #include <ipxe/io.h>
31 #include <ipxe/list.h>
32 #include <ipxe/init.h>
33 #include <ipxe/refcnt.h>
34 #include <ipxe/malloc.h>
35 #include <valgrind/memcheck.h>
36 
37 /** @file
38  *
39  * Dynamic memory allocation
40  *
41  */
42 
43 /** A free block of memory */
44 struct memory_block {
45  /** Size of this block */
46  size_t size;
47  /** Padding
48  *
49  * This padding exists to cover the "count" field of a
50  * reference counter, in the common case where a reference
51  * counter is the first element of a dynamically-allocated
52  * object. It avoids clobbering the "count" field as soon as
53  * the memory is freed, and so allows for the possibility of
54  * detecting reference counting errors.
55  */
56  char pad[ offsetof ( struct refcnt, count ) +
57  sizeof ( ( ( struct refcnt * ) NULL )->count ) ];
58  /** List of free blocks */
59  struct list_head list;
60 };
61 
62 /** Physical address alignment maintained for free blocks of memory */
63 #define MEMBLOCK_ALIGN \
64  ( ( size_t ) ( 1 << ( fls ( sizeof ( struct memory_block ) - 1 ) ) ) )
65 
/** A block of allocated memory complete with size information */
struct autosized_block {
	/** Size of this block */
	size_t size;
	/** Remaining data
	 *
	 * Trailing variable-length region returned to the caller of
	 * malloc(); the size field immediately precedes it.
	 */
	char data[0];
};
73 
/**
 * Address for zero-length memory blocks
 *
 * @c malloc(0) or @c realloc(ptr,0) will return the special value @c
 * NOWHERE.  Calling @c free(NOWHERE) will have no effect.
 *
 * This is consistent with the ANSI C standards, which state that
 * "either NULL or a pointer suitable to be passed to free()" must be
 * returned in these cases.  Using a special non-NULL value means that
 * the caller can take a NULL return value to indicate failure,
 * without first having to check for a requested size of zero.
 *
 * Code outside of malloc.c never needs to refer to the actual value
 * of @c NOWHERE; this is an internal definition.
 */
#define NOWHERE ( ( void * ) ~( ( intptr_t ) 0 ) )
90 
/** List of free memory blocks */
static LIST_HEAD ( free_blocks );

/** Total amount of free memory */
size_t freemem;

/** Total amount of used memory */
size_t usedmem;

/** Maximum amount of used memory */
size_t maxusedmem;

/**
 * Heap size
 *
 * Currently fixed at 512kB.
 */
#define HEAP_SIZE ( 512 * 1024 )

/** The heap itself */
static char heap[HEAP_SIZE];
112 
113 /**
114  * Mark all blocks in free list as defined
115  *
116  */
117 static inline void valgrind_make_blocks_defined ( void ) {
118  struct memory_block *block;
119 
120  /* Do nothing unless running under Valgrind */
121  if ( RUNNING_ON_VALGRIND <= 0 )
122  return;
123 
124  /* Traverse free block list, marking each block structure as
125  * defined. Some contortions are necessary to avoid errors
126  * from list_check().
127  */
128 
129  /* Mark block list itself as defined */
130  VALGRIND_MAKE_MEM_DEFINED ( &free_blocks, sizeof ( free_blocks ) );
131 
132  /* Mark areas accessed by list_check() as defined */
133  VALGRIND_MAKE_MEM_DEFINED ( &free_blocks.prev->next,
134  sizeof ( free_blocks.prev->next ) );
135  VALGRIND_MAKE_MEM_DEFINED ( free_blocks.next,
136  sizeof ( *free_blocks.next ) );
137  VALGRIND_MAKE_MEM_DEFINED ( &free_blocks.next->next->prev,
138  sizeof ( free_blocks.next->next->prev ) );
139 
140  /* Mark each block in list as defined */
141  list_for_each_entry ( block, &free_blocks, list ) {
142 
143  /* Mark block as defined */
144  VALGRIND_MAKE_MEM_DEFINED ( block, sizeof ( *block ) );
145 
146  /* Mark areas accessed by list_check() as defined */
147  VALGRIND_MAKE_MEM_DEFINED ( block->list.next,
148  sizeof ( *block->list.next ) );
149  VALGRIND_MAKE_MEM_DEFINED ( &block->list.next->next->prev,
150  sizeof ( block->list.next->next->prev ) );
151  }
152 }
153 
154 /**
155  * Mark all blocks in free list as inaccessible
156  *
157  */
158 static inline void valgrind_make_blocks_noaccess ( void ) {
159  struct memory_block *block;
160  struct memory_block *prev = NULL;
161 
162  /* Do nothing unless running under Valgrind */
163  if ( RUNNING_ON_VALGRIND <= 0 )
164  return;
165 
166  /* Traverse free block list, marking each block structure as
167  * inaccessible. Some contortions are necessary to avoid
168  * errors from list_check().
169  */
170 
171  /* Mark each block in list as inaccessible */
172  list_for_each_entry ( block, &free_blocks, list ) {
173 
174  /* Mark previous block (if any) as inaccessible. (Current
175  * block will be accessed by list_check().)
176  */
177  if ( prev )
178  VALGRIND_MAKE_MEM_NOACCESS ( prev, sizeof ( *prev ) );
179  prev = block;
180 
181  /* At the end of the list, list_check() will end up
182  * accessing the first list item. Temporarily mark
183  * this area as defined.
184  */
185  VALGRIND_MAKE_MEM_DEFINED ( &free_blocks.next->prev,
186  sizeof ( free_blocks.next->prev ) );
187  }
188  /* Mark last block (if any) as inaccessible */
189  if ( prev )
190  VALGRIND_MAKE_MEM_NOACCESS ( prev, sizeof ( *prev ) );
191 
192  /* Mark as inaccessible the area that was temporarily marked
193  * as defined to avoid errors from list_check().
194  */
195  VALGRIND_MAKE_MEM_NOACCESS ( &free_blocks.next->prev,
196  sizeof ( free_blocks.next->prev ) );
197 
198  /* Mark block list itself as inaccessible */
199  VALGRIND_MAKE_MEM_NOACCESS ( &free_blocks, sizeof ( free_blocks ) );
200 }
201 
202 /**
203  * Check integrity of the blocks in the free list
204  *
205  */
206 static inline void check_blocks ( void ) {
207  struct memory_block *block;
208  struct memory_block *prev = NULL;
209 
210  if ( ! ASSERTING )
211  return;
212 
213  list_for_each_entry ( block, &free_blocks, list ) {
214 
215  /* Check alignment */
216  assert ( ( virt_to_phys ( block ) &
217  ( MEMBLOCK_ALIGN - 1 ) ) == 0 );
218 
219  /* Check that list structure is intact */
220  list_check ( &block->list );
221 
222  /* Check that block size is not too small */
223  assert ( block->size >= sizeof ( *block ) );
224  assert ( block->size >= MEMBLOCK_ALIGN );
225 
226  /* Check that block does not wrap beyond end of address space */
227  assert ( ( ( void * ) block + block->size ) >
228  ( ( void * ) block ) );
229 
230  /* Check that blocks remain in ascending order, and
231  * that adjacent blocks have been merged.
232  */
233  if ( prev ) {
234  assert ( ( ( void * ) block ) > ( ( void * ) prev ) );
235  assert ( ( ( void * ) block ) >
236  ( ( ( void * ) prev ) + prev->size ) );
237  }
238  prev = block;
239  }
240 }
241 
242 /**
243  * Discard some cached data
244  *
245  * @ret discarded Number of cached items discarded
246  */
247 static unsigned int discard_cache ( void ) {
248  struct cache_discarder *discarder;
249  unsigned int discarded;
250 
251  for_each_table_entry ( discarder, CACHE_DISCARDERS ) {
252  discarded = discarder->discard();
253  if ( discarded )
254  return discarded;
255  }
256  return 0;
257 }
258 
/**
 * Discard all cached data
 *
 */
static void discard_all_cache ( void ) {

	/* Keep discarding until no discarder can free anything more */
	while ( discard_cache() ) {
		/* Do nothing */
	}
}
270 
271 /**
272  * Allocate a memory block
273  *
274  * @v size Requested size
275  * @v align Physical alignment
276  * @v offset Offset from physical alignment
277  * @ret ptr Memory block, or NULL
278  *
279  * Allocates a memory block @b physically aligned as requested. No
280  * guarantees are provided for the alignment of the virtual address.
281  *
282  * @c align must be a power of two. @c size may not be zero.
283  */
284 void * alloc_memblock ( size_t size, size_t align, size_t offset ) {
285  struct memory_block *block;
286  size_t actual_offset;
287  size_t align_mask;
288  size_t actual_size;
289  size_t pre_size;
290  size_t post_size;
291  struct memory_block *pre;
292  struct memory_block *post;
293  unsigned int discarded;
294  void *ptr;
295 
296  /* Sanity checks */
297  assert ( size != 0 );
298  assert ( ( align == 0 ) || ( ( align & ( align - 1 ) ) == 0 ) );
300  check_blocks();
301 
302  /* Calculate offset of memory block */
303  actual_offset = ( offset & ~( MEMBLOCK_ALIGN - 1 ) );
304  assert ( actual_offset <= offset );
305 
306  /* Calculate size of memory block */
307  actual_size = ( ( size + offset - actual_offset + MEMBLOCK_ALIGN - 1 )
308  & ~( MEMBLOCK_ALIGN - 1 ) );
309  if ( ! actual_size ) {
310  /* The requested size is not permitted to be zero. A
311  * zero result at this point indicates that either the
312  * original requested size was zero, or that unsigned
313  * integer overflow has occurred.
314  */
315  ptr = NULL;
316  goto done;
317  }
318  assert ( actual_size >= size );
319 
320  /* Calculate alignment mask */
321  align_mask = ( ( align - 1 ) | ( MEMBLOCK_ALIGN - 1 ) );
322 
323  DBGC2 ( &heap, "HEAP allocating %#zx (aligned %#zx+%zx)\n",
324  size, align, offset );
325  while ( 1 ) {
326  /* Search through blocks for the first one with enough space */
327  list_for_each_entry ( block, &free_blocks, list ) {
328  pre_size = ( ( actual_offset - virt_to_phys ( block ) )
329  & align_mask );
330  if ( ( block->size < pre_size ) ||
331  ( ( block->size - pre_size ) < actual_size ) )
332  continue;
333  post_size = ( block->size - pre_size - actual_size );
334  /* Split block into pre-block, block, and
335  * post-block. After this split, the "pre"
336  * block is the one currently linked into the
337  * free list.
338  */
339  pre = block;
340  block = ( ( ( void * ) pre ) + pre_size );
341  post = ( ( ( void * ) block ) + actual_size );
342  DBGC2 ( &heap, "HEAP splitting [%p,%p) -> [%p,%p) "
343  "+ [%p,%p)\n", pre,
344  ( ( ( void * ) pre ) + pre->size ), pre, block,
345  post, ( ( ( void * ) pre ) + pre->size ) );
346  /* If there is a "post" block, add it in to
347  * the free list.
348  */
349  if ( post_size ) {
350  assert ( post_size >= MEMBLOCK_ALIGN );
352  sizeof ( *post ));
353  post->size = post_size;
354  list_add ( &post->list, &pre->list );
355  }
356  /* Shrink "pre" block, leaving the main block
357  * isolated and no longer part of the free
358  * list.
359  */
360  pre->size = pre_size;
361  /* If there is no "pre" block, remove it from
362  * the list.
363  */
364  if ( ! pre_size ) {
365  list_del ( &pre->list );
367  sizeof ( *pre ) );
368  } else {
369  assert ( pre_size >= MEMBLOCK_ALIGN );
370  }
371  /* Update memory usage statistics */
372  freemem -= actual_size;
373  usedmem += actual_size;
374  if ( usedmem > maxusedmem )
376  /* Return allocated block */
377  ptr = ( ( ( void * ) block ) + offset - actual_offset );
378  DBGC2 ( &heap, "HEAP allocated [%p,%p) within "
379  "[%p,%p)\n", ptr, ( ptr + size ), block,
380  ( ( ( void * ) block ) + actual_size ) );
382  goto done;
383  }
384 
385  /* Try discarding some cached data to free up memory */
386  DBGC ( &heap, "HEAP attempting discard for %#zx (aligned "
387  "%#zx+%zx), used %zdkB\n", size, align, offset,
388  ( usedmem >> 10 ) );
390  discarded = discard_cache();
392  check_blocks();
393  if ( ! discarded ) {
394  /* Nothing available to discard */
395  DBGC ( &heap, "HEAP failed to allocate %#zx (aligned "
396  "%#zx)\n", size, align );
397  ptr = NULL;
398  goto done;
399  }
400  }
401 
402  done:
403  check_blocks();
405  return ptr;
406 }
407 
408 /**
409  * Free a memory block
410  *
411  * @v ptr Memory allocated by alloc_memblock(), or NULL
412  * @v size Size of the memory
413  *
414  * If @c ptr is NULL, no action is taken.
415  */
416 void free_memblock ( void *ptr, size_t size ) {
417  struct memory_block *freeing;
418  struct memory_block *block;
419  struct memory_block *tmp;
420  size_t sub_offset;
421  size_t actual_size;
422  ssize_t gap_before;
423  ssize_t gap_after = -1;
424 
425  /* Allow for ptr==NULL */
426  if ( ! ptr )
427  return;
429 
430  /* Sanity checks */
432  check_blocks();
433 
434  /* Round up to match actual block that alloc_memblock() would
435  * have allocated.
436  */
437  assert ( size != 0 );
438  sub_offset = ( virt_to_phys ( ptr ) & ( MEMBLOCK_ALIGN - 1) );
439  freeing = ( ptr - sub_offset );
440  actual_size = ( ( size + sub_offset + MEMBLOCK_ALIGN - 1 ) &
441  ~( MEMBLOCK_ALIGN - 1 ) );
442  DBGC2 ( &heap, "HEAP freeing [%p,%p) within [%p,%p)\n",
443  ptr, ( ptr + size ), freeing,
444  ( ( ( void * ) freeing ) + actual_size ) );
445  VALGRIND_MAKE_MEM_UNDEFINED ( freeing, sizeof ( *freeing ) );
446 
447  /* Check that this block does not overlap the free list */
448  if ( ASSERTING ) {
449  list_for_each_entry ( block, &free_blocks, list ) {
450  if ( ( ( ( void * ) block ) <
451  ( ( void * ) freeing + actual_size ) ) &&
452  ( ( void * ) freeing <
453  ( ( void * ) block + block->size ) ) ) {
454  assert ( 0 );
455  DBGC ( &heap, "HEAP double free of [%p,%p) "
456  "overlapping [%p,%p) detected from %p\n",
457  freeing,
458  ( ( ( void * ) freeing ) + size ), block,
459  ( ( void * ) block + block->size ),
460  __builtin_return_address ( 0 ) );
461  }
462  }
463  }
464 
465  /* Insert/merge into free list */
466  freeing->size = actual_size;
467  list_for_each_entry_safe ( block, tmp, &free_blocks, list ) {
468  /* Calculate gaps before and after the "freeing" block */
469  gap_before = ( ( ( void * ) freeing ) -
470  ( ( ( void * ) block ) + block->size ) );
471  gap_after = ( ( ( void * ) block ) -
472  ( ( ( void * ) freeing ) + freeing->size ) );
473  /* Merge with immediately preceding block, if possible */
474  if ( gap_before == 0 ) {
475  DBGC2 ( &heap, "HEAP merging [%p,%p) + [%p,%p) -> "
476  "[%p,%p)\n", block,
477  ( ( ( void * ) block ) + block->size ), freeing,
478  ( ( ( void * ) freeing ) + freeing->size ),
479  block,
480  ( ( ( void * ) freeing ) + freeing->size ) );
481  block->size += actual_size;
482  list_del ( &block->list );
483  VALGRIND_MAKE_MEM_NOACCESS ( freeing,
484  sizeof ( *freeing ) );
485  freeing = block;
486  }
487  /* Stop processing as soon as we reach a following block */
488  if ( gap_after >= 0 )
489  break;
490  }
491 
492  /* Insert before the immediately following block. If
493  * possible, merge the following block into the "freeing"
494  * block.
495  */
496  DBGC2 ( &heap, "HEAP freed [%p,%p)\n",
497  freeing, ( ( ( void * ) freeing ) + freeing->size ) );
498  list_add_tail ( &freeing->list, &block->list );
499  if ( gap_after == 0 ) {
500  DBGC2 ( &heap, "HEAP merging [%p,%p) + [%p,%p) -> [%p,%p)\n",
501  freeing, ( ( ( void * ) freeing ) + freeing->size ),
502  block, ( ( ( void * ) block ) + block->size ), freeing,
503  ( ( ( void * ) block ) + block->size ) );
504  freeing->size += block->size;
505  list_del ( &block->list );
506  VALGRIND_MAKE_MEM_NOACCESS ( block, sizeof ( *block ) );
507  }
508 
509  /* Update memory usage statistics */
510  freemem += actual_size;
511  usedmem -= actual_size;
512 
513  check_blocks();
515 }
516 
517 /**
518  * Reallocate memory
519  *
520  * @v old_ptr Memory previously allocated by malloc(), or NULL
521  * @v new_size Requested size
522  * @ret new_ptr Allocated memory, or NULL
523  *
524  * Allocates memory with no particular alignment requirement. @c
525  * new_ptr will be aligned to at least a multiple of sizeof(void*).
526  * If @c old_ptr is non-NULL, then the contents of the newly allocated
527  * memory will be the same as the contents of the previously allocated
528  * memory, up to the minimum of the old and new sizes. The old memory
529  * will be freed.
530  *
531  * If allocation fails the previously allocated block is left
532  * untouched and NULL is returned.
533  *
534  * Calling realloc() with a new size of zero is a valid way to free a
535  * memory block.
536  */
537 void * realloc ( void *old_ptr, size_t new_size ) {
538  struct autosized_block *old_block;
539  struct autosized_block *new_block;
540  size_t old_total_size;
541  size_t new_total_size;
542  size_t old_size;
543  void *new_ptr = NOWHERE;
544 
545  /* Allocate new memory if necessary. If allocation fails,
546  * return without touching the old block.
547  */
548  if ( new_size ) {
549  new_total_size = ( new_size +
550  offsetof ( struct autosized_block, data ) );
551  if ( new_total_size < new_size )
552  return NULL;
553  new_block = alloc_memblock ( new_total_size, 1, 0 );
554  if ( ! new_block )
555  return NULL;
556  new_block->size = new_total_size;
557  VALGRIND_MAKE_MEM_NOACCESS ( &new_block->size,
558  sizeof ( new_block->size ) );
559  new_ptr = &new_block->data;
560  VALGRIND_MALLOCLIKE_BLOCK ( new_ptr, new_size, 0, 0 );
561  }
562 
563  /* Copy across relevant part of the old data region (if any),
564  * then free it. Note that at this point either (a) new_ptr
565  * is valid, or (b) new_size is 0; either way, the memcpy() is
566  * valid.
567  */
568  if ( old_ptr && ( old_ptr != NOWHERE ) ) {
569  old_block = container_of ( old_ptr, struct autosized_block,
570  data );
571  VALGRIND_MAKE_MEM_DEFINED ( &old_block->size,
572  sizeof ( old_block->size ) );
573  old_total_size = old_block->size;
574  assert ( old_total_size != 0 );
575  old_size = ( old_total_size -
576  offsetof ( struct autosized_block, data ) );
577  memcpy ( new_ptr, old_ptr,
578  ( ( old_size < new_size ) ? old_size : new_size ) );
579  VALGRIND_FREELIKE_BLOCK ( old_ptr, 0 );
580  free_memblock ( old_block, old_total_size );
581  }
582 
583  if ( ASSERTED ) {
584  DBGC ( &heap, "HEAP detected possible memory corruption "
585  "from %p\n", __builtin_return_address ( 0 ) );
586  }
587  return new_ptr;
588 }
589 
590 /**
591  * Allocate memory
592  *
593  * @v size Requested size
594  * @ret ptr Memory, or NULL
595  *
596  * Allocates memory with no particular alignment requirement. @c ptr
597  * will be aligned to at least a multiple of sizeof(void*).
598  */
599 void * malloc ( size_t size ) {
600  void *ptr;
601 
602  ptr = realloc ( NULL, size );
603  if ( ASSERTED ) {
604  DBGC ( &heap, "HEAP detected possible memory corruption "
605  "from %p\n", __builtin_return_address ( 0 ) );
606  }
607  return ptr;
608 }
609 
610 /**
611  * Free memory
612  *
613  * @v ptr Memory allocated by malloc(), or NULL
614  *
615  * Memory allocated with malloc_phys() cannot be freed with free(); it
616  * must be freed with free_phys() instead.
617  *
618  * If @c ptr is NULL, no action is taken.
619  */
620 void free ( void *ptr ) {
621 
622  realloc ( ptr, 0 );
623  if ( ASSERTED ) {
624  DBGC ( &heap, "HEAP detected possible memory corruption "
625  "from %p\n", __builtin_return_address ( 0 ) );
626  }
627 }
628 
629 /**
630  * Allocate cleared memory
631  *
632  * @v size Requested size
633  * @ret ptr Allocated memory
634  *
635  * Allocate memory as per malloc(), and zero it.
636  *
637  * This function name is non-standard, but pretty intuitive.
638  * zalloc(size) is always equivalent to calloc(1,size)
639  */
640 void * zalloc ( size_t size ) {
641  void *data;
642 
643  data = malloc ( size );
644  if ( data )
645  memset ( data, 0, size );
646  if ( ASSERTED ) {
647  DBGC ( &heap, "HEAP detected possible memory corruption "
648  "from %p\n", __builtin_return_address ( 0 ) );
649  }
650  return data;
651 }
652 
653 /**
654  * Add memory to allocation pool
655  *
656  * @v start Start address
657  * @v len Length of memory
658  *
659  * Adds a block of memory to the allocation pool. This is a one-way
660  * operation; there is no way to reclaim this memory.
661  */
662 static void mpopulate ( void *start, size_t len ) {
663  size_t skip;
664 
665  /* Align start of block */
666  skip = ( ( -virt_to_phys ( start ) ) & ( MEMBLOCK_ALIGN - 1 ) );
667  if ( skip > len )
668  return;
669  start += skip;
670  len -= skip;
671 
672  /* Align end of block */
673  len &= ~( MEMBLOCK_ALIGN - 1 );
674  if ( ! len )
675  return;
676 
677  /* Add to allocation pool */
678  free_memblock ( start, len );
679 
680  /* Fix up memory usage statistics */
681  usedmem += len;
682 }
683 
684 /**
685  * Initialise the heap
686  *
687  */
688 static void init_heap ( void ) {
689  VALGRIND_MAKE_MEM_NOACCESS ( heap, sizeof ( heap ) );
690  VALGRIND_MAKE_MEM_NOACCESS ( &free_blocks, sizeof ( free_blocks ) );
691  mpopulate ( heap, sizeof ( heap ) );
692 }
693 
694 /** Memory allocator initialisation function */
695 struct init_fn heap_init_fn __init_fn ( INIT_EARLY ) = {
697 };
698 
699 /**
700  * Discard all cached data on shutdown
701  *
702  */
703 static void shutdown_cache ( int booting __unused ) {
705  DBGC ( &heap, "HEAP maximum usage %zdkB\n", ( maxusedmem >> 10 ) );
706 }
707 
708 /** Memory allocator shutdown function */
709 struct startup_fn heap_startup_fn __startup_fn ( STARTUP_EARLY ) = {
710  .name = "heap",
711  .shutdown = shutdown_cache,
712 };
713 
714 /**
715  * Dump free block list (for debugging)
716  *
717  */
718 void mdumpfree ( void ) {
719  struct memory_block *block;
720 
721  dbg_printf ( "HEAP free block list:\n" );
722  list_for_each_entry ( block, &free_blocks, list ) {
723  dbg_printf ( "...[%p,%p] (size %#zx)\n", block,
724  ( ( ( void * ) block ) + block->size ),
725  block->size );
726  }
727 }
static __always_inline void struct dma_mapping size_t size_t align
Definition: dma.h:222
void free_memblock(void *ptr, size_t size)
Free a memory block.
Definition: malloc.c:416
iPXE I/O API
void(* initialise)(void)
Definition: init.h:15
#define list_add(new, head)
Add a new entry to the head of a list.
Definition: list.h:69
#define INIT_EARLY
Early initialisation.
Definition: init.h:28
#define STARTUP_EARLY
Early startup.
Definition: init.h:62
#define VALGRIND_MAKE_MEM_DEFINED(_qzz_addr, _qzz_len)
Definition: memcheck.h:131
uint8_t size
Entry size (in 32-bit words)
Definition: ena.h:16
char data[0]
Remaining data.
Definition: malloc.c:71
#define DBGC(...)
Definition: compiler.h:505
#define CACHE_DISCARDERS
Cache discarder table.
Definition: malloc.h:92
#define RUNNING_ON_VALGRIND
Definition: valgrind.h:4175
#define offsetof(type, field)
Get offset of a field within a structure.
Definition: stddef.h:24
static void mpopulate(void *start, size_t len)
Add memory to allocation pool.
Definition: malloc.c:662
static void valgrind_make_blocks_noaccess(void)
Mark all blocks in free list as inaccessible.
Definition: malloc.c:158
const char * name
Definition: init.h:42
static __always_inline unsigned long virt_to_phys(volatile const void *addr)
Convert virtual address to a physical address.
Definition: uaccess.h:361
A doubly-linked list entry (or list head)
Definition: list.h:18
Dynamic memory allocation.
A reference counter.
Definition: refcnt.h:26
static char heap[HEAP_SIZE]
The heap itself.
Definition: malloc.c:111
uint32_t start
Starting offset.
Definition: netvsc.h:12
unsigned long tmp
Definition: linux_pci.h:63
struct list_head list
List of free blocks.
Definition: malloc.c:59
A startup/shutdown function.
Definition: init.h:41
#define list_del(list)
Delete an entry from a list.
Definition: list.h:119
void * memcpy(void *dest, const void *src, size_t len) __nonnull
An initialisation function.
Definition: init.h:14
size_t freemem
Total amount of free memory.
Definition: malloc.c:95
assert((readw(&hdr->flags) &(GTF_reading|GTF_writing))==0)
#define container_of(ptr, type, field)
Get containing structure.
Definition: stddef.h:35
static void check_blocks(void)
Check integrity of the blocks in the free list.
Definition: malloc.c:206
unsigned int(* discard)(void)
Discard some cached data.
Definition: malloc.h:88
#define list_for_each_entry(pos, head, member)
Iterate over entries in a list.
Definition: list.h:431
#define __unused
Declare a variable or data structure as unused.
Definition: compiler.h:573
#define list_add_tail(new, head)
Add a new entry to the tail of a list.
Definition: list.h:93
#define ASSERTING
Definition: assert.h:19
uint16_t count
Number of entries.
Definition: ena.h:22
#define list_for_each_entry_safe(pos, tmp, head, member)
Iterate over entries in a list, safe against deletion of the current entry.
Definition: list.h:458
Linked lists.
size_t size
Size of this block.
Definition: malloc.c:69
struct init_fn heap_init_fn __init_fn(INIT_EARLY)
Memory allocator initialisation function.
void * zalloc(size_t size)
Allocate cleared memory.
Definition: malloc.c:640
#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed)
Definition: valgrind.h:4411
static void discard_all_cache(void)
Discard all cached data.
Definition: malloc.c:263
#define MEMBLOCK_ALIGN
Physical address alignment maintained for free blocks of memory.
Definition: malloc.c:63
#define VALGRIND_MAKE_MEM_NOACCESS(_qzz_addr, _qzz_len)
Definition: memcheck.h:111
#define for_each_table_entry(pointer, table)
Iterate through all entries within a linker table.
Definition: tables.h:385
static LIST_HEAD(free_blocks)
List of free memory blocks.
A cache discarder.
Definition: malloc.h:82
void * alloc_memblock(size_t size, size_t align, size_t offset)
Allocate a memory block.
Definition: malloc.c:284
#define ASSERTED
Definition: assert.h:25
A free block of memory.
Definition: malloc.c:44
#define NOWHERE
Address for zero-length memory blocks.
Definition: malloc.c:89
void * malloc(size_t size)
Allocate memory.
Definition: malloc.c:599
#define HEAP_SIZE
Heap size.
Definition: malloc.c:108
#define list_check(list)
Check a list entry or list head is valid.
Definition: list.h:55
static unsigned int discard_cache(void)
Discard some cached data.
Definition: malloc.c:247
void free(void *ptr)
Free memory.
Definition: malloc.c:620
FILE_LICENCE(GPL2_OR_LATER_OR_UBDL)
#define DBGC2(...)
Definition: compiler.h:522
uint8_t block[3][8]
DES-encrypted blocks.
Definition: mschapv2.h:12
char pad[offsetof(struct refcnt, count)+sizeof(((struct refcnt *) NULL) ->count)]
Padding.
Definition: malloc.c:57
size_t size
Size of this block.
Definition: malloc.c:46
void mdumpfree(void)
Dump free block list (for debugging)
Definition: malloc.c:718
A block of allocated memory complete with size information.
Definition: malloc.c:67
Reference counting.
static void init_heap(void)
Initialise the heap.
Definition: malloc.c:688
uint8_t data[48]
Additional event data.
Definition: ena.h:22
static void valgrind_make_blocks_defined(void)
Mark all blocks in free list as defined.
Definition: malloc.c:117
void * realloc(void *old_ptr, size_t new_size)
Reallocate memory.
Definition: malloc.c:537
uint16_t offset
Offset to command line.
Definition: bzimage.h:8
struct startup_fn heap_startup_fn __startup_fn(STARTUP_EARLY)
Memory allocator shutdown function.
signed long ssize_t
Definition: stdint.h:7
#define VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr, _qzz_len)
Definition: memcheck.h:121
#define VALGRIND_FREELIKE_BLOCK(addr, rzB)
Definition: valgrind.h:4421
uint32_t len
Length.
Definition: ena.h:14
#define NULL
NULL pointer (VOID *)
Definition: Base.h:321
size_t maxusedmem
Maximum amount of used memory.
Definition: malloc.c:101
String functions.
struct bofm_section_header done
Definition: bofm_test.c:46
void dbg_printf(const char *fmt,...)
Print debug message.
Definition: debug.c:38
static void shutdown_cache(int booting __unused)
Discard all cached data on shutdown.
Definition: malloc.c:703
String functions.
size_t usedmem
Total amount of used memory.
Definition: malloc.c:98
void * memset(void *dest, int character, size_t len) __nonnull