iPXE
malloc.c
Go to the documentation of this file.
1 /*
2  * Copyright (C) 2006 Michael Brown <mbrown@fensystems.co.uk>.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License as
6  * published by the Free Software Foundation; either version 2 of the
7  * License, or any later version.
8  *
9  * This program is distributed in the hope that it will be useful, but
10  * WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12  * General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
17  * 02110-1301, USA.
18  *
19  * You can also choose to distribute this program under the terms of
20  * the Unmodified Binary Distribution Licence (as given in the file
21  * COPYING.UBDL), provided that you have satisfied its requirements.
22  */
23 
24 FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
25 
26 #include <stddef.h>
27 #include <stdint.h>
28 #include <string.h>
29 #include <ipxe/io.h>
30 #include <ipxe/list.h>
31 #include <ipxe/init.h>
32 #include <ipxe/refcnt.h>
33 #include <ipxe/malloc.h>
34 #include <valgrind/memcheck.h>
35 
36 /** @file
37  *
38  * Dynamic memory allocation
39  *
40  */
41 
/** A free block of memory */
struct memory_block {
	/** Size of this block
	 *
	 * Covers the whole block, including this header; always a
	 * multiple of the heap's free-block alignment.
	 */
	size_t size;
	/** Padding
	 *
	 * This padding exists to cover the "count" field of a
	 * reference counter, in the common case where a reference
	 * counter is the first element of a dynamically-allocated
	 * object.  It avoids clobbering the "count" field as soon as
	 * the memory is freed, and so allows for the possibility of
	 * detecting reference counting errors.
	 */
	char pad[ offsetof ( struct refcnt, count ) +
		  sizeof ( ( ( struct refcnt * ) NULL )->count ) ];
	/** List of free blocks */
	struct list_head list;
};
60 
61 /** Physical address alignment maintained for free blocks of memory
62  *
63  * We keep memory blocks aligned on a power of two that is at least
64  * large enough to hold a @c struct @c memory_block.
65  */
66 #define MIN_MEMBLOCK_ALIGN ( 4 * sizeof ( void * ) )
67 
/** A block of allocated memory complete with size information */
struct autosized_block {
	/** Size of this block (including this header) */
	size_t size;
	/** Remaining data */
	char data[0];
};
75 
76 /**
77  * Heap area size
78  *
79  * Currently fixed at 512kB.
80  */
81 #define HEAP_SIZE ( 512 * 1024 )
82 
83 /** Heap area alignment */
84 #define HEAP_ALIGN MIN_MEMBLOCK_ALIGN
85 
86 /** The heap area */
87 static char __attribute__ (( aligned ( HEAP_ALIGN ) )) heap_area[HEAP_SIZE];
88 
89 /**
90  * Mark all blocks in free list as defined
91  *
92  * @v heap Heap
93  */
94 static inline void valgrind_make_blocks_defined ( struct heap *heap ) {
95  struct memory_block *block;
96 
97  /* Do nothing unless running under Valgrind */
98  if ( RUNNING_ON_VALGRIND <= 0 )
99  return;
100 
101  /* Traverse free block list, marking each block structure as
102  * defined. Some contortions are necessary to avoid errors
103  * from list_check().
104  */
105 
106  /* Mark block list itself as defined */
107  VALGRIND_MAKE_MEM_DEFINED ( &heap->blocks, sizeof ( heap->blocks ) );
108 
109  /* Mark areas accessed by list_check() as defined */
111  sizeof ( heap->blocks.prev->next ) );
113  sizeof ( *heap->blocks.next ) );
115  sizeof ( heap->blocks.next->next->prev ) );
116 
117  /* Mark each block in list as defined */
119 
120  /* Mark block as defined */
121  VALGRIND_MAKE_MEM_DEFINED ( block, sizeof ( *block ) );
122 
123  /* Mark areas accessed by list_check() as defined */
124  VALGRIND_MAKE_MEM_DEFINED ( block->list.next,
125  sizeof ( *block->list.next ) );
126  VALGRIND_MAKE_MEM_DEFINED ( &block->list.next->next->prev,
127  sizeof ( block->list.next->next->prev ) );
128  }
129 }
130 
131 /**
132  * Mark all blocks in free list as inaccessible
133  *
134  * @v heap Heap
135  */
136 static inline void valgrind_make_blocks_noaccess ( struct heap *heap ) {
137  struct memory_block *block;
138  struct memory_block *prev = NULL;
139 
140  /* Do nothing unless running under Valgrind */
141  if ( RUNNING_ON_VALGRIND <= 0 )
142  return;
143 
144  /* Traverse free block list, marking each block structure as
145  * inaccessible. Some contortions are necessary to avoid
146  * errors from list_check().
147  */
148 
149  /* Mark each block in list as inaccessible */
151 
152  /* Mark previous block (if any) as inaccessible. (Current
153  * block will be accessed by list_check().)
154  */
155  if ( prev )
156  VALGRIND_MAKE_MEM_NOACCESS ( prev, sizeof ( *prev ) );
157  prev = block;
158 
159  /* At the end of the list, list_check() will end up
160  * accessing the first list item. Temporarily mark
161  * this area as defined.
162  */
164  sizeof ( heap->blocks.next->prev ));
165  }
166  /* Mark last block (if any) as inaccessible */
167  if ( prev )
168  VALGRIND_MAKE_MEM_NOACCESS ( prev, sizeof ( *prev ) );
169 
170  /* Mark as inaccessible the area that was temporarily marked
171  * as defined to avoid errors from list_check().
172  */
174  sizeof ( heap->blocks.next->prev ) );
175 
176  /* Mark block list itself as inaccessible */
177  VALGRIND_MAKE_MEM_NOACCESS ( &heap->blocks, sizeof ( heap->blocks ) );
178 }
179 
180 /**
181  * Check integrity of the blocks in the free list
182  *
183  * @v heap Heap
184  */
185 static inline void check_blocks ( struct heap *heap ) {
186  struct memory_block *block;
187  struct memory_block *prev = NULL;
188 
189  if ( ! ASSERTING )
190  return;
191 
193 
194  /* Check alignment */
195  assert ( ( virt_to_phys ( block ) &
196  ( heap->align - 1 ) ) == 0 );
197 
198  /* Check that list structure is intact */
199  list_check ( &block->list );
200 
201  /* Check that block size is not too small */
202  assert ( block->size >= sizeof ( *block ) );
203  assert ( block->size >= heap->align );
204 
205  /* Check that block does not wrap beyond end of address space */
206  assert ( ( ( void * ) block + block->size ) >
207  ( ( void * ) block ) );
208 
209  /* Check that blocks remain in ascending order, and
210  * that adjacent blocks have been merged.
211  */
212  if ( prev ) {
213  assert ( ( ( void * ) block ) > ( ( void * ) prev ) );
214  assert ( ( ( void * ) block ) >
215  ( ( ( void * ) prev ) + prev->size ) );
216  }
217  prev = block;
218  }
219 }
220 
221 /**
222  * Discard some cached data
223  *
224  * @v size Failed allocation size
225  * @ret discarded Number of cached items discarded
226  */
227 static unsigned int discard_cache ( size_t size __unused ) {
228  struct cache_discarder *discarder;
229  unsigned int discarded;
230 
231  for_each_table_entry ( discarder, CACHE_DISCARDERS ) {
232  discarded = discarder->discard();
233  if ( discarded )
234  return discarded;
235  }
236  return 0;
237 }
238 
/**
 * Discard all cached data
 *
 * Repeatedly invokes discard_cache() until no discarder can release
 * anything further.
 */
static void discard_all_cache ( void ) {

	/* Keep discarding until nothing more can be freed */
	while ( discard_cache ( 0 ) ) {
		/* Nothing further to do */
	}
}
250 
251 /**
252  * Allocate a memory block
253  *
254  * @v heap Heap
255  * @v size Requested size
256  * @v align Physical alignment
257  * @v offset Offset from physical alignment
258  * @ret ptr Memory block, or NULL
259  *
260  * Allocates a memory block @b physically aligned as requested. No
261  * guarantees are provided for the alignment of the virtual address.
262  *
263  * @c align must be a power of two. @c size may not be zero.
264  */
265 static void * heap_alloc_block ( struct heap *heap, size_t size, size_t align,
266  size_t offset ) {
267  struct memory_block *block;
268  size_t actual_offset;
269  size_t align_mask;
270  size_t actual_size;
271  size_t pre_size;
272  size_t post_size;
273  struct memory_block *pre;
274  struct memory_block *post;
275  unsigned int grown;
276  void *ptr;
277 
278  /* Sanity checks */
279  assert ( size != 0 );
280  assert ( ( align == 0 ) || ( ( align & ( align - 1 ) ) == 0 ) );
282  check_blocks ( heap );
283 
284  /* Limit offset to requested alignment */
285  offset &= ( align ? ( align - 1 ) : 0 );
286 
287  /* Calculate offset of memory block */
288  actual_offset = ( offset & ~( heap->align - 1 ) );
289  assert ( actual_offset <= offset );
290 
291  /* Calculate size of memory block */
292  actual_size = ( ( size + offset - actual_offset + heap->align - 1 )
293  & ~( heap->align - 1 ) );
294  if ( ! actual_size ) {
295  /* The requested size is not permitted to be zero. A
296  * zero result at this point indicates that either the
297  * original requested size was zero, or that unsigned
298  * integer overflow has occurred.
299  */
300  ptr = NULL;
301  goto done;
302  }
303  assert ( actual_size >= size );
304 
305  /* Calculate alignment mask */
306  align_mask = ( ( align - 1 ) | ( heap->align - 1 ) );
307 
308  DBGC2 ( heap, "HEAP allocating %#zx (aligned %#zx+%#zx)\n",
309  size, align, offset );
310  while ( 1 ) {
311  /* Search through blocks for the first one with enough space */
313  pre_size = ( ( actual_offset - virt_to_phys ( block ) )
314  & align_mask );
315  if ( ( block->size < pre_size ) ||
316  ( ( block->size - pre_size ) < actual_size ) )
317  continue;
318  post_size = ( block->size - pre_size - actual_size );
319  /* Split block into pre-block, block, and
320  * post-block. After this split, the "pre"
321  * block is the one currently linked into the
322  * free list.
323  */
324  pre = block;
325  block = ( ( ( void * ) pre ) + pre_size );
326  post = ( ( ( void * ) block ) + actual_size );
327  DBGC2 ( heap, "HEAP splitting [%p,%p) -> [%p,%p) "
328  "+ [%p,%p)\n", pre,
329  ( ( ( void * ) pre ) + pre->size ), pre, block,
330  post, ( ( ( void * ) pre ) + pre->size ) );
331  /* If there is a "post" block, add it in to
332  * the free list.
333  */
334  if ( post_size ) {
335  assert ( post_size >= sizeof ( *block ) );
336  assert ( ( post_size &
337  ( heap->align - 1 ) ) == 0 );
339  sizeof ( *post ));
340  post->size = post_size;
341  list_add ( &post->list, &pre->list );
342  }
343  /* Shrink "pre" block, leaving the main block
344  * isolated and no longer part of the free
345  * list.
346  */
347  pre->size = pre_size;
348  /* If there is no "pre" block, remove it from
349  * the list.
350  */
351  if ( ! pre_size ) {
352  list_del ( &pre->list );
354  sizeof ( *pre ) );
355  } else {
356  assert ( pre_size >= sizeof ( *block ) );
357  assert ( ( pre_size &
358  ( heap->align - 1 ) ) == 0 );
359  }
360  /* Update memory usage statistics */
361  heap->freemem -= actual_size;
362  heap->usedmem += actual_size;
363  if ( heap->usedmem > heap->maxusedmem )
365  /* Return allocated block */
366  ptr = ( ( ( void * ) block ) + offset - actual_offset );
367  DBGC2 ( heap, "HEAP allocated [%p,%p) within "
368  "[%p,%p)\n", ptr, ( ptr + size ), block,
369  ( ( ( void * ) block ) + actual_size ) );
371  goto done;
372  }
373 
374  /* Attempt to grow heap to satisfy allocation */
375  DBGC ( heap, "HEAP attempting to grow for %#zx (aligned "
376  "%#zx+%zx), used %zdkB\n", size, align, offset,
377  ( heap->usedmem >> 10 ) );
379  grown = ( heap->grow ? heap->grow ( actual_size ) : 0 );
381  check_blocks ( heap );
382  if ( ! grown ) {
383  /* Heap did not grow: fail allocation */
384  DBGC ( heap, "HEAP failed to allocate %#zx (aligned "
385  "%#zx)\n", size, align );
386  ptr = NULL;
387  goto done;
388  }
389  }
390 
391  done:
392  check_blocks ( heap );
394  return ptr;
395 }
396 
397 /**
398  * Free a memory block
399  *
400  * @v heap Heap
401  * @v ptr Memory allocated by heap_alloc_block(), or NULL
402  * @v size Size of the memory
403  *
404  * If @c ptr is NULL, no action is taken.
405  */
406 static void heap_free_block ( struct heap *heap, void *ptr, size_t size ) {
407  struct memory_block *freeing;
408  struct memory_block *block;
409  struct memory_block *tmp;
410  size_t sub_offset;
411  size_t actual_size;
412  ssize_t gap_before;
413  ssize_t gap_after = -1;
414 
415  /* Allow for ptr==NULL */
416  if ( ! ptr )
417  return;
419 
420  /* Sanity checks */
422  check_blocks ( heap );
423 
424  /* Round up to match actual block that heap_alloc_block() would
425  * have allocated.
426  */
427  assert ( size != 0 );
428  sub_offset = ( virt_to_phys ( ptr ) & ( heap->align - 1 ) );
429  freeing = ( ptr - sub_offset );
430  actual_size = ( ( size + sub_offset + heap->align - 1 ) &
431  ~( heap->align - 1 ) );
432  DBGC2 ( heap, "HEAP freeing [%p,%p) within [%p,%p)\n",
433  ptr, ( ptr + size ), freeing,
434  ( ( ( void * ) freeing ) + actual_size ) );
435  VALGRIND_MAKE_MEM_UNDEFINED ( freeing, sizeof ( *freeing ) );
436 
437  /* Check that this block does not overlap the free list */
438  if ( ASSERTING ) {
440  if ( ( ( ( void * ) block ) <
441  ( ( void * ) freeing + actual_size ) ) &&
442  ( ( void * ) freeing <
443  ( ( void * ) block + block->size ) ) ) {
444  assert ( 0 );
445  DBGC ( heap, "HEAP double free of [%p,%p) "
446  "overlapping [%p,%p) detected from %p\n",
447  freeing,
448  ( ( ( void * ) freeing ) + size ), block,
449  ( ( void * ) block + block->size ),
450  __builtin_return_address ( 0 ) );
451  }
452  }
453  }
454 
455  /* Insert/merge into free list */
456  freeing->size = actual_size;
458  /* Calculate gaps before and after the "freeing" block */
459  gap_before = ( ( ( void * ) freeing ) -
460  ( ( ( void * ) block ) + block->size ) );
461  gap_after = ( ( ( void * ) block ) -
462  ( ( ( void * ) freeing ) + freeing->size ) );
463  /* Merge with immediately preceding block, if possible */
464  if ( gap_before == 0 ) {
465  DBGC2 ( heap, "HEAP merging [%p,%p) + [%p,%p) -> "
466  "[%p,%p)\n", block,
467  ( ( ( void * ) block ) + block->size ), freeing,
468  ( ( ( void * ) freeing ) + freeing->size ),
469  block,
470  ( ( ( void * ) freeing ) + freeing->size ) );
471  block->size += actual_size;
472  list_del ( &block->list );
473  VALGRIND_MAKE_MEM_NOACCESS ( freeing,
474  sizeof ( *freeing ) );
475  freeing = block;
476  }
477  /* Stop processing as soon as we reach a following block */
478  if ( gap_after >= 0 )
479  break;
480  }
481 
482  /* Insert before the immediately following block. If
483  * possible, merge the following block into the "freeing"
484  * block.
485  */
486  DBGC2 ( heap, "HEAP freed [%p,%p)\n",
487  freeing, ( ( ( void * ) freeing ) + freeing->size ) );
488  list_add_tail ( &freeing->list, &block->list );
489  if ( gap_after == 0 ) {
490  DBGC2 ( heap, "HEAP merging [%p,%p) + [%p,%p) -> [%p,%p)\n",
491  freeing, ( ( ( void * ) freeing ) + freeing->size ),
492  block, ( ( ( void * ) block ) + block->size ), freeing,
493  ( ( ( void * ) block ) + block->size ) );
494  freeing->size += block->size;
495  list_del ( &block->list );
496  VALGRIND_MAKE_MEM_NOACCESS ( block, sizeof ( *block ) );
497  }
498 
499  /* Update memory usage statistics */
500  heap->freemem += actual_size;
501  heap->usedmem -= actual_size;
502 
503  /* Allow heap to shrink */
504  if ( heap->shrink && heap->shrink ( freeing, freeing->size ) ) {
505  list_del ( &freeing->list );
506  heap->freemem -= freeing->size;
507  VALGRIND_MAKE_MEM_UNDEFINED ( freeing, freeing->size );
508  }
509 
510  /* Sanity checks */
511  check_blocks ( heap );
513 }
514 
/**
 * Reallocate memory
 *
 * @v heap		Heap
 * @v old_ptr		Memory previously allocated by heap_realloc(), or NULL
 * @v new_size		Requested size
 * @ret new_ptr		Allocated memory, or NULL
 *
 * Allocates memory with no particular alignment requirement.  @c
 * new_ptr will be aligned to at least a multiple of sizeof(void*).
 * If @c old_ptr is non-NULL, then the contents of the newly allocated
 * memory will be the same as the contents of the previously allocated
 * memory, up to the minimum of the old and new sizes.  The old memory
 * will be freed.
 *
 * If allocation fails the previously allocated block is left
 * untouched and NULL is returned.
 *
 * Calling heap_realloc() with a new size of zero is a valid way to
 * free a memory block.
 */
void * heap_realloc ( struct heap *heap, void *old_ptr, size_t new_size ) {
	struct autosized_block *old_block;
	struct autosized_block *new_block;
	size_t old_total_size;
	size_t new_total_size;
	size_t old_size;
	/* Header overhead prepended to every size-tracked allocation */
	size_t offset = offsetof ( struct autosized_block, data );
	void *new_ptr = NOWHERE;

	/* Allocate new memory if necessary.  If allocation fails,
	 * return without touching the old block.
	 */
	if ( new_size ) {
		new_total_size = ( new_size + offset );
		/* Detect unsigned overflow in (new_size + offset) */
		if ( new_total_size < new_size )
			return NULL;
		/* Negative offset aligns the data area (rather than
		 * the header) to ptr_align.
		 */
		new_block = heap_alloc_block ( heap, new_total_size,
					       heap->ptr_align, -offset );
		if ( ! new_block )
			return NULL;
		new_block->size = new_total_size;
		/* Hide the size header from valgrind-checked code */
		VALGRIND_MAKE_MEM_NOACCESS ( &new_block->size,
					     sizeof ( new_block->size ) );
		new_ptr = &new_block->data;
		VALGRIND_MALLOCLIKE_BLOCK ( new_ptr, new_size, 0, 0 );
		assert ( ( ( ( intptr_t ) new_ptr ) &
			   ( heap->ptr_align - 1 ) ) == 0 );
	}

	/* Copy across relevant part of the old data region (if any),
	 * then free it.  Note that at this point either (a) new_ptr
	 * is valid, or (b) new_size is 0; either way, the memcpy() is
	 * valid.
	 */
	if ( old_ptr && ( old_ptr != NOWHERE ) ) {
		old_block = container_of ( old_ptr, struct autosized_block,
					   data );
		/* Re-expose the hidden size header before reading it */
		VALGRIND_MAKE_MEM_DEFINED ( &old_block->size,
					    sizeof ( old_block->size ) );
		old_total_size = old_block->size;
		assert ( old_total_size != 0 );
		old_size = ( old_total_size - offset );
		memcpy ( new_ptr, old_ptr,
			 ( ( old_size < new_size ) ? old_size : new_size ) );
		VALGRIND_FREELIKE_BLOCK ( old_ptr, 0 );
		heap_free_block ( heap, old_block, old_total_size );
	}

	/* Report any assertion failure recorded during this call */
	if ( ASSERTED ) {
		DBGC ( heap, "HEAP detected possible memory corruption "
		       "from %p\n", __builtin_return_address ( 0 ) );
	}
	return new_ptr;
}
590 
591 /** The global heap */
592 static struct heap heap = {
594  .align = MIN_MEMBLOCK_ALIGN,
595  .ptr_align = sizeof ( void * ),
596  .grow = discard_cache,
597 };
598 
/**
 * Reallocate memory
 *
 * @v old_ptr		Memory previously allocated by malloc(), or NULL
 * @v new_size		Requested size
 * @ret new_ptr		Allocated memory, or NULL
 */
void * realloc ( void *old_ptr, size_t new_size ) {

	/* Delegate to heap_realloc() on the global heap */
	return heap_realloc ( &heap, old_ptr, new_size );
}
610 
611 /**
612  * Allocate memory
613  *
614  * @v size Requested size
615  * @ret ptr Memory, or NULL
616  *
617  * Allocates memory with no particular alignment requirement. @c ptr
618  * will be aligned to at least a multiple of sizeof(void*).
619  */
620 void * malloc ( size_t size ) {
621  void *ptr;
622 
623  ptr = realloc ( NULL, size );
624  if ( ASSERTED ) {
625  DBGC ( &heap, "HEAP detected possible memory corruption "
626  "from %p\n", __builtin_return_address ( 0 ) );
627  }
628  return ptr;
629 }
630 
/**
 * Free memory
 *
 * @v ptr		Memory allocated by malloc(), or NULL
 *
 * Memory allocated with malloc_phys() cannot be freed with free(); it
 * must be freed with free_phys() instead.
 *
 * If @c ptr is NULL, no action is taken.
 */
void free ( void *ptr ) {

	/* Freeing is implemented as reallocation to zero size */
	realloc ( ptr, 0 );

	/* Report any assertion failure recorded during the free */
	if ( ASSERTED ) {
		DBGC ( &heap, "HEAP detected possible memory corruption "
		       "from %p\n", __builtin_return_address ( 0 ) );
	}
}
649 
650 /**
651  * Allocate cleared memory
652  *
653  * @v size Requested size
654  * @ret ptr Allocated memory
655  *
656  * Allocate memory as per malloc(), and zero it.
657  *
658  * This function name is non-standard, but pretty intuitive.
659  * zalloc(size) is always equivalent to calloc(1,size)
660  */
661 void * zalloc ( size_t size ) {
662  void *data;
663 
664  data = malloc ( size );
665  if ( data )
666  memset ( data, 0, size );
667  if ( ASSERTED ) {
668  DBGC ( &heap, "HEAP detected possible memory corruption "
669  "from %p\n", __builtin_return_address ( 0 ) );
670  }
671  return data;
672 }
673 
674 /**
675  * Allocate memory with specified physical alignment and offset
676  *
677  * @v size Requested size
678  * @v align Physical alignment
679  * @v offset Offset from physical alignment
680  * @ret ptr Memory, or NULL
681  *
682  * @c align must be a power of two. @c size may not be zero.
683  */
684 void * malloc_phys_offset ( size_t size, size_t phys_align, size_t offset ) {
685  void * ptr;
686 
687  ptr = heap_alloc_block ( &heap, size, phys_align, offset );
688  if ( ptr && size ) {
689  assert ( ( phys_align == 0 ) ||
690  ( ( ( virt_to_phys ( ptr ) ^ offset ) &
691  ( phys_align - 1 ) ) == 0 ) );
692  VALGRIND_MALLOCLIKE_BLOCK ( ptr, size, 0, 0 );
693  }
694  return ptr;
695 }
696 
/**
 * Allocate memory with specified physical alignment
 *
 * @v size		Requested size
 * @v phys_align	Physical alignment
 * @ret ptr		Memory, or NULL
 *
 * @c align must be a power of two.  @c size may not be zero.
 */
void * malloc_phys ( size_t size, size_t phys_align ) {

	/* Equivalent to a zero offset from the requested alignment */
	return malloc_phys_offset ( size, phys_align, 0 );
}
710 
/**
 * Free memory allocated with malloc_phys()
 *
 * @v ptr		Memory allocated by malloc_phys(), or NULL
 * @v size		Size of memory, as passed to malloc_phys()
 *
 * Memory allocated with malloc_phys() can only be freed with
 * free_phys(); it cannot be freed with the standard free().
 *
 * If @c ptr is NULL, no action is taken.
 */
void free_phys ( void *ptr, size_t size ) {

	/* Tell valgrind about the free before the block is recycled */
	VALGRIND_FREELIKE_BLOCK ( ptr, 0 );
	heap_free_block ( &heap, ptr, size );
}
727 
728 /**
729  * Add memory to allocation pool
730  *
731  * @v heap Heap
732  * @v start Start address
733  * @v len Length of memory
734  *
735  * Adds a block of memory to the allocation pool. The memory must be
736  * aligned to the heap's required free memory block alignment.
737  */
738 void heap_populate ( struct heap *heap, void *start, size_t len ) {
739 
740  /* Sanity checks */
741  assert ( ( virt_to_phys ( start ) & ( heap->align - 1 ) ) == 0 );
742  assert ( ( len & ( heap->align - 1 ) ) == 0 );
743 
744  /* Add to allocation pool */
746 
747  /* Fix up memory usage statistics */
748  heap->usedmem += len;
749 }
750 
751 /**
752  * Initialise the heap
753  *
754  */
755 static void init_heap ( void ) {
756 
757  /* Sanity check */
758  build_assert ( MIN_MEMBLOCK_ALIGN >= sizeof ( struct memory_block ) );
759 
760  /* Populate heap */
763  heap_populate ( &heap, heap_area, sizeof ( heap_area ) );
764 }
765 
/** Memory allocator initialisation function (runs at INIT_EARLY) */
struct init_fn heap_init_fn __init_fn ( INIT_EARLY ) = {
	.name = "heap",
	.initialise = init_heap,
};
771 
772 /**
773  * Discard all cached data on shutdown
774  *
775  */
776 static void shutdown_cache ( int booting __unused ) {
778  DBGC ( &heap, "HEAP maximum usage %zdkB\n",
779  ( heap.maxusedmem >> 10 ) );
780 }
781 
/** Memory allocator shutdown function (runs at STARTUP_EARLY) */
struct startup_fn heap_startup_fn __startup_fn ( STARTUP_EARLY ) = {
	.name = "heap",
	.shutdown = shutdown_cache,
};
787 
788 /**
789  * Dump free block list (for debugging)
790  *
791  */
792 void heap_dump ( struct heap *heap ) {
793  struct memory_block *block;
794 
795  dbg_printf ( "HEAP free block list:\n" );
797  dbg_printf ( "...[%p,%p] (size %#zx)\n", block,
798  ( ( ( void * ) block ) + block->size ),
799  block->size );
800  }
801 }
#define __attribute__(x)
Definition: compiler.h:10
iPXE I/O API
#define NOWHERE
Address for zero-length memory blocks.
Definition: malloc.h:41
unsigned int(* shrink)(void *ptr, size_t size)
Allow heap to shrink (optional)
Definition: malloc.h:78
static unsigned int discard_cache(size_t size __unused)
Discard some cached data.
Definition: malloc.c:227
#define list_add(new, head)
Add a new entry to the head of a list.
Definition: list.h:69
struct list_head * next
Next list entry.
Definition: list.h:20
#define INIT_EARLY
Early initialisation.
Definition: init.h:29
#define STARTUP_EARLY
Early startup.
Definition: init.h:63
#define VALGRIND_MAKE_MEM_DEFINED(_qzz_addr, _qzz_len)
Definition: memcheck.h:131
uint16_t size
Buffer size.
Definition: dwmac.h:14
char data[0]
Remaining data.
Definition: malloc.c:73
#define DBGC(...)
Definition: compiler.h:505
#define CACHE_DISCARDERS
Cache discarder table.
Definition: malloc.h:102
static void * heap_alloc_block(struct heap *heap, size_t size, size_t align, size_t offset)
Allocate a memory block.
Definition: malloc.c:265
static char heap_area[HEAP_SIZE]
The heap area.
Definition: malloc.c:87
#define HEAP_ALIGN
Heap area alignment.
Definition: malloc.c:84
#define RUNNING_ON_VALGRIND
Definition: valgrind.h:4175
#define offsetof(type, field)
Get offset of a field within a structure.
Definition: stddef.h:24
unsigned long intptr_t
Definition: stdint.h:21
const char * name
Definition: init.h:43
A doubly-linked list entry (or list head)
Definition: list.h:18
Dynamic memory allocation.
A reference counter.
Definition: refcnt.h:26
uint32_t start
Starting offset.
Definition: netvsc.h:12
unsigned long tmp
Definition: linux_pci.h:64
struct list_head list
List of free blocks.
Definition: malloc.c:58
A startup/shutdown function.
Definition: init.h:42
#define list_del(list)
Delete an entry from a list.
Definition: list.h:119
unsigned int(* grow)(size_t size)
Attempt to grow heap (optional)
Definition: malloc.h:66
void * memcpy(void *dest, const void *src, size_t len) __nonnull
An initialisation function.
Definition: init.h:14
assert((readw(&hdr->flags) &(GTF_reading|GTF_writing))==0)
#define container_of(ptr, type, field)
Get containing structure.
Definition: stddef.h:35
static void valgrind_make_blocks_noaccess(struct heap *heap)
Mark all blocks in free list as inaccessible.
Definition: malloc.c:136
unsigned int(* discard)(void)
Discard some cached data.
Definition: malloc.h:98
#define list_for_each_entry(pos, head, member)
Iterate over entries in a list.
Definition: list.h:431
#define __unused
Declare a variable or data structure as unused.
Definition: compiler.h:573
#define list_add_tail(new, head)
Add a new entry to the tail of a list.
Definition: list.h:93
ring len
Length.
Definition: dwmac.h:231
void heap_populate(struct heap *heap, void *start, size_t len)
Add memory to allocation pool.
Definition: malloc.c:738
const char * name
Definition: init.h:15
#define build_assert(condition)
Assert a condition at build time (after dead code elimination)
Definition: assert.h:76
#define ASSERTING
Definition: assert.h:19
void * malloc_phys_offset(size_t size, size_t phys_align, size_t offset)
Allocate memory with specified physical alignment and offset.
Definition: malloc.c:684
static unsigned int count
Number of entries.
Definition: dwmac.h:225
static void valgrind_make_blocks_defined(struct heap *heap)
Mark all blocks in free list as defined.
Definition: malloc.c:94
size_t ptr_align
Alignment for size-tracked allocations.
Definition: malloc.h:51
#define list_for_each_entry_safe(pos, tmp, head, member)
Iterate over entries in a list, safe against deletion of the current entry.
Definition: list.h:458
Linked lists.
size_t size
Size of this block.
Definition: malloc.c:71
struct init_fn heap_init_fn __init_fn(INIT_EARLY)
Memory allocator initialisation function.
void * zalloc(size_t size)
Allocate cleared memory.
Definition: malloc.c:661
#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed)
Definition: valgrind.h:4411
static void discard_all_cache(void)
Discard all cached data.
Definition: malloc.c:243
#define VALGRIND_MAKE_MEM_NOACCESS(_qzz_addr, _qzz_len)
Definition: memcheck.h:111
#define for_each_table_entry(pointer, table)
Iterate through all entries within a linker table.
Definition: tables.h:385
#define MIN_MEMBLOCK_ALIGN
Physical address alignment maintained for free blocks of memory.
Definition: malloc.c:66
size_t maxusedmem
Maximum amount of used memory.
Definition: malloc.h:58
A cache discarder.
Definition: malloc.h:92
#define ASSERTED
Definition: assert.h:25
size_t freemem
Total amount of free memory.
Definition: malloc.h:54
void heap_dump(struct heap *heap)
Dump free block list (for debugging)
Definition: malloc.c:792
A free block of memory.
Definition: malloc.c:43
struct list_head blocks
List of free memory blocks.
Definition: malloc.h:46
size_t usedmem
Total amount of used memory.
Definition: malloc.h:56
void * malloc(size_t size)
Allocate memory.
Definition: malloc.c:620
#define HEAP_SIZE
Heap area size.
Definition: malloc.c:81
#define list_check(list)
Check a list entry or list head is valid.
Definition: list.h:55
void free(void *ptr)
Free memory.
Definition: malloc.c:641
FILE_LICENCE(GPL2_OR_LATER_OR_UBDL)
static void check_blocks(struct heap *heap)
Check integrity of the blocks in the free list.
Definition: malloc.c:185
struct list_head * prev
Previous list entry.
Definition: list.h:22
#define DBGC2(...)
Definition: compiler.h:522
uint8_t block[3][8]
DES-encrypted blocks.
Definition: mschapv2.h:12
char pad[offsetof(struct refcnt, count)+sizeof(((struct refcnt *) NULL) ->count)]
Padding.
Definition: malloc.c:56
A heap.
Definition: malloc.h:44
size_t size
Size of this block.
Definition: malloc.c:45
void free_phys(void *ptr, size_t size)
Free memory allocated with malloc_phys()
Definition: malloc.c:722
A block of allocated memory complete with size information.
Definition: malloc.c:69
Reference counting.
static void init_heap(void)
Initialise the heap.
Definition: malloc.c:755
static void heap_free_block(struct heap *heap, void *ptr, size_t size)
Free a memory block.
Definition: malloc.c:406
uint8_t data[48]
Additional event data.
Definition: ena.h:22
void * realloc(void *old_ptr, size_t new_size)
Reallocate memory.
Definition: malloc.c:606
struct startup_fn heap_startup_fn __startup_fn(STARTUP_EARLY)
Memory allocator shutdown function.
signed long ssize_t
Definition: stdint.h:7
uint16_t offset
Offset to command line.
Definition: bzimage.h:8
#define VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr, _qzz_len)
Definition: memcheck.h:121
#define LIST_HEAD_INIT(list)
Initialise a static list head.
Definition: list.h:30
#define VALGRIND_FREELIKE_BLOCK(addr, rzB)
Definition: valgrind.h:4421
size_t align
Alignment for free memory blocks.
Definition: malloc.h:49
#define NULL
NULL pointer (VOID *)
Definition: Base.h:321
String functions.
void * heap_realloc(struct heap *heap, void *old_ptr, size_t new_size)
Reallocate memory.
Definition: malloc.c:536
struct bofm_section_header done
Definition: bofm_test.c:46
void dbg_printf(const char *fmt,...)
Print debug message.
Definition: debug.c:38
static void shutdown_cache(int booting __unused)
Discard all cached data on shutdown.
Definition: malloc.c:776
void * malloc_phys(size_t size, size_t phys_align)
Allocate memory with specified physical alignment.
Definition: malloc.c:706
void * memset(void *dest, int character, size_t len) __nonnull