iPXE
malloc.c
/*
 * Copyright (C) 2006 Michael Brown <mbrown@fensystems.co.uk>.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 *
 * You can also choose to distribute this program under the terms of
 * the Unmodified Binary Distribution Licence (as given in the file
 * COPYING.UBDL), provided that you have satisfied its requirements.
 */

FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );

#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <strings.h>
#include <ipxe/io.h>
#include <ipxe/list.h>
#include <ipxe/init.h>
#include <ipxe/refcnt.h>
#include <ipxe/malloc.h>
#include <valgrind/memcheck.h>

/** @file
 *
 * Dynamic memory allocation
 *
 */

/** A free block of memory */
struct memory_block {
        /** Size of this block */
        size_t size;
        /** Padding
         *
         * This padding exists to cover the "count" field of a
         * reference counter, in the common case where a reference
         * counter is the first element of a dynamically-allocated
         * object.  It avoids clobbering the "count" field as soon as
         * the memory is freed, and so allows for the possibility of
         * detecting reference counting errors.
         */
        char pad[ offsetof ( struct refcnt, count ) +
                  sizeof ( ( ( struct refcnt * ) NULL )->count ) ];
        /** List of free blocks */
        struct list_head list;
};

#define MIN_MEMBLOCK_SIZE \
        ( ( size_t ) ( 1 << ( fls ( sizeof ( struct memory_block ) - 1 ) ) ) )
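
/* Illustrative sketch (not part of the original source): the "pad"
 * field above guarantees that the only fields written when a block
 * joins the free list ("size" and "list") steer clear of the
 * reference counter's "count" field, while MIN_MEMBLOCK_SIZE rounds
 * the structure size up to a power of two.  A hypothetical
 * compile-time check of the layout property, assuming a C11-capable
 * compiler, might look like this:
 */
#if 0
_Static_assert ( offsetof ( struct memory_block, list ) >=
                 ( offsetof ( struct refcnt, count ) +
                   sizeof ( ( ( struct refcnt * ) NULL )->count ) ),
                 "freeing memory must not clobber refcnt.count" );
#endif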

/** A block of allocated memory complete with size information */
struct autosized_block {
        /** Size of this block */
        size_t size;
        /** Remaining data */
        char data[0];
};
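
/* Illustrative sketch (not part of the original source): the size
 * header stored in an autosized block is what allows free() and
 * realloc() below to work without a caller-supplied size.  A minimal
 * sketch of how the header is recovered from a data pointer:
 */
#if 0
static size_t example_stored_size ( void *ptr ) {
        struct autosized_block *block =
                container_of ( ptr, struct autosized_block, data );

        /* Total size includes the header itself */
        return block->size;
}
#endif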

/**
 * Address for zero-length memory blocks
 *
 * @c malloc(0) or @c realloc(ptr,0) will return the special value @c
 * NOWHERE.  Calling @c free(NOWHERE) will have no effect.
 *
 * This is consistent with the ANSI C standard, which states that
 * "either NULL or a pointer suitable to be passed to free()" must be
 * returned in these cases.  Using a special non-NULL value means that
 * the caller can take a NULL return value to indicate failure,
 * without first having to check for a requested size of zero.
 *
 * Code outside of malloc.c never needs to refer to the actual value
 * of @c NOWHERE; this is an internal definition.
 */
#define NOWHERE ( ( void * ) ~( ( intptr_t ) 0 ) )
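
/* Illustrative sketch (not part of the original source): from a
 * caller's point of view the special value matters only in that a
 * zero-length allocation still yields a non-NULL pointer, so NULL can
 * be treated unambiguously as failure:
 */
#if 0
static void example_zero_length ( void ) {
        void *ptr = malloc ( 0 );

        /* Returns NOWHERE (non-NULL) rather than NULL */
        assert ( ptr != NULL );

        /* Freeing it is harmless */
        free ( ptr );
}
#endif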

/** List of free memory blocks */
static LIST_HEAD ( free_blocks );

/** Total amount of free memory */
size_t freemem;

/** Total amount of used memory */
size_t usedmem;

/** Maximum amount of used memory */
size_t maxusedmem;

/**
 * Heap size
 *
 * Currently fixed at 512kB.
 */
#define HEAP_SIZE ( 512 * 1024 )

/** The heap itself */
static char heap[HEAP_SIZE] __attribute__ (( aligned ( __alignof__(void *) )));

/**
 * Mark all blocks in free list as defined
 *
 */
static inline void valgrind_make_blocks_defined ( void ) {
        struct memory_block *block;

        /* Do nothing unless running under Valgrind */
        if ( RUNNING_ON_VALGRIND <= 0 )
                return;

        /* Traverse free block list, marking each block structure as
         * defined.  Some contortions are necessary to avoid errors
         * from list_check().
         */

        /* Mark block list itself as defined */
        VALGRIND_MAKE_MEM_DEFINED ( &free_blocks, sizeof ( free_blocks ) );

        /* Mark areas accessed by list_check() as defined */
        VALGRIND_MAKE_MEM_DEFINED ( &free_blocks.prev->next,
                                    sizeof ( free_blocks.prev->next ) );
        VALGRIND_MAKE_MEM_DEFINED ( free_blocks.next,
                                    sizeof ( *free_blocks.next ) );
        VALGRIND_MAKE_MEM_DEFINED ( &free_blocks.next->next->prev,
                                    sizeof ( free_blocks.next->next->prev ) );

        /* Mark each block in list as defined */
        list_for_each_entry ( block, &free_blocks, list ) {

                /* Mark block as defined */
                VALGRIND_MAKE_MEM_DEFINED ( block, sizeof ( *block ) );

                /* Mark areas accessed by list_check() as defined */
                VALGRIND_MAKE_MEM_DEFINED ( block->list.next,
                                            sizeof ( *block->list.next ) );
                VALGRIND_MAKE_MEM_DEFINED ( &block->list.next->next->prev,
                                      sizeof ( block->list.next->next->prev ) );
        }
}

/**
 * Mark all blocks in free list as inaccessible
 *
 */
static inline void valgrind_make_blocks_noaccess ( void ) {
        struct memory_block *block;
        struct memory_block *prev = NULL;

        /* Do nothing unless running under Valgrind */
        if ( RUNNING_ON_VALGRIND <= 0 )
                return;

        /* Traverse free block list, marking each block structure as
         * inaccessible.  Some contortions are necessary to avoid
         * errors from list_check().
         */

        /* Mark each block in list as inaccessible */
        list_for_each_entry ( block, &free_blocks, list ) {

                /* Mark previous block (if any) as inaccessible. (Current
                 * block will be accessed by list_check().)
                 */
                if ( prev )
                        VALGRIND_MAKE_MEM_NOACCESS ( prev, sizeof ( *prev ) );
                prev = block;

                /* At the end of the list, list_check() will end up
                 * accessing the first list item.  Temporarily mark
                 * this area as defined.
                 */
                VALGRIND_MAKE_MEM_DEFINED ( &free_blocks.next->prev,
                                            sizeof ( free_blocks.next->prev ) );
        }
        /* Mark last block (if any) as inaccessible */
        if ( prev )
                VALGRIND_MAKE_MEM_NOACCESS ( prev, sizeof ( *prev ) );

        /* Mark as inaccessible the area that was temporarily marked
         * as defined to avoid errors from list_check().
         */
        VALGRIND_MAKE_MEM_NOACCESS ( &free_blocks.next->prev,
                                     sizeof ( free_blocks.next->prev ) );

        /* Mark block list itself as inaccessible */
        VALGRIND_MAKE_MEM_NOACCESS ( &free_blocks, sizeof ( free_blocks ) );
}

/**
 * Check integrity of the blocks in the free list
 *
 */
static inline void check_blocks ( void ) {
        struct memory_block *block;
        struct memory_block *prev = NULL;

        if ( ! ASSERTING )
                return;

        list_for_each_entry ( block, &free_blocks, list ) {

                /* Check that list structure is intact */
                list_check ( &block->list );

                /* Check that block size is not too small */
                assert ( block->size >= sizeof ( *block ) );
                assert ( block->size >= MIN_MEMBLOCK_SIZE );

                /* Check that block does not wrap beyond end of address space */
                assert ( ( ( void * ) block + block->size ) >
                         ( ( void * ) block ) );

                /* Check that blocks remain in ascending order, and
                 * that adjacent blocks have been merged.
                 */
                if ( prev ) {
                        assert ( ( ( void * ) block ) > ( ( void * ) prev ) );
                        assert ( ( ( void * ) block ) >
                                 ( ( ( void * ) prev ) + prev->size ) );
                }
                prev = block;
        }
}

/**
 * Discard some cached data
 *
 * @ret discarded       Number of cached items discarded
 */
static unsigned int discard_cache ( void ) {
        struct cache_discarder *discarder;
        unsigned int discarded;

        for_each_table_entry ( discarder, CACHE_DISCARDERS ) {
                discarded = discarder->discard();
                if ( discarded )
                        return discarded;
        }
        return 0;
}

/**
 * Discard all cached data
 *
 */
static void discard_all_cache ( void ) {
        unsigned int discarded;

        do {
                discarded = discard_cache();
        } while ( discarded );
}
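
/* Illustrative sketch (not part of the original source): a subsystem
 * makes its cached data reclaimable under memory pressure by adding
 * an entry to the CACHE_DISCARDERS table.  The discard function and
 * cost level below are placeholders only:
 */
#if 0
static unsigned int example_discard ( void ) {
        /* Free at most one cached item and report how many were freed */
        return 0;
}

struct cache_discarder example_discarder __cache_discarder ( CACHE_NORMAL ) = {
        .discard = example_discard,
};
#endif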

/**
 * Allocate a memory block
 *
 * @v size              Requested size
 * @v align             Physical alignment
 * @v offset            Offset from physical alignment
 * @ret ptr             Memory block, or NULL
 *
 * Allocates a memory block @b physically aligned as requested.  No
 * guarantees are provided for the alignment of the virtual address.
 *
 * @c align must be a power of two.  @c size may not be zero.
 */
void * alloc_memblock ( size_t size, size_t align, size_t offset ) {
        struct memory_block *block;
        size_t align_mask;
        size_t actual_size;
        size_t pre_size;
        size_t post_size;
        struct memory_block *pre;
        struct memory_block *post;
        unsigned int discarded;
        void *ptr;

        /* Sanity checks */
        assert ( size != 0 );
        assert ( ( align == 0 ) || ( ( align & ( align - 1 ) ) == 0 ) );
        valgrind_make_blocks_defined();
        check_blocks();

        /* Round up size to multiple of MIN_MEMBLOCK_SIZE and
         * calculate alignment mask.
         */
        actual_size = ( ( size + MIN_MEMBLOCK_SIZE - 1 ) &
                        ~( MIN_MEMBLOCK_SIZE - 1 ) );
        if ( ! actual_size ) {
                /* The requested size is not permitted to be zero.  A
                 * zero result at this point indicates that either the
                 * original requested size was zero, or that unsigned
                 * integer overflow has occurred.
                 */
                ptr = NULL;
                goto done;
        }
        assert ( actual_size >= size );
        align_mask = ( ( align - 1 ) | ( MIN_MEMBLOCK_SIZE - 1 ) );
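        /* Worked example (illustrative values only): on a build where
         * MIN_MEMBLOCK_SIZE came to 0x40, a request for size=0x58 with
         * align=0x100 and offset=0 would give actual_size=0x80 and
         * align_mask=0xff, i.e. the block must start on a 0x100-byte
         * physical boundary and occupies two minimum-sized units.
         */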

        DBGC2 ( &heap, "Allocating %#zx (aligned %#zx+%zx)\n",
                size, align, offset );
        while ( 1 ) {
                /* Search through blocks for the first one with enough space */
                list_for_each_entry ( block, &free_blocks, list ) {
                        pre_size = ( ( offset - virt_to_phys ( block ) )
                                     & align_mask );
                        if ( ( block->size < pre_size ) ||
                             ( ( block->size - pre_size ) < actual_size ) )
                                continue;
                        post_size = ( block->size - pre_size - actual_size );
                        /* Split block into pre-block, block, and
                         * post-block.  After this split, the "pre"
                         * block is the one currently linked into the
                         * free list.
                         */
                        pre   = block;
                        block = ( ( ( void * ) pre   ) + pre_size );
                        post  = ( ( ( void * ) block ) + actual_size );
                        DBGC2 ( &heap, "[%p,%p) -> [%p,%p) + [%p,%p)\n", pre,
                                ( ( ( void * ) pre ) + pre->size ), pre, block,
                                post, ( ( ( void * ) pre ) + pre->size ) );
                        /* If there is a "post" block, add it in to
                         * the free list.  Leak it if it is too small
                         * (which can happen only at the very end of
                         * the heap).
                         */
                        if ( post_size >= MIN_MEMBLOCK_SIZE ) {
                                VALGRIND_MAKE_MEM_UNDEFINED ( post,
                                                              sizeof ( *post ));
                                post->size = post_size;
                                list_add ( &post->list, &pre->list );
                        }
                        /* Shrink "pre" block, leaving the main block
                         * isolated and no longer part of the free
                         * list.
                         */
                        pre->size = pre_size;
                        /* If there is no "pre" block, remove it from
                         * the list.  Also remove it (i.e. leak it) if
                         * it is too small, which can happen only at
                         * the very start of the heap.
                         */
                        if ( pre_size < MIN_MEMBLOCK_SIZE ) {
                                list_del ( &pre->list );
                                VALGRIND_MAKE_MEM_NOACCESS ( pre,
                                                             sizeof ( *pre ) );
                        }
                        /* Update memory usage statistics */
                        freemem -= actual_size;
                        usedmem += actual_size;
                        if ( usedmem > maxusedmem )
                                maxusedmem = usedmem;
                        /* Return allocated block */
                        DBGC2 ( &heap, "Allocated [%p,%p)\n", block,
                                ( ( ( void * ) block ) + size ) );
                        ptr = block;
                        VALGRIND_MAKE_MEM_UNDEFINED ( ptr, size );
                        goto done;
                }

                /* Try discarding some cached data to free up memory */
                DBGC ( &heap, "Attempting discard for %#zx (aligned %#zx+%zx), "
                       "used %zdkB\n", size, align, offset, ( usedmem >> 10 ) );
                valgrind_make_blocks_noaccess();
                discarded = discard_cache();
                valgrind_make_blocks_defined();
                check_blocks();
                if ( ! discarded ) {
                        /* Nothing available to discard */
                        DBGC ( &heap, "Failed to allocate %#zx (aligned "
                               "%#zx)\n", size, align );
                        ptr = NULL;
                        goto done;
                }
        }

 done:
        check_blocks();
        valgrind_make_blocks_noaccess();
        return ptr;
}
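
/* Illustrative sketch (not part of the original source): a typical
 * use of alloc_memblock() is to obtain a buffer whose *physical*
 * address meets a device's alignment constraint.  The size and
 * alignment values here are made up:
 */
#if 0
static void * example_aligned_buffer ( void ) {
        void *buf;

        /* Request 0x800 bytes, physically aligned to a 0x800 boundary */
        buf = alloc_memblock ( 0x800, 0x800, 0 );
        if ( ! buf )
                return NULL;

        /* The physical (not virtual) address is now 0x800-aligned */
        assert ( ( virt_to_phys ( buf ) & 0x7ff ) == 0 );

        /* The caller must remember the size for free_memblock() */
        return buf;
}
#endif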

/**
 * Free a memory block
 *
 * @v ptr               Memory allocated by alloc_memblock(), or NULL
 * @v size              Size of the memory
 *
 * If @c ptr is NULL, no action is taken.
 */
void free_memblock ( void *ptr, size_t size ) {
        struct memory_block *freeing;
        struct memory_block *block;
        struct memory_block *tmp;
        size_t actual_size;
        ssize_t gap_before;
        ssize_t gap_after = -1;

        /* Allow for ptr==NULL */
        if ( ! ptr )
                return;
        VALGRIND_MAKE_MEM_NOACCESS ( ptr, size );

        /* Sanity checks */
        valgrind_make_blocks_defined();
        check_blocks();

        /* Round up size to match actual size that alloc_memblock()
         * would have used.
         */
        assert ( size != 0 );
        actual_size = ( ( size + MIN_MEMBLOCK_SIZE - 1 ) &
                        ~( MIN_MEMBLOCK_SIZE - 1 ) );
        freeing = ptr;
        VALGRIND_MAKE_MEM_UNDEFINED ( freeing, sizeof ( *freeing ) );
        DBGC2 ( &heap, "Freeing [%p,%p)\n",
                freeing, ( ( ( void * ) freeing ) + size ) );

        /* Check that this block does not overlap the free list */
        if ( ASSERTING ) {
                list_for_each_entry ( block, &free_blocks, list ) {
                        if ( ( ( ( void * ) block ) <
                               ( ( void * ) freeing + actual_size ) ) &&
                             ( ( void * ) freeing <
                               ( ( void * ) block + block->size ) ) ) {
                                assert ( 0 );
                                DBGC ( &heap, "Double free of [%p,%p) "
                                       "overlapping [%p,%p) detected from %p\n",
                                       freeing,
                                       ( ( ( void * ) freeing ) + size ), block,
                                       ( ( void * ) block + block->size ),
                                       __builtin_return_address ( 0 ) );
                        }
                }
        }

        /* Insert/merge into free list */
        freeing->size = actual_size;
        list_for_each_entry_safe ( block, tmp, &free_blocks, list ) {
                /* Calculate gaps before and after the "freeing" block */
                gap_before = ( ( ( void * ) freeing ) -
                               ( ( ( void * ) block ) + block->size ) );
                gap_after = ( ( ( void * ) block ) -
                              ( ( ( void * ) freeing ) + freeing->size ) );
                /* Merge with immediately preceding block, if possible */
                if ( gap_before == 0 ) {
                        DBGC2 ( &heap, "[%p,%p) + [%p,%p) -> [%p,%p)\n", block,
                                ( ( ( void * ) block ) + block->size ), freeing,
                                ( ( ( void * ) freeing ) + freeing->size ),
                                block,
                                ( ( ( void * ) freeing ) + freeing->size ) );
                        block->size += actual_size;
                        list_del ( &block->list );
                        VALGRIND_MAKE_MEM_NOACCESS ( freeing,
                                                     sizeof ( *freeing ) );
                        freeing = block;
                }
                /* Stop processing as soon as we reach a following block */
                if ( gap_after >= 0 )
                        break;
        }

        /* Insert before the immediately following block.  If
         * possible, merge the following block into the "freeing"
         * block.
         */
        DBGC2 ( &heap, "[%p,%p)\n",
                freeing, ( ( ( void * ) freeing ) + freeing->size ) );
        list_add_tail ( &freeing->list, &block->list );
        if ( gap_after == 0 ) {
                DBGC2 ( &heap, "[%p,%p) + [%p,%p) -> [%p,%p)\n", freeing,
                        ( ( ( void * ) freeing ) + freeing->size ), block,
                        ( ( ( void * ) block ) + block->size ), freeing,
                        ( ( ( void * ) block ) + block->size ) );
                freeing->size += block->size;
                list_del ( &block->list );
                VALGRIND_MAKE_MEM_NOACCESS ( block, sizeof ( *block ) );
        }

        /* Update memory usage statistics */
        freemem += actual_size;
        usedmem -= actual_size;

        check_blocks();
        valgrind_make_blocks_noaccess();
}
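
/* Illustrative sketch (not part of the original source): unlike
 * free(), free_memblock() has no size header to consult, so callers
 * pass back the size they originally requested.  The structure below
 * is hypothetical:
 */
#if 0
struct example_buffer {
        uint8_t data[512];
};

static void example_memblock_pair ( void ) {
        struct example_buffer *buf;

        buf = alloc_memblock ( sizeof ( *buf ), 1, 0 );
        if ( ! buf )
                return;

        /* ... use buf ... */

        /* Same size as passed to alloc_memblock() */
        free_memblock ( buf, sizeof ( *buf ) );
}
#endif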

/**
 * Reallocate memory
 *
 * @v old_ptr           Memory previously allocated by malloc(), or NULL
 * @v new_size          Requested size
 * @ret new_ptr         Allocated memory, or NULL
 *
 * Allocates memory with no particular alignment requirement.  @c
 * new_ptr will be aligned to at least a multiple of sizeof(void*).
 * If @c old_ptr is non-NULL, then the contents of the newly allocated
 * memory will be the same as the contents of the previously allocated
 * memory, up to the minimum of the old and new sizes.  The old memory
 * will be freed.
 *
 * If allocation fails the previously allocated block is left
 * untouched and NULL is returned.
 *
 * Calling realloc() with a new size of zero is a valid way to free a
 * memory block.
 */
void * realloc ( void *old_ptr, size_t new_size ) {
        struct autosized_block *old_block;
        struct autosized_block *new_block;
        size_t old_total_size;
        size_t new_total_size;
        size_t old_size;
        void *new_ptr = NOWHERE;

        /* Allocate new memory if necessary.  If allocation fails,
         * return without touching the old block.
         */
        if ( new_size ) {
                new_total_size = ( new_size +
                                   offsetof ( struct autosized_block, data ) );
                if ( new_total_size < new_size )
                        return NULL;
                new_block = alloc_memblock ( new_total_size, 1, 0 );
                if ( ! new_block )
                        return NULL;
                new_block->size = new_total_size;
                VALGRIND_MAKE_MEM_NOACCESS ( &new_block->size,
                                             sizeof ( new_block->size ) );
                new_ptr = &new_block->data;
                VALGRIND_MALLOCLIKE_BLOCK ( new_ptr, new_size, 0, 0 );
        }

        /* Copy across relevant part of the old data region (if any),
         * then free it.  Note that at this point either (a) new_ptr
         * is valid, or (b) new_size is 0; either way, the memcpy() is
         * valid.
         */
        if ( old_ptr && ( old_ptr != NOWHERE ) ) {
                old_block = container_of ( old_ptr, struct autosized_block,
                                           data );
                VALGRIND_MAKE_MEM_DEFINED ( &old_block->size,
                                            sizeof ( old_block->size ) );
                old_total_size = old_block->size;
                assert ( old_total_size != 0 );
                old_size = ( old_total_size -
                             offsetof ( struct autosized_block, data ) );
                memcpy ( new_ptr, old_ptr,
                         ( ( old_size < new_size ) ? old_size : new_size ) );
                VALGRIND_FREELIKE_BLOCK ( old_ptr, 0 );
                free_memblock ( old_block, old_total_size );
        }

        if ( ASSERTED ) {
                DBGC ( &heap, "Possible memory corruption detected from %p\n",
                       __builtin_return_address ( 0 ) );
        }
        return new_ptr;
}
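
/* Illustrative sketch (not part of the original source): since a
 * failed realloc() leaves the old block untouched, the usual safe
 * pattern is to assign through a temporary.  ENOMEM is assumed to be
 * available via errno.h:
 */
#if 0
static int example_grow ( void **bufp, size_t new_len ) {
        void *tmp;

        tmp = realloc ( *bufp, new_len );
        if ( ! tmp )
                return -ENOMEM;
        *bufp = tmp;
        return 0;
}
#endif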

/**
 * Allocate memory
 *
 * @v size              Requested size
 * @ret ptr             Memory, or NULL
 *
 * Allocates memory with no particular alignment requirement.  @c ptr
 * will be aligned to at least a multiple of sizeof(void*).
 */
void * malloc ( size_t size ) {
        void *ptr;

        ptr = realloc ( NULL, size );
        if ( ASSERTED ) {
                DBGC ( &heap, "Possible memory corruption detected from %p\n",
                       __builtin_return_address ( 0 ) );
        }
        return ptr;
}

/**
 * Free memory
 *
 * @v ptr               Memory allocated by malloc(), or NULL
 *
 * Memory allocated with malloc_dma() cannot be freed with free(); it
 * must be freed with free_dma() instead.
 *
 * If @c ptr is NULL, no action is taken.
 */
void free ( void *ptr ) {

        realloc ( ptr, 0 );
        if ( ASSERTED ) {
                DBGC ( &heap, "Possible memory corruption detected from %p\n",
                       __builtin_return_address ( 0 ) );
        }
}

/**
 * Allocate cleared memory
 *
 * @v size              Requested size
 * @ret ptr             Allocated memory
 *
 * Allocate memory as per malloc(), and zero it.
 *
 * This function name is non-standard, but pretty intuitive.
 * zalloc(size) is always equivalent to calloc(1,size).
 */
void * zalloc ( size_t size ) {
        void *data;

        data = malloc ( size );
        if ( data )
                memset ( data, 0, size );
        if ( ASSERTED ) {
                DBGC ( &heap, "Possible memory corruption detected from %p\n",
                       __builtin_return_address ( 0 ) );
        }
        return data;
}
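
/* Illustrative sketch (not part of the original source): the common
 * iPXE pattern of a reference-counted, zero-initialised object ties
 * back to the "pad" field of struct memory_block above.  The
 * structure and the use of ref_init() here are a sketch only:
 */
#if 0
struct example_object {
        /** Reference counter (first field, as assumed by memory_block) */
        struct refcnt refcnt;
        /** Some payload */
        unsigned int value;
};

static struct example_object * example_object_create ( void ) {
        struct example_object *obj;

        obj = zalloc ( sizeof ( *obj ) );
        if ( ! obj )
                return NULL;
        ref_init ( &obj->refcnt, NULL );
        return obj;
}
#endif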

/**
 * Add memory to allocation pool
 *
 * @v start             Start address
 * @v len               Length of memory
 *
 * Adds the block of memory [start,start+len) to the allocation pool.
 * This is a one-way operation; there is no way to reclaim this
 * memory.
 *
 * @c start must be aligned to at least a multiple of sizeof(void*).
 */
void mpopulate ( void *start, size_t len ) {

        /* Prevent free_memblock() from rounding up len beyond the end
         * of what we were actually given...
         */
        len &= ~( MIN_MEMBLOCK_SIZE - 1 );

        /* Add to allocation pool */
        free_memblock ( start, len );

        /* Fix up memory usage statistics */
        usedmem += len;
}
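
/* Illustrative sketch (not part of the original source): mpopulate()
 * could in principle be used to donate any suitably aligned region to
 * the pool; in this file the only caller is init_heap() below.  The
 * extra pool here is hypothetical:
 */
#if 0
static char example_pool[ 128 * 1024 ]
        __attribute__ (( aligned ( __alignof__ ( void * ) ) ));

static void example_populate ( void ) {
        mpopulate ( example_pool, sizeof ( example_pool ) );
}
#endif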

/**
 * Initialise the heap
 *
 */
static void init_heap ( void ) {
        VALGRIND_MAKE_MEM_NOACCESS ( heap, sizeof ( heap ) );
        VALGRIND_MAKE_MEM_NOACCESS ( &free_blocks, sizeof ( free_blocks ) );
        mpopulate ( heap, sizeof ( heap ) );
}

/** Memory allocator initialisation function */
struct init_fn heap_init_fn __init_fn ( INIT_EARLY ) = {
        .initialise = init_heap,
};

/**
 * Discard all cached data on shutdown
 *
 */
static void shutdown_cache ( int booting __unused ) {
        discard_all_cache();
        DBGC ( &heap, "Maximum heap usage %zdkB\n", ( maxusedmem >> 10 ) );
}

/** Memory allocator shutdown function */
struct startup_fn heap_startup_fn __startup_fn ( STARTUP_EARLY ) = {
        .shutdown = shutdown_cache,
};

#if 0
#include <stdio.h>
/**
 * Dump free block list
 *
 */
void mdumpfree ( void ) {
        struct memory_block *block;

        printf ( "Free block list:\n" );
        list_for_each_entry ( block, &free_blocks, list ) {
                printf ( "[%p,%p] (size %#zx)\n", block,
                         ( ( ( void * ) block ) + block->size ), block->size );
        }
}
#endif