#include "Yap.h"

#if USE_DL_MALLOC

#include "YapHeap.h"
#if HAVE_STRING_H
#include <string.h>
#endif
#include "alloc.h"
#include "dlmalloc.h"

static struct malloc_chunk *
ChunkPtrAdjust (struct malloc_chunk *ptr)
{
  return (struct malloc_chunk *) ((char *) (ptr) + LOCAL_HDiff);
}

/*
  This is a version (aka dlmalloc) of malloc/free/realloc written by
  Doug Lea and released to the public domain.  Use, modify, and
  redistribute this code without permission or acknowledgement in any
  way you wish.  Send questions, comments, complaints, performance
  data, etc to dl@cs.oswego.edu

* VERSION 2.7.2 Sat Aug 17 09:07:30 2002  Doug Lea  (dl at gee)

   Note: There may be an updated version of this malloc obtainable at
           ftp://gee.cs.oswego.edu/pub/misc/malloc.c
         Check before installing!

* Quickstart

  This library is all in one file to simplify the most common usage:
  ftp it, compile it (-O), and link it into another program. All
  of the compile-time options default to reasonable values for use on
  most unix platforms. Compile -DWIN32 for reasonable defaults on windows.
  You might later want to step through various compile-time and dynamic
  tuning options.

  For convenience, an include file for code using this malloc is at:
     ftp://gee.cs.oswego.edu/pub/misc/malloc-2.7.1.h
  You don't really need this .h file unless you call functions not
  defined in your system include files.  The .h file contains only the
  excerpts from this file needed for using this malloc on ANSI C/C++
  systems, so long as you haven't changed compile-time options about
  naming and tuning parameters.  If you do, then you can create your
  own malloc.h that does include all settings by cutting at the point
  indicated below.

* Why use this malloc?

  This is not the fastest, most space-conserving, most portable, or
  most tunable malloc ever written. However it is among the fastest
  while also being among the most space-conserving, portable and tunable.
  Consistent balance across these factors results in a good general-purpose
  allocator for malloc-intensive programs.

  The main properties of the algorithms are:
  * For large (>= 512 bytes) requests, it is a pure best-fit allocator,
    with ties normally decided via FIFO (i.e. least recently used).
  * For small (<= 64 bytes by default) requests, it is a caching
    allocator that maintains pools of quickly recycled chunks.
  * In between, and for combinations of large and small requests, it does
    the best it can trying to meet both goals at once.
  * For very large requests (>= 128KB by default), it relies on system
    memory mapping facilities, if supported.

  For a longer but slightly out of date high-level description, see
     http://gee.cs.oswego.edu/dl/html/malloc.html

  You may already by default be using a C library containing a malloc
  that is based on some version of this malloc (for example in
  linux). You might still want to use the one in this file in order to
  customize settings or to avoid overheads associated with library
  versions.

* Contents, described in more detail in "description of public routines" below.

  Standard (ANSI/SVID/...)  functions:
    malloc(size_t n);
    calloc(size_t n_elements, size_t element_size);
    free(Void_t* p);
    realloc(Void_t* p, size_t n);
    memalign(size_t alignment, size_t n);
    valloc(size_t n);
    mallinfo()
    mallopt(int parameter_number, int parameter_value)

  Additional functions:
    independent_calloc(size_t n_elements, size_t size, Void_t* chunks[]);
    independent_comalloc(size_t n_elements, size_t sizes[], Void_t* chunks[]);
    pvalloc(size_t n);
    cfree(Void_t* p);
    malloc_trim(size_t pad);
    malloc_usable_size(Void_t* p);
    malloc_stats();

* Vital statistics:

  Supported pointer representation:       4 or 8 bytes
  Supported size_t  representation:       4 or 8 bytes
       Note that size_t is allowed to be 4 bytes even if pointers are 8.
       You can adjust this by defining INTERNAL_SIZE_T

  Alignment:                              2 * sizeof(size_t) (default)
       (i.e., 8 byte alignment with 4 byte size_t). This suffices for
       nearly all current machines and C compilers. However, you can
       define MALLOC_ALIGNMENT to be wider than this if necessary.

  Minimum overhead per allocated chunk:   4 or 8 bytes
       Each malloced chunk has a hidden word of overhead holding size
       and status information.

  Minimum allocated size: 4-byte ptrs:  16 bytes    (including 4 overhead)
                          8-byte ptrs:  24/32 bytes (including 4/8 overhead)

       When a chunk is freed, 12 (for 4 byte ptrs) or 20 (for 8 byte
       ptrs but 4 byte size) or 24 (for 8/8) additional bytes are
       needed; 4 (8) for a trailing size field and 8 (16) bytes for
       free list pointers. Thus, the minimum allocatable size is
       16/24/32 bytes.

       Even a request for zero bytes (i.e., malloc(0)) returns a
       pointer to something of the minimum allocatable size.

       The maximum overhead wastage (i.e., number of extra bytes
       allocated beyond what was requested in malloc) is less than or equal
       to the minimum size, except for requests >= mmap_threshold that
       are serviced via mmap(), where the worst case wastage is 2 *
       sizeof(size_t) bytes plus the remainder from a system page (the
       minimal mmap unit); typically 4096 or 8192 bytes.

  Maximum allocated size:  4-byte size_t: 2^32 minus about two pages
                           8-byte size_t: 2^64 minus about two pages

       It is assumed that (possibly signed) size_t values suffice to
       represent chunk sizes. `Possibly signed' is due to the fact
       that `size_t' may be defined on a system as either a signed or
       an unsigned type. The ISO C standard says that it must be
       unsigned, but a few systems are known not to adhere to this.
       Additionally, even when size_t is unsigned, sbrk (which is by
       default used to obtain memory from system) accepts signed
       arguments, and may not be able to handle size_t-wide arguments
       with negative sign bit.  Generally, values that would
       appear as negative after accounting for overhead and alignment
       are supported only via mmap(), which does not have this
       limitation.

       Requests for sizes outside the allowed range will perform an optional
       failure action and then return null. (Requests may also
       fail because a system is out of memory.)

  Thread-safety: NOT thread-safe unless USE_MALLOC_LOCK defined

       When USE_MALLOC_LOCK is defined, wrappers are created to
       surround every public call with either a pthread mutex or
       a win32 spinlock (depending on WIN32). This is not
       especially fast, and can be a major bottleneck.
       It is designed only to provide minimal protection
       in concurrent environments, and to provide a basis for
       extensions.  If you are using malloc in a concurrent program,
       you would be far better off obtaining ptmalloc, which is
       derived from a version of this malloc, and is well-tuned for
       concurrent programs. (See http://www.malloc.de) Note that
       even when USE_MALLOC_LOCK is defined, you can guarantee
       full thread-safety only if no threads acquire memory through
       direct calls to MORECORE or other system-level allocators.

  Compliance: I believe it is compliant with the 1997 Single Unix Specification
       (See http://www.opennc.org). Also SVID/XPG, ANSI C, and probably
       others as well.

*/

/* vsc: emulation of sbrk with YAP contiguous memory management */

void
Yap_add_memory_hole(ADDR start, ADDR end)
{
  if (Yap_NOfMemoryHoles == MAX_DLMALLOC_HOLES) {
    Yap_Error(OPERATING_SYSTEM_ERROR, 0L, "Unexpected Too Much Memory Fragmentation: please contact YAP maintainers");
    return;
  }
  Yap_MemoryHoles[Yap_NOfMemoryHoles].start = start;
  Yap_MemoryHoles[Yap_NOfMemoryHoles].end = end;
  Yap_HoleSize += (UInt)(end-start);
  Yap_NOfMemoryHoles++;
}
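
/*
   Illustrative sketch (not in the original source): a region of the
   address space that must not be handed to dlmalloc is registered as a
   hole, and yapsbrk() below jumps over it when it grows HeapTop.
   The names `gap_lo' and `gap_hi' are hypothetical placeholders.

     ADDR gap_lo = ..., gap_hi = ...;      // gap_lo < gap_hi, inside the heap
     Yap_add_memory_hole(gap_lo, gap_hi);  // [gap_lo, gap_hi) is now skipped
*/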

static void *
yapsbrk(long size)
{
  ADDR newHeapTop = HeapTop, oldHeapTop = HeapTop;
  newHeapTop = HeapTop+size;
  /* if the new top would run into the first registered hole,
     restart the allocation just past that hole and dequeue it */
  while (Yap_NOfMemoryHoles && newHeapTop > Yap_MemoryHoles[0].start) {
    UInt i;

    HeapTop = oldHeapTop = Yap_MemoryHoles[0].end;
    newHeapTop = oldHeapTop+size;
    Yap_NOfMemoryHoles--;
    for (i=0; i < Yap_NOfMemoryHoles; i++) {
      Yap_MemoryHoles[i].start = Yap_MemoryHoles[i+1].start;
      Yap_MemoryHoles[i].end = Yap_MemoryHoles[i+1].end;
    }
  }
  /* running close to the heap limit */
  if (newHeapTop > HeapLim - MinHeapGap) {
    if (HeapTop + size < HeapLim) {
      /* small allocations, we can wait */
      HeapTop += size;
      UNLOCK(HeapTopLock);
      LOCK(LOCAL_SignalLock);
      Yap_signal(YAP_CDOVF_SIGNAL);
      UNLOCK(LOCAL_SignalLock);
    } else {
      if (size > GLOBAL_SizeOfOverflow)
        GLOBAL_SizeOfOverflow = size;
      /* big allocations, the caller must handle the problem */
      UNLOCK(HeapUsedLock);
      UNLOCK(HeapTopLock);
      return (void *)MORECORE_FAILURE;
    }
  }
  HeapTop = newHeapTop;
  UNLOCK(HeapTopLock);
  return oldHeapTop;
}
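
/*
   Illustrative sketch (not in the original source): yapsbrk() stands in
   for sbrk() as dlmalloc's source of core.  Assuming the configuration in
   dlmalloc.h maps the MORECORE hook onto yapsbrk (an assumption; check
   that header), a grow request behaves roughly like:

     char *brk = (char *) MORECORE(16 * 1024);   // bump HeapTop by 16KB,
                                                 // hopping over any holes
     if (brk == (char *) MORECORE_FAILURE) {
       // not enough room before HeapLim: the caller (sYSMALLOc) must
       // recover, e.g. by growing the heap or failing the request
     }
*/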

/*
  Compute index for size. We expect this to be inlined when
  compiled with optimization, else not, which works out well.
*/
static int largebin_index(unsigned int sz) {
  unsigned int  x = sz >> SMALLBIN_WIDTH;
  unsigned int m;            /* bit position of highest set bit of x */

  if (x >= 0x10000) return NBINS-1;

  /* On intel, use BSRL instruction to find highest bit */
#if defined(__GNUC__) && defined(i386)

  __asm__("bsrl %1,%0\n\t"
          : "=r" (m)
          : "g"  (x));

#else
  {
    /*
      Based on branch-free nlz algorithm in chapter 5 of Henry
      S. Warren Jr's book "Hacker's Delight".
    */

    unsigned int n = ((x - 0x100) >> 16) & 8;
    x <<= n;
    m = ((x - 0x1000) >> 16) & 4;
    n += m;
    x <<= m;
    m = ((x - 0x4000) >> 16) & 2;
    n += m;
    x = (x << m) >> 14;
    m = 13 - n + (x & ~(x>>1));
  }
#endif

  /* Use next 2 bits to create finer-granularity bins */
  return NSMALLBINS + (m << 2) + ((sz >> (m + 6)) & 3);
}

#define bin_index(sz) \
 ((in_smallbin_range(sz)) ? smallbin_index(sz) : largebin_index(sz))
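
/*
   Worked example (added for illustration; assumes the stock dlmalloc 2.7.2
   bin constants NSMALLBINS == 32 and SMALLBIN_WIDTH == 8 -- check
   dlmalloc.h for the values actually in effect):

     sz = 1280:  x = 1280 >> 8 = 5 (binary 101), so m = 2;
                 index = 32 + (2 << 2) + ((1280 >> 8) & 3) = 40 + 1 = 41.

   Small requests never reach largebin_index(); bin_index() routes them
   to smallbin_index() instead.
*/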

/*
  FIRST_SORTED_BIN_SIZE is the chunk size corresponding to the
  first bin that is maintained in sorted order. This must
  be the smallest size corresponding to a given bin.

  Normally, this should be MIN_LARGE_SIZE. But you can weaken
  best fit guarantees to sometimes speed up malloc by increasing its value.
  Doing this means that malloc may choose a chunk that is
  non-best-fitting by up to the width of the bin.

  Some useful cutoff values:
      512 - all bins sorted
     2560 - leaves bins <=     64 bytes wide unsorted
    12288 - leaves bins <=    512 bytes wide unsorted
    65536 - leaves bins <=   4096 bytes wide unsorted
   262144 - leaves bins <=  32768 bytes wide unsorted
       -1 - no bins sorted (not recommended!)
*/

/*#define FIRST_SORTED_BIN_SIZE MIN_LARGE_SIZE */
#define FIRST_SORTED_BIN_SIZE 2056

/*
  Unsorted chunks

    All remainders from chunk splits, as well as all returned chunks,
    are first placed in the "unsorted" bin. They are then placed
    in regular bins after malloc gives them ONE chance to be used before
    binning. So, basically, the unsorted_chunks list acts as a queue,
    with chunks being placed on it in free (and malloc_consolidate),
    and taken off (to be either used or placed in bins) in malloc.
*/

/* The otherwise unindexable 1-bin is used to hold unsorted chunks. */
#define unsorted_chunks(M)          (bin_at(M, 1))

/*
  Top

    The top-most available chunk (i.e., the one bordering the end of
    available memory) is treated specially. It is never included in
    any bin, is used only if no other chunk is available, and is
    released back to the system if it is very large (see
    M_TRIM_THRESHOLD).  Because top initially
    points to its own bin with initial zero size, thus forcing
    extension on the first malloc request, we avoid having any special
    code in malloc to check whether it even exists yet. But we still
    need to do so when getting memory from system, so we make
    initial_top treat the bin as a legal but unusable chunk during the
    interval between initialization and the first call to
    sYSMALLOc. (This is somewhat delicate, since it relies on
    the 2 preceding words to be zero during this interval as well.)
*/

/* Conveniently, the unsorted bin can be used as dummy top on first call */
#define initial_top(M)              (unsorted_chunks(M))

/*
  Binmap

    To help compensate for the large number of bins, a one-level index
    structure is used for bin-by-bin searching.  `binmap' is a
    bitvector recording whether bins are definitely empty so they can
    be skipped over during traversals.  The bits are NOT always
    cleared as soon as bins are empty, but instead only
    when they are noticed to be empty during traversal in malloc.
*/

#define idx2block(i)     ((i) >> BINMAPSHIFT)
#define idx2bit(i)       ((1U << ((i) & ((1U << BINMAPSHIFT)-1))))

#define mark_bin(m,i)    ((m)->binmap[idx2block(i)] |=  idx2bit(i))
#define unmark_bin(m,i)  ((m)->binmap[idx2block(i)] &= ~(idx2bit(i)))
#define get_binmap(m,i)  ((m)->binmap[idx2block(i)] &   idx2bit(i))
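
/*
   Worked example (added for illustration; assumes BINMAPSHIFT == 5,
   i.e. 32 bins per map word, as in stock dlmalloc -- check dlmalloc.h):

     bin index i = 41:
       idx2block(41) = 41 >> 5 = 1           -> second word of binmap[]
       idx2bit(41)   = 1U << (41 & 31) = 1U << 9
     so mark_bin(m,41) sets bit 9 of m->binmap[1], and get_binmap(m,41)
     tests that same bit.
*/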

/*
  Fastbins

    An array of lists holding recently freed small chunks.  Fastbins
    are not doubly linked.  It is faster to single-link them, and
    since chunks are never removed from the middles of these lists,
    double linking is not necessary. Also, unlike regular bins, they
    are not even processed in FIFO order (they use faster LIFO) since
    ordering doesn't much matter in the transient contexts in which
    fastbins are normally used.

    Chunks in fastbins keep their inuse bit set, so they cannot
    be consolidated with other free chunks. malloc_consolidate
    releases all chunks in fastbins and consolidates them with
    other free chunks.
*/

/*
  FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free()
  that triggers automatic consolidation of possibly-surrounding
  fastbin chunks. This is a heuristic, so the exact value should not
  matter too much. It is defined at half the default trim threshold as a
  compromise heuristic to only attempt consolidation if it is likely
  to lead to trimming. However, it is not dynamically tunable, since
  consolidation reduces fragmentation surrounding large chunks even
  if trimming is not used.
*/

#define FASTBIN_CONSOLIDATION_THRESHOLD  \
  ((unsigned long)(DEFAULT_TRIM_THRESHOLD) >> 1)
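
/*
   For scale (added for illustration): with the stock dlmalloc default of
   DEFAULT_TRIM_THRESHOLD == 256 * 1024 (an assumption -- the value that
   actually applies comes from dlmalloc.h), freeing any chunk of at least
   (256 * 1024) >> 1 == 128KB triggers a malloc_consolidate() pass.
*/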

/*
  Since the lowest 2 bits in max_fast don't matter in size comparisons,
  they are used as flags.
*/

/*
  ANYCHUNKS_BIT held in max_fast indicates that there may be any
  freed chunks at all. It is set true when entering a chunk into any
  bin.
*/

#define ANYCHUNKS_BIT        (1U)

#define have_anychunks(M)     (((M)->max_fast &  ANYCHUNKS_BIT))
#define set_anychunks(M)      ((M)->max_fast |=  ANYCHUNKS_BIT)
#define clear_anychunks(M)    ((M)->max_fast &= ~ANYCHUNKS_BIT)

/*
  FASTCHUNKS_BIT held in max_fast indicates that there are probably
  some fastbin chunks. It is set true on entering a chunk into any
  fastbin, and cleared only in malloc_consolidate.
*/

#define FASTCHUNKS_BIT        (2U)

#define have_fastchunks(M)   (((M)->max_fast &  FASTCHUNKS_BIT))
#define set_fastchunks(M)    ((M)->max_fast |=  (FASTCHUNKS_BIT|ANYCHUNKS_BIT))
#define clear_fastchunks(M)  ((M)->max_fast &= ~(FASTCHUNKS_BIT))

/*
   Set value of max_fast.
   Use impossibly small value if 0.
*/

#define set_max_fast(M, s) \
  (M)->max_fast = (((s) == 0)? SMALLBIN_WIDTH: request2size(s)) | \
  ((M)->max_fast &  (FASTCHUNKS_BIT|ANYCHUNKS_BIT))

#define get_max_fast(M) \
  ((M)->max_fast & ~(FASTCHUNKS_BIT | ANYCHUNKS_BIT))
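
/*
   Illustration (added): the two flag bits ride along in the low bits of
   max_fast, so size comparisons and flag tests share one field.  For
   instance, after set_fastchunks(av) followed by set_max_fast(av, 64),
   get_max_fast(av) yields request2size(64) with the low flag bits masked
   off, while have_fastchunks(av) still reports a nonzero FASTCHUNKS_BIT.
*/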

/*
  morecore_properties is a status word holding dynamically discovered
  or controlled properties of the morecore function
*/

#define MORECORE_CONTIGUOUS_BIT  (1U)

#define contiguous(M) \
        (((M)->morecore_properties &  MORECORE_CONTIGUOUS_BIT))
#define noncontiguous(M) \
        (((M)->morecore_properties &  MORECORE_CONTIGUOUS_BIT) == 0)
#define set_contiguous(M) \
        ((M)->morecore_properties |=  MORECORE_CONTIGUOUS_BIT)
#define set_noncontiguous(M) \
        ((M)->morecore_properties &= ~MORECORE_CONTIGUOUS_BIT)


/*
   There is exactly one instance of this struct in this malloc.
   If you are adapting this malloc in a way that does NOT use a static
   malloc_state, you MUST explicitly zero-fill it before using. This
   malloc relies on the property that malloc_state is initialized to
   all zeroes (as is true of C statics).
*/

/* static struct malloc_state av_; */  /* never directly referenced */

/*
   All uses of av_ are via get_malloc_state().
   At most one "call" to get_malloc_state is made per invocation of
   the public versions of malloc and free, but other routines
   that in turn invoke malloc and/or free may call it more than once.
   Also, it is called in check* routines if DEBUG is set.
*/

/* #define get_malloc_state() (&(av_)) */
#define get_malloc_state()  Yap_av

/*
  Initialize a malloc_state struct.

  This is called only from within malloc_consolidate, which needs to
  be called in the same contexts anyway.  It is never called directly
  outside of malloc_consolidate because some optimizing compilers try
  to inline it at all call points, which turns out not to be an
  optimization at all. (Inlining it in malloc_consolidate is fine though.)
*/

#if __STD_C
static void malloc_init_state(mstate av)
#else
static void malloc_init_state(av) mstate av;
#endif
{
  int     i;
  mbinptr bin;

  /* Establish circular links for normal bins */
  for (i = 1; i < NBINS; ++i) {
    bin = bin_at(av,i);
    bin->fd = bin->bk = bin;
  }

  av->top_pad        = DEFAULT_TOP_PAD;
  av->trim_threshold = DEFAULT_TRIM_THRESHOLD;

#if MORECORE_CONTIGUOUS
  set_contiguous(av);
#else
  set_noncontiguous(av);
#endif

  set_max_fast(av, DEFAULT_MXFAST);

  av->top            = initial_top(av);
  av->pagesize       = malloc_getpagesize;
}
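
/*
   Note (added for illustration): after malloc_init_state() every normal
   bin is a one-element circular list, i.e. for each i in [1, NBINS)
   bin_at(av,i)->fd == bin_at(av,i)->bk == bin_at(av,i).  That is exactly
   the "empty" condition (last(b) == b) that do_check_malloc_state()
   below tests against the binmap.
*/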

/*
   Other internal utilities operating on mstates
*/

#if __STD_C
static Void_t*  sYSMALLOc(INTERNAL_SIZE_T, mstate);
static int      sYSTRIm(size_t, mstate);
static void     malloc_consolidate(mstate);
static Void_t** iALLOc(size_t, size_t*, int, Void_t**);
#else
static Void_t*  sYSMALLOc();
static int      sYSTRIm();
static void     malloc_consolidate();
static Void_t** iALLOc();
#endif

/*
  Debugging support

  These routines make a number of assertions about the states
  of data structures that should be true at all times. If any
  are not true, it's very likely that a user program has somehow
  trashed memory. (It's also possible that there is a coding error
  in malloc. In which case, please report it!)
*/

#if ! DEBUG_DLMALLOC

#define check_chunk(P)
#define check_free_chunk(P)
#define check_inuse_chunk(P)
#define check_remalloced_chunk(P,N)
#define check_malloced_chunk(P,N)
#define check_malloc_state()

#else
#define check_chunk(P)              do_check_chunk(P)
#define check_free_chunk(P)         do_check_free_chunk(P)
#define check_inuse_chunk(P)        do_check_inuse_chunk(P)
#define check_remalloced_chunk(P,N) do_check_remalloced_chunk(P,N)
#define check_malloced_chunk(P,N)   do_check_malloced_chunk(P,N)
#define check_malloc_state()        do_check_malloc_state()

/*
  Properties of all chunks
*/

#if __STD_C
static void do_check_chunk(mchunkptr p)
#else
static void do_check_chunk(p) mchunkptr p;
#endif
{
  mstate av = get_malloc_state();
#if DEBUG_DLMALLOC
  /* min and max possible addresses assuming contiguous allocation */
  char* max_address = (char*)(av->top) + chunksize(av->top);
  CHUNK_SIZE_T  sz = chunksize(p);
  char* min_address = max_address - av->sbrked_mem;
#endif

  if (!chunk_is_mmapped(p)) {

    /* Has legal address ... */
    if (p != av->top) {
      if (contiguous(av)) {
        assert(((char*)p) >= min_address);
        assert(((char*)p + sz) <= ((char*)(av->top)));
      }
    }
    else {
      /* top size is always at least MINSIZE */
      assert((CHUNK_SIZE_T)(sz) >= MINSIZE);
      /* top predecessor always marked inuse */
      assert(prev_inuse(p));
    }

  }
  else {
#if HAVE_MMAP
    /* address is outside main heap  */
    if (contiguous(av) && av->top != initial_top(av)) {
      assert(((char*)p) < min_address || ((char*)p) > max_address);
    }
    /* chunk is page-aligned */
    assert(((p->prev_size + sz) & (av->pagesize-1)) == 0);
    /* mem is aligned */
    assert(aligned_OK(chunk2mem(p)));
#else
    /* force an appropriate assert violation if debug set */
    assert(!chunk_is_mmapped(p));
#endif
  }
}

/*
  Properties of free chunks
*/

#if __STD_C
static void do_check_free_chunk(mchunkptr p)
#else
static void do_check_free_chunk(p) mchunkptr p;
#endif
{
#if DEBUG_DLMALLOC
  mstate av = get_malloc_state();
#endif

  INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
#if DEBUG_DLMALLOC
  mchunkptr next = chunk_at_offset(p, sz);
#endif

  do_check_chunk(p);

  /* Chunk must claim to be free ... */
  assert(!inuse(p));
  assert (!chunk_is_mmapped(p));

  /* Unless a special marker, must have OK fields */
  if ((CHUNK_SIZE_T)(sz) >= MINSIZE)
  {
    assert((sz & MALLOC_ALIGN_MASK) == 0);
    assert(aligned_OK(chunk2mem(p)));
    /* ... matching footer field */
    assert(next->prev_size == sz);
    /* ... and is fully consolidated */
    assert(prev_inuse(p));
    assert (next == av->top || inuse(next));

    /* ... and has minimally sane links */
    assert(p->fd->bk == p);
    assert(p->bk->fd == p);
  }
  else /* markers are always of size SIZE_SZ */
    assert(sz == SIZE_SZ);
}

/*
  Properties of inuse chunks
*/

#if __STD_C
static void do_check_inuse_chunk(mchunkptr p)
#else
static void do_check_inuse_chunk(p) mchunkptr p;
#endif
{
  mstate av = get_malloc_state();
  mchunkptr next;
  do_check_chunk(p);

  if (chunk_is_mmapped(p))
    return; /* mmapped chunks have no next/prev */

  /* Check whether it claims to be in use ... */
  assert(inuse(p));

  next = next_chunk(p);

  /* ... and is surrounded by OK chunks.
    Since more things can be checked with free chunks than inuse ones,
    if an inuse chunk borders them and debug is on, it's worth doing them.
  */
  if (!prev_inuse(p))  {
    /* Note that we cannot even look at prev unless it is not inuse */
    mchunkptr prv = prev_chunk(p);
    assert(next_chunk(prv) == p);
    do_check_free_chunk(prv);
  }

  if (next == av->top) {
    assert(prev_inuse(next));
    assert(chunksize(next) >= MINSIZE);
  }
  else if (!inuse(next))
    do_check_free_chunk(next);
}

/*
  Properties of chunks recycled from fastbins
*/

#if __STD_C
static void do_check_remalloced_chunk(mchunkptr p, INTERNAL_SIZE_T s)
#else
static void do_check_remalloced_chunk(p, s) mchunkptr p; INTERNAL_SIZE_T s;
#endif
{
#if DEBUG_DLMALLOC
  INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
#endif

  do_check_inuse_chunk(p);

  /* Legal size ... */
  assert((sz & MALLOC_ALIGN_MASK) == 0);
  assert((CHUNK_SIZE_T)(sz) >= MINSIZE);
  /* ... and alignment */
  assert(aligned_OK(chunk2mem(p)));
  /* chunk is less than MINSIZE more than request */
  assert((long)(sz) - (long)(s) >= 0);
  assert((long)(sz) - (long)(s + MINSIZE) < 0);
}

/*
  Properties of nonrecycled chunks at the point they are malloced
*/

#if __STD_C
static void do_check_malloced_chunk(mchunkptr p, INTERNAL_SIZE_T s)
#else
static void do_check_malloced_chunk(p, s) mchunkptr p; INTERNAL_SIZE_T s;
#endif
{
  /* same as recycled case ... */
  do_check_remalloced_chunk(p, s);

  /*
    ... plus, must obey implementation invariant that prev_inuse is
    always true of any allocated chunk; i.e., that each allocated
    chunk borders either a previously allocated and still in-use
    chunk, or the base of its memory arena. This is ensured
    by making all allocations from the `lowest' part of any found
    chunk.  This does not necessarily hold however for chunks
    recycled via fastbins.
  */

  assert(prev_inuse(p));
}
 | 
					
						
							|  |  |  | /*
 | 
					
						
							|  |  |  |   Properties of malloc_state. | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  |   This may be useful for debugging malloc, as well as detecting user | 
					
						
							|  |  |  |   programmer errors that somehow write into malloc_state. | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  |   If you are extending or experimenting with this malloc, you can | 
					
						
							|  |  |  |   probably figure out how to hack this routine to print out or | 
					
						
							|  |  |  |   display chunk addresses, sizes, bins, and other instrumentation. | 
					
						
							|  |  |  | */ | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | static void do_check_malloc_state(void) | 
					
						
							|  |  |  | { | 
					
						
							|  |  |  |   mstate av = get_malloc_state(); | 
					
						
							|  |  |  |   int i; | 
					
						
							|  |  |  |   mchunkptr p; | 
					
						
							|  |  |  |   mchunkptr q; | 
					
						
							|  |  |  |   mbinptr b; | 
					
						
							|  |  |  |   unsigned int binbit; | 
					
						
							|  |  |  |   int empty; | 
					
						
							|  |  |  |   unsigned int idx; | 
					
						
							|  |  |  |   INTERNAL_SIZE_T size; | 
					
						
							|  |  |  |   CHUNK_SIZE_T  total = 0; | 
					
						
							|  |  |  |   int max_fast_bin; | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  |   /* internal size_t must be no wider than pointer type */ | 
					
						
  assert(sizeof(INTERNAL_SIZE_T) <= sizeof(char*));

  /* alignment is a power of 2 */
  assert((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-1)) == 0);

  /* cannot run remaining checks until fully initialized */
  if (av->top == 0 || av->top == initial_top(av))
    return;

  /* pagesize is a power of 2 */
  assert((av->pagesize & (av->pagesize-1)) == 0);

  /* properties of fastbins */

  /* max_fast is in allowed range */
  assert(get_max_fast(av) <= request2size(MAX_FAST_SIZE));

  max_fast_bin = fastbin_index(av->max_fast);

  for (i = 0; i < NFASTBINS; ++i) {
    p = av->fastbins[i];

    /* all bins past max_fast are empty */
    if (i > max_fast_bin)
      assert(p == 0);

    while (p != 0) {
      /* each chunk claims to be inuse */
      do_check_inuse_chunk(p);
      total += chunksize(p);
      /* chunk belongs in this bin */
      assert(fastbin_index(chunksize(p)) == i);
      p = p->fd;
    }
  }

  if (total != 0)
    assert(have_fastchunks(av));
  else if (!have_fastchunks(av))
    assert(total == 0);

  /* check normal bins */
  for (i = 1; i < NBINS; ++i) {
    b = bin_at(av,i);

    /* binmap is accurate (except for bin 1 == unsorted_chunks) */
    if (i >= 2) {
      binbit = get_binmap(av,i);
      empty = last(b) == b;
      if (!binbit)
        assert(empty);
      else if (!empty)
        assert(binbit);
    }

    for (p = last(b); p != b; p = p->bk) {
      /* each chunk claims to be free */
      do_check_free_chunk(p);
      size = chunksize(p);
      total += size;
      if (i >= 2) {
        /* chunk belongs in bin */
        idx = bin_index(size);
        assert(idx == i);
        /* lists are sorted */
        if ((CHUNK_SIZE_T) size >= (CHUNK_SIZE_T)(FIRST_SORTED_BIN_SIZE)) {
          assert(p->bk == b ||
                 (CHUNK_SIZE_T)chunksize(p->bk) >=
                 (CHUNK_SIZE_T)chunksize(p));
        }
      }
      /* chunk is followed by a legal chain of inuse chunks */
      for (q = next_chunk(p);
           (q != av->top && inuse(q) &&
             (CHUNK_SIZE_T)(chunksize(q)) >= MINSIZE);
           q = next_chunk(q))
        do_check_inuse_chunk(q);
    }
  }

  /* top chunk is OK */
  check_chunk(av->top);

  /* sanity checks for statistics */

  assert(total <= (CHUNK_SIZE_T)(av->max_total_mem));

  assert((CHUNK_SIZE_T)(av->sbrked_mem) <=
         (CHUNK_SIZE_T)(av->max_sbrked_mem));

  assert((CHUNK_SIZE_T)(av->max_total_mem) >=
         (CHUNK_SIZE_T)(av->sbrked_mem));
}
#endif


/* ----------- Routines dealing with system allocation -------------- */

/*
  sysmalloc handles malloc cases requiring more memory from the system.
  On entry, it is assumed that av->top does not have enough
  space to service request for nb bytes, thus requiring that av->top
  be extended or replaced.
*/

#if __STD_C
static Void_t* sYSMALLOc(INTERNAL_SIZE_T nb, mstate av)
#else
static Void_t* sYSMALLOc(nb, av) INTERNAL_SIZE_T nb; mstate av;
#endif
{
  mchunkptr       old_top;        /* incoming value of av->top */
  INTERNAL_SIZE_T old_size;       /* its size */
  char*           old_end;        /* its end address */

  long            size;           /* arg to first MORECORE or mmap call */
  char*           brk;            /* return value from MORECORE */

  long            correction;     /* arg to 2nd MORECORE call */
  char*           snd_brk;        /* 2nd return val */

  INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
  INTERNAL_SIZE_T end_misalign;   /* partial page left at end of new space */
  char*           aligned_brk;    /* aligned offset into brk */

  mchunkptr       p;              /* the allocated/returned chunk */
  mchunkptr       remainder;      /* remainder from allocation */
  CHUNK_SIZE_T    remainder_size; /* its size */

  CHUNK_SIZE_T    sum;            /* for updating stats */

  size_t          pagemask  = av->pagesize - 1;

  /*
    If there is space available in fastbins, consolidate and retry
    malloc from scratch rather than getting memory from system.  This
    can occur only if nb is in smallbin range so we didn't consolidate
    upon entry to malloc. It is much easier to handle this case here
    than in malloc proper.
  */

  if (have_fastchunks(av)) {
    assert(in_smallbin_range(nb));
    malloc_consolidate(av);
    return mALLOc(nb - MALLOC_ALIGN_MASK);
  }

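  /*
    Illustrative note (not part of the original source): nb is already a
    padded chunk size, while mALLOc expects a raw request size.  Passing
    nb - MALLOC_ALIGN_MASK guarantees that request2size maps the retry back
    to at most nb bytes.  For example, assuming SIZE_SZ == 4 and
    MALLOC_ALIGNMENT == 8 (so MALLOC_ALIGN_MASK == 7), a request of 20 bytes
    is normalized to nb == 24; the retry then asks for 24 - 7 == 17 bytes,
    which request2size pads back to (17 + 4 + 7) & ~7 == 24.
  */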
					
						
  /* Record incoming configuration of top */

  old_top  = av->top;
  old_size = chunksize(old_top);
  old_end  = (char*)(chunk_at_offset(old_top, old_size));

  brk = snd_brk = (char*)(MORECORE_FAILURE);

  /*
     If not the first time through, we require old_size to be
     at least MINSIZE and to have prev_inuse set.
  */

  assert((old_top == initial_top(av) && old_size == 0) ||
         ((CHUNK_SIZE_T) (old_size) >= MINSIZE &&
          prev_inuse(old_top)));

  /* Precondition: not enough current space to satisfy nb request */
  assert((CHUNK_SIZE_T)(old_size) < (CHUNK_SIZE_T)(nb + MINSIZE));

  /* Precondition: all fastbins are consolidated */
  assert(!have_fastchunks(av));


  /* Request enough space for nb + pad + overhead */

  size = nb + av->top_pad + MINSIZE;

  /*
    If contiguous, we can subtract out existing space that we hope to
    combine with new space. We add it back later only if
    we don't actually get contiguous space.
  */

  if (contiguous(av))
    size -= old_size;

  /*
    Round to a multiple of page size.
    If MORECORE is not contiguous, this ensures that we only call it
    with whole-page arguments.  And if MORECORE is contiguous and
    this is not the first time through, this preserves page-alignment of
    previous calls. Otherwise, we correct to page-align below.
  */

  size = (size + pagemask) & ~pagemask;

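  /*
    Illustrative note (not part of the original source): the rounding above
    is ordinary round-up-to-page arithmetic.  For example, with a 4096-byte
    page (pagemask == 0xfff), a size of 5000 becomes
    (5000 + 4095) & ~4095 == 8192, i.e. two whole pages, while a size that
    is already page-aligned is left unchanged.
  */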
					
						
  /*
    Don't try to call MORECORE if argument is so big as to appear
    negative. Note that since mmap takes size_t arg, it may succeed
    below even if we cannot call MORECORE.
  */

  if (size > 0)
    brk = (char*)(MORECORE(size));

  /*
    If have mmap, try using it as a backup when MORECORE fails or
    cannot be used. This is worth doing on systems that have "holes" in
    address space, so sbrk cannot extend to give contiguous space, but
    space is available elsewhere.  Note that we ignore mmap max count
    and threshold limits, since the space will not be used as a
    segregated mmap region.
  */


  if (brk != (char*)(MORECORE_FAILURE)) {
    av->sbrked_mem += size;

    /*
      If MORECORE extends previous space, we can likewise extend top size.
    */

    if (brk == old_end && snd_brk == (char*)(MORECORE_FAILURE)) {
      set_head(old_top, (size + old_size) | PREV_INUSE);
    }

    /*
      Otherwise, make adjustments:

      * If the first time through or noncontiguous, we need to call sbrk
        just to find out where the end of memory lies.

      * We need to ensure that all returned chunks from malloc will meet
        MALLOC_ALIGNMENT

      * If there was an intervening foreign sbrk, we need to adjust sbrk
        request size to account for fact that we will not be able to
        combine new space with existing space in old_top.

      * Almost all systems internally allocate whole pages at a time, in
        which case we might as well use the whole last page of request.
        So we allocate enough more memory to hit a page boundary now,
        which in turn causes future contiguous calls to page-align.
    */

    else {
      front_misalign = 0;
      end_misalign = 0;
      correction = 0;
      aligned_brk = brk;

      /*
        If MORECORE returns an address lower than we have seen before,
        we know it isn't really contiguous.  This and some subsequent
        checks help cope with non-conforming MORECORE functions and
        the presence of "foreign" calls to MORECORE from outside of
        malloc or by other threads.  We cannot guarantee to detect
        these in all cases, but cope with the ones we do detect.
      */
      if (contiguous(av) && old_size != 0 && brk < old_end) {
        set_noncontiguous(av);
      }

      /* handle contiguous cases */
      if (contiguous(av)) {

        /*
           We can tolerate forward non-contiguities here (usually due
           to foreign calls) but treat them as part of our space for
           stats reporting.
        */
        if (old_size != 0)
          av->sbrked_mem += brk - old_end;

        /* Guarantee alignment of first new chunk made from this space */

        front_misalign = (INTERNAL_SIZE_T)chunk2mem(brk) & MALLOC_ALIGN_MASK;
        if (front_misalign > 0) {

          /*
            Skip over some bytes to arrive at an aligned position.
            We don't need to specially mark these wasted front bytes.
            They will never be accessed anyway because
            prev_inuse of av->top (and any chunk created from its start)
            is always true after initialization.
          */

          correction = MALLOC_ALIGNMENT - front_misalign;
          aligned_brk += correction;
        }

        /*
          If this isn't adjacent to existing space, then we will not
          be able to merge with old_top space, so must add to 2nd request.
        */

        correction += old_size;

        /* Extend the end address to hit a page boundary */
        end_misalign = (INTERNAL_SIZE_T)(brk + size + correction);
        correction += ((end_misalign + pagemask) & ~pagemask) - end_misalign;

        assert(correction >= 0);
        snd_brk = (char*)(MORECORE(correction));

        if (snd_brk == (char*)(MORECORE_FAILURE)) {
          /*
            If can't allocate correction, try to at least find out current
            brk.  It might be enough to proceed without failing.
          */
          correction = 0;
          snd_brk = (char*)(MORECORE(0));
        }
        else if (snd_brk < brk) {
          /*
            If the second call gives noncontiguous space even though
            it says it won't, the only course of action is to ignore
            results of second call, and conservatively estimate where
            the first call left us. Also set noncontiguous, so this
            won't happen again, leaving at most one hole.

            Note that this check is intrinsically incomplete.  Because
            MORECORE is allowed to give more space than we ask for,
            there is no reliable way to detect a noncontiguity
            producing a forward gap for the second call.
          */
          snd_brk = brk + size;
          correction = 0;
          set_noncontiguous(av);
        }

      }

      /* handle non-contiguous cases */
      else {
        /* MORECORE/mmap must correctly align */
        assert(aligned_OK(chunk2mem(brk)));

        /* Find out current end of memory */
        if (snd_brk == (char*)(MORECORE_FAILURE)) {
          snd_brk = (char*)(MORECORE(0));
          av->sbrked_mem += snd_brk - brk - size;
        }
      }

      /* Adjust top based on results of second sbrk */
      if (snd_brk != (char*)(MORECORE_FAILURE)) {
        av->top = (mchunkptr)aligned_brk;
        set_head(av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE);
        av->sbrked_mem += correction;

        /*
          If not the first time through, we either have a
          gap due to foreign sbrk or a non-contiguous region.  Insert a
          double fencepost at old_top to prevent consolidation with space
          we don't own. These fenceposts are artificial chunks that are
          marked as inuse and are in any case too small to use.  We need
          two to make sizes and alignments work out.
        */

        if (old_size != 0) {
          /*
             Shrink old_top to insert fenceposts, keeping size a
             multiple of MALLOC_ALIGNMENT. We know there is at least
             enough space in old_top to do this.
          */
          old_size = (old_size - 3*SIZE_SZ) & ~MALLOC_ALIGN_MASK;
          set_head(old_top, old_size | PREV_INUSE);

          /*
            Note that the following assignments completely overwrite
            old_top when old_size was previously MINSIZE.  This is
            intentional. We need the fencepost, even if old_top otherwise gets
            lost.
          */
          chunk_at_offset(old_top, old_size          )->size =
            SIZE_SZ|PREV_INUSE;

          chunk_at_offset(old_top, old_size + SIZE_SZ)->size =
            SIZE_SZ|PREV_INUSE;

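          /*
            Illustrative note (not part of the original source): after the
            two assignments above, memory looks roughly like

              [ old_top, old_size bytes ][ fence, SIZE_SZ ][ fence, SIZE_SZ ]

            Both fenceposts carry PREV_INUSE and a size below MINSIZE, so
            free() can neither coalesce across them nor hand them out.
          */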
					
						
          /*
             If possible, release the rest, suppressing trimming.
          */
          if (old_size >= MINSIZE) {
            INTERNAL_SIZE_T tt = av->trim_threshold;
            av->trim_threshold = (INTERNAL_SIZE_T)(-1);
            fREe(chunk2mem(old_top));
            av->trim_threshold = tt;
          }
        }
      }
    }

    /* Update statistics */
    sum = av->sbrked_mem;
    if (sum > (CHUNK_SIZE_T)(av->max_sbrked_mem))
      av->max_sbrked_mem = sum;

    sum += av->mmapped_mem;
    if (sum > (CHUNK_SIZE_T)(av->max_total_mem))
      av->max_total_mem = sum;

    check_malloc_state();

    /* finally, do the allocation */

    p = av->top;
    size = chunksize(p);

    /* check that one of the above allocation paths succeeded */
    if ((CHUNK_SIZE_T)(size) >= (CHUNK_SIZE_T)(nb + MINSIZE)) {

      remainder_size = size - nb;
      remainder = chunk_at_offset(p, nb);
      av->top = remainder;
      set_head(p, nb | PREV_INUSE);
      set_head(remainder, remainder_size | PREV_INUSE);
      check_malloced_chunk(p, nb);
      return chunk2mem(p);
    }

  }

  /* catch all failure paths */
  MALLOC_FAILURE_ACTION;
  return 0;
}



/*
  sYSTRIm is an inverse of sorts to sYSMALLOc.  It gives memory back
  to the system (via negative arguments to sbrk) if there is unused
  memory at the `high' end of the malloc pool. It is called
  automatically by free() when top space exceeds the trim
  threshold. It is also called by the public malloc_trim routine.  It
  returns 1 if it actually released any memory, else 0.
*/

#if __STD_C
static int sYSTRIm(size_t pad, mstate av)
#else
static int sYSTRIm(pad, av) size_t pad; mstate av;
#endif
{
  long  top_size;        /* Amount of top-most memory */
  long  extra;           /* Amount to release */
  long  released;        /* Amount actually released */
  char* current_brk;     /* address returned by pre-check sbrk call */
  char* new_brk;         /* address returned by post-check sbrk call */
  size_t pagesz;

  pagesz = av->pagesize;
  top_size = chunksize(av->top);

  /* Release in pagesize units, keeping at least one page */
  extra = ((top_size - pad - MINSIZE + (pagesz-1)) / pagesz - 1) * pagesz;

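  /*
    Illustrative note (not part of the original source): e.g. with
    pagesz == 4096, pad == 0 and MINSIZE == 16 (typical 32-bit values),
    a top chunk of 140000 bytes gives
    extra == ((140000 - 0 - 16 + 4095) / 4096 - 1) * 4096 == 34 * 4096
          == 139264, so about 34 pages can be handed back while at least
    one page (plus pad) stays in top.
  */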
					
						
  if (extra > 0) {

    /*
      Only proceed if end of memory is where we last set it.
      This avoids problems if there were foreign sbrk calls.
    */
    current_brk = (char*)(MORECORE(0));
    if (current_brk == (char*)(av->top) + top_size) {

      /*
        Attempt to release memory. We ignore MORECORE return value,
        and instead call again to find out where new end of memory is.
        This avoids problems if first call releases less than we asked,
        or if failure somehow altered brk value. (We could still
        encounter problems if it altered brk in some very bad way,
        but the only thing we can do is adjust anyway, which will cause
        some downstream failure.)
      */

      MORECORE(-extra);
      new_brk = (char*)(MORECORE(0));

      if (new_brk != (char*)MORECORE_FAILURE) {
        released = (long)(current_brk - new_brk);

        if (released != 0) {
          /* Success. Adjust top. */
          av->sbrked_mem -= released;
          set_head(av->top, (top_size - released) | PREV_INUSE);
          check_malloc_state();
          return 1;
        }
      }
    }
  }
  return 0;
}

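/*
  Illustrative note (not part of the original source): the usual caller is
  fREe(), which, after consolidating a freed chunk into av->top, does
  something along the lines of

      if ((CHUNK_SIZE_T)(chunksize(av->top)) >=
          (CHUNK_SIZE_T)(av->trim_threshold))
        sYSTRIm(av->top_pad, av);

  The exact condition lives further down in this file; this is only a
  sketch of the call pattern.
*/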
					
						
/*
  ------------------------------ malloc ------------------------------
*/


#if __STD_C
Void_t* mALLOc(size_t bytes)
#else
Void_t* mALLOc(bytes) size_t bytes;
#endif
{
  mstate av = get_malloc_state();

  INTERNAL_SIZE_T nb;               /* normalized request size */
  unsigned int    idx;              /* associated bin index */
  mbinptr         bin;              /* associated bin */
  mfastbinptr*    fb;               /* associated fastbin */

  mchunkptr       victim;           /* inspected/selected chunk */
  INTERNAL_SIZE_T size;             /* its size */
  int             victim_index;     /* its bin index */

  mchunkptr       remainder;        /* remainder from a split */
  CHUNK_SIZE_T    remainder_size;   /* its size */

  unsigned int    block;            /* bit map traverser */
  unsigned int    bit;              /* bit map traverser */
  unsigned int    map;              /* current word of binmap */

  mchunkptr       fwd;              /* misc temp for linking */
  mchunkptr       bck;              /* misc temp for linking */

  /*
    Convert request size to internal form by adding SIZE_SZ bytes
    overhead plus possibly more to obtain necessary alignment and/or
    to obtain a size of at least MINSIZE, the smallest allocatable
    size. Also, checked_request2size traps (returning 0) request sizes
    that are so large that they wrap around zero when padded and
    aligned.
  */

  checked_request2size(bytes, nb);

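  /*
    Illustrative note (not part of the original source): assuming
    SIZE_SZ == 4, MALLOC_ALIGNMENT == 8 and MINSIZE == 16 (a common 32-bit
    layout), a request of 5 bytes becomes nb == (5 + 4 + 7) & ~7 == 16,
    a request of 25 bytes becomes nb == (25 + 4 + 7) & ~7 == 32, and a
    request of 0 bytes still yields nb == MINSIZE.
  */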
					
						
  /*
    Bypass search if no frees yet
   */
  if (!have_anychunks(av)) {
    if (av->max_fast == 0) /* initialization check */
      malloc_consolidate(av);
    goto use_top;
  }

  /*
    If the size qualifies as a fastbin, first check corresponding bin.
  */

  if ((CHUNK_SIZE_T)(nb) <= (CHUNK_SIZE_T)(av->max_fast)) {
    fb = &(av->fastbins[(fastbin_index(nb))]);
    if ( (victim = *fb) != 0) {
      *fb = victim->fd;
      check_remalloced_chunk(victim, nb);
      return chunk2mem(victim);
    }
  }

  /*
    If a small request, check regular bin.  Since these "smallbins"
    hold one size each, no searching within bins is necessary.
    (For a large request, we need to wait until unsorted chunks are
    processed to find best fit. But for small ones, fits are exact
    anyway, so we can check now, which is faster.)
  */

  if (in_smallbin_range(nb)) {
    idx = smallbin_index(nb);
    bin = bin_at(av,idx);

    if ( (victim = last(bin)) != bin) {
      bck = victim->bk;
      set_inuse_bit_at_offset(victim, nb);
      bin->bk = bck;
      bck->fd = bin;

      check_malloced_chunk(victim, nb);
      return chunk2mem(victim);
    }
  }

  /*
     If this is a large request, consolidate fastbins before continuing.
     While it might look excessive to kill all fastbins before
     even seeing if there is space available, this avoids
     fragmentation problems normally associated with fastbins.
     Also, in practice, programs tend to have runs of either small or
     large requests, but less often mixtures, so consolidation is not
     invoked all that often in most programs. And the programs in which
     it is called frequently otherwise tend to fragment.
  */

  else {
    idx = largebin_index(nb);
    if (have_fastchunks(av))
      malloc_consolidate(av);
  }

  /*
    Process recently freed or remaindered chunks, taking one only if
    it is an exact fit, or, if this is a small request, the chunk is the
    remainder from the most recent non-exact fit.  Place other traversed
    chunks in bins.  Note that this step is the only place in any routine
    where chunks are placed in bins.
  */

  while ( (victim = unsorted_chunks(av)->bk) != unsorted_chunks(av)) {
    bck = victim->bk;
    size = chunksize(victim);

    /*
       If a small request, try to use last remainder if it is the
       only chunk in unsorted bin.  This helps promote locality for
       runs of consecutive small requests. This is the only
       exception to best-fit, and applies only when there is
       no exact fit for a small chunk.
    */

    if (in_smallbin_range(nb) &&
        bck == unsorted_chunks(av) &&
        victim == av->last_remainder &&
        (CHUNK_SIZE_T)(size) > (CHUNK_SIZE_T)(nb + MINSIZE)) {

      /* split and reattach remainder */
      remainder_size = size - nb;
      remainder = chunk_at_offset(victim, nb);
      unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
      av->last_remainder = remainder;
      remainder->bk = remainder->fd = unsorted_chunks(av);

      set_head(victim, nb | PREV_INUSE);
      set_head(remainder, remainder_size | PREV_INUSE);
      set_foot(remainder, remainder_size);

      check_malloced_chunk(victim, nb);
      return chunk2mem(victim);
    }

    /* remove from unsorted list */
    unsorted_chunks(av)->bk = bck;
    bck->fd = unsorted_chunks(av);

    /* Take now instead of binning if exact fit */

    if (size == nb) {
      set_inuse_bit_at_offset(victim, size);
      check_malloced_chunk(victim, nb);
      return chunk2mem(victim);
    }

    /* place chunk in bin */

    if (in_smallbin_range(size)) {
      victim_index = smallbin_index(size);
      bck = bin_at(av, victim_index);
      fwd = bck->fd;
    }
    else {
      victim_index = largebin_index(size);
      bck = bin_at(av, victim_index);
      fwd = bck->fd;

      if (fwd != bck) {
        /* if smaller than smallest, place first */
        if ((CHUNK_SIZE_T)(size) < (CHUNK_SIZE_T)(bck->bk->size)) {
          fwd = bck;
          bck = bck->bk;
        }
        else if ((CHUNK_SIZE_T)(size) >=
                 (CHUNK_SIZE_T)(FIRST_SORTED_BIN_SIZE)) {

          /* maintain large bins in sorted order */
          size |= PREV_INUSE; /* Or with inuse bit to speed comparisons */
          while ((CHUNK_SIZE_T)(size) < (CHUNK_SIZE_T)(fwd->size))
            fwd = fwd->fd;
          bck = fwd->bk;
        }
      }
    }

    mark_bin(av, victim_index);
    victim->bk = bck;
    victim->fd = fwd;
    fwd->bk = victim;
    bck->fd = victim;
  }

  /*
    If a large request, scan through the chunks of current bin to
    find one that fits.  (This will be the smallest that fits unless
    FIRST_SORTED_BIN_SIZE has been changed from default.)  This is
    the only step where an unbounded number of chunks might be
    scanned without doing anything useful with them. However the
    lists tend to be short.
  */

  if (!in_smallbin_range(nb)) {
    bin = bin_at(av, idx);

    for (victim = last(bin); victim != bin; victim = victim->bk) {
      size = chunksize(victim);

      if ((CHUNK_SIZE_T)(size) >= (CHUNK_SIZE_T)(nb)) {
        remainder_size = size - nb;
        dl_unlink(victim, bck, fwd);

        /* Exhaust */
        if (remainder_size < MINSIZE)  {
          set_inuse_bit_at_offset(victim, size);
          check_malloced_chunk(victim, nb);
          return chunk2mem(victim);
        }
        /* Split */
        else {
          remainder = chunk_at_offset(victim, nb);
          unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
          remainder->bk = remainder->fd = unsorted_chunks(av);
          set_head(victim, nb | PREV_INUSE);
          set_head(remainder, remainder_size | PREV_INUSE);
          set_foot(remainder, remainder_size);
          check_malloced_chunk(victim, nb);
          return chunk2mem(victim);
        }
      }
    }
  }

  /*
    Search for a chunk by scanning bins, starting with next largest
    bin. This search is strictly by best-fit; i.e., the smallest
    (with ties going to approximately the least recently used) chunk
    that fits is selected.

    The bitmap avoids needing to check that most blocks are nonempty.
  */

  ++idx;
  bin = bin_at(av,idx);
  block = idx2block(idx);
  map = av->binmap[block];
  bit = idx2bit(idx);

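  /*
    Illustrative note (not part of the original source): with the default
    BINMAPSHIFT of 5, each binmap word covers 32 bins, so roughly

        block = idx >> 5;          (which word of av->binmap)
        bit   = 1 << (idx & 31);   (which bit within that word)

    The loop below walks words of the map, skipping whole groups of 32
    empty bins at a time.
  */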
					
						
  for (;;) {

    /* Skip rest of block if there are no more set bits in this block.  */
    if (bit > map || bit == 0) {
      do {
        if (++block >= BINMAPSIZE)  /* out of bins */
          goto use_top;
      } while ( (map = av->binmap[block]) == 0);

      bin = bin_at(av, (block << BINMAPSHIFT));
      bit = 1;
    }

    /* Advance to bin with set bit. There must be one. */
    while ((bit & map) == 0) {
      bin = next_bin(bin);
      bit <<= 1;
      assert(bit != 0);
    }

    /* Inspect the bin. It is likely to be non-empty */
    victim = last(bin);

    /*  If a false alarm (empty bin), clear the bit. */
    if (victim == bin) {
      av->binmap[block] = map &= ~bit; /* Write through */
      bin = next_bin(bin);
      bit <<= 1;
    }

    else {
      size = chunksize(victim);

      /*  We know the first chunk in this bin is big enough to use. */
      assert((CHUNK_SIZE_T)(size) >= (CHUNK_SIZE_T)(nb));

      remainder_size = size - nb;

      /* dl_unlink */
      bck = victim->bk;
      bin->bk = bck;
      bck->fd = bin;

      /* Exhaust */
      if (remainder_size < MINSIZE) {
        set_inuse_bit_at_offset(victim, size);
        check_malloced_chunk(victim, nb);
        return chunk2mem(victim);
      }

      /* Split */
      else {
        remainder = chunk_at_offset(victim, nb);

        unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
        remainder->bk = remainder->fd = unsorted_chunks(av);
        /* advertise as last remainder */
        if (in_smallbin_range(nb))
          av->last_remainder = remainder;

        set_head(victim, nb | PREV_INUSE);
        set_head(remainder, remainder_size | PREV_INUSE);
        set_foot(remainder, remainder_size);
        check_malloced_chunk(victim, nb);
        return chunk2mem(victim);
      }
    }
  }

  use_top:
  /*
    If large enough, split off the chunk bordering the end of memory
    (held in av->top). Note that this is in accord with the best-fit
    search rule.  In effect, av->top is treated as larger (and thus
    less well fitting) than any other available chunk since it can
    be extended to be as large as necessary (up to system
    limitations).

    We require that av->top always exists (i.e., has size >=
    MINSIZE) after initialization, so if it would otherwise be
    exhausted by current request, it is replenished. (The main
    reason for ensuring it exists is that we may need MINSIZE space
    to put in fenceposts in sysmalloc.)
  */

  victim = av->top;
  size = chunksize(victim);

  if ((CHUNK_SIZE_T)(size) >= (CHUNK_SIZE_T)(nb + MINSIZE)) {
    remainder_size = size - nb;
    remainder = chunk_at_offset(victim, nb);
    av->top = remainder;
    set_head(victim, nb | PREV_INUSE);
    set_head(remainder, remainder_size | PREV_INUSE);

    check_malloced_chunk(victim, nb);
    return chunk2mem(victim);
  }

  /*
     If no space in top, relay to handle system-dependent cases
  */
  return sYSMALLOc(nb, av);
}

/*
  ------------------------------ free ------------------------------
*/

#if __STD_C
void fREe(Void_t* mem)
#else
void fREe(mem) Void_t* mem;
#endif
{
  mstate av = get_malloc_state();

  mchunkptr       p;           /* chunk corresponding to mem */
  INTERNAL_SIZE_T size;        /* its size */
  mfastbinptr*    fb;          /* associated fastbin */
  mchunkptr       nextchunk;   /* next contiguous chunk */
  INTERNAL_SIZE_T nextsize;    /* its size */
  int             nextinuse;   /* true if nextchunk is used */
  INTERNAL_SIZE_T prevsize;    /* size of previous contiguous chunk */
  mchunkptr       bck;         /* misc temp for linking */
  mchunkptr       fwd;         /* misc temp for linking */

  /* free(0) has no effect */
  if (mem != 0) {
    p = mem2chunk(mem);
    size = chunksize(p);

    check_inuse_chunk(p);

    /*
      If eligible, place chunk on a fastbin so it can be found
      and used quickly in malloc.
    */

    if ((CHUNK_SIZE_T)(size) <= (CHUNK_SIZE_T)(av->max_fast)

#if TRIM_FASTBINS
        /*
           If TRIM_FASTBINS set, don't place chunks
           bordering top into fastbins
        */
        && (chunk_at_offset(p, size) != av->top)
#endif
        ) {

      set_fastchunks(av);
      fb = &(av->fastbins[fastbin_index(size)]);
      p->fd = *fb;
      *fb = p;
    }

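    /*
      Illustrative note (not part of the original source): chunks parked in
      a fastbin keep their inuse bit set, so neighbouring free chunks never
      coalesce with them; they stay available for an equally sized malloc
      until malloc_consolidate() later coalesces and rebins them.
    */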
					
						
    /*
       Consolidate other non-mmapped chunks as they arrive.
    */

    else if (!chunk_is_mmapped(p)) {
      set_anychunks(av);

      nextchunk = chunk_at_offset(p, size);
      nextsize = chunksize(nextchunk);

      /* consolidate backward */
      if (!prev_inuse(p)) {
        prevsize = p->prev_size;
        size += prevsize;
        p = chunk_at_offset(p, -((long) prevsize));
        dl_unlink(p, bck, fwd);
      }

      if (nextchunk != av->top) {
        /* get and clear inuse bit */
        nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
        set_head(nextchunk, nextsize);

        /* consolidate forward */
        if (!nextinuse) {
          dl_unlink(nextchunk, bck, fwd);
          size += nextsize;
        }

        /*
          Place the chunk in unsorted chunk list. Chunks are
          not placed into regular bins until after they have
          been given one chance to be used in malloc.
        */

        bck = unsorted_chunks(av);
        fwd = bck->fd;
        p->bk = bck;
        p->fd = fwd;
        bck->fd = p;
        fwd->bk = p;

        set_head(p, size | PREV_INUSE);
        set_foot(p, size);

        check_free_chunk(p);
      }

      /*
         If the chunk borders the current high end of memory,
         consolidate into top
      */

      else {
        size += nextsize;
        set_head(p, size | PREV_INUSE);
        av->top = p;
        check_chunk(p);
      }

      /*
        If freeing a large space, consolidate possibly-surrounding
        chunks. Then, if the total unused topmost memory exceeds trim
        threshold, ask malloc_trim to reduce top.

        Unless max_fast is 0, we don't know if there are fastbins
        bordering top, so we cannot tell for sure whether threshold
					
						
							|  |  |  |         has been reached unless fastbins are consolidated.  But we | 
					
						
							|  |  |  |         don't want to consolidate on each free.  As a compromise, | 
					
						
							|  |  |  |         consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD | 
					
						
							|  |  |  |         is reached. | 
					
						
							|  |  |  |       */ | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  |       if ((CHUNK_SIZE_T)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {  | 
					
						
							|  |  |  |         if (have_fastchunks(av))  | 
					
						
							|  |  |  |           malloc_consolidate(av); | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | #ifndef MORECORE_CANNOT_TRIM        
 | 
					
						
							|  |  |  |         if ((CHUNK_SIZE_T)(chunksize(av->top)) >=  | 
					
						
							|  |  |  |             (CHUNK_SIZE_T)(av->trim_threshold)) | 
					
						
							|  |  |  |           sYSTRIm(av->top_pad, av); | 
					
						
							|  |  |  | #endif
 | 
					
						
							|  |  |  |       } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  |     } | 
					
						
							|  |  |  |     /*
 | 
					
						
							|  |  |  |       If the chunk was allocated via mmap, release via munmap() | 
					
						
							|  |  |  |       Note that if HAVE_MMAP is false but chunk_is_mmapped is | 
					
						
							|  |  |  |       true, then user must have overwritten memory. There's nothing | 
					
						
							|  |  |  |       we can do to catch this error unless DEBUG is set, in which case | 
					
						
							|  |  |  |       check_inuse_chunk (above) will have triggered error. | 
					
						
							|  |  |  |     */ | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  |     else { | 
					
						
							|  |  |  |     } | 
					
						
							|  |  |  |   } | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
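
/*
  Illustrative sketch (not part of dlmalloc or YAP): the fast path of
  fREe() above pushes the freed chunk onto a singly-linked LIFO fastbin
  threaded through the fd field, so the common case is one load and two
  stores.  The toy types below are hypothetical simplifications kept
  under #if 0 so they are never compiled.
*/
#if 0
struct toy_chunk { struct toy_chunk *fd; };

static struct toy_chunk *toy_fastbin[8];   /* one list head per size class */

static void toy_fast_free(struct toy_chunk *c, int ix)
{
  c->fd = toy_fastbin[ix];   /* link the old head behind the freed chunk */
  toy_fastbin[ix] = c;       /* the freed chunk becomes the new head */
}
#endif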

/*
  ------------------------- malloc_consolidate -------------------------

  malloc_consolidate is a specialized version of free() that tears
  down chunks held in fastbins.  Free itself cannot be used for this
  purpose since, among other things, it might place chunks back onto
  fastbins.  So, instead, we need to use a minor variant of the same
  code.

  Also, because this routine needs to be called the first time through
  malloc anyway, it turns out to be the perfect place to trigger
  initialization code.
*/

#if __STD_C
static void malloc_consolidate(mstate av)
#else
static void malloc_consolidate(av) mstate av;
#endif
{
  mfastbinptr*    fb;                 /* current fastbin being consolidated */
  mfastbinptr*    maxfb;              /* last fastbin (for loop control) */
  mchunkptr       p;                  /* current chunk being consolidated */
  mchunkptr       nextp;              /* next chunk to consolidate */
  mchunkptr       unsorted_bin;       /* bin header */
  mchunkptr       first_unsorted;     /* chunk to link to */

  /* These have same use as in free() */
  mchunkptr       nextchunk;
  INTERNAL_SIZE_T size;
  INTERNAL_SIZE_T nextsize;
  INTERNAL_SIZE_T prevsize;
  int             nextinuse;
  mchunkptr       bck;
  mchunkptr       fwd;

  /*
    If max_fast is 0, we know that av hasn't
    yet been initialized, in which case do so below
  */

  if (av->max_fast != 0) {
    clear_fastchunks(av);

    unsorted_bin = unsorted_chunks(av);

    /*
      Remove each chunk from fast bin and consolidate it, placing it
      then in unsorted bin. Among other reasons for doing this,
      placing in unsorted bin avoids needing to calculate actual bins
      until malloc is sure that chunks aren't immediately going to be
      reused anyway.
    */

    maxfb = &(av->fastbins[fastbin_index(av->max_fast)]);
    fb = &(av->fastbins[0]);
    do {
      if ( (p = *fb) != 0) {
        *fb = 0;

        do {
          check_inuse_chunk(p);
          nextp = p->fd;

          /* Slightly streamlined version of consolidation code in free() */
          size = p->size & ~PREV_INUSE;
          nextchunk = chunk_at_offset(p, size);
          nextsize = chunksize(nextchunk);

          if (!prev_inuse(p)) {
            prevsize = p->prev_size;
            size += prevsize;
            p = chunk_at_offset(p, -((long) prevsize));
            dl_unlink(p, bck, fwd);
          }

          if (nextchunk != av->top) {
            nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
            set_head(nextchunk, nextsize);

            if (!nextinuse) {
              size += nextsize;
              dl_unlink(nextchunk, bck, fwd);
            }

            first_unsorted = unsorted_bin->fd;
            unsorted_bin->fd = p;
            first_unsorted->bk = p;

            set_head(p, size | PREV_INUSE);
            p->bk = unsorted_bin;
            p->fd = first_unsorted;
            set_foot(p, size);
          }

          else {
            size += nextsize;
            set_head(p, size | PREV_INUSE);
            av->top = p;
          }

        } while ( (p = nextp) != 0);

      }
    } while (fb++ != maxfb);
  }
  else {
    malloc_init_state(av);
    check_malloc_state();
  }
}
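
/*
  Illustrative sketch (not part of dlmalloc or YAP): the drain loop in
  malloc_consolidate() above detaches each fastbin with a single store and
  then walks it, saving the fd pointer of every chunk before the chunk is
  coalesced and relinked elsewhere.  The toy types and the
  toy_coalesce_and_bin helper are hypothetical, shown only to isolate that
  idiom; the block is kept under #if 0.
*/
#if 0
struct toy_chunk { struct toy_chunk *fd; };
extern struct toy_chunk *toy_fastbin[8];
extern void toy_coalesce_and_bin(struct toy_chunk *c);

static void toy_drain_fastbins(void)
{
  int ix;
  for (ix = 0; ix < 8; ix++) {
    struct toy_chunk *p = toy_fastbin[ix];
    toy_fastbin[ix] = 0;                /* empty the bin up front */
    while (p != 0) {
      struct toy_chunk *nextp = p->fd;  /* read before p is relinked */
      toy_coalesce_and_bin(p);
      p = nextp;
    }
  }
}
#endif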

/*
  ------------------------------ realloc ------------------------------
*/

#if __STD_C
Void_t* rEALLOc(Void_t* oldmem, size_t bytes)
#else
Void_t* rEALLOc(oldmem, bytes) Void_t* oldmem; size_t bytes;
#endif
{
  mstate av = get_malloc_state();

  INTERNAL_SIZE_T  nb;              /* padded request size */

  mchunkptr        oldp;            /* chunk corresponding to oldmem */
  INTERNAL_SIZE_T  oldsize;         /* its size */

  mchunkptr        newp;            /* chunk to return */
  INTERNAL_SIZE_T  newsize;         /* its size */
  Void_t*          newmem;          /* corresponding user mem */

  mchunkptr        next;            /* next contiguous chunk after oldp */

  mchunkptr        remainder;       /* extra space at end of newp */
  CHUNK_SIZE_T     remainder_size;  /* its size */

  mchunkptr        bck;             /* misc temp for linking */
  mchunkptr        fwd;             /* misc temp for linking */

  CHUNK_SIZE_T     copysize;        /* bytes to copy */
  unsigned int     ncopies;         /* INTERNAL_SIZE_T words to copy */
  INTERNAL_SIZE_T* s;               /* copy source */
  INTERNAL_SIZE_T* d;               /* copy destination */


#ifdef REALLOC_ZERO_BYTES_FREES
  if (bytes == 0) {
    fREe(oldmem);
    return 0;
  }
#endif

  /* realloc of null is supposed to be same as malloc */
  if (oldmem == 0) return mALLOc(bytes);

  checked_request2size(bytes, nb);

  oldp    = mem2chunk(oldmem);
  oldsize = chunksize(oldp);

  check_inuse_chunk(oldp);

  if (!chunk_is_mmapped(oldp)) {

    if ((CHUNK_SIZE_T)(oldsize) >= (CHUNK_SIZE_T)(nb)) {
      /* already big enough; split below */
      newp = oldp;
      newsize = oldsize;
    }

    else {
      next = chunk_at_offset(oldp, oldsize);

      /* Try to expand forward into top */
      if (next == av->top &&
          (CHUNK_SIZE_T)(newsize = oldsize + chunksize(next)) >=
          (CHUNK_SIZE_T)(nb + MINSIZE)) {
        set_head_size(oldp, nb);
        av->top = chunk_at_offset(oldp, nb);
        set_head(av->top, (newsize - nb) | PREV_INUSE);
        return chunk2mem(oldp);
      }

      /* Try to expand forward into next chunk;  split off remainder below */
      else if (next != av->top &&
               !inuse(next) &&
               (CHUNK_SIZE_T)(newsize = oldsize + chunksize(next)) >=
               (CHUNK_SIZE_T)(nb)) {
        newp = oldp;
        dl_unlink(next, bck, fwd);
      }

      /* allocate, copy, free */
      else {
        newmem = mALLOc(nb - MALLOC_ALIGN_MASK);
        if (newmem == 0)
          return 0; /* propagate failure */

        newp = mem2chunk(newmem);
        newsize = chunksize(newp);

        /*
          Avoid copy if newp is next chunk after oldp.
        */
        if (newp == next) {
          newsize += oldsize;
          newp = oldp;
        }
        else {
          /*
            Unroll copy of <= 36 bytes (72 if 8byte sizes)
            We know that contents have an odd number of
            INTERNAL_SIZE_T-sized words; minimally 3.
          */

          copysize = oldsize - SIZE_SZ;
          s = (INTERNAL_SIZE_T*)(oldmem);
          d = (INTERNAL_SIZE_T*)(newmem);
          ncopies = copysize / sizeof(INTERNAL_SIZE_T);
          assert(ncopies >= 3);

          if (ncopies > 9)
            memcpy(d, s, copysize);

          else {
            *(d+0) = *(s+0);
            *(d+1) = *(s+1);
            *(d+2) = *(s+2);
            if (ncopies > 4) {
              *(d+3) = *(s+3);
              *(d+4) = *(s+4);
              if (ncopies > 6) {
                *(d+5) = *(s+5);
                *(d+6) = *(s+6);
                if (ncopies > 8) {
                  *(d+7) = *(s+7);
                  *(d+8) = *(s+8);
                }
              }
            }
          }

          fREe(oldmem);
          check_inuse_chunk(newp);
          return chunk2mem(newp);
        }
      }
    }

    /* If possible, free extra space in old or extended chunk */

    assert((CHUNK_SIZE_T)(newsize) >= (CHUNK_SIZE_T)(nb));

    remainder_size = newsize - nb;

    if (remainder_size < MINSIZE) { /* not enough extra to split off */
      set_head_size(newp, newsize);
      set_inuse_bit_at_offset(newp, newsize);
    }
    else { /* split remainder */
      remainder = chunk_at_offset(newp, nb);
      set_head_size(newp, nb);
      set_head(remainder, remainder_size | PREV_INUSE);
      /* Mark remainder as inuse so free() won't complain */
      set_inuse_bit_at_offset(remainder, remainder_size);
      fREe(chunk2mem(remainder));
    }

    check_inuse_chunk(newp);
    return chunk2mem(newp);
  }

  /*
    Handle mmap cases
  */

  else {
#if HAVE_MMAP

#if HAVE_MREMAP
    INTERNAL_SIZE_T offset = oldp->prev_size;
    size_t pagemask = av->pagesize - 1;
    char *cp;
    CHUNK_SIZE_T  sum;

    /* Note the extra SIZE_SZ overhead */
    newsize = (nb + offset + SIZE_SZ + pagemask) & ~pagemask;

    /* don't need to remap if still within same page */
    if (oldsize == newsize - offset)
      return oldmem;

    cp = (char*)mremap((char*)oldp - offset, oldsize + offset, newsize, 1);

    if (cp != (char*)MORECORE_FAILURE) {

      newp = (mchunkptr)(cp + offset);
      set_head(newp, (newsize - offset)|IS_MMAPPED);

      assert(aligned_OK(chunk2mem(newp)));
      assert((newp->prev_size == offset));

      /* update statistics */
      sum = av->mmapped_mem += newsize - oldsize;
      if (sum > (CHUNK_SIZE_T)(av->max_mmapped_mem))
        av->max_mmapped_mem = sum;
      sum += av->sbrked_mem;
      if (sum > (CHUNK_SIZE_T)(av->max_total_mem))
        av->max_total_mem = sum;

      return chunk2mem(newp);
    }
#endif

    /* Note the extra SIZE_SZ overhead. */
    if ((CHUNK_SIZE_T)(oldsize) >= (CHUNK_SIZE_T)(nb + SIZE_SZ))
      newmem = oldmem; /* do nothing */
    else {
      /* Must alloc, copy, free. */
      newmem = mALLOc(nb - MALLOC_ALIGN_MASK);
      if (newmem != 0) {
        memcpy(newmem, oldmem, oldsize - 2*SIZE_SZ);
        fREe(oldmem);
      }
    }
    return newmem;

#else
    /* If !HAVE_MMAP, but chunk_is_mmapped, user must have overwritten mem */
    check_malloc_state();
    MALLOC_FAILURE_ACTION;
    return 0;
#endif
  }
}
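
/*
  Caller-side sketch (not part of the allocator): rEALLOc() above returns
  0 on failure and, outside the REALLOC_ZERO_BYTES_FREES case, leaves the
  old block untouched, so callers should keep the old pointer until the
  new one is known to be valid.  "grow_buffer" is a hypothetical helper
  written only for illustration and is kept under #if 0.
*/
#if 0
static Void_t *grow_buffer(Void_t *buf, size_t new_bytes)
{
  Void_t *tmp = rEALLOc(buf, new_bytes);
  if (tmp == 0)
    return buf;     /* allocation failed: the old buffer is still usable */
  return tmp;       /* may or may not be the same address as buf */
}
#endif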

/*
  ------------------------------ memalign ------------------------------
*/

#if __STD_C
Void_t* mEMALIGn(size_t alignment, size_t bytes)
#else
Void_t* mEMALIGn(alignment, bytes) size_t alignment; size_t bytes;
#endif
{
  INTERNAL_SIZE_T nb;             /* padded  request size */
  char*           m;              /* memory returned by malloc call */
  mchunkptr       p;              /* corresponding chunk */
  char*           brk;            /* alignment point within p */
  mchunkptr       newp;           /* chunk to return */
  INTERNAL_SIZE_T newsize;        /* its size */
  INTERNAL_SIZE_T leadsize;       /* leading space before alignment point */
  mchunkptr       remainder;      /* spare room at end to split off */
  CHUNK_SIZE_T    remainder_size; /* its size */
  INTERNAL_SIZE_T size;

  /* If need less alignment than we give anyway, just relay to malloc */

  if (alignment <= MALLOC_ALIGNMENT) return mALLOc(bytes);

  /* Otherwise, ensure that it is at least a minimum chunk size */

  if (alignment <  MINSIZE) alignment = MINSIZE;

  /* Make sure alignment is power of 2 (in case MINSIZE is not).  */
  if ((alignment & (alignment - 1)) != 0) {
    size_t a = MALLOC_ALIGNMENT * 2;
    while ((CHUNK_SIZE_T)a < (CHUNK_SIZE_T)alignment) a <<= 1;
    alignment = a;
  }

  checked_request2size(bytes, nb);

  /*
    Strategy: find a spot within that chunk that meets the alignment
    request, and then possibly free the leading and trailing space.
  */


  /* Call malloc with worst case padding to hit alignment. */

  m  = (char*)(mALLOc(nb + alignment + MINSIZE));

  if (m == 0) return 0; /* propagate failure */

  p = mem2chunk(m);

  if ((((PTR_UINT)(m)) % alignment) != 0) { /* misaligned */

    /*
      Find an aligned spot inside chunk.  Since we need to give back
      leading space in a chunk of at least MINSIZE, if the first
      calculation places us at a spot with less than MINSIZE leader,
      we can move to the next aligned spot -- we've allocated enough
      total room so that this is always possible.
    */

    brk = (char*)mem2chunk((PTR_UINT)(((PTR_UINT)(m + alignment - 1)) &
                           -((signed long) alignment)));
    if ((CHUNK_SIZE_T)(brk - (char*)(p)) < MINSIZE)
      brk += alignment;

    newp = (mchunkptr)brk;
    leadsize = brk - (char*)(p);
    newsize = chunksize(p) - leadsize;

    /* For mmapped chunks, just adjust offset */
    if (chunk_is_mmapped(p)) {
      newp->prev_size = p->prev_size + leadsize;
      set_head(newp, newsize|IS_MMAPPED);
      return chunk2mem(newp);
    }

    /* Otherwise, give back leader, use the rest */
    set_head(newp, newsize | PREV_INUSE);
    set_inuse_bit_at_offset(newp, newsize);
    set_head_size(p, leadsize);
    fREe(chunk2mem(p));
    p = newp;

    assert (newsize >= nb &&
            (((PTR_UINT)(chunk2mem(p))) % alignment) == 0);
  }

  /* Also give back spare room at the end */
  if (!chunk_is_mmapped(p)) {
    size = chunksize(p);
    if ((CHUNK_SIZE_T)(size) > (CHUNK_SIZE_T)(nb + MINSIZE)) {
      remainder_size = size - nb;
      remainder = chunk_at_offset(p, nb);
      set_head(remainder, remainder_size | PREV_INUSE);
      set_head_size(p, nb);
      fREe(chunk2mem(remainder));
    }
  }

  check_inuse_chunk(p);
  return chunk2mem(p);
}
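
/*
  Illustration only (kept under #if 0): mEMALIGn() above finds the aligned
  spot with the usual power-of-two round-up, which for an address x and a
  power-of-two align is (x + align - 1) & ~(align - 1) -- the code uses the
  equivalent "& -alignment" form.  The 64-byte alignment and the helper
  names below are assumptions made for the example, not part of YAP.
*/
#if 0
static size_t toy_round_up(size_t x, size_t align)
{
  return (x + align - 1) & ~(align - 1);  /* e.g. toy_round_up(100, 64) == 128 */
}

static void memalign_example(void)
{
  Void_t *q = mEMALIGn(64, 1000);         /* 1000 bytes, start 64-byte aligned */
  if (q != 0) {
    assert(((PTR_UINT)q % 64) == 0);
    fREe(q);
  }
}
#endif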

/*
  ------------------------------ calloc ------------------------------
*/

#if __STD_C
Void_t* cALLOc(size_t n_elements, size_t elem_size)
#else
Void_t* cALLOc(n_elements, elem_size) size_t n_elements; size_t elem_size;
#endif
{
  mchunkptr p;
  CHUNK_SIZE_T  clearsize;
  CHUNK_SIZE_T  nclears;
  INTERNAL_SIZE_T* d;

  Void_t* mem = mALLOc(n_elements * elem_size);

  if (mem != 0) {
    p = mem2chunk(mem);

    if (!chunk_is_mmapped(p))
    {
      /*
        Unroll clear of <= 36 bytes (72 if 8byte sizes)
        We know that contents have an odd number of
        INTERNAL_SIZE_T-sized words; minimally 3.
      */

      d = (INTERNAL_SIZE_T*)mem;
      clearsize = chunksize(p) - SIZE_SZ;
      nclears = clearsize / sizeof(INTERNAL_SIZE_T);
      assert(nclears >= 3);

      if (nclears > 9)
        memset(d, 0, clearsize);

      else {
        *(d+0) = 0;
        *(d+1) = 0;
        *(d+2) = 0;
        if (nclears > 4) {
          *(d+3) = 0;
          *(d+4) = 0;
          if (nclears > 6) {
            *(d+5) = 0;
            *(d+6) = 0;
            if (nclears > 8) {
              *(d+7) = 0;
              *(d+8) = 0;
            }
          }
        }
      }
    }
  }
  return mem;
}
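
/*
  Caller-side sketch (kept under #if 0): cALLOc() above computes
  n_elements * elem_size without checking for multiplication overflow, so
  a caller whose two counts come from untrusted input may want to guard
  the product itself.  "safe_calloc" is a hypothetical wrapper written
  only for illustration, not part of YAP.
*/
#if 0
static Void_t *safe_calloc(size_t n_elements, size_t elem_size)
{
  if (elem_size != 0 && n_elements > (size_t)-1 / elem_size)
    return 0;                       /* the product would wrap around */
  return cALLOc(n_elements, elem_size);
}
#endif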

/*
  ------------------------------ cfree ------------------------------
*/

#if __STD_C
void cFREe(Void_t *mem)
#else
void cFREe(mem) Void_t *mem;
#endif
{
  fREe(mem);
}

/*
  ------------------------- independent_calloc -------------------------
*/

#if __STD_C
Void_t** iCALLOc(size_t n_elements, size_t elem_size, Void_t* chunks[])
#else
Void_t** iCALLOc(n_elements, elem_size, chunks) size_t n_elements; size_t elem_size; Void_t* chunks[];
#endif
{
  size_t sz = elem_size; /* serves as 1-element array */
  /* opts arg of 3 means all elements are same size, and should be cleared */
  return iALLOc(n_elements, &sz, 3, chunks);
}

/*
  ------------------------- independent_comalloc -------------------------
*/

#if __STD_C
Void_t** iCOMALLOc(size_t n_elements, size_t sizes[], Void_t* chunks[])
#else
Void_t** iCOMALLOc(n_elements, sizes, chunks) size_t n_elements; size_t sizes[]; Void_t* chunks[];
#endif
{
  return iALLOc(n_elements, sizes, 0, chunks);
}

/*
  ------------------------------ ialloc ------------------------------
  ialloc provides common support for independent_X routines, handling all of
  the combinations that can result.

  The opts arg has:
    bit 0 set if all elements are same size (using sizes[0])
    bit 1 set if elements should be zeroed
*/


#if __STD_C
static Void_t** iALLOc(size_t n_elements,
                       size_t* sizes,
                       int opts,
                       Void_t* chunks[])
#else
static Void_t** iALLOc(n_elements, sizes, opts, chunks) size_t n_elements; size_t* sizes; int opts; Void_t* chunks[];
#endif
{
  mstate av = get_malloc_state();
  INTERNAL_SIZE_T element_size;   /* chunksize of each element, if all same */
  INTERNAL_SIZE_T contents_size;  /* total size of elements */
  INTERNAL_SIZE_T array_size;     /* request size of pointer array */
  Void_t*         mem;            /* malloced aggregate space */
  mchunkptr       p;              /* corresponding chunk */
  INTERNAL_SIZE_T remainder_size; /* remaining bytes while splitting */
  Void_t**        marray;         /* either "chunks" or malloced ptr array */
  mchunkptr       array_chunk;    /* chunk for malloced ptr array */
  INTERNAL_SIZE_T size;
  size_t          i;

  /* Ensure initialization */
  if (av->max_fast == 0) malloc_consolidate(av);

  /* compute array length, if needed */
  if (chunks != 0) {
    if (n_elements == 0)
      return chunks; /* nothing to do */
    marray = chunks;
    array_size = 0;
  }
  else {
    /* if empty req, must still return chunk representing empty array */
    if (n_elements == 0)
      return (Void_t**) mALLOc(0);
    marray = 0;
    array_size = request2size(n_elements * (sizeof(Void_t*)));
  }

  /* compute total element size */
  if (opts & 0x1) { /* all-same-size */
    element_size = request2size(*sizes);
    contents_size = n_elements * element_size;
  }
  else { /* add up all the sizes */
    element_size = 0;
    contents_size = 0;
    for (i = 0; i != n_elements; ++i)
      contents_size += request2size(sizes[i]);
  }

  /* subtract out alignment bytes from total to minimize overallocation */
  size = contents_size + array_size - MALLOC_ALIGN_MASK;

  /*
     Allocate the aggregate chunk.
     But first disable mmap so malloc won't use it, since
     we would not be able to later free/realloc space internal
     to a segregated mmap region.
  */
  mem = mALLOc(size);
  if (mem == 0)
    return 0;

  p = mem2chunk(mem);
  assert(!chunk_is_mmapped(p));
  remainder_size = chunksize(p);

  if (opts & 0x2) {       /* optionally clear the elements */
    memset(mem, 0, remainder_size - SIZE_SZ - array_size);
  }

  /* If not provided, allocate the pointer array as final part of chunk */
  if (marray == 0) {
    array_chunk = chunk_at_offset(p, contents_size);
    marray = (Void_t**) (chunk2mem(array_chunk));
    set_head(array_chunk, (remainder_size - contents_size) | PREV_INUSE);
    remainder_size = contents_size;
  }

  /* split out elements */
  for (i = 0; ; ++i) {
    marray[i] = chunk2mem(p);
    if (i != n_elements-1) {
      if (element_size != 0)
        size = element_size;
      else
        size = request2size(sizes[i]);
      remainder_size -= size;
      set_head(p, size | PREV_INUSE);
      p = chunk_at_offset(p, size);
    }
    else { /* the final element absorbs any overallocation slop */
      set_head(p, remainder_size | PREV_INUSE);
      break;
    }
  }

#if DEBUG_DLMALLOC
  if (marray != chunks) {
    /* final element must have exactly exhausted chunk */
    if (element_size != 0)
      assert(remainder_size == element_size);
    else
      assert(remainder_size == request2size(sizes[i]));
    check_inuse_chunk(mem2chunk(marray));
  }

  for (i = 0; i != n_elements; ++i)
    check_inuse_chunk(mem2chunk(marray[i]));
#endif

  return marray;
}
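
/*
  Usage sketch for the independent_X entry points above (kept under #if 0;
  the element counts and sizes are illustrative values only): both calls
  hand back an array of pointers carved out of one aggregate chunk, so the
  pieces are contiguous; each piece, and the pointer array itself when it
  was not supplied by the caller, is released with fREe().
*/
#if 0
static void ialloc_example(void)
{
  size_t sizes[3];
  Void_t **pool;
  Void_t **parts;
  int i;

  /* 100 equal-sized, zeroed elements allocated together */
  pool = iCALLOc(100, 16, 0);
  if (pool != 0) {
    for (i = 0; i < 100; i++)
      fREe(pool[i]);
    fREe(pool);                 /* the pointer array was allocated too */
  }

  /* three differently-sized pieces allocated together */
  sizes[0] = 24; sizes[1] = 40; sizes[2] = 8;
  parts = iCOMALLOc(3, sizes, 0);
  if (parts != 0) {
    for (i = 0; i < 3; i++)
      fREe(parts[i]);
    fREe(parts);
  }
}
#endif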

/*
  ------------------------------ valloc ------------------------------
*/

#if __STD_C
Void_t* vALLOc(size_t bytes)
#else
Void_t* vALLOc(bytes) size_t bytes;
#endif
{
  /* Ensure initialization */
  mstate av = get_malloc_state();
  if (av->max_fast == 0) malloc_consolidate(av);
  return mEMALIGn(av->pagesize, bytes);
}

/*
  ------------------------------ pvalloc ------------------------------
*/

#if __STD_C
Void_t* pVALLOc(size_t bytes)
#else
Void_t* pVALLOc(bytes) size_t bytes;
#endif
{
  mstate av = get_malloc_state();
  size_t pagesz;

  /* Ensure initialization */
  if (av->max_fast == 0) malloc_consolidate(av);
  pagesz = av->pagesize;
  return mEMALIGn(pagesz, (bytes + pagesz - 1) & ~(pagesz - 1));
}
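
/*
  Illustration only (kept under #if 0): vALLOc() page-aligns the start of
  the block, while pVALLOc() also rounds the request up to a whole number
  of pages with (bytes + pagesz - 1) & ~(pagesz - 1).  Assuming a
  4096-byte page (av->pagesize is whatever the system reports), a request
  of 1 byte is rounded to 4096 and 4097 to 8192.
*/
#if 0
static void pvalloc_example(void)
{
  Void_t *a = vALLOc(100);    /* page-aligned, usable size >= 100        */
  Void_t *b = pVALLOc(100);   /* page-aligned, rounded up to a full page */
  if (a != 0) fREe(a);
  if (b != 0) fREe(b);
}
#endif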

/*
  ------------------------------ malloc_trim ------------------------------
*/

#if __STD_C
int mTRIm(size_t pad)
#else
int mTRIm(pad) size_t pad;
#endif
{
  mstate av = get_malloc_state();
  /* Ensure initialization/consolidation */
  malloc_consolidate(av);

#ifndef MORECORE_CANNOT_TRIM
  return sYSTRIm(pad, av);
#else
  return 0;
#endif
}
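
/*
  Caller-side sketch (kept under #if 0): after releasing a large amount of
  memory, the embedding program can call mTRIm() to consolidate fastbins
  and ask sYSTRIm to return unused top space to the system, keeping "pad"
  bytes around for future growth.  The 128 KB pad is an arbitrary example
  value, not a recommendation.
*/
#if 0
static void trim_example(void)
{
  int trimmed = mTRIm(128 * 1024);  /* nonzero if memory was released */
  (void)trimmed;
}
#endif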

/*
  ------------------------- malloc_usable_size -------------------------
*/

#if __STD_C
size_t mUSABLe(Void_t* mem)
#else
size_t mUSABLe(mem) Void_t* mem;
#endif
{
  mchunkptr p;
  if (mem != 0) {
    p = mem2chunk(mem);
    if (chunk_is_mmapped(p))
      return chunksize(p) - 2*SIZE_SZ;
    else if (inuse(p))
      return chunksize(p) - SIZE_SZ;
  }
  return 0;
}
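
/*
  Illustration only (kept under #if 0): the usable size reported above is
  at least the number of bytes originally requested and may be larger
  because of padding and the minimum chunk size, so a caller can treat it
  as the block's effective capacity.
*/
#if 0
static void usable_size_example(void)
{
  Void_t *q = mALLOc(10);
  if (q != 0) {
    size_t cap = mUSABLe(q);   /* >= 10 */
    assert(cap >= 10);
    fREe(q);
  }
}
#endif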

/*
  ------------------------------ mallinfo ------------------------------
*/

struct mallinfo mALLINFo()
{
  mstate av = get_malloc_state();
  struct mallinfo mi;
  int i;
  mbinptr b;
  mchunkptr p;
  INTERNAL_SIZE_T avail;
  INTERNAL_SIZE_T fastavail;
  int nblocks;
  int nfastblocks;

  /* Ensure initialization */
  if (av->top == 0)  malloc_consolidate(av);

  check_malloc_state();

  /* Account for top */
  avail = chunksize(av->top);
  nblocks = 1;  /* top always exists */

  /* traverse fastbins */
  nfastblocks = 0;
  fastavail = 0;

  for (i = 0; i < NFASTBINS; ++i) {
    for (p = av->fastbins[i]; p != 0; p = p->fd) {
      ++nfastblocks;
      fastavail += chunksize(p);
    }
  }

  avail += fastavail;

  /* traverse regular bins */
  for (i = 1; i < NBINS; ++i) {
    b = bin_at(av, i);
    for (p = last(b); p != b; p = p->bk) {
      ++nblocks;
      avail += chunksize(p);
    }
  }

  mi.smblks = nfastblocks;
  mi.ordblks = nblocks;
  mi.fordblks = avail;
  mi.uordblks = av->sbrked_mem - avail;
  mi.arena = av->sbrked_mem;
  mi.fsmblks = fastavail;
  mi.keepcost = chunksize(av->top);
  mi.usmblks = av->max_total_mem;
  /* YAP doesn't have special mmapped regions */
  mi.hblkhd = 0L;
  mi.hblks = 0L;
  return mi;
}
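
/*
  Sketch of reading the fields filled in above (kept under #if 0):
  uordblks is the number of sbrk-ed bytes currently in use, fordblks the
  free bytes known to the allocator, and keepcost the size of the top
  chunk that mTRIm() could in principle give back.  Yap_givemallinfo()
  below returns uordblks; this example only prints the three fields.
*/
#if 0
static void mallinfo_example(void)
{
  struct mallinfo mi = mALLINFo();
  fprintf(stderr, "in use: %lu, free: %lu, trimmable: %lu\n",
          (unsigned long)mi.uordblks,
          (unsigned long)mi.fordblks,
          (unsigned long)mi.keepcost);
}
#endif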
 | 
					
						
							|  |  |  | /*
 | 
					
						
							|  |  |  |   ------------------------------ malloc_stats ------------------------------ | 
					
						
							|  |  |  | */ | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2006-05-18 16:33:05 +00:00
										 |  |  | UInt | 
					
						
							|  |  |  | Yap_givemallinfo(void) | 
					
						
							|  |  |  | { | 
					
						
							|  |  |  |   struct mallinfo mi = mALLINFo(); | 
					
						
							|  |  |  |   return mi.uordblks; | 
					
						
							|  |  |  | } | 
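
/*
  Hedged usage sketch: Yap_givemallinfo() reports the number of bytes
  currently handed out by this allocator (mallinfo's uordblks).  A caller
  could, for example, log the figure before and after a large computation.
  The wrapper below is illustrative only and is not part of YAP's API.

  static void report_heap_usage(const char *where)
  {
    fprintf(stderr, "%s: %lu bytes in use\n",
            where, (unsigned long)Yap_givemallinfo());
  }
*/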
					
						
void mSTATs(void)
{
  struct mallinfo mi = mALLINFo();

  fprintf(stderr, "max system bytes = %10lu\n",
          (CHUNK_SIZE_T)(mi.usmblks));
  fprintf(stderr, "system bytes     = %10lu\n",
          (CHUNK_SIZE_T)(mi.arena + mi.hblkhd));
  fprintf(stderr, "in use bytes     = %10lu\n",
          (CHUNK_SIZE_T)(mi.uordblks + mi.hblkhd));
}
					
						
/*
  ------------------------------ mallopt ------------------------------
*/

#if __STD_C
int mALLOPt(int param_number, int value)
#else
int mALLOPt(param_number, value) int param_number; int value;
#endif
{
  mstate av = get_malloc_state();
  /* Ensure initialization/consolidation */
  malloc_consolidate(av);

  switch(param_number) {
  case M_MXFAST:
    if (value >= 0 && value <= MAX_FAST_SIZE) {
      set_max_fast(av, value);
      return 1;
    }
    else
      return 0;

  case M_TRIM_THRESHOLD:
    av->trim_threshold = value;
    return 1;

  case M_TOP_PAD:
    av->top_pad = value;
    return 1;

  default:
    return 0;
  }
}
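
/*
  A short tuning sketch (an illustration, not part of the original
  sources): only the three parameters handled in the switch above are
  honoured here, so a start-up hook could configure them as below.
  The numeric values are arbitrary examples.

  static void tune_dlmalloc(void)
  {
    // serve requests up to 64 bytes from the fastbins
    mALLOPt(M_MXFAST, 64);

    // only give memory back to the system when at least 256K of the
    // top chunk is unused
    mALLOPt(M_TRIM_THRESHOLD, 256 * 1024);

    // ask MORECORE for no extra slack beyond what is needed
    mALLOPt(M_TOP_PAD, 0);
  }
*/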
					
						

/*
  -------------------- Alternative MORECORE functions --------------------
*/


/*
  General Requirements for MORECORE.

  The MORECORE function must have the following properties:

  If MORECORE_CONTIGUOUS is false:

    * MORECORE must allocate in multiples of pagesize. It will
      only be called with arguments that are multiples of pagesize.

    * MORECORE(0) must return an address that is at least
      MALLOC_ALIGNMENT aligned. (Page-aligning always suffices.)

  else (i.e. if MORECORE_CONTIGUOUS is true):

    * Consecutive calls to MORECORE with positive arguments
      return increasing addresses, indicating that space has been
      contiguously extended.

    * MORECORE need not allocate in multiples of pagesize.
      Calls to MORECORE need not have args of multiples of pagesize.

    * MORECORE need not page-align.

  In either case:

    * MORECORE may allocate more memory than requested. (Or even less,
      but this will generally result in a malloc failure.)

    * MORECORE must not allocate memory when given argument zero, but
      instead return one past the end address of memory from previous
      nonzero call. This malloc does NOT call MORECORE(0)
      until at least one call with positive arguments is made, so
      the initial value returned is not important.

    * Even though consecutive calls to MORECORE need not return contiguous
      addresses, it must be OK for malloc'ed chunks to span multiple
      regions in those cases where they do happen to be contiguous.

    * MORECORE need not handle negative arguments -- it may instead
      just return MORECORE_FAILURE when given negative arguments.
      Negative arguments are always multiples of pagesize. MORECORE
      must not misinterpret negative args as large positive unsigned
      args. You can suppress all such calls from even occurring by
      defining MORECORE_CANNOT_TRIM.

  There is some variation across systems about the type of the
  argument to sbrk/MORECORE. If size_t is unsigned, then it cannot
  actually be size_t, because sbrk supports negative args, so it is
  normally the signed type of the same width as size_t (sometimes
  declared as "intptr_t", and sometimes "ptrdiff_t").  It doesn't much
  matter though. Internally, we use "long" as arguments, which should
  work across all reasonable possibilities.

  Additionally, if MORECORE ever returns failure for a positive
  request, and HAVE_MMAP is true, then mmap is used as a noncontiguous
  system allocator. This is a useful backup strategy for systems with
  holes in address spaces -- in this case sbrk cannot contiguously
  expand the heap, but mmap may be able to map noncontiguous space.

  If you'd like mmap to ALWAYS be used, you can define MORECORE to be
  a function that always returns MORECORE_FAILURE.

  Malloc only has limited ability to detect failures of MORECORE
  to supply contiguous space when it says it can. In particular,
  multithreaded programs that do not use locks may result in
  race conditions across calls to MORECORE that result in gaps
  that cannot be detected as such, and subsequent corruption.

  If you are using this malloc with something other than sbrk (or its
  emulation) to supply memory regions, you probably want to set
  MORECORE_CONTIGUOUS as false.  As an example, here is a custom
  allocator kindly contributed for pre-OSX macOS.  It uses virtually
  but not necessarily physically contiguous non-paged memory (locked
  in, present and won't get swapped out).  You can use it by
  uncommenting this section, adding some #includes, and setting up the
  appropriate defines above:

      #define MORECORE osMoreCore
      #define MORECORE_CONTIGUOUS 0

  There is also a shutdown routine that should somehow be called for
  cleanup upon program exit.

  #define MAX_POOL_ENTRIES 100
  #define MINIMUM_MORECORE_SIZE  (64 * 1024)
  static int next_os_pool;
  void *our_os_pools[MAX_POOL_ENTRIES];

  void *osMoreCore(int size)
  {
    void *ptr = 0;
    static void *sbrk_top = 0;

    if (size > 0)
    {
      if (size < MINIMUM_MORECORE_SIZE)
         size = MINIMUM_MORECORE_SIZE;
      if (CurrentExecutionLevel() == kTaskLevel)
         ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
      if (ptr == 0)
      {
        return (void *) MORECORE_FAILURE;
      }
      // save ptrs so they can be freed during cleanup
      our_os_pools[next_os_pool] = ptr;
      next_os_pool++;
      ptr = (void *) ((((CHUNK_SIZE_T) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
      sbrk_top = (char *) ptr + size;
      return ptr;
    }
    else if (size < 0)
    {
      // we don't currently support shrink behavior
      return (void *) MORECORE_FAILURE;
    }
    else
    {
      return sbrk_top;
    }
  }

  // cleanup any allocated memory pools
  // called as last thing before shutting down driver

  void osCleanupMem(void)
  {
    void **ptr;

    for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
      if (*ptr)
      {
         PoolDeallocate(*ptr);
         *ptr = 0;
      }
  }

*/
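
/*
  Minimal sketch of the "always fail" alternative mentioned above: if
  MORECORE is defined to a function that always returns MORECORE_FAILURE
  and HAVE_MMAP is set, every system request falls through to mmap.  This
  only illustrates the interface; the function name failingMoreCore is
  hypothetical, and the parameter type should match whatever the build
  actually passes to MORECORE (the notes above say long-sized arguments
  are used internally).

  static void *failingMoreCore(long size)
  {
    // never extend or trim the heap; force the mmap fallback instead
    (void) size;
    return (void *) MORECORE_FAILURE;
  }

  // #define MORECORE failingMoreCore
  // #define MORECORE_CONTIGUOUS 0
*/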
					
						



/* ------------------------------------------------------------
History:
    V2.7.2 Sat Aug 17 09:07:30 2002  Doug Lea  (dl at gee)
      * Fix malloc_state bitmap array misdeclaration

    V2.7.1 Thu Jul 25 10:58:03 2002  Doug Lea  (dl at gee)
      * Allow tuning of FIRST_SORTED_BIN_SIZE
      * Use PTR_UINT as type for all ptr->int casts. Thanks to John Belmonte.
      * Better detection and support for non-contiguousness of MORECORE.
        Thanks to Andreas Mueller, Conal Walsh, and Wolfram Gloger
      * Bypass most of malloc if no frees. Thanks to Emery Berger.
      * Fix freeing of old top non-contiguous chunk in sysmalloc.
      * Raised default trim and map thresholds to 256K.
      * Fix mmap-related #defines. Thanks to Lubos Lunak.
      * Fix copy macros; added LACKS_FCNTL_H. Thanks to Neal Walfield.
      * Branch-free bin calculation
      * Default trim and mmap thresholds now 256K.

    V2.7.0 Sun Mar 11 14:14:06 2001  Doug Lea  (dl at gee)
      * Introduce independent_comalloc and independent_calloc.
        Thanks to Michael Pachos for motivation and help.
      * Make optional .h file available
      * Allow > 2GB requests on 32bit systems.
      * new WIN32 sbrk, mmap, munmap, lock code from <Walter@GeNeSys-e.de>.
        Thanks also to Andreas Mueller <a.mueller at paradatec.de>,
        and Anonymous.
      * Allow override of MALLOC_ALIGNMENT (Thanks to Ruud Waij for
        helping test this.)
      * memalign: check alignment arg
      * realloc: don't try to shift chunks backwards, since this
        leads to more fragmentation in some programs and doesn't
        seem to help in any others.
      * Collect all cases in malloc requiring system memory into sYSMALLOc
      * Use mmap as backup to sbrk
      * Place all internal state in malloc_state
      * Introduce fastbins (although similar to 2.5.1)
      * Many minor tunings and cosmetic improvements
      * Introduce USE_PUBLIC_MALLOC_WRAPPERS, USE_MALLOC_LOCK
      * Introduce MALLOC_FAILURE_ACTION, MORECORE_CONTIGUOUS
        Thanks to Tony E. Bennett <tbennett@nvidia.com> and others.
      * Include errno.h to support default failure action.

    V2.6.6 Sun Dec  5 07:42:19 1999  Doug Lea  (dl at gee)
      * return null for negative arguments
      * Added Several WIN32 cleanups from Martin C. Fong <mcfong at yahoo.com>
         * Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h'
           (e.g. WIN32 platforms)
         * Cleanup header file inclusion for WIN32 platforms
         * Cleanup code to avoid Microsoft Visual C++ compiler complaints
         * Add 'USE_DL_PREFIX' to quickly allow co-existence with existing
           memory allocation routines
         * Set 'malloc_getpagesize' for WIN32 platforms (needs more work)
         * Use 'assert' rather than 'ASSERT' in WIN32 code to conform to
           usage of 'assert' in non-WIN32 code
         * Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to
           avoid infinite loop
      * Always call 'fREe()' rather than 'free()'

    V2.6.5 Wed Jun 17 15:57:31 1998  Doug Lea  (dl at gee)
      * Fixed ordering problem with boundary-stamping

    V2.6.3 Sun May 19 08:17:58 1996  Doug Lea  (dl at gee)
      * Added pvalloc, as recommended by H.J. Liu
      * Added 64bit pointer support mainly from Wolfram Gloger
      * Added anonymously donated WIN32 sbrk emulation
      * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen
      * malloc_extend_top: fix mask error that caused wastage after
        foreign sbrks
      * Add linux mremap support code from HJ Liu

    V2.6.2 Tue Dec  5 06:52:55 1995  Doug Lea  (dl at gee)
      * Integrated most documentation with the code.
      * Add support for mmap, with help from
        Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
      * Use last_remainder in more cases.
      * Pack bins using idea from  colin@nyx10.cs.du.edu
      * Use ordered bins instead of best-fit threshold
      * Eliminate block-local decls to simplify tracing and debugging.
      * Support another case of realloc via move into top
      * Fix error occurring when initial sbrk_base not word-aligned.
      * Rely on page size for units instead of SBRK_UNIT to
        avoid surprises about sbrk alignment conventions.
      * Add mallinfo, mallopt. Thanks to Raymond Nijssen
        (raymond@es.ele.tue.nl) for the suggestion.
      * Add `pad' argument to malloc_trim and top_pad mallopt parameter.
      * More precautions for cases where other routines call sbrk,
        courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
      * Added macros etc., allowing use in linux libc from
        H.J. Lu (hjl@gnu.ai.mit.edu)
      * Inverted this history list

    V2.6.1 Sat Dec  2 14:10:57 1995  Doug Lea  (dl at gee)
      * Re-tuned and fixed to behave more nicely with V2.6.0 changes.
      * Removed all preallocation code since under current scheme
        the work required to undo bad preallocations exceeds
        the work saved in good cases for most test programs.
      * No longer use return list or unconsolidated bins since
        no scheme using them consistently outperforms those that don't
        given above changes.
      * Use best fit for very large chunks to prevent some worst-cases.
      * Added some support for debugging

    V2.6.0 Sat Nov  4 07:05:23 1995  Doug Lea  (dl at gee)
      * Removed footers when chunks are in use. Thanks to
        Paul Wilson (wilson@cs.texas.edu) for the suggestion.

    V2.5.4 Wed Nov  1 07:54:51 1995  Doug Lea  (dl at gee)
      * Added malloc_trim, with help from Wolfram Gloger
        (wmglo@Dent.MED.Uni-Muenchen.DE).

    V2.5.3 Tue Apr 26 10:16:01 1994  Doug Lea  (dl at g)

    V2.5.2 Tue Apr  5 16:20:40 1994  Doug Lea  (dl at g)
      * realloc: try to expand in both directions
      * malloc: swap order of clean-bin strategy;
      * realloc: only conditionally expand backwards
      * Try not to scavenge used bins
      * Use bin counts as a guide to preallocation
      * Occasionally bin return list chunks in first scan
      * Add a few optimizations from colin@nyx10.cs.du.edu

    V2.5.1 Sat Aug 14 15:40:43 1993  Doug Lea  (dl at g)
      * faster bin computation & slightly different binning
      * merged all consolidations to one part of malloc proper
        (eliminating old malloc_find_space & malloc_clean_bin)
      * Scan 2 returns chunks (not just 1)
      * Propagate failure in realloc if malloc returns 0
      * Add stuff to allow compilation on non-ANSI compilers
        from kpv@research.att.com

    V2.5 Sat Aug  7 07:41:59 1993  Doug Lea  (dl at g.oswego.edu)
      * removed potential for odd address access in prev_chunk
      * removed dependency on getpagesize.h
      * misc cosmetics and a bit more internal documentation
      * anticosmetics: mangled names in macros to evade debugger strangeness
      * tested on sparc, hp-700, dec-mips, rs6000
        with gcc & native cc (hp, dec only) allowing
        Detlefs & Zorn comparison study (in SIGPLAN Notices.)

    Trial version Fri Aug 28 13:14:29 1992  Doug Lea  (dl at g.oswego.edu)
      * Based loosely on libg++-1.2X malloc. (It retains some of the overall
        structure of old version, but most details differ.)

*/
					
						
void
Yap_initdlmalloc(void)
{
  HeapTop = (ADDR)ALIGN_SIZE(HeapTop,16);
  Yap_NOfMemoryHoles = 0;
  Yap_av = (struct malloc_state *)HeapTop;
  memset((void *)Yap_av, 0, sizeof(struct malloc_state));
  HeapTop += sizeof(struct malloc_state);
  HeapTop = (ADDR)ALIGN_SIZE(HeapTop,2*SIZEOF_LONG_LONG_INT);
  HeapMax = HeapTop-Yap_HeapBase;
}
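
/*
  Sketch of the alignment step above, assuming ALIGN_SIZE(ptr,n) rounds
  ptr up to the next multiple of n: the malloc_state is placed on a
  16-byte boundary and HeapTop is then realigned so the first chunk
  handed out respects 2*SIZEOF_LONG_LONG_INT alignment.  The helper
  below is only an illustration of that rounding, not YAP's macro.

  static char *align_up(char *p, CHUNK_SIZE_T align)
  {
    // e.g. align_up((char *)0x1003, 16) yields 0x1010:
    //   (0x1003 + 16 - 1) & ~(16 - 1)  ==  0x1010
    return (char *) ((((CHUNK_SIZE_T) p) + align - 1) & ~(align - 1));
  }
*/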
					
						

void Yap_RestoreDLMalloc(void)
{
  mstate av = Yap_av;
  int i;
  mchunkptr p;
  mchunkptr q;
  mbinptr b;
  unsigned int binbit;
  int empty;
  unsigned int idx;
  INTERNAL_SIZE_T size;
  CHUNK_SIZE_T  total = 0;
  int max_fast_bin;

  /* internal size_t must be no wider than pointer type */
  assert(sizeof(INTERNAL_SIZE_T) <= sizeof(char*));

  /* alignment is a power of 2 */
  assert((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-1)) == 0);

  /* cannot run remaining checks until fully initialized */
  if (av->top == 0 || av->top == initial_top(av))
    return;

  /* pagesize is a power of 2 */
  assert((av->pagesize & (av->pagesize-1)) == 0);

  /* properties of fastbins */

  /* max_fast is in allowed range */
  assert(get_max_fast(av) <= request2size(MAX_FAST_SIZE));

  max_fast_bin = fastbin_index(av->max_fast);

  if (av->top) {
    av->top = ChunkPtrAdjust(av->top);
  }
  if (av->last_remainder) {
    av->last_remainder = ChunkPtrAdjust(av->last_remainder);
  }
  for (i = 0; i < NFASTBINS; ++i) {

    if (av->fastbins[i]) {
      av->fastbins[i] = ChunkPtrAdjust(av->fastbins[i]);
    }
    p = av->fastbins[i];

    /* all bins past max_fast are empty */
    if (i > max_fast_bin)
      assert(p == 0);

    while (p != 0) {
      /* each chunk claims to be inuse */
      check_inuse_chunk(p);
      total += chunksize(p);
      /* chunk belongs in this bin */
      assert(fastbin_index(chunksize(p)) == i);
      if (p->fd)
	p->fd = ChunkPtrAdjust(p->fd);
      if (p->bk)
	p->bk = ChunkPtrAdjust(p->bk);
      p = p->fd;
    }
  }

  if (total != 0)
    assert(have_fastchunks(av));
  else if (!have_fastchunks(av))
    assert(total == 0);

  for (i = 0; i < NBINS*2; i++) {
    if (av->bins[i]) {
      av->bins[i] = ChunkPtrAdjust(av->bins[i]);
    }
  }

  /* check normal bins */
  for (i = 1; i < NBINS; ++i) {
    b = bin_at(av,i);

    /* binmap is accurate (except for bin 1 == unsorted_chunks) */
    if (i >= 2) {
      binbit = get_binmap(av,i);
      empty = last(b) == b;
      if (!binbit)
        assert(empty);
      else if (!empty)
        assert(binbit);
    }

    for (p = last(b); p != b; p = p->bk) {
      /* each chunk claims to be free */
      check_free_chunk(p);
      if (p->fd)
	p->fd = ChunkPtrAdjust(p->fd);
      if (p->bk)
	p->bk = ChunkPtrAdjust(p->bk);
      size = chunksize(p);
      total += size;
      if (i >= 2) {
        /* chunk belongs in bin */
        idx = bin_index(size);
        assert(idx == i);
        /* lists are sorted */
        if ((CHUNK_SIZE_T) size >= (CHUNK_SIZE_T)(FIRST_SORTED_BIN_SIZE)) {
          assert(p->bk == b ||
                 (CHUNK_SIZE_T)chunksize(p->bk) >=
                 (CHUNK_SIZE_T)chunksize(p));
        }
      }
      /* chunk is followed by a legal chain of inuse chunks */
      for (q = next_chunk(p);
           (q != av->top && inuse(q) &&
             (CHUNK_SIZE_T)(chunksize(q)) >= MINSIZE);
           q = next_chunk(q)) {
	check_inuse_chunk(q);
      }
    }
  }

}
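
/*
  Illustration of the rebasing done above (a sketch, not YAP code): when a
  saved heap image is reloaded at a different base address, every chunk
  pointer stored inside it is off by the same offset, which is assumed to
  be what LOCAL_HDiff holds.  The walk above therefore applies one
  addition per stored pointer -- top, last_remainder, every fastbin and
  bin head, and each chunk's fd/bk links.  The numbers below are purely
  illustrative.

  static void rebase_example(void)
  {
    CHUNK_SIZE_T old_base = 0x40000000;           // where the image was saved
    CHUNK_SIZE_T new_base = 0x50000000;           // where it was reloaded
    CHUNK_SIZE_T hdiff    = new_base - old_base;  // the assumed LOCAL_HDiff value

    CHUNK_SIZE_T stored_fd  = old_base + 0x1230;  // an fd link read from the image
    CHUNK_SIZE_T rebased_fd = stored_fd + hdiff;  // what the walk stores back

    assert(rebased_fd == new_base + 0x1230);
  }
*/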
					
						

#endif /* USE_DL_MALLOC */