cscg22-gearboy

CSCG 2022 Challenge 'Gearboy'
git clone https://git.sinitax.com/sinitax/cscg22-gearboy

SDL_malloc.c (192294B)


      1/*
      2  Simple DirectMedia Layer
      3  Copyright (C) 1997-2014 Sam Lantinga <slouken@libsdl.org>
      4
      5  This software is provided 'as-is', without any express or implied
      6  warranty.  In no event will the authors be held liable for any damages
      7  arising from the use of this software.
      8
      9  Permission is granted to anyone to use this software for any purpose,
     10  including commercial applications, and to alter it and redistribute it
     11  freely, subject to the following restrictions:
     12
     13  1. The origin of this software must not be misrepresented; you must not
     14     claim that you wrote the original software. If you use this software
     15     in a product, an acknowledgment in the product documentation would be
     16     appreciated but is not required.
     17  2. Altered source versions must be plainly marked as such, and must not be
     18     misrepresented as being the original software.
     19  3. This notice may not be removed or altered from any source distribution.
     20*/
     21#include "../SDL_internal.h"
     22
     23/* This file contains portable memory management functions for SDL */
     24
     25#include "SDL_stdinc.h"
     26
     27#if defined(HAVE_MALLOC)
     28
     29void *SDL_malloc(size_t size)
     30{
     31    return malloc(size);
     32}
     33
     34void *SDL_calloc(size_t nmemb, size_t size)
     35{
     36    return calloc(nmemb, size);
     37}
     38
     39void *SDL_realloc(void *ptr, size_t size)
     40{
     41    return realloc(ptr, size);
     42}
     43
     44void SDL_free(void *ptr)
     45{
     46    free(ptr);
     47}
     48
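/*
   Editor's note: an illustrative usage sketch, not part of the original
   source.  When HAVE_MALLOC is defined, the SDL_* routines above are thin
   wrappers around the C library, so they are used exactly like their
   standard counterparts (Uint8 and SDL_memset come from SDL_stdinc.h):

     Uint8 *buf = (Uint8 *) SDL_malloc(256);
     if (buf != NULL) {
         SDL_memset(buf, 0, 256);
         SDL_free(buf);
     }
*/
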
     49#else  /* the rest of this is a LOT of tapdancing to implement malloc. :) */
     50
     51#define LACKS_SYS_TYPES_H
     52#define LACKS_STDIO_H
     53#define LACKS_STRINGS_H
     54#define LACKS_STRING_H
     55#define LACKS_STDLIB_H
     56#define ABORT
     57#define USE_LOCKS 1
     58
     59/*
     60  This is a version (aka dlmalloc) of malloc/free/realloc written by
     61  Doug Lea and released to the public domain, as explained at
     62  http://creativecommons.org/licenses/publicdomain.  Send questions,
     63  comments, complaints, performance data, etc to dl@cs.oswego.edu
     64
     65* Version 2.8.3 Thu Sep 22 11:16:15 2005  Doug Lea  (dl at gee)
     66
     67   Note: There may be an updated version of this malloc obtainable at
     68           ftp://gee.cs.oswego.edu/pub/misc/malloc.c
     69         Check before installing!
     70
     71* Quickstart
     72
     73  This library is all in one file to simplify the most common usage:
     74  ftp it, compile it (-O3), and link it into another program. All of
     75  the compile-time options default to reasonable values for use on
     76  most platforms.  You might later want to step through various
     77  compile-time and dynamic tuning options.
     78
     79  For convenience, an include file for code using this malloc is at:
     80     ftp://gee.cs.oswego.edu/pub/misc/malloc-2.8.3.h
     81  You don't really need this .h file unless you call functions not
     82  defined in your system include files.  The .h file contains only the
     83  excerpts from this file needed for using this malloc on ANSI C/C++
     84  systems, so long as you haven't changed compile-time options about
     85  naming and tuning parameters.  If you do, then you can create your
     86  own malloc.h that does include all settings by cutting at the point
     87  indicated below. Note that you may already by default be using a C
     88  library containing a malloc that is based on some version of this
     89  malloc (for example in linux). You might still want to use the one
     90  in this file to customize settings or to avoid overheads associated
     91  with library versions.
     92
     93* Vital statistics:
     94
     95  Supported pointer/size_t representation:       4 or 8 bytes
     96       size_t MUST be an unsigned type of the same width as
     97       pointers. (If you are using an ancient system that declares
     98       size_t as a signed type, or need it to be a different width
     99       than pointers, you can use a previous release of this malloc
    100       (e.g. 2.7.2) supporting these.)
    101
    102  Alignment:                                     8 bytes (default)
    103       This suffices for nearly all current machines and C compilers.
    104       However, you can define MALLOC_ALIGNMENT to be wider than this
    105       if necessary (up to 128 bytes), at the expense of using more space.
    106
    107  Minimum overhead per allocated chunk:   4 or  8 bytes (if 4byte sizes)
    108                                          8 or 16 bytes (if 8byte sizes)
    109       Each malloced chunk has a hidden word of overhead holding size
    110       and status information, and additional cross-check word
    111       if FOOTERS is defined.
    112
    113  Minimum allocated size: 4-byte ptrs:  16 bytes    (including overhead)
    114                          8-byte ptrs:  32 bytes    (including overhead)
    115
    116       Even a request for zero bytes (i.e., malloc(0)) returns a
    117       pointer to something of the minimum allocatable size.
    118       The maximum overhead wastage (i.e., number of extra bytes
    119       allocated than were requested in malloc) is less than or equal
    120       to the minimum size, except for requests >= mmap_threshold that
    121       are serviced via mmap(), where the worst case wastage is about
    122       32 bytes plus the remainder from a system page (the minimal
    123       mmap unit); typically 4096 or 8192 bytes.
    124
    125  Security: static-safe; optionally more or less
    126       The "security" of malloc refers to the ability of malicious
    127       code to accentuate the effects of errors (for example, freeing
    128       space that is not currently malloc'ed or overwriting past the
    129       ends of chunks) in code that calls malloc.  This malloc
    130       guarantees not to modify any memory locations below the base of
    131       heap, i.e., static variables, even in the presence of usage
    132       errors.  The routines additionally detect most improper frees
    133       and reallocs.  All this holds as long as the static bookkeeping
    134       for malloc itself is not corrupted by some other means.  This
    135       is only one aspect of security -- these checks do not, and
    136       cannot, detect all possible programming errors.
    137
    138       If FOOTERS is defined nonzero, then each allocated chunk
    139       carries an additional check word to verify that it was malloced
    140       from its space.  These check words are the same within each
    141       execution of a program using malloc, but differ across
    142       executions, so externally crafted fake chunks cannot be
    143       freed. This improves security by rejecting frees/reallocs that
    144       could corrupt heap memory, in addition to the checks preventing
    145       writes to statics that are always on.  This may further improve
    146       security at the expense of time and space overhead.  (Note that
    147       FOOTERS may also be worth using with MSPACES.)
    148
    149       By default detected errors cause the program to abort (calling
    150       "abort()"). You can override this to instead proceed past
    151       errors by defining PROCEED_ON_ERROR.  In this case, a bad free
    152       has no effect, and a malloc that encounters a bad address
    153       caused by user overwrites will ignore the bad address by
    154       dropping pointers and indices to all known memory. This may
    155       be appropriate for programs that should continue if at all
    156       possible in the face of programming errors, although they may
    157       run out of memory because dropped memory is never reclaimed.
    158
    159       If you don't like either of these options, you can define
    160       CORRUPTION_ERROR_ACTION and USAGE_ERROR_ACTION to do anything
    161  else. And if you are sure that your program using malloc has
    162       no errors or vulnerabilities, you can define INSECURE to 1,
    163       which might (or might not) provide a small performance improvement.
    164
    165  Thread-safety: NOT thread-safe unless USE_LOCKS defined
    166       When USE_LOCKS is defined, each public call to malloc, free,
    167       etc is surrounded with either a pthread mutex or a win32
    168       spinlock (depending on WIN32). This is not especially fast, and
    169       can be a major bottleneck.  It is designed only to provide
    170       minimal protection in concurrent environments, and to provide a
    171       basis for extensions.  If you are using malloc in a concurrent
    172       program, consider instead using ptmalloc, which is derived from
    173       a version of this malloc. (See http://www.malloc.de).
    174
    175  System requirements: Any combination of MORECORE and/or MMAP/MUNMAP
    176       This malloc can use unix sbrk or any emulation (invoked using
    177       the CALL_MORECORE macro) and/or mmap/munmap or any emulation
    178       (invoked using CALL_MMAP/CALL_MUNMAP) to get and release system
    179       memory.  On most unix systems, it tends to work best if both
    180       MORECORE and MMAP are enabled.  On Win32, it uses emulations
    181       based on VirtualAlloc. It also uses common C library functions
    182       like memset.
    183
    184  Compliance: I believe it is compliant with the Single Unix Specification
    185       (See http://www.unix.org). Also SVID/XPG, ANSI C, and probably
    186       others as well.
    187
    188* Overview of algorithms
    189
    190  This is not the fastest, most space-conserving, most portable, or
    191  most tunable malloc ever written. However it is among the fastest
    192  while also being among the most space-conserving, portable and
    193  tunable.  Consistent balance across these factors results in a good
    194  general-purpose allocator for malloc-intensive programs.
    195
    196  In most ways, this malloc is a best-fit allocator. Generally, it
    197  chooses the best-fitting existing chunk for a request, with ties
    198  broken in approximately least-recently-used order. (This strategy
    199  normally maintains low fragmentation.) However, for requests less
    200  than 256bytes, it deviates from best-fit when there is not an
    201  exactly fitting available chunk by preferring to use space adjacent
    202  to that used for the previous small request, as well as by breaking
    203  ties in approximately most-recently-used order. (These enhance
    204  locality of series of small allocations.)  And for very large requests
    205  (>= 256Kb by default), it relies on system memory mapping
    206  facilities, if supported.  (This helps avoid carrying around and
    207  possibly fragmenting memory used only for large chunks.)
    208
    209  All operations (except malloc_stats and mallinfo) have execution
    210  times that are bounded by a constant factor of the number of bits in
    211  a size_t, not counting any clearing in calloc or copying in realloc,
    212  or actions surrounding MORECORE and MMAP that have times
    213  proportional to the number of non-contiguous regions returned by
    214  system allocation routines, which is often just 1.
    215
    216  The implementation is not very modular and seriously overuses
    217  macros. Perhaps someday all C compilers will do as good a job
    218  inlining modular code as can now be done by brute-force expansion,
    219  but now, enough of them seem not to.
    220
    221  Some compilers issue a lot of warnings about code that is
    222  dead/unreachable only on some platforms, and also about intentional
    223  uses of negation on unsigned types. All known cases of each can be
    224  ignored.
    225
    226  For a longer but out of date high-level description, see
    227     http://gee.cs.oswego.edu/dl/html/malloc.html
    228
    229* MSPACES
    230  If MSPACES is defined, then in addition to malloc, free, etc.,
    231  this file also defines mspace_malloc, mspace_free, etc. These
    232  are versions of malloc routines that take an "mspace" argument
    233  obtained using create_mspace, to control all internal bookkeeping.
    234  If ONLY_MSPACES is defined, only these versions are compiled.
    235  So if you would like to use this allocator for only some allocations,
    236  and your system malloc for others, you can compile with
    237  ONLY_MSPACES and then do something like...
    238    static mspace mymspace = create_mspace(0,0); // for example
    239    #define mymalloc(bytes)  mspace_malloc(mymspace, bytes)
    240
    241  (Note: If you only need one instance of an mspace, you can instead
    242  use "USE_DL_PREFIX" to relabel the global malloc.)
    243
    244  You can similarly create thread-local allocators by storing
    245  mspaces as thread-locals. For example:
    246    static __thread mspace tlms = 0;
    247    void*  tlmalloc(size_t bytes) {
    248      if (tlms == 0) tlms = create_mspace(0, 0);
    249      return mspace_malloc(tlms, bytes);
    250    }
    251    void  tlfree(void* mem) { mspace_free(tlms, mem); }
    252
    253  Unless FOOTERS is defined, each mspace is completely independent.
    254  You cannot allocate from one and free to another (although
    255  conformance is only weakly checked, so usage errors are not always
    256  caught). If FOOTERS is defined, then each chunk carries around a tag
    257  indicating its originating mspace, and frees are directed to their
    258  originating spaces.
    259
    260 -------------------------  Compile-time options ---------------------------
    261
    262Be careful in setting #define values for numerical constants of type
    263size_t. On some systems, literal values are not automatically extended
    264to size_t precision unless they are explicitly cast.
    265
    266WIN32                    default: defined if _WIN32 defined
    267  Defining WIN32 sets up defaults for MS environment and compilers.
    268  Otherwise defaults are for unix.
    269
    270MALLOC_ALIGNMENT         default: (size_t)8
    271  Controls the minimum alignment for malloc'ed chunks.  It must be a
    272  power of two and at least 8, even on machines for which smaller
    273  alignments would suffice. It may be defined as larger than this
    274  though. Note however that code and data structures are optimized for
    275  the case of 8-byte alignment.
    276
    277MSPACES                  default: 0 (false)
    278  If true, compile in support for independent allocation spaces.
    279  This is only supported if HAVE_MMAP is true.
    280
    281ONLY_MSPACES             default: 0 (false)
    282  If true, only compile in mspace versions, not regular versions.
    283
    284USE_LOCKS                default: 0 (false)
    285  Causes each call to each public routine to be surrounded with
    286  pthread or WIN32 mutex lock/unlock. (If set true, this can be
    287  overridden on a per-mspace basis for mspace versions.)
    288
    289FOOTERS                  default: 0
    290  If true, provide extra checking and dispatching by placing
    291  information in the footers of allocated chunks. This adds
    292  space and time overhead.
    293
    294INSECURE                 default: 0
    295  If true, omit checks for usage errors and heap space overwrites.
    296
    297USE_DL_PREFIX            default: NOT defined
    298  Causes compiler to prefix all public routines with the string 'dl'.
    299  This can be useful when you only want to use this malloc in one part
    300  of a program, using your regular system malloc elsewhere.
    301
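  Editor's note (illustrative, not part of the original text): with
  USE_DL_PREFIX defined, the public entry points keep their 'dl' names,
  so both allocators can coexist in one program, e.g.:

    void *p = dlmalloc(64);   // served by this allocator
    void *q = malloc(64);     // served by the system C library
    dlfree(p);
    free(q);
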
    302ABORT                    default: defined as abort()
    303  Defines how to abort on failed checks.  On most systems, a failed
    304  check cannot die with an "assert" or even print an informative
    305  message, because the underlying print routines in turn call malloc,
    306  which will fail again.  Generally, the best policy is to simply call
    307  abort(). It's not very useful to do more than this because many
    308  errors due to overwriting will show up as address faults (null, odd
    309  addresses etc) rather than malloc-triggered checks, so will also
    310  abort.  Also, most compilers know that abort() does not return, so
    311  can better optimize code conditionally calling it.
    312
    313PROCEED_ON_ERROR           default: defined as 0 (false)
    314  Controls whether detected bad addresses cause them to be bypassed
    315  rather than aborting. If set, detected bad arguments to free and
    316  realloc are ignored. And all bookkeeping information is zeroed out
    317  upon a detected overwrite of freed heap space, thus losing the
    318  ability to ever return it from malloc again, but enabling the
    319  application to proceed. If PROCEED_ON_ERROR is defined, the
    320  static variable malloc_corruption_error_count is compiled in
    321  and can be examined to see if errors have occurred. This option
    322  generates slower code than the default abort policy.
    323
    324DEBUG                    default: NOT defined
    325  The DEBUG setting is mainly intended for people trying to modify
    326  this code or diagnose problems when porting to new platforms.
    327  However, it may also be able to better isolate user errors than just
    328  using runtime checks.  The assertions in the check routines spell
    329  out in more detail the assumptions and invariants underlying the
    330  algorithms.  The checking is fairly extensive, and will slow down
    331  execution noticeably. Calling malloc_stats or mallinfo with DEBUG
    332  set will attempt to check every non-mmapped allocated and free chunk
    333  in the course of computing the summaries.
    334
    335ABORT_ON_ASSERT_FAILURE   default: defined as 1 (true)
    336  Debugging assertion failures can be nearly impossible if your
    337  version of the assert macro causes malloc to be called, which will
    338  lead to a cascade of further failures, blowing the runtime stack.
    339  ABORT_ON_ASSERT_FAILURE causes assertion failures to call abort(),
    340  which will usually make debugging easier.
    341
    342MALLOC_FAILURE_ACTION     default: sets errno to ENOMEM, or no-op on win32
    343  The action to take before "return 0" when malloc fails to return
    344  memory because none is available.
    345
    346HAVE_MORECORE             default: 1 (true) unless win32 or ONLY_MSPACES
    347  True if this system supports sbrk or an emulation of it.
    348
    349MORECORE                  default: sbrk
    350  The name of the sbrk-style system routine to call to obtain more
    351  memory.  See below for guidance on writing custom MORECORE
    352  functions. The type of the argument to sbrk/MORECORE varies across
    353  systems.  It cannot be size_t, because it supports negative
    354  arguments, so it is normally the signed type of the same width as
    355  size_t (sometimes declared as "intptr_t").  It doesn't much matter
    356  though. Internally, we only call it with arguments less than half
    357  the max value of a size_t, which should work across all reasonable
    358  possibilities, although sometimes generating compiler warnings.  See
    359  near the end of this file for guidelines for creating a custom
    360  version of MORECORE.
    361
    362MORECORE_CONTIGUOUS       default: 1 (true)
    363  If true, take advantage of fact that consecutive calls to MORECORE
    364  with positive arguments always return contiguous increasing
    365  addresses.  This is true of unix sbrk. It does not hurt too much to
    366  set it true anyway, since malloc copes with non-contiguities.
    367  Setting it false when definitely non-contiguous saves time
    368  and possibly wasted space it would take to discover this though.
    369
    370MORECORE_CANNOT_TRIM      default: NOT defined
    371  True if MORECORE cannot release space back to the system when given
    372  negative arguments. This is generally necessary only if you are
    373  using a hand-crafted MORECORE function that cannot handle negative
    374  arguments.
    375
    376HAVE_MMAP                 default: 1 (true)
    377  True if this system supports mmap or an emulation of it.  If so, and
    378  HAVE_MORECORE is not true, MMAP is used for all system
    379  allocation. If set and HAVE_MORECORE is true as well, MMAP is
    380  primarily used to directly allocate very large blocks. It is also
    381  used as a backup strategy in cases where MORECORE fails to provide
    382  space from system. Note: A single call to MUNMAP is assumed to be
    383  able to unmap memory that may have been allocated using multiple calls
    384  to MMAP, so long as they are adjacent.
    385
    386HAVE_MREMAP               default: 1 on linux, else 0
    387  If true realloc() uses mremap() to re-allocate large blocks and
    388  extend or shrink allocation spaces.
    389
    390MMAP_CLEARS               default: 1 on unix
    391  True if mmap clears memory so calloc doesn't need to. This is true
    392  for standard unix mmap using /dev/zero.
    393
    394USE_BUILTIN_FFS            default: 0 (i.e., not used)
    395  Causes malloc to use the builtin ffs() function to compute indices.
    396  Some compilers may recognize and intrinsify ffs to be faster than the
    397  supplied C version. Also, the case of x86 using gcc is special-cased
    398  to an asm instruction, so is already as fast as it can be, and so
    399  this setting has no effect. (On most x86s, the asm version is only
    400  slightly faster than the C version.)
    401
    402malloc_getpagesize         default: derive from system includes, or 4096.
    403  The system page size. To the extent possible, this malloc manages
    404  memory from the system in page-size units.  This may be (and
    405  usually is) a function rather than a constant. This is ignored
    406  if WIN32, where page size is determined using getSystemInfo during
    407  initialization.
    408
    409USE_DEV_RANDOM             default: 0 (i.e., not used)
    410  Causes malloc to use /dev/random to initialize secure magic seed for
    411  stamping footers. Otherwise, the current time is used.
    412
    413NO_MALLINFO                default: 0
    414  If defined, don't compile "mallinfo". This can be a simple way
    415  of dealing with mismatches between system declarations and
    416  those in this file.
    417
    418MALLINFO_FIELD_TYPE        default: size_t
    419  The type of the fields in the mallinfo struct. This was originally
    420  defined as "int" in SVID etc, but is more usefully defined as
    421  size_t. The value is used only if HAVE_USR_INCLUDE_MALLOC_H is not set.
    422
    423REALLOC_ZERO_BYTES_FREES    default: not defined
    424  This should be set if a call to realloc with zero bytes should
    425  be the same as a call to free. Some people think it should. Otherwise,
    426  since this malloc returns a unique pointer for malloc(0),
    427  realloc(p, 0) does too.
    428
    429LACKS_UNISTD_H, LACKS_FCNTL_H, LACKS_SYS_PARAM_H, LACKS_SYS_MMAN_H
    430LACKS_STRINGS_H, LACKS_STRING_H, LACKS_SYS_TYPES_H,  LACKS_ERRNO_H
    431LACKS_STDLIB_H                default: NOT defined unless on WIN32
    432  Define these if your system does not have these header files.
    433  You might need to manually insert some of the declarations they provide.
    434
    435DEFAULT_GRANULARITY        default: page size if MORECORE_CONTIGUOUS,
    436                                system_info.dwAllocationGranularity in WIN32,
    437                                otherwise 64K.
    438      Also settable using mallopt(M_GRANULARITY, x)
    439  The unit for allocating and deallocating memory from the system.  On
    440  most systems with contiguous MORECORE, there is no reason to
    441  make this more than a page. However, systems with MMAP tend to
    442  either require or encourage larger granularities.  You can increase
    443  this value to prevent system allocation functions from being called so
    444  often, especially if they are slow.  The value must be at least one
    445  page and must be a power of two.  Setting to 0 causes initialization
    446  to either page size or win32 region size.  (Note: In previous
    447  versions of malloc, the equivalent of this option was called
    448  "TOP_PAD")
    449
    450DEFAULT_TRIM_THRESHOLD    default: 2MB
    451      Also settable using mallopt(M_TRIM_THRESHOLD, x)
    452  The maximum amount of unused top-most memory to keep before
    453  releasing via malloc_trim in free().  Automatic trimming is mainly
    454  useful in long-lived programs using contiguous MORECORE.  Because
    455  trimming via sbrk can be slow on some systems, and can sometimes be
    456  wasteful (in cases where programs immediately afterward allocate
    457  more large chunks) the value should be high enough so that your
    458  overall system performance would improve by releasing this much
    459  memory.  As a rough guide, you might set to a value close to the
    460  average size of a process (program) running on your system.
    461  Releasing this much memory would allow such a process to run in
    462  memory.  Generally, it is worth tuning trim thresholds when a
    463  program undergoes phases where several large chunks are allocated
    464  and released in ways that can reuse each other's storage, perhaps
    465  mixed with phases where there are no such chunks at all. The trim
    466  value must be greater than page size to have any useful effect.  To
    467  disable trimming completely, you can set to MAX_SIZE_T. Note that the trick
    468  some people use of mallocing a huge space and then freeing it at
    469  program startup, in an attempt to reserve system memory, doesn't
    470  have the intended effect under automatic trimming, since that memory
    471  will immediately be returned to the system.
    472
    473DEFAULT_MMAP_THRESHOLD       default: 256K
    474      Also settable using mallopt(M_MMAP_THRESHOLD, x)
    475  The request size threshold for using MMAP to directly service a
    476  request. Requests of at least this size that cannot be allocated
    477  using already-existing space will be serviced via mmap.  (If enough
    478  normal freed space already exists it is used instead.)  Using mmap
    479  segregates relatively large chunks of memory so that they can be
    480  individually obtained and released from the host system. A request
    481  serviced through mmap is never reused by any other request (at least
    482  not directly; the system may just so happen to remap successive
    483  requests to the same locations).  Segregating space in this way has
    484  the benefits that: Mmapped space can always be individually released
    485  back to the system, which helps keep the system level memory demands
    486  of a long-lived program low.  Also, mapped memory doesn't become
    487  `locked' between other chunks, as can happen with normally allocated
    488  chunks, which means that even trimming via malloc_trim would not
    489  release them.  However, it has the disadvantage that the space
    490  cannot be reclaimed, consolidated, and then used to service later
    491  requests, as happens with normal chunks.  The advantages of mmap
    492  nearly always outweigh disadvantages for "large" chunks, but the
    493  value of "large" may vary across systems.  The default is an
    494  empirically derived value that works well in most systems. You can
    495  disable mmap by setting to MAX_SIZE_T.
    496
    497*/
    498
    499#ifndef WIN32
    500#ifdef _WIN32
    501#define WIN32 1
    502#endif /* _WIN32 */
    503#endif /* WIN32 */
    504#ifdef WIN32
    505#define WIN32_LEAN_AND_MEAN
    506#include <windows.h>
    507#define HAVE_MMAP 1
    508#define HAVE_MORECORE 0
    509#define LACKS_UNISTD_H
    510#define LACKS_SYS_PARAM_H
    511#define LACKS_SYS_MMAN_H
    512#define LACKS_STRING_H
    513#define LACKS_STRINGS_H
    514#define LACKS_SYS_TYPES_H
    515#define LACKS_ERRNO_H
    516#define LACKS_FCNTL_H
    517#define MALLOC_FAILURE_ACTION
    518#define MMAP_CLEARS 0           /* WINCE and some others apparently don't clear */
    519#endif /* WIN32 */
    520
    521#if defined(DARWIN) || defined(_DARWIN)
    522/* Mac OSX docs advise not to use sbrk; it seems better to use mmap */
    523#ifndef HAVE_MORECORE
    524#define HAVE_MORECORE 0
    525#define HAVE_MMAP 1
    526#endif /* HAVE_MORECORE */
    527#endif /* DARWIN */
    528
    529#ifndef LACKS_SYS_TYPES_H
    530#include <sys/types.h>          /* For size_t */
    531#endif /* LACKS_SYS_TYPES_H */
    532
    533/* The maximum possible size_t value has all bits set */
    534#define MAX_SIZE_T           (~(size_t)0)
    535
    536#ifndef ONLY_MSPACES
    537#define ONLY_MSPACES 0
    538#endif /* ONLY_MSPACES */
    539#ifndef MSPACES
    540#if ONLY_MSPACES
    541#define MSPACES 1
    542#else /* ONLY_MSPACES */
    543#define MSPACES 0
    544#endif /* ONLY_MSPACES */
    545#endif /* MSPACES */
    546#ifndef MALLOC_ALIGNMENT
    547#define MALLOC_ALIGNMENT ((size_t)8U)
    548#endif /* MALLOC_ALIGNMENT */
    549#ifndef FOOTERS
    550#define FOOTERS 0
    551#endif /* FOOTERS */
    552#ifndef ABORT
    553#define ABORT  abort()
    554#endif /* ABORT */
    555#ifndef ABORT_ON_ASSERT_FAILURE
    556#define ABORT_ON_ASSERT_FAILURE 1
    557#endif /* ABORT_ON_ASSERT_FAILURE */
    558#ifndef PROCEED_ON_ERROR
    559#define PROCEED_ON_ERROR 0
    560#endif /* PROCEED_ON_ERROR */
    561#ifndef USE_LOCKS
    562#define USE_LOCKS 0
    563#endif /* USE_LOCKS */
    564#ifndef INSECURE
    565#define INSECURE 0
    566#endif /* INSECURE */
    567#ifndef HAVE_MMAP
    568#define HAVE_MMAP 1
    569#endif /* HAVE_MMAP */
    570#ifndef MMAP_CLEARS
    571#define MMAP_CLEARS 1
    572#endif /* MMAP_CLEARS */
    573#ifndef HAVE_MREMAP
    574#ifdef linux
    575#define HAVE_MREMAP 1
    576#else /* linux */
    577#define HAVE_MREMAP 0
    578#endif /* linux */
    579#endif /* HAVE_MREMAP */
    580#ifndef MALLOC_FAILURE_ACTION
    581#define MALLOC_FAILURE_ACTION  errno = ENOMEM;
    582#endif /* MALLOC_FAILURE_ACTION */
    583#ifndef HAVE_MORECORE
    584#if ONLY_MSPACES
    585#define HAVE_MORECORE 0
    586#else /* ONLY_MSPACES */
    587#define HAVE_MORECORE 1
    588#endif /* ONLY_MSPACES */
    589#endif /* HAVE_MORECORE */
    590#if !HAVE_MORECORE
    591#define MORECORE_CONTIGUOUS 0
    592#else /* !HAVE_MORECORE */
    593#ifndef MORECORE
    594#define MORECORE sbrk
    595#endif /* MORECORE */
    596#ifndef MORECORE_CONTIGUOUS
    597#define MORECORE_CONTIGUOUS 1
    598#endif /* MORECORE_CONTIGUOUS */
    599#endif /* HAVE_MORECORE */
    600#ifndef DEFAULT_GRANULARITY
    601#if MORECORE_CONTIGUOUS
    602#define DEFAULT_GRANULARITY (0) /* 0 means to compute in init_mparams */
    603#else /* MORECORE_CONTIGUOUS */
    604#define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U)
    605#endif /* MORECORE_CONTIGUOUS */
    606#endif /* DEFAULT_GRANULARITY */
    607#ifndef DEFAULT_TRIM_THRESHOLD
    608#ifndef MORECORE_CANNOT_TRIM
    609#define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U)
    610#else /* MORECORE_CANNOT_TRIM */
    611#define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T
    612#endif /* MORECORE_CANNOT_TRIM */
    613#endif /* DEFAULT_TRIM_THRESHOLD */
    614#ifndef DEFAULT_MMAP_THRESHOLD
    615#if HAVE_MMAP
    616#define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U)
    617#else /* HAVE_MMAP */
    618#define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T
    619#endif /* HAVE_MMAP */
    620#endif /* DEFAULT_MMAP_THRESHOLD */
    621#ifndef USE_BUILTIN_FFS
    622#define USE_BUILTIN_FFS 0
    623#endif /* USE_BUILTIN_FFS */
    624#ifndef USE_DEV_RANDOM
    625#define USE_DEV_RANDOM 0
    626#endif /* USE_DEV_RANDOM */
    627#ifndef NO_MALLINFO
    628#define NO_MALLINFO 0
    629#endif /* NO_MALLINFO */
    630#ifndef MALLINFO_FIELD_TYPE
    631#define MALLINFO_FIELD_TYPE size_t
    632#endif /* MALLINFO_FIELD_TYPE */
    633
    634#define memset  SDL_memset
    635#define memcpy  SDL_memcpy
    636#define malloc  SDL_malloc
    637#define calloc  SDL_calloc
    638#define realloc SDL_realloc
    639#define free    SDL_free
    640
    641/*
    642  mallopt tuning options.  SVID/XPG defines four standard parameter
    643  numbers for mallopt, normally defined in malloc.h.  None of these
    644  are used in this malloc, so setting them has no effect. But this
    645  malloc does support the following options.
    646*/
    647
    648#define M_TRIM_THRESHOLD     (-1)
    649#define M_GRANULARITY        (-2)
    650#define M_MMAP_THRESHOLD     (-3)
    651
    652/* ------------------------ Mallinfo declarations ------------------------ */
    653
    654#if !NO_MALLINFO
    655/*
    656  This version of malloc supports the standard SVID/XPG mallinfo
    657  routine that returns a struct containing usage properties and
    658  statistics. It should work on any system that has a
    659  /usr/include/malloc.h defining struct mallinfo.  The main
    660  declaration needed is the mallinfo struct that is returned (by-copy)
    661  by mallinfo().  The mallinfo struct contains a bunch of fields that
    662  are not even meaningful in this version of malloc.  These fields
    663  are instead filled by mallinfo() with other numbers that might be of
    664  interest.
    665
    666  HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
    667  /usr/include/malloc.h file that includes a declaration of struct
    668  mallinfo.  If so, it is included; else a compliant version is
    669  declared below.  These must be precisely the same for mallinfo() to
    670  work.  The original SVID version of this struct, defined on most
    671  systems with mallinfo, declares all fields as ints. But some others
    672  define as unsigned long. If your system defines the fields using a
    673  type of different width than listed here, you MUST #include your
    674  system version and #define HAVE_USR_INCLUDE_MALLOC_H.
    675*/
    676
    677/* #define HAVE_USR_INCLUDE_MALLOC_H */
    678
    679#ifdef HAVE_USR_INCLUDE_MALLOC_H
    680#include "/usr/include/malloc.h"
    681#else /* HAVE_USR_INCLUDE_MALLOC_H */
    682
    683struct mallinfo
    684{
    685    MALLINFO_FIELD_TYPE arena;  /* non-mmapped space allocated from system */
    686    MALLINFO_FIELD_TYPE ordblks;        /* number of free chunks */
    687    MALLINFO_FIELD_TYPE smblks; /* always 0 */
    688    MALLINFO_FIELD_TYPE hblks;  /* always 0 */
    689    MALLINFO_FIELD_TYPE hblkhd; /* space in mmapped regions */
    690    MALLINFO_FIELD_TYPE usmblks;        /* maximum total allocated space */
    691    MALLINFO_FIELD_TYPE fsmblks;        /* always 0 */
    692    MALLINFO_FIELD_TYPE uordblks;       /* total allocated space */
    693    MALLINFO_FIELD_TYPE fordblks;       /* total free space */
    694    MALLINFO_FIELD_TYPE keepcost;       /* releasable (via malloc_trim) space */
    695};
    696
    697#endif /* HAVE_USR_INCLUDE_MALLOC_H */
    698#endif /* NO_MALLINFO */
    699
    700#ifdef __cplusplus
    701extern "C"
    702{
    703#endif                          /* __cplusplus */
    704
    705#if !ONLY_MSPACES
    706
    707/* ------------------- Declarations of public routines ------------------- */
    708
    709#ifndef USE_DL_PREFIX
    710#define dlcalloc               calloc
    711#define dlfree                 free
    712#define dlmalloc               malloc
    713#define dlmemalign             memalign
    714#define dlrealloc              realloc
    715#define dlvalloc               valloc
    716#define dlpvalloc              pvalloc
    717#define dlmallinfo             mallinfo
    718#define dlmallopt              mallopt
    719#define dlmalloc_trim          malloc_trim
    720#define dlmalloc_stats         malloc_stats
    721#define dlmalloc_usable_size   malloc_usable_size
    722#define dlmalloc_footprint     malloc_footprint
    723#define dlmalloc_max_footprint malloc_max_footprint
    724#define dlindependent_calloc   independent_calloc
    725#define dlindependent_comalloc independent_comalloc
    726#endif                          /* USE_DL_PREFIX */
    727
    728
    729/*
    730  malloc(size_t n)
    731  Returns a pointer to a newly allocated chunk of at least n bytes, or
    732  null if no space is available, in which case errno is set to ENOMEM
    733  on ANSI C systems.
    734
    735  If n is zero, malloc returns a minimum-sized chunk. (The minimum
    736  size is 16 bytes on most 32bit systems, and 32 bytes on 64bit
    737  systems.)  Note that size_t is an unsigned type, so calls with
    738  arguments that would be negative if signed are interpreted as
    739  requests for huge amounts of space, which will often fail. The
    740  maximum supported value of n differs across systems, but is in all
    741  cases less than the maximum representable value of a size_t.
    742*/
    743    void *dlmalloc(size_t);
    744
    745/*
    746  free(void* p)
    747  Releases the chunk of memory pointed to by p, that had been previously
    748  allocated using malloc or a related routine such as realloc.
    749  It has no effect if p is null. If p was not malloced or already
    750  freed, free(p) will by default cause the current program to abort.
    751*/
    752    void dlfree(void *);
    753
    754/*
    755  calloc(size_t n_elements, size_t element_size);
    756  Returns a pointer to n_elements * element_size bytes, with all locations
    757  set to zero.
    758*/
    759    void *dlcalloc(size_t, size_t);
    760
    761/*
    762  realloc(void* p, size_t n)
    763  Returns a pointer to a chunk of size n that contains the same data
    764  as does chunk p up to the minimum of (n, p's size) bytes, or null
    765  if no space is available.
    766
    767  The returned pointer may or may not be the same as p. The algorithm
    768  prefers extending p in most cases when possible, otherwise it
    769  employs the equivalent of a malloc-copy-free sequence.
    770
    771  If p is null, realloc is equivalent to malloc.
    772
    773  If space is not available, realloc returns null, errno is set (if on
    774  ANSI) and p is NOT freed.
    775
    776  if n is for fewer bytes than already held by p, the newly unused
    777  space is lopped off and freed if possible.  realloc with a size
    778  argument of zero (re)allocates a minimum-sized chunk.
    779
    780  The old unix realloc convention of allowing the last-free'd chunk
    781  to be used as an argument to realloc is not supported.
    782*/
    783
    784    void *dlrealloc(void *, size_t);
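
/*
  Editor's note: a common safe-growth pattern for the behavior documented
  above (illustrative sketch, not from the original file).  Because a failed
  realloc leaves p untouched, the old pointer is kept until the call
  succeeds; handle_out_of_memory() is a hypothetical error handler.

    void *tmp = dlrealloc(p, newsize);
    if (tmp != 0)
      p = tmp;                  // p now refers to the (possibly moved) chunk
    else
      handle_out_of_memory();   // p is still valid and still owned by caller
*/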
    785
    786/*
    787  memalign(size_t alignment, size_t n);
    788  Returns a pointer to a newly allocated chunk of n bytes, aligned
    789  in accord with the alignment argument.
    790
    791  The alignment argument should be a power of two. If the argument is
    792  not a power of two, the nearest greater power is used.
    793  8-byte alignment is guaranteed by normal malloc calls, so don't
    794  bother calling memalign with an argument of 8 or less.
    795
    796  Overreliance on memalign is a sure way to fragment space.
    797*/
    798    void *dlmemalign(size_t, size_t);
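
/*
  Editor's note: illustrative only, not part of the original source.  A
  cache-line-aligned buffer might be obtained as follows (64 and 1024 are
  arbitrary example values; the alignment must be a power of two):

    void *p = dlmemalign(64, 1024);   // 1024 bytes, aligned to 64 bytes
    if (p != 0)
      dlfree(p);                      // released like any other chunk
*/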
    799
    800/*
    801  valloc(size_t n);
    802  Equivalent to memalign(pagesize, n), where pagesize is the page
    803  size of the system. If the pagesize is unknown, 4096 is used.
    804*/
    805    void *dlvalloc(size_t);
    806
    807/*
    808  mallopt(int parameter_number, int parameter_value)
    809  Sets tunable parameters. The format is to provide a
    810  (parameter-number, parameter-value) pair.  mallopt then sets the
    811  corresponding parameter to the argument value if it can (i.e., so
    812  long as the value is meaningful), and returns 1 if successful else
    813  0.  SVID/XPG/ANSI defines four standard param numbers for mallopt,
    814  normally defined in malloc.h.  None of these are used in this malloc,
    815  so setting them has no effect. But this malloc also supports other
    816  options in mallopt. See below for details.  Briefly, supported
    817  parameters are as follows (listed defaults are for "typical"
    818  configurations).
    819
    820  Symbol            param #  default    allowed param values
    821  M_TRIM_THRESHOLD     -1   2*1024*1024   any   (MAX_SIZE_T disables)
    822  M_GRANULARITY        -2     page size   any power of 2 >= page size
    823  M_MMAP_THRESHOLD     -3      256*1024   any   (or 0 if no MMAP support)
    824*/
    825    int dlmallopt(int, int);
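
/*
  Editor's note: an illustrative tuning sketch using the parameters listed
  above (not part of the original file; the values are arbitrary examples).
  Each call returns 1 on success and 0 if the value is rejected.

    dlmallopt(M_TRIM_THRESHOLD, 128 * 1024);    // trim once >128KB sits unused on top
    dlmallopt(M_GRANULARITY,    64 * 1024);     // grab system memory in 64KB units
    dlmallopt(M_MMAP_THRESHOLD, 1024 * 1024);   // mmap requests of 1MB and larger
*/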
    826
    827/*
    828  malloc_footprint();
    829  Returns the number of bytes obtained from the system.  The total
    830  number of bytes allocated by malloc, realloc etc., is less than this
    831  value. Unlike mallinfo, this function returns only a precomputed
    832  result, so can be called frequently to monitor memory consumption.
    833  Even if locks are otherwise defined, this function does not use them,
    834  so results might not be up to date.
    835*/
    836    size_t dlmalloc_footprint(void);
    837
    838/*
    839  malloc_max_footprint();
    840  Returns the maximum number of bytes obtained from the system. This
    841  value will be greater than current footprint if deallocated space
    842  has been reclaimed by the system. The peak number of bytes allocated
    843  by malloc, realloc etc., is less than this value. Unlike mallinfo,
    844  this function returns only a precomputed result, so can be called
    845  frequently to monitor memory consumption.  Even if locks are
    846  otherwise defined, this function does not use them, so results might
    847  not be up to date.
    848*/
    849    size_t dlmalloc_max_footprint(void);
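
/*
  Editor's note: a monitoring sketch for the two footprint calls above
  (illustrative, not part of the original source; run_phase() is a
  hypothetical workload):

    size_t before = dlmalloc_footprint();
    run_phase();
    size_t after = dlmalloc_footprint();      // current bytes from the system
    size_t peak  = dlmalloc_max_footprint();  // high-water mark so far
*/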
    850
    851#if !NO_MALLINFO
    852/*
    853  mallinfo()
    854  Returns (by copy) a struct containing various summary statistics:
    855
    856  arena:     current total non-mmapped bytes allocated from system
    857  ordblks:   the number of free chunks
    858  smblks:    always zero.
    859  hblks:     current number of mmapped regions
    860  hblkhd:    total bytes held in mmapped regions
    861  usmblks:   the maximum total allocated space. This will be greater
    862                than current total if trimming has occurred.
    863  fsmblks:   always zero
    864  uordblks:  current total allocated space (normal or mmapped)
    865  fordblks:  total free space
    866  keepcost:  the maximum number of bytes that could ideally be released
    867               back to system via malloc_trim. ("ideally" means that
    868               it ignores page restrictions etc.)
    869
    870  Because these fields are ints, but internal bookkeeping may
    871  be kept as longs, the reported values may wrap around zero and
    872  thus be inaccurate.
    873*/
    874    struct mallinfo dlmallinfo(void);
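
/*
  Editor's note: illustrative use of the statistics described above (not
  part of the original source).  printf is assumed to be available, which
  is not the case in this SDL build since LACKS_STDIO_H is defined here.

    struct mallinfo mi = dlmallinfo();
    printf("allocated: %lu bytes, free: %lu bytes\n",
           (unsigned long) mi.uordblks, (unsigned long) mi.fordblks);
*/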
    875#endif                          /* NO_MALLINFO */
    876
    877/*
    878  independent_calloc(size_t n_elements, size_t element_size, void* chunks[]);
    879
    880  independent_calloc is similar to calloc, but instead of returning a
    881  single cleared space, it returns an array of pointers to n_elements
    882  independent elements that can hold contents of size elem_size, each
    883  of which starts out cleared, and can be independently freed,
    884  realloc'ed etc. The elements are guaranteed to be adjacently
    885  allocated (this is not guaranteed to occur with multiple callocs or
    886  mallocs), which may also improve cache locality in some
    887  applications.
    888
    889  The "chunks" argument is optional (i.e., may be null, which is
    890  probably the most typical usage). If it is null, the returned array
    891  is itself dynamically allocated and should also be freed when it is
    892  no longer needed. Otherwise, the chunks array must be of at least
    893  n_elements in length. It is filled in with the pointers to the
    894  chunks.
    895
    896  In either case, independent_calloc returns this pointer array, or
    897  null if the allocation failed.  If n_elements is zero and "chunks"
    898  is null, it returns a chunk representing an array with zero elements
    899  (which should be freed if not wanted).
    900
    901  Each element must be individually freed when it is no longer
    902  needed. If you'd like to instead be able to free all at once, you
    903  should instead use regular calloc and assign pointers into this
    904  space to represent elements.  (In this case though, you cannot
    905  independently free elements.)
    906
    907  independent_calloc simplifies and speeds up implementations of many
    908  kinds of pools.  It may also be useful when constructing large data
    909  structures that initially have a fixed number of fixed-sized nodes,
    910  but the number is not known at compile time, and some of the nodes
    911  may later need to be freed. For example:
    912
    913  struct Node { int item; struct Node* next; };
    914
    915  struct Node* build_list() {
    916    struct Node** pool;
    917    int n = read_number_of_nodes_needed();
    918    if (n <= 0) return 0;
    919    pool = (struct Node**) independent_calloc(n, sizeof(struct Node), 0);
    920    if (pool == 0) die();
    921    // organize into a linked list...
    922    struct Node* first = pool[0];
    923    for (int i = 0; i < n-1; ++i)
    924      pool[i]->next = pool[i+1];
    925    free(pool);     // Can now free the array (or not, if it is needed later)
    926    return first;
    927  }
    928*/
    929    void **dlindependent_calloc(size_t, size_t, void **);
    930
    931/*
    932  independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
    933
    934  independent_comalloc allocates, all at once, a set of n_elements
    935  chunks with sizes indicated in the "sizes" array.    It returns
    936  an array of pointers to these elements, each of which can be
    937  independently freed, realloc'ed etc. The elements are guaranteed to
    938  be adjacently allocated (this is not guaranteed to occur with
    939  multiple callocs or mallocs), which may also improve cache locality
    940  in some applications.
    941
    942  The "chunks" argument is optional (i.e., may be null). If it is null
    943  the returned array is itself dynamically allocated and should also
    944  be freed when it is no longer needed. Otherwise, the chunks array
    945  must be of at least n_elements in length. It is filled in with the
    946  pointers to the chunks.
    947
    948  In either case, independent_comalloc returns this pointer array, or
    949  null if the allocation failed.  If n_elements is zero and chunks is
    950  null, it returns a chunk representing an array with zero elements
    951  (which should be freed if not wanted).
    952
    953  Each element must be individually freed when it is no longer
    954  needed. If you'd like to instead be able to free all at once, you
    955  should instead use a single regular malloc, and assign pointers at
    956  particular offsets in the aggregate space. (In this case though, you
    957  cannot independently free elements.)
    958
    959  independent_comalloc differs from independent_calloc in that each
    960  element may have a different size, and also that it does not
    961  automatically clear elements.
    962
    963  independent_comalloc can be used to speed up allocation in cases
    964  where several structs or objects must always be allocated at the
    965  same time.  For example:
    966
    967  struct Head { ... }
    968  struct Foot { ... }
    969
    970  void send_message(char* msg) {
    971    int msglen = strlen(msg);
    972    size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
    973    void* chunks[3];
    974    if (independent_comalloc(3, sizes, chunks) == 0)
    975      die();
    976    struct Head* head = (struct Head*)(chunks[0]);
    977    char*        body = (char*)(chunks[1]);
    978    struct Foot* foot = (struct Foot*)(chunks[2]);
    979    // ...
    980  }
    981
    982  In general though, independent_comalloc is worth using only for
    983  larger values of n_elements. For small values, you probably won't
    984  detect enough difference from series of malloc calls to bother.
    985
    986  Overuse of independent_comalloc can increase overall memory usage,
    987  since it cannot reuse existing noncontiguous small chunks that
    988  might be available for some of the elements.
    989*/
    990    void **dlindependent_comalloc(size_t, size_t *, void **);
    991
    992
    993/*
    994  pvalloc(size_t n);
    995  Equivalent to valloc(minimum-page-that-holds(n)), that is,
    996  round up n to nearest pagesize.
    997 */
    998    void *dlpvalloc(size_t);
    999
   1000/*
   1001  malloc_trim(size_t pad);
   1002
   1003  If possible, gives memory back to the system (via negative arguments
   1004  to sbrk) if there is unused memory at the `high' end of the malloc
   1005  pool or in unused MMAP segments. You can call this after freeing
   1006  large blocks of memory to potentially reduce the system-level memory
   1007  requirements of a program. However, it cannot guarantee to reduce
   1008  memory. Under some allocation patterns, some large free blocks of
   1009  memory will be locked between two used chunks, so they cannot be
   1010  given back to the system.
   1011
   1012  The `pad' argument to malloc_trim represents the amount of free
   1013  trailing space to leave untrimmed. If this argument is zero, only
   1014  the minimum amount of memory to maintain internal data structures
   1015  will be left. Non-zero arguments can be supplied to maintain enough
   1016  trailing space to service future expected allocations without having
   1017  to re-obtain memory from the system.
   1018
   1019  Malloc_trim returns 1 if it actually released any memory, else 0.
   1020*/
   1021    int dlmalloc_trim(size_t);
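
/*
  Editor's note: illustrative only, not part of the original source.  After
  freeing a large batch of allocations, a program might return unused
  top-of-heap memory to the system while keeping 64KB of slack for future
  requests (the pad value is an arbitrary example):

    int released = dlmalloc_trim((size_t)64 * 1024);
    // released is 1 if any memory was actually handed back, else 0
*/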
   1022
   1023/*
   1024  malloc_usable_size(void* p);
   1025
   1026  Returns the number of bytes you can actually use in
   1027  an allocated chunk, which may be more than you requested (although
   1028  often not) due to alignment and minimum size constraints.
   1029  You can use this many bytes without worrying about
   1030  overwriting other allocated objects. This is not a particularly great
   1031  programming practice. malloc_usable_size can be more useful in
   1032  debugging and assertions, for example:
   1033
   1034  p = malloc(n);
   1035  assert(malloc_usable_size(p) >= 256);
   1036*/
   1037    size_t dlmalloc_usable_size(void *);
   1038
   1039/*
   1040  malloc_stats();
   1041  Prints on stderr the amount of space obtained from the system (both
   1042  via sbrk and mmap), the maximum amount (which may be more than
   1043  current if malloc_trim and/or munmap got called), and the current
   1044  number of bytes allocated via malloc (or realloc, etc) but not yet
   1045  freed. Note that this is the number of bytes allocated, not the
   1046  number requested. It will be larger than the number requested
   1047  because of alignment and bookkeeping overhead. Because it includes
   1048  alignment wastage as being in use, this figure may be greater than
   1049  zero even when no user-level chunks are allocated.
   1050
   1051  The reported current and maximum system memory can be inaccurate if
   1052  a program makes other calls to system memory allocation functions
   1053  (normally sbrk) outside of malloc.
   1054
   1055  malloc_stats prints only the most commonly interesting statistics.
   1056  More information can be obtained by calling mallinfo.
   1057*/
   1058    void dlmalloc_stats(void);
   1059
   1060#endif                          /* ONLY_MSPACES */
   1061
   1062#if MSPACES
   1063
   1064/*
   1065  mspace is an opaque type representing an independent
   1066  region of space that supports mspace_malloc, etc.
   1067*/
   1068    typedef void *mspace;
   1069
   1070/*
   1071  create_mspace creates and returns a new independent space with the
   1072  given initial capacity, or, if 0, the default granularity size.  It
   1073  returns null if there is no system memory available to create the
   1074  space.  If argument locked is non-zero, the space uses a separate
   1075  lock to control access. The capacity of the space will grow
   1076  dynamically as needed to service mspace_malloc requests.  You can
   1077  control the sizes of incremental increases of this space by
   1078  compiling with a different DEFAULT_GRANULARITY or dynamically
   1079  setting with mallopt(M_GRANULARITY, value).
   1080*/
   1081    mspace create_mspace(size_t capacity, int locked);
   1082
   1083/*
   1084  destroy_mspace destroys the given space, and attempts to return all
   1085  of its memory back to the system, returning the total number of
   1086  bytes freed. After destruction, the results of access to all memory
   1087  used by the space become undefined.
   1088*/
   1089    size_t destroy_mspace(mspace msp);
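
/*
  Editor's note: a minimal lifecycle sketch for the two routines above
  (illustrative, not part of the original file):

    mspace ms = create_mspace(0, 0);    // default initial capacity, no locking
    if (ms != 0) {
      void *p = mspace_malloc(ms, 128);
      mspace_free(ms, p);
      destroy_mspace(ms);               // returns all of the space's memory
    }
*/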
   1090
   1091/*
   1092  create_mspace_with_base uses the memory supplied as the initial base
   1093  of a new mspace. Part (less than 128*sizeof(size_t) bytes) of this
   1094  space is used for bookkeeping, so the capacity must be at least this
   1095  large. (Otherwise 0 is returned.) When this initial space is
   1096  exhausted, additional memory will be obtained from the system.
   1097  Destroying this space will deallocate all additionally allocated
   1098  space (if possible) but not the initial base.
   1099*/
   1100    mspace create_mspace_with_base(void *base, size_t capacity, int locked);
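
/*
  Editor's note: an illustrative sketch of seeding an mspace from a static
  buffer, per the description above (not part of the original source; the
  buffer size is an arbitrary example but must exceed the bookkeeping
  overhead of less than 128*sizeof(size_t) bytes):

    static char arena[64 * 1024];
    mspace ms = create_mspace_with_base(arena, sizeof(arena), 0);
    if (ms != 0) {
      void *p = mspace_malloc(ms, 256);   // comes out of 'arena' initially
      mspace_free(ms, p);
    }
*/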
   1101
   1102/*
   1103  mspace_malloc behaves as malloc, but operates within
   1104  the given space.
   1105*/
   1106    void *mspace_malloc(mspace msp, size_t bytes);
   1107
   1108/*
   1109  mspace_free behaves as free, but operates within
   1110  the given space.
   1111
   1112  If compiled with FOOTERS==1, mspace_free is not actually needed.
   1113  free may be called instead of mspace_free because freed chunks from
   1114  any space are handled by their originating spaces.
   1115*/
   1116    void mspace_free(mspace msp, void *mem);
   1117
   1118/*
   1119  mspace_realloc behaves as realloc, but operates within
   1120  the given space.
   1121
   1122  If compiled with FOOTERS==1, mspace_realloc is not actually
   1123  needed.  realloc may be called instead of mspace_realloc because
   1124  realloced chunks from any space are handled by their originating
   1125  spaces.
   1126*/
   1127    void *mspace_realloc(mspace msp, void *mem, size_t newsize);
   1128
   1129/*
   1130  mspace_calloc behaves as calloc, but operates within
   1131  the given space.
   1132*/
   1133    void *mspace_calloc(mspace msp, size_t n_elements, size_t elem_size);
   1134
   1135/*
   1136  mspace_memalign behaves as memalign, but operates within
   1137  the given space.
   1138*/
   1139    void *mspace_memalign(mspace msp, size_t alignment, size_t bytes);
   1140
   1141/*
   1142  mspace_independent_calloc behaves as independent_calloc, but
   1143  operates within the given space.
   1144*/
   1145    void **mspace_independent_calloc(mspace msp, size_t n_elements,
   1146                                     size_t elem_size, void *chunks[]);
   1147
   1148/*
   1149  mspace_independent_comalloc behaves as independent_comalloc, but
   1150  operates within the given space.
   1151*/
   1152    void **mspace_independent_comalloc(mspace msp, size_t n_elements,
   1153                                       size_t sizes[], void *chunks[]);
   1154
   1155/*
   1156  mspace_footprint() returns the number of bytes obtained from the
   1157  system for this space.
   1158*/
   1159    size_t mspace_footprint(mspace msp);
   1160
   1161/*
   1162  mspace_max_footprint() returns the peak number of bytes obtained from the
   1163  system for this space.
   1164*/
   1165    size_t mspace_max_footprint(mspace msp);
   1166
   1167
   1168#if !NO_MALLINFO
   1169/*
   1170  mspace_mallinfo behaves as mallinfo, but reports properties of
   1171  the given space.
   1172*/
   1173    struct mallinfo mspace_mallinfo(mspace msp);
   1174#endif                          /* NO_MALLINFO */
   1175
   1176/*
   1177  mspace_malloc_stats behaves as malloc_stats, but reports
   1178  properties of the given space.
   1179*/
   1180    void mspace_malloc_stats(mspace msp);
   1181
   1182/*
   1183  mspace_trim behaves as malloc_trim, but
   1184  operates within the given space.
   1185*/
   1186    int mspace_trim(mspace msp, size_t pad);
   1187
   1188/*
   1189  An alias for mallopt.
   1190*/
   1191    int mspace_mallopt(int, int);
   1192
   1193#endif                          /* MSPACES */
   1194
   1195#ifdef __cplusplus
   1196};                              /* end of extern "C" */
   1197#endif /* __cplusplus */
   1198
   1199/*
   1200  ========================================================================
   1201  To make a fully customizable malloc.h header file, cut everything
   1202  above this line, put into file malloc.h, edit to suit, and #include it
   1203  on the next line, as well as in programs that use this malloc.
   1204  ========================================================================
   1205*/
   1206
   1207/* #include "malloc.h" */
   1208
   1209/*------------------------------ internal #includes ---------------------- */
   1210
   1211#ifdef _MSC_VER
   1212#pragma warning( disable : 4146 )       /* no "unsigned" warnings */
   1213#endif /* _MSC_VER */
   1214
   1215#ifndef LACKS_STDIO_H
   1216#include <stdio.h>              /* for printing in malloc_stats */
   1217#endif
   1218
   1219#ifndef LACKS_ERRNO_H
   1220#include <errno.h>              /* for MALLOC_FAILURE_ACTION */
   1221#endif /* LACKS_ERRNO_H */
   1222#if FOOTERS
   1223#include <time.h>               /* for magic initialization */
   1224#endif /* FOOTERS */
   1225#ifndef LACKS_STDLIB_H
   1226#include <stdlib.h>             /* for abort() */
   1227#endif /* LACKS_STDLIB_H */
   1228#ifdef DEBUG
   1229#if ABORT_ON_ASSERT_FAILURE
   1230#define assert(x) if(!(x)) ABORT
   1231#else /* ABORT_ON_ASSERT_FAILURE */
   1232#include <assert.h>
   1233#endif /* ABORT_ON_ASSERT_FAILURE */
   1234#else /* DEBUG */
   1235#define assert(x)
   1236#endif /* DEBUG */
   1237#ifndef LACKS_STRING_H
   1238#include <string.h>             /* for memset etc */
   1239#endif /* LACKS_STRING_H */
   1240#if USE_BUILTIN_FFS
   1241#ifndef LACKS_STRINGS_H
   1242#include <strings.h>            /* for ffs */
   1243#endif /* LACKS_STRINGS_H */
   1244#endif /* USE_BUILTIN_FFS */
   1245#if HAVE_MMAP
   1246#ifndef LACKS_SYS_MMAN_H
   1247#include <sys/mman.h>           /* for mmap */
   1248#endif /* LACKS_SYS_MMAN_H */
   1249#ifndef LACKS_FCNTL_H
   1250#include <fcntl.h>
   1251#endif /* LACKS_FCNTL_H */
   1252#endif /* HAVE_MMAP */
   1253#if HAVE_MORECORE
   1254#ifndef LACKS_UNISTD_H
   1255#include <unistd.h>             /* for sbrk */
   1256#else /* LACKS_UNISTD_H */
   1257#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
   1258extern void *sbrk(ptrdiff_t);
   1259#endif /* FreeBSD etc */
   1260#endif /* LACKS_UNISTD_H */
   1261#endif /* HAVE_MORECORE */
   1262
   1263#ifndef WIN32
   1264#ifndef malloc_getpagesize
   1265#  ifdef _SC_PAGESIZE           /* some SVR4 systems omit an underscore */
   1266#    ifndef _SC_PAGE_SIZE
   1267#      define _SC_PAGE_SIZE _SC_PAGESIZE
   1268#    endif
   1269#  endif
   1270#  ifdef _SC_PAGE_SIZE
   1271#    define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
   1272#  else
   1273#    if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
   1274extern size_t getpagesize();
   1275#      define malloc_getpagesize getpagesize()
   1276#    else
   1277#      ifdef WIN32              /* use supplied emulation of getpagesize */
   1278#        define malloc_getpagesize getpagesize()
   1279#      else
   1280#        ifndef LACKS_SYS_PARAM_H
   1281#          include <sys/param.h>
   1282#        endif
   1283#        ifdef EXEC_PAGESIZE
   1284#          define malloc_getpagesize EXEC_PAGESIZE
   1285#        else
   1286#          ifdef NBPG
   1287#            ifndef CLSIZE
   1288#              define malloc_getpagesize NBPG
   1289#            else
   1290#              define malloc_getpagesize (NBPG * CLSIZE)
   1291#            endif
   1292#          else
   1293#            ifdef NBPC
   1294#              define malloc_getpagesize NBPC
   1295#            else
   1296#              ifdef PAGESIZE
   1297#                define malloc_getpagesize PAGESIZE
   1298#              else /* just guess */
   1299#                define malloc_getpagesize ((size_t)4096U)
   1300#              endif
   1301#            endif
   1302#          endif
   1303#        endif
   1304#      endif
   1305#    endif
   1306#  endif
   1307#endif
   1308#endif
   1309
   1310/* ------------------- size_t and alignment properties -------------------- */
   1311
   1312/* The byte and bit size of a size_t */
   1313#define SIZE_T_SIZE         (sizeof(size_t))
   1314#define SIZE_T_BITSIZE      (sizeof(size_t) << 3)
   1315
   1316/* Some constants coerced to size_t */
   1317/* Annoying but necessary to avoid errors on some platforms */
   1318#define SIZE_T_ZERO         ((size_t)0)
   1319#define SIZE_T_ONE          ((size_t)1)
   1320#define SIZE_T_TWO          ((size_t)2)
   1321#define TWO_SIZE_T_SIZES    (SIZE_T_SIZE<<1)
   1322#define FOUR_SIZE_T_SIZES   (SIZE_T_SIZE<<2)
   1323#define SIX_SIZE_T_SIZES    (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
   1324#define HALF_MAX_SIZE_T     (MAX_SIZE_T / 2U)
   1325
   1326/* The bit mask value corresponding to MALLOC_ALIGNMENT */
   1327#define CHUNK_ALIGN_MASK    (MALLOC_ALIGNMENT - SIZE_T_ONE)
   1328
   1329/* True if address a has acceptable alignment */
   1330#define is_aligned(A)       (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)
   1331
   1332/* the number of bytes to offset an address to align it */
   1333#define align_offset(A)\
   1334 ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
   1335  ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
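/*
  For illustration, assuming MALLOC_ALIGNMENT == 8 (so CHUNK_ALIGN_MASK == 7):

    is_aligned(0x1008)   -> (0x1008 & 7) == 0, true
    align_offset(0x100D) -> 0x100D & 7 == 5, so offset == (8 - 5) & 7 == 3,
                            and 0x100D + 3 == 0x1010 is aligned
    align_offset(0x1010) -> already aligned, offset == 0
*/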
   1336
   1337/* -------------------------- MMAP preliminaries ------------------------- */
   1338
   1339/*
   1340   If HAVE_MORECORE or HAVE_MMAP are false, we just define calls and
   1341   checks to fail so compiler optimizer can delete code rather than
   1342   using so many "#if"s.
   1343*/
   1344
   1345
   1346/* MORECORE and MMAP must return MFAIL on failure */
   1347#define MFAIL                ((void*)(MAX_SIZE_T))
   1348#define CMFAIL               ((char*)(MFAIL))   /* defined for convenience */
   1349
   1350#if !HAVE_MMAP
   1351#define IS_MMAPPED_BIT       (SIZE_T_ZERO)
   1352#define USE_MMAP_BIT         (SIZE_T_ZERO)
   1353#define CALL_MMAP(s)         MFAIL
   1354#define CALL_MUNMAP(a, s)    (-1)
   1355#define DIRECT_MMAP(s)       MFAIL
   1356
   1357#else /* HAVE_MMAP */
   1358#define IS_MMAPPED_BIT       (SIZE_T_ONE)
   1359#define USE_MMAP_BIT         (SIZE_T_ONE)
   1360
   1361#ifndef WIN32
   1362#define CALL_MUNMAP(a, s)    munmap((a), (s))
   1363#define MMAP_PROT            (PROT_READ|PROT_WRITE)
   1364#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
   1365#define MAP_ANONYMOUS        MAP_ANON
   1366#endif /* MAP_ANON */
   1367#ifdef MAP_ANONYMOUS
   1368#define MMAP_FLAGS           (MAP_PRIVATE|MAP_ANONYMOUS)
   1369#define CALL_MMAP(s)         mmap(0, (s), MMAP_PROT, MMAP_FLAGS, -1, 0)
   1370#else /* MAP_ANONYMOUS */
   1371/*
   1372   Nearly all versions of mmap support MAP_ANONYMOUS, so the following
   1373   is unlikely to be needed, but is supplied just in case.
   1374*/
   1375#define MMAP_FLAGS           (MAP_PRIVATE)
   1376static int dev_zero_fd = -1;    /* Cached file descriptor for /dev/zero. */
   1377#define CALL_MMAP(s) ((dev_zero_fd < 0) ? \
   1378           (dev_zero_fd = open("/dev/zero", O_RDWR), \
   1379            mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \
   1380            mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0))
   1381#endif /* MAP_ANONYMOUS */
   1382
   1383#define DIRECT_MMAP(s)       CALL_MMAP(s)
   1384#else /* WIN32 */
   1385
   1386/* Win32 MMAP via VirtualAlloc */
   1387static void *
   1388win32mmap(size_t size)
   1389{
   1390    void *ptr =
   1391        VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
   1392    return (ptr != 0) ? ptr : MFAIL;
   1393}
   1394
   1395/* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
   1396static void *
   1397win32direct_mmap(size_t size)
   1398{
   1399    void *ptr = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT | MEM_TOP_DOWN,
   1400                             PAGE_READWRITE);
   1401    return (ptr != 0) ? ptr : MFAIL;
   1402}
   1403
   1404/* This function supports releasing coalesced segments */
   1405static int
   1406win32munmap(void *ptr, size_t size)
   1407{
   1408    MEMORY_BASIC_INFORMATION minfo;
   1409    char *cptr = ptr;
   1410    while (size) {
   1411        if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
   1412            return -1;
   1413        if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
   1414            minfo.State != MEM_COMMIT || minfo.RegionSize > size)
   1415            return -1;
   1416        if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
   1417            return -1;
   1418        cptr += minfo.RegionSize;
   1419        size -= minfo.RegionSize;
   1420    }
   1421    return 0;
   1422}
   1423
   1424#define CALL_MMAP(s)         win32mmap(s)
   1425#define CALL_MUNMAP(a, s)    win32munmap((a), (s))
   1426#define DIRECT_MMAP(s)       win32direct_mmap(s)
   1427#endif /* WIN32 */
   1428#endif /* HAVE_MMAP */
   1429
   1430#if HAVE_MMAP && HAVE_MREMAP
   1431#define CALL_MREMAP(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv))
   1432#else /* HAVE_MMAP && HAVE_MREMAP */
   1433#define CALL_MREMAP(addr, osz, nsz, mv) MFAIL
   1434#endif /* HAVE_MMAP && HAVE_MREMAP */
   1435
   1436#if HAVE_MORECORE
   1437#define CALL_MORECORE(S)     MORECORE(S)
   1438#else /* HAVE_MORECORE */
   1439#define CALL_MORECORE(S)     MFAIL
   1440#endif /* HAVE_MORECORE */
   1441
   1442/* mstate bit set if contiguous morecore disabled or failed */
   1443#define USE_NONCONTIGUOUS_BIT (4U)
   1444
   1445/* segment bit set in create_mspace_with_base */
   1446#define EXTERN_BIT            (8U)
   1447
   1448
   1449/* --------------------------- Lock preliminaries ------------------------ */
   1450
   1451#if USE_LOCKS
   1452
   1453/*
   1454  When locks are defined, there are up to two global locks:
   1455
   1456  * If HAVE_MORECORE, morecore_mutex protects sequences of calls to
   1457    MORECORE.  In many cases sys_alloc requires two calls that should
   1458    not be interleaved with calls by other threads.  This does not
   1459    protect against direct calls to MORECORE by other threads not
   1460    using this lock, so there is still code to cope as best we can
   1461    with interference.
   1462
   1463  * magic_init_mutex ensures that mparams.magic and other
   1464    unique mparams values are initialized only once.
   1465*/
   1466
   1467#ifndef WIN32
   1468/* By default use posix locks */
   1469#include <pthread.h>
   1470#define MLOCK_T pthread_mutex_t
   1471#define INITIAL_LOCK(l)      pthread_mutex_init(l, NULL)
   1472#define ACQUIRE_LOCK(l)      pthread_mutex_lock(l)
   1473#define RELEASE_LOCK(l)      pthread_mutex_unlock(l)
   1474
   1475#if HAVE_MORECORE
   1476static MLOCK_T morecore_mutex = PTHREAD_MUTEX_INITIALIZER;
   1477#endif /* HAVE_MORECORE */
   1478
   1479static MLOCK_T magic_init_mutex = PTHREAD_MUTEX_INITIALIZER;
   1480
   1481#else /* WIN32 */
   1482/*
   1483   Because lock-protected regions have bounded times, and there
   1484   are no recursive lock calls, we can use simple spinlocks.
   1485*/
   1486
   1487#define MLOCK_T long
   1488static int
   1489win32_acquire_lock(MLOCK_T * sl)
   1490{
   1491    for (;;) {
   1492#ifdef InterlockedCompareExchangePointer
   1493        if (!InterlockedCompareExchange(sl, 1, 0))
   1494            return 0;
   1495#else /* Use older void* version */
   1496        if (!InterlockedCompareExchange((void **) sl, (void *) 1, (void *) 0))
   1497            return 0;
   1498#endif /* InterlockedCompareExchangePointer */
   1499        Sleep(0);
   1500    }
   1501}
   1502
   1503static void
   1504win32_release_lock(MLOCK_T * sl)
   1505{
   1506    InterlockedExchange(sl, 0);
   1507}
   1508
   1509#define INITIAL_LOCK(l)      *(l)=0
   1510#define ACQUIRE_LOCK(l)      win32_acquire_lock(l)
   1511#define RELEASE_LOCK(l)      win32_release_lock(l)
   1512#if HAVE_MORECORE
   1513static MLOCK_T morecore_mutex;
   1514#endif /* HAVE_MORECORE */
   1515static MLOCK_T magic_init_mutex;
   1516#endif /* WIN32 */
   1517
   1518#define USE_LOCK_BIT               (2U)
   1519#else /* USE_LOCKS */
   1520#define USE_LOCK_BIT               (0U)
   1521#define INITIAL_LOCK(l)
   1522#endif /* USE_LOCKS */
   1523
   1524#if USE_LOCKS && HAVE_MORECORE
   1525#define ACQUIRE_MORECORE_LOCK()    ACQUIRE_LOCK(&morecore_mutex);
   1526#define RELEASE_MORECORE_LOCK()    RELEASE_LOCK(&morecore_mutex);
   1527#else /* USE_LOCKS && HAVE_MORECORE */
   1528#define ACQUIRE_MORECORE_LOCK()
   1529#define RELEASE_MORECORE_LOCK()
   1530#endif /* USE_LOCKS && HAVE_MORECORE */
   1531
   1532#if USE_LOCKS
   1533#define ACQUIRE_MAGIC_INIT_LOCK()  ACQUIRE_LOCK(&magic_init_mutex);
   1534#define RELEASE_MAGIC_INIT_LOCK()  RELEASE_LOCK(&magic_init_mutex);
   1535#else /* USE_LOCKS */
   1536#define ACQUIRE_MAGIC_INIT_LOCK()
   1537#define RELEASE_MAGIC_INIT_LOCK()
   1538#endif /* USE_LOCKS */
   1539
   1540
   1541/* -----------------------  Chunk representations ------------------------ */
   1542
   1543/*
   1544  (The following includes lightly edited explanations by Colin Plumb.)
   1545
   1546  The malloc_chunk declaration below is misleading (but accurate and
   1547  necessary).  It declares a "view" into memory allowing access to
   1548  necessary fields at known offsets from a given base.
   1549
   1550  Chunks of memory are maintained using a `boundary tag' method as
   1551  originally described by Knuth.  (See the paper by Paul Wilson
   1552  ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a survey of such
   1553  techniques.)  Sizes of free chunks are stored both in the front of
   1554  each chunk and at the end.  This makes consolidating fragmented
   1555  chunks into bigger chunks fast.  The head fields also hold bits
   1556  representing whether chunks are free or in use.
   1557
   1558  Here are some pictures to make it clearer.  They are "exploded" to
   1559  show that the state of a chunk can be thought of as extending from
   1560  the high 31 bits of the head field of its header through the
   1561  prev_foot and PINUSE_BIT bit of the following chunk header.
   1562
   1563  A chunk that's in use looks like:
   1564
   1565   chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
   1566           | Size of previous chunk (if P = 1)                             |
   1567           +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
   1568         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
   1569         | Size of this chunk                                         1| +-+
   1570   mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
   1571         |                                                               |
   1572         +-                                                             -+
   1573         |                                                               |
   1574         +-                                                             -+
   1575         |                                                               :
   1576         +-      size - sizeof(size_t) available payload bytes          -+
   1577         :                                                               |
   1578 chunk-> +-                                                             -+
   1579         |                                                               |
   1580         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
   1581       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |1|
   1582       | Size of next chunk (may or may not be in use)               | +-+
   1583 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
   1584
   1585    And if it's free, it looks like this:
   1586
   1587   chunk-> +-                                                             -+
   1588           | User payload (must be in use, or we would have merged!)       |
   1589           +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
   1590         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
   1591         | Size of this chunk                                         0| +-+
   1592   mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
   1593         | Next pointer                                                  |
   1594         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
   1595         | Prev pointer                                                  |
   1596         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
   1597         |                                                               :
   1598         +-      size - sizeof(struct chunk) unused bytes               -+
   1599         :                                                               |
   1600 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
   1601         | Size of this chunk                                            |
   1602         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
   1603       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |0|
   1604       | Size of next chunk (must be in use, or we would have merged)| +-+
   1605 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
   1606       |                                                               :
   1607       +- User payload                                                -+
   1608       :                                                               |
   1609       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
   1610                                                                     |0|
   1611                                                                     +-+
   1612  Note that since we always merge adjacent free chunks, the chunks
   1613  adjacent to a free chunk must be in use.
   1614
   1615  Given a pointer to a chunk (which can be derived trivially from the
   1616  payload pointer) we can, in O(1) time, find out whether the adjacent
   1617  chunks are free, and if so, unlink them from the lists that they
   1618  are on and merge them with the current chunk.
   1619
   1620  Chunks always begin on even word boundaries, so the mem portion
   1621  (which is returned to the user) is also on an even word boundary, and
   1622  thus at least double-word aligned.
   1623
   1624  The P (PINUSE_BIT) bit, stored in the unused low-order bit of the
   1625  chunk size (which is always a multiple of two words), is an in-use
   1626  bit for the *previous* chunk.  If that bit is *clear*, then the
   1627  word before the current chunk size contains the previous chunk
   1628  size, and can be used to find the front of the previous chunk.
   1629  The very first chunk allocated always has this bit set, preventing
   1630  access to non-existent (or non-owned) memory. If pinuse is set for
   1631  any given chunk, then you CANNOT determine the size of the
   1632  previous chunk, and might even get a memory addressing fault when
   1633  trying to do so.
   1634
   1635  The C (CINUSE_BIT) bit, stored in the unused second-lowest bit of
   1636  the chunk size redundantly records whether the current chunk is
   1637  inuse. This redundancy enables usage checks within free and realloc,
   1638  and reduces indirection when freeing and consolidating chunks.
   1639
   1640  Each freshly allocated chunk must have both cinuse and pinuse set.
   1641  That is, each allocated chunk borders either a previously allocated
   1642  and still in-use chunk, or the base of its memory arena. This is
   1643  ensured by making all allocations from the `lowest' part of any
   1644  found chunk.  Further, no free chunk physically borders another one,
   1645  so each free chunk is known to be preceded and followed by either
   1646  inuse chunks or the ends of memory.
   1647
   1648  Note that the `foot' of the current chunk is actually represented
   1649  as the prev_foot of the NEXT chunk. This makes it easier to
   1650  deal with alignments etc but can be very confusing when trying
   1651  to extend or adapt this code.
   1652
   1653  The exceptions to all this are
   1654
   1655     1. The special chunk `top' is the top-most available chunk (i.e.,
   1656        the one bordering the end of available memory). It is treated
   1657        specially.  Top is never included in any bin, is used only if
   1658        no other chunk is available, and is released back to the
   1659        system if it is very large (see M_TRIM_THRESHOLD).  In effect,
   1660        the top chunk is treated as larger (and thus less well
   1661        fitting) than any other available chunk.  The top chunk
   1662        doesn't update its trailing size field since there is no next
   1663        contiguous chunk that would have to index off it. However,
   1664        space is still allocated for it (TOP_FOOT_SIZE) to enable
   1665        separation or merging when space is extended.
   1666
   1667     2. Chunks allocated via mmap, which have the lowest-order bit
   1668        (IS_MMAPPED_BIT) set in their prev_foot fields, and do not set
   1669        PINUSE_BIT in their head fields.  Because they are allocated
   1670        one-by-one, each must carry its own prev_foot field, which is
   1671        also used to hold the offset this chunk has within its mmapped
   1672        region, which is needed to preserve alignment. Each mmapped
   1673        chunk is trailed by the first two fields of a fake next-chunk
   1674        for the sake of usage checks.
   1675
   1676*/
   1677
   1678struct malloc_chunk
   1679{
   1680    size_t prev_foot;           /* Size of previous chunk (if free).  */
   1681    size_t head;                /* Size and inuse bits. */
   1682    struct malloc_chunk *fd;    /* double links -- used only if free. */
   1683    struct malloc_chunk *bk;
   1684};
   1685
   1686typedef struct malloc_chunk mchunk;
   1687typedef struct malloc_chunk *mchunkptr;
   1688typedef struct malloc_chunk *sbinptr;   /* The type of bins of chunks */
   1689typedef size_t bindex_t;        /* Described below */
   1690typedef unsigned int binmap_t;  /* Described below */
   1691typedef unsigned int flag_t;    /* The type of various bit flag sets */
   1692
   1693/* ------------------- Chunks sizes and alignments ----------------------- */
   1694
   1695#define MCHUNK_SIZE         (sizeof(mchunk))
   1696
   1697#if FOOTERS
   1698#define CHUNK_OVERHEAD      (TWO_SIZE_T_SIZES)
   1699#else /* FOOTERS */
   1700#define CHUNK_OVERHEAD      (SIZE_T_SIZE)
   1701#endif /* FOOTERS */
   1702
   1703/* MMapped chunks need a second word of overhead ... */
   1704#define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
   1705/* ... and additional padding for fake next-chunk at foot */
   1706#define MMAP_FOOT_PAD       (FOUR_SIZE_T_SIZES)
   1707
   1708/* The smallest size we can malloc is an aligned minimal chunk */
   1709#define MIN_CHUNK_SIZE\
   1710  ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
   1711
   1712/* conversion from malloc headers to user pointers, and back */
   1713#define chunk2mem(p)        ((void*)((char*)(p)       + TWO_SIZE_T_SIZES))
   1714#define mem2chunk(mem)      ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES))
   1715/* chunk associated with aligned address A */
   1716#define align_as_chunk(A)   (mchunkptr)((A) + align_offset(chunk2mem(A)))
   1717
   1718/* Bounds on request (not chunk) sizes. */
   1719#define MAX_REQUEST         ((-MIN_CHUNK_SIZE) << 2)
   1720#define MIN_REQUEST         (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)
   1721
   1722/* pad request bytes into a usable size */
   1723#define pad_request(req) \
   1724   (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
   1725
   1726/* pad request, checking for minimum (but not maximum) */
   1727#define request2size(req) \
   1728  (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
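/*
  For illustration, assuming a 32-bit size_t, 8-byte alignment
  (CHUNK_ALIGN_MASK == 7) and FOOTERS disabled, so CHUNK_OVERHEAD == 4,
  MIN_CHUNK_SIZE == 16 and MIN_REQUEST == 11:

    request2size(5)   -> 5 < MIN_REQUEST, so MIN_CHUNK_SIZE      == 16
    request2size(20)  -> pad_request(20)  == (20 + 4 + 7) & ~7   == 24
    request2size(100) -> pad_request(100) == (100 + 4 + 7) & ~7  == 104

  Every usable chunk size is thus a multiple of the alignment and at
  least MIN_CHUNK_SIZE.
*/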
   1729
   1730
   1731/* ------------------ Operations on head and foot fields ----------------- */
   1732
   1733/*
   1734  The head field of a chunk is or'ed with PINUSE_BIT when the previous
   1735  adjacent chunk is in use, and or'ed with CINUSE_BIT if this chunk is
   1736  in use. If the chunk was obtained with mmap, the prev_foot field has
   1737  IS_MMAPPED_BIT set, and also holds the offset from the base of the
   1738  mmapped region to the base of the chunk.
   1739*/
   1740
   1741#define PINUSE_BIT          (SIZE_T_ONE)
   1742#define CINUSE_BIT          (SIZE_T_TWO)
   1743#define INUSE_BITS          (PINUSE_BIT|CINUSE_BIT)
   1744
   1745/* Head value for fenceposts */
   1746#define FENCEPOST_HEAD      (INUSE_BITS|SIZE_T_SIZE)
   1747
   1748/* extraction of fields from head words */
   1749#define cinuse(p)           ((p)->head & CINUSE_BIT)
   1750#define pinuse(p)           ((p)->head & PINUSE_BIT)
   1751#define chunksize(p)        ((p)->head & ~(INUSE_BITS))
   1752
   1753#define clear_pinuse(p)     ((p)->head &= ~PINUSE_BIT)
   1754#define clear_cinuse(p)     ((p)->head &= ~CINUSE_BIT)
   1755
   1756/* Treat space at ptr +/- offset as a chunk */
   1757#define chunk_plus_offset(p, s)  ((mchunkptr)(((char*)(p)) + (s)))
   1758#define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s)))
   1759
   1760/* Ptr to next or previous physical malloc_chunk. */
   1761#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~INUSE_BITS)))
   1762#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) ))
   1763
   1764/* extract next chunk's pinuse bit */
   1765#define next_pinuse(p)  ((next_chunk(p)->head) & PINUSE_BIT)
   1766
   1767/* Get/set size at footer */
   1768#define get_foot(p, s)  (((mchunkptr)((char*)(p) + (s)))->prev_foot)
   1769#define set_foot(p, s)  (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s))
   1770
   1771/* Set size, pinuse bit, and foot */
   1772#define set_size_and_pinuse_of_free_chunk(p, s)\
   1773  ((p)->head = (s|PINUSE_BIT), set_foot(p, s))
   1774
   1775/* Set size, pinuse bit, foot, and clear next pinuse */
   1776#define set_free_with_pinuse(p, s, n)\
   1777  (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))
   1778
   1779#define is_mmapped(p)\
   1780  (!((p)->head & PINUSE_BIT) && ((p)->prev_foot & IS_MMAPPED_BIT))
   1781
   1782/* Get the internal overhead associated with chunk p */
   1783#define overhead_for(p)\
   1784 (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD)
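/*
  For illustration, take a chunk p whose size is 0x90 bytes (a multiple
  of the alignment):

    In use, previous chunk also in use:
      p->head == (0x90 | CINUSE_BIT | PINUSE_BIT) == 0x93
      chunksize(p)  -> 0x90
      next_chunk(p) -> (mchunkptr)((char*)p + 0x90)

    Free, previous chunk in use:
      set_size_and_pinuse_of_free_chunk(p, 0x90) sets p->head to 0x91 and
      stores 0x90 in next_chunk(p)->prev_foot, so that
      prev_chunk(next_chunk(p)) -> p
*/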
   1785
   1786/* Return true if malloced space is not necessarily cleared */
   1787#if MMAP_CLEARS
   1788#define calloc_must_clear(p) (!is_mmapped(p))
   1789#else /* MMAP_CLEARS */
   1790#define calloc_must_clear(p) (1)
   1791#endif /* MMAP_CLEARS */
   1792
   1793/* ---------------------- Overlaid data structures ----------------------- */
   1794
   1795/*
   1796  When chunks are not in use, they are treated as nodes of either
   1797  lists or trees.
   1798
   1799  "Small"  chunks are stored in circular doubly-linked lists, and look
   1800  like this:
   1801
   1802    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
   1803            |             Size of previous chunk                            |
   1804            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
   1805    `head:' |             Size of chunk, in bytes                         |P|
   1806      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
   1807            |             Forward pointer to next chunk in list             |
   1808            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
   1809            |             Back pointer to previous chunk in list            |
   1810            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
   1811            |             Unused space (may be 0 bytes long)                .
   1812            .                                                               .
   1813            .                                                               |
   1814nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
   1815    `foot:' |             Size of chunk, in bytes                           |
   1816            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
   1817
   1818  Larger chunks are kept in a form of bitwise digital trees (aka
   1819  tries) keyed on chunksizes.  Because malloc_tree_chunks are only for
   1820  free chunks greater than 256 bytes, their size doesn't impose any
   1821  constraints on user chunk sizes.  Each node looks like:
   1822
   1823    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
   1824            |             Size of previous chunk                            |
   1825            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
   1826    `head:' |             Size of chunk, in bytes                         |P|
   1827      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
   1828            |             Forward pointer to next chunk of same size        |
   1829            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
   1830            |             Back pointer to previous chunk of same size       |
   1831            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
   1832            |             Pointer to left child (child[0])                  |
   1833            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
   1834            |             Pointer to right child (child[1])                 |
   1835            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
   1836            |             Pointer to parent                                 |
   1837            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
   1838            |             bin index of this chunk                           |
   1839            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
   1840            |             Unused space                                      .
   1841            .                                                               |
   1842nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
   1843    `foot:' |             Size of chunk, in bytes                           |
   1844            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
   1845
   1846  Each tree holding treenodes is a tree of unique chunk sizes.  Chunks
   1847  of the same size are arranged in a circularly-linked list, with only
   1848  the oldest chunk (the next to be used, in our FIFO ordering)
   1849  actually in the tree.  (Tree members are distinguished by a non-null
   1850  parent pointer.)  If a chunk with the same size as an existing node
   1851  is inserted, it is linked off the existing node using pointers that
   1852  work in the same way as fd/bk pointers of small chunks.
   1853
   1854  Each tree contains a power of 2 sized range of chunk sizes (the
   1855  smallest is 0x100 <= x < 0x180), which is divided in half at each
   1856  tree level, with the chunks in the smaller half of the range (0x100
   1857  <= x < 0x140 for the top node) in the left subtree and the larger
   1858  half (0x140 <= x < 0x180) in the right subtree.  This is, of course,
   1859  done by inspecting individual bits.
   1860
   1861  Using these rules, each node's left subtree contains all smaller
   1862  sizes than its right subtree.  However, the node at the root of each
   1863  subtree has no particular ordering relationship to either.  (The
   1864  dividing line between the subtree sizes is based on trie relation.)
   1865  If we remove the last chunk of a given size from the interior of the
   1866  tree, we need to replace it with a leaf node.  The tree ordering
   1867  rules permit a node to be replaced by any leaf below it.
   1868
   1869  The smallest chunk in a tree (a common operation in a best-fit
   1870  allocator) can be found by walking a path to the leftmost leaf in
   1871  the tree.  Unlike a usual binary tree, where we follow left child
   1872  pointers until we reach a null, here we follow the right child
   1873  pointer any time the left one is null, until we reach a leaf with
   1874  both child pointers null. The smallest chunk in the tree will be
   1875  somewhere along that path.
   1876
   1877  The worst case number of steps to add, find, or remove a node is
   1878  bounded by the number of bits differentiating chunks within
   1879  bins. Under current bin calculations, this ranges from 6 up to 21
   1880  (for 32 bit sizes) or up to 53 (for 64 bit sizes). The typical case
   1881  is of course much better.
   1882*/
   1883
   1884struct malloc_tree_chunk
   1885{
   1886    /* The first four fields must be compatible with malloc_chunk */
   1887    size_t prev_foot;
   1888    size_t head;
   1889    struct malloc_tree_chunk *fd;
   1890    struct malloc_tree_chunk *bk;
   1891
   1892    struct malloc_tree_chunk *child[2];
   1893    struct malloc_tree_chunk *parent;
   1894    bindex_t index;
   1895};
   1896
   1897typedef struct malloc_tree_chunk tchunk;
   1898typedef struct malloc_tree_chunk *tchunkptr;
   1899typedef struct malloc_tree_chunk *tbinptr;      /* The type of bins of trees */
   1900
   1901/* A little helper macro for trees */
   1902#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])
   1903
   1904/* ----------------------------- Segments -------------------------------- */
   1905
   1906/*
   1907  Each malloc space may include non-contiguous segments, held in a
   1908  list headed by an embedded malloc_segment record representing the
   1909  top-most space. Segments also include flags holding properties of
   1910  the space. Large chunks that are directly allocated by mmap are not
   1911  included in this list. They are instead independently created and
   1912  destroyed without otherwise keeping track of them.
   1913
   1914  Segment management mainly comes into play for spaces allocated by
   1915  MMAP.  Any call to MMAP might or might not return memory that is
   1916  adjacent to an existing segment.  MORECORE normally contiguously
   1917  extends the current space, so this space is almost always adjacent,
   1918  which is simpler and faster to deal with. (This is why MORECORE is
   1919  used preferentially to MMAP when both are available -- see
   1920  sys_alloc.)  When allocating using MMAP, we don't use any of the
   1921  hinting mechanisms (inconsistently) supported in various
   1922  implementations of unix mmap, or distinguish reserving from
   1923  committing memory. Instead, we just ask for space, and exploit
   1924  contiguity when we get it.  It is probably possible to do
   1925  better than this on some systems, but no general scheme seems
   1926  to be significantly better.
   1927
   1928  Management entails a simpler variant of the consolidation scheme
   1929  used for chunks to reduce fragmentation -- new adjacent memory is
   1930  normally prepended or appended to an existing segment. However,
   1931  there are limitations compared to chunk consolidation that mostly
   1932  reflect the fact that segment processing is relatively infrequent
   1933  (occurring only when getting memory from system) and that we
   1934  don't expect to have huge numbers of segments:
   1935
   1936  * Segments are not indexed, so traversal requires linear scans.  (It
   1937    would be possible to index these, but is not worth the extra
   1938    overhead and complexity for most programs on most platforms.)
   1939  * New segments are only appended to old ones when holding top-most
   1940    memory; if they cannot be prepended to others, they are held in
   1941    different segments.
   1942
   1943  Except for the top-most segment of an mstate, each segment record
   1944  is kept at the tail of its segment. Segments are added by pushing
   1945  segment records onto the list headed by &mstate.seg for the
   1946  containing mstate.
   1947
   1948  Segment flags control allocation/merge/deallocation policies:
   1949  * If EXTERN_BIT set, then we did not allocate this segment,
   1950    and so should not try to deallocate or merge with others.
   1951    (This currently holds only for the initial segment passed
   1952    into create_mspace_with_base.)
   1953  * If IS_MMAPPED_BIT set, the segment may be merged with
   1954    other surrounding mmapped segments and trimmed/de-allocated
   1955    using munmap.
   1956  * If neither bit is set, then the segment was obtained using
   1957    MORECORE so can be merged with surrounding MORECORE'd segments
   1958    and deallocated/trimmed using MORECORE with negative arguments.
   1959*/
   1960
   1961struct malloc_segment
   1962{
   1963    char *base;                 /* base address */
   1964    size_t size;                /* allocated size */
   1965    struct malloc_segment *next;        /* ptr to next segment */
   1966    flag_t sflags;              /* mmap and extern flag */
   1967};
   1968
   1969#define is_mmapped_segment(S)  ((S)->sflags & IS_MMAPPED_BIT)
   1970#define is_extern_segment(S)   ((S)->sflags & EXTERN_BIT)
   1971
   1972typedef struct malloc_segment msegment;
   1973typedef struct malloc_segment *msegmentptr;
   1974
   1975/* ---------------------------- malloc_state ----------------------------- */
   1976
   1977/*
   1978   A malloc_state holds all of the bookkeeping for a space.
   1979   The main fields are:
   1980
   1981  Top
   1982    The topmost chunk of the currently active segment. Its size is
   1983    cached in topsize.  The actual size of topmost space is
   1984    topsize+TOP_FOOT_SIZE, which includes space reserved for adding
   1985    fenceposts and segment records if necessary when getting more
   1986    space from the system.  The size at which to autotrim top is
   1987    cached from mparams in trim_check, except that it is disabled if
   1988    an autotrim fails.
   1989
   1990  Designated victim (dv)
   1991    This is the preferred chunk for servicing small requests that
   1992    don't have exact fits.  It is normally the chunk split off most
   1993    recently to service another small request.  Its size is cached in
   1994    dvsize. The link fields of this chunk are not maintained since it
   1995    is not kept in a bin.
   1996
   1997  SmallBins
   1998    An array of bin headers for free chunks.  These bins hold chunks
   1999    with sizes less than MIN_LARGE_SIZE bytes. Each bin contains
   2000    chunks of all the same size, spaced 8 bytes apart.  To simplify
   2001    use in double-linked lists, each bin header acts as a malloc_chunk
   2002    pointing to the real first node, if it exists (else pointing to
   2003    itself).  This avoids special-casing for headers.  But to avoid
   2004    waste, we allocate only the fd/bk pointers of bins, and then use
   2005    repositioning tricks to treat these as the fields of a chunk.
   2006
   2007  TreeBins
   2008    Treebins are pointers to the roots of trees holding a range of
   2009    sizes. There are 2 equally spaced treebins for each power of two
   2010    from TREE_SHIFT to TREE_SHIFT+16. The last bin holds anything
   2011    larger.
   2012
   2013  Bin maps
   2014    There is one bit map for small bins ("smallmap") and one for
   2015    treebins ("treemap").  Each bin sets its bit when non-empty, and
   2016    clears the bit when empty.  Bit operations are then used to avoid
   2017    bin-by-bin searching -- nearly all "search" is done without ever
   2018    looking at bins that won't be selected.  The bit maps
   2019    conservatively use 32 bits per map word, even on a 64-bit system.
   2020    For a good description of some of the bit-based techniques used
   2021    here, see Henry S. Warren Jr's book "Hacker's Delight" (and
   2022    supplement at http://hackersdelight.org/). Many of these are
   2023    intended to reduce the branchiness of paths through malloc etc, as
   2024    well as to reduce the number of memory locations read or written.
   2025
   2026  Segments
   2027    A list of segments headed by an embedded malloc_segment record
   2028    representing the initial space.
   2029
   2030  Address check support
   2031    The least_addr field is the least address ever obtained from
   2032    MORECORE or MMAP. Attempted frees and reallocs of any address less
   2033    than this are trapped (unless INSECURE is defined).
   2034
   2035  Magic tag
   2036    A cross-check field that should always hold the same value as mparams.magic.
   2037
   2038  Flags
   2039    Bits recording whether to use MMAP, locks, or contiguous MORECORE
   2040
   2041  Statistics
   2042    Each space keeps track of current and maximum system memory
   2043    obtained via MORECORE or MMAP.
   2044
   2045  Locking
   2046    If USE_LOCKS is defined, the "mutex" lock is acquired and released
   2047    around every public call using this mspace.
   2048*/
   2049
   2050/* Bin types, widths and sizes */
   2051#define NSMALLBINS        (32U)
   2052#define NTREEBINS         (32U)
   2053#define SMALLBIN_SHIFT    (3U)
   2054#define SMALLBIN_WIDTH    (SIZE_T_ONE << SMALLBIN_SHIFT)
   2055#define TREEBIN_SHIFT     (8U)
   2056#define MIN_LARGE_SIZE    (SIZE_T_ONE << TREEBIN_SHIFT)
   2057#define MAX_SMALL_SIZE    (MIN_LARGE_SIZE - SIZE_T_ONE)
   2058#define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
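/*
  Concretely, with the definitions above (the MAX_SMALL_REQUEST value
  assumes 8-byte alignment and CHUNK_OVERHEAD == 4, i.e. a 32-bit size_t
  without FOOTERS):

    small bins: 32 bins indexed by chunk size / 8, holding chunk sizes
                below MIN_LARGE_SIZE == 256, spaced SMALLBIN_WIDTH == 8 apart
    tree bins:  32 bins covering chunk sizes from 256 upward, two bins
                per power-of-two range
    MAX_SMALL_REQUEST == 255 - 7 - 4 == 244, the largest user request
                still serviced from a small bin
*/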
   2059
   2060struct malloc_state
   2061{
   2062    binmap_t smallmap;
   2063    binmap_t treemap;
   2064    size_t dvsize;
   2065    size_t topsize;
   2066    char *least_addr;
   2067    mchunkptr dv;
   2068    mchunkptr top;
   2069    size_t trim_check;
   2070    size_t magic;
   2071    mchunkptr smallbins[(NSMALLBINS + 1) * 2];
   2072    tbinptr treebins[NTREEBINS];
   2073    size_t footprint;
   2074    size_t max_footprint;
   2075    flag_t mflags;
   2076#if USE_LOCKS
   2077    MLOCK_T mutex;              /* locate lock among fields that rarely change */
   2078#endif                          /* USE_LOCKS */
   2079    msegment seg;
   2080};
   2081
   2082typedef struct malloc_state *mstate;
   2083
   2084/* ------------- Global malloc_state and malloc_params ------------------- */
   2085
   2086/*
   2087  malloc_params holds global properties, including those that can be
   2088  dynamically set using mallopt. There is a single instance, mparams,
   2089  initialized in init_mparams.
   2090*/
   2091
   2092struct malloc_params
   2093{
   2094    size_t magic;
   2095    size_t page_size;
   2096    size_t granularity;
   2097    size_t mmap_threshold;
   2098    size_t trim_threshold;
   2099    flag_t default_mflags;
   2100};
   2101
   2102static struct malloc_params mparams;
   2103
   2104/* The global malloc_state used for all non-"mspace" calls */
   2105static struct malloc_state _gm_;
   2106#define gm                 (&_gm_)
   2107#define is_global(M)       ((M) == &_gm_)
   2108#define is_initialized(M)  ((M)->top != 0)
   2109
   2110/* -------------------------- system alloc setup ------------------------- */
   2111
   2112/* Operations on mflags */
   2113
   2114#define use_lock(M)           ((M)->mflags &   USE_LOCK_BIT)
   2115#define enable_lock(M)        ((M)->mflags |=  USE_LOCK_BIT)
   2116#define disable_lock(M)       ((M)->mflags &= ~USE_LOCK_BIT)
   2117
   2118#define use_mmap(M)           ((M)->mflags &   USE_MMAP_BIT)
   2119#define enable_mmap(M)        ((M)->mflags |=  USE_MMAP_BIT)
   2120#define disable_mmap(M)       ((M)->mflags &= ~USE_MMAP_BIT)
   2121
   2122#define use_noncontiguous(M)  ((M)->mflags &   USE_NONCONTIGUOUS_BIT)
   2123#define disable_contiguous(M) ((M)->mflags |=  USE_NONCONTIGUOUS_BIT)
   2124
   2125#define set_lock(M,L)\
   2126 ((M)->mflags = (L)?\
   2127  ((M)->mflags | USE_LOCK_BIT) :\
   2128  ((M)->mflags & ~USE_LOCK_BIT))
   2129
   2130/* page-align a size */
   2131#define page_align(S)\
   2132 (((S) + (mparams.page_size)) & ~(mparams.page_size - SIZE_T_ONE))
   2133
   2134/* granularity-align a size */
   2135#define granularity_align(S)\
   2136  (((S) + (mparams.granularity)) & ~(mparams.granularity - SIZE_T_ONE))
   2137
   2138#define is_page_aligned(S)\
   2139   (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0)
   2140#define is_granularity_aligned(S)\
   2141   (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0)
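/*
  For illustration, assuming mparams.page_size == 4096 and
  mparams.granularity == 65536:

    page_align(5000)          -> (5000 + 4096) & ~4095     == 8192
    is_page_aligned(8192)     -> true
    granularity_align(100000) -> (100000 + 65536) & ~65535 == 131072
*/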
   2142
   2143/*  True if segment S holds address A */
   2144#define segment_holds(S, A)\
   2145  ((char*)(A) >= S->base && (char*)(A) < S->base + S->size)
   2146
   2147/* Return segment holding given address */
   2148static msegmentptr
   2149segment_holding(mstate m, char *addr)
   2150{
   2151    msegmentptr sp = &m->seg;
   2152    for (;;) {
   2153        if (addr >= sp->base && addr < sp->base + sp->size)
   2154            return sp;
   2155        if ((sp = sp->next) == 0)
   2156            return 0;
   2157    }
   2158}
   2159
   2160/* Return true if segment contains a segment link */
   2161static int
   2162has_segment_link(mstate m, msegmentptr ss)
   2163{
   2164    msegmentptr sp = &m->seg;
   2165    for (;;) {
   2166        if ((char *) sp >= ss->base && (char *) sp < ss->base + ss->size)
   2167            return 1;
   2168        if ((sp = sp->next) == 0)
   2169            return 0;
   2170    }
   2171}
   2172
   2173#ifndef MORECORE_CANNOT_TRIM
   2174#define should_trim(M,s)  ((s) > (M)->trim_check)
   2175#else /* MORECORE_CANNOT_TRIM */
   2176#define should_trim(M,s)  (0)
   2177#endif /* MORECORE_CANNOT_TRIM */
   2178
   2179/*
   2180  TOP_FOOT_SIZE is padding at the end of a segment, including space
   2181  that may be needed to place segment records and fenceposts when new
   2182  noncontiguous segments are added.
   2183*/
   2184#define TOP_FOOT_SIZE\
   2185  (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)
   2186
   2187
   2188/* -------------------------------  Hooks -------------------------------- */
   2189
   2190/*
   2191  PREACTION should be defined to return 0 on success, and nonzero on
   2192  failure. If you are not using locking, you can redefine these to do
   2193  anything you like.
   2194*/
   2195
   2196#if USE_LOCKS
   2197
   2198/* Ensure locks are initialized */
   2199#define GLOBALLY_INITIALIZE() (mparams.page_size == 0 && init_mparams())
   2200
   2201#define PREACTION(M)  ((GLOBALLY_INITIALIZE() || use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0)
   2202#define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); }
   2203#else /* USE_LOCKS */
   2204
   2205#ifndef PREACTION
   2206#define PREACTION(M) (0)
   2207#endif /* PREACTION */
   2208
   2209#ifndef POSTACTION
   2210#define POSTACTION(M)
   2211#endif /* POSTACTION */
   2212
   2213#endif /* USE_LOCKS */
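/*
  The public routines later in this file bracket their bodies with these
  hooks, so that with USE_LOCKS the per-mstate lock is held for the whole
  call and the hooks compile away otherwise.  Schematically:

    if (!PREACTION(m)) {
      ... operate on m ...
      POSTACTION(m);
    }
*/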
   2214
   2215/*
   2216  CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses.
   2217  USAGE_ERROR_ACTION is triggered on detected bad frees and
   2218  reallocs. The argument p is an address that might have triggered the
   2219  fault. It is ignored by the two predefined actions, but might be
   2220  useful in custom actions that try to help diagnose errors.
   2221*/
   2222
   2223#if PROCEED_ON_ERROR
   2224
   2225/* A count of the number of corruption errors causing resets */
   2226int malloc_corruption_error_count;
   2227
   2228/* default corruption action */
   2229static void reset_on_error(mstate m);
   2230
   2231#define CORRUPTION_ERROR_ACTION(m)  reset_on_error(m)
   2232#define USAGE_ERROR_ACTION(m, p)
   2233
   2234#else /* PROCEED_ON_ERROR */
   2235
   2236#ifndef CORRUPTION_ERROR_ACTION
   2237#define CORRUPTION_ERROR_ACTION(m) ABORT
   2238#endif /* CORRUPTION_ERROR_ACTION */
   2239
   2240#ifndef USAGE_ERROR_ACTION
   2241#define USAGE_ERROR_ACTION(m,p) ABORT
   2242#endif /* USAGE_ERROR_ACTION */
   2243
   2244#endif /* PROCEED_ON_ERROR */
   2245
   2246/* -------------------------- Debugging setup ---------------------------- */
   2247
   2248#if ! DEBUG
   2249
   2250#define check_free_chunk(M,P)
   2251#define check_inuse_chunk(M,P)
   2252#define check_malloced_chunk(M,P,N)
   2253#define check_mmapped_chunk(M,P)
   2254#define check_malloc_state(M)
   2255#define check_top_chunk(M,P)
   2256
   2257#else /* DEBUG */
   2258#define check_free_chunk(M,P)       do_check_free_chunk(M,P)
   2259#define check_inuse_chunk(M,P)      do_check_inuse_chunk(M,P)
   2260#define check_top_chunk(M,P)        do_check_top_chunk(M,P)
   2261#define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N)
   2262#define check_mmapped_chunk(M,P)    do_check_mmapped_chunk(M,P)
   2263#define check_malloc_state(M)       do_check_malloc_state(M)
   2264
   2265static void do_check_any_chunk(mstate m, mchunkptr p);
   2266static void do_check_top_chunk(mstate m, mchunkptr p);
   2267static void do_check_mmapped_chunk(mstate m, mchunkptr p);
   2268static void do_check_inuse_chunk(mstate m, mchunkptr p);
   2269static void do_check_free_chunk(mstate m, mchunkptr p);
   2270static void do_check_malloced_chunk(mstate m, void *mem, size_t s);
   2271static void do_check_tree(mstate m, tchunkptr t);
   2272static void do_check_treebin(mstate m, bindex_t i);
   2273static void do_check_smallbin(mstate m, bindex_t i);
   2274static void do_check_malloc_state(mstate m);
   2275static int bin_find(mstate m, mchunkptr x);
   2276static size_t traverse_and_check(mstate m);
   2277#endif /* DEBUG */
   2278
   2279/* ---------------------------- Indexing Bins ---------------------------- */
   2280
   2281#define is_small(s)         (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
   2282#define small_index(s)      ((s)  >> SMALLBIN_SHIFT)
   2283#define small_index2size(i) ((i)  << SMALLBIN_SHIFT)
   2284#define MIN_SMALL_INDEX     (small_index(MIN_CHUNK_SIZE))
   2285
   2286/* addressing by index. See above about smallbin repositioning */
   2287#define smallbin_at(M, i)   ((sbinptr)((char*)&((M)->smallbins[(i)<<1])))
   2288#define treebin_at(M,i)     (&((M)->treebins[i]))
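/*
  For illustration, for a chunk size of 40 bytes:

    is_small(40)        -> (40 >> 3) == 5 < NSMALLBINS, true
    small_index(40)     -> 5
    small_index2size(5) -> 40
    smallbin_at(M, 5)   -> treats &M->smallbins[10] as the header chunk of
                           the circular list of free 40-byte chunks
*/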
   2289
   2290/* assign tree index for size S to variable I */
   2291#if defined(__GNUC__) && defined(i386)
   2292#define compute_tree_index(S, I)\
   2293{\
   2294  size_t X = S >> TREEBIN_SHIFT;\
   2295  if (X == 0)\
   2296    I = 0;\
   2297  else if (X > 0xFFFF)\
   2298    I = NTREEBINS-1;\
   2299  else {\
   2300    unsigned int K;\
   2301    __asm__("bsrl %1,%0\n\t" : "=r" (K) : "rm"  (X));\
   2302    I =  (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
   2303  }\
   2304}
   2305#else /* GNUC */
   2306#define compute_tree_index(S, I)\
   2307{\
   2308  size_t X = S >> TREEBIN_SHIFT;\
   2309  if (X == 0)\
   2310    I = 0;\
   2311  else if (X > 0xFFFF)\
   2312    I = NTREEBINS-1;\
   2313  else {\
   2314    unsigned int Y = (unsigned int)X;\
   2315    unsigned int N = ((Y - 0x100) >> 16) & 8;\
   2316    unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\
   2317    N += K;\
   2318    N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\
   2319    K = 14 - N + ((Y <<= K) >> 15);\
   2320    I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\
   2321  }\
   2322}
   2323#endif /* GNUC */
   2324
   2325/* Bit representing maximum resolved size in a treebin at i */
   2326#define bit_for_tree_index(i) \
   2327   (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)
   2328
   2329/* Shift placing maximum resolved bit in a treebin at i as sign bit */
   2330#define leftshift_for_tree_index(i) \
   2331   ((i == NTREEBINS-1)? 0 : \
   2332    ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))
   2333
   2334/* The size of the smallest chunk held in bin with index i */
   2335#define minsize_for_tree_index(i) \
   2336   ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) |  \
   2337   (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
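/*
  For illustration, for a chunk size S == 0x300 (768):

    X == S >> TREEBIN_SHIFT == 3, whose highest set bit is K == 1, so
    compute_tree_index(768, I) -> I == (1 << 1) + ((768 >> 8) & 1) == 3

  and consistently:

    minsize_for_tree_index(3) -> (1 << 9) | (1 << 8) == 768
    bit_for_tree_index(3)     -> 7, the highest size bit that still varies
                                 among the sizes 768..1023 held in that bin
*/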
   2338
   2339
   2340/* ------------------------ Operations on bin maps ----------------------- */
   2341
   2342/* bit corresponding to given index */
   2343#define idx2bit(i)              ((binmap_t)(1) << (i))
   2344
   2345/* Mark/Clear bits with given index */
   2346#define mark_smallmap(M,i)      ((M)->smallmap |=  idx2bit(i))
   2347#define clear_smallmap(M,i)     ((M)->smallmap &= ~idx2bit(i))
   2348#define smallmap_is_marked(M,i) ((M)->smallmap &   idx2bit(i))
   2349
   2350#define mark_treemap(M,i)       ((M)->treemap  |=  idx2bit(i))
   2351#define clear_treemap(M,i)      ((M)->treemap  &= ~idx2bit(i))
   2352#define treemap_is_marked(M,i)  ((M)->treemap  &   idx2bit(i))
   2353
   2354/* index corresponding to given bit */
   2355
   2356#if defined(__GNUC__) && defined(i386)
   2357#define compute_bit2idx(X, I)\
   2358{\
   2359  unsigned int J;\
   2360  __asm__("bsfl %1,%0\n\t" : "=r" (J) : "rm" (X));\
   2361  I = (bindex_t)J;\
   2362}
   2363
   2364#else /* GNUC */
   2365#if  USE_BUILTIN_FFS
   2366#define compute_bit2idx(X, I) I = ffs(X)-1
   2367
   2368#else /* USE_BUILTIN_FFS */
   2369#define compute_bit2idx(X, I)\
   2370{\
   2371  unsigned int Y = X - 1;\
   2372  unsigned int K = Y >> (16-4) & 16;\
   2373  unsigned int N = K;        Y >>= K;\
   2374  N += K = Y >> (8-3) &  8;  Y >>= K;\
   2375  N += K = Y >> (4-2) &  4;  Y >>= K;\
   2376  N += K = Y >> (2-1) &  2;  Y >>= K;\
   2377  N += K = Y >> (1-0) &  1;  Y >>= K;\
   2378  I = (bindex_t)(N + Y);\
   2379}
   2380#endif /* USE_BUILTIN_FFS */
   2381#endif /* GNUC */
   2382
   2383/* isolate the least set bit of a bitmap */
   2384#define least_bit(x)         ((x) & -(x))
   2385
   2386/* mask with all bits to left of least bit of x on */
   2387#define left_bits(x)         ((x<<1) | -(x<<1))
   2388
   2389/* mask with all bits to left of or equal to least bit of x on */
   2390#define same_or_left_bits(x) ((x) | -(x))
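/*
  For illustration, for a bin map x == 0x28 (bits 3 and 5 set):

    least_bit(0x28)          -> 0x08
    compute_bit2idx(0x08, I) -> I == 3, the lowest non-empty bin
    left_bits(0x08)          -> 0xFFFFFFF0, all bits above bit 3
    same_or_left_bits(0x08)  -> 0xFFFFFFF8, bit 3 and everything above it

  These let malloc jump straight to the smallest usable non-empty bin
  without scanning the empty ones in between.
*/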
   2391
   2392
   2393/* ----------------------- Runtime Check Support ------------------------- */
   2394
   2395/*
   2396  For security, the main invariant is that malloc/free/etc never
   2397  writes to a static address other than malloc_state, unless static
   2398  malloc_state itself has been corrupted, which cannot occur via
   2399  malloc (because of these checks). In essence this means that we
   2400  believe all pointers, sizes, maps etc held in malloc_state, but
   2401  check all of those linked or offsetted from other embedded data
   2402  structures.  These checks are interspersed with main code in a way
   2403  that tends to minimize their run-time cost.
   2404
   2405  When FOOTERS is defined, in addition to range checking, we also
   2406  verify footer fields of inuse chunks, which can be used to guarantee
   2407  that the mstate controlling malloc/free is intact.  This is a
   2408  streamlined version of the approach described by William Robertson
   2409  et al in "Run-time Detection of Heap-based Overflows" LISA'03
   2410  http://www.usenix.org/events/lisa03/tech/robertson.html The footer
   2411  of an inuse chunk holds the xor of its mstate and a random seed,
   2412  that is checked upon calls to free() and realloc().  This is
   2413  (probabilistically) unguessable from outside the program, but can be
   2414  computed by any code successfully malloc'ing any chunk, so does not
   2415  itself provide protection against code that has already broken
   2416  security through some other means.  Unlike Robertson et al, we
   2417  always dynamically check addresses of all offset chunks (previous,
   2418  next, etc). This turns out to be cheaper than relying on hashes.
   2419*/
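/*
  Illustrative sketch (hypothetical values, not part of the original source):
  the FOOTERS scheme stores (mstate ^ magic) in each inuse chunk's footer, so
  free() and realloc() can recover and validate the owning mstate without
  trusting the pointer they were handed:
*/
static void example_footer_roundtrip(void)
{
    size_t magic   = (size_t) 0x5a5a5a58U;  /* stands in for the random seed set up in init_mparams */
    size_t mst     = (size_t) 0x00400000U;  /* stands in for the address of the owning mstate       */
    size_t footer  = mst ^ magic;           /* what mark_inuse_foot (below) stores                  */
    size_t decoded = footer ^ magic;        /* what get_mstate_for (below) recovers                 */
    (void) decoded;                         /* decoded == mst unless footer or magic was corrupted  */
}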
   2420
   2421#if !INSECURE
   2422/* Check if address a is at least as high as any from MORECORE or MMAP */
   2423#define ok_address(M, a) ((char*)(a) >= (M)->least_addr)
   2424/* Check if address of next chunk n is higher than base chunk p */
   2425#define ok_next(p, n)    ((char*)(p) < (char*)(n))
   2426/* Check if p has its cinuse bit on */
   2427#define ok_cinuse(p)     cinuse(p)
   2428/* Check if p has its pinuse bit on */
   2429#define ok_pinuse(p)     pinuse(p)
   2430
   2431#else /* !INSECURE */
   2432#define ok_address(M, a) (1)
   2433#define ok_next(b, n)    (1)
   2434#define ok_cinuse(p)     (1)
   2435#define ok_pinuse(p)     (1)
   2436#endif /* !INSECURE */
   2437
   2438#if (FOOTERS && !INSECURE)
   2439/* Check if (alleged) mstate m has expected magic field */
   2440#define ok_magic(M)      ((M)->magic == mparams.magic)
   2441#else /* (FOOTERS && !INSECURE) */
   2442#define ok_magic(M)      (1)
   2443#endif /* (FOOTERS && !INSECURE) */
   2444
   2445
   2446/* In gcc, use __builtin_expect to minimize impact of checks */
   2447#if !INSECURE
   2448#if defined(__GNUC__) && __GNUC__ >= 3
   2449#define RTCHECK(e)  __builtin_expect(e, 1)
   2450#else /* GNUC */
   2451#define RTCHECK(e)  (e)
   2452#endif /* GNUC */
   2453#else /* !INSECURE */
   2454#define RTCHECK(e)  (1)
   2455#endif /* !INSECURE */
   2456
   2457/* macros to set up inuse chunks with or without footers */
   2458
   2459#if !FOOTERS
   2460
   2461#define mark_inuse_foot(M,p,s)
   2462
   2463/* Set cinuse bit and pinuse bit of next chunk */
   2464#define set_inuse(M,p,s)\
   2465  ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
   2466  ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
   2467
   2468/* Set cinuse and pinuse of this chunk and pinuse of next chunk */
   2469#define set_inuse_and_pinuse(M,p,s)\
   2470  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
   2471  ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
   2472
   2473/* Set size, cinuse and pinuse bit of this chunk */
   2474#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
   2475  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))
   2476
   2477#else /* FOOTERS */
   2478
   2479/* Set foot of inuse chunk to be xor of mstate and seed */
   2480#define mark_inuse_foot(M,p,s)\
   2481  (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic))
   2482
   2483#define get_mstate_for(p)\
   2484  ((mstate)(((mchunkptr)((char*)(p) +\
   2485    (chunksize(p))))->prev_foot ^ mparams.magic))
   2486
   2487#define set_inuse(M,p,s)\
   2488  ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
   2489  (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \
   2490  mark_inuse_foot(M,p,s))
   2491
   2492#define set_inuse_and_pinuse(M,p,s)\
   2493  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
   2494  (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\
   2495 mark_inuse_foot(M,p,s))
   2496
   2497#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
   2498  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
   2499  mark_inuse_foot(M, p, s))
   2500
   2501#endif /* !FOOTERS */
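/*
  Illustrative sketch (hypothetical values, not part of the original source):
  chunk sizes are always multiples of at least 8, so the two low bits of
  'head' are free to carry the flags that the macros above set and that the
  chunksize/cinuse/pinuse macros read back:
*/
static void example_head_bits(void)
{
    size_t head = (size_t) 0x90U | PINUSE_BIT | CINUSE_BIT; /* 0x93: 144-byte inuse chunk, prev inuse */
    size_t size = head & ~(PINUSE_BIT | CINUSE_BIT);        /* 0x90: the size chunksize() reports     */
    int this_inuse = (head & CINUSE_BIT) != 0;              /* 1 */
    int prev_inuse = (head & PINUSE_BIT) != 0;              /* 1 */
    (void) size; (void) this_inuse; (void) prev_inuse;
}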
   2502
   2503/* ---------------------------- setting mparams -------------------------- */
   2504
   2505/* Initialize mparams */
   2506static int
   2507init_mparams(void)
   2508{
   2509    if (mparams.page_size == 0) {
   2510        size_t s;
   2511
   2512        mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
   2513        mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD;
   2514#if MORECORE_CONTIGUOUS
   2515        mparams.default_mflags = USE_LOCK_BIT | USE_MMAP_BIT;
   2516#else /* MORECORE_CONTIGUOUS */
   2517        mparams.default_mflags =
   2518            USE_LOCK_BIT | USE_MMAP_BIT | USE_NONCONTIGUOUS_BIT;
   2519#endif /* MORECORE_CONTIGUOUS */
   2520
   2521#if (FOOTERS && !INSECURE)
   2522        {
   2523#if USE_DEV_RANDOM
   2524            int fd;
   2525            unsigned char buf[sizeof(size_t)];
   2526            /* Try to use /dev/urandom, else fall back on using time */
   2527            if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 &&
   2528                read(fd, buf, sizeof(buf)) == sizeof(buf)) {
   2529                s = *((size_t *) buf);
   2530                close(fd);
   2531            } else
   2532#endif /* USE_DEV_RANDOM */
   2533                s = (size_t) (time(0) ^ (size_t) 0x55555555U);
   2534
   2535            s |= (size_t) 8U;   /* ensure nonzero */
   2536            s &= ~(size_t) 7U;  /* improve chances of fault for bad values */
   2537
   2538        }
   2539#else /* (FOOTERS && !INSECURE) */
   2540        s = (size_t) 0x58585858U;
   2541#endif /* (FOOTERS && !INSECURE) */
   2542        ACQUIRE_MAGIC_INIT_LOCK();
   2543        if (mparams.magic == 0) {
   2544            mparams.magic = s;
   2545            /* Set up lock for main malloc area */
   2546            INITIAL_LOCK(&gm->mutex);
   2547            gm->mflags = mparams.default_mflags;
   2548        }
   2549        RELEASE_MAGIC_INIT_LOCK();
   2550
   2551#ifndef WIN32
   2552        mparams.page_size = malloc_getpagesize;
   2553        mparams.granularity = ((DEFAULT_GRANULARITY != 0) ?
   2554                               DEFAULT_GRANULARITY : mparams.page_size);
   2555#else /* WIN32 */
   2556        {
   2557            SYSTEM_INFO system_info;
   2558            GetSystemInfo(&system_info);
   2559            mparams.page_size = system_info.dwPageSize;
   2560            mparams.granularity = system_info.dwAllocationGranularity;
   2561        }
   2562#endif /* WIN32 */
   2563
   2564        /* Sanity-check configuration:
   2565           size_t must be unsigned and as wide as pointer type.
   2566           ints must be at least 4 bytes.
   2567           alignment must be at least 8.
   2568           Alignment, min chunk size, and page size must all be powers of 2.
   2569         */
   2570        if ((sizeof(size_t) != sizeof(char *)) ||
   2571            (MAX_SIZE_T < MIN_CHUNK_SIZE) ||
   2572            (sizeof(int) < 4) ||
   2573            (MALLOC_ALIGNMENT < (size_t) 8U) ||
   2574            ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT - SIZE_T_ONE)) != 0) ||
   2575            ((MCHUNK_SIZE & (MCHUNK_SIZE - SIZE_T_ONE)) != 0) ||
   2576            ((mparams.granularity & (mparams.granularity - SIZE_T_ONE)) != 0)
   2577            || ((mparams.page_size & (mparams.page_size - SIZE_T_ONE)) != 0))
   2578            ABORT;
   2579    }
   2580    return 0;
   2581}
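/*
  Illustrative helpers (not part of the original source; example_* names are
  hypothetical): the sanity checks above, like the alignment macros used
  throughout this file, rely on two standard power-of-two idioms:
*/
static int example_is_pow2(size_t x)        /* nonzero power of two? */
{
    return x != 0 && (x & (x - SIZE_T_ONE)) == 0;
}

static size_t example_round_up(size_t n, size_t align)  /* align must be a power of two */
{
    return (n + (align - SIZE_T_ONE)) & ~(align - SIZE_T_ONE);
}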
   2582
   2583/* support for mallopt */
   2584static int
   2585change_mparam(int param_number, int value)
   2586{
   2587    size_t val = (size_t) value;
   2588    init_mparams();
   2589    switch (param_number) {
   2590    case M_TRIM_THRESHOLD:
   2591        mparams.trim_threshold = val;
   2592        return 1;
   2593    case M_GRANULARITY:
   2594        if (val >= mparams.page_size && ((val & (val - 1)) == 0)) {
   2595            mparams.granularity = val;
   2596            return 1;
   2597        } else
   2598            return 0;
   2599    case M_MMAP_THRESHOLD:
   2600        mparams.mmap_threshold = val;
   2601        return 1;
   2602    default:
   2603        return 0;
   2604    }
   2605}
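/*
  Usage sketch (hypothetical example_* name, not part of the original source):
  in stock dlmalloc the public mallopt entry point simply relays to
  change_mparam, so run-time tuning amounts to calls such as:
*/
static void example_tune_mparams(void)
{
    change_mparam(M_MMAP_THRESHOLD, 1 << 20);     /* direct-mmap requests of 1MB and up        */
    change_mparam(M_TRIM_THRESHOLD, 256 * 1024);  /* return top space to the system past 256KB */
    change_mparam(M_GRANULARITY, 64 * 1024);      /* must be a power of two >= the page size   */
}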
   2606
   2607#if DEBUG
   2608/* ------------------------- Debugging Support --------------------------- */
   2609
   2610/* Check properties of any chunk, whether free, inuse, mmapped etc  */
   2611static void
   2612do_check_any_chunk(mstate m, mchunkptr p)
   2613{
   2614    assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
   2615    assert(ok_address(m, p));
   2616}
   2617
   2618/* Check properties of top chunk */
   2619static void
   2620do_check_top_chunk(mstate m, mchunkptr p)
   2621{
   2622    msegmentptr sp = segment_holding(m, (char *) p);
   2623    size_t sz = chunksize(p);
   2624    assert(sp != 0);
   2625    assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
   2626    assert(ok_address(m, p));
   2627    assert(sz == m->topsize);
   2628    assert(sz > 0);
   2629    assert(sz == ((sp->base + sp->size) - (char *) p) - TOP_FOOT_SIZE);
   2630    assert(pinuse(p));
   2631    assert(!next_pinuse(p));
   2632}
   2633
   2634/* Check properties of (inuse) mmapped chunks */
   2635static void
   2636do_check_mmapped_chunk(mstate m, mchunkptr p)
   2637{
   2638    size_t sz = chunksize(p);
   2639    size_t len = (sz + (p->prev_foot & ~IS_MMAPPED_BIT) + MMAP_FOOT_PAD);
   2640    assert(is_mmapped(p));
   2641    assert(use_mmap(m));
   2642    assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
   2643    assert(ok_address(m, p));
   2644    assert(!is_small(sz));
   2645    assert((len & (mparams.page_size - SIZE_T_ONE)) == 0);
   2646    assert(chunk_plus_offset(p, sz)->head == FENCEPOST_HEAD);
   2647    assert(chunk_plus_offset(p, sz + SIZE_T_SIZE)->head == 0);
   2648}
   2649
   2650/* Check properties of inuse chunks */
   2651static void
   2652do_check_inuse_chunk(mstate m, mchunkptr p)
   2653{
   2654    do_check_any_chunk(m, p);
   2655    assert(cinuse(p));
   2656    assert(next_pinuse(p));
   2657    /* If not pinuse and not mmapped, previous chunk has OK offset */
   2658    assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p);
   2659    if (is_mmapped(p))
   2660        do_check_mmapped_chunk(m, p);
   2661}
   2662
   2663/* Check properties of free chunks */
   2664static void
   2665do_check_free_chunk(mstate m, mchunkptr p)
   2666{
   2667    size_t sz = p->head & ~(PINUSE_BIT | CINUSE_BIT);
   2668    mchunkptr next = chunk_plus_offset(p, sz);
   2669    do_check_any_chunk(m, p);
   2670    assert(!cinuse(p));
   2671    assert(!next_pinuse(p));
   2672    assert(!is_mmapped(p));
   2673    if (p != m->dv && p != m->top) {
   2674        if (sz >= MIN_CHUNK_SIZE) {
   2675            assert((sz & CHUNK_ALIGN_MASK) == 0);
   2676            assert(is_aligned(chunk2mem(p)));
   2677            assert(next->prev_foot == sz);
   2678            assert(pinuse(p));
   2679            assert(next == m->top || cinuse(next));
   2680            assert(p->fd->bk == p);
   2681            assert(p->bk->fd == p);
   2682        } else                  /* markers are always of size SIZE_T_SIZE */
   2683            assert(sz == SIZE_T_SIZE);
   2684    }
   2685}
   2686
   2687/* Check properties of malloced chunks at the point they are malloced */
   2688static void
   2689do_check_malloced_chunk(mstate m, void *mem, size_t s)
   2690{
   2691    if (mem != 0) {
   2692        mchunkptr p = mem2chunk(mem);
   2693        size_t sz = p->head & ~(PINUSE_BIT | CINUSE_BIT);
   2694        do_check_inuse_chunk(m, p);
   2695        assert((sz & CHUNK_ALIGN_MASK) == 0);
   2696        assert(sz >= MIN_CHUNK_SIZE);
   2697        assert(sz >= s);
   2698        /* unless mmapped, size is less than MIN_CHUNK_SIZE more than request */
   2699        assert(is_mmapped(p) || sz < (s + MIN_CHUNK_SIZE));
   2700    }
   2701}
   2702
   2703/* Check a tree and its subtrees.  */
   2704static void
   2705do_check_tree(mstate m, tchunkptr t)
   2706{
   2707    tchunkptr head = 0;
   2708    tchunkptr u = t;
   2709    bindex_t tindex = t->index;
   2710    size_t tsize = chunksize(t);
   2711    bindex_t idx;
   2712    compute_tree_index(tsize, idx);
   2713    assert(tindex == idx);
   2714    assert(tsize >= MIN_LARGE_SIZE);
   2715    assert(tsize >= minsize_for_tree_index(idx));
   2716    assert((idx == NTREEBINS - 1)
   2717           || (tsize < minsize_for_tree_index((idx + 1))));
   2718
   2719    do {                        /* traverse through chain of same-sized nodes */
   2720        do_check_any_chunk(m, ((mchunkptr) u));
   2721        assert(u->index == tindex);
   2722        assert(chunksize(u) == tsize);
   2723        assert(!cinuse(u));
   2724        assert(!next_pinuse(u));
   2725        assert(u->fd->bk == u);
   2726        assert(u->bk->fd == u);
   2727        if (u->parent == 0) {
   2728            assert(u->child[0] == 0);
   2729            assert(u->child[1] == 0);
   2730        } else {
   2731            assert(head == 0);  /* only one node on chain has parent */
   2732            head = u;
   2733            assert(u->parent != u);
   2734            assert(u->parent->child[0] == u ||
   2735                   u->parent->child[1] == u ||
   2736                   *((tbinptr *) (u->parent)) == u);
   2737            if (u->child[0] != 0) {
   2738                assert(u->child[0]->parent == u);
   2739                assert(u->child[0] != u);
   2740                do_check_tree(m, u->child[0]);
   2741            }
   2742            if (u->child[1] != 0) {
   2743                assert(u->child[1]->parent == u);
   2744                assert(u->child[1] != u);
   2745                do_check_tree(m, u->child[1]);
   2746            }
   2747            if (u->child[0] != 0 && u->child[1] != 0) {
   2748                assert(chunksize(u->child[0]) < chunksize(u->child[1]));
   2749            }
   2750        }
   2751        u = u->fd;
   2752    } while (u != t);
   2753    assert(head != 0);
   2754}
   2755
   2756/*  Check all the chunks in a treebin.  */
   2757static void
   2758do_check_treebin(mstate m, bindex_t i)
   2759{
   2760    tbinptr *tb = treebin_at(m, i);
   2761    tchunkptr t = *tb;
   2762    int empty = (m->treemap & (1U << i)) == 0;
   2763    if (t == 0)
   2764        assert(empty);
   2765    if (!empty)
   2766        do_check_tree(m, t);
   2767}
   2768
   2769/*  Check all the chunks in a smallbin.  */
   2770static void
   2771do_check_smallbin(mstate m, bindex_t i)
   2772{
   2773    sbinptr b = smallbin_at(m, i);
   2774    mchunkptr p = b->bk;
   2775    unsigned int empty = (m->smallmap & (1U << i)) == 0;
   2776    if (p == b)
   2777        assert(empty);
   2778    if (!empty) {
   2779        for (; p != b; p = p->bk) {
   2780            size_t size = chunksize(p);
   2781            mchunkptr q;
   2782            /* each chunk claims to be free */
   2783            do_check_free_chunk(m, p);
   2784            /* chunk belongs in bin */
   2785            assert(small_index(size) == i);
   2786            assert(p->bk == b || chunksize(p->bk) == chunksize(p));
   2787            /* chunk is followed by an inuse chunk */
   2788            q = next_chunk(p);
   2789            if (q->head != FENCEPOST_HEAD)
   2790                do_check_inuse_chunk(m, q);
   2791        }
   2792    }
   2793}
   2794
   2795/* Find x in a bin. Used in other check functions. */
   2796static int
   2797bin_find(mstate m, mchunkptr x)
   2798{
   2799    size_t size = chunksize(x);
   2800    if (is_small(size)) {
   2801        bindex_t sidx = small_index(size);
   2802        sbinptr b = smallbin_at(m, sidx);
   2803        if (smallmap_is_marked(m, sidx)) {
   2804            mchunkptr p = b;
   2805            do {
   2806                if (p == x)
   2807                    return 1;
   2808            } while ((p = p->fd) != b);
   2809        }
   2810    } else {
   2811        bindex_t tidx;
   2812        compute_tree_index(size, tidx);
   2813        if (treemap_is_marked(m, tidx)) {
   2814            tchunkptr t = *treebin_at(m, tidx);
   2815            size_t sizebits = size << leftshift_for_tree_index(tidx);
   2816            while (t != 0 && chunksize(t) != size) {
   2817                t = t->child[(sizebits >> (SIZE_T_BITSIZE - SIZE_T_ONE)) & 1];
   2818                sizebits <<= 1;
   2819            }
   2820            if (t != 0) {
   2821                tchunkptr u = t;
   2822                do {
   2823                    if (u == (tchunkptr) x)
   2824                        return 1;
   2825                } while ((u = u->fd) != t);
   2826            }
   2827        }
   2828    }
   2829    return 0;
   2830}
   2831
   2832/* Traverse each chunk and check it; return total */
   2833static size_t
   2834traverse_and_check(mstate m)
   2835{
   2836    size_t sum = 0;
   2837    if (is_initialized(m)) {
   2838        msegmentptr s = &m->seg;
   2839        sum += m->topsize + TOP_FOOT_SIZE;
   2840        while (s != 0) {
   2841            mchunkptr q = align_as_chunk(s->base);
   2842            mchunkptr lastq = 0;
   2843            assert(pinuse(q));
   2844            while (segment_holds(s, q) &&
   2845                   q != m->top && q->head != FENCEPOST_HEAD) {
   2846                sum += chunksize(q);
   2847                if (cinuse(q)) {
   2848                    assert(!bin_find(m, q));
   2849                    do_check_inuse_chunk(m, q);
   2850                } else {
   2851                    assert(q == m->dv || bin_find(m, q));
   2852                    assert(lastq == 0 || cinuse(lastq));        /* Not 2 consecutive free */
   2853                    do_check_free_chunk(m, q);
   2854                }
   2855                lastq = q;
   2856                q = next_chunk(q);
   2857            }
   2858            s = s->next;
   2859        }
   2860    }
   2861    return sum;
   2862}
   2863
   2864/* Check all properties of malloc_state. */
   2865static void
   2866do_check_malloc_state(mstate m)
   2867{
   2868    bindex_t i;
   2869    size_t total;
   2870    /* check bins */
   2871    for (i = 0; i < NSMALLBINS; ++i)
   2872        do_check_smallbin(m, i);
   2873    for (i = 0; i < NTREEBINS; ++i)
   2874        do_check_treebin(m, i);
   2875
   2876    if (m->dvsize != 0) {       /* check dv chunk */
   2877        do_check_any_chunk(m, m->dv);
   2878        assert(m->dvsize == chunksize(m->dv));
   2879        assert(m->dvsize >= MIN_CHUNK_SIZE);
   2880        assert(bin_find(m, m->dv) == 0);
   2881    }
   2882
   2883    if (m->top != 0) {          /* check top chunk */
   2884        do_check_top_chunk(m, m->top);
   2885        assert(m->topsize == chunksize(m->top));
   2886        assert(m->topsize > 0);
   2887        assert(bin_find(m, m->top) == 0);
   2888    }
   2889
   2890    total = traverse_and_check(m);
   2891    assert(total <= m->footprint);
   2892    assert(m->footprint <= m->max_footprint);
   2893}
   2894#endif /* DEBUG */
   2895
   2896/* ----------------------------- statistics ------------------------------ */
   2897
   2898#if !NO_MALLINFO
   2899static struct mallinfo
   2900internal_mallinfo(mstate m)
   2901{
   2902    struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
   2903    if (!PREACTION(m)) {
   2904        check_malloc_state(m);
   2905        if (is_initialized(m)) {
   2906            size_t nfree = SIZE_T_ONE;  /* top always free */
   2907            size_t mfree = m->topsize + TOP_FOOT_SIZE;
   2908            size_t sum = mfree;
   2909            msegmentptr s = &m->seg;
   2910            while (s != 0) {
   2911                mchunkptr q = align_as_chunk(s->base);
   2912                while (segment_holds(s, q) &&
   2913                       q != m->top && q->head != FENCEPOST_HEAD) {
   2914                    size_t sz = chunksize(q);
   2915                    sum += sz;
   2916                    if (!cinuse(q)) {
   2917                        mfree += sz;
   2918                        ++nfree;
   2919                    }
   2920                    q = next_chunk(q);
   2921                }
   2922                s = s->next;
   2923            }
   2924
   2925            nm.arena = sum;
   2926            nm.ordblks = nfree;
   2927            nm.hblkhd = m->footprint - sum;
   2928            nm.usmblks = m->max_footprint;
   2929            nm.uordblks = m->footprint - mfree;
   2930            nm.fordblks = mfree;
   2931            nm.keepcost = m->topsize;
   2932        }
   2933
   2934        POSTACTION(m);
   2935    }
   2936    return nm;
   2937}
   2938#endif /* !NO_MALLINFO */
   2939
   2940static void
   2941internal_malloc_stats(mstate m)
   2942{
   2943    if (!PREACTION(m)) {
   2944        size_t maxfp = 0;
   2945        size_t fp = 0;
   2946        size_t used = 0;
   2947        check_malloc_state(m);
   2948        if (is_initialized(m)) {
   2949            msegmentptr s = &m->seg;
   2950            maxfp = m->max_footprint;
   2951            fp = m->footprint;
   2952            used = fp - (m->topsize + TOP_FOOT_SIZE);
   2953
   2954            while (s != 0) {
   2955                mchunkptr q = align_as_chunk(s->base);
   2956                while (segment_holds(s, q) &&
   2957                       q != m->top && q->head != FENCEPOST_HEAD) {
   2958                    if (!cinuse(q))
   2959                        used -= chunksize(q);
   2960                    q = next_chunk(q);
   2961                }
   2962                s = s->next;
   2963            }
   2964        }
   2965#ifndef LACKS_STDIO_H
   2966        fprintf(stderr, "max system bytes = %10lu\n",
   2967                (unsigned long) (maxfp));
   2968        fprintf(stderr, "system bytes     = %10lu\n", (unsigned long) (fp));
   2969        fprintf(stderr, "in use bytes     = %10lu\n", (unsigned long) (used));
   2970#endif
   2971
   2972        POSTACTION(m);
   2973    }
   2974}
   2975
   2976/* ----------------------- Operations on smallbins ----------------------- */
   2977
   2978/*
    2979  Various forms of linking and unlinking are defined as macros, even
    2980  the ones for trees, which are very long but have very short typical
    2981  paths.  This is ugly but reduces reliance on compilers' support for
    2982  inlining.
   2983*/
   2984
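/*
  Illustrative sketch (hypothetical example_* names, not part of the original
  source): the smallbin macros below maintain circular doubly linked lists in
  which the bin header acts as a sentinel node, so insertion and removal are
  the usual four- and two-pointer splices:
*/
struct example_node { struct example_node *fd, *bk; };

/* splice n in immediately after the bin header b (cf. insert_small_chunk) */
static void example_link_after(struct example_node *b, struct example_node *n)
{
    n->fd = b->fd;
    n->bk = b;
    b->fd->bk = n;
    b->fd = n;
}

/* remove n from whatever ring it is on (cf. unlink_small_chunk, minus the checks) */
static void example_unlink(struct example_node *n)
{
    n->fd->bk = n->bk;
    n->bk->fd = n->fd;
}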
   2985/* Link a free chunk into a smallbin  */
   2986#define insert_small_chunk(M, P, S) {\
   2987  bindex_t I  = small_index(S);\
   2988  mchunkptr B = smallbin_at(M, I);\
   2989  mchunkptr F = B;\
   2990  assert(S >= MIN_CHUNK_SIZE);\
   2991  if (!smallmap_is_marked(M, I))\
   2992    mark_smallmap(M, I);\
   2993  else if (RTCHECK(ok_address(M, B->fd)))\
   2994    F = B->fd;\
   2995  else {\
   2996    CORRUPTION_ERROR_ACTION(M);\
   2997  }\
   2998  B->fd = P;\
   2999  F->bk = P;\
   3000  P->fd = F;\
   3001  P->bk = B;\
   3002}
   3003
   3004/* Unlink a chunk from a smallbin  */
   3005#define unlink_small_chunk(M, P, S) {\
   3006  mchunkptr F = P->fd;\
   3007  mchunkptr B = P->bk;\
   3008  bindex_t I = small_index(S);\
   3009  assert(P != B);\
   3010  assert(P != F);\
   3011  assert(chunksize(P) == small_index2size(I));\
   3012  if (F == B)\
   3013    clear_smallmap(M, I);\
   3014  else if (RTCHECK((F == smallbin_at(M,I) || ok_address(M, F)) &&\
   3015                   (B == smallbin_at(M,I) || ok_address(M, B)))) {\
   3016    F->bk = B;\
   3017    B->fd = F;\
   3018  }\
   3019  else {\
   3020    CORRUPTION_ERROR_ACTION(M);\
   3021  }\
   3022}
   3023
   3024/* Unlink the first chunk from a smallbin */
   3025#define unlink_first_small_chunk(M, B, P, I) {\
   3026  mchunkptr F = P->fd;\
   3027  assert(P != B);\
   3028  assert(P != F);\
   3029  assert(chunksize(P) == small_index2size(I));\
   3030  if (B == F)\
   3031    clear_smallmap(M, I);\
   3032  else if (RTCHECK(ok_address(M, F))) {\
   3033    B->fd = F;\
   3034    F->bk = B;\
   3035  }\
   3036  else {\
   3037    CORRUPTION_ERROR_ACTION(M);\
   3038  }\
   3039}
   3040
   3041/* Replace dv node, binning the old one */
   3042/* Used only when dvsize known to be small */
   3043#define replace_dv(M, P, S) {\
   3044  size_t DVS = M->dvsize;\
   3045  if (DVS != 0) {\
   3046    mchunkptr DV = M->dv;\
   3047    assert(is_small(DVS));\
   3048    insert_small_chunk(M, DV, DVS);\
   3049  }\
   3050  M->dvsize = S;\
   3051  M->dv = P;\
   3052}
   3053
   3054/* ------------------------- Operations on trees ------------------------- */
   3055
   3056/* Insert chunk into tree */
   3057#define insert_large_chunk(M, X, S) {\
   3058  tbinptr* H;\
   3059  bindex_t I;\
   3060  compute_tree_index(S, I);\
   3061  H = treebin_at(M, I);\
   3062  X->index = I;\
   3063  X->child[0] = X->child[1] = 0;\
   3064  if (!treemap_is_marked(M, I)) {\
   3065    mark_treemap(M, I);\
   3066    *H = X;\
   3067    X->parent = (tchunkptr)H;\
   3068    X->fd = X->bk = X;\
   3069  }\
   3070  else {\
   3071    tchunkptr T = *H;\
   3072    size_t K = S << leftshift_for_tree_index(I);\
   3073    for (;;) {\
   3074      if (chunksize(T) != S) {\
   3075        tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\
   3076        K <<= 1;\
   3077        if (*C != 0)\
   3078          T = *C;\
   3079        else if (RTCHECK(ok_address(M, C))) {\
   3080          *C = X;\
   3081          X->parent = T;\
   3082          X->fd = X->bk = X;\
   3083          break;\
   3084        }\
   3085        else {\
   3086          CORRUPTION_ERROR_ACTION(M);\
   3087          break;\
   3088        }\
   3089      }\
   3090      else {\
   3091        tchunkptr F = T->fd;\
   3092        if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\
   3093          T->fd = F->bk = X;\
   3094          X->fd = F;\
   3095          X->bk = T;\
   3096          X->parent = 0;\
   3097          break;\
   3098        }\
   3099        else {\
   3100          CORRUPTION_ERROR_ACTION(M);\
   3101          break;\
   3102        }\
   3103      }\
   3104    }\
   3105  }\
   3106}
   3107
   3108/*
   3109  Unlink steps:
   3110
   3111  1. If x is a chained node, unlink it from its same-sized fd/bk links
   3112     and choose its bk node as its replacement.
   3113  2. If x was the last node of its size, but not a leaf node, it must
   3114     be replaced with a leaf node (not merely one with an open left or
    3115     right), to make sure that lefts and rights of descendants
    3116     correspond properly to bit masks.  We use the rightmost descendant
   3117     of x.  We could use any other leaf, but this is easy to locate and
   3118     tends to counteract removal of leftmosts elsewhere, and so keeps
   3119     paths shorter than minimally guaranteed.  This doesn't loop much
   3120     because on average a node in a tree is near the bottom.
   3121  3. If x is the base of a chain (i.e., has parent links) relink
   3122     x's parent and children to x's replacement (or null if none).
   3123*/
   3124
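/*
  Illustrative sketch of step 2 (hypothetical example_* names, not part of the
  original source): finding the rightmost descendant is a simple walk that
  prefers right children, which is exactly what the loop over CP below does:
*/
struct example_tnode { struct example_tnode *child[2]; };

static struct example_tnode *example_rightmost_leaf(struct example_tnode *r)
{
    for (;;) {
        if (r->child[1] != 0)
            r = r->child[1];
        else if (r->child[0] != 0)
            r = r->child[0];
        else
            return r;           /* a leaf: both children empty */
    }
}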
   3125#define unlink_large_chunk(M, X) {\
   3126  tchunkptr XP = X->parent;\
   3127  tchunkptr R;\
   3128  if (X->bk != X) {\
   3129    tchunkptr F = X->fd;\
   3130    R = X->bk;\
   3131    if (RTCHECK(ok_address(M, F))) {\
   3132      F->bk = R;\
   3133      R->fd = F;\
   3134    }\
   3135    else {\
   3136      CORRUPTION_ERROR_ACTION(M);\
   3137    }\
   3138  }\
   3139  else {\
   3140    tchunkptr* RP;\
   3141    if (((R = *(RP = &(X->child[1]))) != 0) ||\
   3142        ((R = *(RP = &(X->child[0]))) != 0)) {\
   3143      tchunkptr* CP;\
   3144      while ((*(CP = &(R->child[1])) != 0) ||\
   3145             (*(CP = &(R->child[0])) != 0)) {\
   3146        R = *(RP = CP);\
   3147      }\
   3148      if (RTCHECK(ok_address(M, RP)))\
   3149        *RP = 0;\
   3150      else {\
   3151        CORRUPTION_ERROR_ACTION(M);\
   3152      }\
   3153    }\
   3154  }\
   3155  if (XP != 0) {\
   3156    tbinptr* H = treebin_at(M, X->index);\
   3157    if (X == *H) {\
   3158      if ((*H = R) == 0) \
   3159        clear_treemap(M, X->index);\
   3160    }\
   3161    else if (RTCHECK(ok_address(M, XP))) {\
   3162      if (XP->child[0] == X) \
   3163        XP->child[0] = R;\
   3164      else \
   3165        XP->child[1] = R;\
   3166    }\
   3167    else\
   3168      CORRUPTION_ERROR_ACTION(M);\
   3169    if (R != 0) {\
   3170      if (RTCHECK(ok_address(M, R))) {\
   3171        tchunkptr C0, C1;\
   3172        R->parent = XP;\
   3173        if ((C0 = X->child[0]) != 0) {\
   3174          if (RTCHECK(ok_address(M, C0))) {\
   3175            R->child[0] = C0;\
   3176            C0->parent = R;\
   3177          }\
   3178          else\
   3179            CORRUPTION_ERROR_ACTION(M);\
   3180        }\
   3181        if ((C1 = X->child[1]) != 0) {\
   3182          if (RTCHECK(ok_address(M, C1))) {\
   3183            R->child[1] = C1;\
   3184            C1->parent = R;\
   3185          }\
   3186          else\
   3187            CORRUPTION_ERROR_ACTION(M);\
   3188        }\
   3189      }\
   3190      else\
   3191        CORRUPTION_ERROR_ACTION(M);\
   3192    }\
   3193  }\
   3194}
   3195
   3196/* Relays to large vs small bin operations */
   3197
   3198#define insert_chunk(M, P, S)\
   3199  if (is_small(S)) insert_small_chunk(M, P, S)\
   3200  else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }
   3201
   3202#define unlink_chunk(M, P, S)\
   3203  if (is_small(S)) unlink_small_chunk(M, P, S)\
   3204  else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }
   3205
   3206
   3207/* Relays to internal calls to malloc/free from realloc, memalign etc */
   3208
   3209#if ONLY_MSPACES
   3210#define internal_malloc(m, b) mspace_malloc(m, b)
   3211#define internal_free(m, mem) mspace_free(m,mem);
   3212#else /* ONLY_MSPACES */
   3213#if MSPACES
   3214#define internal_malloc(m, b)\
   3215   (m == gm)? dlmalloc(b) : mspace_malloc(m, b)
   3216#define internal_free(m, mem)\
   3217   if (m == gm) dlfree(mem); else mspace_free(m,mem);
   3218#else /* MSPACES */
   3219#define internal_malloc(m, b) dlmalloc(b)
   3220#define internal_free(m, mem) dlfree(mem)
   3221#endif /* MSPACES */
   3222#endif /* ONLY_MSPACES */
   3223
   3224/* -----------------------  Direct-mmapping chunks ----------------------- */
   3225
   3226/*
   3227  Directly mmapped chunks are set up with an offset to the start of
   3228  the mmapped region stored in the prev_foot field of the chunk. This
   3229  allows reconstruction of the required argument to MUNMAP when freed,
   3230  and also allows adjustment of the returned chunk to meet alignment
   3231  requirements (especially in memalign).  There is also enough space
   3232  allocated to hold a fake next chunk of size SIZE_T_SIZE to maintain
   3233  the PINUSE bit so frees can be checked.
   3234*/
   3235
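/*
  Illustrative sketch (hypothetical example_* name, not part of the original
  source): given a directly mmapped inuse chunk p, the original mapping can be
  reconstructed from prev_foot alone, which is essentially what the mmapped
  paths in free() and mmap_resize (below) compute:
*/
static void example_unmap_direct_chunk(mchunkptr p)
{
    size_t offset = p->prev_foot & ~IS_MMAPPED_BIT;        /* distance from mapping start to p */
    char  *base   = (char *) p - offset;                   /* start of the original mapping    */
    size_t length = chunksize(p) + offset + MMAP_FOOT_PAD; /* total length that was mapped     */
    (void) CALL_MUNMAP(base, length);
}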
   3236/* Malloc using mmap */
   3237static void *
   3238mmap_alloc(mstate m, size_t nb)
   3239{
   3240    size_t mmsize =
   3241        granularity_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
   3242    if (mmsize > nb) {          /* Check for wrap around 0 */
   3243        char *mm = (char *) (DIRECT_MMAP(mmsize));
   3244        if (mm != CMFAIL) {
   3245            size_t offset = align_offset(chunk2mem(mm));
   3246            size_t psize = mmsize - offset - MMAP_FOOT_PAD;
   3247            mchunkptr p = (mchunkptr) (mm + offset);
   3248            p->prev_foot = offset | IS_MMAPPED_BIT;
   3249            (p)->head = (psize | CINUSE_BIT);
   3250            mark_inuse_foot(m, p, psize);
   3251            chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;
   3252            chunk_plus_offset(p, psize + SIZE_T_SIZE)->head = 0;
   3253
   3254            if (mm < m->least_addr)
   3255                m->least_addr = mm;
   3256            if ((m->footprint += mmsize) > m->max_footprint)
   3257                m->max_footprint = m->footprint;
   3258            assert(is_aligned(chunk2mem(p)));
   3259            check_mmapped_chunk(m, p);
   3260            return chunk2mem(p);
   3261        }
   3262    }
   3263    return 0;
   3264}
   3265
   3266/* Realloc using mmap */
   3267static mchunkptr
   3268mmap_resize(mstate m, mchunkptr oldp, size_t nb)
   3269{
   3270    size_t oldsize = chunksize(oldp);
   3271    if (is_small(nb))           /* Can't shrink mmap regions below small size */
   3272        return 0;
   3273    /* Keep old chunk if big enough but not too big */
   3274    if (oldsize >= nb + SIZE_T_SIZE &&
   3275        (oldsize - nb) <= (mparams.granularity << 1))
   3276        return oldp;
   3277    else {
   3278        size_t offset = oldp->prev_foot & ~IS_MMAPPED_BIT;
   3279        size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD;
   3280        size_t newmmsize = granularity_align(nb + SIX_SIZE_T_SIZES +
   3281                                             CHUNK_ALIGN_MASK);
   3282        char *cp = (char *) CALL_MREMAP((char *) oldp - offset,
   3283                                        oldmmsize, newmmsize, 1);
   3284        if (cp != CMFAIL) {
   3285            mchunkptr newp = (mchunkptr) (cp + offset);
   3286            size_t psize = newmmsize - offset - MMAP_FOOT_PAD;
   3287            newp->head = (psize | CINUSE_BIT);
   3288            mark_inuse_foot(m, newp, psize);
   3289            chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
   3290            chunk_plus_offset(newp, psize + SIZE_T_SIZE)->head = 0;
   3291
   3292            if (cp < m->least_addr)
   3293                m->least_addr = cp;
   3294            if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint)
   3295                m->max_footprint = m->footprint;
   3296            check_mmapped_chunk(m, newp);
   3297            return newp;
   3298        }
   3299    }
   3300    return 0;
   3301}
   3302
   3303/* -------------------------- mspace management -------------------------- */
   3304
   3305/* Initialize top chunk and its size */
   3306static void
   3307init_top(mstate m, mchunkptr p, size_t psize)
   3308{
   3309    /* Ensure alignment */
   3310    size_t offset = align_offset(chunk2mem(p));
   3311    p = (mchunkptr) ((char *) p + offset);
   3312    psize -= offset;
   3313
   3314    m->top = p;
   3315    m->topsize = psize;
   3316    p->head = psize | PINUSE_BIT;
   3317    /* set size of fake trailing chunk holding overhead space only once */
   3318    chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
   3319    m->trim_check = mparams.trim_threshold;     /* reset on each update */
   3320}
   3321
   3322/* Initialize bins for a new mstate that is otherwise zeroed out */
   3323static void
   3324init_bins(mstate m)
   3325{
   3326    /* Establish circular links for smallbins */
   3327    bindex_t i;
   3328    for (i = 0; i < NSMALLBINS; ++i) {
   3329        sbinptr bin = smallbin_at(m, i);
   3330        bin->fd = bin->bk = bin;
   3331    }
   3332}
   3333
   3334#if PROCEED_ON_ERROR
   3335
   3336/* default corruption action */
   3337static void
   3338reset_on_error(mstate m)
   3339{
   3340    int i;
   3341    ++malloc_corruption_error_count;
   3342    /* Reinitialize fields to forget about all memory */
   3343    m->smallbins = m->treebins = 0;
   3344    m->dvsize = m->topsize = 0;
   3345    m->seg.base = 0;
   3346    m->seg.size = 0;
   3347    m->seg.next = 0;
   3348    m->top = m->dv = 0;
   3349    for (i = 0; i < NTREEBINS; ++i)
   3350        *treebin_at(m, i) = 0;
   3351    init_bins(m);
   3352}
   3353#endif /* PROCEED_ON_ERROR */
   3354
    3356/* Allocate chunk at the new base and consolidate the remainder with the first chunk of the old (successor) base. */
   3356static void *
   3357prepend_alloc(mstate m, char *newbase, char *oldbase, size_t nb)
   3358{
   3359    mchunkptr p = align_as_chunk(newbase);
   3360    mchunkptr oldfirst = align_as_chunk(oldbase);
   3361    size_t psize = (char *) oldfirst - (char *) p;
   3362    mchunkptr q = chunk_plus_offset(p, nb);
   3363    size_t qsize = psize - nb;
   3364    set_size_and_pinuse_of_inuse_chunk(m, p, nb);
   3365
   3366    assert((char *) oldfirst > (char *) q);
   3367    assert(pinuse(oldfirst));
   3368    assert(qsize >= MIN_CHUNK_SIZE);
   3369
   3370    /* consolidate remainder with first chunk of old base */
   3371    if (oldfirst == m->top) {
   3372        size_t tsize = m->topsize += qsize;
   3373        m->top = q;
   3374        q->head = tsize | PINUSE_BIT;
   3375        check_top_chunk(m, q);
   3376    } else if (oldfirst == m->dv) {
   3377        size_t dsize = m->dvsize += qsize;
   3378        m->dv = q;
   3379        set_size_and_pinuse_of_free_chunk(q, dsize);
   3380    } else {
   3381        if (!cinuse(oldfirst)) {
   3382            size_t nsize = chunksize(oldfirst);
   3383            unlink_chunk(m, oldfirst, nsize);
   3384            oldfirst = chunk_plus_offset(oldfirst, nsize);
   3385            qsize += nsize;
   3386        }
   3387        set_free_with_pinuse(q, qsize, oldfirst);
   3388        insert_chunk(m, q, qsize);
   3389        check_free_chunk(m, q);
   3390    }
   3391
   3392    check_malloced_chunk(m, chunk2mem(p), nb);
   3393    return chunk2mem(p);
   3394}
   3395
   3396
   3397/* Add a segment to hold a new noncontiguous region */
   3398static void
   3399add_segment(mstate m, char *tbase, size_t tsize, flag_t mmapped)
   3400{
   3401    /* Determine locations and sizes of segment, fenceposts, old top */
   3402    char *old_top = (char *) m->top;
   3403    msegmentptr oldsp = segment_holding(m, old_top);
   3404    char *old_end = oldsp->base + oldsp->size;
   3405    size_t ssize = pad_request(sizeof(struct malloc_segment));
   3406    char *rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
   3407    size_t offset = align_offset(chunk2mem(rawsp));
   3408    char *asp = rawsp + offset;
   3409    char *csp = (asp < (old_top + MIN_CHUNK_SIZE)) ? old_top : asp;
   3410    mchunkptr sp = (mchunkptr) csp;
   3411    msegmentptr ss = (msegmentptr) (chunk2mem(sp));
   3412    mchunkptr tnext = chunk_plus_offset(sp, ssize);
   3413    mchunkptr p = tnext;
   3414    int nfences = 0;
   3415
   3416    /* reset top to new space */
   3417    init_top(m, (mchunkptr) tbase, tsize - TOP_FOOT_SIZE);
   3418
   3419    /* Set up segment record */
   3420    assert(is_aligned(ss));
   3421    set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);
   3422    *ss = m->seg;               /* Push current record */
   3423    m->seg.base = tbase;
   3424    m->seg.size = tsize;
   3425    m->seg.sflags = mmapped;
   3426    m->seg.next = ss;
   3427
   3428    /* Insert trailing fenceposts */
   3429    for (;;) {
   3430        mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);
   3431        p->head = FENCEPOST_HEAD;
   3432        ++nfences;
   3433        if ((char *) (&(nextp->head)) < old_end)
   3434            p = nextp;
   3435        else
   3436            break;
   3437    }
   3438    assert(nfences >= 2);
   3439
   3440    /* Insert the rest of old top into a bin as an ordinary free chunk */
   3441    if (csp != old_top) {
   3442        mchunkptr q = (mchunkptr) old_top;
   3443        size_t psize = csp - old_top;
   3444        mchunkptr tn = chunk_plus_offset(q, psize);
   3445        set_free_with_pinuse(q, psize, tn);
   3446        insert_chunk(m, q, psize);
   3447    }
   3448
   3449    check_top_chunk(m, m->top);
   3450}
   3451
   3452/* -------------------------- System allocation -------------------------- */
   3453
   3454/* Get memory from system using MORECORE or MMAP */
   3455static void *
   3456sys_alloc(mstate m, size_t nb)
   3457{
   3458    char *tbase = CMFAIL;
   3459    size_t tsize = 0;
   3460    flag_t mmap_flag = 0;
   3461
   3462    init_mparams();
   3463
   3464    /* Directly map large chunks */
   3465    if (use_mmap(m) && nb >= mparams.mmap_threshold) {
   3466        void *mem = mmap_alloc(m, nb);
   3467        if (mem != 0)
   3468            return mem;
   3469    }
   3470
   3471    /*
   3472       Try getting memory in any of three ways (in most-preferred to
   3473       least-preferred order):
   3474       1. A call to MORECORE that can normally contiguously extend memory.
   3475       (disabled if not MORECORE_CONTIGUOUS or not HAVE_MORECORE or
    3476       main space is mmapped or a previous contiguous call failed)
   3477       2. A call to MMAP new space (disabled if not HAVE_MMAP).
   3478       Note that under the default settings, if MORECORE is unable to
   3479       fulfill a request, and HAVE_MMAP is true, then mmap is
   3480       used as a noncontiguous system allocator. This is a useful backup
   3481       strategy for systems with holes in address spaces -- in this case
   3482       sbrk cannot contiguously expand the heap, but mmap may be able to
   3483       find space.
   3484       3. A call to MORECORE that cannot usually contiguously extend memory.
   3485       (disabled if not HAVE_MORECORE)
   3486     */
   3487
   3488    if (MORECORE_CONTIGUOUS && !use_noncontiguous(m)) {
   3489        char *br = CMFAIL;
   3490        msegmentptr ss =
   3491            (m->top == 0) ? 0 : segment_holding(m, (char *) m->top);
   3492        size_t asize = 0;
   3493        ACQUIRE_MORECORE_LOCK();
   3494
   3495        if (ss == 0) {          /* First time through or recovery */
   3496            char *base = (char *) CALL_MORECORE(0);
   3497            if (base != CMFAIL) {
   3498                asize =
   3499                    granularity_align(nb + TOP_FOOT_SIZE + MALLOC_ALIGNMENT +
   3500                                      SIZE_T_ONE);
   3501                /* Adjust to end on a page boundary */
   3502                if (!is_page_aligned(base))
   3503                    asize += (page_align((size_t) base) - (size_t) base);
   3504                /* Can't call MORECORE if size is negative when treated as signed */
   3505                if (asize < HALF_MAX_SIZE_T &&
   3506                    (br = (char *) (CALL_MORECORE(asize))) == base) {
   3507                    tbase = base;
   3508                    tsize = asize;
   3509                }
   3510            }
   3511        } else {
   3512            /* Subtract out existing available top space from MORECORE request. */
   3513            asize =
   3514                granularity_align(nb - m->topsize + TOP_FOOT_SIZE +
   3515                                  MALLOC_ALIGNMENT + SIZE_T_ONE);
    3516            /* Use mem here only if it did contiguously extend old space */
   3517            if (asize < HALF_MAX_SIZE_T &&
   3518                (br =
   3519                 (char *) (CALL_MORECORE(asize))) == ss->base + ss->size) {
   3520                tbase = br;
   3521                tsize = asize;
   3522            }
   3523        }
   3524
   3525        if (tbase == CMFAIL) {  /* Cope with partial failure */
   3526            if (br != CMFAIL) { /* Try to use/extend the space we did get */
   3527                if (asize < HALF_MAX_SIZE_T &&
   3528                    asize < nb + TOP_FOOT_SIZE + SIZE_T_ONE) {
   3529                    size_t esize =
   3530                        granularity_align(nb + TOP_FOOT_SIZE +
   3531                                          MALLOC_ALIGNMENT + SIZE_T_ONE -
   3532                                          asize);
   3533                    if (esize < HALF_MAX_SIZE_T) {
   3534                        char *end = (char *) CALL_MORECORE(esize);
   3535                        if (end != CMFAIL)
   3536                            asize += esize;
   3537                        else {  /* Can't use; try to release */
   3538                            end = (char *) CALL_MORECORE(-asize);
   3539                            br = CMFAIL;
   3540                        }
   3541                    }
   3542                }
   3543            }
   3544            if (br != CMFAIL) { /* Use the space we did get */
   3545                tbase = br;
   3546                tsize = asize;
   3547            } else
   3548                disable_contiguous(m);  /* Don't try contiguous path in the future */
   3549        }
   3550
   3551        RELEASE_MORECORE_LOCK();
   3552    }
   3553
   3554    if (HAVE_MMAP && tbase == CMFAIL) { /* Try MMAP */
   3555        size_t req = nb + TOP_FOOT_SIZE + MALLOC_ALIGNMENT + SIZE_T_ONE;
   3556        size_t rsize = granularity_align(req);
   3557        if (rsize > nb) {       /* Fail if wraps around zero */
   3558            char *mp = (char *) (CALL_MMAP(rsize));
   3559            if (mp != CMFAIL) {
   3560                tbase = mp;
   3561                tsize = rsize;
   3562                mmap_flag = IS_MMAPPED_BIT;
   3563            }
   3564        }
   3565    }
   3566
   3567    if (HAVE_MORECORE && tbase == CMFAIL) {     /* Try noncontiguous MORECORE */
   3568        size_t asize =
   3569            granularity_align(nb + TOP_FOOT_SIZE + MALLOC_ALIGNMENT +
   3570                              SIZE_T_ONE);
   3571        if (asize < HALF_MAX_SIZE_T) {
   3572            char *br = CMFAIL;
   3573            char *end = CMFAIL;
   3574            ACQUIRE_MORECORE_LOCK();
   3575            br = (char *) (CALL_MORECORE(asize));
   3576            end = (char *) (CALL_MORECORE(0));
   3577            RELEASE_MORECORE_LOCK();
   3578            if (br != CMFAIL && end != CMFAIL && br < end) {
   3579                size_t ssize = end - br;
   3580                if (ssize > nb + TOP_FOOT_SIZE) {
   3581                    tbase = br;
   3582                    tsize = ssize;
   3583                }
   3584            }
   3585        }
   3586    }
   3587
   3588    if (tbase != CMFAIL) {
   3589
   3590        if ((m->footprint += tsize) > m->max_footprint)
   3591            m->max_footprint = m->footprint;
   3592
   3593        if (!is_initialized(m)) {       /* first-time initialization */
   3594            m->seg.base = m->least_addr = tbase;
   3595            m->seg.size = tsize;
   3596            m->seg.sflags = mmap_flag;
   3597            m->magic = mparams.magic;
   3598            init_bins(m);
   3599            if (is_global(m))
   3600                init_top(m, (mchunkptr) tbase, tsize - TOP_FOOT_SIZE);
   3601            else {
   3602                /* Offset top by embedded malloc_state */
   3603                mchunkptr mn = next_chunk(mem2chunk(m));
   3604                init_top(m, mn,
   3605                         (size_t) ((tbase + tsize) - (char *) mn) -
   3606                         TOP_FOOT_SIZE);
   3607            }
   3608        }
   3609
   3610        else {
   3611            /* Try to merge with an existing segment */
   3612            msegmentptr sp = &m->seg;
   3613            while (sp != 0 && tbase != sp->base + sp->size)
   3614                sp = sp->next;
   3615            if (sp != 0 && !is_extern_segment(sp) && (sp->sflags & IS_MMAPPED_BIT) == mmap_flag && segment_holds(sp, m->top)) { /* append */
   3616                sp->size += tsize;
   3617                init_top(m, m->top, m->topsize + tsize);
   3618            } else {
   3619                if (tbase < m->least_addr)
   3620                    m->least_addr = tbase;
   3621                sp = &m->seg;
   3622                while (sp != 0 && sp->base != tbase + tsize)
   3623                    sp = sp->next;
   3624                if (sp != 0 &&
   3625                    !is_extern_segment(sp) &&
   3626                    (sp->sflags & IS_MMAPPED_BIT) == mmap_flag) {
   3627                    char *oldbase = sp->base;
   3628                    sp->base = tbase;
   3629                    sp->size += tsize;
   3630                    return prepend_alloc(m, tbase, oldbase, nb);
   3631                } else
   3632                    add_segment(m, tbase, tsize, mmap_flag);
   3633            }
   3634        }
   3635
   3636        if (nb < m->topsize) {  /* Allocate from new or extended top space */
   3637            size_t rsize = m->topsize -= nb;
   3638            mchunkptr p = m->top;
   3639            mchunkptr r = m->top = chunk_plus_offset(p, nb);
   3640            r->head = rsize | PINUSE_BIT;
   3641            set_size_and_pinuse_of_inuse_chunk(m, p, nb);
   3642            check_top_chunk(m, m->top);
   3643            check_malloced_chunk(m, chunk2mem(p), nb);
   3644            return chunk2mem(p);
   3645        }
   3646    }
   3647
   3648    MALLOC_FAILURE_ACTION;
   3649    return 0;
   3650}
   3651
   3652/* -----------------------  system deallocation -------------------------- */
   3653
   3654/* Unmap and unlink any mmapped segments that don't contain used chunks */
   3655static size_t
   3656release_unused_segments(mstate m)
   3657{
   3658    size_t released = 0;
   3659    msegmentptr pred = &m->seg;
   3660    msegmentptr sp = pred->next;
   3661    while (sp != 0) {
   3662        char *base = sp->base;
   3663        size_t size = sp->size;
   3664        msegmentptr next = sp->next;
   3665        if (is_mmapped_segment(sp) && !is_extern_segment(sp)) {
   3666            mchunkptr p = align_as_chunk(base);
   3667            size_t psize = chunksize(p);
   3668            /* Can unmap if first chunk holds entire segment and not pinned */
   3669            if (!cinuse(p)
   3670                && (char *) p + psize >= base + size - TOP_FOOT_SIZE) {
   3671                tchunkptr tp = (tchunkptr) p;
   3672                assert(segment_holds(sp, (char *) sp));
   3673                if (p == m->dv) {
   3674                    m->dv = 0;
   3675                    m->dvsize = 0;
   3676                } else {
   3677                    unlink_large_chunk(m, tp);
   3678                }
   3679                if (CALL_MUNMAP(base, size) == 0) {
   3680                    released += size;
   3681                    m->footprint -= size;
   3682                    /* unlink obsoleted record */
   3683                    sp = pred;
   3684                    sp->next = next;
   3685                } else {        /* back out if cannot unmap */
   3686                    insert_large_chunk(m, tp, psize);
   3687                }
   3688            }
   3689        }
   3690        pred = sp;
   3691        sp = next;
   3692    }
   3693    return released;
   3694}
   3695
   3696static int
   3697sys_trim(mstate m, size_t pad)
   3698{
   3699    size_t released = 0;
   3700    if (pad < MAX_REQUEST && is_initialized(m)) {
   3701        pad += TOP_FOOT_SIZE;   /* ensure enough room for segment overhead */
   3702
   3703        if (m->topsize > pad) {
   3704            /* Shrink top space in granularity-size units, keeping at least one */
   3705            size_t unit = mparams.granularity;
   3706            size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit -
   3707                            SIZE_T_ONE) * unit;
   3708            msegmentptr sp = segment_holding(m, (char *) m->top);
   3709
   3710            if (!is_extern_segment(sp)) {
   3711                if (is_mmapped_segment(sp)) {
   3712                    if (HAVE_MMAP && sp->size >= extra && !has_segment_link(m, sp)) {   /* can't shrink if pinned */
   3713                        size_t newsize = sp->size - extra;
   3714                        /* Prefer mremap, fall back to munmap */
   3715                        if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) !=
   3716                             MFAIL)
   3717                            || (CALL_MUNMAP(sp->base + newsize, extra) == 0)) {
   3718                            released = extra;
   3719                        }
   3720                    }
   3721                } else if (HAVE_MORECORE) {
   3722                    if (extra >= HALF_MAX_SIZE_T)       /* Avoid wrapping negative */
   3723                        extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit;
   3724                    ACQUIRE_MORECORE_LOCK();
   3725                    {
   3726                        /* Make sure end of memory is where we last set it. */
   3727                        char *old_br = (char *) (CALL_MORECORE(0));
   3728                        if (old_br == sp->base + sp->size) {
   3729                            char *rel_br = (char *) (CALL_MORECORE(-extra));
   3730                            char *new_br = (char *) (CALL_MORECORE(0));
   3731                            if (rel_br != CMFAIL && new_br < old_br)
   3732                                released = old_br - new_br;
   3733                        }
   3734                    }
   3735                    RELEASE_MORECORE_LOCK();
   3736                }
   3737            }
   3738
   3739            if (released != 0) {
   3740                sp->size -= released;
   3741                m->footprint -= released;
   3742                init_top(m, m->top, m->topsize - released);
   3743                check_top_chunk(m, m->top);
   3744            }
   3745        }
   3746
   3747        /* Unmap any unused mmapped segments */
   3748        if (HAVE_MMAP)
   3749            released += release_unused_segments(m);
   3750
   3751        /* On failure, disable autotrim to avoid repeated failed future calls */
   3752        if (released == 0)
   3753            m->trim_check = MAX_SIZE_T;
   3754    }
   3755
   3756    return (released != 0) ? 1 : 0;
   3757}
   3758
   3759/* ---------------------------- malloc support --------------------------- */
   3760
   3761/* allocate a large request from the best fitting chunk in a treebin */
   3762static void *
   3763tmalloc_large(mstate m, size_t nb)
   3764{
   3765    tchunkptr v = 0;
    3766    size_t rsize = -nb;         /* Unsigned negation: start with the largest possible remainder */
   3767    tchunkptr t;
   3768    bindex_t idx;
   3769    compute_tree_index(nb, idx);
   3770
   3771    if ((t = *treebin_at(m, idx)) != 0) {
   3772        /* Traverse tree for this bin looking for node with size == nb */
   3773        size_t sizebits = nb << leftshift_for_tree_index(idx);
   3774        tchunkptr rst = 0;      /* The deepest untaken right subtree */
   3775        for (;;) {
   3776            tchunkptr rt;
   3777            size_t trem = chunksize(t) - nb;
   3778            if (trem < rsize) {
   3779                v = t;
   3780                if ((rsize = trem) == 0)
   3781                    break;
   3782            }
   3783            rt = t->child[1];
   3784            t = t->child[(sizebits >> (SIZE_T_BITSIZE - SIZE_T_ONE)) & 1];
   3785            if (rt != 0 && rt != t)
   3786                rst = rt;
   3787            if (t == 0) {
   3788                t = rst;        /* set t to least subtree holding sizes > nb */
   3789                break;
   3790            }
   3791            sizebits <<= 1;
   3792        }
   3793    }
   3794
   3795    if (t == 0 && v == 0) {     /* set t to root of next non-empty treebin */
   3796        binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
   3797        if (leftbits != 0) {
   3798            bindex_t i;
   3799            binmap_t leastbit = least_bit(leftbits);
   3800            compute_bit2idx(leastbit, i);
   3801            t = *treebin_at(m, i);
   3802        }
   3803    }
   3804
   3805    while (t != 0) {            /* find smallest of tree or subtree */
   3806        size_t trem = chunksize(t) - nb;
   3807        if (trem < rsize) {
   3808            rsize = trem;
   3809            v = t;
   3810        }
   3811        t = leftmost_child(t);
   3812    }
   3813
   3814    /*  If dv is a better fit, return 0 so malloc will use it */
   3815    if (v != 0 && rsize < (size_t) (m->dvsize - nb)) {
   3816        if (RTCHECK(ok_address(m, v))) {        /* split */
   3817            mchunkptr r = chunk_plus_offset(v, nb);
   3818            assert(chunksize(v) == rsize + nb);
   3819            if (RTCHECK(ok_next(v, r))) {
   3820                unlink_large_chunk(m, v);
   3821                if (rsize < MIN_CHUNK_SIZE)
   3822                    set_inuse_and_pinuse(m, v, (rsize + nb));
   3823                else {
   3824                    set_size_and_pinuse_of_inuse_chunk(m, v, nb);
   3825                    set_size_and_pinuse_of_free_chunk(r, rsize);
   3826                    insert_chunk(m, r, rsize);
   3827                }
   3828                return chunk2mem(v);
   3829            }
   3830        }
   3831        CORRUPTION_ERROR_ACTION(m);
   3832    }
   3833    return 0;
   3834}
   3835
   3836/* allocate a small request from the best fitting chunk in a treebin */
   3837static void *
   3838tmalloc_small(mstate m, size_t nb)
   3839{
   3840    tchunkptr t, v;
   3841    size_t rsize;
   3842    bindex_t i;
   3843    binmap_t leastbit = least_bit(m->treemap);
   3844    compute_bit2idx(leastbit, i);
   3845
   3846    v = t = *treebin_at(m, i);
   3847    rsize = chunksize(t) - nb;
   3848
   3849    while ((t = leftmost_child(t)) != 0) {
   3850        size_t trem = chunksize(t) - nb;
   3851        if (trem < rsize) {
   3852            rsize = trem;
   3853            v = t;
   3854        }
   3855    }
   3856
   3857    if (RTCHECK(ok_address(m, v))) {
   3858        mchunkptr r = chunk_plus_offset(v, nb);
   3859        assert(chunksize(v) == rsize + nb);
   3860        if (RTCHECK(ok_next(v, r))) {
   3861            unlink_large_chunk(m, v);
   3862            if (rsize < MIN_CHUNK_SIZE)
   3863                set_inuse_and_pinuse(m, v, (rsize + nb));
   3864            else {
   3865                set_size_and_pinuse_of_inuse_chunk(m, v, nb);
   3866                set_size_and_pinuse_of_free_chunk(r, rsize);
   3867                replace_dv(m, r, rsize);
   3868            }
   3869            return chunk2mem(v);
   3870        }
   3871    }
   3872
   3873    CORRUPTION_ERROR_ACTION(m);
   3874    return 0;
   3875}
   3876
   3877/* --------------------------- realloc support --------------------------- */
   3878
   3879static void *
   3880internal_realloc(mstate m, void *oldmem, size_t bytes)
   3881{
   3882    if (bytes >= MAX_REQUEST) {
   3883        MALLOC_FAILURE_ACTION;
   3884        return 0;
   3885    }
   3886    if (!PREACTION(m)) {
   3887        mchunkptr oldp = mem2chunk(oldmem);
   3888        size_t oldsize = chunksize(oldp);
   3889        mchunkptr next = chunk_plus_offset(oldp, oldsize);
   3890        mchunkptr newp = 0;
   3891        void *extra = 0;
   3892
   3893        /* Try to either shrink or extend into top. Else malloc-copy-free */
   3894
   3895        if (RTCHECK(ok_address(m, oldp) && ok_cinuse(oldp) &&
   3896                    ok_next(oldp, next) && ok_pinuse(next))) {
   3897            size_t nb = request2size(bytes);
   3898            if (is_mmapped(oldp))
   3899                newp = mmap_resize(m, oldp, nb);
   3900            else if (oldsize >= nb) {   /* already big enough */
   3901                size_t rsize = oldsize - nb;
   3902                newp = oldp;
   3903                if (rsize >= MIN_CHUNK_SIZE) {
   3904                    mchunkptr remainder = chunk_plus_offset(newp, nb);
   3905                    set_inuse(m, newp, nb);
   3906                    set_inuse(m, remainder, rsize);
   3907                    extra = chunk2mem(remainder);
   3908                }
   3909            } else if (next == m->top && oldsize + m->topsize > nb) {
   3910                /* Expand into top */
   3911                size_t newsize = oldsize + m->topsize;
   3912                size_t newtopsize = newsize - nb;
   3913                mchunkptr newtop = chunk_plus_offset(oldp, nb);
   3914                set_inuse(m, oldp, nb);
   3915                newtop->head = newtopsize | PINUSE_BIT;
   3916                m->top = newtop;
   3917                m->topsize = newtopsize;
   3918                newp = oldp;
   3919            }
   3920        } else {
   3921            USAGE_ERROR_ACTION(m, oldmem);
   3922            POSTACTION(m);
   3923            return 0;
   3924        }
   3925
   3926        POSTACTION(m);
   3927
   3928        if (newp != 0) {
   3929            if (extra != 0) {
   3930                internal_free(m, extra);
   3931            }
   3932            check_inuse_chunk(m, newp);
   3933            return chunk2mem(newp);
   3934        } else {
   3935            void *newmem = internal_malloc(m, bytes);
   3936            if (newmem != 0) {
   3937                size_t oc = oldsize - overhead_for(oldp);
   3938                memcpy(newmem, oldmem, (oc < bytes) ? oc : bytes);
   3939                internal_free(m, oldmem);
   3940            }
   3941            return newmem;
   3942        }
   3943    }
   3944    return 0;
   3945}
   3946
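/*
  Illustrative sketch, not part of the upstream dlmalloc sources: how the
  three outcomes of internal_realloc above (shrink in place, grow into the
  adjacent top chunk, or malloc-copy-free) surface through the public
  dlrealloc wrapper.  Assumes the default !ONLY_MSPACES build; the sizes
  are arbitrary examples.

  void realloc_example(void)
  {
      char *p = (char *) dlmalloc(100);
      char *r;
      if (p == 0)
          return;

      // Shrinking: the old chunk is already big enough, so the same
      // address is reused and any remainder of at least MIN_CHUNK_SIZE
      // is split off and freed.
      p = (char *) dlrealloc(p, 40);

      // Growing: if the chunk borders the top chunk it is extended in
      // place; otherwise a fresh block is malloc'ed, the old payload is
      // copied over, and the old block is freed, so the returned pointer
      // may change and the old one must not be used again.
      r = (char *) dlrealloc(p, 4096);
      if (r != 0)
          p = r;
      dlfree(p);
  }
*/
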
   3947/* --------------------------- memalign support -------------------------- */
   3948
   3949static void *
   3950internal_memalign(mstate m, size_t alignment, size_t bytes)
   3951{
   3952    if (alignment <= MALLOC_ALIGNMENT)  /* Can just use malloc */
   3953        return internal_malloc(m, bytes);
   3954    if (alignment < MIN_CHUNK_SIZE)     /* must be at least a minimum chunk size */
   3955        alignment = MIN_CHUNK_SIZE;
   3956    if ((alignment & (alignment - SIZE_T_ONE)) != 0) {  /* Ensure a power of 2 */
   3957        size_t a = MALLOC_ALIGNMENT << 1;
   3958        while (a < alignment)
   3959            a <<= 1;
   3960        alignment = a;
   3961    }
   3962
   3963    if (bytes >= MAX_REQUEST - alignment) {
   3964        if (m != 0) {           /* Test isn't needed but avoids compiler warning */
   3965            MALLOC_FAILURE_ACTION;
   3966        }
   3967    } else {
   3968        size_t nb = request2size(bytes);
   3969        size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD;
   3970        char *mem = (char *) internal_malloc(m, req);
   3971        if (mem != 0) {
   3972            void *leader = 0;
   3973            void *trailer = 0;
   3974            mchunkptr p = mem2chunk(mem);
   3975
   3976            if (PREACTION(m))
   3977                return 0;
   3978            if ((((size_t) (mem)) % alignment) != 0) {  /* misaligned */
   3979                /*
   3980                   Find an aligned spot inside chunk.  Since we need to give
   3981                   back leading space in a chunk of at least MIN_CHUNK_SIZE, if
   3982                   the first calculation places us at a spot with less than
   3983                   MIN_CHUNK_SIZE leader, we can move to the next aligned spot.
   3984                   We've allocated enough total room so that this is always
   3985                   possible.
   3986                 */
   3987                char *br = (char *) mem2chunk((size_t) (((size_t) (mem +
   3988                                                                   alignment -
   3989                                                                   SIZE_T_ONE))
   3990                                                        & -alignment));
   3991                char *pos =
   3992                    ((size_t) (br - (char *) (p)) >=
   3993                     MIN_CHUNK_SIZE) ? br : br + alignment;
   3994                mchunkptr newp = (mchunkptr) pos;
   3995                size_t leadsize = pos - (char *) (p);
   3996                size_t newsize = chunksize(p) - leadsize;
   3997
   3998                if (is_mmapped(p)) {    /* For mmapped chunks, just adjust offset */
   3999                    newp->prev_foot = p->prev_foot + leadsize;
   4000                    newp->head = (newsize | CINUSE_BIT);
   4001                } else {        /* Otherwise, give back leader, use the rest */
   4002                    set_inuse(m, newp, newsize);
   4003                    set_inuse(m, p, leadsize);
   4004                    leader = chunk2mem(p);
   4005                }
   4006                p = newp;
   4007            }
   4008
   4009            /* Give back spare room at the end */
   4010            if (!is_mmapped(p)) {
   4011                size_t size = chunksize(p);
   4012                if (size > nb + MIN_CHUNK_SIZE) {
   4013                    size_t remainder_size = size - nb;
   4014                    mchunkptr remainder = chunk_plus_offset(p, nb);
   4015                    set_inuse(m, p, nb);
   4016                    set_inuse(m, remainder, remainder_size);
   4017                    trailer = chunk2mem(remainder);
   4018                }
   4019            }
   4020
   4021            assert(chunksize(p) >= nb);
   4022            assert((((size_t) (chunk2mem(p))) % alignment) == 0);
   4023            check_inuse_chunk(m, p);
   4024            POSTACTION(m);
   4025            if (leader != 0) {
   4026                internal_free(m, leader);
   4027            }
   4028            if (trailer != 0) {
   4029                internal_free(m, trailer);
   4030            }
   4031            return chunk2mem(p);
   4032        }
   4033    }
   4034    return 0;
   4035}
   4036
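/*
  Worked example for the alignment search in internal_memalign above.
  Illustrative only: the concrete numbers assume the common defaults of
  4-byte size_t, 8-byte MALLOC_ALIGNMENT, 16-byte MIN_CHUNK_SIZE and no
  FOOTERS, and the addresses are made up.

    internal_memalign(m, 64, 100)
      nb  = request2size(100)                        // padded request size
      req = nb + 64 + MIN_CHUNK_SIZE - CHUNK_OVERHEAD
      mem = internal_malloc(m, req)                  // say mem == 0x1008

    Rounding 0x1008 up to a 64-byte boundary gives payload address 0x1040,
    so the realigned chunk starts just below it, 0x38 bytes past the start
    of the original chunk.  Since 0x38 >= MIN_CHUNK_SIZE, that leading
    space is handed back as the "leader" chunk and freed; had the gap been
    smaller, the code would step one more alignment unit forward (payload
    0x1080) so the leader is a valid chunk.  Any excess beyond nb at the
    end is split off as the "trailer" and freed as well.
*/
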
   4037/* ------------------------ comalloc/coalloc support --------------------- */
   4038
   4039static void **
   4040ialloc(mstate m, size_t n_elements, size_t * sizes, int opts, void *chunks[])
   4041{
   4042    /*
   4043       This provides common support for independent_X routines, handling
   4044       all of the combinations that can result.
   4045
   4046       The opts arg has:
   4047       bit 0 set if all elements are same size (using sizes[0])
   4048       bit 1 set if elements should be zeroed
   4049     */
   4050
   4051    size_t element_size;        /* chunksize of each element, if all same */
   4052    size_t contents_size;       /* total size of elements */
   4053    size_t array_size;          /* request size of pointer array */
   4054    void *mem;                  /* malloced aggregate space */
   4055    mchunkptr p;                /* corresponding chunk */
   4056    size_t remainder_size;      /* remaining bytes while splitting */
   4057    void **marray;              /* either "chunks" or malloced ptr array */
   4058    mchunkptr array_chunk;      /* chunk for malloced ptr array */
   4059    flag_t was_enabled;         /* to disable mmap */
   4060    size_t size;
   4061    size_t i;
   4062
   4063    /* compute array length, if needed */
   4064    if (chunks != 0) {
   4065        if (n_elements == 0)
   4066            return chunks;      /* nothing to do */
   4067        marray = chunks;
   4068        array_size = 0;
   4069    } else {
   4070        /* if empty req, must still return chunk representing empty array */
   4071        if (n_elements == 0)
   4072            return (void **) internal_malloc(m, 0);
   4073        marray = 0;
   4074        array_size = request2size(n_elements * (sizeof(void *)));
   4075    }
   4076
   4077    /* compute total element size */
   4078    if (opts & 0x1) {           /* all-same-size */
   4079        element_size = request2size(*sizes);
   4080        contents_size = n_elements * element_size;
   4081    } else {                    /* add up all the sizes */
   4082        element_size = 0;
   4083        contents_size = 0;
   4084        for (i = 0; i != n_elements; ++i)
   4085            contents_size += request2size(sizes[i]);
   4086    }
   4087
   4088    size = contents_size + array_size;
   4089
   4090    /*
   4091       Allocate the aggregate chunk.  First disable direct-mmapping so
   4092       malloc won't use it, since we would not be able to later
   4093       free/realloc space internal to a segregated mmap region.
   4094     */
   4095    was_enabled = use_mmap(m);
   4096    disable_mmap(m);
   4097    mem = internal_malloc(m, size - CHUNK_OVERHEAD);
   4098    if (was_enabled)
   4099        enable_mmap(m);
   4100    if (mem == 0)
   4101        return 0;
   4102
   4103    if (PREACTION(m))
   4104        return 0;
   4105    p = mem2chunk(mem);
   4106    remainder_size = chunksize(p);
   4107
   4108    assert(!is_mmapped(p));
   4109
   4110    if (opts & 0x2) {           /* optionally clear the elements */
   4111        memset((size_t *) mem, 0, remainder_size - SIZE_T_SIZE - array_size);
   4112    }
   4113
   4114    /* If not provided, allocate the pointer array as final part of chunk */
   4115    if (marray == 0) {
   4116        size_t array_chunk_size;
   4117        array_chunk = chunk_plus_offset(p, contents_size);
   4118        array_chunk_size = remainder_size - contents_size;
   4119        marray = (void **) (chunk2mem(array_chunk));
   4120        set_size_and_pinuse_of_inuse_chunk(m, array_chunk, array_chunk_size);
   4121        remainder_size = contents_size;
   4122    }
   4123
   4124    /* split out elements */
   4125    for (i = 0;; ++i) {
   4126        marray[i] = chunk2mem(p);
   4127        if (i != n_elements - 1) {
   4128            if (element_size != 0)
   4129                size = element_size;
   4130            else
   4131                size = request2size(sizes[i]);
   4132            remainder_size -= size;
   4133            set_size_and_pinuse_of_inuse_chunk(m, p, size);
   4134            p = chunk_plus_offset(p, size);
   4135        } else {                /* the final element absorbs any overallocation slop */
   4136            set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size);
   4137            break;
   4138        }
   4139    }
   4140
   4141#if DEBUG
   4142    if (marray != chunks) {
   4143        /* final element must have exactly exhausted chunk */
   4144        if (element_size != 0) {
   4145            assert(remainder_size == element_size);
   4146        } else {
   4147            assert(remainder_size == request2size(sizes[i]));
   4148        }
   4149        check_inuse_chunk(m, mem2chunk(marray));
   4150    }
   4151    for (i = 0; i != n_elements; ++i)
   4152        check_inuse_chunk(m, mem2chunk(marray[i]));
   4153
   4154#endif /* DEBUG */
   4155
   4156    POSTACTION(m);
   4157    return marray;
   4158}
   4159
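/*
  Usage sketch, not part of the upstream sources: the two public wrappers
  defined further below exercise both opts combinations of ialloc.
  dlindependent_calloc passes opts == 3 (all elements the same size and
  zeroed); dlindependent_comalloc passes opts == 0 (per-element sizes,
  not zeroed).  Error handling is kept minimal.

  void ialloc_example(void)
  {
      size_t i;
      size_t sizes[3] = { 16, 64, 128 };

      // Ten zero-filled 32-byte elements carved from one contiguous chunk.
      void **same = dlindependent_calloc(10, 32, 0);

      // Three differently sized, uncleared elements from one chunk.
      void **mixed = dlindependent_comalloc(3, sizes, 0);

      if (same != 0) {
          // Each element is an ordinary chunk and may be freed on its own;
          // the pointer array was allocated by the call, so free it too.
          for (i = 0; i < 10; ++i)
              dlfree(same[i]);
          dlfree(same);
      }
      if (mixed != 0) {
          for (i = 0; i < 3; ++i)
              dlfree(mixed[i]);
          dlfree(mixed);
      }
  }
*/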
   4160
   4161/* -------------------------- public routines ---------------------------- */
   4162
   4163#if !ONLY_MSPACES
   4164
   4165void *
   4166dlmalloc(size_t bytes)
   4167{
   4168    /*
   4169       Basic algorithm:
   4170       If a small request (< 256 bytes minus per-chunk overhead):
   4171       1. If one exists, use a remainderless chunk in associated smallbin.
   4172       (Remainderless means that there are too few excess bytes to
   4173       represent as a chunk.)
   4174       2. If it is big enough, use the dv chunk, which is normally the
   4175       chunk adjacent to the one used for the most recent small request.
   4176       3. If one exists, split the smallest available chunk in a bin,
   4177       saving remainder in dv.
   4178       4. If it is big enough, use the top chunk.
   4179       5. If available, get memory from system and use it
   4180       Otherwise, for a large request:
   4181       1. Find the smallest available binned chunk that fits, and use it
   4182       if it is better fitting than dv chunk, splitting if necessary.
   4183       2. If better fitting than any binned chunk, use the dv chunk.
   4184       3. If it is big enough, use the top chunk.
   4185       4. If request size >= mmap threshold, try to directly mmap this chunk.
   4186       5. If available, get memory from system and use it
   4187
   4188       The ugly gotos here ensure that postaction occurs along all paths.
   4189     */
   4190
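    /*
       Worked example, illustrative only (numbers assume the 32-bit
       defaults: 4-byte size_t, 8-byte alignment and no FOOTERS, so
       CHUNK_OVERHEAD == 4 and CHUNK_ALIGN_MASK == 7):

         dlmalloc(20)
           nb  = pad_request(20) = (20 + 4 + 7) & ~7 = 24
           idx = small_index(24) = 24 >> 3           = 3

       so the request is first tried against smallbins 3 and 4, then the
       dv chunk, then a split of the smallest usable binned chunk, then
       the top chunk, and finally sys_alloc, as described above.
     */
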
   4191    if (!PREACTION(gm)) {
   4192        void *mem;
   4193        size_t nb;
   4194        if (bytes <= MAX_SMALL_REQUEST) {
   4195            bindex_t idx;
   4196            binmap_t smallbits;
   4197            nb = (bytes < MIN_REQUEST) ? MIN_CHUNK_SIZE : pad_request(bytes);
   4198            idx = small_index(nb);
   4199            smallbits = gm->smallmap >> idx;
   4200
   4201            if ((smallbits & 0x3U) != 0) {      /* Remainderless fit to a smallbin. */
   4202                mchunkptr b, p;
   4203                idx += ~smallbits & 1;  /* Uses next bin if idx empty */
   4204                b = smallbin_at(gm, idx);
   4205                p = b->fd;
   4206                assert(chunksize(p) == small_index2size(idx));
   4207                unlink_first_small_chunk(gm, b, p, idx);
   4208                set_inuse_and_pinuse(gm, p, small_index2size(idx));
   4209                mem = chunk2mem(p);
   4210                check_malloced_chunk(gm, mem, nb);
   4211                goto postaction;
   4212            }
   4213
   4214            else if (nb > gm->dvsize) {
   4215                if (smallbits != 0) {   /* Use chunk in next nonempty smallbin */
   4216                    mchunkptr b, p, r;
   4217                    size_t rsize;
   4218                    bindex_t i;
   4219                    binmap_t leftbits =
   4220                        (smallbits << idx) & left_bits(idx2bit(idx));
   4221                    binmap_t leastbit = least_bit(leftbits);
   4222                    compute_bit2idx(leastbit, i);
   4223                    b = smallbin_at(gm, i);
   4224                    p = b->fd;
   4225                    assert(chunksize(p) == small_index2size(i));
   4226                    unlink_first_small_chunk(gm, b, p, i);
   4227                    rsize = small_index2size(i) - nb;
   4228                    /* Fit here cannot be remainderless if 4byte sizes */
   4229                    if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
   4230                        set_inuse_and_pinuse(gm, p, small_index2size(i));
   4231                    else {
   4232                        set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
   4233                        r = chunk_plus_offset(p, nb);
   4234                        set_size_and_pinuse_of_free_chunk(r, rsize);
   4235                        replace_dv(gm, r, rsize);
   4236                    }
   4237                    mem = chunk2mem(p);
   4238                    check_malloced_chunk(gm, mem, nb);
   4239                    goto postaction;
   4240                }
   4241
   4242                else if (gm->treemap != 0
   4243                         && (mem = tmalloc_small(gm, nb)) != 0) {
   4244                    check_malloced_chunk(gm, mem, nb);
   4245                    goto postaction;
   4246                }
   4247            }
   4248        } else if (bytes >= MAX_REQUEST)
   4249            nb = MAX_SIZE_T;    /* Too big to allocate. Force failure (in sys alloc) */
   4250        else {
   4251            nb = pad_request(bytes);
   4252            if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) {
   4253                check_malloced_chunk(gm, mem, nb);
   4254                goto postaction;
   4255            }
   4256        }
   4257
   4258        if (nb <= gm->dvsize) {
   4259            size_t rsize = gm->dvsize - nb;
   4260            mchunkptr p = gm->dv;
   4261            if (rsize >= MIN_CHUNK_SIZE) {      /* split dv */
   4262                mchunkptr r = gm->dv = chunk_plus_offset(p, nb);
   4263                gm->dvsize = rsize;
   4264                set_size_and_pinuse_of_free_chunk(r, rsize);
   4265                set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
   4266            } else {            /* exhaust dv */
   4267                size_t dvs = gm->dvsize;
   4268                gm->dvsize = 0;
   4269                gm->dv = 0;
   4270                set_inuse_and_pinuse(gm, p, dvs);
   4271            }
   4272            mem = chunk2mem(p);
   4273            check_malloced_chunk(gm, mem, nb);
   4274            goto postaction;
   4275        }
   4276
   4277        else if (nb < gm->topsize) {    /* Split top */
   4278            size_t rsize = gm->topsize -= nb;
   4279            mchunkptr p = gm->top;
   4280            mchunkptr r = gm->top = chunk_plus_offset(p, nb);
   4281            r->head = rsize | PINUSE_BIT;
   4282            set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
   4283            mem = chunk2mem(p);
   4284            check_top_chunk(gm, gm->top);
   4285            check_malloced_chunk(gm, mem, nb);
   4286            goto postaction;
   4287        }
   4288
   4289        mem = sys_alloc(gm, nb);
   4290
   4291      postaction:
   4292        POSTACTION(gm);
   4293        return mem;
   4294    }
   4295
   4296    return 0;
   4297}
   4298
   4299void
   4300dlfree(void *mem)
   4301{
   4302    /*
   4303       Consolidate freed chunks with preceding or succeeding bordering
   4304       free chunks, if they exist, and then place in a bin.  Intermixed
   4305       with special cases for top, dv, mmapped chunks, and usage errors.
   4306     */
   4307
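    /*
       Illustrative summary of the cases handled below:

         p is a directly mmapped chunk  -> munmap the whole region
         previous chunk is free         -> consolidate backward first
         next chunk is the top chunk    -> merge into top, maybe sys_trim
         next chunk is the dv chunk     -> merge into dv
         next chunk is a free chunk     -> unlink it, merge forward, bin
         next chunk is in use           -> mark p free and bin it as-is
     */
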
   4308    if (mem != 0) {
   4309        mchunkptr p = mem2chunk(mem);
   4310#if FOOTERS
   4311        mstate fm = get_mstate_for(p);
   4312        if (!ok_magic(fm)) {
   4313            USAGE_ERROR_ACTION(fm, p);
   4314            return;
   4315        }
   4316#else /* FOOTERS */
   4317#define fm gm
   4318#endif /* FOOTERS */
   4319        if (!PREACTION(fm)) {
   4320            check_inuse_chunk(fm, p);
   4321            if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) {
   4322                size_t psize = chunksize(p);
   4323                mchunkptr next = chunk_plus_offset(p, psize);
   4324                if (!pinuse(p)) {
   4325                    size_t prevsize = p->prev_foot;
   4326                    if ((prevsize & IS_MMAPPED_BIT) != 0) {
   4327                        prevsize &= ~IS_MMAPPED_BIT;
   4328                        psize += prevsize + MMAP_FOOT_PAD;
   4329                        if (CALL_MUNMAP((char *) p - prevsize, psize) == 0)
   4330                            fm->footprint -= psize;
   4331                        goto postaction;
   4332                    } else {
   4333                        mchunkptr prev = chunk_minus_offset(p, prevsize);
   4334                        psize += prevsize;
   4335                        p = prev;
   4336                        if (RTCHECK(ok_address(fm, prev))) {    /* consolidate backward */
   4337                            if (p != fm->dv) {
   4338                                unlink_chunk(fm, p, prevsize);
   4339                            } else if ((next->head & INUSE_BITS) ==
   4340                                       INUSE_BITS) {
   4341                                fm->dvsize = psize;
   4342                                set_free_with_pinuse(p, psize, next);
   4343                                goto postaction;
   4344                            }
   4345                        } else
   4346                            goto erroraction;
   4347                    }
   4348                }
   4349
   4350                if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
   4351                    if (!cinuse(next)) {        /* consolidate forward */
   4352                        if (next == fm->top) {
   4353                            size_t tsize = fm->topsize += psize;
   4354                            fm->top = p;
   4355                            p->head = tsize | PINUSE_BIT;
   4356                            if (p == fm->dv) {
   4357                                fm->dv = 0;
   4358                                fm->dvsize = 0;
   4359                            }
   4360                            if (should_trim(fm, tsize))
   4361                                sys_trim(fm, 0);
   4362                            goto postaction;
   4363                        } else if (next == fm->dv) {
   4364                            size_t dsize = fm->dvsize += psize;
   4365                            fm->dv = p;
   4366                            set_size_and_pinuse_of_free_chunk(p, dsize);
   4367                            goto postaction;
   4368                        } else {
   4369                            size_t nsize = chunksize(next);
   4370                            psize += nsize;
   4371                            unlink_chunk(fm, next, nsize);
   4372                            set_size_and_pinuse_of_free_chunk(p, psize);
   4373                            if (p == fm->dv) {
   4374                                fm->dvsize = psize;
   4375                                goto postaction;
   4376                            }
   4377                        }
   4378                    } else
   4379                        set_free_with_pinuse(p, psize, next);
   4380                    insert_chunk(fm, p, psize);
   4381                    check_free_chunk(fm, p);
   4382                    goto postaction;
   4383                }
   4384            }
   4385          erroraction:
   4386            USAGE_ERROR_ACTION(fm, p);
   4387          postaction:
   4388            POSTACTION(fm);
   4389        }
   4390    }
   4391#if !FOOTERS
   4392#undef fm
   4393#endif /* FOOTERS */
   4394}
   4395
   4396void *
   4397dlcalloc(size_t n_elements, size_t elem_size)
   4398{
   4399    void *mem;
   4400    size_t req = 0;
   4401    if (n_elements != 0) {
   4402        req = n_elements * elem_size;
   4403        if (((n_elements | elem_size) & ~(size_t) 0xffff) &&
   4404            (req / n_elements != elem_size))
   4405            req = MAX_SIZE_T;   /* force downstream failure on overflow */
   4406    }
   4407    mem = dlmalloc(req);
   4408    if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
   4409        memset(mem, 0, req);
   4410    return mem;
   4411}
   4412
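/*
  Note on the overflow guard in dlcalloc above (illustrative reasoning,
  not from the upstream comments): n_elements * elem_size can only
  overflow when at least one operand has bits above the low 16, so the
  cheap mask test skips the division in the common small case (assuming
  size_t is at least 32 bits wide).  For example, with a 32-bit size_t:

      n_elements = 0x10000, elem_size = 0x10000
      req = 0x10000 * 0x10000 = 0          (wrapped mod 2^32)
      req / n_elements = 0 != elem_size    -> req forced to MAX_SIZE_T

  dlmalloc(MAX_SIZE_T) then fails cleanly instead of handing back a block
  that is far too small.
*/
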
   4413void *
   4414dlrealloc(void *oldmem, size_t bytes)
   4415{
   4416    if (oldmem == 0)
   4417        return dlmalloc(bytes);
   4418#ifdef REALLOC_ZERO_BYTES_FREES
   4419    if (bytes == 0) {
   4420        dlfree(oldmem);
   4421        return 0;
   4422    }
   4423#endif /* REALLOC_ZERO_BYTES_FREES */
   4424    else {
   4425#if ! FOOTERS
   4426        mstate m = gm;
   4427#else /* FOOTERS */
   4428        mstate m = get_mstate_for(mem2chunk(oldmem));
   4429        if (!ok_magic(m)) {
   4430            USAGE_ERROR_ACTION(m, oldmem);
   4431            return 0;
   4432        }
   4433#endif /* FOOTERS */
   4434        return internal_realloc(m, oldmem, bytes);
   4435    }
   4436}
   4437
   4438void *
   4439dlmemalign(size_t alignment, size_t bytes)
   4440{
   4441    return internal_memalign(gm, alignment, bytes);
   4442}
   4443
   4444void **
   4445dlindependent_calloc(size_t n_elements, size_t elem_size, void *chunks[])
   4446{
   4447    size_t sz = elem_size;      /* serves as 1-element array */
   4448    return ialloc(gm, n_elements, &sz, 3, chunks);
   4449}
   4450
   4451void **
   4452dlindependent_comalloc(size_t n_elements, size_t sizes[], void *chunks[])
   4453{
   4454    return ialloc(gm, n_elements, sizes, 0, chunks);
   4455}
   4456
   4457void *
   4458dlvalloc(size_t bytes)
   4459{
   4460    size_t pagesz;
   4461    init_mparams();
   4462    pagesz = mparams.page_size;
   4463    return dlmemalign(pagesz, bytes);
   4464}
   4465
   4466void *
   4467dlpvalloc(size_t bytes)
   4468{
   4469    size_t pagesz;
   4470    init_mparams();
   4471    pagesz = mparams.page_size;
   4472    return dlmemalign(pagesz,
   4473                      (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE));
   4474}
   4475
   4476int
   4477dlmalloc_trim(size_t pad)
   4478{
   4479    int result = 0;
   4480    if (!PREACTION(gm)) {
   4481        result = sys_trim(gm, pad);
   4482        POSTACTION(gm);
   4483    }
   4484    return result;
   4485}
   4486
   4487size_t
   4488dlmalloc_footprint(void)
   4489{
   4490    return gm->footprint;
   4491}
   4492
   4493size_t
   4494dlmalloc_max_footprint(void)
   4495{
   4496    return gm->max_footprint;
   4497}
   4498
   4499#if !NO_MALLINFO
   4500struct mallinfo
   4501dlmallinfo(void)
   4502{
   4503    return internal_mallinfo(gm);
   4504}
   4505#endif /* NO_MALLINFO */
   4506
   4507void
   4508dlmalloc_stats()
   4509{
   4510    internal_malloc_stats(gm);
   4511}
   4512
   4513size_t
   4514dlmalloc_usable_size(void *mem)
   4515{
   4516    if (mem != 0) {
   4517        mchunkptr p = mem2chunk(mem);
   4518        if (cinuse(p))
   4519            return chunksize(p) - overhead_for(p);
   4520    }
   4521    return 0;
   4522}
   4523
   4524int
   4525dlmallopt(int param_number, int value)
   4526{
   4527    return change_mparam(param_number, value);
   4528}
   4529
   4530#endif /* !ONLY_MSPACES */
   4531
   4532/* ----------------------------- user mspaces ---------------------------- */
   4533
   4534#if MSPACES
   4535
   4536static mstate
   4537init_user_mstate(char *tbase, size_t tsize)
   4538{
   4539    size_t msize = pad_request(sizeof(struct malloc_state));
   4540    mchunkptr mn;
   4541    mchunkptr msp = align_as_chunk(tbase);
   4542    mstate m = (mstate) (chunk2mem(msp));
   4543    memset(m, 0, msize);
   4544    INITIAL_LOCK(&m->mutex);
   4545    msp->head = (msize | PINUSE_BIT | CINUSE_BIT);
   4546    m->seg.base = m->least_addr = tbase;
   4547    m->seg.size = m->footprint = m->max_footprint = tsize;
   4548    m->magic = mparams.magic;
   4549    m->mflags = mparams.default_mflags;
   4550    disable_contiguous(m);
   4551    init_bins(m);
   4552    mn = next_chunk(mem2chunk(m));
   4553    init_top(m, mn, (size_t) ((tbase + tsize) - (char *) mn) - TOP_FOOT_SIZE);
   4554    check_top_chunk(m, m->top);
   4555    return m;
   4556}
   4557
   4558mspace
   4559create_mspace(size_t capacity, int locked)
   4560{
   4561    mstate m = 0;
   4562    size_t msize = pad_request(sizeof(struct malloc_state));
   4563    init_mparams();             /* Ensure pagesize etc initialized */
   4564
   4565    if (capacity < (size_t) - (msize + TOP_FOOT_SIZE + mparams.page_size)) {
   4566        size_t rs = ((capacity == 0) ? mparams.granularity :
   4567                     (capacity + TOP_FOOT_SIZE + msize));
   4568        size_t tsize = granularity_align(rs);
   4569        char *tbase = (char *) (CALL_MMAP(tsize));
   4570        if (tbase != CMFAIL) {
   4571            m = init_user_mstate(tbase, tsize);
   4572            m->seg.sflags = IS_MMAPPED_BIT;
   4573            set_lock(m, locked);
   4574        }
   4575    }
   4576    return (mspace) m;
   4577}
   4578
   4579mspace
   4580create_mspace_with_base(void *base, size_t capacity, int locked)
   4581{
   4582    mstate m = 0;
   4583    size_t msize = pad_request(sizeof(struct malloc_state));
   4584    init_mparams();             /* Ensure pagesize etc initialized */
   4585
   4586    if (capacity > msize + TOP_FOOT_SIZE &&
   4587        capacity < (size_t) - (msize + TOP_FOOT_SIZE + mparams.page_size)) {
   4588        m = init_user_mstate((char *) base, capacity);
   4589        m->seg.sflags = EXTERN_BIT;
   4590        set_lock(m, locked);
   4591    }
   4592    return (mspace) m;
   4593}
   4594
   4595size_t
   4596destroy_mspace(mspace msp)
   4597{
   4598    size_t freed = 0;
   4599    mstate ms = (mstate) msp;
   4600    if (ok_magic(ms)) {
   4601        msegmentptr sp = &ms->seg;
   4602        while (sp != 0) {
   4603            char *base = sp->base;
   4604            size_t size = sp->size;
   4605            flag_t flag = sp->sflags;
   4606            sp = sp->next;
   4607            if ((flag & IS_MMAPPED_BIT) && !(flag & EXTERN_BIT) &&
   4608                CALL_MUNMAP(base, size) == 0)
   4609                freed += size;
   4610        }
   4611    } else {
   4612        USAGE_ERROR_ACTION(ms, ms);
   4613    }
   4614    return freed;
   4615}
   4616
   4617/*
   4618  mspace versions of routines are near-clones of the global
   4619  versions. This is not so nice but better than the alternatives.
   4620*/
   4621
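/*
  Usage sketch, not part of the upstream sources: an mspace is a private,
  self-contained heap, so besides the per-block calls below the whole
  arena can be torn down in one step with destroy_mspace.  Assumes MSPACES
  is enabled; capacity and sizes are arbitrary examples.

  void mspace_example(void)
  {
      // capacity 0 means "start with the default granularity"; a nonzero
      // second argument requests locking for multithreaded use.
      mspace arena = create_mspace(0, 1);
      if (arena != 0) {
          void *a = mspace_malloc(arena, 128);
          void *b = mspace_calloc(arena, 16, 32);
          mspace_free(arena, a);            // individual frees still work
          (void) b;
          destroy_mspace(arena);            // releases the whole arena
      }
  }
*/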
   4622
   4623void *
   4624mspace_malloc(mspace msp, size_t bytes)
   4625{
   4626    mstate ms = (mstate) msp;
   4627    if (!ok_magic(ms)) {
   4628        USAGE_ERROR_ACTION(ms, ms);
   4629        return 0;
   4630    }
   4631    if (!PREACTION(ms)) {
   4632        void *mem;
   4633        size_t nb;
   4634        if (bytes <= MAX_SMALL_REQUEST) {
   4635            bindex_t idx;
   4636            binmap_t smallbits;
   4637            nb = (bytes < MIN_REQUEST) ? MIN_CHUNK_SIZE : pad_request(bytes);
   4638            idx = small_index(nb);
   4639            smallbits = ms->smallmap >> idx;
   4640
   4641            if ((smallbits & 0x3U) != 0) {      /* Remainderless fit to a smallbin. */
   4642                mchunkptr b, p;
   4643                idx += ~smallbits & 1;  /* Uses next bin if idx empty */
   4644                b = smallbin_at(ms, idx);
   4645                p = b->fd;
   4646                assert(chunksize(p) == small_index2size(idx));
   4647                unlink_first_small_chunk(ms, b, p, idx);
   4648                set_inuse_and_pinuse(ms, p, small_index2size(idx));
   4649                mem = chunk2mem(p);
   4650                check_malloced_chunk(ms, mem, nb);
   4651                goto postaction;
   4652            }
   4653
   4654            else if (nb > ms->dvsize) {
   4655                if (smallbits != 0) {   /* Use chunk in next nonempty smallbin */
   4656                    mchunkptr b, p, r;
   4657                    size_t rsize;
   4658                    bindex_t i;
   4659                    binmap_t leftbits =
   4660                        (smallbits << idx) & left_bits(idx2bit(idx));
   4661                    binmap_t leastbit = least_bit(leftbits);
   4662                    compute_bit2idx(leastbit, i);
   4663                    b = smallbin_at(ms, i);
   4664                    p = b->fd;
   4665                    assert(chunksize(p) == small_index2size(i));
   4666                    unlink_first_small_chunk(ms, b, p, i);
   4667                    rsize = small_index2size(i) - nb;
   4668                    /* Fit here cannot be remainderless if 4byte sizes */
   4669                    if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
   4670                        set_inuse_and_pinuse(ms, p, small_index2size(i));
   4671                    else {
   4672                        set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
   4673                        r = chunk_plus_offset(p, nb);
   4674                        set_size_and_pinuse_of_free_chunk(r, rsize);
   4675                        replace_dv(ms, r, rsize);
   4676                    }
   4677                    mem = chunk2mem(p);
   4678                    check_malloced_chunk(ms, mem, nb);
   4679                    goto postaction;
   4680                }
   4681
   4682                else if (ms->treemap != 0
   4683                         && (mem = tmalloc_small(ms, nb)) != 0) {
   4684                    check_malloced_chunk(ms, mem, nb);
   4685                    goto postaction;
   4686                }
   4687            }
   4688        } else if (bytes >= MAX_REQUEST)
   4689            nb = MAX_SIZE_T;    /* Too big to allocate. Force failure (in sys alloc) */
   4690        else {
   4691            nb = pad_request(bytes);
   4692            if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
   4693                check_malloced_chunk(ms, mem, nb);
   4694                goto postaction;
   4695            }
   4696        }
   4697
   4698        if (nb <= ms->dvsize) {
   4699            size_t rsize = ms->dvsize - nb;
   4700            mchunkptr p = ms->dv;
   4701            if (rsize >= MIN_CHUNK_SIZE) {      /* split dv */
   4702                mchunkptr r = ms->dv = chunk_plus_offset(p, nb);
   4703                ms->dvsize = rsize;
   4704                set_size_and_pinuse_of_free_chunk(r, rsize);
   4705                set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
   4706            } else {            /* exhaust dv */
   4707                size_t dvs = ms->dvsize;
   4708                ms->dvsize = 0;
   4709                ms->dv = 0;
   4710                set_inuse_and_pinuse(ms, p, dvs);
   4711            }
   4712            mem = chunk2mem(p);
   4713            check_malloced_chunk(ms, mem, nb);
   4714            goto postaction;
   4715        }
   4716
   4717        else if (nb < ms->topsize) {    /* Split top */
   4718            size_t rsize = ms->topsize -= nb;
   4719            mchunkptr p = ms->top;
   4720            mchunkptr r = ms->top = chunk_plus_offset(p, nb);
   4721            r->head = rsize | PINUSE_BIT;
   4722            set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
   4723            mem = chunk2mem(p);
   4724            check_top_chunk(ms, ms->top);
   4725            check_malloced_chunk(ms, mem, nb);
   4726            goto postaction;
   4727        }
   4728
   4729        mem = sys_alloc(ms, nb);
   4730
   4731      postaction:
   4732        POSTACTION(ms);
   4733        return mem;
   4734    }
   4735
   4736    return 0;
   4737}
   4738
   4739void
   4740mspace_free(mspace msp, void *mem)
   4741{
   4742    if (mem != 0) {
   4743        mchunkptr p = mem2chunk(mem);
   4744#if FOOTERS
   4745        mstate fm = get_mstate_for(p);
   4746#else /* FOOTERS */
   4747        mstate fm = (mstate) msp;
   4748#endif /* FOOTERS */
   4749        if (!ok_magic(fm)) {
   4750            USAGE_ERROR_ACTION(fm, p);
   4751            return;
   4752        }
   4753        if (!PREACTION(fm)) {
   4754            check_inuse_chunk(fm, p);
   4755            if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) {
   4756                size_t psize = chunksize(p);
   4757                mchunkptr next = chunk_plus_offset(p, psize);
   4758                if (!pinuse(p)) {
   4759                    size_t prevsize = p->prev_foot;
   4760                    if ((prevsize & IS_MMAPPED_BIT) != 0) {
   4761                        prevsize &= ~IS_MMAPPED_BIT;
   4762                        psize += prevsize + MMAP_FOOT_PAD;
   4763                        if (CALL_MUNMAP((char *) p - prevsize, psize) == 0)
   4764                            fm->footprint -= psize;
   4765                        goto postaction;
   4766                    } else {
   4767                        mchunkptr prev = chunk_minus_offset(p, prevsize);
   4768                        psize += prevsize;
   4769                        p = prev;
   4770                        if (RTCHECK(ok_address(fm, prev))) {    /* consolidate backward */
   4771                            if (p != fm->dv) {
   4772                                unlink_chunk(fm, p, prevsize);
   4773                            } else if ((next->head & INUSE_BITS) ==
   4774                                       INUSE_BITS) {
   4775                                fm->dvsize = psize;
   4776                                set_free_with_pinuse(p, psize, next);
   4777                                goto postaction;
   4778                            }
   4779                        } else
   4780                            goto erroraction;
   4781                    }
   4782                }
   4783
   4784                if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
   4785                    if (!cinuse(next)) {        /* consolidate forward */
   4786                        if (next == fm->top) {
   4787                            size_t tsize = fm->topsize += psize;
   4788                            fm->top = p;
   4789                            p->head = tsize | PINUSE_BIT;
   4790                            if (p == fm->dv) {
   4791                                fm->dv = 0;
   4792                                fm->dvsize = 0;
   4793                            }
   4794                            if (should_trim(fm, tsize))
   4795                                sys_trim(fm, 0);
   4796                            goto postaction;
   4797                        } else if (next == fm->dv) {
   4798                            size_t dsize = fm->dvsize += psize;
   4799                            fm->dv = p;
   4800                            set_size_and_pinuse_of_free_chunk(p, dsize);
   4801                            goto postaction;
   4802                        } else {
   4803                            size_t nsize = chunksize(next);
   4804                            psize += nsize;
   4805                            unlink_chunk(fm, next, nsize);
   4806                            set_size_and_pinuse_of_free_chunk(p, psize);
   4807                            if (p == fm->dv) {
   4808                                fm->dvsize = psize;
   4809                                goto postaction;
   4810                            }
   4811                        }
   4812                    } else
   4813                        set_free_with_pinuse(p, psize, next);
   4814                    insert_chunk(fm, p, psize);
   4815                    check_free_chunk(fm, p);
   4816                    goto postaction;
   4817                }
   4818            }
   4819          erroraction:
   4820            USAGE_ERROR_ACTION(fm, p);
   4821          postaction:
   4822            POSTACTION(fm);
   4823        }
   4824    }
   4825}
   4826
   4827void *
   4828mspace_calloc(mspace msp, size_t n_elements, size_t elem_size)
   4829{
   4830    void *mem;
   4831    size_t req = 0;
   4832    mstate ms = (mstate) msp;
   4833    if (!ok_magic(ms)) {
   4834        USAGE_ERROR_ACTION(ms, ms);
   4835        return 0;
   4836    }
   4837    if (n_elements != 0) {
   4838        req = n_elements * elem_size;
   4839        if (((n_elements | elem_size) & ~(size_t) 0xffff) &&
   4840            (req / n_elements != elem_size))
   4841            req = MAX_SIZE_T;   /* force downstream failure on overflow */
   4842    }
   4843    mem = internal_malloc(ms, req);
   4844    if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
   4845        memset(mem, 0, req);
   4846    return mem;
   4847}
   4848
   4849void *
   4850mspace_realloc(mspace msp, void *oldmem, size_t bytes)
   4851{
   4852    if (oldmem == 0)
   4853        return mspace_malloc(msp, bytes);
   4854#ifdef REALLOC_ZERO_BYTES_FREES
   4855    if (bytes == 0) {
   4856        mspace_free(msp, oldmem);
   4857        return 0;
   4858    }
   4859#endif /* REALLOC_ZERO_BYTES_FREES */
   4860    else {
   4861#if FOOTERS
   4862        mchunkptr p = mem2chunk(oldmem);
   4863        mstate ms = get_mstate_for(p);
   4864#else /* FOOTERS */
   4865        mstate ms = (mstate) msp;
   4866#endif /* FOOTERS */
   4867        if (!ok_magic(ms)) {
   4868            USAGE_ERROR_ACTION(ms, ms);
   4869            return 0;
   4870        }
   4871        return internal_realloc(ms, oldmem, bytes);
   4872    }
   4873}
   4874
   4875void *
   4876mspace_memalign(mspace msp, size_t alignment, size_t bytes)
   4877{
   4878    mstate ms = (mstate) msp;
   4879    if (!ok_magic(ms)) {
   4880        USAGE_ERROR_ACTION(ms, ms);
   4881        return 0;
   4882    }
   4883    return internal_memalign(ms, alignment, bytes);
   4884}
   4885
   4886void **
   4887mspace_independent_calloc(mspace msp, size_t n_elements,
   4888                          size_t elem_size, void *chunks[])
   4889{
   4890    size_t sz = elem_size;      /* serves as 1-element array */
   4891    mstate ms = (mstate) msp;
   4892    if (!ok_magic(ms)) {
   4893        USAGE_ERROR_ACTION(ms, ms);
   4894        return 0;
   4895    }
   4896    return ialloc(ms, n_elements, &sz, 3, chunks);
   4897}
   4898
   4899void **
   4900mspace_independent_comalloc(mspace msp, size_t n_elements,
   4901                            size_t sizes[], void *chunks[])
   4902{
   4903    mstate ms = (mstate) msp;
   4904    if (!ok_magic(ms)) {
   4905        USAGE_ERROR_ACTION(ms, ms);
   4906        return 0;
   4907    }
   4908    return ialloc(ms, n_elements, sizes, 0, chunks);
   4909}
   4910
   4911int
   4912mspace_trim(mspace msp, size_t pad)
   4913{
   4914    int result = 0;
   4915    mstate ms = (mstate) msp;
   4916    if (ok_magic(ms)) {
   4917        if (!PREACTION(ms)) {
   4918            result = sys_trim(ms, pad);
   4919            POSTACTION(ms);
   4920        }
   4921    } else {
   4922        USAGE_ERROR_ACTION(ms, ms);
   4923    }
   4924    return result;
   4925}
   4926
   4927void
   4928mspace_malloc_stats(mspace msp)
   4929{
   4930    mstate ms = (mstate) msp;
   4931    if (ok_magic(ms)) {
   4932        internal_malloc_stats(ms);
   4933    } else {
   4934        USAGE_ERROR_ACTION(ms, ms);
   4935    }
   4936}
   4937
   4938size_t
   4939mspace_footprint(mspace msp)
   4940{
   4941    size_t result = 0;
   4942    mstate ms = (mstate) msp;
   4943    if (ok_magic(ms)) {
   4944        result = ms->footprint;
   4945    } else
   4946        USAGE_ERROR_ACTION(ms, ms);
   4947    return result;
   4948}
   4949
   4950
   4951size_t
   4952mspace_max_footprint(mspace msp)
   4953{
   4954    size_t result = 0;
   4955    mstate ms = (mstate) msp;
   4956    if (ok_magic(ms)) {
   4957        result = ms->max_footprint;
   4958    } else
   4959        USAGE_ERROR_ACTION(ms, ms);
   4960    return result;
   4961}
   4962
   4963
   4964#if !NO_MALLINFO
   4965struct mallinfo
   4966mspace_mallinfo(mspace msp)
   4967{
   4968    mstate ms = (mstate) msp;
   4969    if (!ok_magic(ms)) {
   4970        USAGE_ERROR_ACTION(ms, ms);
   4971    }
   4972    return internal_mallinfo(ms);
   4973}
   4974#endif /* NO_MALLINFO */
   4975
   4976int
   4977mspace_mallopt(int param_number, int value)
   4978{
   4979    return change_mparam(param_number, value);
   4980}
   4981
   4982#endif /* MSPACES */
   4983
   4984/* -------------------- Alternative MORECORE functions ------------------- */
   4985
   4986/*
   4987  Guidelines for creating a custom version of MORECORE:
   4988
   4989  * For best performance, MORECORE should allocate in multiples of pagesize.
   4990  * MORECORE may allocate more memory than requested. (Or even less,
   4991      but this will usually result in a malloc failure.)
   4992  * MORECORE must not allocate memory when given argument zero, but
   4993      instead return one past the end address of memory from previous
   4994      nonzero call.
   4995  * For best performance, consecutive calls to MORECORE with positive
   4996      arguments should return increasing addresses, indicating that
   4997      space has been contiguously extended.
   4998  * Even though consecutive calls to MORECORE need not return contiguous
   4999      addresses, it must be OK for malloc'ed chunks to span multiple
   5000      regions in those cases where they do happen to be contiguous.
   5001  * MORECORE need not handle negative arguments -- it may instead
   5002      just return MFAIL when given negative arguments.
   5003      Negative arguments are always multiples of pagesize. MORECORE
   5004      must not misinterpret negative args as large positive unsigned
   5005      args. You can suppress all such calls from even occurring by defining
   5006      MORECORE_CANNOT_TRIM.
   5007
   5008  As an example alternative MORECORE, here is a custom allocator
   5009  kindly contributed for pre-OSX macOS.  It uses virtually but not
   5010  necessarily physically contiguous non-paged memory (locked in,
   5011  present and won't get swapped out).  You can use it by uncommenting
   5012  this section, adding some #includes, and setting up the appropriate
   5013  defines above:
   5014
   5015      #define MORECORE osMoreCore
   5016
   5017  There is also a shutdown routine that should somehow be called for
   5018  cleanup upon program exit.
   5019
   5020  #define MAX_POOL_ENTRIES 100
   5021  #define MINIMUM_MORECORE_SIZE  (64 * 1024U)
   5022  static int next_os_pool;
   5023  void *our_os_pools[MAX_POOL_ENTRIES];
   5024
   5025  void *osMoreCore(int size)
   5026  {
   5027    void *ptr = 0;
   5028    static void *sbrk_top = 0;
   5029
   5030    if (size > 0)
   5031    {
   5032      if (size < MINIMUM_MORECORE_SIZE)
   5033         size = MINIMUM_MORECORE_SIZE;
   5034      if (CurrentExecutionLevel() == kTaskLevel)
   5035         ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
   5036      if (ptr == 0)
   5037      {
   5038        return (void *) MFAIL;
   5039      }
   5040      // save ptrs so they can be freed during cleanup
   5041      our_os_pools[next_os_pool] = ptr;
   5042      next_os_pool++;
   5043      ptr = (void *) ((((size_t) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
   5044      sbrk_top = (char *) ptr + size;
   5045      return ptr;
   5046    }
   5047    else if (size < 0)
   5048    {
   5049      // we don't currently support shrink behavior
   5050      return (void *) MFAIL;
   5051    }
   5052    else
   5053    {
   5054      return sbrk_top;
   5055    }
   5056  }
   5057
   5058  // cleanup any allocated memory pools
   5059  // called as last thing before shutting down driver
   5060
   5061  void osCleanupMem(void)
   5062  {
   5063    void **ptr;
   5064
   5065    for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
   5066      if (*ptr)
   5067      {
   5068         PoolDeallocate(*ptr);
   5069         *ptr = 0;
   5070      }
   5071  }
   5072
   5073*/
   5074
   5075
   5076/* -----------------------------------------------------------------------
   5077History:
   5078    V2.8.3 Thu Sep 22 11:16:32 2005  Doug Lea  (dl at gee)
   5079      * Add max_footprint functions
   5080      * Ensure all appropriate literals are size_t
   5081      * Fix conditional compilation problem for some #define settings
   5082      * Avoid concatenating segments with the one provided
   5083        in create_mspace_with_base
   5084      * Rename some variables to avoid compiler shadowing warnings
   5085      * Use explicit lock initialization.
   5086      * Better handling of sbrk interference.
   5087      * Simplify and fix segment insertion, trimming and mspace_destroy
   5088      * Reinstate REALLOC_ZERO_BYTES_FREES option from 2.7.x
   5089      * Thanks especially to Dennis Flanagan for help on these.
   5090
   5091    V2.8.2 Sun Jun 12 16:01:10 2005  Doug Lea  (dl at gee)
   5092      * Fix memalign brace error.
   5093
   5094    V2.8.1 Wed Jun  8 16:11:46 2005  Doug Lea  (dl at gee)
   5095      * Fix improper #endif nesting in C++
   5096      * Add explicit casts needed for C++
   5097
   5098    V2.8.0 Mon May 30 14:09:02 2005  Doug Lea  (dl at gee)
   5099      * Use trees for large bins
   5100      * Support mspaces
   5101      * Use segments to unify sbrk-based and mmap-based system allocation,
   5102        removing need for emulation on most platforms without sbrk.
   5103      * Default safety checks
   5104      * Optional footer checks. Thanks to William Robertson for the idea.
   5105      * Internal code refactoring
   5106      * Incorporate suggestions and platform-specific changes.
   5107        Thanks to Dennis Flanagan, Colin Plumb, Niall Douglas,
   5108        Aaron Bachmann,  Emery Berger, and others.
   5109      * Speed up non-fastbin processing enough to remove fastbins.
   5110      * Remove useless cfree() to avoid conflicts with other apps.
   5111      * Remove internal memcpy, memset. Compilers handle builtins better.
   5112      * Remove some options that no one ever used and rename others.
   5113
   5114    V2.7.2 Sat Aug 17 09:07:30 2002  Doug Lea  (dl at gee)
   5115      * Fix malloc_state bitmap array misdeclaration
   5116
   5117    V2.7.1 Thu Jul 25 10:58:03 2002  Doug Lea  (dl at gee)
   5118      * Allow tuning of FIRST_SORTED_BIN_SIZE
   5119      * Use PTR_UINT as type for all ptr->int casts. Thanks to John Belmonte.
   5120      * Better detection and support for non-contiguousness of MORECORE.
   5121        Thanks to Andreas Mueller, Conal Walsh, and Wolfram Gloger
   5122      * Bypass most of malloc if no frees. Thanks to Emery Berger.
   5123      * Fix freeing of old top non-contiguous chunk in sysmalloc.
   5124      * Raised default trim and map thresholds to 256K.
   5125      * Fix mmap-related #defines. Thanks to Lubos Lunak.
   5126      * Fix copy macros; added LACKS_FCNTL_H. Thanks to Neal Walfield.
   5127      * Branch-free bin calculation
   5128      * Default trim and mmap thresholds now 256K.
   5129
   5130    V2.7.0 Sun Mar 11 14:14:06 2001  Doug Lea  (dl at gee)
   5131      * Introduce independent_comalloc and independent_calloc.
   5132        Thanks to Michael Pachos for motivation and help.
   5133      * Make optional .h file available
   5134      * Allow > 2GB requests on 32bit systems.
   5135      * new WIN32 sbrk, mmap, munmap, lock code from <Walter@GeNeSys-e.de>.
   5136        Thanks also to Andreas Mueller <a.mueller at paradatec.de>,
   5137        and Anonymous.
   5138      * Allow override of MALLOC_ALIGNMENT (Thanks to Ruud Waij for
   5139        helping test this.)
   5140      * memalign: check alignment arg
   5141      * realloc: don't try to shift chunks backwards, since this
   5142        leads to  more fragmentation in some programs and doesn't
   5143        seem to help in any others.
   5144      * Collect all cases in malloc requiring system memory into sysmalloc
   5145      * Use mmap as backup to sbrk
   5146      * Place all internal state in malloc_state
   5147      * Introduce fastbins (although similar to 2.5.1)
   5148      * Many minor tunings and cosmetic improvements
   5149      * Introduce USE_PUBLIC_MALLOC_WRAPPERS, USE_MALLOC_LOCK
   5150      * Introduce MALLOC_FAILURE_ACTION, MORECORE_CONTIGUOUS
   5151        Thanks to Tony E. Bennett <tbennett@nvidia.com> and others.
   5152      * Include errno.h to support default failure action.
   5153
   5154    V2.6.6 Sun Dec  5 07:42:19 1999  Doug Lea  (dl at gee)
   5155      * return null for negative arguments
   5156      * Added several WIN32 cleanups from Martin C. Fong <mcfong at yahoo.com>
   5157         * Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h'
   5158          (e.g. WIN32 platforms)
   5159         * Cleanup header file inclusion for WIN32 platforms
   5160         * Cleanup code to avoid Microsoft Visual C++ compiler complaints
   5161         * Add 'USE_DL_PREFIX' to quickly allow co-existence with existing
   5162           memory allocation routines
   5163         * Set 'malloc_getpagesize' for WIN32 platforms (needs more work)
   5164         * Use 'assert' rather than 'ASSERT' in WIN32 code to conform to
   5165           usage of 'assert' in non-WIN32 code
   5166         * Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to
   5167           avoid infinite loop
   5168      * Always call 'fREe()' rather than 'free()'
   5169
   5170    V2.6.5 Wed Jun 17 15:57:31 1998  Doug Lea  (dl at gee)
   5171      * Fixed ordering problem with boundary-stamping
   5172
   5173    V2.6.3 Sun May 19 08:17:58 1996  Doug Lea  (dl at gee)
   5174      * Added pvalloc, as recommended by H.J. Liu
   5175      * Added 64bit pointer support mainly from Wolfram Gloger
   5176      * Added anonymously donated WIN32 sbrk emulation
   5177      * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen
   5178      * malloc_extend_top: fix mask error that caused wastage after
   5179        foreign sbrks
   5180      * Add linux mremap support code from HJ Liu
   5181
   5182    V2.6.2 Tue Dec  5 06:52:55 1995  Doug Lea  (dl at gee)
   5183      * Integrated most documentation with the code.
   5184      * Add support for mmap, with help from
   5185        Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
   5186      * Use last_remainder in more cases.
   5187      * Pack bins using idea from  colin@nyx10.cs.du.edu
   5188      * Use ordered bins instead of best-fit threshold
   5189      * Eliminate block-local decls to simplify tracing and debugging.
   5190      * Support another case of realloc via move into top
   5191      * Fix error occurring when initial sbrk_base not word-aligned.
   5192      * Rely on page size for units instead of SBRK_UNIT to
   5193        avoid surprises about sbrk alignment conventions.
   5194      * Add mallinfo, mallopt. Thanks to Raymond Nijssen
   5195        (raymond@es.ele.tue.nl) for the suggestion.
   5196      * Add `pad' argument to malloc_trim and top_pad mallopt parameter.
   5197      * More precautions for cases where other routines call sbrk,
   5198        courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
   5199      * Added macros etc., allowing use in linux libc from
   5200        H.J. Lu (hjl@gnu.ai.mit.edu)
   5201      * Inverted this history list
   5202
   5203    V2.6.1 Sat Dec  2 14:10:57 1995  Doug Lea  (dl at gee)
   5204      * Re-tuned and fixed to behave more nicely with V2.6.0 changes.
   5205      * Removed all preallocation code since under current scheme
   5206        the work required to undo bad preallocations exceeds
   5207        the work saved in good cases for most test programs.
   5208      * No longer use return list or unconsolidated bins since
   5209        no scheme using them consistently outperforms those that don't
   5210        given above changes.
   5211      * Use best fit for very large chunks to prevent some worst-cases.
   5212      * Added some support for debugging
   5213
   5214    V2.6.0 Sat Nov  4 07:05:23 1995  Doug Lea  (dl at gee)
   5215      * Removed footers when chunks are in use. Thanks to
   5216        Paul Wilson (wilson@cs.texas.edu) for the suggestion.
   5217
   5218    V2.5.4 Wed Nov  1 07:54:51 1995  Doug Lea  (dl at gee)
   5219      * Added malloc_trim, with help from Wolfram Gloger
   5220        (wmglo@Dent.MED.Uni-Muenchen.DE).
   5221
   5222    V2.5.3 Tue Apr 26 10:16:01 1994  Doug Lea  (dl at g)
   5223
   5224    V2.5.2 Tue Apr  5 16:20:40 1994  Doug Lea  (dl at g)
   5225      * realloc: try to expand in both directions
   5226      * malloc: swap order of clean-bin strategy;
   5227      * realloc: only conditionally expand backwards
   5228      * Try not to scavenge used bins
   5229      * Use bin counts as a guide to preallocation
   5230      * Occasionally bin return list chunks in first scan
   5231      * Add a few optimizations from colin@nyx10.cs.du.edu
   5232
   5233    V2.5.1 Sat Aug 14 15:40:43 1993  Doug Lea  (dl at g)
   5234      * faster bin computation & slightly different binning
   5235      * merged all consolidations to one part of malloc proper
   5236         (eliminating old malloc_find_space & malloc_clean_bin)
   5237      * Scan 2 returns chunks (not just 1)
   5238      * Propagate failure in realloc if malloc returns 0
   5239      * Add stuff to allow compilation on non-ANSI compilers
   5240          from kpv@research.att.com
   5241
   5242    V2.5 Sat Aug  7 07:41:59 1993  Doug Lea  (dl at g.oswego.edu)
   5243      * removed potential for odd address access in prev_chunk
   5244      * removed dependency on getpagesize.h
   5245      * misc cosmetics and a bit more internal documentation
   5246      * anticosmetics: mangled names in macros to evade debugger strangeness
   5247      * tested on sparc, hp-700, dec-mips, rs6000
   5248          with gcc & native cc (hp, dec only) allowing
   5249          Detlefs & Zorn comparison study (in SIGPLAN Notices.)
   5250
   5251    Trial version Fri Aug 28 13:14:29 1992  Doug Lea  (dl at g.oswego.edu)
   5252      * Based loosely on libg++-1.2X malloc. (It retains some of the overall
   5253         structure of old version,  but most details differ.)
   5254
   5255*/
   5256
   5257#endif /* !HAVE_MALLOC */
   5258
   5259/* vi: set ts=4 sw=4 expandtab: */