/ source / mimalloc / include / mimalloc / types.h
types.h
  1  /* ----------------------------------------------------------------------------
  2  Copyright (c) 2018-2024, Microsoft Research, Daan Leijen
  3  This is free software; you can redistribute it and/or modify it under the
  4  terms of the MIT license. A copy of the license can be found in the file
  5  "LICENSE" at the root of this distribution.
  6  -----------------------------------------------------------------------------*/
  7  #pragma once
  8  #ifndef MIMALLOC_TYPES_H
  9  #define MIMALLOC_TYPES_H
 10  
 11  // --------------------------------------------------------------------------
 12  // This file contains the main type definitions for mimalloc:
 13  // mi_heap_t      : all data for a thread-local heap, contains
 14  //                  lists of all managed heap pages.
// mi_segment_t   : a larger chunk of memory (4MiB) from where pages
 16  //                  are allocated.
 17  // mi_page_t      : a mimalloc page (usually 64KiB or 512KiB) from
 18  //                  where objects are allocated.
 19  //                  Note: we write "OS page" for OS memory pages while
 20  //                  using plain "page" for mimalloc pages (`mi_page_t`).
 21  // --------------------------------------------------------------------------
 22  
 23  
 24  #include <stddef.h>   // ptrdiff_t
 25  #include <stdint.h>   // uintptr_t, uint16_t, etc
 26  #include "atomic.h"   // _Atomic
 27  
 28  #ifdef _MSC_VER
 29  #pragma warning(disable:4214) // bitfield is not int
 30  #endif
 31  
 32  // Minimal alignment necessary. On most platforms 16 bytes are needed
 33  // due to SSE registers for example. This must be at least `sizeof(void*)`
 34  #ifndef MI_MAX_ALIGN_SIZE
 35  #define MI_MAX_ALIGN_SIZE  16   // sizeof(max_align_t)
 36  #endif
 37  
 38  // ------------------------------------------------------
 39  // Variants
 40  // ------------------------------------------------------
 41  
 42  // Define NDEBUG in the release version to disable assertions.
 43  // #define NDEBUG
 44  
 45  // Define MI_TRACK_<tool> to enable tracking support
 46  // #define MI_TRACK_VALGRIND 1
 47  // #define MI_TRACK_ASAN     1
 48  // #define MI_TRACK_ETW      1
 49  
 50  // Define MI_STAT as 1 to maintain statistics; set it to 2 to have detailed statistics (but costs some performance).
 51  // #define MI_STAT 1
 52  
 53  // Define MI_SECURE to enable security mitigations
 54  // #define MI_SECURE 1  // guard page around metadata
 55  // #define MI_SECURE 2  // guard page around each mimalloc page
 56  // #define MI_SECURE 3  // encode free lists (detect corrupted free list (buffer overflow), and invalid pointer free)
 57  // #define MI_SECURE 4  // checks for double free. (may be more expensive)
 58  
 59  #if !defined(MI_SECURE)
 60  #define MI_SECURE 0
 61  #endif
 62  
 63  // Define MI_DEBUG for debug mode
 64  // #define MI_DEBUG 1  // basic assertion checks and statistics, check double free, corrupted free list, and invalid pointer free.
 65  // #define MI_DEBUG 2  // + internal assertion checks
 66  // #define MI_DEBUG 3  // + extensive internal invariant checking (cmake -DMI_DEBUG_FULL=ON)
 67  #if !defined(MI_DEBUG)
 68  #if !defined(NDEBUG) || defined(_DEBUG)
 69  #define MI_DEBUG 2
 70  #else
 71  #define MI_DEBUG 0
 72  #endif
 73  #endif
 74  
 75  // Use guard pages behind objects of a certain size (set by the MIMALLOC_DEBUG_GUARDED_MIN/MAX options)
 76  // Padding should be disabled when using guard pages
 77  // #define MI_DEBUG_GUARDED 1
 78  #if defined(MI_DEBUG_GUARDED)
 79  #define MI_PADDING  0
 80  #endif
 81  
 82  // Reserve extra padding at the end of each block to be more resilient against heap block overflows.
 83  // The padding can detect buffer overflow on free.
 84  #if !defined(MI_PADDING) && (MI_SECURE>=3 || MI_DEBUG>=1 || (MI_TRACK_VALGRIND || MI_TRACK_ASAN || MI_TRACK_ETW))
 85  #define MI_PADDING  1
 86  #endif
 87  
 88  // Check padding bytes; allows byte-precise buffer overflow detection
 89  #if !defined(MI_PADDING_CHECK) && MI_PADDING && (MI_SECURE>=3 || MI_DEBUG>=1)
 90  #define MI_PADDING_CHECK 1
 91  #endif
 92  
 93  
 94  // Encoded free lists allow detection of corrupted free lists
 95  // and can detect buffer overflows, modify after free, and double `free`s.
 96  #if (MI_SECURE>=3 || MI_DEBUG>=1)
 97  #define MI_ENCODE_FREELIST  1
 98  #endif
 99  
100  
101  // We used to abandon huge pages in order to eagerly deallocate it if freed from another thread.
102  // Unfortunately, that makes it not possible to visit them during a heap walk or include them in a
103  // `mi_heap_destroy`. We therefore instead reset/decommit the huge blocks nowadays if freed from
104  // another thread so the memory becomes "virtually" available (and eventually gets properly freed by
105  // the owning thread).
106  // #define MI_HUGE_PAGE_ABANDON 1
107  
108  
109  // ------------------------------------------------------
110  // Platform specific values
111  // ------------------------------------------------------
112  
113  // ------------------------------------------------------
114  // Size of a pointer.
115  // We assume that `sizeof(void*)==sizeof(intptr_t)`
116  // and it holds for all platforms we know of.
117  //
118  // However, the C standard only requires that:
119  //  p == (void*)((intptr_t)p))
120  // but we also need:
121  //  i == (intptr_t)((void*)i)
122  // or otherwise one might define an intptr_t type that is larger than a pointer...
123  // ------------------------------------------------------
124  
125  #if INTPTR_MAX > INT64_MAX
126  # define MI_INTPTR_SHIFT (4)  // assume 128-bit  (as on arm CHERI for example)
127  #elif INTPTR_MAX == INT64_MAX
128  # define MI_INTPTR_SHIFT (3)
129  #elif INTPTR_MAX == INT32_MAX
130  # define MI_INTPTR_SHIFT (2)
131  #else
132  #error platform pointers must be 32, 64, or 128 bits
133  #endif
134  
135  #if SIZE_MAX == UINT64_MAX
136  # define MI_SIZE_SHIFT (3)
137  typedef int64_t  mi_ssize_t;
138  #elif SIZE_MAX == UINT32_MAX
139  # define MI_SIZE_SHIFT (2)
140  typedef int32_t  mi_ssize_t;
141  #else
142  #error platform objects must be 32 or 64 bits
143  #endif
144  
145  #if (SIZE_MAX/2) > LONG_MAX
146  # define MI_ZU(x)  x##ULL
147  # define MI_ZI(x)  x##LL
148  #else
149  # define MI_ZU(x)  x##UL
150  # define MI_ZI(x)  x##L
151  #endif
152  
153  #define MI_INTPTR_SIZE  (1<<MI_INTPTR_SHIFT)
154  #define MI_INTPTR_BITS  (MI_INTPTR_SIZE*8)
155  
156  #define MI_SIZE_SIZE  (1<<MI_SIZE_SHIFT)
157  #define MI_SIZE_BITS  (MI_SIZE_SIZE*8)
158  
159  #define MI_KiB     (MI_ZU(1024))
160  #define MI_MiB     (MI_KiB*MI_KiB)
161  #define MI_GiB     (MI_MiB*MI_KiB)
162  
163  
164  // ------------------------------------------------------
165  // Main internal data-structures
166  // ------------------------------------------------------
167  
168  // Main tuning parameters for segment and page sizes
169  // Sizes for 64-bit, divide by two for 32-bit
170  #ifndef MI_SMALL_PAGE_SHIFT
171  #define MI_SMALL_PAGE_SHIFT               (13 + MI_INTPTR_SHIFT)      // 64KiB
172  #endif
173  #ifndef MI_MEDIUM_PAGE_SHIFT
174  #define MI_MEDIUM_PAGE_SHIFT              ( 3 + MI_SMALL_PAGE_SHIFT)  // 512KiB
175  #endif
176  #ifndef MI_LARGE_PAGE_SHIFT
177  #define MI_LARGE_PAGE_SHIFT               ( 3 + MI_MEDIUM_PAGE_SHIFT) // 4MiB
178  #endif
179  #ifndef MI_SEGMENT_SHIFT
180  #define MI_SEGMENT_SHIFT                  ( MI_LARGE_PAGE_SHIFT)      // 4MiB -- must be equal to `MI_LARGE_PAGE_SHIFT`
181  #endif
182  
183  // Derived constants
184  #define MI_SEGMENT_SIZE                   (MI_ZU(1)<<MI_SEGMENT_SHIFT)
185  #define MI_SEGMENT_ALIGN                  (MI_SEGMENT_SIZE)
186  #define MI_SEGMENT_MASK                   ((uintptr_t)(MI_SEGMENT_ALIGN - 1))
187  
188  #define MI_SMALL_PAGE_SIZE                (MI_ZU(1)<<MI_SMALL_PAGE_SHIFT)
189  #define MI_MEDIUM_PAGE_SIZE               (MI_ZU(1)<<MI_MEDIUM_PAGE_SHIFT)
190  #define MI_LARGE_PAGE_SIZE                (MI_ZU(1)<<MI_LARGE_PAGE_SHIFT)
191  
192  #define MI_SMALL_PAGES_PER_SEGMENT        (MI_SEGMENT_SIZE/MI_SMALL_PAGE_SIZE)
193  #define MI_MEDIUM_PAGES_PER_SEGMENT       (MI_SEGMENT_SIZE/MI_MEDIUM_PAGE_SIZE)
194  #define MI_LARGE_PAGES_PER_SEGMENT        (MI_SEGMENT_SIZE/MI_LARGE_PAGE_SIZE)
195  
196  // The max object size are checked to not waste more than 12.5% internally over the page sizes.
197  // (Except for large pages since huge objects are allocated in 4MiB chunks)
198  #define MI_SMALL_OBJ_SIZE_MAX             (MI_SMALL_PAGE_SIZE/4)   // 16KiB
199  #define MI_MEDIUM_OBJ_SIZE_MAX            (MI_MEDIUM_PAGE_SIZE/4)  // 128KiB
200  #define MI_LARGE_OBJ_SIZE_MAX             (MI_LARGE_PAGE_SIZE/2)   // 2MiB
201  #define MI_LARGE_OBJ_WSIZE_MAX            (MI_LARGE_OBJ_SIZE_MAX/MI_INTPTR_SIZE)
202  
203  // Maximum number of size classes. (spaced exponentially in 12.5% increments)
204  #define MI_BIN_HUGE  (73U)
205  
206  #if (MI_LARGE_OBJ_WSIZE_MAX >= 655360)
207  #error "mimalloc internal: define more bins"
208  #endif
209  
210  // Maximum block size for which blocks are guaranteed to be block size aligned. (see `segment.c:_mi_segment_page_start`)
211  #define MI_MAX_ALIGN_GUARANTEE   (MI_MEDIUM_OBJ_SIZE_MAX)
212  
213  // Alignments over MI_BLOCK_ALIGNMENT_MAX are allocated in dedicated huge page segments
214  #define MI_BLOCK_ALIGNMENT_MAX   (MI_SEGMENT_SIZE >> 1)
215  
216  // We never allocate more than PTRDIFF_MAX (see also <https://sourceware.org/ml/libc-announce/2019/msg00001.html>)
217  #define MI_MAX_ALLOC_SIZE   PTRDIFF_MAX
218  
219  // ------------------------------------------------------
220  // Mimalloc pages contain allocated blocks
221  // ------------------------------------------------------
222  
// The free lists use encoded next fields
// (only actually encoded when MI_ENCODE_FREELIST is defined; otherwise a plain pointer value).
typedef uintptr_t  mi_encoded_t;
226  
// Thread id's (large enough to hold an OS thread identifier)
typedef size_t     mi_threadid_t;

// Free lists contain blocks; the first word of a free block links to the next
// free block (stored as `mi_encoded_t` so it can be encoded when MI_ENCODE_FREELIST is enabled).
typedef struct mi_block_s {
  mi_encoded_t next;   // (possibly encoded) next block in the free list
} mi_block_t;
234  
235  
// The delayed flags are used for efficient multi-threaded free-ing.
// These are stored in the bottom 2 bits of a `mi_thread_free_t` (see below).
typedef enum mi_delayed_e {
  MI_USE_DELAYED_FREE   = 0, // push on the owning heap thread delayed list
  MI_DELAYED_FREEING    = 1, // temporary: another thread is accessing the owning heap
  MI_NO_DELAYED_FREE    = 2, // optimize: push on page local thread free queue if another block is already in the heap thread delayed free list
  MI_NEVER_DELAYED_FREE = 3  // sticky: used for abandoned pages without an owning heap; this only resets on page reclaim
} mi_delayed_t;
243  
244  
// The `in_full` and `has_aligned` page flags are put in a union to efficiently
// test if both are false (`full_aligned == 0`) in the `mi_free` routine.
// Reading `full_aligned` observes all flag bits at once through the union.
#if !MI_TSAN
typedef union mi_page_flags_s {
  uint8_t full_aligned;       // all flags as one byte (0 when no flag is set)
  struct {
    uint8_t in_full : 1;      // page is in the heap's "full" queue
    uint8_t has_aligned : 1;  // page (may) contain aligned allocations
    uint8_t has_guarded : 1;  // only used with MI_DEBUG_GUARDED
  } x;
} mi_page_flags_t;
#else
// under thread sanitizer, use a byte for each flag to suppress warning, issue #130
typedef union mi_page_flags_s {
  uint32_t full_aligned;      // all flag bytes as one word (0 when no flag is set)
  struct {
    uint8_t in_full;
    uint8_t has_aligned;
    uint8_t has_guarded; // only used with MI_DEBUG_GUARDED
  } x;
} mi_page_flags_t;
#endif
267  
// Thread free list.
// We use the bottom 2 bits of the pointer for `mi_delayed_t` flags;
// the remaining bits hold the head block of the cross-thread free list.
typedef uintptr_t mi_thread_free_t;
271  
272  // A page contains blocks of one specific size (`block_size`).
// Each page has three lists of free blocks:
274  // `free` for blocks that can be allocated,
275  // `local_free` for freed blocks that are not yet available to `mi_malloc`
276  // `thread_free` for freed blocks by other threads
277  // The `local_free` and `thread_free` lists are migrated to the `free` list
278  // when it is exhausted. The separate `local_free` list is necessary to
279  // implement a monotonic heartbeat. The `thread_free` list is needed for
280  // avoiding atomic operations in the common case.
281  //
282  // `used - |thread_free|` == actual blocks that are in use (alive)
283  // `used - |thread_free| + |free| + |local_free| == capacity`
284  //
285  // We don't count `freed` (as |free|) but use `used` to reduce
286  // the number of memory accesses in the `mi_page_all_free` function(s).
287  //
288  // Notes:
289  // - Access is optimized for `free.c:mi_free` and `alloc.c:mi_page_alloc`
290  // - Using `uint16_t` does not seem to slow things down
291  // - The size is 10 words on 64-bit which helps the page index calculations
292  //   (and 12 words on 32-bit, and encoded free lists add 2 words)
293  // - `xthread_free` uses the bottom bits as a delayed-free flags to optimize
294  //   concurrent frees where only the first concurrent free adds to the owning
295  //   heap `thread_delayed_free` list (see `free.c:mi_free_block_mt`).
296  //   The invariant is that no-delayed-free is only set if there is
//   at least one block that will be added, or has already been added, to
298  //   the owning heap `thread_delayed_free` list. This guarantees that pages
299  //   will be freed correctly even if only other threads free blocks.
typedef struct mi_page_s {
  // "owned" by the segment
  uint8_t               segment_idx;       // index in the segment `pages` array, `page == &segment->pages[page->segment_idx]`
  uint8_t               segment_in_use:1;  // `true` if the segment allocated this page
  uint8_t               is_committed:1;    // `true` if the page virtual memory is committed
  uint8_t               is_zero_init:1;    // `true` if the page was initially zero initialized
  uint8_t               is_huge:1;         // `true` if the page is in a huge segment

  // layout like this to optimize access in `mi_malloc` and `mi_free`
  uint16_t              capacity;          // number of blocks committed, must be the first field, see `segment.c:page_clear`
  uint16_t              reserved;          // number of blocks reserved in memory
  mi_page_flags_t       flags;             // `in_full` and `has_aligned` flags (8 bits)
  uint8_t               free_is_zero:1;    // `true` if the blocks in the free list are zero initialized
  uint8_t               retire_expire:7;   // expiration count for retired blocks

  mi_block_t*           free;              // list of available free blocks (`malloc` allocates from this list)
  mi_block_t*           local_free;        // list of deferred free blocks by this thread (migrates to `free`)
  uint16_t              used;              // number of blocks in use (including blocks in `thread_free`)
  uint8_t               block_size_shift;  // if not zero, then `(1 << block_size_shift) == block_size` (only used for fast path in `free.c:_mi_page_ptr_unalign`)
  uint8_t               heap_tag;          // tag of the owning heap, used to separate heaps by object type
                                           // padding
  size_t                block_size;        // size available in each block (always `>0`)
  uint8_t*              page_start;        // start of the page area containing the blocks

  #if (MI_ENCODE_FREELIST || MI_PADDING)
  uintptr_t             keys[2];           // two random keys to encode the free lists (see `_mi_block_next`) or padding canary
  #endif

  _Atomic(mi_thread_free_t) xthread_free;  // list of deferred free blocks freed by other threads
  _Atomic(uintptr_t)        xheap;         // NOTE(review): presumably the owning heap stored as a uintptr_t -- confirm against `page.c` accessors

  struct mi_page_s*     next;              // next page owned by the heap with the same `block_size`
  struct mi_page_s*     prev;              // previous page owned by the heap with the same `block_size`

  #if MI_INTPTR_SIZE==4                    // pad to 12 words on 32-bit
  void* padding[1];
  #endif
} mi_page_t;
338  
339  
340  
341  // ------------------------------------------------------
342  // Mimalloc segments contain mimalloc pages
343  // ------------------------------------------------------
344  
// The kind of a page determines its size and which segment page queue it uses.
typedef enum mi_page_kind_e {
  MI_PAGE_SMALL,    // small blocks go into 64KiB pages inside a segment
  MI_PAGE_MEDIUM,   // medium blocks go into 512KiB pages inside a segment
  MI_PAGE_LARGE,    // larger blocks go into a single page spanning a whole segment
  MI_PAGE_HUGE      // a huge page is a single page in a segment of variable size (but still 2MiB aligned)
                    // used for blocks `> MI_LARGE_OBJ_SIZE_MAX` or an alignment `> MI_BLOCK_ALIGNMENT_MAX`.
} mi_page_kind_t;
352  
353  
354  // ---------------------------------------------------------------
355  // a memory id tracks the provenance of arena/OS allocated memory
356  // ---------------------------------------------------------------
357  
// Memory can reside in arena's, direct OS allocated, or statically allocated. The memid keeps track of this.
typedef enum mi_memkind_e {
  MI_MEM_NONE,      // not allocated
  MI_MEM_EXTERNAL,  // not owned by mimalloc but provided externally (via `mi_manage_os_memory` for example)
  MI_MEM_STATIC,    // allocated in a static area and should not be freed (for arena meta data for example)
  MI_MEM_OS,        // allocated from the OS
  MI_MEM_OS_HUGE,   // allocated as huge OS pages (usually 1GiB, pinned to physical memory)
  MI_MEM_OS_REMAP,  // allocated in a remapable area (i.e. using `mremap`)
  MI_MEM_ARENA      // allocated from an arena (the usual case)
} mi_memkind_t;

// Was the memory allocated directly from the OS?
// (i.e. one of the `MI_MEM_OS*` kinds; memory provided externally or from an arena is not.)
static inline bool mi_memkind_is_os(mi_memkind_t memkind) {
  switch (memkind) {
    case MI_MEM_OS:
    case MI_MEM_OS_HUGE:
    case MI_MEM_OS_REMAP:
      return true;
    default:
      return false;
  }
}
372  
// Provenance info for memory allocated directly from the OS (MI_MEM_OS* kinds).
typedef struct mi_memid_os_info {
  void*         base;               // actual base address of the block (used for offset aligned allocations)
  size_t        alignment;          // alignment at allocation
} mi_memid_os_info_t;
377  
// Provenance info for memory allocated from an arena (MI_MEM_ARENA kind).
typedef struct mi_memid_arena_info {
  size_t        block_index;        // index in the arena
  mi_arena_id_t id;                 // arena id (>= 1)
  bool          is_exclusive;       // this arena can only be used for specific arena allocations
} mi_memid_arena_info_t;
383  
// A memid tracks where a block of memory came from (see `mi_memkind_t`),
// together with kind-specific provenance info and commit/zero state at allocation.
typedef struct mi_memid_s {
  union {
    mi_memid_os_info_t    os;       // only used for MI_MEM_OS
    mi_memid_arena_info_t arena;    // only used for MI_MEM_ARENA
  } mem;
  bool          is_pinned;          // `true` if we cannot decommit/reset/protect in this memory (e.g. when allocated using large (2MiB) or huge (1GiB) OS pages)
  bool          initially_committed;// `true` if the memory was originally allocated as committed
  bool          initially_zero;     // `true` if the memory was originally zero initialized
  mi_memkind_t  memkind;            // discriminates the `mem` union above
} mi_memid_t;
394  
395  
396  // ---------------------------------------------------------------
397  // Segments contain mimalloc pages
398  // ---------------------------------------------------------------
399  typedef struct mi_subproc_s mi_subproc_t;
400  
// Segments are large allocated memory blocks (4MiB on 64-bit, see MI_SEGMENT_SHIFT) from the OS.
// Inside segments we allocate fixed size _pages_ that contain blocks.
typedef struct mi_segment_s {
  // constant fields
  mi_memid_t           memid;            // memory id to track provenance
  bool                 allow_decommit;   // `true` if the segment memory may be decommitted
  bool                 allow_purge;      // `true` if the segment memory may be purged (reset/decommitted when free)
  size_t               segment_size;     // for huge pages this may be different from `MI_SEGMENT_SIZE`
  mi_subproc_t*        subproc;          // segment belongs to sub process

  // segment fields
  struct mi_segment_s* next;             // must be the first (non-constant) segment field  -- see `segment.c:segment_init`
  struct mi_segment_s* prev;
  bool                 was_reclaimed;    // true if it was reclaimed (used to limit on-free reclamation)

  size_t               abandoned;        // abandoned pages (i.e. the original owning thread stopped) (`abandoned <= used`)
  size_t               abandoned_visits; // count how often this segment is visited for reclaiming (to force reclaim if it is too long)

  size_t               used;             // count of pages in use (`used <= capacity`)
  size_t               capacity;         // count of available pages (`#free + used`)
  size_t               segment_info_size;// space we are using from the first page for segment meta-data and possible guard pages.
  uintptr_t            cookie;           // verify addresses in secure mode: `_mi_ptr_cookie(segment) == segment->cookie`

  struct mi_segment_s* abandoned_os_next; // only used for abandoned segments outside arena's, and only if `mi_option_visit_abandoned` is enabled
  struct mi_segment_s* abandoned_os_prev;

  // layout like this to optimize access in `mi_free`
  _Atomic(mi_threadid_t) thread_id;      // unique id of the thread owning this segment
  size_t               page_shift;       // `1 << page_shift` == the page sizes == `page->block_size * page->reserved` (unless the first page, then `-segment_info_size`).
  mi_page_kind_t       page_kind;        // kind of pages: small, medium, large, or huge
  mi_page_t            pages[1];         // up to `MI_SMALL_PAGES_PER_SEGMENT` pages
} mi_segment_t;
433  
434  
435  // ------------------------------------------------------
436  // Heaps
437  // Provide first-class heaps to allocate from.
438  // A heap just owns a set of pages for allocation and
// can only allocate/reallocate from the thread that created it.
440  // Freeing blocks can be done from any thread though.
441  // Per thread, the segments are shared among its heaps.
442  // Per thread, there is always a default heap that is
443  // used for allocation; it is initialized to statically
444  // point to an empty heap to avoid initialization checks
445  // in the fast path.
446  // ------------------------------------------------------
447  
// Thread local data (defined below)
typedef struct mi_tld_s mi_tld_t;

// Pages of a certain block size are held in a doubly-linked queue
// (linked through `mi_page_t.next`/`prev`).
typedef struct mi_page_queue_s {
  mi_page_t* first;       // head of the queue (or NULL when empty)
  mi_page_t* last;        // tail of the queue (or NULL when empty)
  size_t     block_size;  // block size of the pages in this queue
} mi_page_queue_t;
457  
458  #define MI_BIN_FULL  (MI_BIN_HUGE+1)
459  
// Random context (chacha-style state: 16-word input/output blocks).
// NOTE(review): struct tag is `mi_random_cxt_s` (historical typo) while the
// typedef is `mi_random_ctx_t`; the tag is kept as-is for source compatibility.
typedef struct mi_random_cxt_s {
  uint32_t input[16];           // cipher input state
  uint32_t output[16];          // generated output words
  int      output_available;    // number of output words still available
  bool     weak;                // `true` if seeded from a weak entropy source
} mi_random_ctx_t;
467  
468  
// In debug mode there is a padding structure at the end of the blocks to check for buffer overflows
#if (MI_PADDING)
typedef struct mi_padding_s {
  uint32_t canary; // encoded block value to check validity of the padding (in case of overflow)
  uint32_t delta;  // padding bytes before the block. (mi_usable_size(p) - delta == exact allocated bytes)
} mi_padding_t;
#define MI_PADDING_SIZE   (sizeof(mi_padding_t))
// padding size rounded up to whole machine words
#define MI_PADDING_WSIZE  ((MI_PADDING_SIZE + MI_INTPTR_SIZE - 1) / MI_INTPTR_SIZE)
#else
#define MI_PADDING_SIZE   0
#define MI_PADDING_WSIZE  0
#endif
481  
482  #define MI_PAGES_DIRECT   (MI_SMALL_WSIZE_MAX + MI_PADDING_WSIZE + 1)
483  
484  
// A heap owns a set of pages.
struct mi_heap_s {
  mi_tld_t*             tld;                                 // thread-local data of the owning thread
  _Atomic(mi_block_t*)  thread_delayed_free;                 // delayed-free list of blocks freed from other threads (see `mi_delayed_t`)
  mi_threadid_t         thread_id;                           // thread this heap belongs to
  mi_arena_id_t         arena_id;                            // arena id if the heap belongs to a specific arena (or 0)
  uintptr_t             cookie;                              // random cookie to verify pointers (see `_mi_ptr_cookie`)
  uintptr_t             keys[2];                             // two random keys used to encode the `thread_delayed_free` list
  mi_random_ctx_t       random;                              // random number context used for secure allocation
  size_t                page_count;                          // total number of pages in the `pages` queues.
  size_t                page_retired_min;                    // smallest retired index (retired pages are fully free, but still in the page queues)
  size_t                page_retired_max;                    // largest retired index into the `pages` array.
  mi_heap_t*            next;                                // list of heaps per thread
  bool                  no_reclaim;                          // `true` if this heap should not reclaim abandoned pages
  uint8_t               tag;                                 // custom tag, can be used for separating heaps based on the object types
  mi_page_t*            pages_free_direct[MI_PAGES_DIRECT];  // optimize: array where every entry points a page with possibly free blocks in the corresponding queue for that size.
  mi_page_queue_t       pages[MI_BIN_FULL + 1];              // queue of pages for each size class (or "bin")
};
503  
504  
505  
506  // ------------------------------------------------------
507  // Debug
508  // ------------------------------------------------------
509  
// Fill byte for uninitialized memory in debug mode
#if !defined(MI_DEBUG_UNINIT)
#define MI_DEBUG_UNINIT     (0xD0)
#endif
// Fill byte for freed memory in debug mode
#if !defined(MI_DEBUG_FREED)
#define MI_DEBUG_FREED      (0xDF)
#endif
// Fill byte for block padding in debug mode
#if !defined(MI_DEBUG_PADDING)
#define MI_DEBUG_PADDING    (0xDE)
#endif

#if (MI_DEBUG)
// use our own assertion to print without memory allocation
void _mi_assert_fail(const char* assertion, const char* fname, unsigned int line, const char* func );
#define mi_assert(expr)     ((expr) ? (void)0 : _mi_assert_fail(#expr,__FILE__,__LINE__,__func__))
#else
#define mi_assert(x)
#endif

// internal assertions: only enabled at MI_DEBUG > 1
#if (MI_DEBUG>1)
#define mi_assert_internal    mi_assert
#else
#define mi_assert_internal(x)
#endif

// expensive (invariant-checking) assertions: only enabled at MI_DEBUG > 2
#if (MI_DEBUG>2)
#define mi_assert_expensive   mi_assert
#else
#define mi_assert_expensive(x)
#endif
539  
540  // ------------------------------------------------------
541  // Statistics
542  // ------------------------------------------------------
543  
544  #ifndef MI_STAT
545  #if (MI_DEBUG>0)
546  #define MI_STAT 2
547  #else
548  #define MI_STAT 0
549  #endif
550  #endif
551  
// A statistic that tracks a running current/peak value besides the
// cumulative allocated and freed totals.
typedef struct mi_stat_count_s {
  int64_t allocated;   // total amount ever allocated
  int64_t freed;       // total amount ever freed
  int64_t peak;        // peak of `current`
  int64_t current;     // current amount (allocated - freed)
} mi_stat_count_t;
558  
// A simple monotonic counter statistic.
typedef struct mi_stat_counter_s {
  int64_t total;   // sum of all increase amounts
  int64_t count;   // number of increases
} mi_stat_counter_t;
563  
// All statistics maintained per thread (in `mi_tld_t.stats`);
// only tracked when MI_STAT >= 1, with per-bin detail at MI_STAT >= 2.
typedef struct mi_stats_s {
  mi_stat_count_t segments;
  mi_stat_count_t pages;
  mi_stat_count_t reserved;
  mi_stat_count_t committed;
  mi_stat_count_t reset;
  mi_stat_count_t purged;
  mi_stat_count_t page_committed;
  mi_stat_count_t segments_abandoned;
  mi_stat_count_t pages_abandoned;
  mi_stat_count_t threads;
  mi_stat_count_t normal;
  mi_stat_count_t huge;
  mi_stat_count_t giant;
  mi_stat_count_t malloc;
  mi_stat_count_t segments_cache;
  mi_stat_counter_t pages_extended;
  mi_stat_counter_t mmap_calls;
  mi_stat_counter_t commit_calls;
  mi_stat_counter_t reset_calls;
  mi_stat_counter_t purge_calls;
  mi_stat_counter_t page_no_retire;
  mi_stat_counter_t searches;
  mi_stat_counter_t normal_count;
  mi_stat_counter_t huge_count;
  mi_stat_counter_t arena_count;
  mi_stat_counter_t arena_crossover_count;
  mi_stat_counter_t arena_rollback_count;
#if MI_STAT>1
  mi_stat_count_t normal_bins[MI_BIN_HUGE+1];   // per size-class statistics
#endif
} mi_stats_t;
596  
597  
// Statistic update functions (implemented in `stats.c`)
void _mi_stat_increase(mi_stat_count_t* stat, size_t amount);
void _mi_stat_decrease(mi_stat_count_t* stat, size_t amount);
void _mi_stat_counter_increase(mi_stat_counter_t* stat, size_t amount);

// The macros compile to no-ops when statistics are disabled (MI_STAT == 0)
#if (MI_STAT)
#define mi_stat_increase(stat,amount)         _mi_stat_increase( &(stat), amount)
#define mi_stat_decrease(stat,amount)         _mi_stat_decrease( &(stat), amount)
#define mi_stat_counter_increase(stat,amount) _mi_stat_counter_increase( &(stat), amount)
#else
#define mi_stat_increase(stat,amount)         (void)0
#define mi_stat_decrease(stat,amount)         (void)0
#define mi_stat_counter_increase(stat,amount) (void)0
#endif

// Convenience wrappers that update a statistic in the heap's thread-local stats
#define mi_heap_stat_counter_increase(heap,stat,amount)  mi_stat_counter_increase( (heap)->tld->stats.stat, amount)
#define mi_heap_stat_increase(heap,stat,amount)  mi_stat_increase( (heap)->tld->stats.stat, amount)
#define mi_heap_stat_decrease(heap,stat,amount)  mi_stat_decrease( (heap)->tld->stats.stat, amount)
615  
616  
617  // ------------------------------------------------------
618  // Sub processes do not reclaim or visit segments
619  // from other sub processes
620  // ------------------------------------------------------
621  
// A sub-process isolates its segments: they are not reclaimed or visited
// from other sub-processes (see section comment above).
struct mi_subproc_s {
  _Atomic(size_t)    abandoned_count;         // count of abandoned segments for this sub-process
  _Atomic(size_t)    abandoned_os_list_count; // count of abandoned segments in the os-list
  mi_lock_t          abandoned_os_lock;       // lock for the abandoned os segment list (outside of arena's) (this lock protects list operations)
  mi_lock_t          abandoned_os_visit_lock; // ensure only one thread per subproc visits the abandoned os list
  mi_segment_t*      abandoned_os_list;       // doubly-linked list of abandoned segments outside of arena's (in OS allocated memory)
  mi_segment_t*      abandoned_os_list_tail;  // the tail-end of the list
  mi_memid_t         memid;                   // provenance of this memory block
};
631  
632  // ------------------------------------------------------
633  // Thread Local data
634  // ------------------------------------------------------
635  
// Milliseconds as in `int64_t` to avoid overflows
typedef int64_t  mi_msecs_t;

// Queue of segments (linked through `mi_segment_t.next`/`prev`)
typedef struct mi_segment_queue_s {
  mi_segment_t* first;   // head of the queue (or NULL when empty)
  mi_segment_t* last;    // tail of the queue (or NULL when empty)
} mi_segment_queue_t;
644  
// OS thread local data
typedef struct mi_os_tld_s {
  size_t                region_idx;   // start point for next allocation
  mi_stats_t*           stats;        // points to tld stats
} mi_os_tld_t;
650  
// Segments thread local data
typedef struct mi_segments_tld_s {
  mi_segment_queue_t  small_free;   // queue of segments with free small pages
  mi_segment_queue_t  medium_free;  // queue of segments with free medium pages
  mi_page_queue_t     pages_purge;  // queue of freed pages that are delay purged
  size_t              count;        // current number of segments
  size_t              peak_count;   // peak number of segments
  size_t              current_size; // current size of all segments
  size_t              peak_size;    // peak size of all segments
  size_t              reclaim_count;// number of reclaimed (abandoned) segments
  mi_subproc_t*       subproc;      // sub-process this thread belongs to.
  mi_stats_t*         stats;        // points to tld stats
  mi_os_tld_t*        os;           // points to os tld
} mi_segments_tld_t;
665  
// Thread local data: per-thread state shared by all heaps of that thread.
struct mi_tld_s {
  unsigned long long  heartbeat;     // monotonic heartbeat count
  bool                recurse;       // true if deferred was called; used to prevent infinite recursion.
  mi_heap_t*          heap_backing;  // backing heap of this thread (cannot be deleted)
  mi_heap_t*          heaps;         // list of heaps in this thread (so we can abandon all when the thread terminates)
  mi_segments_tld_t   segments;      // segment tld
  mi_os_tld_t         os;            // os tld
  mi_stats_t          stats;         // statistics
};
676  
677  #endif