/* ----------------------------------------------------------------------------
Copyright (c) 2019-2024, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/

/* ----------------------------------------------------------------------------
"Arenas" are fixed areas of OS memory from which we can allocate
large blocks (>= MI_ARENA_MIN_OBJ_SIZE, 32MiB).
In contrast to the rest of mimalloc, the arenas are shared between
threads and need to be accessed using atomic operations.

Arenas are also used for huge OS page (1GiB) reservations or for reserving
OS memory upfront which can improve performance or is sometimes needed
on embedded devices. We can also employ this with WASI or `sbrk` systems
to reserve large arenas upfront and be able to reuse the memory more effectively.

The arena allocation needs to be thread safe and we use an atomic bitmap to allocate.
-----------------------------------------------------------------------------*/
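
// Illustrative sketch (not compiled): how a client can reserve an arena upfront
// and let mimalloc allocate from it. `mi_reserve_os_memory_ex` and
// `mi_manage_os_memory_ex` are the public entry points defined further below in
// this file; error handling is elided for brevity.
//
//   // reserve a 1 GiB non-exclusive arena of regular OS memory
//   mi_arena_id_t arena_id;
//   if (mi_reserve_os_memory_ex(1024 * 1024 * 1024, false /* commit */,
//                               true /* allow_large */, false /* exclusive */,
//                               &arena_id) == 0) {
//     // subsequent large allocations can now be served from this arena
//   }
//
//   // or hand an existing memory range to mimalloc to manage as an arena
//   // (the range must be at least MI_ARENA_BLOCK_SIZE and segment aligned)
//   // mi_manage_os_memory_ex(start, size, is_committed, is_large, is_zero,
//   //                        numa_node, exclusive, &arena_id);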

#include "mimalloc.h"
#include "mimalloc/internal.h"
#include "mimalloc/atomic.h"
#include "bitmap.h"


/* -----------------------------------------------------------
  Arena allocation
----------------------------------------------------------- */

// A memory arena descriptor
typedef struct mi_arena_s {
  mi_arena_id_t       id;                   // arena id; 0 for non-specific
  mi_memid_t          memid;                // memid of the memory area
  _Atomic(uint8_t*)   start;                // the start of the memory area
  size_t              block_count;          // size of the area in arena blocks (of `MI_ARENA_BLOCK_SIZE`)
  size_t              field_count;          // number of bitmap fields (where `field_count * MI_BITMAP_FIELD_BITS >= block_count`)
  size_t              meta_size;            // size of the arena structure itself (including its bitmaps)
  mi_memid_t          meta_memid;           // memid of the arena structure itself (OS or static allocation)
  int                 numa_node;            // associated NUMA node
  bool                exclusive;            // only allow allocations if specifically for this arena
  bool                is_large;             // memory area consists of large- or huge OS pages (always committed)
  mi_lock_t           abandoned_visit_lock; // lock is only used when abandoned segments are being visited
  _Atomic(size_t)     search_idx;           // optimization to start the search for free blocks
  _Atomic(mi_msecs_t) purge_expire;         // expiration time when blocks should be decommitted from `blocks_purge`.
  mi_bitmap_field_t*  blocks_dirty;         // are the blocks potentially non-zero?
  mi_bitmap_field_t*  blocks_committed;     // are the blocks committed? (can be NULL for memory that cannot be decommitted)
  mi_bitmap_field_t*  blocks_purge;         // blocks that can be (reset) decommitted. (can be NULL for memory that cannot be (reset) decommitted)
  mi_bitmap_field_t*  blocks_abandoned;     // blocks that start with an abandoned segment. (This crosses APIs but it is convenient to have here)
  mi_bitmap_field_t   blocks_inuse[1];      // in-place bitmap of in-use blocks (of size `field_count`)
  // do not add further fields here as the dirty, committed, purged, and abandoned bitmaps follow the inuse bitmap fields.
} mi_arena_t;


#define MI_ARENA_BLOCK_SIZE   (MI_SEGMENT_SIZE)        // 64MiB  (must be at least MI_SEGMENT_ALIGN)
#define MI_ARENA_MIN_OBJ_SIZE (MI_ARENA_BLOCK_SIZE/2)  // 32MiB
#define MI_MAX_ARENAS         (132)                    // Limited as the reservation exponentially increases (and takes up .bss)

// The available arenas
static mi_decl_cache_align _Atomic(mi_arena_t*) mi_arenas[MI_MAX_ARENAS];
static mi_decl_cache_align _Atomic(size_t)      mi_arena_count; // = 0

#define MI_IN_ARENA_C
#include "arena-abandon.c"
#undef MI_IN_ARENA_C

/* -----------------------------------------------------------
  Arena id's
  id = arena_index + 1
----------------------------------------------------------- */

size_t mi_arena_id_index(mi_arena_id_t id) {
  return (size_t)(id <= 0 ? MI_MAX_ARENAS : id - 1);
}

static mi_arena_id_t mi_arena_id_create(size_t arena_index) {
  mi_assert_internal(arena_index < MI_MAX_ARENAS);
  return (int)arena_index + 1;
}

mi_arena_id_t _mi_arena_id_none(void) {
  return 0;
}

static bool mi_arena_id_is_suitable(mi_arena_id_t arena_id, bool arena_is_exclusive, mi_arena_id_t req_arena_id) {
  return ((!arena_is_exclusive && req_arena_id == _mi_arena_id_none()) ||
          (arena_id == req_arena_id));
}
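
// Example: arena ids are the arena index plus one, so the arena stored at
// `mi_arenas[0]` has id 1, and id 0 (`_mi_arena_id_none`) means "no specific
// arena". A non-exclusive arena is suitable for any request without a specific
// arena id, while an exclusive arena only matches requests that name its id:
//
//   mi_arena_id_is_suitable(1, false, _mi_arena_id_none());  // true: shared arena, generic request
//   mi_arena_id_is_suitable(1, true,  _mi_arena_id_none());  // false: exclusive arena needs an explicit request
//   mi_arena_id_is_suitable(1, true,  1);                    // true: requested exactly this arena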

bool _mi_arena_memid_is_suitable(mi_memid_t memid, mi_arena_id_t request_arena_id) {
  if (memid.memkind == MI_MEM_ARENA) {
    return mi_arena_id_is_suitable(memid.mem.arena.id, memid.mem.arena.is_exclusive, request_arena_id);
  }
  else {
    return mi_arena_id_is_suitable(_mi_arena_id_none(), false, request_arena_id);
  }
}

size_t mi_arena_get_count(void) {
  return mi_atomic_load_relaxed(&mi_arena_count);
}

mi_arena_t* mi_arena_from_index(size_t idx) {
  mi_assert_internal(idx < mi_arena_get_count());
  return mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[idx]);
}


/* -----------------------------------------------------------
  Arena allocations get a (currently) 16-bit memory id where the
  lower 8 bits are the arena id, and the upper bits the block index.
----------------------------------------------------------- */

static size_t mi_block_count_of_size(size_t size) {
  return _mi_divide_up(size, MI_ARENA_BLOCK_SIZE);
}

static size_t mi_arena_block_size(size_t bcount) {
  return (bcount * MI_ARENA_BLOCK_SIZE);
}

static size_t mi_arena_size(mi_arena_t* arena) {
  return mi_arena_block_size(arena->block_count);
}
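
// Worked example: with MI_ARENA_BLOCK_SIZE = 64MiB, a request of 100MiB needs
// `_mi_divide_up(100MiB, 64MiB)` = 2 blocks, which corresponds to an actual
// arena range of `mi_arena_block_size(2)` = 128MiB.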

static mi_memid_t mi_memid_create_arena(mi_arena_id_t id, bool is_exclusive, mi_bitmap_index_t bitmap_index) {
  mi_memid_t memid = _mi_memid_create(MI_MEM_ARENA);
  memid.mem.arena.id = id;
  memid.mem.arena.block_index = bitmap_index;
  memid.mem.arena.is_exclusive = is_exclusive;
  return memid;
}

bool mi_arena_memid_indices(mi_memid_t memid, size_t* arena_index, mi_bitmap_index_t* bitmap_index) {
  mi_assert_internal(memid.memkind == MI_MEM_ARENA);
  *arena_index = mi_arena_id_index(memid.mem.arena.id);
  *bitmap_index = memid.mem.arena.block_index;
  return memid.mem.arena.is_exclusive;
}
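
// Illustrative example (`bidx` stands for any valid bitmap index):
// `mi_memid_create_arena` and `mi_arena_memid_indices` are inverses for
// arena-backed memory. A block claimed in arena id 3 round-trips as:
//
//   mi_memid_t memid = mi_memid_create_arena(3, false, bidx);
//   size_t arena_index; mi_bitmap_index_t bitmap_index;
//   bool is_exclusive = mi_arena_memid_indices(memid, &arena_index, &bitmap_index);
//   // arena_index == 2 (id - 1), bitmap_index == bidx, is_exclusive == false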


/* -----------------------------------------------------------
  Special static area for mimalloc internal structures
  to avoid OS calls (for example, for the arena metadata (~= 256b))
----------------------------------------------------------- */

#define MI_ARENA_STATIC_MAX  ((MI_INTPTR_SIZE/2)*MI_KiB)  // 4 KiB on 64-bit

static mi_decl_cache_align uint8_t mi_arena_static[MI_ARENA_STATIC_MAX];  // must be cache aligned, see issue #895
static mi_decl_cache_align _Atomic(size_t) mi_arena_static_top;

static void* mi_arena_static_zalloc(size_t size, size_t alignment, mi_memid_t* memid) {
  *memid = _mi_memid_none();
  if (size == 0 || size > MI_ARENA_STATIC_MAX) return NULL;
  const size_t toplow = mi_atomic_load_relaxed(&mi_arena_static_top);
  if ((toplow + size) > MI_ARENA_STATIC_MAX) return NULL;

  // try to claim space
  if (alignment < MI_MAX_ALIGN_SIZE) { alignment = MI_MAX_ALIGN_SIZE; }
  const size_t oversize = size + alignment - 1;
  if (toplow + oversize > MI_ARENA_STATIC_MAX) return NULL;
  const size_t oldtop = mi_atomic_add_acq_rel(&mi_arena_static_top, oversize);
  size_t top = oldtop + oversize;
  if (top > MI_ARENA_STATIC_MAX) {
    // try to roll back, ok if this fails
    mi_atomic_cas_strong_acq_rel(&mi_arena_static_top, &top, oldtop);
    return NULL;
  }

  // success
  *memid = _mi_memid_create(MI_MEM_STATIC);
  memid->initially_zero = true;
  const size_t start = _mi_align_up(oldtop, alignment);
  uint8_t* const p = &mi_arena_static[start];
  _mi_memzero_aligned(p, size);
  return p;
}
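
// Note: `mi_arena_static_zalloc` is a tiny lock-free bump allocator over the
// fixed `mi_arena_static` buffer. It over-allocates by `alignment - 1` bytes so
// the aligned start always fits inside the claimed range. For example, assuming
// MI_MAX_ALIGN_SIZE is 16 and the current top is 40, a request of size 100 with
// alignment 16 claims `oversize` = 115 bytes (new top 155) and returns the
// address at offset `_mi_align_up(40, 16)` = 48; offsets 48..147 lie inside the
// claimed 40..154 range. On overflow it attempts a single compare-and-swap
// rollback and the caller (`_mi_arena_meta_zalloc`) falls back to the OS.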

void* _mi_arena_meta_zalloc(size_t size, mi_memid_t* memid) {
  *memid = _mi_memid_none();

  // try static
  void* p = mi_arena_static_zalloc(size, MI_MAX_ALIGN_SIZE, memid);
  if (p != NULL) return p;

  // or fall back to the OS
  p = _mi_os_alloc(size, memid, &_mi_stats_main);
  if (p == NULL) return NULL;

  // zero the OS memory if needed
  if (!memid->initially_zero) {
    _mi_memzero_aligned(p, size);
    memid->initially_zero = true;
  }
  return p;
}

void _mi_arena_meta_free(void* p, mi_memid_t memid, size_t size) {
  if (mi_memkind_is_os(memid.memkind)) {
    _mi_os_free(p, size, memid, &_mi_stats_main);
  }
  else {
    mi_assert(memid.memkind == MI_MEM_STATIC);
  }
}

void* mi_arena_block_start(mi_arena_t* arena, mi_bitmap_index_t bindex) {
  return (arena->start + mi_arena_block_size(mi_bitmap_index_bit(bindex)));
}


/* -----------------------------------------------------------
  Thread safe allocation in an arena
----------------------------------------------------------- */

// claim the `blocks_inuse` bits
static bool mi_arena_try_claim(mi_arena_t* arena, size_t blocks, mi_bitmap_index_t* bitmap_idx, mi_stats_t* stats)
{
  size_t idx = 0; // mi_atomic_load_relaxed(&arena->search_idx);  // start from last search; ok to be relaxed as the exact start does not matter
  if (_mi_bitmap_try_find_from_claim_across(arena->blocks_inuse, arena->field_count, idx, blocks, bitmap_idx, stats)) {
    mi_atomic_store_relaxed(&arena->search_idx, mi_bitmap_index_field(*bitmap_idx));  // start search from found location next time around
    return true;
  }
  return false;
}


/* -----------------------------------------------------------
  Arena Allocation
----------------------------------------------------------- */

static mi_decl_noinline void* mi_arena_try_alloc_at(mi_arena_t* arena, size_t arena_index, size_t needed_bcount,
                                                    bool commit, mi_memid_t* memid, mi_os_tld_t* tld)
{
  MI_UNUSED(arena_index);
  mi_assert_internal(mi_arena_id_index(arena->id) == arena_index);

  mi_bitmap_index_t bitmap_index;
  if (!mi_arena_try_claim(arena, needed_bcount, &bitmap_index, tld->stats)) return NULL;

  // claimed it!
  void* p = mi_arena_block_start(arena, bitmap_index);
  *memid = mi_memid_create_arena(arena->id, arena->exclusive, bitmap_index);
  memid->is_pinned = arena->memid.is_pinned;

  // none of the claimed blocks should be scheduled for a decommit
  if (arena->blocks_purge != NULL) {
    // this is thread safe as a potential purge only decommits parts that are not yet claimed as used (in `blocks_inuse`).
    _mi_bitmap_unclaim_across(arena->blocks_purge, arena->field_count, needed_bcount, bitmap_index);
  }

  // set the dirty bits (todo: no need for an atomic op here?)
  if (arena->memid.initially_zero && arena->blocks_dirty != NULL) {
    memid->initially_zero = _mi_bitmap_claim_across(arena->blocks_dirty, arena->field_count, needed_bcount, bitmap_index, NULL);
  }

  // set commit state
  if (arena->blocks_committed == NULL) {
    // always committed
    memid->initially_committed = true;
  }
  else if (commit) {
    // commit requested, but the range may not be committed as a whole: ensure it is committed now
    memid->initially_committed = true;
    bool any_uncommitted;
    _mi_bitmap_claim_across(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index, &any_uncommitted);
    if (any_uncommitted) {
      bool commit_zero = false;
      if (!_mi_os_commit(p, mi_arena_block_size(needed_bcount), &commit_zero, tld->stats)) {
        memid->initially_committed = false;
      }
      else {
        if (commit_zero) { memid->initially_zero = true; }
      }
    }
  }
  else {
    // no need to commit, but check if already fully committed
    memid->initially_committed = _mi_bitmap_is_claimed_across(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index);
  }

  return p;
}

// allocate in a specific arena
static void* mi_arena_try_alloc_at_id(mi_arena_id_t arena_id, bool match_numa_node, int numa_node, size_t size, size_t alignment,
                                       bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld )
{
  MI_UNUSED_RELEASE(alignment);
  mi_assert_internal(alignment <= MI_SEGMENT_ALIGN);
  const size_t bcount = mi_block_count_of_size(size);
  const size_t arena_index = mi_arena_id_index(arena_id);
  mi_assert_internal(arena_index < mi_atomic_load_relaxed(&mi_arena_count));
  mi_assert_internal(size <= mi_arena_block_size(bcount));

  // Check arena suitability
  mi_arena_t* arena = mi_arena_from_index(arena_index);
  if (arena == NULL) return NULL;
  if (!allow_large && arena->is_large) return NULL;
  if (!mi_arena_id_is_suitable(arena->id, arena->exclusive, req_arena_id)) return NULL;
  if (req_arena_id == _mi_arena_id_none()) { // if not specific, check numa affinity
    const bool numa_suitable = (numa_node < 0 || arena->numa_node < 0 || arena->numa_node == numa_node);
    if (match_numa_node) { if (!numa_suitable) return NULL; }
                    else { if (numa_suitable) return NULL; }
  }

  // try to allocate
  void* p = mi_arena_try_alloc_at(arena, arena_index, bcount, commit, memid, tld);
  mi_assert_internal(p == NULL || _mi_is_aligned(p, alignment));
  return p;
}


// allocate from an arena with fallback to the OS
static mi_decl_noinline void* mi_arena_try_alloc(int numa_node, size_t size, size_t alignment,
                                                  bool commit, bool allow_large,
                                                  mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld )
{
  MI_UNUSED(alignment);
  mi_assert_internal(alignment <= MI_SEGMENT_ALIGN);
  const size_t max_arena = mi_atomic_load_relaxed(&mi_arena_count);
  if mi_likely(max_arena == 0) return NULL;

  if (req_arena_id != _mi_arena_id_none()) {
    // try a specific arena if requested
    if (mi_arena_id_index(req_arena_id) < max_arena) {
      void* p = mi_arena_try_alloc_at_id(req_arena_id, true, numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld);
      if (p != NULL) return p;
    }
  }
  else {
    // try numa affine allocation
    for (size_t i = 0; i < max_arena; i++) {
      void* p = mi_arena_try_alloc_at_id(mi_arena_id_create(i), true, numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld);
      if (p != NULL) return p;
    }

    // try from another numa node instead..
    if (numa_node >= 0) {  // if numa_node was < 0 (no specific affinity requested), all arenas have been tried already
      for (size_t i = 0; i < max_arena; i++) {
        void* p = mi_arena_try_alloc_at_id(mi_arena_id_create(i), false /* only proceed if not numa local */, numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld);
        if (p != NULL) return p;
      }
    }
  }
  return NULL;
}

// try to reserve a fresh arena space
static bool mi_arena_reserve(size_t req_size, bool allow_large, mi_arena_id_t req_arena_id, mi_arena_id_t *arena_id)
{
  if (_mi_preloading()) return false;  // use the OS only while pre-loading
  if (req_arena_id != _mi_arena_id_none()) return false;

  const size_t arena_count = mi_atomic_load_acquire(&mi_arena_count);
  if (arena_count > (MI_MAX_ARENAS - 4)) return false;

  size_t arena_reserve = mi_option_get_size(mi_option_arena_reserve);
  if (arena_reserve == 0) return false;

  if (!_mi_os_has_virtual_reserve()) {
    arena_reserve = arena_reserve/4;  // be conservative if virtual reserve is not supported (for WASM for example)
  }
  arena_reserve = _mi_align_up(arena_reserve, MI_ARENA_BLOCK_SIZE);
  arena_reserve = _mi_align_up(arena_reserve, MI_SEGMENT_SIZE);
  if (arena_count >= 8 && arena_count <= 128) {
    // scale up the arena sizes exponentially every 8 entries (128 entries get to 589TiB)
    const size_t multiplier = (size_t)1 << _mi_clamp(arena_count/8, 0, 16);
    size_t reserve = 0;
    if (!mi_mul_overflow(multiplier, arena_reserve, &reserve)) {
      arena_reserve = reserve;
    }
  }
  if (arena_reserve < req_size) return false;  // should be able to at least handle the current allocation size

  // commit eagerly?
  bool arena_commit = false;
  if (mi_option_get(mi_option_arena_eager_commit) == 2)      { arena_commit = _mi_os_has_overcommit(); }
  else if (mi_option_get(mi_option_arena_eager_commit) == 1) { arena_commit = true; }

  return (mi_reserve_os_memory_ex(arena_reserve, arena_commit, allow_large, false /* exclusive? */, arena_id) == 0);
}
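
// Note on the scaling above: with the default `mi_option_arena_reserve` of
// (typically) 1 GiB, the reserved size doubles for every 8 arenas that already
// exist, i.e. `multiplier = 1 << (arena_count/8)`. So arenas 0..7 reserve 1 GiB
// each, arenas 8..15 reserve 2 GiB, arenas 16..23 reserve 4 GiB, and so on
// (bounded by MI_MAX_ARENAS and by `mi_mul_overflow`), which is what lets the
// total reservation grow into the hundreds of TiB mentioned in the comment.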


void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large,
                              mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld)
{
  mi_assert_internal(memid != NULL && tld != NULL);
  mi_assert_internal(size > 0);
  *memid = _mi_memid_none();

  const int numa_node = _mi_os_numa_node(tld); // current numa node

  // try to allocate in an arena if the alignment is small enough and the object is not too small (as for heap meta data)
  if (!mi_option_is_enabled(mi_option_disallow_arena_alloc) || req_arena_id != _mi_arena_id_none()) {  // is arena allocation allowed?
    if (size >= MI_ARENA_MIN_OBJ_SIZE && alignment <= MI_SEGMENT_ALIGN && align_offset == 0) {
      void* p = mi_arena_try_alloc(numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld);
      if (p != NULL) return p;

      // otherwise, try to first eagerly reserve a new arena
      if (req_arena_id == _mi_arena_id_none()) {
        mi_arena_id_t arena_id = 0;
        if (mi_arena_reserve(size, allow_large, req_arena_id, &arena_id)) {
          // and try to allocate in there
          mi_assert_internal(req_arena_id == _mi_arena_id_none());
          p = mi_arena_try_alloc_at_id(arena_id, true, numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld);
          if (p != NULL) return p;
        }
      }
    }
  }

  // if we cannot use OS allocation, return NULL
  if (mi_option_is_enabled(mi_option_disallow_os_alloc) || req_arena_id != _mi_arena_id_none()) {
    errno = ENOMEM;
    return NULL;
  }

  // finally, fall back to the OS
  if (align_offset > 0) {
    return _mi_os_alloc_aligned_at_offset(size, alignment, align_offset, commit, allow_large, memid, tld->stats);
  }
  else {
    return _mi_os_alloc_aligned(size, alignment, commit, allow_large, memid, tld->stats);
  }
}

void* _mi_arena_alloc(size_t size, bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld)
{
  return _mi_arena_alloc_aligned(size, MI_ARENA_BLOCK_SIZE, 0, commit, allow_large, req_arena_id, memid, tld);
}
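
// Summary of the allocation cascade in `_mi_arena_alloc_aligned` above:
//   1. try the existing arenas (numa-affine first, then remote numa nodes);
//   2. if that fails and no specific arena was requested, eagerly reserve a
//      fresh arena via `mi_arena_reserve` and retry in it;
//   3. otherwise fall back to a direct OS allocation, unless OS allocation is
//      disallowed (`mi_option_disallow_os_alloc`) or the caller required a
//      specific arena, in which case NULL is returned with errno = ENOMEM.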


void* mi_arena_area(mi_arena_id_t arena_id, size_t* size) {
  if (size != NULL) *size = 0;
  size_t arena_index = mi_arena_id_index(arena_id);
  if (arena_index >= MI_MAX_ARENAS) return NULL;
  mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[arena_index]);
  if (arena == NULL) return NULL;
  if (size != NULL) { *size = mi_arena_block_size(arena->block_count); }
  return arena->start;
}


/* -----------------------------------------------------------
  Arena purge
----------------------------------------------------------- */

static long mi_arena_purge_delay(void) {
  // <0 = no purging allowed, 0 = immediate purging, >0 = millisecond delay
  return (mi_option_get(mi_option_purge_delay) * mi_option_get(mi_option_arena_purge_mult));
}
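
// Example: with the (typical) defaults of mi_option_purge_delay = 10 and
// mi_option_arena_purge_mult = 10, arena blocks are decommitted roughly 100
// milliseconds after they were freed rather than immediately; setting
// mi_option_purge_delay to a negative value disables arena purging entirely.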

// reset or decommit in an arena and update the committed/decommit bitmaps
// assumes we own the area (i.e. blocks_in_use is claimed by us)
static void mi_arena_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks, mi_stats_t* stats) {
  mi_assert_internal(arena->blocks_committed != NULL);
  mi_assert_internal(arena->blocks_purge != NULL);
  mi_assert_internal(!arena->memid.is_pinned);
  const size_t size = mi_arena_block_size(blocks);
  void* const p = mi_arena_block_start(arena, bitmap_idx);
  bool needs_recommit;
  if (_mi_bitmap_is_claimed_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx)) {
    // all blocks are committed, we can purge freely
    needs_recommit = _mi_os_purge(p, size, stats);
  }
  else {
    // some blocks are not committed -- this can happen when a partially committed block is freed
    // in `_mi_arena_free` and it is conservatively marked as uncommitted but still scheduled for a purge
    // we need to ensure we do not try to reset (as that may be invalid for uncommitted memory),
    // and also undo the decommit stats (as it was already adjusted)
    mi_assert_internal(mi_option_is_enabled(mi_option_purge_decommits));
    needs_recommit = _mi_os_purge_ex(p, size, false /* allow reset? */, stats);
    if (needs_recommit) { _mi_stat_increase(&_mi_stats_main.committed, size); }
  }

  // clear the purged blocks
  _mi_bitmap_unclaim_across(arena->blocks_purge, arena->field_count, blocks, bitmap_idx);
  // update committed bitmap
  if (needs_recommit) {
    _mi_bitmap_unclaim_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx);
  }
}

// Schedule a purge. This is usually delayed to avoid repeated decommit/commit calls.
// Note: assumes we (still) own the area as we may purge immediately
static void mi_arena_schedule_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks, mi_stats_t* stats) {
  mi_assert_internal(arena->blocks_purge != NULL);
  const long delay = mi_arena_purge_delay();
  if (delay < 0) return;  // is purging allowed at all?

  if (_mi_preloading() || delay == 0) {
    // decommit directly
    mi_arena_purge(arena, bitmap_idx, blocks, stats);
  }
  else {
    // schedule decommit
    mi_msecs_t expire = mi_atomic_loadi64_relaxed(&arena->purge_expire);
    if (expire != 0) {
      mi_atomic_addi64_acq_rel(&arena->purge_expire, (mi_msecs_t)(delay/10));  // add smallish extra delay
    }
    else {
      mi_atomic_storei64_release(&arena->purge_expire, _mi_clock_now() + delay);
    }
    _mi_bitmap_claim_across(arena->blocks_purge, arena->field_count, blocks, bitmap_idx, NULL);
  }
}

// purge a range of blocks
// return true if the full range was purged.
// assumes we own the area (i.e. blocks_in_use is claimed by us)
static bool mi_arena_purge_range(mi_arena_t* arena, size_t idx, size_t startidx, size_t bitlen, size_t purge, mi_stats_t* stats) {
  const size_t endidx = startidx + bitlen;
  size_t bitidx = startidx;
  bool all_purged = false;
  while (bitidx < endidx) {
    // count consecutive ones in the purge mask
    size_t count = 0;
    while (bitidx + count < endidx && (purge & ((size_t)1 << (bitidx + count))) != 0) {
      count++;
    }
    if (count > 0) {
      // found range to be purged
      const mi_bitmap_index_t range_idx = mi_bitmap_index_create(idx, bitidx);
      mi_arena_purge(arena, range_idx, count, stats);
      if (count == bitlen) {
        all_purged = true;
      }
    }
    bitidx += (count+1); // +1 to skip the zero bit (or end)
  }
  return all_purged;
}
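
// Worked example for `mi_arena_purge_range`: with startidx = 0, bitlen = 8 and
// purge mask 0x67 (binary 0110'0111, bits 0,1,2,5,6 set), the loop purges the
// run of 3 blocks at bits 0..2, skips the zero bits at 3..4, then purges the
// run of 2 blocks at bits 5..6. Since neither run covers the whole 8-bit range,
// the function returns false and the caller will schedule another purge later.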

// returns true if anything was purged
static bool mi_arena_try_purge(mi_arena_t* arena, mi_msecs_t now, bool force, mi_stats_t* stats)
{
  if (arena->memid.is_pinned || arena->blocks_purge == NULL) return false;
  mi_msecs_t expire = mi_atomic_loadi64_relaxed(&arena->purge_expire);
  if (expire == 0) return false;
  if (!force && expire > now) return false;

  // reset expire (if not already set concurrently)
  mi_atomic_casi64_strong_acq_rel(&arena->purge_expire, &expire, (mi_msecs_t)0);

  // potential purges scheduled, walk through the bitmap
  bool any_purged = false;
  bool full_purge = true;
  for (size_t i = 0; i < arena->field_count; i++) {
    size_t purge = mi_atomic_load_relaxed(&arena->blocks_purge[i]);
    if (purge != 0) {
      size_t bitidx = 0;
      while (bitidx < MI_BITMAP_FIELD_BITS) {
        // find consecutive range of ones in the purge mask
        size_t bitlen = 0;
        while (bitidx + bitlen < MI_BITMAP_FIELD_BITS && (purge & ((size_t)1 << (bitidx + bitlen))) != 0) {
          bitlen++;
        }
        // temporarily claim the purge range as "in-use" to be thread-safe with allocation
        // try to claim the longest range of corresponding in_use bits
        const mi_bitmap_index_t bitmap_index = mi_bitmap_index_create(i, bitidx);
        while( bitlen > 0 ) {
          if (_mi_bitmap_try_claim(arena->blocks_inuse, arena->field_count, bitlen, bitmap_index)) {
            break;
          }
          bitlen--;
        }
        // actual claimed bits at `in_use`
        if (bitlen > 0) {
          // read purge again now that we have the in_use bits
          purge = mi_atomic_load_acquire(&arena->blocks_purge[i]);
          if (!mi_arena_purge_range(arena, i, bitidx, bitlen, purge, stats)) {
            full_purge = false;
          }
          any_purged = true;
          // release the claimed `in_use` bits again
          _mi_bitmap_unclaim(arena->blocks_inuse, arena->field_count, bitlen, bitmap_index);
        }
        bitidx += (bitlen+1);  // +1 to skip the zero (or end)
      } // while bitidx
    } // purge != 0
  }
  // if not fully purged, make sure to purge again in the future
  if (!full_purge) {
    const long delay = mi_arena_purge_delay();
    mi_msecs_t expected = 0;
    mi_atomic_casi64_strong_acq_rel(&arena->purge_expire, &expected, _mi_clock_now() + delay);
  }
  return any_purged;
}
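
// Note on the protocol above: before decommitting, the purger first re-claims
// the candidate blocks in `blocks_inuse` (shrinking the range until a claim
// succeeds). This makes the purge mutually exclusive with concurrent
// allocation: an allocator that wins the `blocks_inuse` race simply clears the
// corresponding `blocks_purge` bits in `mi_arena_try_alloc_at`, while a purger
// that wins decommits the range and then releases the in-use bits again.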

static void mi_arenas_try_purge( bool force, bool visit_all, mi_stats_t* stats ) {
  if (_mi_preloading() || mi_arena_purge_delay() <= 0) return;  // nothing will be scheduled

  const size_t max_arena = mi_atomic_load_acquire(&mi_arena_count);
  if (max_arena == 0) return;

  // allow only one thread to purge at a time
  static mi_atomic_guard_t purge_guard;
  mi_atomic_guard(&purge_guard)
  {
    mi_msecs_t now = _mi_clock_now();
    size_t max_purge_count = (visit_all ? max_arena : 1);
    for (size_t i = 0; i < max_arena; i++) {
      mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[i]);
      if (arena != NULL) {
        if (mi_arena_try_purge(arena, now, force, stats)) {
          if (max_purge_count <= 1) break;
          max_purge_count--;
        }
      }
    }
  }
}


/* -----------------------------------------------------------
  Arena free
----------------------------------------------------------- */

void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memid, mi_stats_t* stats) {
  mi_assert_internal(size > 0 && stats != NULL);
  mi_assert_internal(committed_size <= size);
  if (p==NULL) return;
  if (size==0) return;
  const bool all_committed = (committed_size == size);

  // need to set all memory to undefined as some parts may still be marked as no_access (like padding etc.)
  mi_track_mem_undefined(p,size);

  if (mi_memkind_is_os(memid.memkind)) {
    // was a direct OS allocation, pass through
    if (!all_committed && committed_size > 0) {
      // if partially committed, adjust the committed stats (as `_mi_os_free` will increase decommit by the full size)
      _mi_stat_decrease(&_mi_stats_main.committed, committed_size);
    }
    _mi_os_free(p, size, memid, stats);
  }
  else if (memid.memkind == MI_MEM_ARENA) {
    // allocated in an arena
    size_t arena_idx;
    size_t bitmap_idx;
    mi_arena_memid_indices(memid, &arena_idx, &bitmap_idx);
    mi_assert_internal(arena_idx < MI_MAX_ARENAS);
    mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t,&mi_arenas[arena_idx]);
    mi_assert_internal(arena != NULL);
    const size_t blocks = mi_block_count_of_size(size);

    // checks
    if (arena == NULL) {
      _mi_error_message(EINVAL, "trying to free from an invalid arena: %p, size %zu, memid: 0x%zx\n", p, size, memid);
      return;
    }
    mi_assert_internal(arena->field_count > mi_bitmap_index_field(bitmap_idx));
    if (arena->field_count <= mi_bitmap_index_field(bitmap_idx)) {
      _mi_error_message(EINVAL, "trying to free from an invalid arena block: %p, size %zu, memid: 0x%zx\n", p, size, memid);
      return;
    }

    // potentially decommit
    if (arena->memid.is_pinned || arena->blocks_committed == NULL) {
      mi_assert_internal(all_committed);
    }
    else {
      mi_assert_internal(arena->blocks_committed != NULL);
      mi_assert_internal(arena->blocks_purge != NULL);

      if (!all_committed) {
        // mark the entire range as no longer committed (so we recommit the full range when re-using)
        _mi_bitmap_unclaim_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx);
        mi_track_mem_noaccess(p,size);
        if (committed_size > 0) {
          // if partially committed, adjust the committed stats (as it will be recommitted when re-using)
          // in the delayed purge, we now need to not count a decommit if the range is not marked as committed.
          _mi_stat_decrease(&_mi_stats_main.committed, committed_size);
        }
        // note: if not all committed, it may be that the purge will reset/decommit the entire range
        // that contains already decommitted parts. Since purge consistently uses reset or decommit that
        // works (as we should never reset decommitted parts).
      }
      // (delay) purge the entire range
      mi_arena_schedule_purge(arena, bitmap_idx, blocks, stats);
    }

    // and make it available to others again
    bool all_inuse = _mi_bitmap_unclaim_across(arena->blocks_inuse, arena->field_count, blocks, bitmap_idx);
    if (!all_inuse) {
      _mi_error_message(EAGAIN, "trying to free an already freed arena block: %p, size %zu\n", p, size);
      return;
    }
  }
  else {
    // arena was none, external, or static; nothing to do
    mi_assert_internal(memid.memkind < MI_MEM_OS);
  }

  // purge expired decommits
  mi_arenas_try_purge(false, false, stats);
}

// destroy owned arenas; this is unsafe and should only be done using `mi_option_destroy_on_exit`
// for dynamic libraries that are unloaded and need to release all their allocated memory.
static void mi_arenas_unsafe_destroy(void) {
  const size_t max_arena = mi_atomic_load_relaxed(&mi_arena_count);
  size_t new_max_arena = 0;
  for (size_t i = 0; i < max_arena; i++) {
    mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[i]);
    if (arena != NULL) {
      mi_lock_done(&arena->abandoned_visit_lock);
      if (arena->start != NULL && mi_memkind_is_os(arena->memid.memkind)) {
        mi_atomic_store_ptr_release(mi_arena_t, &mi_arenas[i], NULL);
        _mi_os_free(arena->start, mi_arena_size(arena), arena->memid, &_mi_stats_main);
      }
      else {
        new_max_arena = i;
      }
      _mi_arena_meta_free(arena, arena->meta_memid, arena->meta_size);
    }
  }

  // try to lower the max arena.
  size_t expected = max_arena;
  mi_atomic_cas_strong_acq_rel(&mi_arena_count, &expected, new_max_arena);
}

// Purge the arenas; if `force_purge` is true, amenable parts are purged even if not yet expired
void _mi_arenas_collect(bool force_purge, mi_stats_t* stats) {
  mi_arenas_try_purge(force_purge, force_purge /* visit all? */, stats);
}

// destroy owned arenas; this is unsafe and should only be done using `mi_option_destroy_on_exit`
// for dynamic libraries that are unloaded and need to release all their allocated memory.
void _mi_arena_unsafe_destroy_all(mi_stats_t* stats) {
  mi_arenas_unsafe_destroy();
  _mi_arenas_collect(true /* force purge */, stats);  // purge non-owned arenas
}

// Is a pointer inside any of our arenas?
bool _mi_arena_contains(const void* p) {
  const size_t max_arena = mi_atomic_load_relaxed(&mi_arena_count);
  for (size_t i = 0; i < max_arena; i++) {
    mi_arena_t* arena = mi_atomic_load_ptr_relaxed(mi_arena_t, &mi_arenas[i]);
    if (arena != NULL && arena->start <= (const uint8_t*)p && arena->start + mi_arena_block_size(arena->block_count) > (const uint8_t*)p) {
      return true;
    }
  }
  return false;
}

/* -----------------------------------------------------------
  Add an arena.
----------------------------------------------------------- */

static bool mi_arena_add(mi_arena_t* arena, mi_arena_id_t* arena_id, mi_stats_t* stats) {
  mi_assert_internal(arena != NULL);
  mi_assert_internal((uintptr_t)mi_atomic_load_ptr_relaxed(uint8_t,&arena->start) % MI_SEGMENT_ALIGN == 0);
  mi_assert_internal(arena->block_count > 0);
  if (arena_id != NULL) { *arena_id = -1; }

  size_t i = mi_atomic_increment_acq_rel(&mi_arena_count);
  if (i >= MI_MAX_ARENAS) {
    mi_atomic_decrement_acq_rel(&mi_arena_count);
    return false;
  }
  _mi_stat_counter_increase(&stats->arena_count,1);
  arena->id = mi_arena_id_create(i);
  mi_atomic_store_ptr_release(mi_arena_t,&mi_arenas[i], arena);
  if (arena_id != NULL) { *arena_id = arena->id; }
  return true;
}

static bool mi_manage_os_memory_ex2(void* start, size_t size, bool is_large, int numa_node, bool exclusive, mi_memid_t memid, mi_arena_id_t* arena_id) mi_attr_noexcept
{
  if (arena_id != NULL) *arena_id = _mi_arena_id_none();
  if (size < MI_ARENA_BLOCK_SIZE) return false;

  if (is_large) {
    mi_assert_internal(memid.initially_committed && memid.is_pinned);
  }

  const size_t bcount = size / MI_ARENA_BLOCK_SIZE;
  const size_t fields = _mi_divide_up(bcount, MI_BITMAP_FIELD_BITS);
  const size_t bitmaps = (memid.is_pinned ? 3 : 5);
  const size_t asize  = sizeof(mi_arena_t) + (bitmaps*fields*sizeof(mi_bitmap_field_t));
  mi_memid_t meta_memid;
  mi_arena_t* arena   = (mi_arena_t*)_mi_arena_meta_zalloc(asize, &meta_memid);
  if (arena == NULL) return false;

  // already zero'd due to zalloc
  // _mi_memzero(arena, asize);
  arena->id = _mi_arena_id_none();
  arena->memid = memid;
  arena->exclusive = exclusive;
  arena->meta_size = asize;
  arena->meta_memid = meta_memid;
  arena->block_count = bcount;
  arena->field_count = fields;
  arena->start = (uint8_t*)start;
  arena->numa_node    = numa_node; // TODO: or get the current numa node if -1? (now it allows anyone to allocate on -1)
  arena->is_large     = is_large;
  arena->purge_expire = 0;
  arena->search_idx   = 0;
  mi_lock_init(&arena->abandoned_visit_lock);
  // consecutive bitmaps
  arena->blocks_dirty     = &arena->blocks_inuse[fields];     // just after inuse bitmap
  arena->blocks_abandoned = &arena->blocks_inuse[2 * fields]; // just after dirty bitmap
  arena->blocks_committed = (arena->memid.is_pinned ? NULL : &arena->blocks_inuse[3*fields]); // just after abandoned bitmap
  arena->blocks_purge     = (arena->memid.is_pinned ? NULL : &arena->blocks_inuse[4*fields]); // just after committed bitmap
  // initialize committed bitmap?
  if (arena->blocks_committed != NULL && arena->memid.initially_committed) {
    memset((void*)arena->blocks_committed, 0xFF, fields*sizeof(mi_bitmap_field_t)); // cast to void* to avoid atomic warning
  }

  // and claim leftover blocks if needed (so we never allocate there)
  ptrdiff_t post = (fields * MI_BITMAP_FIELD_BITS) - bcount;
  mi_assert_internal(post >= 0);
  if (post > 0) {
    // don't use leftover bits at the end
    mi_bitmap_index_t postidx = mi_bitmap_index_create(fields - 1, MI_BITMAP_FIELD_BITS - post);
    _mi_bitmap_claim(arena->blocks_inuse, fields, post, postidx, NULL);
  }
  return mi_arena_add(arena, arena_id, &_mi_stats_main);
}
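
// Note on the metadata layout above: the arena descriptor and all of its
// bitmaps are allocated as one contiguous chunk by `_mi_arena_meta_zalloc`.
// The `blocks_inuse[1]` member marks where the bitmap storage starts and the
// other bitmap pointers are carved out of the same allocation:
//
//   [ mi_arena_t | inuse (fields) | dirty (fields) | abandoned (fields)
//                | committed (fields, if not pinned) | purge (fields, if not pinned) ]
//
// For pinned memory (e.g. huge OS pages) only 3 bitmaps are needed since the
// memory can never be decommitted, which is why `bitmaps` is 3 instead of 5.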

bool mi_manage_os_memory_ex(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept {
  mi_memid_t memid = _mi_memid_create(MI_MEM_EXTERNAL);
  memid.initially_committed = is_committed;
  memid.initially_zero = is_zero;
  memid.is_pinned = is_large;
  return mi_manage_os_memory_ex2(start,size,is_large,numa_node,exclusive,memid, arena_id);
}

// Reserve a range of regular OS memory
int mi_reserve_os_memory_ex(size_t size, bool commit, bool allow_large, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept {
  if (arena_id != NULL) *arena_id = _mi_arena_id_none();
  size = _mi_align_up(size, MI_ARENA_BLOCK_SIZE); // at least one block
  mi_memid_t memid;
  void* start = _mi_os_alloc_aligned(size, MI_SEGMENT_ALIGN, commit, allow_large, &memid, &_mi_stats_main);
  if (start == NULL) return ENOMEM;
  const bool is_large = memid.is_pinned; // todo: use separate is_large field?
  if (!mi_manage_os_memory_ex2(start, size, is_large, -1 /* numa node */, exclusive, memid, arena_id)) {
    _mi_os_free_ex(start, size, commit, memid, &_mi_stats_main);
    _mi_verbose_message("failed to reserve %zu KiB memory\n", _mi_divide_up(size, 1024));
    return ENOMEM;
  }
  _mi_verbose_message("reserved %zu KiB memory%s\n", _mi_divide_up(size, 1024), is_large ? " (in large os pages)" : "");
  return 0;
}
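
// Usage sketch (illustrative; `my_map_memory` is a hypothetical helper): an
// application that maps its own memory can hand a suitably segment-aligned
// range to mimalloc as an exclusive arena and then direct a heap at it:
//
//   size_t size = 256 * MI_MiB;
//   void* base  = my_map_memory(size);   // committed, zeroed, not huge pages
//   mi_arena_id_t arena_id;
//   if (mi_manage_os_memory_ex(base, size, true /* committed */, false /* large */,
//                              true /* zero */, -1 /* numa */, true /* exclusive */,
//                              &arena_id)) {
//     mi_heap_t* heap = mi_heap_new_in_arena(arena_id);  // allocate only from this arena
//     // ... use heap ...
//   }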


// Manage a range of regular OS memory
bool mi_manage_os_memory(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node) mi_attr_noexcept {
  return mi_manage_os_memory_ex(start, size, is_committed, is_large, is_zero, numa_node, false /* exclusive? */, NULL);
}

// Reserve a range of regular OS memory
int mi_reserve_os_memory(size_t size, bool commit, bool allow_large) mi_attr_noexcept {
  return mi_reserve_os_memory_ex(size, commit, allow_large, false, NULL);
}


/* -----------------------------------------------------------
  Debugging
----------------------------------------------------------- */

static size_t mi_debug_show_bitmap(const char* prefix, const char* header, size_t block_count, mi_bitmap_field_t* fields, size_t field_count ) {
  _mi_verbose_message("%s%s:\n", prefix, header);
  size_t bcount = 0;
  size_t inuse_count = 0;
  for (size_t i = 0; i < field_count; i++) {
    char buf[MI_BITMAP_FIELD_BITS + 1];
    uintptr_t field = mi_atomic_load_relaxed(&fields[i]);
    for (size_t bit = 0; bit < MI_BITMAP_FIELD_BITS; bit++, bcount++) {
      if (bcount < block_count) {
        bool inuse = ((((uintptr_t)1 << bit) & field) != 0);
        if (inuse) inuse_count++;
        buf[bit] = (inuse ? 'x' : '.');
      }
      else {
        buf[bit] = ' ';
      }
    }
    buf[MI_BITMAP_FIELD_BITS] = 0;
    _mi_verbose_message("%s  %s\n", prefix, buf);
  }
  _mi_verbose_message("%s  total ('x'): %zu\n", prefix, inuse_count);
  return inuse_count;
}

void mi_debug_show_arenas(bool show_inuse, bool show_abandoned, bool show_purge) mi_attr_noexcept {
  size_t max_arenas = mi_atomic_load_relaxed(&mi_arena_count);
  size_t inuse_total = 0;
  size_t abandoned_total = 0;
  size_t purge_total = 0;
  for (size_t i = 0; i < max_arenas; i++) {
    mi_arena_t* arena = mi_atomic_load_ptr_relaxed(mi_arena_t, &mi_arenas[i]);
    if (arena == NULL) break;
    _mi_verbose_message("arena %zu: %zu blocks of size %zuMiB (in %zu fields) %s\n", i, arena->block_count, MI_ARENA_BLOCK_SIZE / MI_MiB, arena->field_count, (arena->memid.is_pinned ? ", pinned" : ""));
    if (show_inuse) {
      inuse_total += mi_debug_show_bitmap("  ", "inuse blocks", arena->block_count, arena->blocks_inuse, arena->field_count);
    }
    if (arena->blocks_committed != NULL) {
      mi_debug_show_bitmap("  ", "committed blocks", arena->block_count, arena->blocks_committed, arena->field_count);
    }
    if (show_abandoned) {
      abandoned_total += mi_debug_show_bitmap("  ", "abandoned blocks", arena->block_count, arena->blocks_abandoned, arena->field_count);
    }
    if (show_purge && arena->blocks_purge != NULL) {
      purge_total += mi_debug_show_bitmap("  ", "purgeable blocks", arena->block_count, arena->blocks_purge, arena->field_count);
    }
  }
  if (show_inuse)     _mi_verbose_message("total inuse blocks    : %zu\n", inuse_total);
  if (show_abandoned) _mi_verbose_message("total abandoned blocks: %zu\n", abandoned_total);
  if (show_purge)     _mi_verbose_message("total purgeable blocks: %zu\n", purge_total);
}


/* -----------------------------------------------------------
  Reserve a huge page arena.
----------------------------------------------------------- */
// reserve at a specific numa node
int mi_reserve_huge_os_pages_at_ex(size_t pages, int numa_node, size_t timeout_msecs, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept {
  if (arena_id != NULL) *arena_id = -1;
  if (pages==0) return 0;
  if (numa_node < -1) numa_node = -1;
  if (numa_node >= 0) numa_node = numa_node % _mi_os_numa_node_count();
  size_t hsize = 0;
  size_t pages_reserved = 0;
  mi_memid_t memid;
  void* p = _mi_os_alloc_huge_os_pages(pages, numa_node, timeout_msecs, &pages_reserved, &hsize, &memid);
  if (p==NULL || pages_reserved==0) {
    _mi_warning_message("failed to reserve %zu GiB huge pages\n", pages);
    return ENOMEM;
  }
  _mi_verbose_message("numa node %i: reserved %zu GiB huge pages (of the %zu GiB requested)\n", numa_node, pages_reserved, pages);

  if (!mi_manage_os_memory_ex2(p, hsize, true, numa_node, exclusive, memid, arena_id)) {
    _mi_os_free(p, hsize, memid, &_mi_stats_main);
    return ENOMEM;
  }
  return 0;
}

int mi_reserve_huge_os_pages_at(size_t pages, int numa_node, size_t timeout_msecs) mi_attr_noexcept {
  return mi_reserve_huge_os_pages_at_ex(pages, numa_node, timeout_msecs, false, NULL);
}

// reserve huge pages evenly among the given number of numa nodes (or use the available ones as detected)
int mi_reserve_huge_os_pages_interleave(size_t pages, size_t numa_nodes, size_t timeout_msecs) mi_attr_noexcept {
  if (pages == 0) return 0;

  // pages per numa node
  size_t numa_count = (numa_nodes > 0 ? numa_nodes : _mi_os_numa_node_count());
  if (numa_count <= 0) numa_count = 1;
  const size_t pages_per = pages / numa_count;
  const size_t pages_mod = pages % numa_count;
  const size_t timeout_per = (timeout_msecs==0 ? 0 : (timeout_msecs / numa_count) + 50);

  // reserve evenly among numa nodes
  for (size_t numa_node = 0; numa_node < numa_count && pages > 0; numa_node++) {
    size_t node_pages = pages_per;  // can be 0
    if (numa_node < pages_mod) node_pages++;
    int err = mi_reserve_huge_os_pages_at(node_pages, (int)numa_node, timeout_per);
    if (err) return err;
    if (pages < node_pages) {
      pages = 0;
    }
    else {
      pages -= node_pages;
    }
  }

  return 0;
}
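
// Worked example for the interleaving above: reserving 10 pages over 4 numa
// nodes gives pages_per = 2 and pages_mod = 2, so nodes 0 and 1 reserve 3 pages
// each and nodes 2 and 3 reserve 2 pages each (3+3+2+2 = 10). A total timeout
// of 4000 ms becomes roughly 1050 ms per node (timeout/numa_count + 50).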

int mi_reserve_huge_os_pages(size_t pages, double max_secs, size_t* pages_reserved) mi_attr_noexcept {
  MI_UNUSED(max_secs);
  _mi_warning_message("mi_reserve_huge_os_pages is deprecated: use mi_reserve_huge_os_pages_interleave/at instead\n");
  if (pages_reserved != NULL) *pages_reserved = 0;
  int err = mi_reserve_huge_os_pages_interleave(pages, 0, (size_t)(max_secs * 1000.0));
  if (err==0 && pages_reserved!=NULL) *pages_reserved = pages;
  return err;
}