// source: mimalloc/src/alloc.c
  1  /* ----------------------------------------------------------------------------
  2  Copyright (c) 2018-2024, Microsoft Research, Daan Leijen
  3  This is free software; you can redistribute it and/or modify it under the
  4  terms of the MIT license. A copy of the license can be found in the file
  5  "LICENSE" at the root of this distribution.
  6  -----------------------------------------------------------------------------*/
  7  #ifndef _DEFAULT_SOURCE
  8  #define _DEFAULT_SOURCE   // for realpath() on Linux
  9  #endif
 10  
 11  #include "mimalloc.h"
 12  #include "mimalloc/internal.h"
 13  #include "mimalloc/atomic.h"
 14  #include "mimalloc/prim.h"   // _mi_prim_thread_id()
 15  
 16  #include <string.h>      // memset, strlen (for mi_strdup)
 17  #include <stdlib.h>      // malloc, abort
 18  
 19  #define MI_IN_ALLOC_C
 20  #include "alloc-override.c"
 21  #include "free.c"
 22  #undef MI_IN_ALLOC_C
 23  
 24  // ------------------------------------------------------
 25  // Allocation
 26  // ------------------------------------------------------
 27  
 28  // Fast allocation in a page: just pop from the free list.
 29  // Fall back to generic allocation only if the list is empty.
 30  // Note: in release mode the (inlined) routine is about 7 instructions with a single test.
// Fast-path allocation from a page: pop the head of the page-local free list.
// Falls back to `_mi_malloc_generic` only when the list is empty. Optionally
// zero-initializes the block. `size` includes MI_PADDING_SIZE when padding is
// enabled (callers add it).
extern inline void* _mi_page_malloc_zero(mi_heap_t* heap, mi_page_t* page, size_t size, bool zero) mi_attr_noexcept
{
  mi_assert_internal(page->block_size == 0 /* empty heap */ || mi_page_block_size(page) >= size);

  // check the free list
  mi_block_t* const block = page->free;
  if mi_unlikely(block == NULL) {
    // slow path: refill the free list (or allocate a fresh page) and retry
    return _mi_malloc_generic(heap, size, zero, 0);
  }
  mi_assert_internal(block != NULL && _mi_ptr_page(block) == page);

  // pop from the free list
  page->free = mi_block_next(page, block);
  page->used++;
  mi_assert_internal(page->free == NULL || _mi_ptr_page(page->free) == page);
  mi_assert_internal(page->block_size < MI_MAX_ALIGN_SIZE || _mi_is_aligned(block, MI_MAX_ALIGN_SIZE));

  #if MI_DEBUG>3
  if (page->free_is_zero && size > sizeof(*block)) {
    mi_assert_expensive(mi_mem_is_zero(block+1,size - sizeof(*block)));
  }
  #endif

  // allow use of the block internally
  // note: when tracking we need to avoid ever touching the MI_PADDING since
  // that is tracked by valgrind etc. as non-accessible (through the red-zone, see `mimalloc/track.h`)
  mi_track_mem_undefined(block, mi_page_usable_block_size(page));

  // zero the block? note: we need to zero the full block size (issue #63)
  if mi_unlikely(zero) {
    mi_assert_internal(page->block_size != 0); // do not call with zero'ing for huge blocks (see _mi_malloc_generic)
    mi_assert_internal(!mi_page_is_huge(page));
    #if MI_PADDING
    mi_assert_internal(page->block_size >= MI_PADDING_SIZE);
    #endif
    if (page->free_is_zero) {
      // already zero except for the `next` link we just consumed
      block->next = 0;
      mi_track_mem_defined(block, page->block_size - MI_PADDING_SIZE);
    }
    else {
      _mi_memzero_aligned(block, page->block_size - MI_PADDING_SIZE);
    }
  }

  #if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN
  // debug builds: poison uninitialized memory so stale reads are visible
  if (!zero && !mi_page_is_huge(page)) {
    memset(block, MI_DEBUG_UNINIT, mi_page_usable_block_size(page));
  }
  #elif (MI_SECURE!=0)
  if (!zero) { block->next = 0; } // don't leak internal data
  #endif

  #if (MI_STAT>0)
  const size_t bsize = mi_page_usable_block_size(page);
  if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
    mi_heap_stat_increase(heap, normal, bsize);
    mi_heap_stat_counter_increase(heap, normal_count, 1);
    #if (MI_STAT>1)
    const size_t bin = _mi_bin(bsize);
    mi_heap_stat_increase(heap, normal_bins[bin], 1);
    #endif
  }
  #endif

  #if MI_PADDING // && !MI_TRACK_ENABLED
    // write the canary+delta padding record just past the usable block size
    mi_padding_t* const padding = (mi_padding_t*)((uint8_t*)block + mi_page_usable_block_size(page));
    ptrdiff_t delta = ((uint8_t*)padding - (uint8_t*)block - (size - MI_PADDING_SIZE));
    #if (MI_DEBUG>=2)
    mi_assert_internal(delta >= 0 && mi_page_usable_block_size(page) >= (size - MI_PADDING_SIZE + delta));
    #endif
    mi_track_mem_defined(padding,sizeof(mi_padding_t));  // note: re-enable since mi_page_usable_block_size may set noaccess
    padding->canary = mi_ptr_encode_canary(page,block,page->keys);
    padding->delta  = (uint32_t)(delta);
    #if MI_PADDING_CHECK
    if (!mi_page_is_huge(page)) {
      uint8_t* fill = (uint8_t*)padding - delta;
      const size_t maxpad = (delta > MI_MAX_ALIGN_SIZE ? MI_MAX_ALIGN_SIZE : delta); // set at most N initial padding bytes
      for (size_t i = 0; i < maxpad; i++) { fill[i] = MI_DEBUG_PADDING; }
    }
    #endif
  #endif

  return block;
}
115  
116  // extra entries for improved efficiency in `alloc-aligned.c`.
117  extern void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size) mi_attr_noexcept {
118    return _mi_page_malloc_zero(heap,page,size,false);
119  }
120  extern void* _mi_page_malloc_zeroed(mi_heap_t* heap, mi_page_t* page, size_t size) mi_attr_noexcept {
121    return _mi_page_malloc_zero(heap,page,size,true);
122  }
123  
#if MI_DEBUG_GUARDED
// Forward declarations; the guarded-allocation implementations are at the end of this file.
static mi_decl_restrict void* mi_heap_malloc_guarded(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept;
static inline bool mi_heap_malloc_use_guarded(size_t size, bool has_huge_alignment);
static inline bool mi_heap_malloc_small_use_guarded(size_t size);
#endif
129  
// Allocate a small block (size <= MI_SMALL_SIZE_MAX) from `heap`, optionally
// zeroed. Uses the constant-time small-page lookup; shared backend for
// `mi_heap_malloc_small` and `mi_zalloc_small`.
static inline mi_decl_restrict void* mi_heap_malloc_small_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept {
  mi_assert(heap != NULL);
  mi_assert(size <= MI_SMALL_SIZE_MAX);
  #if MI_DEBUG
  const uintptr_t tid = _mi_thread_id();
  mi_assert(heap->thread_id == 0 || heap->thread_id == tid); // heaps are thread local
  #endif
  #if (MI_PADDING || MI_DEBUG_GUARDED)
  // round zero-sized requests up so there is room for the padding/guard bookkeeping
  if (size == 0) { size = sizeof(void*); }
  #endif
  #if MI_DEBUG_GUARDED
  if (mi_heap_malloc_small_use_guarded(size)) { return mi_heap_malloc_guarded(heap, size, zero); }
  #endif

  // get page in constant time, and allocate from it
  mi_page_t* page = _mi_heap_get_free_small_page(heap, size + MI_PADDING_SIZE);
  void* const p = _mi_page_malloc_zero(heap, page, size + MI_PADDING_SIZE, zero);
  mi_track_malloc(p,size,zero);

  #if MI_STAT>1
  if (p != NULL) {
    if (!mi_heap_is_initialized(heap)) { heap = mi_prim_get_default_heap(); }
    mi_heap_stat_increase(heap, malloc, mi_usable_size(p));
  }
  #endif
  #if MI_DEBUG>3
  if (p != NULL && zero) {
    mi_assert_expensive(mi_mem_is_zero(p, size));
  }
  #endif
  return p;
}
162  
163  // allocate a small block
164  mi_decl_nodiscard extern inline mi_decl_restrict void* mi_heap_malloc_small(mi_heap_t* heap, size_t size) mi_attr_noexcept {
165    return mi_heap_malloc_small_zero(heap, size, false);
166  }
167  
168  mi_decl_nodiscard extern inline mi_decl_restrict void* mi_malloc_small(size_t size) mi_attr_noexcept {
169    return mi_heap_malloc_small(mi_prim_get_default_heap(), size);
170  }
171  
172  // The main allocation function
// The main allocation function: dispatches to the small fast path, the guarded
// allocator (debug builds), or the generic allocator. `huge_alignment` is
// non-zero only for over-aligned huge allocations.
extern inline void* _mi_heap_malloc_zero_ex(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept {
  // fast path for small objects
  if mi_likely(size <= MI_SMALL_SIZE_MAX) {
    mi_assert_internal(huge_alignment == 0);
    return mi_heap_malloc_small_zero(heap, size, zero);
  }
  #if MI_DEBUG_GUARDED
  else if (mi_heap_malloc_use_guarded(size,huge_alignment>0)) { return mi_heap_malloc_guarded(heap, size, zero); }
  #endif
  else {
    // regular allocation
    mi_assert(heap!=NULL);
    mi_assert(heap->thread_id == 0 || heap->thread_id == _mi_thread_id());   // heaps are thread local
    void* const p = _mi_malloc_generic(heap, size + MI_PADDING_SIZE, zero, huge_alignment);  // note: size can overflow but it is detected in malloc_generic
    mi_track_malloc(p,size,zero);

    #if MI_STAT>1
    if (p != NULL) {
      if (!mi_heap_is_initialized(heap)) { heap = mi_prim_get_default_heap(); }
      mi_heap_stat_increase(heap, malloc, mi_usable_size(p));
    }
    #endif
    #if MI_DEBUG>3
    if (p != NULL && zero) {
      mi_assert_expensive(mi_mem_is_zero(p, size));
    }
    #endif
    return p;
  }
}
203  
204  extern inline void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept {
205    return _mi_heap_malloc_zero_ex(heap, size, zero, 0);
206  }
207  
208  mi_decl_nodiscard extern inline mi_decl_restrict void* mi_heap_malloc(mi_heap_t* heap, size_t size) mi_attr_noexcept {
209    return _mi_heap_malloc_zero(heap, size, false);
210  }
211  
212  mi_decl_nodiscard extern inline mi_decl_restrict void* mi_malloc(size_t size) mi_attr_noexcept {
213    return mi_heap_malloc(mi_prim_get_default_heap(), size);
214  }
215  
216  // zero initialized small block
217  mi_decl_nodiscard mi_decl_restrict void* mi_zalloc_small(size_t size) mi_attr_noexcept {
218    return mi_heap_malloc_small_zero(mi_prim_get_default_heap(), size, true);
219  }
220  
221  mi_decl_nodiscard extern inline mi_decl_restrict void* mi_heap_zalloc(mi_heap_t* heap, size_t size) mi_attr_noexcept {
222    return _mi_heap_malloc_zero(heap, size, true);
223  }
224  
225  mi_decl_nodiscard mi_decl_restrict void* mi_zalloc(size_t size) mi_attr_noexcept {
226    return mi_heap_zalloc(mi_prim_get_default_heap(),size);
227  }
228  
229  
230  mi_decl_nodiscard extern inline mi_decl_restrict void* mi_heap_calloc(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept {
231    size_t total;
232    if (mi_count_size_overflow(count,size,&total)) return NULL;
233    return mi_heap_zalloc(heap,total);
234  }
235  
236  mi_decl_nodiscard mi_decl_restrict void* mi_calloc(size_t count, size_t size) mi_attr_noexcept {
237    return mi_heap_calloc(mi_prim_get_default_heap(),count,size);
238  }
239  
240  // Uninitialized `calloc`
241  mi_decl_nodiscard extern mi_decl_restrict void* mi_heap_mallocn(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept {
242    size_t total;
243    if (mi_count_size_overflow(count, size, &total)) return NULL;
244    return mi_heap_malloc(heap, total);
245  }
246  
247  mi_decl_nodiscard mi_decl_restrict void* mi_mallocn(size_t count, size_t size) mi_attr_noexcept {
248    return mi_heap_mallocn(mi_prim_get_default_heap(),count,size);
249  }
250  
251  // Expand (or shrink) in place (or fail)
// Expand (or shrink) an allocation strictly in place; returns NULL when the
// request cannot be satisfied without moving the block (the original block
// stays valid and untouched in that case).
void* mi_expand(void* p, size_t newsize) mi_attr_noexcept {
  #if MI_PADDING
  // we do not shrink/expand with padding enabled
  MI_UNUSED(p); MI_UNUSED(newsize);
  return NULL;
  #else
  if (p == NULL) return NULL;
  const size_t size = _mi_usable_size(p,"mi_expand");
  if (newsize > size) return NULL;  // does not fit in the current block
  return p; // it fits
  #endif
}
264  
// Generic realloc backend: reuse the block in place when the new size still
// fits with at most 50% waste, otherwise malloc-copy-free. With `zero` set,
// any grown tail is zero-initialized.
void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero) mi_attr_noexcept {
  // if p == NULL then behave as malloc.
  // else if size == 0 then reallocate to a zero-sized block (and don't return NULL, just as mi_malloc(0)).
  // (this means that returning NULL always indicates an error, and `p` will not have been freed in that case.)
  const size_t size = _mi_usable_size(p,"mi_realloc"); // also works if p == NULL (with size 0)
  if mi_unlikely(newsize <= size && newsize >= (size / 2) && newsize > 0) {  // note: newsize must be > 0 or otherwise we return NULL for realloc(NULL,0)
    mi_assert_internal(p!=NULL);
    // todo: do not track as the usable size is still the same in the free; adjust potential padding?
    // mi_track_resize(p,size,newsize)
    // if (newsize < size) { mi_track_mem_noaccess((uint8_t*)p + newsize, size - newsize); }
    return p;  // reallocation still fits and not more than 50% waste
  }
  void* newp = mi_heap_malloc(heap,newsize);
  if mi_likely(newp != NULL) {
    if (zero && newsize > size) {
      // also set last word in the previous allocation to zero to ensure any padding is zero-initialized
      const size_t start = (size >= sizeof(intptr_t) ? size - sizeof(intptr_t) : 0);
      _mi_memzero((uint8_t*)newp + start, newsize - start);
    }
    else if (newsize == 0) {
      ((uint8_t*)newp)[0] = 0; // work around for applications that expect zero-reallocation to be zero initialized (issue #725)
    }
    if mi_likely(p != NULL) {
      const size_t copysize = (newsize > size ? size : newsize);
      mi_track_mem_defined(p,copysize);  // _mi_useable_size may be too large for byte precise memory tracking..
      _mi_memcpy(newp, p, copysize);
      mi_free(p); // only free the original pointer if successful
    }
  }
  return newp;
}
296  
297  mi_decl_nodiscard void* mi_heap_realloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept {
298    return _mi_heap_realloc_zero(heap, p, newsize, false);
299  }
300  
301  mi_decl_nodiscard void* mi_heap_reallocn(mi_heap_t* heap, void* p, size_t count, size_t size) mi_attr_noexcept {
302    size_t total;
303    if (mi_count_size_overflow(count, size, &total)) return NULL;
304    return mi_heap_realloc(heap, p, total);
305  }
306  
307  
308  // Reallocate but free `p` on errors
309  mi_decl_nodiscard void* mi_heap_reallocf(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept {
310    void* newp = mi_heap_realloc(heap, p, newsize);
311    if (newp==NULL && p!=NULL) mi_free(p);
312    return newp;
313  }
314  
315  mi_decl_nodiscard void* mi_heap_rezalloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept {
316    return _mi_heap_realloc_zero(heap, p, newsize, true);
317  }
318  
319  mi_decl_nodiscard void* mi_heap_recalloc(mi_heap_t* heap, void* p, size_t count, size_t size) mi_attr_noexcept {
320    size_t total;
321    if (mi_count_size_overflow(count, size, &total)) return NULL;
322    return mi_heap_rezalloc(heap, p, total);
323  }
324  
325  
326  mi_decl_nodiscard void* mi_realloc(void* p, size_t newsize) mi_attr_noexcept {
327    return mi_heap_realloc(mi_prim_get_default_heap(),p,newsize);
328  }
329  
330  mi_decl_nodiscard void* mi_reallocn(void* p, size_t count, size_t size) mi_attr_noexcept {
331    return mi_heap_reallocn(mi_prim_get_default_heap(),p,count,size);
332  }
333  
334  // Reallocate but free `p` on errors
335  mi_decl_nodiscard void* mi_reallocf(void* p, size_t newsize) mi_attr_noexcept {
336    return mi_heap_reallocf(mi_prim_get_default_heap(),p,newsize);
337  }
338  
339  mi_decl_nodiscard void* mi_rezalloc(void* p, size_t newsize) mi_attr_noexcept {
340    return mi_heap_rezalloc(mi_prim_get_default_heap(), p, newsize);
341  }
342  
343  mi_decl_nodiscard void* mi_recalloc(void* p, size_t count, size_t size) mi_attr_noexcept {
344    return mi_heap_recalloc(mi_prim_get_default_heap(), p, count, size);
345  }
346  
347  
348  
349  // ------------------------------------------------------
350  // strdup, strndup, and realpath
351  // ------------------------------------------------------
352  
353  // `strdup` using mi_malloc
354  mi_decl_nodiscard mi_decl_restrict char* mi_heap_strdup(mi_heap_t* heap, const char* s) mi_attr_noexcept {
355    if (s == NULL) return NULL;
356    size_t len = _mi_strlen(s);
357    char* t = (char*)mi_heap_malloc(heap,len+1);
358    if (t == NULL) return NULL;
359    _mi_memcpy(t, s, len);
360    t[len] = 0;
361    return t;
362  }
363  
364  mi_decl_nodiscard mi_decl_restrict char* mi_strdup(const char* s) mi_attr_noexcept {
365    return mi_heap_strdup(mi_prim_get_default_heap(), s);
366  }
367  
368  // `strndup` using mi_malloc
369  mi_decl_nodiscard mi_decl_restrict char* mi_heap_strndup(mi_heap_t* heap, const char* s, size_t n) mi_attr_noexcept {
370    if (s == NULL) return NULL;
371    const size_t len = _mi_strnlen(s,n);  // len <= n
372    char* t = (char*)mi_heap_malloc(heap, len+1);
373    if (t == NULL) return NULL;
374    _mi_memcpy(t, s, len);
375    t[len] = 0;
376    return t;
377  }
378  
379  mi_decl_nodiscard mi_decl_restrict char* mi_strndup(const char* s, size_t n) mi_attr_noexcept {
380    return mi_heap_strndup(mi_prim_get_default_heap(),s,n);
381  }
382  
383  #ifndef __wasi__
384  // `realpath` using mi_malloc
385  #ifdef _WIN32
386  #ifndef PATH_MAX
387  #define PATH_MAX MAX_PATH
388  #endif
389  
// Windows `realpath` emulation via GetFullPathNameA. When `resolved_name` is
// given it is filled and returned; otherwise a heap-allocated copy is returned.
mi_decl_nodiscard mi_decl_restrict char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name) mi_attr_noexcept {
  // todo: use GetFullPathNameW to allow longer file names
  char buf[PATH_MAX];
  DWORD res = GetFullPathNameA(fname, PATH_MAX, (resolved_name == NULL ? buf : resolved_name), NULL);
  if (res == 0) {
    // NOTE(review): GetLastError returns a Windows error code, not an errno
    // value — callers inspecting errno get Win32 codes here; confirm intended.
    errno = GetLastError(); return NULL;
  }
  else if (res > PATH_MAX) {
    // res > buffer size means GetFullPathNameA reported the required length: truncated
    errno = EINVAL; return NULL;
  }
  else if (resolved_name != NULL) {
    return resolved_name;
  }
  else {
    return mi_heap_strndup(heap, buf, PATH_MAX);
  }
}
407  #else
408  /*
409  #include <unistd.h>  // pathconf
410  static size_t mi_path_max(void) {
411    static size_t path_max = 0;
412    if (path_max <= 0) {
413      long m = pathconf("/",_PC_PATH_MAX);
414      if (m <= 0) path_max = 4096;      // guess
415      else if (m < 256) path_max = 256; // at least 256
416      else path_max = m;
417    }
418    return path_max;
419  }
420  */
// POSIX `realpath` wrapper: with a caller-supplied buffer it defers entirely
// to realpath(); otherwise it lets realpath() allocate, copies the result
// onto our heap, and frees the libc allocation.
char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name) mi_attr_noexcept {
  if (resolved_name != NULL) {
    return realpath(fname,resolved_name);
  }
  else {
    char* rname = realpath(fname, NULL);  // libc-allocated (POSIX.1-2008)
    if (rname == NULL) return NULL;
    char* result = mi_heap_strdup(heap, rname);
    mi_cfree(rname);  // use checked free (which may be redirected to our free but that's ok)
    // note: with ASAN realpath is intercepted and mi_cfree may leak the returned pointer :-(
    return result;
  }
  /*
    const size_t n  = mi_path_max();
    char* buf = (char*)mi_malloc(n+1);
    if (buf == NULL) {
      errno = ENOMEM;
      return NULL;
    }
    char* rname  = realpath(fname,buf);
    char* result = mi_heap_strndup(heap,rname,n); // ok if `rname==NULL`
    mi_free(buf);
    return result;
  }
  */
}
447  #endif
448  
449  mi_decl_nodiscard mi_decl_restrict char* mi_realpath(const char* fname, char* resolved_name) mi_attr_noexcept {
450    return mi_heap_realpath(mi_prim_get_default_heap(),fname,resolved_name);
451  }
452  #endif
453  
454  /*-------------------------------------------------------
455  C++ new and new_aligned
456  The standard requires calling into `get_new_handler` and
457  throwing the bad_alloc exception on failure. If we compile
458  with a C++ compiler we can implement this precisely. If we
459  use a C compiler we cannot throw a `bad_alloc` exception
460  but we call `exit` instead (i.e. not returning).
461  -------------------------------------------------------*/
462  
463  #ifdef __cplusplus
464  #include <new>
465  static bool mi_try_new_handler(bool nothrow) {
466    #if defined(_MSC_VER) || (__cplusplus >= 201103L)
467      std::new_handler h = std::get_new_handler();
468    #else
469      std::new_handler h = std::set_new_handler();
470      std::set_new_handler(h);
471    #endif
472    if (h==NULL) {
473      _mi_error_message(ENOMEM, "out of memory in 'new'");
474      #if defined(_CPPUNWIND) || defined(__cpp_exceptions)  // exceptions are not always enabled
475      if (!nothrow) {
476        throw std::bad_alloc();
477      }
478      #else
479      MI_UNUSED(nothrow);
480      #endif
481      return false;
482    }
483    else {
484      h();
485      return true;
486    }
487  }
488  #else
// C-mode access to the C++ new-handler, without linking against the C++ runtime.
typedef void (*std_new_handler_t)(void);

#if (defined(__GNUC__) || (defined(__clang__) && !defined(_MSC_VER)))  // exclude clang-cl, see issue #631
// Weak definition of the Itanium-ABI mangled name of `std::get_new_handler()`;
// when linked together with the C++ runtime the real symbol overrides this
// NULL-returning stub.
std_new_handler_t __attribute__((weak)) _ZSt15get_new_handlerv(void) {
  return NULL;
}
static std_new_handler_t mi_get_new_handler(void) {
  return _ZSt15get_new_handlerv();
}
#else
// note: on windows we could dynamically link to `?get_new_handler@std@@YAP6AXXZXZ`.
static std_new_handler_t mi_get_new_handler() {
  return NULL;
}
#endif
504  
505  static bool mi_try_new_handler(bool nothrow) {
506    std_new_handler_t h = mi_get_new_handler();
507    if (h==NULL) {
508      _mi_error_message(ENOMEM, "out of memory in 'new'");
509      if (!nothrow) {
510        abort();  // cannot throw in plain C, use abort
511      }
512      return false;
513    }
514    else {
515      h();
516      return true;
517    }
518  }
519  #endif
520  
521  mi_decl_export mi_decl_noinline void* mi_heap_try_new(mi_heap_t* heap, size_t size, bool nothrow ) {
522    void* p = NULL;
523    while(p == NULL && mi_try_new_handler(nothrow)) {
524      p = mi_heap_malloc(heap,size);
525    }
526    return p;
527  }
528  
529  static mi_decl_noinline void* mi_try_new(size_t size, bool nothrow) {
530    return mi_heap_try_new(mi_prim_get_default_heap(), size, nothrow);
531  }
532  
533  
534  mi_decl_nodiscard mi_decl_restrict void* mi_heap_alloc_new(mi_heap_t* heap, size_t size) {
535    void* p = mi_heap_malloc(heap,size);
536    if mi_unlikely(p == NULL) return mi_heap_try_new(heap, size, false);
537    return p;
538  }
539  
540  mi_decl_nodiscard mi_decl_restrict void* mi_new(size_t size) {
541    return mi_heap_alloc_new(mi_prim_get_default_heap(), size);
542  }
543  
544  
545  mi_decl_nodiscard mi_decl_restrict void* mi_heap_alloc_new_n(mi_heap_t* heap, size_t count, size_t size) {
546    size_t total;
547    if mi_unlikely(mi_count_size_overflow(count, size, &total)) {
548      mi_try_new_handler(false);  // on overflow we invoke the try_new_handler once to potentially throw std::bad_alloc
549      return NULL;
550    }
551    else {
552      return mi_heap_alloc_new(heap,total);
553    }
554  }
555  
556  mi_decl_nodiscard mi_decl_restrict void* mi_new_n(size_t count, size_t size) {
557    return mi_heap_alloc_new_n(mi_prim_get_default_heap(), count, size);
558  }
559  
560  
561  mi_decl_nodiscard mi_decl_restrict void* mi_new_nothrow(size_t size) mi_attr_noexcept {
562    void* p = mi_malloc(size);
563    if mi_unlikely(p == NULL) return mi_try_new(size, true);
564    return p;
565  }
566  
567  mi_decl_nodiscard mi_decl_restrict void* mi_new_aligned(size_t size, size_t alignment) {
568    void* p;
569    do {
570      p = mi_malloc_aligned(size, alignment);
571    }
572    while(p == NULL && mi_try_new_handler(false));
573    return p;
574  }
575  
576  mi_decl_nodiscard mi_decl_restrict void* mi_new_aligned_nothrow(size_t size, size_t alignment) mi_attr_noexcept {
577    void* p;
578    do {
579      p = mi_malloc_aligned(size, alignment);
580    }
581    while(p == NULL && mi_try_new_handler(true));
582    return p;
583  }
584  
585  mi_decl_nodiscard void* mi_new_realloc(void* p, size_t newsize) {
586    void* q;
587    do {
588      q = mi_realloc(p, newsize);
589    } while (q == NULL && mi_try_new_handler(false));
590    return q;
591  }
592  
593  mi_decl_nodiscard void* mi_new_reallocn(void* p, size_t newcount, size_t size) {
594    size_t total;
595    if mi_unlikely(mi_count_size_overflow(newcount, size, &total)) {
596      mi_try_new_handler(false);  // on overflow we invoke the try_new_handler once to potentially throw std::bad_alloc
597      return NULL;
598    }
599    else {
600      return mi_new_realloc(p, total);
601    }
602  }
603  
#if MI_DEBUG_GUARDED
// Should this small allocation be routed to the guarded allocator?
// True when `size` falls in the configured [guarded_min, guarded_max] range.
static inline bool mi_heap_malloc_small_use_guarded(size_t size) {
  return (size <= (size_t)_mi_option_get_fast(mi_option_debug_guarded_max)
          && size >= (size_t)_mi_option_get_fast(mi_option_debug_guarded_min));
}
609  
// Should this (possibly large) allocation be routed to the guarded allocator?
// Requires guarding enabled, no huge alignment, and either an in-range size or
// a page-size-multiple size.
static inline bool mi_heap_malloc_use_guarded(size_t size, bool has_huge_alignment) {
  return (!has_huge_alignment  // guarded pages do not work with huge aligments at the moment
          && _mi_option_get_fast(mi_option_debug_guarded_max) > 0  // guarded must be enabled
          && (mi_heap_malloc_small_use_guarded(size)
              || ((mi_good_size(size) & (_mi_os_page_size() - 1)) == 0))  // page-size multiple are always guarded so we can have a correct `mi_usable_size`.
         );
}
617  
// Allocate `size` bytes placed (as closely as possible) right before a
// protected OS guard page, so out-of-bounds accesses past the object fault
// immediately. Debug-only (MI_DEBUG_GUARDED).
static mi_decl_restrict void* mi_heap_malloc_guarded(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept
{
  #if defined(MI_PADDING_SIZE)
  mi_assert(MI_PADDING_SIZE==0);  // guarded mode is incompatible with padding
  #endif
  // allocate multiple of page size ending in a guard page
  const size_t obj_size  = _mi_align_up(size, MI_MAX_ALIGN_SIZE); // ensure minimal alignment requirement
  const size_t os_page_size = _mi_os_page_size();
  const size_t req_size  = _mi_align_up(obj_size + os_page_size, os_page_size);
  void* const block = _mi_malloc_generic(heap, req_size, zero, 0 /* huge_alignment */);
  if (block==NULL) return NULL;
  mi_page_t* page = _mi_ptr_page(block);
  mi_segment_t* segment = _mi_page_segment(page);

  const size_t block_size = mi_page_block_size(page);  // must use `block_size` to match `mi_free_local`
  void* const guard_page  = (uint8_t*)block + (block_size - os_page_size);
  mi_assert_internal(_mi_is_aligned(guard_page, os_page_size));

  // place block in front of the guard page
  size_t offset = block_size - os_page_size - obj_size;
  if (offset > MI_BLOCK_ALIGNMENT_MAX) {
    // give up to place it right in front of the guard page if the offset is too large for unalignment
    offset = MI_BLOCK_ALIGNMENT_MAX;
  }
  void* const p = (uint8_t*)block + offset;
  mi_assert_internal(p>=block);

  // set page flags
  if (offset > 0) {
    // the returned pointer is interior to the block; mark so free can find the start
    mi_page_set_has_aligned(page, true);
  }

  // set guard page
  if (segment->allow_decommit) {
    mi_page_set_has_guarded(page, true);
    _mi_os_protect(guard_page, os_page_size);
  }
  else {
    // pinned memory cannot be protected; fall back to an unguarded allocation with a warning
    _mi_warning_message("unable to set a guard page behind an object due to pinned memory (large OS pages?) (object %p of size %zu)\n", p, size);
  }

  // stats
  mi_track_malloc(p, size, zero);
  #if MI_STAT>1
  if (p != NULL) {
    if (!mi_heap_is_initialized(heap)) { heap = mi_prim_get_default_heap(); }
    mi_heap_stat_increase(heap, malloc, mi_usable_size(p));
  }
  #endif
  #if MI_DEBUG>3
  if (p != NULL && zero) {
    mi_assert_expensive(mi_mem_is_zero(p, size));
  }
  #endif
  return p;
}
#endif
675  
676  // ------------------------------------------------------
677  // ensure explicit external inline definitions are emitted!
678  // ------------------------------------------------------
679  
#ifdef __cplusplus
// Taking the address of each `extern inline` function forces the compiler to
// emit an out-of-line definition, so non-inlined callers can still link.
void* _mi_externs[] = {
  (void*)&_mi_page_malloc,
  (void*)&_mi_page_malloc_zero,
  (void*)&_mi_heap_malloc_zero,
  (void*)&_mi_heap_malloc_zero_ex,
  (void*)&mi_malloc,
  (void*)&mi_malloc_small,
  (void*)&mi_zalloc_small,
  (void*)&mi_heap_malloc,
  (void*)&mi_heap_zalloc,
  (void*)&mi_heap_malloc_small
  // (void*)&mi_heap_alloc_new,
  // (void*)&mi_heap_alloc_new_n
};
#endif