/*----------------------------------------------------------------------------
Copyright (c) 2018-2024, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/

/* -----------------------------------------------------------
  Definition of page queues for each block size
----------------------------------------------------------- */

#ifndef MI_IN_PAGE_C
#error "this file should be included from 'page.c'"
// include to help an IDE
#include "mimalloc.h"
#include "mimalloc/internal.h"
#include "mimalloc/atomic.h"
#endif

/* -----------------------------------------------------------
  Minimal alignment in machine words (i.e. `sizeof(void*)`)
----------------------------------------------------------- */

#if (MI_MAX_ALIGN_SIZE > 4*MI_INTPTR_SIZE)
  #error "define alignment for more than 4x word size for this platform"
#elif (MI_MAX_ALIGN_SIZE > 2*MI_INTPTR_SIZE)
  #define MI_ALIGN4W   // 4 machine words minimal alignment
#elif (MI_MAX_ALIGN_SIZE > MI_INTPTR_SIZE)
  #define MI_ALIGN2W   // 2 machine words minimal alignment
#else
  // ok, default alignment is 1 word
#endif
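
/* Illustrative sketch (not from the mimalloc sources): what the MI_ALIGN2W
   case above means in practice. With 8-byte words and MI_MAX_ALIGN_SIZE == 16,
   every bin must hold an even number of words so each block stays 16-byte
   aligned; the hypothetical `round2w` mirrors the rounding `mi_bin` performs
   for small sizes in that configuration. */
#if 0
#include <assert.h>
#include <stddef.h>
static size_t round2w(size_t wsize) {        // round up to an even word count
  return (wsize + 1) & ~(size_t)1;
}
int main(void) {
  assert(round2w(3) == 4);   // 24 bytes -> 32 bytes: stays 16-byte aligned
  assert(round2w(4) == 4);   // already an even word count: unchanged
  return 0;
}
#endif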


/* -----------------------------------------------------------
  Queue query
----------------------------------------------------------- */


static inline bool mi_page_queue_is_huge(const mi_page_queue_t* pq) {
  return (pq->block_size == (MI_LARGE_OBJ_SIZE_MAX+sizeof(uintptr_t)));
}

static inline bool mi_page_queue_is_full(const mi_page_queue_t* pq) {
  return (pq->block_size == (MI_LARGE_OBJ_SIZE_MAX+(2*sizeof(uintptr_t))));
}

static inline bool mi_page_queue_is_special(const mi_page_queue_t* pq) {
  return (pq->block_size > MI_LARGE_OBJ_SIZE_MAX);
}
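
/* Illustrative sketch (not from the mimalloc sources): the predicates above
   recognize the huge and full queues purely by sentinel `block_size` values
   just past MI_LARGE_OBJ_SIZE_MAX, matching how `_mi_heap_empty` initializes
   `pages[MI_BIN_HUGE]` and `pages[MI_BIN_FULL]`. The struct and constant
   below are simplified stand-ins, not the real types. */
#if 0
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#define LARGE_MAX ((size_t)2 << 20)            // stand-in for MI_LARGE_OBJ_SIZE_MAX
typedef struct { size_t block_size; } queue_t; // stand-in for mi_page_queue_t
int main(void) {
  queue_t huge = { LARGE_MAX + sizeof(uintptr_t) };    // the "huge" sentinel
  queue_t full = { LARGE_MAX + 2*sizeof(uintptr_t) };  // the "full" sentinel
  assert(huge.block_size > LARGE_MAX);         // both count as "special" ...
  assert(full.block_size > LARGE_MAX);
  assert(huge.block_size != full.block_size);  // ... yet stay distinguishable
  return 0;
}
#endif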

/* -----------------------------------------------------------
  Bins
----------------------------------------------------------- */

// Return the bin for a given block size.
// Returns MI_BIN_HUGE if the size is too large.
// We use `wsize` for the size in "machine word sizes",
// i.e. byte size == `wsize*sizeof(void*)`.
static inline uint8_t mi_bin(size_t size) {
  size_t wsize = _mi_wsize_from_size(size);
  uint8_t bin;
  if (wsize <= 1) {
    bin = 1;
  }
  #if defined(MI_ALIGN4W)
  else if (wsize <= 4) {
    bin = (uint8_t)((wsize+1)&~1); // round to double word sizes
  }
  #elif defined(MI_ALIGN2W)
  else if (wsize <= 8) {
    bin = (uint8_t)((wsize+1)&~1); // round to double word sizes
  }
  #else
  else if (wsize <= 8) {
    bin = (uint8_t)wsize;
  }
  #endif
  else if (wsize > MI_LARGE_OBJ_WSIZE_MAX) {
    bin = MI_BIN_HUGE;
  }
  else {
    #if defined(MI_ALIGN4W)
    if (wsize <= 16) { wsize = (wsize+3)&~3; } // round to 4x word sizes
    #endif
    wsize--;
    // find the highest bit
    uint8_t b = (uint8_t)mi_bsr(wsize);  // note: wsize != 0
    // and use the top 3 bits to determine the bin (~12.5% worst internal fragmentation).
    // - adjust with 3 because we do not round the first 8 sizes,
    //   which each get an exact bin
    bin = ((b << 2) + (uint8_t)((wsize >> (b - 2)) & 0x03)) - 3;
    mi_assert_internal(bin < MI_BIN_HUGE);
  }
  mi_assert_internal(bin > 0 && bin <= MI_BIN_HUGE);
  return bin;
}
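
/* Worked example (not from the mimalloc sources): the general case above keeps
   the highest bit of `wsize` plus the next two bits, i.e. four bins per power
   of two. `bin_of` below is a hypothetical standalone copy of that last branch
   (valid for wsize >= 9 with 1-word minimal alignment, so that b >= 3). */
#if 0
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
static uint8_t bsr(size_t x) {           // index of the highest set bit
  uint8_t b = 0;
  while (x >>= 1) b++;
  return b;
}
static uint8_t bin_of(size_t wsize) {    // mirrors the final branch of mi_bin
  wsize--;
  uint8_t b = bsr(wsize);
  return (uint8_t)(((b << 2) + ((wsize >> (b - 2)) & 0x03)) - 3);
}
int main(void) {
  assert(bin_of(9)  == 9  && bin_of(10) == 9);   // 9-10 words share bin 9
  assert(bin_of(11) == 10 && bin_of(12) == 10);  // 11-12 words share bin 10
  assert(bin_of(33) == 17 && bin_of(40) == 17);  // 33-40 words share bin 17
  assert(bin_of(41) == 18);                      // next bin starts at 41 words
  return 0;
}
#endif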


/* -----------------------------------------------------------
  Queue of pages with free blocks
----------------------------------------------------------- */

uint8_t _mi_bin(size_t size) {
  return mi_bin(size);
}

size_t _mi_bin_size(uint8_t bin) {
  return _mi_heap_empty.pages[bin].block_size;
}

// Good size for allocation
size_t mi_good_size(size_t size) mi_attr_noexcept {
  if (size <= MI_LARGE_OBJ_SIZE_MAX) {
    return _mi_bin_size(mi_bin(size + MI_PADDING_SIZE));
  }
  else {
    return _mi_align_up(size + MI_PADDING_SIZE,_mi_os_page_size());
  }
}
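
/* Usage sketch (not from the mimalloc sources): `mi_good_size` is part of the
   public mimalloc API and lets callers size buffers so the whole allocated
   block is usable. The `grow_buffer` routine below is hypothetical; the
   100 -> 112 figure assumes a 64-bit build with MI_PADDING_SIZE == 0. */
#if 0
#include <mimalloc.h>
static void* grow_buffer(void* buf, size_t needed, size_t* capacity) {
  *capacity = mi_good_size(needed);   // e.g. 100 bytes -> the 112-byte bin
  return mi_realloc(buf, *capacity);  // no wasted space inside the block
}
#endif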

#if (MI_DEBUG>1)
static bool mi_page_queue_contains(mi_page_queue_t* queue, const mi_page_t* page) {
  mi_assert_internal(page != NULL);
  mi_page_t* list = queue->first;
  while (list != NULL) {
    mi_assert_internal(list->next == NULL || list->next->prev == list);
    mi_assert_internal(list->prev == NULL || list->prev->next == list);
    if (list == page) break;
    list = list->next;
  }
  return (list == page);
}
#endif

#if (MI_DEBUG>1)
static bool mi_heap_contains_queue(const mi_heap_t* heap, const mi_page_queue_t* pq) {
  return (pq >= &heap->pages[0] && pq <= &heap->pages[MI_BIN_FULL]);
}
#endif

static mi_page_queue_t* mi_heap_page_queue_of(mi_heap_t* heap, const mi_page_t* page) {
  mi_assert_internal(heap!=NULL);
  uint8_t bin = (mi_page_is_in_full(page) ? MI_BIN_FULL : (mi_page_is_huge(page) ? MI_BIN_HUGE : mi_bin(mi_page_block_size(page))));
  mi_assert_internal(bin <= MI_BIN_FULL);
  mi_page_queue_t* pq = &heap->pages[bin];
  mi_assert_internal((mi_page_block_size(page) == pq->block_size) ||
                       (mi_page_is_huge(page) && mi_page_queue_is_huge(pq)) ||
                         (mi_page_is_in_full(page) && mi_page_queue_is_full(pq)));
  return pq;
}

static mi_page_queue_t* mi_page_queue_of(const mi_page_t* page) {
  mi_heap_t* heap = mi_page_heap(page);
  mi_page_queue_t* pq = mi_heap_page_queue_of(heap, page);
  mi_assert_expensive(mi_page_queue_contains(pq, page));
  return pq;
}

// The current small page array is for efficiency and for each
// small size (up to 256) it points directly to the page for that
// size without having to compute the bin. This means when the
// current free page queue is updated for a small bin, we need to update a
// range of entries in `pages_free_direct`.
static inline void mi_heap_queue_first_update(mi_heap_t* heap, const mi_page_queue_t* pq) {
  mi_assert_internal(mi_heap_contains_queue(heap,pq));
  size_t size = pq->block_size;
  if (size > MI_SMALL_SIZE_MAX) return;

  mi_page_t* page = pq->first;
  if (pq->first == NULL) page = (mi_page_t*)&_mi_page_empty;

  // find index in the right direct page array
  size_t start;
  size_t idx = _mi_wsize_from_size(size);
  mi_page_t** pages_free = heap->pages_free_direct;

  if (pages_free[idx] == page) return;  // already set

  // find start slot
  if (idx<=1) {
    start = 0;
  }
  else {
    // find previous size; due to minimal alignment up to 3 previous bins may need to be skipped
    uint8_t bin = mi_bin(size);
    const mi_page_queue_t* prev = pq - 1;
    while( bin == mi_bin(prev->block_size) && prev > &heap->pages[0]) {
      prev--;
    }
    start = 1 + _mi_wsize_from_size(prev->block_size);
    if (start > idx) start = idx;
  }

  // set size range to the right page
  mi_assert(start <= idx);
  for (size_t sz = start; sz <= idx; sz++) {
    pages_free[sz] = page;
  }
}
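
/* Illustrative sketch (not from the mimalloc sources): why a *range* of
   entries is written above. With 8-byte words, the bin with 12-word (96-byte)
   blocks serves both 11- and 12-word requests, so both direct entries must
   point at the same page. The names below are simplified stand-ins. */
#if 0
#include <assert.h>
#include <stddef.h>
typedef struct { int dummy; } page_t;        // stand-in for mi_page_t
int main(void) {
  page_t first_page;                         // first page of the 12-word bin
  page_t* pages_free_direct[129] = { NULL }; // stand-in for heap->pages_free_direct
  size_t start = 11, idx = 12;               // the range computed above
  for (size_t sz = start; sz <= idx; sz++) {
    pages_free_direct[sz] = &first_page;
  }
  assert(pages_free_direct[11] == pages_free_direct[12]);
  return 0;
}
#endif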

/*
static bool mi_page_queue_is_empty(mi_page_queue_t* queue) {
  return (queue->first == NULL);
}
*/

static void mi_page_queue_remove(mi_page_queue_t* queue, mi_page_t* page) {
  mi_assert_internal(page != NULL);
  mi_assert_expensive(mi_page_queue_contains(queue, page));
  mi_assert_internal(mi_page_block_size(page) == queue->block_size ||
                      (mi_page_is_huge(page) && mi_page_queue_is_huge(queue)) ||
                        (mi_page_is_in_full(page) && mi_page_queue_is_full(queue)));
  mi_heap_t* heap = mi_page_heap(page);
  if (page->prev != NULL) page->prev->next = page->next;
  if (page->next != NULL) page->next->prev = page->prev;
  if (page == queue->last)  queue->last = page->prev;
  if (page == queue->first) {
    queue->first = page->next;
    // update first
    mi_assert_internal(mi_heap_contains_queue(heap, queue));
    mi_heap_queue_first_update(heap,queue);
  }
  heap->page_count--;
  page->next = NULL;
  page->prev = NULL;
  // mi_atomic_store_ptr_release(mi_atomic_cast(void*, &page->heap), NULL);
  mi_page_set_in_full(page,false);
}


static void mi_page_queue_push(mi_heap_t* heap, mi_page_queue_t* queue, mi_page_t* page) {
  mi_assert_internal(mi_page_heap(page) == heap);
  mi_assert_internal(!mi_page_queue_contains(queue, page));
  #if MI_HUGE_PAGE_ABANDON
  mi_assert_internal(_mi_page_segment(page)->page_kind != MI_PAGE_HUGE);
  #endif
  mi_assert_internal(mi_page_block_size(page) == queue->block_size ||
                      (mi_page_is_huge(page) && mi_page_queue_is_huge(queue)) ||
                        (mi_page_is_in_full(page) && mi_page_queue_is_full(queue)));

  mi_page_set_in_full(page, mi_page_queue_is_full(queue));
  // mi_atomic_store_ptr_release(mi_atomic_cast(void*, &page->heap), heap);
  page->next = queue->first;
  page->prev = NULL;
  if (queue->first != NULL) {
    mi_assert_internal(queue->first->prev == NULL);
    queue->first->prev = page;
    queue->first = page;
  }
  else {
    queue->first = queue->last = page;
  }

  // update direct
  mi_heap_queue_first_update(heap, queue);
  heap->page_count++;
}
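
/* Design note with sketch (not from the mimalloc sources):
   `mi_page_queue_push` inserts at the *front* of the queue while
   `mi_page_queue_enqueue_from` below appends at the *back*, so freshly pushed
   pages are found first by allocation and transplanted pages last. A minimal
   model of the two disciplines: */
#if 0
#include <assert.h>
#include <stddef.h>
typedef struct node_s { struct node_s* prev; struct node_s* next; } node_t;
typedef struct { node_t* first; node_t* last; } queue_t;
static void push_front(queue_t* q, node_t* n) {  // like mi_page_queue_push
  n->prev = NULL; n->next = q->first;
  if (q->first != NULL) q->first->prev = n; else q->last = n;
  q->first = n;
}
static void push_back(queue_t* q, node_t* n) {   // like mi_page_queue_enqueue_from
  n->next = NULL; n->prev = q->last;
  if (q->last != NULL) q->last->next = n; else q->first = n;
  q->last = n;
}
int main(void) {
  queue_t q = { NULL, NULL };
  node_t a, b, c;
  push_front(&q, &a);
  push_front(&q, &b);  // b now heads the queue, tried first
  push_back(&q, &c);   // c, moved in from another queue, comes last
  assert(q.first == &b && q.last == &c);
  return 0;
}
#endif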


static void mi_page_queue_enqueue_from(mi_page_queue_t* to, mi_page_queue_t* from, mi_page_t* page) {
  mi_assert_internal(page != NULL);
  mi_assert_expensive(mi_page_queue_contains(from, page));
  mi_assert_expensive(!mi_page_queue_contains(to, page));
  const size_t bsize = mi_page_block_size(page);
  MI_UNUSED(bsize);
  mi_assert_internal((bsize == to->block_size && bsize == from->block_size) ||
                     (bsize == to->block_size && mi_page_queue_is_full(from)) ||
                     (bsize == from->block_size && mi_page_queue_is_full(to)) ||
                     (mi_page_is_huge(page) && mi_page_queue_is_huge(to)) ||
                     (mi_page_is_huge(page) && mi_page_queue_is_full(to)));

  mi_heap_t* heap = mi_page_heap(page);
  if (page->prev != NULL) page->prev->next = page->next;
  if (page->next != NULL) page->next->prev = page->prev;
  if (page == from->last)  from->last = page->prev;
  if (page == from->first) {
    from->first = page->next;
    // update first
    mi_assert_internal(mi_heap_contains_queue(heap, from));
    mi_heap_queue_first_update(heap, from);
  }

  page->prev = to->last;
  page->next = NULL;
  if (to->last != NULL) {
    mi_assert_internal(heap == mi_page_heap(to->last));
    to->last->next = page;
    to->last = page;
  }
  else {
    to->first = page;
    to->last = page;
    mi_heap_queue_first_update(heap, to);
  }

  mi_page_set_in_full(page, mi_page_queue_is_full(to));
}

// Only called from `mi_heap_absorb`.
size_t _mi_page_queue_append(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_queue_t* append) {
  mi_assert_internal(mi_heap_contains_queue(heap,pq));
  mi_assert_internal(pq->block_size == append->block_size);

  if (append->first==NULL) return 0;

  // re-assign the appended pages to the new heap, and count them
  size_t count = 0;
  for (mi_page_t* page = append->first; page != NULL; page = page->next) {
    // inline `mi_page_set_heap` to avoid a wrong assertion during absorption;
    // in this case delayed freeing is ok since both the "to" and "from" heap are still alive.
    mi_atomic_store_release(&page->xheap, (uintptr_t)heap);
    // set the flag to delayed free (not overriding NEVER_DELAYED_FREE) which has as a
    // side effect that it spins until any DELAYED_FREEING is finished. This ensures
    // that after appending only the new heap will be used for delayed free operations.
    _mi_page_use_delayed_free(page, MI_USE_DELAYED_FREE, false);
    count++;
  }

  if (pq->last==NULL) {
    // take over afresh
    mi_assert_internal(pq->first==NULL);
    pq->first = append->first;
    pq->last = append->last;
    mi_heap_queue_first_update(heap, pq);
  }
  else {
    // append to end
    mi_assert_internal(pq->last!=NULL);
    mi_assert_internal(append->first!=NULL);
    pq->last->next = append->first;
    append->first->prev = pq->last;
    pq->last = append->last;
  }
  return count;
}
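
/* Usage sketch (not from the mimalloc sources): `mi_heap_absorb` (in heap.c)
   calls the append above once per bin when a heap is deleted, which is how
   `mi_heap_delete` keeps outstanding allocations valid by migrating them to
   the backing heap. The functions used below are public mimalloc API: */
#if 0
#include <mimalloc.h>
int main(void) {
  mi_heap_t* heap = mi_heap_new();
  void* p = mi_heap_malloc(heap, 64);
  mi_heap_delete(heap);   // absorbs the heap's pages into the default heap
  mi_free(p);             // p is still valid after the absorption
  return 0;
}
#endif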