// Copyright (c) 2022 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#ifndef BITCOIN_SUPPORT_ALLOCATORS_POOL_H
#define BITCOIN_SUPPORT_ALLOCATORS_POOL_H

#include <algorithm>
#include <array>
#include <cassert>
#include <cstddef>
#include <iterator>
#include <list>
#include <memory>
#include <new>
#include <type_traits>
#include <utility>

/**
 * A memory resource similar to std::pmr::unsynchronized_pool_resource, but
 * optimized for node-based containers. It has the following properties:
 *
 * * Owns the allocated memory and frees it on destruction, even when deallocate
 *   has not been called on the allocated blocks.
 *
 * * Consists of a number of pools, each one for a different block size.
 *   Each pool holds blocks of uniform size in a freelist.
 *
 * * Exhausting memory in a freelist causes a new allocation of a fixed size chunk.
 *   This chunk is used to carve out blocks.
 *
 * * Block sizes or alignments that can not be served by the pools are allocated
 *   and deallocated by operator new().
 *
 * PoolResource is not thread-safe. It is intended to be used by PoolAllocator.
 *
 * @tparam MAX_BLOCK_SIZE_BYTES Maximum size to allocate with the pool. If larger
 *         sizes are requested, allocation falls back to new().
 *
 * @tparam ALIGN_BYTES Required alignment for the allocations.
 *
 * An example: If you create a PoolResource<128, 8>(262144) and perform a bunch of
 * allocations and deallocate 2 blocks with size 8 bytes, and 3 blocks with size 16,
 * the members will look like this:
 *
 *     m_free_lists                         m_allocated_chunks
 *        ┌───┐                                ┌───┐  ┌────────────-------──────┐
 *        │   │  blocks                        │   ├─►│    262144 B             │
 *        │   │  ┌─────┐  ┌─────┐              └─┬─┘  └────────────-------──────┘
 *        │ 1 ├─►│ 8 B ├─►│ 8 B │                │
 *        │   │  └─────┘  └─────┘                :
 *        │   │                                  │
 *        │   │  ┌─────┐  ┌─────┐  ┌─────┐       ▼
 *        │ 2 ├─►│16 B ├─►│16 B ├─►│16 B │     ┌───┐  ┌─────────────────────────┐
 *        │   │  └─────┘  └─────┘  └─────┘     │   ├─►│          ▲              │ ▲
 *        │   │                                └───┘  └──────────┬──────────────┘ │
 *        │ . │                                                  │    m_available_memory_end
 *        │ . │                                         m_available_memory_it
 *        │ . │
 *        │   │
 *        │   │
 *        │16 │
 *        └───┘
 *
 * Here m_free_lists[1] holds the 2 blocks of size 8 bytes, and m_free_lists[2]
 * holds the 3 blocks of size 16. The blocks came from the data stored in the
 * m_allocated_chunks list. Each chunk is 262144 bytes. The last chunk still has
 * some memory available for the blocks, and when m_available_memory_it is at the
 * end, a new chunk will be allocated and added to the list.
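 *
 * A minimal usage sketch (illustrative only; PoolResource is normally driven through
 * PoolAllocator rather than called directly, and the sizes below are just examples):
 *
 *     PoolResource<128, 8> resource{262144};
 *     void* a = resource.Allocate(16, 8);   // carved out of the current chunk (or taken from a freelist)
 *     resource.Deallocate(a, 16, 8);        // pushed onto m_free_lists[2]
 *     void* b = resource.Allocate(16, 8);   // reuses the block that was just freed
 *     resource.Deallocate(b, 16, 8);
 *     void* c = resource.Allocate(512, 8);  // 512 > MAX_BLOCK_SIZE_BYTES, so falls back to ::operator new()
 *     resource.Deallocate(c, 512, 8);       // forwarded to ::operator delete()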
 */
template <std::size_t MAX_BLOCK_SIZE_BYTES, std::size_t ALIGN_BYTES>
class PoolResource final
{
    static_assert(ALIGN_BYTES > 0, "ALIGN_BYTES must be nonzero");
    static_assert((ALIGN_BYTES & (ALIGN_BYTES - 1)) == 0, "ALIGN_BYTES must be a power of two");

    /**
     * In-place linked list of the allocations, used for the freelist.
     */
    struct ListNode {
        ListNode* m_next;

        explicit ListNode(ListNode* next) : m_next(next) {}
    };
    static_assert(std::is_trivially_destructible_v<ListNode>, "Make sure we don't need to manually call a destructor");

    /**
     * Internal alignment value. The larger of the requested ALIGN_BYTES and alignof(ListNode).
     */
    static constexpr std::size_t ELEM_ALIGN_BYTES = std::max(alignof(ListNode), ALIGN_BYTES);
    static_assert((ELEM_ALIGN_BYTES & (ELEM_ALIGN_BYTES - 1)) == 0, "ELEM_ALIGN_BYTES must be a power of two");
    static_assert(sizeof(ListNode) <= ELEM_ALIGN_BYTES, "Units of size ELEM_ALIGN_BYTES need to be able to store a ListNode");
    static_assert((MAX_BLOCK_SIZE_BYTES & (ELEM_ALIGN_BYTES - 1)) == 0, "MAX_BLOCK_SIZE_BYTES needs to be a multiple of the alignment.");

    /**
     * Size in bytes to allocate per chunk
     */
    const size_t m_chunk_size_bytes;

    /**
     * Contains all allocated pools of memory, used to free the data in the destructor.
     */
    std::list<std::byte*> m_allocated_chunks{};

    /**
     * Singly linked lists of all data that came from deallocating.
     * m_free_lists[n] will serve blocks of size n*ELEM_ALIGN_BYTES.
     */
    std::array<ListNode*, MAX_BLOCK_SIZE_BYTES / ELEM_ALIGN_BYTES + 1> m_free_lists{};

    /**
     * Points to the beginning of available memory for carving out allocations.
     */
    std::byte* m_available_memory_it = nullptr;

    /**
     * Points to the end of available memory for carving out allocations.
     *
     * That member variable is redundant, and is always equal to `m_allocated_chunks.back() + m_chunk_size_bytes`
     * whenever it is accessed, but `m_available_memory_end` caches this for clarity and efficiency.
     */
    std::byte* m_available_memory_end = nullptr;

    /**
     * How many multiples of ELEM_ALIGN_BYTES are necessary to fit bytes. We use that result directly as an index
     * into m_free_lists. Round up for the special case when bytes==0.
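     *
     * For example, with ELEM_ALIGN_BYTES == 8: NumElemAlignBytes(1) == 1, NumElemAlignBytes(8) == 1,
     * NumElemAlignBytes(9) == 2, and NumElemAlignBytes(0) == 1.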
     */
    [[nodiscard]] static constexpr std::size_t NumElemAlignBytes(std::size_t bytes)
    {
        return (bytes + ELEM_ALIGN_BYTES - 1) / ELEM_ALIGN_BYTES + (bytes == 0);
    }

    /**
     * True when it is possible to make use of the freelist
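     *
     * For example, in a PoolResource<128, 8> (so ELEM_ALIGN_BYTES is 8 on typical platforms), a request
     * of (bytes=40, alignment=8) can use the freelists, while (bytes=200, alignment=8) or
     * (bytes=40, alignment=16) cannot.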
     */
    [[nodiscard]] static constexpr bool IsFreeListUsable(std::size_t bytes, std::size_t alignment)
    {
        return alignment <= ELEM_ALIGN_BYTES && bytes <= MAX_BLOCK_SIZE_BYTES;
    }

    /**
     * Replaces node with placement constructed ListNode that points to the previous node
     */
    void PlacementAddToList(void* p, ListNode*& node)
    {
        node = new (p) ListNode{node};
    }

    /**
     * Allocate one full memory chunk which will be used to carve out allocations.
     * Also puts any leftover bytes into the freelist.
     *
     * Precondition: leftover bytes are either 0 or few enough to fit into a place in the freelist
     */
    void AllocateChunk()
    {
        // if there is still any available memory left, put it into the freelist.
        size_t remaining_available_bytes = std::distance(m_available_memory_it, m_available_memory_end);
        if (0 != remaining_available_bytes) {
            PlacementAddToList(m_available_memory_it, m_free_lists[remaining_available_bytes / ELEM_ALIGN_BYTES]);
        }

        void* storage = ::operator new (m_chunk_size_bytes, std::align_val_t{ELEM_ALIGN_BYTES});
        m_available_memory_it = new (storage) std::byte[m_chunk_size_bytes];
        m_available_memory_end = m_available_memory_it + m_chunk_size_bytes;
        m_allocated_chunks.emplace_back(m_available_memory_it);
    }

    /**
     * Access to internals for testing purpose only
     */
    friend class PoolResourceTester;

public:
    /**
     * Construct a new PoolResource object which allocates the first chunk.
     * chunk_size_bytes will be rounded up to the next multiple of ELEM_ALIGN_BYTES.
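     *
     * For example, with ELEM_ALIGN_BYTES == 8 a requested chunk_size_bytes of 262144 is kept as-is,
     * while 262145 would be rounded up to 262152.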
     */
    explicit PoolResource(std::size_t chunk_size_bytes)
        : m_chunk_size_bytes(NumElemAlignBytes(chunk_size_bytes) * ELEM_ALIGN_BYTES)
    {
        assert(m_chunk_size_bytes >= MAX_BLOCK_SIZE_BYTES);
        AllocateChunk();
    }

    /**
     * Construct a new PoolResource object, defaulting to a chunk size of 2^18 = 262144 bytes.
     */
    PoolResource() : PoolResource(262144) {}

    /**
     * Disable copy & move semantics, these are not supported for the resource.
     */
    PoolResource(const PoolResource&) = delete;
    PoolResource& operator=(const PoolResource&) = delete;
    PoolResource(PoolResource&&) = delete;
    PoolResource& operator=(PoolResource&&) = delete;

    /**
     * Deallocates all memory associated with the memory resource.
     */
    ~PoolResource()
    {
        for (std::byte* chunk : m_allocated_chunks) {
            std::destroy(chunk, chunk + m_chunk_size_bytes);
            ::operator delete ((void*)chunk, std::align_val_t{ELEM_ALIGN_BYTES});
        }
    }

    /**
     * Allocates a block of bytes. If possible the freelist is used, otherwise allocation
     * is forwarded to ::operator new().
     */
    void* Allocate(std::size_t bytes, std::size_t alignment)
    {
        if (IsFreeListUsable(bytes, alignment)) {
            const std::size_t num_alignments = NumElemAlignBytes(bytes);
            if (nullptr != m_free_lists[num_alignments]) {
                // we've already got data in the pool's freelist, unlink one element and return the pointer
                // to the unlinked memory. Since ListNode is trivially destructible we can just treat it as
                // uninitialized memory.
                return std::exchange(m_free_lists[num_alignments], m_free_lists[num_alignments]->m_next);
            }

            // freelist is empty: get one allocation from allocated chunk memory.
            const std::ptrdiff_t round_bytes = static_cast<std::ptrdiff_t>(num_alignments * ELEM_ALIGN_BYTES);
            if (round_bytes > m_available_memory_end - m_available_memory_it) {
                // slow path, only happens when a new chunk needs to be allocated
                AllocateChunk();
            }

            // Make sure we use the right amount of bytes for that freelist (might be rounded up).
            return std::exchange(m_available_memory_it, m_available_memory_it + round_bytes);
        }

        // Can't use the pool => use operator new()
        return ::operator new (bytes, std::align_val_t{alignment});
    }

    /**
     * Returns a block to the freelists, or deletes the block when it did not come from the chunks.
     */
    void Deallocate(void* p, std::size_t bytes, std::size_t alignment) noexcept
    {
        if (IsFreeListUsable(bytes, alignment)) {
            const std::size_t num_alignments = NumElemAlignBytes(bytes);
            // put the memory block into the linked list. We can placement construct the ListNode
            // into the memory since we can be sure the alignment is correct.
            PlacementAddToList(p, m_free_lists[num_alignments]);
        } else {
            // Can't use the pool => forward deallocation to ::operator delete().
            ::operator delete (p, std::align_val_t{alignment});
        }
    }

    /**
     * Number of allocated chunks
     */
    [[nodiscard]] std::size_t NumAllocatedChunks() const
    {
        return m_allocated_chunks.size();
    }

    /**
     * Size in bytes to allocate per chunk. Fixed at construction time.
     */
    [[nodiscard]] size_t ChunkSizeBytes() const
    {
        return m_chunk_size_bytes;
    }
};


/**
 * Forwards all allocations/deallocations to the PoolResource.
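 *
 * A usage sketch (illustrative only; the element type, block size, and alignment below are
 * hypothetical examples and would normally be sized for the container's node type, assuming
 * <unordered_map> and <cstdint> are included):
 *
 *     PoolResource<128, 8> resource{};
 *     PoolAllocator<std::pair<const uint64_t, uint64_t>, 128, 8> alloc{&resource};
 *     std::unordered_map<uint64_t, uint64_t, std::hash<uint64_t>, std::equal_to<uint64_t>,
 *                        decltype(alloc)> map{alloc};
 *
 * Node allocations made by the map then go through the pool, while anything too large or
 * mis-aligned (e.g. a large bucket array) falls back to ::operator new().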
 */
template <class T, std::size_t MAX_BLOCK_SIZE_BYTES, std::size_t ALIGN_BYTES = alignof(T)>
class PoolAllocator
{
    PoolResource<MAX_BLOCK_SIZE_BYTES, ALIGN_BYTES>* m_resource;

    template <typename U, std::size_t M, std::size_t A>
    friend class PoolAllocator;

public:
    using value_type = T;
    using ResourceType = PoolResource<MAX_BLOCK_SIZE_BYTES, ALIGN_BYTES>;

    /**
     * Not explicit so we can easily construct it with the correct resource
     */
    PoolAllocator(ResourceType* resource) noexcept
        : m_resource(resource)
    {
    }

    PoolAllocator(const PoolAllocator& other) noexcept = default;
    PoolAllocator& operator=(const PoolAllocator& other) noexcept = default;

    template <class U>
    PoolAllocator(const PoolAllocator<U, MAX_BLOCK_SIZE_BYTES, ALIGN_BYTES>& other) noexcept
        : m_resource(other.resource())
    {
    }

    /**
     * The rebind struct here is mandatory because we use non-type template arguments for
     * PoolAllocator. See https://en.cppreference.com/w/cpp/named_req/Allocator#cite_note-2
     */
    template <typename U>
    struct rebind {
        using other = PoolAllocator<U, MAX_BLOCK_SIZE_BYTES, ALIGN_BYTES>;
    };

    /**
     * Forwards each call to the resource.
     */
    T* allocate(size_t n)
    {
        return static_cast<T*>(m_resource->Allocate(n * sizeof(T), alignof(T)));
    }

    /**
     * Forwards each call to the resource.
     */
    void deallocate(T* p, size_t n) noexcept
    {
        m_resource->Deallocate(p, n * sizeof(T), alignof(T));
    }

    ResourceType* resource() const noexcept
    {
        return m_resource;
    }
};

template <class T1, class T2, std::size_t MAX_BLOCK_SIZE_BYTES, std::size_t ALIGN_BYTES>
bool operator==(const PoolAllocator<T1, MAX_BLOCK_SIZE_BYTES, ALIGN_BYTES>& a,
                const PoolAllocator<T2, MAX_BLOCK_SIZE_BYTES, ALIGN_BYTES>& b) noexcept
{
    return a.resource() == b.resource();
}

template <class T1, class T2, std::size_t MAX_BLOCK_SIZE_BYTES, std::size_t ALIGN_BYTES>
bool operator!=(const PoolAllocator<T1, MAX_BLOCK_SIZE_BYTES, ALIGN_BYTES>& a,
                const PoolAllocator<T2, MAX_BLOCK_SIZE_BYTES, ALIGN_BYTES>& b) noexcept
{
    return !(a == b);
}

#endif // BITCOIN_SUPPORT_ALLOCATORS_POOL_H