// pool.h
// Copyright (c) 2022 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#ifndef BITCOIN_SUPPORT_ALLOCATORS_POOL_H
#define BITCOIN_SUPPORT_ALLOCATORS_POOL_H

#include <array>
#include <cassert>
#include <cstddef>
#include <list>
#include <memory>
#include <new>
#include <type_traits>
#include <utility>

#include <util/check.h>

// The ASAN_(UN)POISON_MEMORY_REGION annotations used below are provided by the
// AddressSanitizer interface header when building with ASan; otherwise define
// them as no-ops so the annotations compile away.
#if defined(__has_feature)
#if __has_feature(address_sanitizer)
#include <sanitizer/asan_interface.h>
#endif
#elif defined(__SANITIZE_ADDRESS__)
#include <sanitizer/asan_interface.h>
#endif
#ifndef ASAN_POISON_MEMORY_REGION
#define ASAN_POISON_MEMORY_REGION(addr, size) ((void)(addr), (void)(size))
#define ASAN_UNPOISON_MEMORY_REGION(addr, size) ((void)(addr), (void)(size))
#endif

/**
 * A memory resource similar to std::pmr::unsynchronized_pool_resource, but
 * optimized for node-based containers. It has the following properties:
 *
 * * Owns the allocated memory and frees it on destruction, even when deallocate
 *   has not been called on the allocated blocks.
 *
 * * Consists of a number of pools, each one for a different block size.
 *   Each pool holds blocks of uniform size in a freelist.
 *
 * * Exhausting memory in a freelist causes a new allocation of a fixed size chunk.
 *   This chunk is used to carve out blocks.
 *
 * * Block sizes or alignments that can not be served by the pools are allocated
 *   and deallocated by operator new().
 *
 * PoolResource is not thread-safe. It is intended to be used by PoolAllocator.
 *
 * @tparam MAX_BLOCK_SIZE_BYTES Maximum size to allocate with the pool. If larger
 *         sizes are requested, allocation falls back to new().
 *
 * @tparam ALIGN_BYTES Required alignment for the allocations.
41 * 42 * An example: If you create a PoolResource<128, 8>(262144) and perform a bunch of 43 * allocations and deallocate 2 blocks with size 8 bytes, and 3 blocks with size 16, 44 * the members will look like this: 45 * 46 * m_free_lists m_allocated_chunks 47 * ┌───┐ ┌───┐ ┌────────────-------──────┐ 48 * │ │ blocks │ ├─►│ 262144 B │ 49 * │ │ ┌─────┐ ┌─────┐ └─┬─┘ └────────────-------──────┘ 50 * │ 1 ├─►│ 8 B ├─►│ 8 B │ │ 51 * │ │ └─────┘ └─────┘ : 52 * │ │ │ 53 * │ │ ┌─────┐ ┌─────┐ ┌─────┐ ▼ 54 * │ 2 ├─►│16 B ├─►│16 B ├─►│16 B │ ┌───┐ ┌─────────────────────────┐ 55 * │ │ └─────┘ └─────┘ └─────┘ │ ├─►│ ▲ │ ▲ 56 * │ │ └───┘ └──────────┬──────────────┘ │ 57 * │ . │ │ m_available_memory_end 58 * │ . │ m_available_memory_it 59 * │ . │ 60 * │ │ 61 * │ │ 62 * │16 │ 63 * └───┘ 64 * 65 * Here m_free_lists[1] holds the 2 blocks of size 8 bytes, and m_free_lists[2] 66 * holds the 3 blocks of size 16. The blocks came from the data stored in the 67 * m_allocated_chunks list. Each chunk has bytes 262144. The last chunk has still 68 * some memory available for the blocks, and when m_available_memory_it is at the 69 * end, a new chunk will be allocated and added to the list. 70 */ 71 template <std::size_t MAX_BLOCK_SIZE_BYTES, std::size_t ALIGN_BYTES> 72 class PoolResource final 73 { 74 static_assert(ALIGN_BYTES > 0, "ALIGN_BYTES must be nonzero"); 75 static_assert((ALIGN_BYTES & (ALIGN_BYTES - 1)) == 0, "ALIGN_BYTES must be a power of two"); 76 77 /** 78 * In-place linked list of the allocations, used for the freelist. 79 */ 80 struct ListNode { 81 ListNode* m_next; 82 83 explicit ListNode(ListNode* next) : m_next(next) {} 84 }; 85 static_assert(std::is_trivially_destructible_v<ListNode>, "Make sure we don't need to manually call a destructor"); 86 87 /** 88 * Internal alignment value. The larger of the requested ALIGN_BYTES and alignof(FreeList). 
89 */ 90 static constexpr std::size_t ELEM_ALIGN_BYTES = std::max(alignof(ListNode), ALIGN_BYTES); 91 static_assert((ELEM_ALIGN_BYTES & (ELEM_ALIGN_BYTES - 1)) == 0, "ELEM_ALIGN_BYTES must be a power of two"); 92 static_assert(sizeof(ListNode) <= ELEM_ALIGN_BYTES, "Units of size ELEM_SIZE_ALIGN need to be able to store a ListNode"); 93 static_assert((MAX_BLOCK_SIZE_BYTES & (ELEM_ALIGN_BYTES - 1)) == 0, "MAX_BLOCK_SIZE_BYTES needs to be a multiple of the alignment."); 94 95 /** 96 * Size in bytes to allocate per chunk 97 */ 98 const size_t m_chunk_size_bytes; 99 100 /** 101 * Contains all allocated pools of memory, used to free the data in the destructor. 102 */ 103 std::list<std::byte*> m_allocated_chunks{}; 104 105 /** 106 * Single linked lists of all data that came from deallocating. 107 * m_free_lists[n] will serve blocks of size n*ELEM_ALIGN_BYTES. 108 */ 109 std::array<ListNode*, MAX_BLOCK_SIZE_BYTES / ELEM_ALIGN_BYTES + 1> m_free_lists{}; 110 111 /** 112 * Points to the beginning of available memory for carving out allocations. 113 */ 114 std::byte* m_available_memory_it = nullptr; 115 116 /** 117 * Points to the end of available memory for carving out allocations. 118 * 119 * That member variable is redundant, and is always equal to `m_allocated_chunks.back() + m_chunk_size_bytes` 120 * whenever it is accessed, but `m_available_memory_end` caches this for clarity and efficiency. 121 */ 122 std::byte* m_available_memory_end = nullptr; 123 124 /** 125 * How many multiple of ELEM_ALIGN_BYTES are necessary to fit bytes. We use that result directly as an index 126 * into m_free_lists. Round up for the special case when bytes==0. 
127 */ 128 [[nodiscard]] static constexpr std::size_t NumElemAlignBytes(std::size_t bytes) 129 { 130 return (bytes + ELEM_ALIGN_BYTES - 1) / ELEM_ALIGN_BYTES + (bytes == 0); 131 } 132 133 /** 134 * True when it is possible to make use of the freelist 135 */ 136 [[nodiscard]] static constexpr bool IsFreeListUsable(std::size_t bytes, std::size_t alignment) 137 { 138 return alignment <= ELEM_ALIGN_BYTES && bytes <= MAX_BLOCK_SIZE_BYTES; 139 } 140 141 /** 142 * Replaces node with placement constructed ListNode that points to the previous node 143 */ 144 void PlacementAddToList(void* p, ListNode*& node) 145 { 146 node = new (p) ListNode{node}; 147 } 148 149 /** 150 * Allocate one full memory chunk which will be used to carve out allocations. 151 * Also puts any leftover bytes into the freelist. 152 * 153 * Precondition: leftover bytes are either 0 or few enough to fit into a place in the freelist 154 */ 155 void AllocateChunk() 156 { 157 // if there is still any available memory left, put it into the freelist. 
158 size_t remaining_available_bytes = std::distance(m_available_memory_it, m_available_memory_end); 159 if (0 != remaining_available_bytes) { 160 ASAN_UNPOISON_MEMORY_REGION(m_available_memory_it, sizeof(ListNode)); 161 PlacementAddToList(m_available_memory_it, m_free_lists[remaining_available_bytes / ELEM_ALIGN_BYTES]); 162 ASAN_POISON_MEMORY_REGION(m_available_memory_it, sizeof(ListNode)); 163 } 164 165 void* storage = ::operator new (m_chunk_size_bytes, std::align_val_t{ELEM_ALIGN_BYTES}); 166 m_available_memory_it = new (storage) std::byte[m_chunk_size_bytes]; 167 m_available_memory_end = m_available_memory_it + m_chunk_size_bytes; 168 ASAN_POISON_MEMORY_REGION(m_available_memory_it, m_chunk_size_bytes); 169 m_allocated_chunks.emplace_back(m_available_memory_it); 170 } 171 172 /** 173 * Access to internals for testing purpose only 174 */ 175 friend class PoolResourceTester; 176 177 public: 178 /** 179 * Construct a new PoolResource object which allocates the first chunk. 180 * chunk_size_bytes will be rounded up to next multiple of ELEM_ALIGN_BYTES. 181 */ 182 explicit PoolResource(std::size_t chunk_size_bytes) 183 : m_chunk_size_bytes(NumElemAlignBytes(chunk_size_bytes) * ELEM_ALIGN_BYTES) 184 { 185 assert(m_chunk_size_bytes >= MAX_BLOCK_SIZE_BYTES); 186 AllocateChunk(); 187 } 188 189 /** 190 * Construct a new Pool Resource object, defaults to 2^18=262144 chunk size. 191 */ 192 PoolResource() : PoolResource(262144) {} 193 194 /** 195 * Disable copy & move semantics, these are not supported for the resource. 196 */ 197 PoolResource(const PoolResource&) = delete; 198 PoolResource& operator=(const PoolResource&) = delete; 199 PoolResource(PoolResource&&) = delete; 200 PoolResource& operator=(PoolResource&&) = delete; 201 202 /** 203 * Deallocates all memory allocated associated with the memory resource. 
204 */ 205 ~PoolResource() 206 { 207 for (std::byte* chunk : m_allocated_chunks) { 208 std::destroy(chunk, chunk + m_chunk_size_bytes); 209 ::operator delete ((void*)chunk, std::align_val_t{ELEM_ALIGN_BYTES}); 210 ASAN_UNPOISON_MEMORY_REGION(chunk, m_chunk_size_bytes); 211 } 212 } 213 214 /** 215 * Allocates a block of bytes. If possible the freelist is used, otherwise allocation 216 * is forwarded to ::operator new(). 217 */ 218 void* Allocate(std::size_t bytes, std::size_t alignment) 219 { 220 if (IsFreeListUsable(bytes, alignment)) { 221 const std::size_t num_alignments = NumElemAlignBytes(bytes); 222 if (nullptr != m_free_lists[num_alignments]) { 223 // we've already got data in the pool's freelist, unlink one element and return the pointer 224 // to the unlinked memory. Since FreeList is trivially destructible we can just treat it as 225 // uninitialized memory. 226 ASAN_UNPOISON_MEMORY_REGION(m_free_lists[num_alignments], sizeof(ListNode)); 227 auto* next{m_free_lists[num_alignments]->m_next}; 228 ASAN_POISON_MEMORY_REGION(m_free_lists[num_alignments], sizeof(ListNode)); 229 ASAN_UNPOISON_MEMORY_REGION(m_free_lists[num_alignments], bytes); 230 return std::exchange(m_free_lists[num_alignments], next); 231 } 232 233 // freelist is empty: get one allocation from allocated chunk memory. 
234 const std::ptrdiff_t round_bytes = static_cast<std::ptrdiff_t>(num_alignments * ELEM_ALIGN_BYTES); 235 if (round_bytes > m_available_memory_end - m_available_memory_it) { 236 // slow path, only happens when a new chunk needs to be allocated 237 AllocateChunk(); 238 } 239 240 // Make sure we use the right amount of bytes for that freelist (might be rounded up), 241 ASAN_UNPOISON_MEMORY_REGION(m_available_memory_it, round_bytes); 242 return std::exchange(m_available_memory_it, m_available_memory_it + round_bytes); 243 } 244 245 // Can't use the pool => use operator new() 246 return ::operator new (bytes, std::align_val_t{alignment}); 247 } 248 249 /** 250 * Returns a block to the freelists, or deletes the block when it did not come from the chunks. 251 */ 252 void Deallocate(void* p, std::size_t bytes, std::size_t alignment) noexcept 253 { 254 if (IsFreeListUsable(bytes, alignment)) { 255 const std::size_t num_alignments = NumElemAlignBytes(bytes); 256 // put the memory block into the linked list. We can placement construct the FreeList 257 // into the memory since we can be sure the alignment is correct. 258 ASAN_UNPOISON_MEMORY_REGION(p, sizeof(ListNode)); 259 PlacementAddToList(p, m_free_lists[num_alignments]); 260 ASAN_POISON_MEMORY_REGION(p, std::max(bytes, sizeof(ListNode))); 261 } else { 262 // Can't use the pool => forward deallocation to ::operator delete(). 263 ::operator delete (p, std::align_val_t{alignment}); 264 } 265 } 266 267 /** 268 * Number of allocated chunks 269 */ 270 [[nodiscard]] std::size_t NumAllocatedChunks() const 271 { 272 return m_allocated_chunks.size(); 273 } 274 275 /** 276 * Size in bytes to allocate per chunk, currently hardcoded to a fixed size. 277 */ 278 [[nodiscard]] size_t ChunkSizeBytes() const 279 { 280 return m_chunk_size_bytes; 281 } 282 }; 283 284 285 /** 286 * Forwards all allocations/deallocations to the PoolResource. 
287 */ 288 template <class T, std::size_t MAX_BLOCK_SIZE_BYTES, std::size_t ALIGN_BYTES = alignof(T)> 289 class PoolAllocator 290 { 291 PoolResource<MAX_BLOCK_SIZE_BYTES, ALIGN_BYTES>* m_resource; 292 293 template <typename U, std::size_t M, std::size_t A> 294 friend class PoolAllocator; 295 296 public: 297 using value_type = T; 298 using ResourceType = PoolResource<MAX_BLOCK_SIZE_BYTES, ALIGN_BYTES>; 299 300 /** 301 * Not explicit so we can easily construct it with the correct resource 302 */ 303 PoolAllocator(ResourceType* resource) noexcept 304 : m_resource(resource) 305 { 306 } 307 308 PoolAllocator(const PoolAllocator& other) noexcept = default; 309 PoolAllocator& operator=(const PoolAllocator& other) noexcept = default; 310 311 template <class U> 312 PoolAllocator(const PoolAllocator<U, MAX_BLOCK_SIZE_BYTES, ALIGN_BYTES>& other) noexcept 313 : m_resource(other.resource()) 314 { 315 } 316 317 /** 318 * The rebind struct here is mandatory because we use non type template arguments for 319 * PoolAllocator. See https://en.cppreference.com/w/cpp/named_req/Allocator#cite_note-2 320 */ 321 template <typename U> 322 struct rebind { 323 using other = PoolAllocator<U, MAX_BLOCK_SIZE_BYTES, ALIGN_BYTES>; 324 }; 325 326 /** 327 * Forwards each call to the resource. 328 */ 329 T* allocate(size_t n) 330 { 331 return static_cast<T*>(m_resource->Allocate(n * sizeof(T), alignof(T))); 332 } 333 334 /** 335 * Forwards each call to the resource. 
336 */ 337 void deallocate(T* p, size_t n) noexcept 338 { 339 m_resource->Deallocate(p, n * sizeof(T), alignof(T)); 340 } 341 342 ResourceType* resource() const noexcept 343 { 344 return m_resource; 345 } 346 }; 347 348 template <class T1, class T2, std::size_t MAX_BLOCK_SIZE_BYTES, std::size_t ALIGN_BYTES> 349 bool operator==(const PoolAllocator<T1, MAX_BLOCK_SIZE_BYTES, ALIGN_BYTES>& a, 350 const PoolAllocator<T2, MAX_BLOCK_SIZE_BYTES, ALIGN_BYTES>& b) noexcept 351 { 352 return a.resource() == b.resource(); 353 } 354 355 template <class T1, class T2, std::size_t MAX_BLOCK_SIZE_BYTES, std::size_t ALIGN_BYTES> 356 bool operator!=(const PoolAllocator<T1, MAX_BLOCK_SIZE_BYTES, ALIGN_BYTES>& a, 357 const PoolAllocator<T2, MAX_BLOCK_SIZE_BYTES, ALIGN_BYTES>& b) noexcept 358 { 359 return !(a == b); 360 } 361 362 #endif // BITCOIN_SUPPORT_ALLOCATORS_POOL_H