// Copyright (c) 2016-present The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#ifndef BITCOIN_SUPPORT_LOCKEDPOOL_H
#define BITCOIN_SUPPORT_LOCKEDPOOL_H

#include <cstddef>
#include <list>
#include <map>
#include <memory>
#include <mutex>
#include <unordered_map>

/**
 * OS-dependent allocation and deallocation of locked/pinned memory pages.
 * Abstract base class.
 */
class LockedPageAllocator
{
public:
    virtual ~LockedPageAllocator() = default;
    /** Allocate and lock memory pages.
     * If len is not a multiple of the system page size, it is rounded up.
     * Returns nullptr in case of allocation failure.
     *
     * If locking the memory pages cannot be accomplished, the memory is still
     * returned, but the lockingSuccess flag will be set to false.
     * lockingSuccess is undefined if the allocation fails.
     */
    virtual void* AllocateLocked(size_t len, bool *lockingSuccess) = 0;

    /** Unlock and free memory pages.
     * Clear the memory before unlocking.
     */
    virtual void FreeLocked(void* addr, size_t len) = 0;

    /** Get the total limit on the amount of memory that may be locked by this
     * process, in bytes. Return size_t max if there is no limit or the limit
     * is unknown. Return 0 if no memory can be locked at all.
     */
    virtual size_t GetLimit() = 0;
};
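
// The interface above maps naturally onto POSIX mmap(2)/mlock(2). The sketch
// below is illustrative only: the class name is hypothetical, the real
// platform-specific implementations live in lockedpool.cpp, and a production
// FreeLocked() must clear memory with a non-optimizable routine such as
// memory_cleanse() rather than plain memset().
//
//     #include <sys/mman.h>      // mmap, munmap, mlock, munlock
//     #include <sys/resource.h>  // getrlimit, RLIMIT_MEMLOCK
//     #include <cstring>         // memset
//     #include <limits>
//
//     class ExamplePosixLockedPageAllocator : public LockedPageAllocator
//     {
//     public:
//         void* AllocateLocked(size_t len, bool* lockingSuccess) override
//         {
//             // mmap rounds len up to a whole number of pages for us.
//             void* addr = mmap(nullptr, len, PROT_READ | PROT_WRITE,
//                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
//             if (addr == MAP_FAILED) return nullptr;
//             // Pin the pages in RAM; report failure through the out-flag.
//             *lockingSuccess = mlock(addr, len) == 0;
//             return addr;
//         }
//         void FreeLocked(void* addr, size_t len) override
//         {
//             std::memset(addr, 0, len); // see memory_cleanse() caveat above
//             munlock(addr, len);
//             munmap(addr, len);
//         }
//         size_t GetLimit() override
//         {
//             rlimit limit;
//             if (getrlimit(RLIMIT_MEMLOCK, &limit) == 0 &&
//                 limit.rlim_cur != RLIM_INFINITY) {
//                 return limit.rlim_cur;
//             }
//             return std::numeric_limits<size_t>::max();
//         }
//     };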

/** An arena manages a contiguous region of memory by dividing it into
 * chunks.
 */
class Arena
{
public:
    Arena(void *base, size_t size, size_t alignment);
    virtual ~Arena();

    Arena(const Arena& other) = delete; // non construction-copyable
    Arena& operator=(const Arena&) = delete; // non copyable

    /** Memory statistics. */
    struct Stats
    {
        size_t used;
        size_t free;
        size_t total;
        size_t chunks_used;
        size_t chunks_free;
    };

    /** Allocate size bytes from this arena.
     * Returns a pointer on success, or nullptr if the arena is full or
     * the requested size is 0.
     */
    void* alloc(size_t size);

    /** Free a previously allocated chunk of memory.
     * Freeing a nullptr has no effect.
     * Throws std::runtime_error on an invalid or double free.
     */
    void free(void *ptr);

    /** Get arena usage statistics */
    Stats stats() const;

#ifdef ARENA_DEBUG
    void walk() const;
#endif

    /** Return whether a pointer points inside this arena.
     * This checks base <= ptr < (base+size), so use it only for (inclusive)
     * chunk starting addresses.
     */
    bool addressInArena(void *ptr) const { return ptr >= base && ptr < end; }
private:
    typedef std::multimap<size_t, void*> SizeToChunkSortedMap;
    /** Map to enable O(log(n)) best-fit allocation, as it's sorted by size */
    SizeToChunkSortedMap size_to_free_chunk;

    typedef std::unordered_map<void*, SizeToChunkSortedMap::const_iterator> ChunkToSizeMap;
    /** Map from begin of free chunk to its node in size_to_free_chunk */
    ChunkToSizeMap chunks_free;
    /** Map from end of free chunk to its node in size_to_free_chunk */
    ChunkToSizeMap chunks_free_end;

    /** Map from begin of used chunk to its size */
    std::unordered_map<void*, size_t> chunks_used;

    /** Base address of arena */
    void* base;
    /** End address of arena */
    void* end;
    /** Minimum chunk alignment */
    size_t alignment;
};

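// To illustrate how the three indexes above cooperate (a simplified sketch of
// the alloc/free logic in lockedpool.cpp, not a verbatim excerpt): best-fit
// lookup is a single lower_bound on the size-sorted multimap, and the
// begin/end-keyed maps let free() coalesce a chunk with both of its
// neighbors via O(1) hash lookups.
//
//     // In alloc(size), after rounding size up to a multiple of `alignment`:
//     auto size_ptr_it = size_to_free_chunk.lower_bound(size);
//     if (size_ptr_it == size_to_free_chunk.end())
//         return nullptr; // no free chunk is large enough
//     // Carve the allocation from the chosen chunk; any remainder is
//     // re-registered in size_to_free_chunk, chunks_free and chunks_free_end.
//
//     // In free(ptr), after moving the chunk out of chunks_used:
//     // - a hit for the chunk's begin address in chunks_free_end means a free
//     //   neighbor ends exactly where this chunk starts (merge left);
//     // - a hit for the chunk's end address in chunks_free means a free
//     //   neighbor starts exactly where this chunk ends (merge right).
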
/** Pool for locked memory chunks.
 *
 * To prevent sensitive key data from being swapped to disk, the memory in this
 * pool is locked/pinned.
 *
 * An arena manages a contiguous region of memory. The pool starts out with one
 * arena but can grow to multiple arenas if the need arises.
 *
 * Unlike a normal C heap, the administrative structures are kept separate from
 * the managed memory. This is possible because the sizes and base addresses of
 * objects are not in themselves sensitive information, and it conserves
 * precious locked memory: on some operating systems the amount of memory that
 * can be locked is small.
 */
class LockedPool
{
public:
    /** Size of one arena of locked memory. This is a compromise.
     * Do not set this too low, as managing many arenas will increase
     * allocation and deallocation overhead. Setting it too high allocates
     * more locked memory from the OS than strictly necessary.
     */
    static const size_t ARENA_SIZE = 256*1024;
    /** Chunk alignment. Another compromise. Setting this too high will waste
     * memory, setting it too low will increase fragmentation.
     */
    static const size_t ARENA_ALIGN = 16;

    /** Callback when allocation succeeds but locking fails.
     */
    typedef bool (*LockingFailed_Callback)();

    /** Memory statistics. */
    struct Stats
    {
        size_t used;
        size_t free;
        size_t total;
        size_t locked;
        size_t chunks_used;
        size_t chunks_free;
    };

    /** Create a new LockedPool. This takes ownership of the LockedPageAllocator,
     * so it can only be instantiated as LockedPool(std::move(...)).
     *
     * The second argument is an optional callback that is invoked when locking
     * a newly allocated arena fails. If the callback is provided and returns
     * false, the allocation fails (hard fail); if it returns true, the
     * allocation proceeds, but the callback may issue a warning.
     */
    explicit LockedPool(std::unique_ptr<LockedPageAllocator> allocator, LockingFailed_Callback lf_cb_in = nullptr);
    ~LockedPool();

    LockedPool(const LockedPool& other) = delete; // non construction-copyable
    LockedPool& operator=(const LockedPool&) = delete; // non copyable

    /** Allocate size bytes from this pool.
     * Returns a pointer on success, or nullptr if the pool is full or
     * the requested size is 0.
     */
    void* alloc(size_t size);

    /** Free a previously allocated chunk of memory.
     * Freeing a nullptr has no effect.
     * Throws std::runtime_error on an invalid or double free.
     */
    void free(void *ptr);

    /** Get pool usage statistics */
    Stats stats() const;
private:
    std::unique_ptr<LockedPageAllocator> allocator;

    /** Create an arena from locked pages */
    class LockedPageArena: public Arena
    {
    public:
        LockedPageArena(LockedPageAllocator *alloc_in, void *base_in, size_t size, size_t align);
        ~LockedPageArena();
    private:
        void *base;
        size_t size;
        LockedPageAllocator *allocator;
    };

    bool new_arena(size_t size, size_t align);

    std::list<LockedPageArena> arenas;
    LockingFailed_Callback lf_cb;
    size_t cumulative_bytes_locked{0};
    /** Mutex protects access to this pool's data structures, including arenas.
     */
    mutable std::mutex mutex;
};

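// Example use of the pool interface (a sketch: ExampleAllocator stands in for
// any concrete LockedPageAllocator, and the callback policy shown is just one
// option; in Bitcoin Core the pool is normally reached through
// LockedPoolManager / secure_allocator rather than instantiated directly):
//
//     bool OnLockingFailed()
//     {
//         // Returning true lets the allocation proceed unlocked (soft fail);
//         // returning false would make the allocation fail outright.
//         return true;
//     }
//
//     LockedPool pool(std::make_unique<ExampleAllocator>(), &OnLockingFailed);
//     void* key = pool.alloc(32);
//     if (key) {
//         // ... write secret material into the 32 locked bytes ...
//         pool.free(key);
//     }
//     LockedPool::Stats s = pool.stats(); // e.g. s.locked, s.used, s.free
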
/**
 * Singleton class to keep track of locked (i.e., non-swappable) memory, for use
 * in std::allocator templates.
 *
 * Some implementations of the STL allocate memory in some constructors (e.g.,
 * MSVC's vector<T> implementation, which allocates 1 byte of memory in the
 * allocator). Due to the unpredictable order of static initializers, we have to
 * make sure the LockedPoolManager instance exists before any other STL-based
 * objects that use secure_allocator are created. So instead of having
 * LockedPoolManager also be static-initialized, it is created on demand.
 */
class LockedPoolManager : public LockedPool
{
public:
    /** Return the current instance, or create it once */
    static LockedPoolManager& Instance();

private:
    explicit LockedPoolManager(std::unique_ptr<LockedPageAllocator> allocator);

    /** Create a new LockedPoolManager specialized to the OS */
    static void CreateInstance();
    /** Called when locking fails, warn the user here */
    static bool LockingFailed();

    static LockedPoolManager* _instance;
};

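// How an allocator might route through the singleton (a sketch; the real
// secure_allocator lives in support/allocators/secure.h, and these helper
// names are hypothetical):
//
//     #include <new> // std::bad_alloc
//
//     template <typename T>
//     T* secure_allocate(std::size_t n)
//     {
//         void* p = LockedPoolManager::Instance().alloc(sizeof(T) * n);
//         if (!p) throw std::bad_alloc();
//         return static_cast<T*>(p);
//     }
//
//     template <typename T>
//     void secure_deallocate(T* p) { LockedPoolManager::Instance().free(p); }
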
#endif // BITCOIN_SUPPORT_LOCKEDPOOL_H