// src/sync.cpp
  1  // Copyright (c) 2011-present The Bitcoin Core developers
  2  // Distributed under the MIT software license, see the accompanying
  3  // file COPYING or http://www.opensource.org/licenses/mit-license.php.
  4  
  5  #include <sync.h>
  6  
  7  #include <logging/timer.h>
  8  #include <tinyformat.h>
  9  #include <util/log.h>
 10  #include <util/stdmutex.h>
 11  #include <util/strencodings.h>
 12  #include <util/threadnames.h>
 13  
 14  #include <map>
 15  #include <mutex>
 16  #include <set>
 17  #include <system_error>
 18  #include <thread>
 19  #include <type_traits>
 20  #include <unordered_map>
 21  #include <utility>
 22  #include <vector>
 23  
 24  #ifdef DEBUG_LOCKCONTENTION
 25  
//! Acquire `lock` while timing how long the acquisition blocks.
//! The LOG_TIME_MICROS_WITH_CATEGORY scoped timer is constructed before
//! lock() is called, so the entire blocking wait falls inside its scope and
//! is attributed to the named lock at file:line under the BCLog::LOCK category.
template <typename LockType>
void ContendedLock(std::string_view name, std::string_view file, int nLine, LockType& lock)
{
    LOG_TIME_MICROS_WITH_CATEGORY(strprintf("lock contention %s, %s:%d", name, file, nLine), BCLog::LOCK);
    lock.lock();
}
// Explicit instantiations for the two lock types used by the sync primitives.
template void ContendedLock(std::string_view name, std::string_view file, int nLine, std::unique_lock<std::mutex>& lock);
template void ContendedLock(std::string_view name, std::string_view file, int nLine, std::unique_lock<std::recursive_mutex>& lock);
 34  
 35  #endif
 36  
 37  #ifdef DEBUG_LOCKORDER
 38  //
 39  // Early deadlock detection.
 40  // Problem being solved:
 41  //    Thread 1 locks A, then B, then C
 42  //    Thread 2 locks D, then C, then A
 43  //     --> may result in deadlock between the two threads, depending on when they run.
 44  // Solution implemented here:
 45  // Keep track of pairs of locks: (A before B), (A before C), etc.
 46  // Complain if any thread tries to lock in a different order.
 47  //
 48  
 49  struct CLockLocation {
 50      CLockLocation(
 51          const char* pszName,
 52          const char* pszFile,
 53          int nLine,
 54          bool fTryIn,
 55          std::string&& thread_name)
 56          : fTry(fTryIn),
 57            mutexName(pszName),
 58            sourceFile(pszFile),
 59            m_thread_name(std::move(thread_name)),
 60            sourceLine(nLine) {}
 61  
 62      std::string ToString() const
 63      {
 64          return strprintf(
 65              "'%s' in %s:%s%s (in thread '%s')",
 66              mutexName, sourceFile, sourceLine, (fTry ? " (TRY)" : ""), m_thread_name);
 67      }
 68  
 69      std::string Name() const
 70      {
 71          return mutexName;
 72      }
 73  
 74  private:
 75      bool fTry;
 76      std::string mutexName;
 77      std::string sourceFile;
 78      const std::string m_thread_name;
 79      int sourceLine;
 80  };
 81  
// One held lock: address of the mutex plus where/how it was acquired.
using LockStackItem = std::pair<void*, CLockLocation>;
// All locks currently held by one thread, in acquisition order (oldest first).
using LockStack = std::vector<LockStackItem>;
// Per-thread lock stacks, keyed by thread id.
using LockStacks = std::unordered_map<std::thread::id, LockStack>;

// An ordered pair of mutex addresses: (locked earlier, locked later).
using LockPair = std::pair<void*, void*>;
// For each observed ordering (A, B), the lock stack that first established it
// (kept so a later conflicting order can print the original context).
using LockOrders = std::map<LockPair, LockStack>;
// The reversed pair (B, A) for every entry in LockOrders; used as an index
// when erasing all orderings that involve a given mutex in DeleteLock().
using InvLockOrders = std::set<LockPair>;

// All lock-order bookkeeping, shared across threads; every container is
// protected by dd_mutex (enforced via GUARDED_BY annotations).
struct LockData {
    LockStacks m_lock_stacks GUARDED_BY(dd_mutex);
    LockOrders lockorders GUARDED_BY(dd_mutex);
    InvLockOrders invlockorders GUARDED_BY(dd_mutex);
    StdMutex dd_mutex;
};
 96  
 97  LockData& GetLockData() {
 98      // This approach guarantees that the object is not destroyed until after its last use.
 99      // The operating system automatically reclaims all the memory in a program's heap when that program exits.
100      // Since the ~LockData() destructor is never called, the LockData class and all
101      // its subclasses must have implicitly-defined destructors.
102      static LockData& lock_data = *new LockData();
103      return lock_data;
104  }
105  
/**
 * Report a lock-order inconsistency: the pair in `mismatch` has now been
 * observed locked in both orders.
 *
 * @param mismatch (A, B): A is held while B is being acquired, but an earlier
 *                 stack recorded the opposite order
 * @param s1 lock stack that established the earlier (conflicting) order
 * @param s2 the current thread's lock stack showing the new order
 *
 * Never returns: abort()s when g_debug_lockorder_abort is set, otherwise
 * throws std::logic_error.
 */
static void potential_deadlock_detected(const LockPair& mismatch, const LockStack& s1, const LockStack& s2)
{
    LogError("POTENTIAL DEADLOCK DETECTED");
    LogError("Previous lock order was:");
    for (const LockStackItem& i : s1) {
        // Tag the two conflicting mutexes so they stand out in the dump.
        std::string prefix{};
        if (i.first == mismatch.first) {
            prefix = " (1)";
        }
        if (i.first == mismatch.second) {
            prefix = " (2)";
        }
        LogError("%s %s", prefix, i.second.ToString());
    }

    // Capture the names of the two mutexes while walking the current stack.
    std::string mutex_a, mutex_b;
    LogError("Current lock order is:");
    for (const LockStackItem& i : s2) {
        std::string prefix{};
        if (i.first == mismatch.first) {
            prefix = " (1)";
            mutex_a = i.second.Name();
        }
        if (i.first == mismatch.second) {
            prefix = " (2)";
            mutex_b = i.second.Name();
        }
        LogError("%s %s", prefix, i.second.ToString());
    }
    if (g_debug_lockorder_abort) {
        tfm::format(std::cerr, "Assertion failed: detected inconsistent lock order for %s, details in debug log.\n", s2.back().second.ToString());
        abort();
    }
    // The message spells out the cycle: B -> A -> B (B taken before and after A).
    throw std::logic_error(strprintf("potential deadlock detected: %s -> %s -> %s", mutex_b, mutex_a, mutex_b));
}
141  
142  static void double_lock_detected(const void* mutex, const LockStack& lock_stack)
143  {
144      LogError("DOUBLE LOCK DETECTED");
145      LogError("Lock order:");
146      for (const LockStackItem& i : lock_stack) {
147          std::string prefix{};
148          if (i.first == mutex) {
149              prefix = " (*)";
150          }
151          LogError("%s %s", prefix, i.second.ToString());
152      }
153      if (g_debug_lockorder_abort) {
154          tfm::format(std::cerr,
155                      "Assertion failed: detected double lock for %s, details in debug log.\n",
156                      lock_stack.back().second.ToString());
157          abort();
158      }
159      throw std::logic_error("double lock detected");
160  }
161  
/**
 * Record that the current thread is acquiring mutex `c` at `locklocation`,
 * and check the acquisition against all lock orderings seen so far.
 *
 * Detects two classes of bugs, both reported via functions that never return:
 *  - locking a non-recursive mutex twice on the same thread;
 *  - acquiring (held, c) when (c, held) was recorded earlier.
 */
template <typename MutexType>
static void push_lock(MutexType* c, const CLockLocation& locklocation)
{
    // Recursive mutexes may legitimately appear more than once in a stack.
    constexpr bool is_recursive_mutex =
        std::is_base_of_v<RecursiveMutex, MutexType> ||
        std::is_base_of_v<std::recursive_mutex, MutexType>;

    LockData& lockdata = GetLockData();
    STDLOCK(lockdata.dd_mutex);

    LockStack& lock_stack = lockdata.m_lock_stacks[std::this_thread::get_id()];
    lock_stack.emplace_back(c, locklocation);
    // Compare the new lock against every lock already held (all entries but the last).
    for (size_t j = 0; j < lock_stack.size() - 1; ++j) {
        const LockStackItem& i = lock_stack[j];
        if (i.first == c) {
            if (is_recursive_mutex) {
                break;
            }
            // It is not a recursive mutex and it appears in the stack two times:
            // at position `j` and at the end (which we added just before this loop).
            // Can't allow locking the same (non-recursive) mutex two times from the
            // same thread as that results in an undefined behavior.
            // Copy the stack for the report, then restore it before bailing out.
            auto lock_stack_copy = lock_stack;
            lock_stack.pop_back();
            double_lock_detected(c, lock_stack_copy);
            // double_lock_detected() does not return.
        }

        // Ordering (i before c) already recorded: nothing new to learn.
        const LockPair p1 = std::make_pair(i.first, c);
        if (lockdata.lockorders.contains(p1))
            continue;

        // The opposite ordering (c before i) was seen earlier: report the conflict.
        const LockPair p2 = std::make_pair(c, i.first);
        if (lockdata.lockorders.contains(p2)) {
            auto lock_stack_copy = lock_stack;
            lock_stack.pop_back();
            potential_deadlock_detected(p1, lockdata.lockorders[p2], lock_stack_copy);
            // potential_deadlock_detected() does not return.
        }

        // First sighting of (i before c): remember it with the stack that produced
        // it, and index the reverse pair for cleanup in DeleteLock().
        lockdata.lockorders.emplace(p1, lock_stack);
        lockdata.invlockorders.insert(p2);
    }
}
206  
207  static void pop_lock()
208  {
209      LockData& lockdata = GetLockData();
210      STDLOCK(lockdata.dd_mutex);
211  
212      LockStack& lock_stack = lockdata.m_lock_stacks[std::this_thread::get_id()];
213      lock_stack.pop_back();
214      if (lock_stack.empty()) {
215          lockdata.m_lock_stacks.erase(std::this_thread::get_id());
216      }
217  }
218  
//! Record an acquisition of `cs` (named pszName, at pszFile:nLine; fTry marks
//! a try-lock) on the current thread's lock stack and run lock-order checks.
template <typename MutexType>
void EnterCritical(const char* pszName, const char* pszFile, int nLine, MutexType* cs, bool fTry)
{
    push_lock(cs, CLockLocation(pszName, pszFile, nLine, fTry, util::ThreadGetInternalName()));
}
// Explicit instantiations for the two underlying standard mutex types.
template void EnterCritical(const char*, const char*, int, std::mutex*, bool);
template void EnterCritical(const char*, const char*, int, std::recursive_mutex*, bool);
226  
/**
 * Check that `cs` is the most recently locked critical section on this
 * thread; if so, store its lock-site name in `lockname` and return.
 *
 * Otherwise log the thread's current lock order and either abort() (when
 * g_debug_lockorder_abort) or throw std::logic_error. `guardname`, `file`
 * and `line` identify the calling guard for the error message.
 */
void CheckLastCritical(void* cs, std::string& lockname, const char* guardname, const char* file, int line)
{
    LockData& lockdata = GetLockData();
    STDLOCK(lockdata.dd_mutex);

    const LockStack& lock_stack = lockdata.m_lock_stacks[std::this_thread::get_id()];
    if (!lock_stack.empty()) {
        const auto& lastlock = lock_stack.back();
        if (lastlock.first == cs) {
            // Success: cs is the innermost lock held.
            lockname = lastlock.second.Name();
            return;
        }
    }

    LogError("INCONSISTENT LOCK ORDER DETECTED");
    LogError("Current lock order (least recent first) is:");
    for (const LockStackItem& i : lock_stack) {
        LogError(" %s", i.second.ToString());
    }
    if (g_debug_lockorder_abort) {
        tfm::format(std::cerr, "%s:%s %s was not most recent critical section locked, details in debug log.\n", file, line, guardname);
        abort();
    }
    throw std::logic_error(strprintf("%s was not most recent critical section locked", guardname));
}
252  
//! Counterpart of EnterCritical(): pop the most recent lock off the current
//! thread's lock stack.
void LeaveCritical()
{
    pop_lock();
}
257  
258  static std::string LocksHeld()
259  {
260      LockData& lockdata = GetLockData();
261      STDLOCK(lockdata.dd_mutex);
262  
263      const LockStack& lock_stack = lockdata.m_lock_stacks[std::this_thread::get_id()];
264      std::string result;
265      for (const LockStackItem& i : lock_stack)
266          result += i.second.ToString() + std::string("\n");
267      return result;
268  }
269  
270  static bool LockHeld(void* mutex)
271  {
272      LockData& lockdata = GetLockData();
273      STDLOCK(lockdata.dd_mutex);
274  
275      const LockStack& lock_stack = lockdata.m_lock_stacks[std::this_thread::get_id()];
276      for (const LockStackItem& i : lock_stack) {
277          if (i.first == mutex) return true;
278      }
279  
280      return false;
281  }
282  
283  template <typename MutexType>
284  void AssertLockHeldInternal(const char* pszName, const char* pszFile, int nLine, MutexType* cs)
285  {
286      if (LockHeld(cs)) return;
287      tfm::format(std::cerr, "Assertion failed: lock %s not held in %s:%i; locks held:\n%s", pszName, pszFile, nLine, LocksHeld());
288      abort();
289  }
290  template void AssertLockHeldInternal(const char*, const char*, int, Mutex*);
291  template void AssertLockHeldInternal(const char*, const char*, int, RecursiveMutex*);
292  
293  template <typename MutexType>
294  void AssertLockNotHeldInternal(const char* pszName, const char* pszFile, int nLine, MutexType* cs)
295  {
296      if (!LockHeld(cs)) return;
297      tfm::format(std::cerr, "Assertion failed: lock %s held in %s:%i; locks held:\n%s", pszName, pszFile, nLine, LocksHeld());
298      abort();
299  }
300  template void AssertLockNotHeldInternal(const char*, const char*, int, Mutex*);
301  template void AssertLockNotHeldInternal(const char*, const char*, int, RecursiveMutex*);
302  
/**
 * Remove every recorded lock ordering that involves the mutex at `cs`, so a
 * mutex later created at the same address does not inherit stale orderings.
 * NOTE(review): presumably invoked from the mutex's destructor — confirm at
 * the call sites in sync.h.
 */
void DeleteLock(void* cs)
{
    LockData& lockdata = GetLockData();
    STDLOCK(lockdata.dd_mutex);
    // (cs, nullptr) positions lower_bound at the start of cs's range in the
    // ordered containers.
    const LockPair item = std::make_pair(cs, nullptr);
    // Erase every ordering (cs, X) and its mirror (X, cs) in the inverse index.
    LockOrders::iterator it = lockdata.lockorders.lower_bound(item);
    while (it != lockdata.lockorders.end() && it->first.first == cs) {
        const LockPair invitem = std::make_pair(it->first.second, it->first.first);
        lockdata.invlockorders.erase(invitem);
        lockdata.lockorders.erase(it++);
    }
    // Erase every ordering (X, cs), located via the inverse index entries (cs, X).
    InvLockOrders::iterator invit = lockdata.invlockorders.lower_bound(item);
    while (invit != lockdata.invlockorders.end() && invit->first == cs) {
        const LockPair invinvitem = std::make_pair(invit->second, invit->first);
        lockdata.lockorders.erase(invinvitem);
        lockdata.invlockorders.erase(invit++);
    }
}
321  
322  bool LockStackEmpty()
323  {
324      LockData& lockdata = GetLockData();
325      STDLOCK(lockdata.dd_mutex);
326      const auto it = lockdata.m_lock_stacks.find(std::this_thread::get_id());
327      if (it == lockdata.m_lock_stacks.end()) {
328          return true;
329      }
330      return it->second.empty();
331  }
332  
//! When true, the lock-order checkers above call abort() on a violation;
//! when false, they throw std::logic_error instead.
bool g_debug_lockorder_abort = true;
334  
335  #endif /* DEBUG_LOCKORDER */