/tmp/bitcoin/src/sync.cpp
Line | Count | Source |
1 | | // Copyright (c) 2011-present The Bitcoin Core developers |
2 | | // Distributed under the MIT software license, see the accompanying |
3 | | // file COPYING or http://www.opensource.org/licenses/mit-license.php. |
4 | | |
5 | | #include <sync.h> |
6 | | |
7 | | #include <logging/timer.h> |
8 | | #include <tinyformat.h> |
9 | | #include <util/log.h> |
10 | | #include <util/stdmutex.h> |
11 | | #include <util/strencodings.h> |
12 | | #include <util/threadnames.h> |
13 | | |
14 | | #include <map> |
15 | | #include <mutex> |
16 | | #include <set> |
17 | | #include <system_error> |
18 | | #include <thread> |
19 | | #include <type_traits> |
20 | | #include <unordered_map> |
21 | | #include <utility> |
22 | | #include <vector> |
23 | | |
24 | | #ifdef DEBUG_LOCKCONTENTION |
25 | | |
/**
 * Acquire `lock`, logging how long the acquisition takes.
 *
 * LOG_TIME_MICROS_WITH_CATEGORY (logging/timer.h) times the enclosing scope,
 * so the emitted "lock contention <name>, <file>:<line>" entry under
 * BCLog::LOCK covers the blocking lock() call below. Callers invoke this
 * when an initial try_lock failed — presumably why the message says
 * "contention"; confirm against sync.h call sites.
 *
 * @param name  Mutex name as written at the lock site.
 * @param file  Source file of the lock site.
 * @param nLine Source line of the lock site.
 * @param lock  An unlocked std::unique_lock to block on.
 */
template <typename LockType>
void ContendedLock(std::string_view name, std::string_view file, int nLine, LockType& lock)
{
    LOG_TIME_MICROS_WITH_CATEGORY(strprintf("lock contention %s, %s:%d", name, file, nLine), BCLog::LOCK);
    lock.lock();
}
template void ContendedLock(std::string_view name, std::string_view file, int nLine, std::unique_lock<std::mutex>& lock);
template void ContendedLock(std::string_view name, std::string_view file, int nLine, std::unique_lock<std::recursive_mutex>& lock);
34 | | |
35 | | #endif |
36 | | |
37 | | #ifdef DEBUG_LOCKORDER |
38 | | // |
39 | | // Early deadlock detection. |
40 | | // Problem being solved: |
41 | | // Thread 1 locks A, then B, then C |
42 | | // Thread 2 locks D, then C, then A |
43 | | // --> may result in deadlock between the two threads, depending on when they run. |
44 | | // Solution implemented here: |
45 | | // Keep track of pairs of locks: (A before B), (A before C), etc. |
46 | | // Complain if any thread tries to lock in a different order. |
47 | | // |
48 | | |
49 | | struct CLockLocation { |
50 | | CLockLocation( |
51 | | const char* pszName, |
52 | | const char* pszFile, |
53 | | int nLine, |
54 | | bool fTryIn, |
55 | | std::string&& thread_name) |
56 | 83.3M | : fTry(fTryIn), |
57 | 83.3M | mutexName(pszName), |
58 | 83.3M | sourceFile(pszFile), |
59 | 83.3M | m_thread_name(std::move(thread_name)), |
60 | 83.3M | sourceLine(nLine) {} |
61 | | |
62 | | std::string ToString() const |
63 | 28 | { |
64 | 28 | return strprintf( |
65 | 28 | "'%s' in %s:%s%s (in thread '%s')", |
66 | 28 | mutexName, sourceFile, sourceLine, (fTry ? " (TRY)" : ""), m_thread_name); |
67 | 28 | } |
68 | | |
69 | | std::string Name() const |
70 | 2.17M | { |
71 | 2.17M | return mutexName; |
72 | 2.17M | } |
73 | | |
74 | | private: |
75 | | bool fTry; |
76 | | std::string mutexName; |
77 | | std::string sourceFile; |
78 | | const std::string m_thread_name; |
79 | | int sourceLine; |
80 | | }; |
81 | | |
//! One held lock: the mutex address plus where/how it was acquired.
using LockStackItem = std::pair<void*, CLockLocation>;
//! Locks currently held by a single thread, in acquisition order.
using LockStack = std::vector<LockStackItem>;
//! Lock stacks for all threads that currently hold at least one lock.
using LockStacks = std::unordered_map<std::thread::id, LockStack>;

//! An observed ordering: `first` was locked before `second`.
using LockPair = std::pair<void*, void*>;
//! Each observed ordering, mapped to the lock stack that first exhibited it.
using LockOrders = std::map<LockPair, LockStack>;
//! Reversed keys of LockOrders; lets DeleteLock() find (*, cs) entries.
using InvLockOrders = std::set<LockPair>;

//! All deadlock-detection bookkeeping; every field is guarded by dd_mutex.
struct LockData {
    LockStacks m_lock_stacks GUARDED_BY(dd_mutex);
    LockOrders lockorders GUARDED_BY(dd_mutex);
    InvLockOrders invlockorders GUARDED_BY(dd_mutex);
    StdMutex dd_mutex;
};
96 | | |
97 | 242M | LockData& GetLockData() { |
98 | | // This approach guarantees that the object is not destroyed until after its last use. |
99 | | // The operating system automatically reclaims all the memory in a program's heap when that program exits. |
100 | | // Since the ~LockData() destructor is never called, the LockData class and all |
101 | | // its subclasses must have implicitly-defined destructors. |
102 | 242M | static LockData& lock_data = *new LockData(); |
103 | 242M | return lock_data; |
104 | 242M | } |
105 | | |
106 | | static void potential_deadlock_detected(const LockPair& mismatch, const LockStack& s1, const LockStack& s2) |
107 | 4 | { |
108 | 4 | LogError("POTENTIAL DEADLOCK DETECTED"); |
109 | 4 | LogError("Previous lock order was:"); |
110 | 8 | for (const LockStackItem& i : s1) { |
111 | 8 | std::string prefix{}; |
112 | 8 | if (i.first == mismatch.first) { |
113 | 4 | prefix = " (1)"; |
114 | 4 | } |
115 | 8 | if (i.first == mismatch.second) { |
116 | 4 | prefix = " (2)"; |
117 | 4 | } |
118 | 8 | LogError("%s %s", prefix, i.second.ToString()); |
119 | 8 | } |
120 | | |
121 | 4 | std::string mutex_a, mutex_b; |
122 | 4 | LogError("Current lock order is:"); |
123 | 8 | for (const LockStackItem& i : s2) { |
124 | 8 | std::string prefix{}; |
125 | 8 | if (i.first == mismatch.first) { |
126 | 4 | prefix = " (1)"; |
127 | 4 | mutex_a = i.second.Name(); |
128 | 4 | } |
129 | 8 | if (i.first == mismatch.second) { |
130 | 4 | prefix = " (2)"; |
131 | 4 | mutex_b = i.second.Name(); |
132 | 4 | } |
133 | 8 | LogError("%s %s", prefix, i.second.ToString()); |
134 | 8 | } |
135 | 4 | if (g_debug_lockorder_abort) { |
136 | 0 | tfm::format(std::cerr, "Assertion failed: detected inconsistent lock order for %s, details in debug log.\n", s2.back().second.ToString()); |
137 | 0 | abort(); |
138 | 0 | } |
139 | 4 | throw std::logic_error(strprintf("potential deadlock detected: %s -> %s -> %s", mutex_b, mutex_a, mutex_b)); |
140 | 4 | } |
141 | | |
142 | | static void double_lock_detected(const void* mutex, const LockStack& lock_stack) |
143 | 1 | { |
144 | 1 | LogError("DOUBLE LOCK DETECTED"); |
145 | 1 | LogError("Lock order:"); |
146 | 2 | for (const LockStackItem& i : lock_stack) { |
147 | 2 | std::string prefix{}; |
148 | 2 | if (i.first == mutex) { |
149 | 2 | prefix = " (*)"; |
150 | 2 | } |
151 | 2 | LogError("%s %s", prefix, i.second.ToString()); |
152 | 2 | } |
153 | 1 | if (g_debug_lockorder_abort) { |
154 | 0 | tfm::format(std::cerr, |
155 | 0 | "Assertion failed: detected double lock for %s, details in debug log.\n", |
156 | 0 | lock_stack.back().second.ToString()); |
157 | 0 | abort(); |
158 | 0 | } |
159 | 1 | throw std::logic_error("double lock detected"); |
160 | 1 | } |
161 | | |
162 | | template <typename MutexType> |
163 | | static void push_lock(MutexType* c, const CLockLocation& locklocation) |
164 | 83.3M | { |
165 | 83.3M | constexpr bool is_recursive_mutex = |
166 | 83.3M | std::is_base_of_v<RecursiveMutex, MutexType> || |
167 | 83.3M | std::is_base_of_v<std::recursive_mutex, MutexType>; |
168 | | |
169 | 83.3M | LockData& lockdata = GetLockData(); |
170 | 83.3M | STDLOCK(lockdata.dd_mutex); |
171 | | |
172 | 83.3M | LockStack& lock_stack = lockdata.m_lock_stacks[std::this_thread::get_id()]; |
173 | 83.3M | lock_stack.emplace_back(c, locklocation); |
174 | 209M | for (size_t j = 0; j < lock_stack.size() - 1; ++j) { |
175 | 166M | const LockStackItem& i = lock_stack[j]; |
176 | 166M | if (i.first == c) { |
177 | 40.1M | if (is_recursive_mutex) { |
178 | 40.1M | break; |
179 | 40.1M | } |
180 | | // It is not a recursive mutex and it appears in the stack two times: |
181 | | // at position `j` and at the end (which we added just before this loop). |
182 | | // Can't allow locking the same (non-recursive) mutex two times from the |
183 | | // same thread as that results in an undefined behavior. |
184 | 1 | auto lock_stack_copy = lock_stack; |
185 | 1 | lock_stack.pop_back(); |
186 | 1 | double_lock_detected(c, lock_stack_copy); |
187 | | // double_lock_detected() does not return. |
188 | 1 | } |
189 | | |
190 | 126M | const LockPair p1 = std::make_pair(i.first, c); |
191 | 126M | if (lockdata.lockorders.contains(p1)) |
192 | 126M | continue; |
193 | | |
194 | 211k | const LockPair p2 = std::make_pair(c, i.first); |
195 | 211k | if (lockdata.lockorders.contains(p2)) { |
196 | 4 | auto lock_stack_copy = lock_stack; |
197 | 4 | lock_stack.pop_back(); |
198 | 4 | potential_deadlock_detected(p1, lockdata.lockorders[p2], lock_stack_copy); |
199 | | // potential_deadlock_detected() does not return. |
200 | 4 | } |
201 | | |
202 | 211k | lockdata.lockorders.emplace(p1, lock_stack); |
203 | 211k | lockdata.invlockorders.insert(p2); |
204 | 211k | } |
205 | 83.3M | } sync.cpp:void push_lock<std::mutex>(std::mutex*, CLockLocation const&) Line | Count | Source | 164 | 33.8M | { | 165 | 33.8M | constexpr bool is_recursive_mutex = | 166 | 33.8M | std::is_base_of_v<RecursiveMutex, MutexType> || | 167 | 33.8M | std::is_base_of_v<std::recursive_mutex, MutexType>; | 168 | | | 169 | 33.8M | LockData& lockdata = GetLockData(); | 170 | 33.8M | STDLOCK(lockdata.dd_mutex); | 171 | | | 172 | 33.8M | LockStack& lock_stack = lockdata.m_lock_stacks[std::this_thread::get_id()]; | 173 | 33.8M | lock_stack.emplace_back(c, locklocation); | 174 | 67.3M | for (size_t j = 0; j < lock_stack.size() - 1; ++j) { | 175 | 33.4M | const LockStackItem& i = lock_stack[j]; | 176 | 33.4M | if (i.first == c) { | 177 | 1 | if (is_recursive_mutex) { | 178 | 0 | break; | 179 | 0 | } | 180 | | // It is not a recursive mutex and it appears in the stack two times: | 181 | | // at position `j` and at the end (which we added just before this loop). | 182 | | // Can't allow locking the same (non-recursive) mutex two times from the | 183 | | // same thread as that results in an undefined behavior. | 184 | 1 | auto lock_stack_copy = lock_stack; | 185 | 1 | lock_stack.pop_back(); | 186 | 1 | double_lock_detected(c, lock_stack_copy); | 187 | | // double_lock_detected() does not return. | 188 | 1 | } | 189 | | | 190 | 33.4M | const LockPair p1 = std::make_pair(i.first, c); | 191 | 33.4M | if (lockdata.lockorders.contains(p1)) | 192 | 33.3M | continue; | 193 | | | 194 | 170k | const LockPair p2 = std::make_pair(c, i.first); | 195 | 170k | if (lockdata.lockorders.contains(p2)) { | 196 | 2 | auto lock_stack_copy = lock_stack; | 197 | 2 | lock_stack.pop_back(); | 198 | 2 | potential_deadlock_detected(p1, lockdata.lockorders[p2], lock_stack_copy); | 199 | | // potential_deadlock_detected() does not return. 
| 200 | 2 | } | 201 | | | 202 | 170k | lockdata.lockorders.emplace(p1, lock_stack); | 203 | 170k | lockdata.invlockorders.insert(p2); | 204 | 170k | } | 205 | 33.8M | } |
sync.cpp:void push_lock<std::recursive_mutex>(std::recursive_mutex*, CLockLocation const&) Line | Count | Source | 164 | 49.5M | { | 165 | 49.5M | constexpr bool is_recursive_mutex = | 166 | 49.5M | std::is_base_of_v<RecursiveMutex, MutexType> || | 167 | 49.5M | std::is_base_of_v<std::recursive_mutex, MutexType>; | 168 | | | 169 | 49.5M | LockData& lockdata = GetLockData(); | 170 | 49.5M | STDLOCK(lockdata.dd_mutex); | 171 | | | 172 | 49.5M | LockStack& lock_stack = lockdata.m_lock_stacks[std::this_thread::get_id()]; | 173 | 49.5M | lock_stack.emplace_back(c, locklocation); | 174 | 142M | for (size_t j = 0; j < lock_stack.size() - 1; ++j) { | 175 | 133M | const LockStackItem& i = lock_stack[j]; | 176 | 133M | if (i.first == c) { | 177 | 40.1M | if (is_recursive_mutex) { | 178 | 40.1M | break; | 179 | 40.1M | } | 180 | | // It is not a recursive mutex and it appears in the stack two times: | 181 | | // at position `j` and at the end (which we added just before this loop). | 182 | | // Can't allow locking the same (non-recursive) mutex two times from the | 183 | | // same thread as that results in an undefined behavior. | 184 | 0 | auto lock_stack_copy = lock_stack; | 185 | 0 | lock_stack.pop_back(); | 186 | 0 | double_lock_detected(c, lock_stack_copy); | 187 | | // double_lock_detected() does not return. | 188 | 0 | } | 189 | | | 190 | 92.8M | const LockPair p1 = std::make_pair(i.first, c); | 191 | 92.8M | if (lockdata.lockorders.contains(p1)) | 192 | 92.8M | continue; | 193 | | | 194 | 40.3k | const LockPair p2 = std::make_pair(c, i.first); | 195 | 40.3k | if (lockdata.lockorders.contains(p2)) { | 196 | 2 | auto lock_stack_copy = lock_stack; | 197 | 2 | lock_stack.pop_back(); | 198 | 2 | potential_deadlock_detected(p1, lockdata.lockorders[p2], lock_stack_copy); | 199 | | // potential_deadlock_detected() does not return. 
| 200 | 2 | } | 201 | | | 202 | 40.3k | lockdata.lockorders.emplace(p1, lock_stack); | 203 | 40.3k | lockdata.invlockorders.insert(p2); | 204 | 40.3k | } | 205 | 49.5M | } |
|
206 | | |
207 | | static void pop_lock() |
208 | 83.3M | { |
209 | 83.3M | LockData& lockdata = GetLockData(); |
210 | 83.3M | STDLOCK(lockdata.dd_mutex); |
211 | | |
212 | 83.3M | LockStack& lock_stack = lockdata.m_lock_stacks[std::this_thread::get_id()]; |
213 | 83.3M | lock_stack.pop_back(); |
214 | 83.3M | if (lock_stack.empty()) { |
215 | 18.6M | lockdata.m_lock_stacks.erase(std::this_thread::get_id()); |
216 | 18.6M | } |
217 | 83.3M | } |
218 | | |
219 | | template <typename MutexType> |
220 | | void EnterCritical(const char* pszName, const char* pszFile, int nLine, MutexType* cs, bool fTry) |
221 | 83.3M | { |
222 | 83.3M | push_lock(cs, CLockLocation(pszName, pszFile, nLine, fTry, util::ThreadGetInternalName())); |
223 | 83.3M | } void EnterCritical<std::mutex>(char const*, char const*, int, std::mutex*, bool) Line | Count | Source | 221 | 33.8M | { | 222 | 33.8M | push_lock(cs, CLockLocation(pszName, pszFile, nLine, fTry, util::ThreadGetInternalName())); | 223 | 33.8M | } |
void EnterCritical<std::recursive_mutex>(char const*, char const*, int, std::recursive_mutex*, bool) Line | Count | Source | 221 | 49.5M | { | 222 | 49.5M | push_lock(cs, CLockLocation(pszName, pszFile, nLine, fTry, util::ThreadGetInternalName())); | 223 | 49.5M | } |
|
224 | | template void EnterCritical(const char*, const char*, int, std::mutex*, bool); |
225 | | template void EnterCritical(const char*, const char*, int, std::recursive_mutex*, bool); |
226 | | |
227 | | void CheckLastCritical(void* cs, std::string& lockname, const char* guardname, const char* file, int line) |
228 | 2.17M | { |
229 | 2.17M | LockData& lockdata = GetLockData(); |
230 | 2.17M | STDLOCK(lockdata.dd_mutex); |
231 | | |
232 | 2.17M | const LockStack& lock_stack = lockdata.m_lock_stacks[std::this_thread::get_id()]; |
233 | 2.17M | if (!lock_stack.empty()) { |
234 | 2.17M | const auto& lastlock = lock_stack.back(); |
235 | 2.17M | if (lastlock.first == cs) { |
236 | 2.17M | lockname = lastlock.second.Name(); |
237 | 2.17M | return; |
238 | 2.17M | } |
239 | 2.17M | } |
240 | | |
241 | 18.4E | LogError("INCONSISTENT LOCK ORDER DETECTED"); |
242 | 18.4E | LogError("Current lock order (least recent first) is:"); |
243 | 18.4E | for (const LockStackItem& i : lock_stack) { |
244 | 10 | LogError(" %s", i.second.ToString()); |
245 | 10 | } |
246 | 18.4E | if (g_debug_lockorder_abort) { |
247 | 0 | tfm::format(std::cerr, "%s:%s %s was not most recent critical section locked, details in debug log.\n", file, line, guardname); |
248 | 0 | abort(); |
249 | 0 | } |
250 | 18.4E | throw std::logic_error(strprintf("%s was not most recent critical section locked", guardname)); |
251 | 18.4E | } |
252 | | |
//! Record exit from a critical section; counterpart of EnterCritical().
//! Pops the most recent entry from this thread's lock stack.
void LeaveCritical()
{
    pop_lock();
}
257 | | |
258 | | static std::string LocksHeld() |
259 | 0 | { |
260 | 0 | LockData& lockdata = GetLockData(); |
261 | 0 | STDLOCK(lockdata.dd_mutex); |
262 | |
|
263 | 0 | const LockStack& lock_stack = lockdata.m_lock_stacks[std::this_thread::get_id()]; |
264 | 0 | std::string result; |
265 | 0 | for (const LockStackItem& i : lock_stack) |
266 | 0 | result += i.second.ToString() + std::string("\n"); |
267 | 0 | return result; |
268 | 0 | } |
269 | | |
270 | | static bool LockHeld(void* mutex) |
271 | 73.3M | { |
272 | 73.3M | LockData& lockdata = GetLockData(); |
273 | 73.3M | STDLOCK(lockdata.dd_mutex); |
274 | | |
275 | 73.3M | const LockStack& lock_stack = lockdata.m_lock_stacks[std::this_thread::get_id()]; |
276 | 173M | for (const LockStackItem& i : lock_stack) { |
277 | 173M | if (i.first == mutex) return true; |
278 | 173M | } |
279 | | |
280 | 6.21M | return false; |
281 | 73.3M | } |
282 | | |
283 | | template <typename MutexType> |
284 | | void AssertLockHeldInternal(const char* pszName, const char* pszFile, int nLine, MutexType* cs) |
285 | 67.0M | { |
286 | 67.0M | if (LockHeld(cs)) return; |
287 | 0 | tfm::format(std::cerr, "Assertion failed: lock %s not held in %s:%i; locks held:\n%s", pszName, pszFile, nLine, LocksHeld()); |
288 | 0 | abort(); |
289 | 67.0M | } void AssertLockHeldInternal<AnnotatedMixin<std::mutex>>(char const*, char const*, int, AnnotatedMixin<std::mutex>*) Line | Count | Source | 285 | 11.5M | { | 286 | 11.5M | if (LockHeld(cs)) return; | 287 | 11 | tfm::format(std::cerr, "Assertion failed: lock %s not held in %s:%i; locks held:\n%s", pszName, pszFile, nLine, LocksHeld()); | 288 | 11 | abort(); | 289 | 11.5M | } |
void AssertLockHeldInternal<AnnotatedMixin<std::recursive_mutex>>(char const*, char const*, int, AnnotatedMixin<std::recursive_mutex>*) Line | Count | Source | 285 | 55.5M | { | 286 | 55.5M | if (LockHeld(cs)) return; | 287 | 18.4E | tfm::format(std::cerr, "Assertion failed: lock %s not held in %s:%i; locks held:\n%s", pszName, pszFile, nLine, LocksHeld()); | 288 | 18.4E | abort(); | 289 | 55.5M | } |
|
290 | | template void AssertLockHeldInternal(const char*, const char*, int, Mutex*); |
291 | | template void AssertLockHeldInternal(const char*, const char*, int, RecursiveMutex*); |
292 | | |
293 | | template <typename MutexType> |
294 | | void AssertLockNotHeldInternal(const char* pszName, const char* pszFile, int nLine, MutexType* cs) |
295 | 6.21M | { |
296 | 6.21M | if (!LockHeld(cs)) return; |
297 | 18.4E | tfm::format(std::cerr, "Assertion failed: lock %s held in %s:%i; locks held:\n%s", pszName, pszFile, nLine, LocksHeld()); |
298 | 18.4E | abort(); |
299 | 6.21M | } void AssertLockNotHeldInternal<AnnotatedMixin<std::mutex>>(char const*, char const*, int, AnnotatedMixin<std::mutex>*) Line | Count | Source | 295 | 5.32M | { | 296 | 5.32M | if (!LockHeld(cs)) return; | 297 | 18.4E | tfm::format(std::cerr, "Assertion failed: lock %s held in %s:%i; locks held:\n%s", pszName, pszFile, nLine, LocksHeld()); | 298 | 18.4E | abort(); | 299 | 5.32M | } |
void AssertLockNotHeldInternal<AnnotatedMixin<std::recursive_mutex>>(char const*, char const*, int, AnnotatedMixin<std::recursive_mutex>*) Line | Count | Source | 295 | 891k | { | 296 | 891k | if (!LockHeld(cs)) return; | 297 | 18.4E | tfm::format(std::cerr, "Assertion failed: lock %s held in %s:%i; locks held:\n%s", pszName, pszFile, nLine, LocksHeld()); | 298 | 18.4E | abort(); | 299 | 891k | } |
Unexecuted instantiation: void AssertLockNotHeldInternal<GlobalMutex>(char const*, char const*, int, GlobalMutex*) |
300 | | template void AssertLockNotHeldInternal(const char*, const char*, int, Mutex*); |
301 | | template void AssertLockNotHeldInternal(const char*, const char*, int, RecursiveMutex*); |
302 | | |
/**
 * Purge all recorded ordering data that mentions mutex `cs`. Called when the
 * mutex is destroyed, so that a future mutex allocated at the same address
 * is not mistaken for this one.
 */
void DeleteLock(void* cs)
{
    LockData& lockdata = GetLockData();
    STDLOCK(lockdata.dd_mutex);
    // (cs, nullptr) sorts at/before every pair whose first element is cs,
    // so lower_bound(item) positions us at the first such entry.
    const LockPair item = std::make_pair(cs, nullptr);
    // Erase every (cs, *) ordering, plus its (*, cs) marker in the inverse index.
    LockOrders::iterator it = lockdata.lockorders.lower_bound(item);
    while (it != lockdata.lockorders.end() && it->first.first == cs) {
        const LockPair invitem = std::make_pair(it->first.second, it->first.first);
        lockdata.invlockorders.erase(invitem);
        // Post-increment: advance the iterator before erase invalidates it.
        lockdata.lockorders.erase(it++);
    }
    // Erase every (*, cs) ordering, found via the inverse index, plus the
    // inverse entry itself.
    InvLockOrders::iterator invit = lockdata.invlockorders.lower_bound(item);
    while (invit != lockdata.invlockorders.end() && invit->first == cs) {
        const LockPair invinvitem = std::make_pair(invit->second, invit->first);
        lockdata.lockorders.erase(invinvitem);
        lockdata.invlockorders.erase(invit++);
    }
}
321 | | |
322 | | bool LockStackEmpty() |
323 | 14 | { |
324 | 14 | LockData& lockdata = GetLockData(); |
325 | 14 | STDLOCK(lockdata.dd_mutex); |
326 | 14 | const auto it = lockdata.m_lock_stacks.find(std::this_thread::get_id()); |
327 | 14 | if (it == lockdata.m_lock_stacks.end()) { |
328 | 14 | return true; |
329 | 14 | } |
330 | 0 | return it->second.empty(); |
331 | 14 | } |
332 | | |
//! When true (the default), a detected lock-order violation calls abort();
//! when false, the checking functions throw std::logic_error instead so the
//! failure can be caught (e.g. by tests — confirm against callers).
bool g_debug_lockorder_abort = true;
334 | | |
335 | | #endif /* DEBUG_LOCKORDER */ |