// base/synchronization/lock.cc
// Copyright 2011 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This file is used for debugging assertion support. The Lock class
// is functionally a wrapper around the LockImpl class, so the only
// real intelligence in the class is in the debugging logic.
#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
#pragma allow_unsafe_buffers
#endif
#include "base/synchronization/lock.h"
#include <cstdint>
#if DCHECK_IS_ON()
#include <array>
#include <memory>
#include "base/functional/function_ref.h"
#include "base/synchronization/lock_subtle.h"
#include "base/threading/platform_thread.h"
namespace base {
namespace {
// List of locks held by a thread.
//
// As of May 2024, no more than 5 locks were held simultaneously by a thread in
// a test browsing session or while running the CQ (% locks acquired in unit
// tests "WaitSetTest.NoStarvation" and
// "MessagePipeTest.DataPipeConsumerHandlePingPong"). An array of size 10 is
// therefore considered sufficient to track all locks held by a thread. A
// dynamic-size array (e.g. owned by a `ThreadLocalOwnedPointer`) would require
// handling reentrancy issues with allocator shims that use `base::Lock`.
constexpr size_t kHeldLocksCapacity = 10;
// Fixed-capacity per-thread list of tracked lock addresses, stored as
// uintptr_t. Entries are appended by AddToLocksHeldOnCurrentThread() and
// removed (swap-with-last) by RemoveFromLocksHeldOnCurrentThread(); unused
// slots hold zero.
thread_local std::array<uintptr_t, kHeldLocksCapacity>
    g_tracked_locks_held_by_thread;
// Number of non-nullptr elements in `g_tracked_locks_held_by_thread`.
thread_local size_t g_num_tracked_locks_held_by_thread = 0;
}  // namespace
// Default constructor: no invariant-checking callback is installed
// (`check_invariants_` stays null, so Check*AndMark skip the callback).
Lock::Lock() = default;
// Constructs a lock that invokes `check_invariants` each time the lock is
// marked acquired (CheckUnheldAndMark) or released (CheckHeldAndUnmark).
// The FunctionRef is copied onto the heap so the member can remain null for
// the default-constructed case.
// NOTE(review): FunctionRef is typically non-owning — presumably the referent
// must outlive this Lock; confirm against lock.h's documentation.
Lock::Lock(FunctionRef<void()> check_invariants)
    : check_invariants_(
          std::make_unique<FunctionRef<void()>>(check_invariants)) {}
Lock::~Lock() {
  // A lock must not be destroyed while any thread is recorded as holding it.
  DCHECK(owning_thread_ref_.is_null());
}
// Blocks until the underlying lock is acquired, then updates debug
// bookkeeping. When `tracking` is enabled, this lock is also appended to the
// calling thread's held-locks list.
void Lock::Acquire(subtle::LockTracking tracking) {
  lock_.Lock();
  const bool track_this_lock = (tracking == subtle::LockTracking::kEnabled);
  if (track_this_lock) {
    AddToLocksHeldOnCurrentThread();
  }
  // Records this thread as owner (and runs the invariant callback, if any).
  CheckUnheldAndMark();
}
// Clears debug bookkeeping and releases the underlying lock. Must be called
// on the thread recorded as the current owner.
void Lock::Release() {
  // Clears the owner marker (and runs the invariant callback, if any) while
  // the lock is still physically held.
  CheckHeldAndUnmark();
  const bool was_tracked = in_tracked_locks_held_by_current_thread_;
  if (was_tracked) {
    RemoveFromLocksHeldOnCurrentThread();
  }
  lock_.Unlock();
}
// Attempts to acquire the lock without blocking. Returns true on success, in
// which case the same debug bookkeeping as Acquire() is performed; returns
// false (with no state changes) if the lock was contended.
bool Lock::Try(subtle::LockTracking tracking) {
  if (!lock_.Try()) {
    // Contended: no bookkeeping was touched.
    return false;
  }
  if (tracking == subtle::LockTracking::kEnabled) {
    AddToLocksHeldOnCurrentThread();
  }
  CheckUnheldAndMark();
  return true;
}
// DCHECKs that the calling thread is the recorded owner of this lock.
void Lock::AssertAcquired() const {
  DCHECK_EQ(owning_thread_ref_, PlatformThread::CurrentRef());
}
// DCHECKs that no thread is recorded as owning this lock.
void Lock::AssertNotHeld() const {
  DCHECK(owning_thread_ref_.is_null());
}
// Verifies the calling thread owns the lock, runs the optional invariant
// callback, then clears the owner marker. Called from Release() before the
// underlying lock is actually unlocked.
void Lock::CheckHeldAndUnmark() {
  DCHECK_EQ(owning_thread_ref_, PlatformThread::CurrentRef());
  // The callback runs while the lock is still marked held, so invariants may
  // read state protected by this lock.
  if (check_invariants_) {
    (*check_invariants_)();
  }
  owning_thread_ref_ = PlatformThreadRef();
}
// Verifies the lock had no recorded owner, marks the calling thread as the
// owner, then runs the optional invariant callback. Called from
// Acquire()/Try() after the underlying lock has been acquired.
void Lock::CheckUnheldAndMark() {
  DCHECK(owning_thread_ref_.is_null());
  owning_thread_ref_ = PlatformThread::CurrentRef();
  // The callback runs after the owner is marked — mirror image of the order
  // in CheckHeldAndUnmark().
  if (check_invariants_) {
    (*check_invariants_)();
  }
}
// Appends this lock's address to the calling thread's fixed-capacity list of
// tracked held locks. A given lock may appear at most once per thread.
void Lock::AddToLocksHeldOnCurrentThread() {
  CHECK(!in_tracked_locks_held_by_current_thread_);
  // Check if capacity is exceeded.
  CHECK_LT(g_num_tracked_locks_held_by_thread, kHeldLocksCapacity)
      << "This thread holds more than " << kHeldLocksCapacity
      << " tracked locks simultaneously. Reach out to //base OWNERS to "
         "determine whether `kHeldLocksCapacity` should be increased.";
  // Append this lock's address to the list of held locks.
  const size_t slot = g_num_tracked_locks_held_by_thread++;
  g_tracked_locks_held_by_thread[slot] = reinterpret_cast<uintptr_t>(this);
  in_tracked_locks_held_by_current_thread_ = true;
}
// Removes this lock's address from the calling thread's list of tracked held
// locks, using an unordered (swap-with-last) removal.
void Lock::RemoveFromLocksHeldOnCurrentThread() {
  CHECK(in_tracked_locks_held_by_current_thread_);
  const uintptr_t target = reinterpret_cast<uintptr_t>(this);
  // Traverse from the end since locks are typically acquired and released in
  // opposite order.
  for (size_t index = g_num_tracked_locks_held_by_thread; index-- > 0;) {
    if (g_tracked_locks_held_by_thread[index] != target) {
      continue;
    }
    // Overwrite the match with the last element, then clear the vacated last
    // slot so unused entries stay zeroed.
    const size_t last = g_num_tracked_locks_held_by_thread - 1;
    g_tracked_locks_held_by_thread[index] = g_tracked_locks_held_by_thread[last];
    g_tracked_locks_held_by_thread[last] = reinterpret_cast<uintptr_t>(nullptr);
    --g_num_tracked_locks_held_by_thread;
    break;
  }
  in_tracked_locks_held_by_current_thread_ = false;
}
namespace subtle {
// Returns a view of the addresses of all tracked locks currently held by the
// calling thread. The span aliases thread-local storage: it is only
// meaningful on the calling thread, and its contents change with any
// subsequent tracked Acquire()/Release().
span<const uintptr_t> GetTrackedLocksHeldByCurrentThread() {
  return span<const uintptr_t>(g_tracked_locks_held_by_thread.data(),
                               g_num_tracked_locks_held_by_thread);
}
}  // namespace subtle
} // namespace base
#endif // DCHECK_IS_ON()