1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
base / task / thread_pool / job_task_source.h [blame]
// Copyright 2019 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_THREAD_POOL_JOB_TASK_SOURCE_H_
#define BASE_TASK_THREAD_POOL_JOB_TASK_SOURCE_H_
#include <stddef.h>
#include <atomic>
#include <limits>
#include <optional>
#include <utility>
#include "base/base_export.h"
#include "base/functional/callback.h"
#include "base/memory/raw_ptr.h"
#include "base/synchronization/condition_variable.h"
#include "base/task/common/checked_lock.h"
#include "base/task/common/task_annotator.h"
#include "base/task/post_job.h"
#include "base/task/task_traits.h"
#include "base/task/thread_pool/task.h"
#include "base/task/thread_pool/task_source.h"
#include "base/task/thread_pool/task_source_sort_key.h"
namespace base {
namespace internal {
class PooledTaskRunnerDelegate;
// A JobTaskSource generates many Tasks from a single RepeatingClosure.
//
// Derived classes control the intended concurrency with GetMaxConcurrency().
class BASE_EXPORT JobTaskSource : public TaskSource {
 public:
  JobTaskSource(const Location& from_here,
                const TaskTraits& traits,
                RepeatingCallback<void(JobDelegate*)> worker_task,
                MaxConcurrencyCallback max_concurrency_callback,
                PooledTaskRunnerDelegate* delegate);
  JobTaskSource(const JobTaskSource&) = delete;
  JobTaskSource& operator=(const JobTaskSource&) = delete;

  // Wraps |task_source| into a JobHandle, which the job owner uses to
  // join/cancel the job.
  static JobHandle CreateJobHandle(
      scoped_refptr<internal::JobTaskSource> task_source) {
    return JobHandle(std::move(task_source));
  }

  // Called before the task source is enqueued to initialize task metadata.
  void WillEnqueue(int sequence_num, TaskAnnotator& annotator);

  // Notifies this task source that max concurrency was increased, and the
  // number of workers should be adjusted.
  void NotifyConcurrencyIncrease();

  // Informs this JobTaskSource that the current thread would like to join and
  // contribute to running |worker_task|. Returns true if the joining thread can
  // contribute (RunJoinTask() can be called), or false if joining was completed
  // and all other workers returned because either there's no work remaining or
  // Job was cancelled.
  bool WillJoin();

  // Contributes to running |worker_task| and returns true if the joining thread
  // can contribute again (RunJoinTask() can be called again), or false if
  // joining was completed and all other workers returned because either there's
  // no work remaining or Job was cancelled. This should be called only after
  // WillJoin() or RunJoinTask() previously returned true.
  bool RunJoinTask();

  // Cancels this JobTaskSource, causing all workers to yield and WillRunTask()
  // to return RunStatus::kDisallowed.
  void Cancel(TaskSource::Transaction* transaction = nullptr);

  // TaskSource:
  ExecutionEnvironment GetExecutionEnvironment() override;
  size_t GetRemainingConcurrency() const override;
  TaskSourceSortKey GetSortKey() const override;
  TimeTicks GetDelayedSortKey() const override;
  bool HasReadyTasks(TimeTicks now) const override;

  // Returns true if at least one worker is currently running a task from this
  // job (derived from the atomic worker count; see State).
  bool IsActive() const;

  // Returns the number of workers currently running tasks from this job.
  size_t GetWorkerCount() const;

  // Returns the maximum number of tasks from this TaskSource that can run
  // concurrently.
  size_t GetMaxConcurrency() const;

  // Hands out a small per-worker id, returned via ReleaseTaskId() when the
  // worker is done with it. Ids are tracked in |assigned_task_ids_|.
  uint8_t AcquireTaskId();
  void ReleaseTaskId(uint8_t task_id);

  // Returns true if a worker should return from the worker task on the current
  // thread ASAP.
  bool ShouldYield();

  PooledTaskRunnerDelegate* delegate() const { return delegate_; }

 private:
  // Atomic internal state to track the number of workers running a task from
  // this JobTaskSource and whether this JobTaskSource is canceled. All
  // operations are performed with std::memory_order_relaxed as State is only
  // ever modified under a lock or read atomically (optimistic read).
  class State {
   public:
    // Bit 0 holds the canceled flag; the remaining bits hold the worker count.
    static constexpr uint32_t kCanceledMask = 1;
    static constexpr int kWorkerCountBitOffset = 1;
    static constexpr uint32_t kWorkerCountIncrement = 1
                                                     << kWorkerCountBitOffset;

    // A decoded snapshot of the packed state word.
    struct Value {
      // Number of workers currently running a task from this job.
      uint8_t worker_count() const {
        return static_cast<uint8_t>(value >> kWorkerCountBitOffset);
      }
      // Returns true if canceled.
      bool is_canceled() const { return value & kCanceledMask; }

      uint32_t value;
    };

    State();
    ~State();

    // Sets as canceled. Returns the state
    // before the operation.
    Value Cancel();

    // Increments the worker count by 1. Returns the state before the operation.
    Value IncrementWorkerCount();

    // Decrements the worker count by 1. Returns the state before the operation.
    Value DecrementWorkerCount();

    // Loads and returns the state.
    Value Load() const;

   private:
    std::atomic<uint32_t> value_{0};
  };

  // Atomic flag that indicates if the joining thread is currently waiting on
  // another worker to yield or to signal.
  class JoinFlag {
   public:
    static constexpr uint32_t kNotWaiting = 0;
    static constexpr uint32_t kWaitingForWorkerToSignal = 1;
    static constexpr uint32_t kWaitingForWorkerToYield = 3;
    // kWaitingForWorkerToYield is 3 because the impl relies on the following
    // property.
    static_assert((kWaitingForWorkerToYield & kWaitingForWorkerToSignal) ==
                      kWaitingForWorkerToSignal,
                  "");

    JoinFlag();
    ~JoinFlag();

    // Returns true if the status is not kNotWaiting, using
    // std::memory_order_relaxed.
    bool IsWaiting() {
      return value_.load(std::memory_order_relaxed) != kNotWaiting;
    }

    // Resets the status as kNotWaiting using std::memory_order_relaxed.
    void Reset();

    // Sets the status as kWaitingForWorkerToYield using
    // std::memory_order_relaxed.
    void SetWaiting();

    // If the flag is kWaitingForWorkerToYield, returns true indicating that the
    // worker should yield, and atomically updates to kWaitingForWorkerToSignal
    // (using std::memory_order_relaxed) to ensure that a single worker yields
    // in response to SetWaiting().
    bool ShouldWorkerYield();

    // If the flag is kWaiting*, returns true indicating that the worker should
    // signal, and atomically updates to kNotWaiting (using
    // std::memory_order_relaxed) to ensure that a single worker signals in
    // response to SetWaiting().
    bool ShouldWorkerSignal();

   private:
    std::atomic<uint32_t> value_{kNotWaiting};
  };

  ~JobTaskSource() override;

  // Called from the joining thread. Waits for the worker count to be below or
  // equal to max concurrency (will happen when a worker calls
  // DidProcessTask()). Returns true if the joining thread should run a task, or
  // false if joining was completed and all other workers returned because
  // either there's no work remaining or Job was cancelled.
  bool WaitForParticipationOpportunity() EXCLUSIVE_LOCKS_REQUIRED(worker_lock_);

  // Internal overload: max concurrency given an externally-observed
  // |worker_count|.
  size_t GetMaxConcurrency(size_t worker_count) const;

  // TaskSource:
  RunStatus WillRunTask() override;
  Task TakeTask(TaskSource::Transaction* transaction) override;
  std::optional<Task> Clear(TaskSource::Transaction* transaction) override;
  bool DidProcessTask(TaskSource::Transaction* transaction) override;
  bool WillReEnqueue(TimeTicks now,
                     TaskSource::Transaction* transaction) override;
  bool OnBecomeReady() override;

  // Synchronizes access to workers state.
  mutable CheckedLock worker_lock_{UniversalSuccessor()};

  // Current atomic state (atomic despite the lock to allow optimistic reads
  // and cancellation without the lock).
  State state_ GUARDED_BY(worker_lock_);
  // Normally, |join_flag_| is protected by |worker_lock_|, except in
  // ShouldYield() hence the use of atomics.
  JoinFlag join_flag_ GUARDED_BY(worker_lock_);
  // Signaled when |join_flag_| is kWaiting* and a worker returns.
  std::optional<ConditionVariable> worker_released_condition_
      GUARDED_BY(worker_lock_);

  // Bitfield tracking task ids handed out by AcquireTaskId() and not yet
  // returned via ReleaseTaskId(). Presumably one bit per id (so at most 32
  // outstanding ids) — confirm against the .cc implementation.
  std::atomic<uint32_t> assigned_task_ids_{0};

  // Callback invoked to obtain the job's current max concurrency.
  RepeatingCallback<size_t(size_t)> max_concurrency_callback_;

  // Worker task set by the job owner.
  RepeatingCallback<void(JobDelegate*)> worker_task_;
  // Task returned from TakeTask(), that calls |worker_task_| internally.
  RepeatingClosure primary_task_;

  // Metadata initialized by WillEnqueue(), attached to tasks for tracing.
  TaskMetadata task_metadata_;

  // Time at which this task source became ready; immutable after construction.
  const TimeTicks ready_time_;

  // Delegate used to post/schedule work; may outlive normal teardown order
  // (LeakedDanglingUntriaged).
  raw_ptr<PooledTaskRunnerDelegate, LeakedDanglingUntriaged> delegate_;
};
} // namespace internal
} // namespace base
#endif // BASE_TASK_THREAD_POOL_JOB_TASK_SOURCE_H_