base/profiler/stack_copier_suspend.cc
// Copyright 2019 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/profiler/stack_copier_suspend.h"
#include "base/profiler/stack_buffer.h"
#include "base/profiler/suspendable_thread_delegate.h"
namespace base {
StackCopierSuspend::StackCopierSuspend(
std::unique_ptr<SuspendableThreadDelegate> thread_delegate)
: thread_delegate_(std::move(thread_delegate)) {}
StackCopierSuspend::~StackCopierSuspend() = default;

// Suspends the thread, copies the stack state, and resumes the thread. The
// copied stack state includes the stack itself, the top address of the stack
// copy, and the register context. Returns true on success, and returns the
// copied state via the params.
//
// NO HEAP ALLOCATIONS within the ScopedSuspendThread scope: if the suspended
// thread happens to hold a lock used by the allocator, allocating while it is
// suspended will deadlock.
bool StackCopierSuspend::CopyStack(StackBuffer* stack_buffer,
                                   uintptr_t* stack_top,
                                   TimeTicks* timestamp,
                                   RegisterContext* thread_context,
                                   Delegate* delegate) {
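  // The stack grows downward on all supported platforms, so the delegate's
  // stack base address is the highest address (top) and the thread's current
  // stack pointer is the lowest (bottom).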
  const uintptr_t top = thread_delegate_->GetStackBaseAddress();
  uintptr_t bottom = 0;
  const uint8_t* stack_copy_bottom = nullptr;
  {
    // Allocation of the ScopedSuspendThread object itself is OK since it
    // necessarily occurs before the thread is suspended by the object.
    std::unique_ptr<SuspendableThreadDelegate::ScopedSuspendThread>
        suspend_thread = thread_delegate_->CreateScopedSuspendThread();

    // TimeTicks::Now() is implemented in terms of reads to the timer tick
    // counter or TSC register on x86/x86_64 so is reentrant.
    *timestamp = TimeTicks::Now();
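
    // If the suspend failed, or the register context can't be retrieved,
    // there is no coherent stack state to copy.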
    if (!suspend_thread->WasSuccessful())
      return false;

    if (!thread_delegate_->GetThreadContext(thread_context))
      return false;

    bottom = RegisterContextStackPointer(thread_context);

    // The StackBuffer allocation is expected to be at least as large as the
    // largest stack region allocation on the platform, but check just in case
    // it isn't *and* the actual stack itself exceeds the buffer allocation
    // size.
    if ((top - bottom) > stack_buffer->size())
      return false;

    if (!thread_delegate_->CanCopyStack(bottom))
      return false;

    delegate->OnStackCopy();
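
    // Copy [bottom, top) into the buffer, rewriting any values that point
    // within the original stack to point at the corresponding location in
    // the copy, so that frame links remain walkable after the thread resumes.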
    stack_copy_bottom = CopyStackContentsAndRewritePointers(
        reinterpret_cast<uint8_t*>(bottom), reinterpret_cast<uintptr_t*>(top),
        StackBuffer::kPlatformStackAlignment, stack_buffer->buffer());
  }
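
  // The copy spans the same number of bytes as the original stack, so the top
  // of the copy is the copy's bottom address plus the original extent.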
  *stack_top = reinterpret_cast<uintptr_t>(stack_copy_bottom) + (top - bottom);
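
  // Rewrite registers that can point into the original stack (e.g. the stack
  // pointer and frame pointer) so that unwinding consults the copy rather
  // than the live stack, which may have changed since the thread resumed.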
  for (uintptr_t* reg :
       thread_delegate_->GetRegistersToRewrite(thread_context)) {
    *reg = RewritePointerIfInOriginalStack(reinterpret_cast<uint8_t*>(bottom),
                                           reinterpret_cast<uintptr_t*>(top),
                                           stack_copy_bottom, *reg);
  }

  return true;
}

std::vector<uintptr_t*> StackCopierSuspend::GetRegistersToRewrite(
    RegisterContext* thread_context) {
  return thread_delegate_->GetRegistersToRewrite(thread_context);
}

}  // namespace base
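
// Illustrative sketch: how a caller might drive StackCopierSuspend for a
// single sample. NoopStackCopyDelegate and SampleStackOnce() are hypothetical
// stand-ins for illustration; in Chromium the copier is driven by the stack
// sampling profiler machinery rather than invoked directly like this.
namespace {

class NoopStackCopyDelegate : public base::StackCopier::Delegate {
 public:
  void OnStackCopy() override {}
};

bool SampleStackOnce(
    std::unique_ptr<base::SuspendableThreadDelegate> platform_delegate,
    base::StackBuffer* stack_buffer) {
  base::StackCopierSuspend copier(std::move(platform_delegate));

  uintptr_t stack_top = 0;
  base::TimeTicks timestamp;
  base::RegisterContext thread_context;
  NoopStackCopyDelegate copy_delegate;

  // On success, stack_buffer holds the copy, stack_top is the top address of
  // the copy, and registers in thread_context that pointed into the original
  // stack now point into the copy.
  return copier.CopyStack(stack_buffer, &stack_top, &timestamp,
                          &thread_context, &copy_delegate);
}

}  // namespace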