1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
gpu / command_buffer / service / dawn_service_serializer.cc [blame]
// Copyright 2019 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "gpu/command_buffer/service/dawn_service_serializer.h"

#include <algorithm>
#include <cstddef>
#include <cstdint>

#include "base/check_op.h"
#include "base/containers/span.h"
#include "base/numerics/checked_math.h"
#include "base/rand_util.h"
#include "base/synchronization/lock.h"
#include "base/trace_event/trace_event.h"
#include "gpu/command_buffer/common/webgpu_cmd_format.h"
#include "gpu/command_buffer/service/decoder_client.h"
#include "ipc/ipc_channel.h"
namespace gpu::webgpu {
namespace {

// Upper bound on a single serialized return-command message: 1 MiB, further
// capped by the IPC transport's own maximum message size so a full buffer is
// always sendable in one message.
constexpr size_t kMaxWireBufferSize =
    std::min(IPC::Channel::kMaximumMessageSize,
             static_cast<size_t>(1024 * 1024));

// Byte offset where the serialized Dawn command payload begins inside
// DawnReturnCommandsInfo; everything before it is the fixed header that the
// constructor prepopulates once.
constexpr size_t kDawnReturnCmdsOffset =
    offsetof(cmds::DawnReturnCommandsInfo, deserialized_buffer);

// The header must leave at least some room for payload in the wire buffer.
static_assert(kDawnReturnCmdsOffset < kMaxWireBufferSize, "");

}  // anonymous namespace
// Builds a serializer that forwards buffered Dawn wire commands to |client|.
// The wire buffer is sized to the maximum message we will ever send, and the
// write cursor starts just past the return-data header. The header itself is
// written exactly once here and kept across flushes, so later writes only
// touch the payload region.
DawnServiceSerializer::DawnServiceSerializer(DecoderClient* client)
    : client_(client),
      buffer_(kMaxWireBufferSize),
      put_offset_(offsetof(cmds::DawnReturnCommandsInfo, deserialized_buffer)) {
  auto* info_header =
      reinterpret_cast<cmds::DawnReturnCommandsInfoHeader*>(buffer_.data());
  info_header->return_data_header.return_data_type =
      DawnReturnDataType::kDawnCommands;
}
DawnServiceSerializer::~DawnServiceSerializer() = default;
// Largest single command allocation Dawn may request: the wire buffer minus
// the space permanently occupied by the return-data header at its front.
size_t DawnServiceSerializer::GetMaximumAllocationSize() const {
  constexpr size_t kPayloadCapacity =
      kMaxWireBufferSize - kDawnReturnCmdsOffset;
  return kPayloadCapacity;
}
// Returns a pointer to |size| writable bytes in the wire buffer, flushing the
// currently buffered commands first if they would not fit. Dawn never asks
// for more than GetMaximumAllocationSize(), so one flush always makes room.
void* DawnServiceSerializer::GetCmdSpace(size_t size) {
  base::AutoLock guard(lock_);

  DCHECK_LE(put_offset_, kMaxWireBufferSize);
  DCHECK_LE(size, GetMaximumAllocationSize());

  // Both operands below are bounded by kMaxWireBufferSize, so their sum fits
  // in uint32_t; prove that at compile time rather than checking at runtime.
  static_assert(base::CheckAdd(kMaxWireBufferSize, kMaxWireBufferSize)
                    .IsValid<uint32_t>(),
                "");
  uint32_t request_end = put_offset_ + static_cast<uint32_t>(size);
  if (request_end > buffer_.size()) {
    // Out of room: ship what we have and start a fresh payload after the
    // header.
    FlushInternal();

    // TODO(enga): Keep track of how much command space the application is
    // using and adjust the buffer size accordingly.

    DCHECK_EQ(put_offset_, kDawnReturnCmdsOffset);
    request_end = put_offset_ + static_cast<uint32_t>(size);
  }

  uint8_t* slot = &buffer_[put_offset_];
  put_offset_ = request_end;
  return slot;
}
// True when any command bytes have been written past the fixed header, i.e.
// there is a non-empty payload waiting to be sent.
bool DawnServiceSerializer::NeedsFlush() const {
  return kDawnReturnCmdsOffset < put_offset_;
}
// Sends any buffered commands to the client. Always reports success; the
// return value exists to satisfy the serializer interface.
bool DawnServiceSerializer::Flush() {
  {
    base::AutoLock guard(lock_);
    FlushInternal();
  }
  return true;
}
// Hands the buffered header + payload to |client_| and rewinds the write
// cursor to just past the header. No-op when only the header is present.
// Callers must hold |lock_|.
void DawnServiceSerializer::FlushInternal() {
  if (NeedsFlush()) {
    TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("gpu.dawn"),
                 "DawnServiceSerializer::Flush", "bytes", put_offset_.load());
    bool is_tracing = false;
    TRACE_EVENT_CATEGORY_GROUP_ENABLED(TRACE_DISABLED_BY_DEFAULT("gpu.dawn"),
                                       &is_tracing);
    if (is_tracing) {
      // Stamp the outgoing header with a random flow id so the receiving
      // side's matching trace event can be linked to this flush in a trace.
      uint64_t trace_id = base::RandUint64();
      TRACE_EVENT_WITH_FLOW0(TRACE_DISABLED_BY_DEFAULT("gpu.dawn"),
                             "DawnReturnCommands", trace_id,
                             TRACE_EVENT_FLAG_FLOW_OUT);
      cmds::DawnReturnCommandsInfoHeader* header =
          reinterpret_cast<cmds::DawnReturnCommandsInfoHeader*>(&buffer_[0]);
      header->return_data_header.trace_id = trace_id;
    }
    // The buffer is reused immediately after this call, so HandleReturnData
    // must not retain the span past its return.
    client_->HandleReturnData(base::span(buffer_).first(put_offset_.load()));
    // Keep the prepopulated header; the next payload starts right after it.
    put_offset_ = kDawnReturnCmdsOffset;
  }
}
} // namespace gpu::webgpu