// Copyright 2020 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/memory/shared_memory_security_policy.h"
#include <algorithm>
#include <atomic>
#include <limits>
#include <optional>
#include "base/bits.h"
#include "base/memory/page_size.h"
#include "base/numerics/checked_math.h"
#include "build/build_config.h"
namespace base {
namespace {
// Note: pointers are 32 bits on all architectures in NaCl. See
// https://bugs.chromium.org/p/nativeclient/issues/detail?id=1162
#if defined(ARCH_CPU_32_BITS) || BUILDFLAG(IS_NACL)
// No effective limit on 32-bit, since there simply isn't enough address space
// for ASLR to be particularly effective.
constexpr size_t kTotalMappedSizeLimit = std::numeric_limits<size_t>::max();
#elif defined(ARCH_CPU_64_BITS)
// 32 GB of mappings ought to be enough for anybody.
constexpr size_t kTotalMappedSizeLimit = 32ULL * 1024 * 1024 * 1024;
#endif
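
// Running total, in bytes, of the page-aligned sizes of all shared memory
// reservations currently held by this process.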
static std::atomic_size_t total_mapped_size_;

std::optional<size_t> AlignWithPageSize(size_t size) {
#if BUILDFLAG(IS_WIN)
  // TODO(crbug.com/40307662): Matches alignment requirements defined in
  // platform_shared_memory_region_win.cc:PlatformSharedMemoryRegion::Create.
  // Remove this when NaCl is gone.
  static const size_t kSectionSize = 65536;
  const size_t page_size = std::max(kSectionSize, GetPageSize());
#else
  const size_t page_size = GetPageSize();
#endif  // BUILDFLAG(IS_WIN)

  size_t rounded_size = bits::AlignUp(size, page_size);

  // Fail on overflow.
  if (rounded_size < size)
    return std::nullopt;

  return rounded_size;
}
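
// Worked example of the rounding above (hypothetical values; assumes a
// non-Windows build where GetPageSize() returns 4096):
//   AlignWithPageSize(1)        -> 4096
//   AlignWithPageSize(4096)     -> 4096
//   AlignWithPageSize(4097)     -> 8192
//   AlignWithPageSize(SIZE_MAX) -> std::nullopt, since AlignUp() wraps
//                                  around and the overflow check fires.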
}  // namespace

// static
bool SharedMemorySecurityPolicy::AcquireReservationForMapping(size_t size) {
  size_t previous_mapped_size =
      total_mapped_size_.load(std::memory_order_relaxed);
  size_t total_mapped_size;
  std::optional<size_t> page_aligned_size = AlignWithPageSize(size);
  if (!page_aligned_size)
    return false;

  // Relaxed memory ordering is all that's needed since atomicity is all
  // that's required. If the value is stale, compare_exchange_weak() will fail
  // and the loop will retry the operation with an updated total mapped size.
  do {
    if (!CheckAdd(previous_mapped_size, *page_aligned_size)
             .AssignIfValid(&total_mapped_size)) {
      return false;
    }
    if (total_mapped_size >= kTotalMappedSizeLimit)
      return false;
  } while (!total_mapped_size_.compare_exchange_weak(
      previous_mapped_size, total_mapped_size, std::memory_order_relaxed,
      std::memory_order_relaxed));

  return true;
}
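
// Usage sketch (hypothetical caller; PlatformMap()/PlatformUnmap() are
// stand-in names, not real APIs): every successful acquire must be paired
// with a release of the same |size| so the global counter stays balanced.
//
//   if (!SharedMemorySecurityPolicy::AcquireReservationForMapping(size))
//     return false;  // Reservation denied: limit reached or size overflow.
//   if (!PlatformMap(size)) {
//     // Mapping failed; roll the reservation back immediately.
//     SharedMemorySecurityPolicy::ReleaseReservationForMapping(size);
//     return false;
//   }
//   // ... later, after PlatformUnmap(size):
//   SharedMemorySecurityPolicy::ReleaseReservationForMapping(size);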

// static
void SharedMemorySecurityPolicy::ReleaseReservationForMapping(size_t size) {
  // Note #1: relaxed memory ordering is sufficient since atomicity is all
  // that's required.
  // Note #2: |size| should never overflow when aligned to page size, since
  // this should only be called if AcquireReservationForMapping() returned
  // true.
  std::optional<size_t> page_aligned_size = AlignWithPageSize(size);
  total_mapped_size_.fetch_sub(*page_aligned_size, std::memory_order_relaxed);
}

}  // namespace base