// base/allocator/partition_allocator/src/partition_alloc/hardening_unittest.cc
// Copyright 2021 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <cstdint>
#include <string>
#include <vector>
#include "partition_alloc/build_config.h"
#include "partition_alloc/partition_alloc_config.h"
#include "partition_alloc/partition_freelist_entry.h"
#include "partition_alloc/partition_page.h"
#include "partition_alloc/partition_root.h"
#include "partition_alloc/use_death_tests.h"
#include "testing/gtest/include/gtest/gtest.h"
// With *SAN, PartitionAlloc is rerouted to malloc().
#if !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
namespace partition_alloc::internal {
namespace {
#if PA_USE_DEATH_TESTS() && PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
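// The tests in this block exercise the freelist "shadow entry" hardening: in
// this configuration each freelist entry is expected to carry an encoded next
// pointer (encoded_next_) plus a shadow copy (shadow_), and popping an entry
// whose two copies disagree, or whose target lies outside the expected
// region, is treated as corruption and crashes (see the per-test comments
// below).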
TEST(HardeningTest, PartialCorruption) {
std::string important_data("very important");
char* to_corrupt = const_cast<char*>(important_data.c_str());
PartitionOptions opts;
PartitionRoot root(opts);
root.UncapEmptySlotSpanMemoryForTesting();
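// Presumably keeps freed slot spans, and thus their freelist entries,
// committed rather than letting the empty-slot-span cap decommit them, so the
// corruption below targets live freelist memory. (Assumption: the helper is
// not documented in this file.)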
const size_t kAllocSize = 100;
void* data = root.Alloc(kAllocSize);
void* data2 = root.Alloc(kAllocSize);
root.Free(data2);
root.Free(data);
// root->bucket->active_slot_span_head->freelist_head points to data, next_
// points to data2. We can corrupt *data to overwrite the next_ pointer.
// Even if it looks reasonable (valid encoded pointer), freelist corruption
// detection will make the code crash, because shadow_ doesn't match
// encoded_next_.
root.get_freelist_dispatcher()->EmplaceAndInitForTest(
root.ObjectToSlotStart(data), to_corrupt, false);
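// The last argument selects whether the shadow copy is also updated to match
// the new encoded next pointer; passing false leaves shadow_ stale, which is
// exactly the mismatch the next allocation should detect.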
EXPECT_DEATH(root.Alloc(kAllocSize), "");
}
TEST(HardeningTest, OffHeapPointerCrashing) {
std::string important_data("very important");
char* to_corrupt = const_cast<char*>(important_data.c_str());
PartitionOptions opts;
PartitionRoot root(opts);
root.UncapEmptySlotSpanMemoryForTesting();
const size_t kAllocSize = 100;
void* data = root.Alloc(kAllocSize);
void* data2 = root.Alloc(kAllocSize);
root.Free(data2);
root.Free(data);
// See "PartialCorruption" above for details. This time, make shadow_
// consistent.
root.get_freelist_dispatcher()->EmplaceAndInitForTest(
root.ObjectToSlotStart(data), to_corrupt, true);
// Crashes, because |to_corrupt| is not on the same superpage as data.
EXPECT_DEATH(root.Alloc(kAllocSize), "");
}
TEST(HardeningTest, MetadataPointerCrashing) {
PartitionOptions opts;
PartitionRoot root(opts);
root.UncapEmptySlotSpanMemoryForTesting();
const size_t kAllocSize = 100;
void* data = root.Alloc(kAllocSize);
void* data2 = root.Alloc(kAllocSize);
root.Free(data2);
root.Free(data);
uintptr_t slot_start = root.ObjectToSlotStart(data);
auto* metadata =
SlotSpanMetadata<MetadataKind::kReadOnly>::FromSlotStart(slot_start);
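// |metadata| points into the slot-span metadata area of the super page, i.e.
// at allocator bookkeeping rather than at a slot; the freelist checks below
// are expected to reject such a target.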
root.get_freelist_dispatcher()->EmplaceAndInitForTest(slot_start, metadata,
true);
// Crashes, because |metadata| points inside the metadata area.
EXPECT_DEATH(root.Alloc(kAllocSize), "");
}
#endif // PA_USE_DEATH_TESTS() && PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
// The test below also misbehaves on Android: death tests don't quite work
// there (crbug.com/1240184), and having free slot bitmaps enabled forces the
// expectations below to crash.
#if !PA_BUILDFLAG(IS_ANDROID)
TEST(HardeningTest, SuccessfulCorruption) {
PartitionOptions opts;
PartitionRoot root(opts);
root.UncapEmptySlotSpanMemoryForTesting();
uintptr_t* zero_vector = reinterpret_cast<uintptr_t*>(
root.Alloc<AllocFlags::kZeroFill>(100 * sizeof(uintptr_t), ""));
ASSERT_TRUE(zero_vector);
// Pointer to the middle of an existing allocation.
uintptr_t* to_corrupt = zero_vector + 20;
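// Because the backing allocation is zero-filled, *to_corrupt reads as zero,
// which the freelist treats as a "valid" (terminal) entry. This is what lets
// the corruption go undetected unless the freeslot bitmap or slot-start
// enforcement catches it (see the branches below).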
const size_t kAllocSize = 100;
void* data = root.Alloc(kAllocSize);
void* data2 = root.Alloc(kAllocSize);
root.Free(data2);
root.Free(data);
root.get_freelist_dispatcher()->EmplaceAndInitForTest(
root.ObjectToSlotStartUnchecked(data), to_corrupt, true);
#if PA_BUILDFLAG(USE_FREESLOT_BITMAP)
// This part crashes with the freeslot bitmap because it detects the freelist
// corruption, which is the desired behavior.
EXPECT_DEATH_IF_SUPPORTED(root.Alloc(kAllocSize), "");
#else
// The next allocation returns what was in
// root->bucket->active_slot_span_head->freelist_head, i.e. not the corrupted
// pointer.
void* new_data = root.Alloc(kAllocSize);
ASSERT_EQ(new_data, data);
#if !PA_CONFIG(ENFORCE_SLOT_STARTS)
// Not crashing, because a zeroed area is a "valid" freelist entry.
void* new_data2 = root.Alloc(kAllocSize);
// Now we have a pointer to the middle of an existing allocation.
EXPECT_EQ(new_data2, to_corrupt);
#else
// When `SlotStart` enforcement is on, `AllocInternalNoHooks()` will
// call `SlotStartToObject()` and `CHECK()` that it's a slot start.
EXPECT_DEATH_IF_SUPPORTED(root.Alloc(kAllocSize), "");
#endif // !PA_CONFIG(ENFORCE_SLOT_STARTS)
#endif // PA_BUILDFLAG(USE_FREESLOT_BITMAP)
}
#endif // !PA_BUILDFLAG(IS_ANDROID)
#if PA_BUILDFLAG(USE_FREELIST_DISPATCHER)
#if PA_USE_DEATH_TESTS() && PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
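// The tests below cover the pool-offset freelist variant, in which entries
// are presumably stored as offsets within the allocator's pool rather than as
// encoded pointers, so a target address that cannot be expressed as an
// in-pool offset (e.g. a stack address) is rejected as soon as the entry is
// constructed.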
TEST(HardeningTest, ConstructPoolOffsetFromStackPointerCrashing) {
int num_to_corrupt = 12345;
int* to_corrupt = &num_to_corrupt;
PartitionOptions opts;
opts.use_pool_offset_freelists = PartitionOptions::kEnabled;
PartitionRoot root(opts);
root.UncapEmptySlotSpanMemoryForTesting();
const size_t kAllocSize = 100;
void* data = root.Alloc(kAllocSize);
EXPECT_DEATH(root.get_freelist_dispatcher()->EmplaceAndInitForTest(
root.ObjectToSlotStart(data), to_corrupt, true),
"");
}
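// Same scenario as MetadataPointerCrashing above, with pool offset freelists
// enabled.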
TEST(HardeningTest, PoolOffsetMetadataPointerCrashing) {
PartitionOptions opts;
opts.use_pool_offset_freelists = PartitionOptions::kEnabled;
PartitionRoot root(opts);
root.UncapEmptySlotSpanMemoryForTesting();
const size_t kAllocSize = 100;
void* data = root.Alloc(kAllocSize);
void* data2 = root.Alloc(kAllocSize);
root.Free(data2);
root.Free(data);
uintptr_t slot_start = root.ObjectToSlotStart(data);
auto* metadata =
SlotSpanMetadata<MetadataKind::kReadOnly>::FromSlotStart(slot_start);
root.get_freelist_dispatcher()->EmplaceAndInitForTest(slot_start, metadata,
true);
// Crashes, because |metadata| points inside the metadata area.
EXPECT_DEATH(root.Alloc(kAllocSize), "");
}
#endif // PA_USE_DEATH_TESTS() && PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
#if !PA_BUILDFLAG(IS_ANDROID)
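// Same scenario as SuccessfulCorruption above, with pool offset freelists
// enabled.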
TEST(HardeningTest, PoolOffsetSuccessfulCorruption) {
PartitionOptions opts;
opts.use_pool_offset_freelists = PartitionOptions::kEnabled;
PartitionRoot root(opts);
root.UncapEmptySlotSpanMemoryForTesting();
uintptr_t* zero_vector = reinterpret_cast<uintptr_t*>(
root.Alloc<AllocFlags::kZeroFill>(100 * sizeof(uintptr_t), ""));
ASSERT_TRUE(zero_vector);
// Pointer to the middle of an existing allocation.
uintptr_t* to_corrupt = zero_vector + 20;
const size_t kAllocSize = 100;
void* data = root.Alloc(kAllocSize);
void* data2 = root.Alloc(kAllocSize);
root.Free(data2);
root.Free(data);
root.get_freelist_dispatcher()->EmplaceAndInitForTest(
root.ObjectToSlotStart(data), to_corrupt, true);
#if PA_BUILDFLAG(USE_FREESLOT_BITMAP)
// This part crashes with the freeslot bitmap because it detects the freelist
// corruption, which is the desired behavior.
EXPECT_DEATH_IF_SUPPORTED(root.Alloc(kAllocSize), "");
#else
// The next allocation returns what was in
// root->bucket->active_slot_span_head->freelist_head, i.e. not the corrupted
// pointer.
void* new_data = root.Alloc(kAllocSize);
ASSERT_EQ(new_data, data);
#if !PA_CONFIG(ENFORCE_SLOT_STARTS)
// Not crashing, because a zeroed area is a "valid" freelist entry.
void* new_data2 = root.Alloc(kAllocSize);
// Now we have a pointer to the middle of an existing allocation.
EXPECT_EQ(new_data2, to_corrupt);
#else
// When `SlotStart` enforcement is on, `AllocInternalNoHooks()` will
// call `SlotStartToObject()` and `CHECK()` that it's a slot start.
EXPECT_DEATH_IF_SUPPORTED(root.Alloc(kAllocSize), "");
#endif // !PA_CONFIG(ENFORCE_SLOT_STARTS)
#endif // PA_BUILDFLAG(USE_FREESLOT_BITMAP)
}
#endif // !PA_BUILDFLAG(IS_ANDROID)
#endif // PA_BUILDFLAG(USE_FREELIST_DISPATCHER)
} // namespace
} // namespace partition_alloc::internal
#endif // !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)