[libc] Use best-fit binary trie to make malloc logarithmic (#106259)
libc/test/src/__support/block_test.cpp
//===-- Unittests for a block of memory -------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include <stddef.h>
#include <stdint.h>

#include "src/__support/CPP/array.h"
#include "src/__support/CPP/bit.h"
#include "src/__support/CPP/span.h"
#include "src/__support/block.h"
#include "src/string/memcpy.h"
#include "test/UnitTest/Test.h"

// Block types.
using LargeOffsetBlock = LIBC_NAMESPACE::Block<uint64_t>;
using SmallOffsetBlock = LIBC_NAMESPACE::Block<uint16_t>;

// For each of the block types above, we'd like to run the same tests since
// they should work independently of the parameter sizes. Rather than re-writing
// the same test for each case, let's instead create a custom test framework for
// each test case that invokes the actual testing function for each block type.

// It's organized this way because the ASSERT/EXPECT macros only work within a
// `Test` class due to those macros expanding to `test` methods.
#define TEST_FOR_EACH_BLOCK_TYPE(TestCase)                                     \
  class LlvmLibcBlockTest##TestCase : public LIBC_NAMESPACE::testing::Test {  \
  public:                                                                      \
    template <typename BlockType> void RunTest();                             \
  };                                                                           \
  TEST_F(LlvmLibcBlockTest##TestCase, TestCase) {                              \
    RunTest<LargeOffsetBlock>();                                               \
    RunTest<SmallOffsetBlock>();                                               \
  }                                                                            \
  template <typename BlockType> void LlvmLibcBlockTest##TestCase::RunTest()
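// For illustration, an invocation like
//   TEST_FOR_EACH_BLOCK_TYPE(Foo) { /* test body */ }
// roughly expands to a `LlvmLibcBlockTestFoo` fixture, a TEST_F body that runs
// RunTest<LargeOffsetBlock>() and RunTest<SmallOffsetBlock>(), and finally the
// signature of RunTest(), so the braced test body becomes the definition of
// RunTest() and is executed once per block type. ("Foo" is just a placeholder
// test-case name.)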

using LIBC_NAMESPACE::cpp::array;
using LIBC_NAMESPACE::cpp::bit_ceil;
using LIBC_NAMESPACE::cpp::byte;
using LIBC_NAMESPACE::cpp::span;

TEST_FOR_EACH_BLOCK_TYPE(CanCreateSingleAlignedBlock) {
  constexpr size_t kN = 1024;
  alignas(BlockType::ALIGNMENT) array<byte, kN> bytes;

  auto result = BlockType::init(bytes);
  ASSERT_TRUE(result.has_value());
  BlockType *block = *result;

  BlockType *last = block->next();
  ASSERT_NE(last, static_cast<BlockType *>(nullptr));
  constexpr size_t last_outer_size = BlockType::BLOCK_OVERHEAD;
  EXPECT_EQ(last->outer_size(), last_outer_size);
  EXPECT_EQ(last->prev_free(), block);
  EXPECT_TRUE(last->used());

  EXPECT_EQ(block->outer_size(), kN - last_outer_size);
  constexpr size_t last_prev_field_size =
      sizeof(typename BlockType::offset_type);
  EXPECT_EQ(block->inner_size(), kN - last_outer_size -
                                     BlockType::BLOCK_OVERHEAD +
                                     last_prev_field_size);
  EXPECT_EQ(block->prev_free(), static_cast<BlockType *>(nullptr));
  EXPECT_FALSE(block->used());
}
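
// As the assertions above check, init() leaves a leading free block followed
// by a trailing block of outer size BLOCK_OVERHEAD that is marked used, and
// the free block's inner (usable) size is its outer size minus BLOCK_OVERHEAD
// plus the size of the prev/offset field.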

TEST_FOR_EACH_BLOCK_TYPE(CanCreateUnalignedSingleBlock) {
  constexpr size_t kN = 1024;

  // Force alignment, so we can un-force it below
  alignas(BlockType::ALIGNMENT) array<byte, kN> bytes;
  span<byte> aligned(bytes);

  auto result = BlockType::init(aligned.subspan(1));
  EXPECT_TRUE(result.has_value());
}

TEST_FOR_EACH_BLOCK_TYPE(CannotCreateTooSmallBlock) {
  array<byte, 2> bytes;
  auto result = BlockType::init(bytes);
  EXPECT_FALSE(result.has_value());
}

// This test specifically checks that we cannot create a block with a size
// larger than what can be held by the offset type. We don't need to test with
// multiple block types for this particular check, so we use the normal TEST
// macro and not the custom framework.
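// (A uint8_t offset can hold at most 255, far smaller than the 1024-byte
// region used below.)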
TEST(LlvmLibcBlockTest, CannotCreateTooLargeBlock) {
  using BlockType = LIBC_NAMESPACE::Block<uint8_t>;
  constexpr size_t kN = 1024;

  alignas(BlockType::ALIGNMENT) array<byte, kN> bytes;
  auto result = BlockType::init(bytes);
  EXPECT_FALSE(result.has_value());
}

TEST_FOR_EACH_BLOCK_TYPE(CanSplitBlock) {
  constexpr size_t kN = 1024;
  constexpr size_t prev_field_size = sizeof(typename BlockType::offset_type);
  // Give the split position a large alignment.
  constexpr size_t kSplitN = 512 + prev_field_size;

  alignas(BlockType::ALIGNMENT) array<byte, kN> bytes;
  auto result = BlockType::init(bytes);
  ASSERT_TRUE(result.has_value());
  auto *block1 = *result;
  size_t orig_size = block1->outer_size();

  result = block1->split(kSplitN);
  ASSERT_TRUE(result.has_value());
  auto *block2 = *result;

  EXPECT_EQ(block1->inner_size(), kSplitN);
  EXPECT_EQ(block1->outer_size(),
            kSplitN - prev_field_size + BlockType::BLOCK_OVERHEAD);

  EXPECT_EQ(block2->outer_size(), orig_size - block1->outer_size());
  EXPECT_FALSE(block2->used());

  EXPECT_EQ(block1->next(), block2);
  EXPECT_EQ(block2->prev_free(), block1);
}

TEST_FOR_EACH_BLOCK_TYPE(CanSplitBlockUnaligned) {
  constexpr size_t kN = 1024;

  alignas(BlockType::ALIGNMENT) array<byte, kN> bytes;
  auto result = BlockType::init(bytes);
  ASSERT_TRUE(result.has_value());
  BlockType *block1 = *result;
  size_t orig_size = block1->outer_size();

  constexpr size_t kSplitN = 513;
  constexpr size_t prev_field_size = sizeof(typename BlockType::offset_type);
  uintptr_t split_addr =
      reinterpret_cast<uintptr_t>(block1) + (kSplitN - prev_field_size);
  // Round split_addr up to a multiple of the alignment.
  split_addr += alignof(BlockType) - (split_addr % alignof(BlockType));
  uintptr_t split_len = split_addr - (uintptr_t)&bytes + prev_field_size;
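
  // This mirrors the rounding that split() is expected to apply to an
  // unaligned request; the inner_size() check below depends on it.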

  result = block1->split(kSplitN);
  ASSERT_TRUE(result.has_value());
  BlockType *block2 = *result;

  EXPECT_EQ(block1->inner_size(), split_len);

  EXPECT_EQ(block2->outer_size(), orig_size - block1->outer_size());
  EXPECT_FALSE(block2->used());

  EXPECT_EQ(block1->next(), block2);
  EXPECT_EQ(block2->prev_free(), block1);
}

TEST_FOR_EACH_BLOCK_TYPE(CanSplitMidBlock) {
  // split once, then split the original block again to ensure that the
  // pointers get rewired properly.
  // I.e.
  // [[ BLOCK 1 ]]
  // block1->split()
  // [[ BLOCK1 ]][[ BLOCK2 ]]
  // block1->split()
  // [[ BLOCK1 ]][[ BLOCK3 ]][[ BLOCK2 ]]

  constexpr size_t kN = 1024;
  constexpr size_t kSplit1 = 512;
  constexpr size_t kSplit2 = 256;

  alignas(BlockType::ALIGNMENT) array<byte, kN> bytes;
  auto result = BlockType::init(bytes);
  ASSERT_TRUE(result.has_value());
  BlockType *block1 = *result;

  result = block1->split(kSplit1);
  ASSERT_TRUE(result.has_value());
  BlockType *block2 = *result;

  result = block1->split(kSplit2);
  ASSERT_TRUE(result.has_value());
  BlockType *block3 = *result;

  EXPECT_EQ(block1->next(), block3);
  EXPECT_EQ(block3->prev_free(), block1);
  EXPECT_EQ(block3->next(), block2);
  EXPECT_EQ(block2->prev_free(), block3);
}

TEST_FOR_EACH_BLOCK_TYPE(CannotSplitTooSmallBlock) {
  constexpr size_t kN = 64;
  constexpr size_t kSplitN = kN + 1;

  alignas(BlockType::ALIGNMENT) array<byte, kN> bytes;
  auto result = BlockType::init(bytes);
  ASSERT_TRUE(result.has_value());
  BlockType *block = *result;

  result = block->split(kSplitN);
  ASSERT_FALSE(result.has_value());
}

TEST_FOR_EACH_BLOCK_TYPE(CannotSplitBlockWithoutHeaderSpace) {
  constexpr size_t kN = 1024;
  constexpr size_t kSplitN = kN - 2 * BlockType::BLOCK_OVERHEAD - 1;

  alignas(BlockType::ALIGNMENT) array<byte, kN> bytes;
  auto result = BlockType::init(bytes);
  ASSERT_TRUE(result.has_value());
  BlockType *block = *result;

  result = block->split(kSplitN);
  ASSERT_FALSE(result.has_value());
}

TEST_FOR_EACH_BLOCK_TYPE(CannotMakeBlockLargerInSplit) {
  // Ensure that we can't ask for more space than the block actually has...
  constexpr size_t kN = 1024;

  alignas(BlockType::ALIGNMENT) array<byte, kN> bytes;
  auto result = BlockType::init(bytes);
  ASSERT_TRUE(result.has_value());
  BlockType *block = *result;

  result = block->split(block->inner_size() + 1);
  ASSERT_FALSE(result.has_value());
}

TEST_FOR_EACH_BLOCK_TYPE(CannotMakeSecondBlockLargerInSplit) {
  // Ensure that the second block created by a split is at least the size of
  // the block header.
  constexpr size_t kN = 1024;

  alignas(BlockType::ALIGNMENT) array<byte, kN> bytes;
  auto result = BlockType::init(bytes);
  ASSERT_TRUE(result.has_value());
  BlockType *block = *result;

  result = block->split(block->inner_size() - BlockType::BLOCK_OVERHEAD + 1);
  ASSERT_FALSE(result.has_value());
}

TEST_FOR_EACH_BLOCK_TYPE(CanMakeMinimalSizeFirstBlock) {
  // This block does support splitting with minimal payload size.
  constexpr size_t kN = 1024;
  constexpr size_t minimal_size = sizeof(typename BlockType::offset_type);

  alignas(BlockType::ALIGNMENT) array<byte, kN> bytes;
  auto result = BlockType::init(bytes);
  ASSERT_TRUE(result.has_value());
  BlockType *block = *result;

  result = block->split(minimal_size);
  ASSERT_TRUE(result.has_value());
  EXPECT_EQ(block->inner_size(), minimal_size);
}

TEST_FOR_EACH_BLOCK_TYPE(CanMakeMinimalSizeSecondBlock) {
  // Likewise, the split block can be minimal-width.
  constexpr size_t kN = 1024;
  constexpr size_t minimal_size = sizeof(typename BlockType::offset_type);

  alignas(BlockType::ALIGNMENT) array<byte, kN> bytes;
  auto result = BlockType::init(bytes);
  ASSERT_TRUE(result.has_value());
  BlockType *block1 = *result;

  result = block1->split(block1->inner_size() - BlockType::BLOCK_OVERHEAD);
  ASSERT_TRUE(result.has_value());
  BlockType *block2 = *result;

  EXPECT_EQ(block2->inner_size(), minimal_size);
}

TEST_FOR_EACH_BLOCK_TYPE(CanMarkBlockUsed) {
  constexpr size_t kN = 1024;

  alignas(BlockType::ALIGNMENT) array<byte, kN> bytes;
  auto result = BlockType::init(bytes);
  ASSERT_TRUE(result.has_value());
  BlockType *block = *result;
  size_t orig_size = block->outer_size();

  block->mark_used();
  EXPECT_TRUE(block->used());
  EXPECT_EQ(block->outer_size(), orig_size);

  block->mark_free();
  EXPECT_FALSE(block->used());
}

TEST_FOR_EACH_BLOCK_TYPE(CannotSplitUsedBlock) {
  constexpr size_t kN = 1024;
  constexpr size_t kSplitN = 512;

  alignas(BlockType::ALIGNMENT) array<byte, kN> bytes;
  auto result = BlockType::init(bytes);
  ASSERT_TRUE(result.has_value());
  BlockType *block = *result;

  block->mark_used();
  result = block->split(kSplitN);
  ASSERT_FALSE(result.has_value());
}

TEST_FOR_EACH_BLOCK_TYPE(CanMergeWithNextBlock) {
  // Repeat the three-way split from "CanSplitMidBlock", then merge block 3
  // and block 2.
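  // I.e.
  // [[ BLOCK1 ]][[ BLOCK3 ]][[ BLOCK2 ]]
  // block3->merge_next()
  // [[ BLOCK1 ]][[      BLOCK3       ]]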
  constexpr size_t kN = 1024;
  // Give the split positions large alignments.
  constexpr size_t prev_field_size = sizeof(typename BlockType::offset_type);
  constexpr size_t kSplit1 = 512 + prev_field_size;
  constexpr size_t kSplit2 = 256 + prev_field_size;

  alignas(BlockType::ALIGNMENT) array<byte, kN> bytes;
  auto result = BlockType::init(bytes);
  ASSERT_TRUE(result.has_value());
  BlockType *block1 = *result;
  size_t orig_size = block1->outer_size();

  result = block1->split(kSplit1);
  ASSERT_TRUE(result.has_value());

  result = block1->split(kSplit2);
  ASSERT_TRUE(result.has_value());
  BlockType *block3 = *result;

  EXPECT_TRUE(block3->merge_next());

  EXPECT_EQ(block1->next(), block3);
  EXPECT_EQ(block3->prev_free(), block1);
  EXPECT_EQ(block1->inner_size(), kSplit2);
  EXPECT_EQ(block3->outer_size(), orig_size - block1->outer_size());
}

TEST_FOR_EACH_BLOCK_TYPE(CannotMergeWithFirstOrLastBlock) {
  constexpr size_t kN = 1024;
  constexpr size_t kSplitN = 512;

  alignas(BlockType::ALIGNMENT) array<byte, kN> bytes;
  auto result = BlockType::init(bytes);
  ASSERT_TRUE(result.has_value());
  BlockType *block1 = *result;

  // Do a split, just to check that the checks on next/prev are different...
  result = block1->split(kSplitN);
  ASSERT_TRUE(result.has_value());
  BlockType *block2 = *result;

  EXPECT_FALSE(block2->merge_next());
}

TEST_FOR_EACH_BLOCK_TYPE(CannotMergeUsedBlock) {
  constexpr size_t kN = 1024;
  constexpr size_t kSplitN = 512;

  alignas(BlockType::ALIGNMENT) array<byte, kN> bytes;
  auto result = BlockType::init(bytes);
  ASSERT_TRUE(result.has_value());
  BlockType *block = *result;

  // Do a split, just to check that the checks on next/prev are different...
  result = block->split(kSplitN);
  ASSERT_TRUE(result.has_value());

  block->mark_used();
  EXPECT_FALSE(block->merge_next());
}

TEST_FOR_EACH_BLOCK_TYPE(CanGetBlockFromUsableSpace) {
  constexpr size_t kN = 1024;

  array<byte, kN> bytes{};
  auto result = BlockType::init(bytes);
  ASSERT_TRUE(result.has_value());
  BlockType *block1 = *result;

  void *ptr = block1->usable_space();
  BlockType *block2 = BlockType::from_usable_space(ptr);
  EXPECT_EQ(block1, block2);
}

TEST_FOR_EACH_BLOCK_TYPE(CanGetConstBlockFromUsableSpace) {
  constexpr size_t kN = 1024;

  array<byte, kN> bytes{};
  auto result = BlockType::init(bytes);
  ASSERT_TRUE(result.has_value());
  const BlockType *block1 = *result;

  const void *ptr = block1->usable_space();
  const BlockType *block2 = BlockType::from_usable_space(ptr);
  EXPECT_EQ(block1, block2);
}

TEST_FOR_EACH_BLOCK_TYPE(CanAllocate) {
  constexpr size_t kN = 1024 + BlockType::BLOCK_OVERHEAD;

  // Ensure we can allocate everything up to the block size within this block.
  for (size_t i = 0; i < kN - 2 * BlockType::BLOCK_OVERHEAD; ++i) {
    alignas(BlockType::ALIGNMENT) array<byte, kN> bytes{};
    auto result = BlockType::init(bytes);
    ASSERT_TRUE(result.has_value());
    BlockType *block = *result;

    constexpr size_t ALIGN = 1; // Effectively ignores alignment.
    EXPECT_TRUE(block->can_allocate(ALIGN, i));

    // For each can_allocate, we should be able to do a successful call to
    // allocate.
    auto info = BlockType::allocate(block, ALIGN, i);
    EXPECT_NE(info.block, static_cast<BlockType *>(nullptr));
  }

  alignas(BlockType::ALIGNMENT) array<byte, kN> bytes{};
  auto result = BlockType::init(bytes);
  ASSERT_TRUE(result.has_value());
  BlockType *block = *result;

  // Given a block of size N (assuming it's also a power of two), we should be
  // able to allocate a block within it that's aligned to N/2. This is
  // because regardless of where the buffer is located, we can always find a
  // starting location within it that meets this alignment.
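  //
  // For example, with kN = 1024 + BLOCK_OVERHEAD the first block's outer size
  // is 1024 (the trailing block takes BLOCK_OVERHEAD), so the request below
  // asks for 512-byte alignment.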
  EXPECT_TRUE(block->can_allocate(block->outer_size() / 2, 1));
  auto info = BlockType::allocate(block, block->outer_size() / 2, 1);
  EXPECT_NE(info.block, static_cast<BlockType *>(nullptr));
}

TEST_FOR_EACH_BLOCK_TYPE(AllocateAlreadyAligned) {
  constexpr size_t kN = 1024;

  alignas(BlockType::ALIGNMENT) array<byte, kN> bytes{};
  auto result = BlockType::init(bytes);
  ASSERT_TRUE(result.has_value());
  BlockType *block = *result;

  // This should result in no new blocks.
  constexpr size_t kAlignment = BlockType::ALIGNMENT;
  constexpr size_t prev_field_size = sizeof(typename BlockType::offset_type);
  constexpr size_t kExpectedSize = BlockType::ALIGNMENT + prev_field_size;
  EXPECT_TRUE(block->can_allocate(kAlignment, kExpectedSize));

  auto [aligned_block, prev, next] =
      BlockType::allocate(block, BlockType::ALIGNMENT, kExpectedSize);
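
  // In these tests, allocate() returns the block holding the aligned
  // allocation along with any new blocks created around it: `prev` for a block
  // split off before it and `next` for one split off after it, each nullptr
  // when no such block was created.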

  // Since this is already aligned, there should be no previous block.
  EXPECT_EQ(prev, static_cast<BlockType *>(nullptr));

  // Ensure the block is aligned and has the size we expect.
  EXPECT_NE(aligned_block, static_cast<BlockType *>(nullptr));
  EXPECT_TRUE(aligned_block->is_usable_space_aligned(BlockType::ALIGNMENT));
  EXPECT_EQ(aligned_block->inner_size(), kExpectedSize);

  // Check the next block.
  EXPECT_NE(next, static_cast<BlockType *>(nullptr));
  EXPECT_EQ(aligned_block->next(), next);
  EXPECT_EQ(reinterpret_cast<byte *>(next) + next->outer_size(),
            bytes.data() + bytes.size() - BlockType::BLOCK_OVERHEAD);
}

TEST_FOR_EACH_BLOCK_TYPE(AllocateNeedsAlignment) {
  constexpr size_t kN = 1024;

  alignas(kN) array<byte, kN> bytes{};
  auto result = BlockType::init(bytes);
  ASSERT_TRUE(result.has_value());
  BlockType *block = *result;

  // First, ensure the usable space is only aligned to the block alignment.
  ASSERT_EQ(block->usable_space(), bytes.data() + BlockType::BLOCK_OVERHEAD);
  ASSERT_EQ(block->prev_free(), static_cast<BlockType *>(nullptr));

  // Now pick an alignment such that the usable space is not already aligned to
  // it. We want to explicitly test that the block will split into one before
  // it.
  constexpr size_t kAlignment = bit_ceil(BlockType::BLOCK_OVERHEAD) * 8;
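  // Note: bit_ceil rounds BLOCK_OVERHEAD up to a power of two, so kAlignment
  // is a power-of-two alignment strictly greater than BLOCK_OVERHEAD. The
  // usable space starts BLOCK_OVERHEAD bytes into the kN-aligned buffer, so it
  // cannot already have this alignment (assuming kAlignment divides kN).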
  ASSERT_FALSE(block->is_usable_space_aligned(kAlignment));

  constexpr size_t kSize = 10;
  EXPECT_TRUE(block->can_allocate(kAlignment, kSize));

  auto [aligned_block, prev, next] =
      BlockType::allocate(block, kAlignment, kSize);

  // Check that the previous block was created appropriately. Since this block
  // is the first block, a new one should be made before it.
  EXPECT_NE(prev, static_cast<BlockType *>(nullptr));
  EXPECT_EQ(aligned_block->prev_free(), prev);
  EXPECT_EQ(prev->next(), aligned_block);
  EXPECT_EQ(prev->outer_size(), reinterpret_cast<uintptr_t>(aligned_block) -
                                    reinterpret_cast<uintptr_t>(prev));

  // Ensure the block is aligned as requested.
  EXPECT_NE(next, static_cast<BlockType *>(nullptr));
  EXPECT_TRUE(aligned_block->is_usable_space_aligned(kAlignment));

  // Check the next block.
  EXPECT_NE(next, static_cast<BlockType *>(nullptr));
  EXPECT_EQ(aligned_block->next(), next);
  EXPECT_EQ(reinterpret_cast<byte *>(next) + next->outer_size(),
            bytes.data() + bytes.size() - BlockType::BLOCK_OVERHEAD);
}

TEST_FOR_EACH_BLOCK_TYPE(PreviousBlockMergedIfNotFirst) {
  constexpr size_t kN = 1024;

  alignas(kN) array<byte, kN> bytes{};
  auto result = BlockType::init(bytes);
  ASSERT_TRUE(result.has_value());
  BlockType *block = *result;

  // Split the block roughly halfway and work on the second half.
  auto result2 = block->split(kN / 2);
  ASSERT_TRUE(result2.has_value());
  BlockType *newblock = *result2;
  ASSERT_EQ(newblock->prev_free(), block);
  size_t old_prev_size = block->outer_size();

  // Now pick an alignment such that the usable space is not already aligned to
  // it. We want to explicitly test that the block will split into one before
  // it.
  constexpr size_t kAlignment = bit_ceil(BlockType::BLOCK_OVERHEAD) * 8;
  ASSERT_FALSE(newblock->is_usable_space_aligned(kAlignment));

  // Ensure we can allocate in the new block.
  constexpr size_t kSize = BlockType::ALIGNMENT;
  EXPECT_TRUE(newblock->can_allocate(kAlignment, kSize));

  auto [aligned_block, prev, next] =
      BlockType::allocate(newblock, kAlignment, kSize);

  // Now there should be no new previous block. Instead, the padding we did
  // create should be merged into the original previous block.
  EXPECT_EQ(prev, static_cast<BlockType *>(nullptr));
  EXPECT_EQ(aligned_block->prev_free(), block);
  EXPECT_EQ(block->next(), aligned_block);
  EXPECT_GT(block->outer_size(), old_prev_size);
}

TEST_FOR_EACH_BLOCK_TYPE(CanRemergeBlockAllocations) {
  // Finally, to ensure we made the split blocks correctly via allocate, we
  // should be able to reconstruct the original block from the blocklets.

  // This is the same setup as with the `AllocateNeedsAlignment` test case.
  constexpr size_t kN = 1024;

  alignas(kN) array<byte, kN> bytes{};
  auto result = BlockType::init(bytes);
  ASSERT_TRUE(result.has_value());
  BlockType *block = *result;
  BlockType *last = block->next();

  // First, ensure the usable space is only aligned to the block alignment.
  ASSERT_EQ(block->usable_space(), bytes.data() + BlockType::BLOCK_OVERHEAD);
  ASSERT_EQ(block->prev_free(), static_cast<BlockType *>(nullptr));

  // Now pick an alignment such that the usable space is not already aligned to
  // it. We want to explicitly test that the block will split into one before
  // it.
  constexpr size_t kAlignment = bit_ceil(BlockType::BLOCK_OVERHEAD) * 8;
  ASSERT_FALSE(block->is_usable_space_aligned(kAlignment));

  constexpr size_t kSize = BlockType::ALIGNMENT;
  EXPECT_TRUE(block->can_allocate(kAlignment, kSize));

  auto [aligned_block, prev, next] =
      BlockType::allocate(block, kAlignment, kSize);

  // Check we have the appropriate blocks.
  ASSERT_NE(prev, static_cast<BlockType *>(nullptr));
  ASSERT_EQ(aligned_block->prev_free(), prev);
  EXPECT_NE(next, static_cast<BlockType *>(nullptr));
  EXPECT_EQ(aligned_block->next(), next);
  EXPECT_EQ(next->next(), last);

  // Now check for successful merges.
  EXPECT_TRUE(prev->merge_next());
  EXPECT_EQ(prev->next(), next);
  EXPECT_TRUE(prev->merge_next());
  EXPECT_EQ(prev->next(), last);

  // We should have the original buffer.
  EXPECT_EQ(reinterpret_cast<byte *>(prev), &*bytes.begin());
  EXPECT_EQ(prev->outer_size(), bytes.size() - BlockType::BLOCK_OVERHEAD);
  EXPECT_EQ(reinterpret_cast<byte *>(prev) + prev->outer_size(),
            &*bytes.end() - BlockType::BLOCK_OVERHEAD);
}