// SPDX-License-Identifier: GPL-2.0-or-later

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#include "generated/bit-length.h"

#include "maple-shared.h"
#include "vma_internal.h"

/* Include so header guard set. */
#include "../../../mm/vma.h"

static bool fail_prealloc;

/* Then override vma_iter_prealloc() so we can choose to fail it. */
#define vma_iter_prealloc(vmi, vma)					\
	(fail_prealloc ? -ENOMEM : mas_preallocate(&(vmi)->mas, (vma), GFP_KERNEL))
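
/*
 * NOTE: the override above is a plain macro, so it must appear before the
 * inclusion of mm/vma.c below in order to replace the real preallocation
 * call and let tests force an -ENOMEM result via fail_prealloc.
 */
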
/*
 * Directly import the VMA implementation here. Our vma_internal.h wrapper
 * provides userland-equivalent functionality for everything vma.c uses.
 */
#include "../../../mm/vma.c"

const struct vm_operations_struct vma_dummy_vm_ops;
static struct anon_vma dummy_anon_vma;

#define ASSERT_TRUE(_expr)						\
	do {								\
		if (!(_expr)) {						\
			fprintf(stderr,					\
				"Assert FAILED at %s:%d:%s(): %s is FALSE.\n", \
				__FILE__, __LINE__, __FUNCTION__, #_expr); \
			return false;					\
		}							\
	} while (0)
#define ASSERT_FALSE(_expr) ASSERT_TRUE(!(_expr))
#define ASSERT_EQ(_val1, _val2) ASSERT_TRUE((_val1) == (_val2))
#define ASSERT_NE(_val1, _val2) ASSERT_TRUE((_val1) != (_val2))
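
/*
 * NOTE: these assertion helpers return false from the *enclosing* function
 * on failure, so they are only intended for use in the bool-returning test
 * functions and helpers below.
 */
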
static struct task_struct __current;

struct task_struct *get_current(void)
{
	return &__current;
}

/* Helper function to simply allocate a VMA. */
static struct vm_area_struct *alloc_vma(struct mm_struct *mm,
					unsigned long start,
					unsigned long end,
					pgoff_t pgoff,
					vm_flags_t flags)
{
	struct vm_area_struct *ret = vm_area_alloc(mm);

	if (ret == NULL)
		return NULL;

	ret->vm_start = start;
	ret->vm_end = end;
	ret->vm_pgoff = pgoff;
	ret->__vm_flags = flags;

	return ret;
}

/* Helper function to allocate a VMA and link it to the tree. */
static struct vm_area_struct *alloc_and_link_vma(struct mm_struct *mm,
						 unsigned long start,
						 unsigned long end,
						 pgoff_t pgoff,
						 vm_flags_t flags)
{
	struct vm_area_struct *vma = alloc_vma(mm, start, end, pgoff, flags);

	if (vma == NULL)
		return NULL;

	if (vma_link(mm, vma)) {
		vm_area_free(vma);
		return NULL;
	}

	/*
	 * Reset this counter which we use to track whether writes have
	 * begun. Linking to the tree will have caused this to be incremented,
	 * which means we will get a false positive otherwise.
	 */
	vma->vm_lock_seq = -1;

	return vma;
}

/* Helper function which provides a wrapper around a merge new VMA operation. */
static struct vm_area_struct *merge_new(struct vma_merge_struct *vmg)
{
	/*
	 * For convenience, get prev and next VMAs, which the new VMA operation
	 * requires.
	 */
	vmg->next = vma_next(vmg->vmi);
	vmg->prev = vma_prev(vmg->vmi);
	vma_iter_next_range(vmg->vmi);

	return vma_merge_new_range(vmg);
}

/*
 * Helper function which provides a wrapper around a merge existing VMA
 * operation.
 */
static struct vm_area_struct *merge_existing(struct vma_merge_struct *vmg)
{
	return vma_merge_existing_range(vmg);
}

/*
 * Helper function which provides a wrapper around the expansion of an existing
 * VMA.
 */
static int expand_existing(struct vma_merge_struct *vmg)
{
	return vma_expand(vmg);
}

/*
 * Helper function to reset the merge state and associated VMA iterator to a
 * specified new range.
 */
static void vmg_set_range(struct vma_merge_struct *vmg, unsigned long start,
			  unsigned long end, pgoff_t pgoff, vm_flags_t flags)
{
	vma_iter_set(vmg->vmi, start);

	vmg->prev = NULL;
	vmg->next = NULL;
	vmg->vma = NULL;

	vmg->start = start;
	vmg->end = end;
	vmg->pgoff = pgoff;
	vmg->flags = flags;
}

/*
 * Helper function to try to merge a new VMA.
 *
 * Update vmg and the iterator for it and try to merge, otherwise allocate a new
 * VMA, link it to the maple tree and return it.
 */
static struct vm_area_struct *try_merge_new_vma(struct mm_struct *mm,
						struct vma_merge_struct *vmg,
						unsigned long start, unsigned long end,
						pgoff_t pgoff, vm_flags_t flags,
						bool *was_merged)
{
	struct vm_area_struct *merged;

	vmg_set_range(vmg, start, end, pgoff, flags);

	merged = merge_new(vmg);
	if (merged) {
		*was_merged = true;
		ASSERT_EQ(vmg->state, VMA_MERGE_SUCCESS);
		return merged;
	}

	*was_merged = false;

	ASSERT_EQ(vmg->state, VMA_MERGE_NOMERGE);

	return alloc_and_link_vma(mm, start, end, pgoff, flags);
}

/*
 * Helper function to reset the dummy anon_vma to indicate it has not been
 * duplicated.
 */
static void reset_dummy_anon_vma(void)
{
	dummy_anon_vma.was_cloned = false;
	dummy_anon_vma.was_unlinked = false;
}

/*
 * Helper function to remove all VMAs and destroy the maple tree associated with
 * a virtual address space. Returns a count of VMAs in the tree.
 */
static int cleanup_mm(struct mm_struct *mm, struct vma_iterator *vmi)
{
	struct vm_area_struct *vma;
	int count = 0;

	fail_prealloc = false;
	reset_dummy_anon_vma();

	vma_iter_set(vmi, 0);
	for_each_vma(*vmi, vma) {
		vm_area_free(vma);
		count++;
	}

	mtree_destroy(&mm->mm_mt);
	mm->map_count = 0;
	return count;
}

/* Helper function to determine if VMA has had vma_start_write() performed. */
static bool vma_write_started(struct vm_area_struct *vma)
{
	int seq = vma->vm_lock_seq;

	/* We reset after each check. */
	vma->vm_lock_seq = -1;

	/* The vma_start_write() stub simply increments this value. */
	return seq > -1;
}

/* Helper function providing a dummy vm_ops->close() method. */
static void dummy_close(struct vm_area_struct *)
{
}
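
/*
 * Merge a new VMA at 0x1000-0x2000 into the gap between two existing,
 * compatible VMAs, expecting a single VMA spanning 0x0-0x3000 as a result.
 */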
static bool test_simple_merge(void)
{
	struct vm_area_struct *vma;
	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	struct vm_area_struct *vma_left = alloc_vma(&mm, 0, 0x1000, 0, flags);
	struct vm_area_struct *vma_right = alloc_vma(&mm, 0x2000, 0x3000, 2, flags);
	VMA_ITERATOR(vmi, &mm, 0x1000);
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
		.start = 0x1000,
		.end = 0x2000,
		.flags = flags,
		.pgoff = 1,
	};

	ASSERT_FALSE(vma_link(&mm, vma_left));
	ASSERT_FALSE(vma_link(&mm, vma_right));

	vma = merge_new(&vmg);
	ASSERT_NE(vma, NULL);

	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0x3000);
	ASSERT_EQ(vma->vm_pgoff, 0);
	ASSERT_EQ(vma->vm_flags, flags);

	vm_area_free(vma);
	mtree_destroy(&mm.mm_mt);

	return true;
}
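
/*
 * Split a single 0x0-0x3000 VMA into three by modifying the flags of the
 * middle page only, then verify each of the three resulting VMAs.
 */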
static bool test_simple_modify(void)
{
	struct vm_area_struct *vma;
	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	struct vm_area_struct *init_vma = alloc_vma(&mm, 0, 0x3000, 0, flags);
	VMA_ITERATOR(vmi, &mm, 0x1000);

	ASSERT_FALSE(vma_link(&mm, init_vma));

	/*
	 * The flags will not be changed; the vma_modify_flags() function
	 * performs the merge/split only.
	 */
	vma = vma_modify_flags(&vmi, init_vma, init_vma,
			       0x1000, 0x2000, VM_READ | VM_MAYREAD);
	ASSERT_NE(vma, NULL);
	/* We modify the provided VMA, and on split allocate new VMAs. */
	ASSERT_EQ(vma, init_vma);

	ASSERT_EQ(vma->vm_start, 0x1000);
	ASSERT_EQ(vma->vm_end, 0x2000);
	ASSERT_EQ(vma->vm_pgoff, 1);

	/*
	 * Now walk through the three split VMAs and make sure they are as
	 * expected.
	 */

	vma_iter_set(&vmi, 0);
	vma = vma_iter_load(&vmi);

	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0x1000);
	ASSERT_EQ(vma->vm_pgoff, 0);

	vm_area_free(vma);
	vma_iter_clear(&vmi);

	vma = vma_next(&vmi);

	ASSERT_EQ(vma->vm_start, 0x1000);
	ASSERT_EQ(vma->vm_end, 0x2000);
	ASSERT_EQ(vma->vm_pgoff, 1);

	vm_area_free(vma);
	vma_iter_clear(&vmi);

	vma = vma_next(&vmi);

	ASSERT_EQ(vma->vm_start, 0x2000);
	ASSERT_EQ(vma->vm_end, 0x3000);
	ASSERT_EQ(vma->vm_pgoff, 2);

	vm_area_free(vma);
	mtree_destroy(&mm.mm_mt);

	return true;
}
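
/*
 * Expand a single-page VMA in place to span 0x0-0x3000 via vma_expand().
 */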
static bool test_simple_expand(void)
{
	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x1000, 0, flags);
	VMA_ITERATOR(vmi, &mm, 0);
	struct vma_merge_struct vmg = {
		.vmi = &vmi,
		.vma = vma,
		.start = 0,
		.end = 0x3000,
		.pgoff = 0,
	};

	ASSERT_FALSE(vma_link(&mm, vma));

	ASSERT_FALSE(expand_existing(&vmg));

	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0x3000);
	ASSERT_EQ(vma->vm_pgoff, 0);

	vm_area_free(vma);
	mtree_destroy(&mm.mm_mt);

	return true;
}
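
/*
 * Shrink a 0x0-0x3000 VMA down to a single page via vma_shrink().
 */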
static bool test_simple_shrink(void)
{
	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x3000, 0, flags);
	VMA_ITERATOR(vmi, &mm, 0);

	ASSERT_FALSE(vma_link(&mm, vma));

	ASSERT_FALSE(vma_shrink(&vmi, vma, 0, 0x1000, 0));

	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0x1000);
	ASSERT_EQ(vma->vm_pgoff, 0);

	vm_area_free(vma);
	mtree_destroy(&mm.mm_mt);

	return true;
}
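
/*
 * Exercise the new-VMA merge path across a series of scenarios: no merge,
 * merge with both neighbours, merge with the previous VMA only and merge
 * with the next VMA only, ending with the whole range collapsed to one VMA.
 */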
static bool test_merge_new(void)
{
	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
	};
	struct anon_vma_chain dummy_anon_vma_chain_a = {
		.anon_vma = &dummy_anon_vma,
	};
	struct anon_vma_chain dummy_anon_vma_chain_b = {
		.anon_vma = &dummy_anon_vma,
	};
	struct anon_vma_chain dummy_anon_vma_chain_c = {
		.anon_vma = &dummy_anon_vma,
	};
	struct anon_vma_chain dummy_anon_vma_chain_d = {
		.anon_vma = &dummy_anon_vma,
	};
	const struct vm_operations_struct vm_ops = {
		.close = dummy_close,
	};
	int count;
	struct vm_area_struct *vma, *vma_a, *vma_b, *vma_c, *vma_d;
	bool merged;

	/*
	 * 0123456789abc
	 * AA B       CC
	 */
	vma_a = alloc_and_link_vma(&mm, 0, 0x2000, 0, flags);
	ASSERT_NE(vma_a, NULL);
	/* We give each VMA a single avc so we can test anon_vma duplication. */
	INIT_LIST_HEAD(&vma_a->anon_vma_chain);
	list_add(&dummy_anon_vma_chain_a.same_vma, &vma_a->anon_vma_chain);

	vma_b = alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, flags);
	ASSERT_NE(vma_b, NULL);
	INIT_LIST_HEAD(&vma_b->anon_vma_chain);
	list_add(&dummy_anon_vma_chain_b.same_vma, &vma_b->anon_vma_chain);

	vma_c = alloc_and_link_vma(&mm, 0xb000, 0xc000, 0xb, flags);
	ASSERT_NE(vma_c, NULL);
	INIT_LIST_HEAD(&vma_c->anon_vma_chain);
	list_add(&dummy_anon_vma_chain_c.same_vma, &vma_c->anon_vma_chain);

	/*
	 * NO merge.
	 *
	 * 0123456789abc
	 * AA B   **  CC
	 */
	vma_d = try_merge_new_vma(&mm, &vmg, 0x7000, 0x9000, 7, flags, &merged);
	ASSERT_NE(vma_d, NULL);
	INIT_LIST_HEAD(&vma_d->anon_vma_chain);
	list_add(&dummy_anon_vma_chain_d.same_vma, &vma_d->anon_vma_chain);
	ASSERT_FALSE(merged);
	ASSERT_EQ(mm.map_count, 4);

	/*
	 * Merge BOTH sides.
	 *
	 * 0123456789abc
	 * AA*B   DD  CC
	 */
	vma_a->vm_ops = &vm_ops; /* This should have no impact. */
	vma_b->anon_vma = &dummy_anon_vma;
	vma = try_merge_new_vma(&mm, &vmg, 0x2000, 0x3000, 2, flags, &merged);
	ASSERT_EQ(vma, vma_a);
	/* Merge with A, delete B. */
	ASSERT_TRUE(merged);
	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0x4000);
	ASSERT_EQ(vma->vm_pgoff, 0);
	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 3);

	/*
	 * Merge to PREVIOUS VMA.
	 *
	 * 0123456789abc
	 * AAAA*  DD  CC
	 */
	vma = try_merge_new_vma(&mm, &vmg, 0x4000, 0x5000, 4, flags, &merged);
	ASSERT_EQ(vma, vma_a);
	/* Extend A. */
	ASSERT_TRUE(merged);
	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0x5000);
	ASSERT_EQ(vma->vm_pgoff, 0);
	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 3);

	/*
	 * Merge to NEXT VMA.
	 *
	 * 0123456789abc
	 * AAAAA *DD  CC
	 */
	vma_d->anon_vma = &dummy_anon_vma;
	vma_d->vm_ops = &vm_ops; /* This should have no impact. */
	vma = try_merge_new_vma(&mm, &vmg, 0x6000, 0x7000, 6, flags, &merged);
	ASSERT_EQ(vma, vma_d);
	/* Prepend. */
	ASSERT_TRUE(merged);
	ASSERT_EQ(vma->vm_start, 0x6000);
	ASSERT_EQ(vma->vm_end, 0x9000);
	ASSERT_EQ(vma->vm_pgoff, 6);
	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 3);

	/*
	 * Merge BOTH sides.
	 *
	 * 0123456789abc
	 * AAAAA*DDD  CC
	 */
	vma_d->vm_ops = NULL; /* This would otherwise degrade the merge. */
	vma = try_merge_new_vma(&mm, &vmg, 0x5000, 0x6000, 5, flags, &merged);
	ASSERT_EQ(vma, vma_a);
	/* Merge with A, delete D. */
	ASSERT_TRUE(merged);
	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0x9000);
	ASSERT_EQ(vma->vm_pgoff, 0);
	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 2);

	/*
	 * Merge to NEXT VMA.
	 *
	 * 0123456789abc
	 * AAAAAAAAA *CC
	 */
	vma_c->anon_vma = &dummy_anon_vma;
	vma = try_merge_new_vma(&mm, &vmg, 0xa000, 0xb000, 0xa, flags, &merged);
	ASSERT_EQ(vma, vma_c);
	/* Prepend C. */
	ASSERT_TRUE(merged);
	ASSERT_EQ(vma->vm_start, 0xa000);
	ASSERT_EQ(vma->vm_end, 0xc000);
	ASSERT_EQ(vma->vm_pgoff, 0xa);
	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 2);

	/*
	 * Merge BOTH sides.
	 *
	 * 0123456789abc
	 * AAAAAAAAA*CCC
	 */
	vma = try_merge_new_vma(&mm, &vmg, 0x9000, 0xa000, 0x9, flags, &merged);
	ASSERT_EQ(vma, vma_a);
	/* Extend A and delete C. */
	ASSERT_TRUE(merged);
	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0xc000);
	ASSERT_EQ(vma->vm_pgoff, 0);
	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 1);

	/*
	 * Final state.
	 *
	 * 0123456789abc
	 * AAAAAAAAAAAAA
	 */

	count = 0;
	vma_iter_set(&vmi, 0);
	for_each_vma(vmi, vma) {
		ASSERT_NE(vma, NULL);
		ASSERT_EQ(vma->vm_start, 0);
		ASSERT_EQ(vma->vm_end, 0xc000);
		ASSERT_EQ(vma->vm_pgoff, 0);
		ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);

		vm_area_free(vma);
		count++;
	}

	/* Should only have one VMA left (though freed) after all is done. */
	ASSERT_EQ(count, 1);

	mtree_destroy(&mm.mm_mt);
	return true;
}
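
/*
 * VM_SPECIAL flags (VM_IO, VM_DONTEXPAND, VM_PFNMAP, VM_MIXEDMAP) must
 * prevent merging - check this for both the new-VMA and existing-VMA paths.
 */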
static bool test_vma_merge_special_flags(void)
{
	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
	};
	vm_flags_t special_flags[] = { VM_IO, VM_DONTEXPAND, VM_PFNMAP, VM_MIXEDMAP };
	vm_flags_t all_special_flags = 0;
	int i;
	struct vm_area_struct *vma_left, *vma;

	/* Make sure there aren't new VM_SPECIAL flags. */
	for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
		all_special_flags |= special_flags[i];
	}
	ASSERT_EQ(all_special_flags, VM_SPECIAL);

	/*
	 * 01234
	 * AAA
	 */
	vma_left = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
	ASSERT_NE(vma_left, NULL);

	/* 1. Set up new VMA with special flag that would otherwise merge. */

	/*
	 * 01234
	 * AAA*
	 *
	 * This should merge if not for the VM_SPECIAL flag.
	 */
	vmg_set_range(&vmg, 0x3000, 0x4000, 3, flags);
	for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
		vm_flags_t special_flag = special_flags[i];

		vma_left->__vm_flags = flags | special_flag;
		vmg.flags = flags | special_flag;
		vma = merge_new(&vmg);
		ASSERT_EQ(vma, NULL);
		ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
	}

	/* 2. Modify VMA with special flag that would otherwise merge. */

	/*
	 * 01234
	 * AAAB
	 *
	 * Create a VMA to modify.
	 */
	vma = alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, flags);
	ASSERT_NE(vma, NULL);
	vmg.vma = vma;

	for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
		vm_flags_t special_flag = special_flags[i];

		vma_left->__vm_flags = flags | special_flag;
		vmg.flags = flags | special_flag;
		vma = merge_existing(&vmg);
		ASSERT_EQ(vma, NULL);
		ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
	}

	cleanup_mm(&mm, &vmi);
	return true;
}

static bool test_vma_merge_with_close(void)
{
	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
	};
	const struct vm_operations_struct vm_ops = {
		.close = dummy_close,
	};
	struct vm_area_struct *vma_prev, *vma_next, *vma;

	/*
	 * When merging VMAs we are not permitted to remove any VMA that has a
	 * vm_ops->close() hook.
	 *
	 * Considering the two possible adjacent VMAs to which a VMA can be
	 * merged:
	 *
	 * [ prev ][ vma ][ next ]
	 *
	 * In no case will we need to delete prev. If the operation is
	 * mergeable, then prev will be extended with one or both of vma and
	 * next deleted.
	 *
	 * As a result, during initial mergeability checks, only
	 * can_vma_merge_before() (which implies the VMA being merged with is
	 * 'next' as shown above) bothers to check to see whether the next VMA
	 * has a vm_ops->close() callback that will need to be called when
	 * removed.
	 *
	 * If it does, then we cannot merge as the resources that the close()
	 * operation potentially clears down are tied only to the existing VMA
	 * range and we have no way of extending those to the newly merged one.
	 *
	 * We must consider two scenarios:
	 *
	 * A.
	 *
	 * vm_ops->close:    -       -    !NULL
	 *                [ prev ][ vma ][ next ]
	 *
	 * Where prev may or may not be present/mergeable.
	 *
	 * This is picked up by a specific check in can_vma_merge_before().
	 *
	 * B.
	 *
	 * vm_ops->close:    -     !NULL
	 *                [ prev ][ vma ]
	 *
	 * Where prev and vma are present and mergeable.
	 *
	 * This is picked up by a specific check in the modified VMA merge.
	 *
	 * IMPORTANT NOTE: We make the assumption that the following case:
	 *
	 *    -      !NULL   NULL
	 * [ prev ][ vma ][ next ]
	 *
	 * Cannot occur, because vma->vm_ops being the same implies the same
	 * vma->vm_file, and therefore this would mean that next->vm_ops->close
	 * would be set too, and thus scenario A would pick this up.
	 */

	/*
	 * The only case of a new VMA merge that results in a VMA being deleted
	 * is one where both the previous and next VMAs are merged - in this
	 * instance the next VMA is deleted, and the previous VMA is extended.
	 *
	 * If we are unable to do so, we reduce the operation to simply
	 * extending the prev VMA and not merging next.
	 *
	 * 0123456789
	 * PPP**NNNN
	 * ->
	 * 0123456789
	 * PPPPPNNNN
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, flags);
	vma_next->vm_ops = &vm_ops;

	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
	ASSERT_EQ(merge_new(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x5000);
	ASSERT_EQ(vma_prev->vm_pgoff, 0);

	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);

	/*
	 * When modifying an existing VMA there are further cases where we
	 * delete VMAs.
	 *
	 *    <>
	 * 0123456789
	 * PPPVV
	 *
	 * In this instance, if vma has a close hook, the merge simply cannot
	 * proceed.
	 */

	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
	vma->vm_ops = &vm_ops;

	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
	vmg.prev = vma_prev;
	vmg.vma = vma;

	/*
	 * The VMA being modified in a way that would otherwise merge should
	 * also fail.
	 */
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);

	/*
	 * This case is mirrored if merging with next.
	 *
	 *    <>
	 * 0123456789
	 *    VVNNNN
	 *
	 * In this instance, if vma has a close hook, the merge simply cannot
	 * proceed.
	 */

	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, flags);
	vma->vm_ops = &vm_ops;

	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
	vmg.vma = vma;
	ASSERT_EQ(merge_existing(&vmg), NULL);
	/*
	 * Initially this is misapprehended as an out of memory report, as the
	 * close() check is handled in the same way as anon_vma duplication
	 * failures; however, a subsequent patch resolves this.
	 */
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);

	/*
	 * Finally, we consider two variants of the case where we modify a VMA
	 * to merge with both the previous and next VMAs.
	 *
	 * The first variant is where vma has a close hook. In this instance, no
	 * merge can proceed.
	 *
	 *    <>
	 * 0123456789
	 * PPPVVNNNN
	 */

	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, flags);
	vma->vm_ops = &vm_ops;

	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
	vmg.prev = vma_prev;
	vmg.vma = vma;

	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	ASSERT_EQ(cleanup_mm(&mm, &vmi), 3);

	/*
	 * The second variant is where next has a close hook. In this instance,
	 * we reduce the operation to a merge between prev and vma.
	 *
	 *    <>
	 * 0123456789
	 * PPPVVNNNN
	 * ->
	 * 0123456789
	 * PPPPPNNNN
	 */

	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, flags);
	vma_next->vm_ops = &vm_ops;

	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
	vmg.prev = vma_prev;
	vmg.vma = vma;

	ASSERT_EQ(merge_existing(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x5000);
	ASSERT_EQ(vma_prev->vm_pgoff, 0);

	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);

	return true;
}

static bool test_vma_merge_new_with_close(void)
{
	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
	};
	struct vm_area_struct *vma_prev = alloc_and_link_vma(&mm, 0, 0x2000, 0, flags);
	struct vm_area_struct *vma_next = alloc_and_link_vma(&mm, 0x5000, 0x7000, 5, flags);
	const struct vm_operations_struct vm_ops = {
		.close = dummy_close,
	};
	struct vm_area_struct *vma;

	/*
	 * We should allow the partial merge of a proposed new VMA if the
	 * surrounding VMAs have vm_ops->close() hooks (but are otherwise
	 * compatible), e.g.:
	 *
	 *        New VMA
	 *     A  v-------v  B
	 * |-----|         |-----|
	 *  close           close
	 *
	 * Since the rule is to not DELETE a VMA with a close operation, this
	 * should be permitted, only rather than expanding A and deleting B, we
	 * should simply expand A and leave B intact, e.g.:
	 *
	 *        New VMA
	 *    A              B
	 * |------------||-----|
	 *  close         close
	 */

	/* Give both prev and next a vm_ops->close() hook. */
	vma_prev->vm_ops = &vm_ops;
	vma_next->vm_ops = &vm_ops;

	vmg_set_range(&vmg, 0x2000, 0x5000, 2, flags);
	vma = merge_new(&vmg);
	ASSERT_NE(vma, NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0x5000);
	ASSERT_EQ(vma->vm_pgoff, 0);
	ASSERT_EQ(vma->vm_ops, &vm_ops);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 2);

	cleanup_mm(&mm, &vmi);
	return true;
}
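
/*
 * Exercise the existing-VMA merge path: merge part or all of a VMA into its
 * previous and/or next neighbour, plus ranges which must not merge at all.
 */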
static bool test_merge_existing(void)
{
	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vm_area_struct *vma, *vma_prev, *vma_next;
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
	};
	const struct vm_operations_struct vm_ops = {
		.close = dummy_close,
	};

	/*
	 * Merge right case - partial span.
	 *
	 *    <->
	 * 0123456789
	 *   VVVVNNN
	 * ->
	 * 0123456789
	 *   VNNNNNN
	 */
	vma = alloc_and_link_vma(&mm, 0x2000, 0x6000, 2, flags);
	vma->vm_ops = &vm_ops; /* This should have no impact. */
	vma_next = alloc_and_link_vma(&mm, 0x6000, 0x9000, 6, flags);
	vma_next->vm_ops = &vm_ops; /* This should have no impact. */
	vmg_set_range(&vmg, 0x3000, 0x6000, 3, flags);
	vmg.vma = vma;
	vmg.prev = vma;
	vma->anon_vma = &dummy_anon_vma;
	ASSERT_EQ(merge_existing(&vmg), vma_next);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma_next->vm_start, 0x3000);
	ASSERT_EQ(vma_next->vm_end, 0x9000);
	ASSERT_EQ(vma_next->vm_pgoff, 3);
	ASSERT_EQ(vma_next->anon_vma, &dummy_anon_vma);
	ASSERT_EQ(vma->vm_start, 0x2000);
	ASSERT_EQ(vma->vm_end, 0x3000);
	ASSERT_EQ(vma->vm_pgoff, 2);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_TRUE(vma_write_started(vma_next));
	ASSERT_EQ(mm.map_count, 2);

	/* Clear down and reset. */
	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);

	/*
	 * Merge right case - full span.
	 *
	 *   <-->
	 * 0123456789
	 *   VVVVNNN
	 * ->
	 * 0123456789
	 *   NNNNNNN
	 */
	vma = alloc_and_link_vma(&mm, 0x2000, 0x6000, 2, flags);
	vma_next = alloc_and_link_vma(&mm, 0x6000, 0x9000, 6, flags);
	vma_next->vm_ops = &vm_ops; /* This should have no impact. */
	vmg_set_range(&vmg, 0x2000, 0x6000, 2, flags);
	vmg.vma = vma;
	vma->anon_vma = &dummy_anon_vma;
	ASSERT_EQ(merge_existing(&vmg), vma_next);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma_next->vm_start, 0x2000);
	ASSERT_EQ(vma_next->vm_end, 0x9000);
	ASSERT_EQ(vma_next->vm_pgoff, 2);
	ASSERT_EQ(vma_next->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma_next));
	ASSERT_EQ(mm.map_count, 1);

	/* Clear down and reset. We should have deleted vma. */
	ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);

	/*
	 * Merge left case - partial span.
	 *
	 *    <->
	 * 0123456789
	 * PPPVVVV
	 * ->
	 * 0123456789
	 * PPPPPPV
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
	vma_prev->vm_ops = &vm_ops; /* This should have no impact. */
	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags);
	vma->vm_ops = &vm_ops; /* This should have no impact. */
	vmg_set_range(&vmg, 0x3000, 0x6000, 3, flags);
	vmg.prev = vma_prev;
	vmg.vma = vma;
	vma->anon_vma = &dummy_anon_vma;

	ASSERT_EQ(merge_existing(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x6000);
	ASSERT_EQ(vma_prev->vm_pgoff, 0);
	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_EQ(vma->vm_start, 0x6000);
	ASSERT_EQ(vma->vm_end, 0x7000);
	ASSERT_EQ(vma->vm_pgoff, 6);
	ASSERT_TRUE(vma_write_started(vma_prev));
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 2);

	/* Clear down and reset. */
	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);

	/*
	 * Merge left case - full span.
	 *
	 *    <-->
	 * 0123456789
	 * PPPVVVV
	 * ->
	 * 0123456789
	 * PPPPPPP
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
	vma_prev->vm_ops = &vm_ops; /* This should have no impact. */
	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags);
	vmg_set_range(&vmg, 0x3000, 0x7000, 3, flags);
	vmg.prev = vma_prev;
	vmg.vma = vma;
	vma->anon_vma = &dummy_anon_vma;
	ASSERT_EQ(merge_existing(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x7000);
	ASSERT_EQ(vma_prev->vm_pgoff, 0);
	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma_prev));
	ASSERT_EQ(mm.map_count, 1);

	/* Clear down and reset. We should have deleted vma. */
	ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);

	/*
	 * Merge both case.
	 *
	 *    <-->
	 * 0123456789
	 * PPPVVVVNNN
	 * ->
	 * 0123456789
	 * PPPPPPPPPP
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
	vma_prev->vm_ops = &vm_ops; /* This should have no impact. */
	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags);
	vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, flags);
	vmg_set_range(&vmg, 0x3000, 0x7000, 3, flags);
	vmg.prev = vma_prev;
	vmg.vma = vma;
	vma->anon_vma = &dummy_anon_vma;
	ASSERT_EQ(merge_existing(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x9000);
	ASSERT_EQ(vma_prev->vm_pgoff, 0);
	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma_prev));
	ASSERT_EQ(mm.map_count, 1);

	/* Clear down and reset. We should have deleted prev and next. */
	ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);

	/*
	 * Non-merge ranges. The modified VMA merge operation assumes that the
	 * caller always specifies ranges within the input VMA so we need only
	 * examine these cases.
	 *
	 *     <->
	 *     <>
	 *      <>
	 * 0123456789a
	 * PPPVVVVVNNN
	 */

	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x8000, 3, flags);
	vma_next = alloc_and_link_vma(&mm, 0x8000, 0xa000, 8, flags);

	vmg_set_range(&vmg, 0x4000, 0x5000, 4, flags);
	vmg.prev = vma;
	vmg.vma = vma;
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	vmg_set_range(&vmg, 0x5000, 0x6000, 5, flags);
	vmg.prev = vma;
	vmg.vma = vma;
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	vmg_set_range(&vmg, 0x6000, 0x7000, 6, flags);
	vmg.prev = vma;
	vmg.vma = vma;
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	vmg_set_range(&vmg, 0x4000, 0x7000, 4, flags);
	vmg.prev = vma;
	vmg.vma = vma;
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	vmg_set_range(&vmg, 0x4000, 0x6000, 4, flags);
	vmg.prev = vma;
	vmg.vma = vma;
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	vmg_set_range(&vmg, 0x5000, 0x6000, 5, flags);
	vmg.prev = vma;
	vmg.vma = vma;
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	ASSERT_EQ(cleanup_mm(&mm, &vmi), 3);

	return true;
}
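
/*
 * Where prev and next have incompatible anon_vmas, a three-way merge must
 * degrade to merging prev with the middle range only - check this for both
 * the existing-VMA and new-VMA paths.
 */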
static bool test_anon_vma_non_mergeable(void)
{
	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vm_area_struct *vma, *vma_prev, *vma_next;
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
	};
	struct anon_vma_chain dummy_anon_vma_chain1 = {
		.anon_vma = &dummy_anon_vma,
	};
	struct anon_vma_chain dummy_anon_vma_chain2 = {
		.anon_vma = &dummy_anon_vma,
	};

	/*
	 * In the case of modified VMA merge, merging both left and right VMAs
	 * but where prev and next have incompatible anon_vma objects, we revert
	 * to a merge of prev and VMA:
	 *
	 *    <-->
	 * 0123456789
	 * PPPVVVVNNN
	 * ->
	 * 0123456789
	 * PPPPPPPNNN
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags);
	vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, flags);

	/*
	 * Give both prev and next single anon_vma_chain fields, so they will
	 * merge with the NULL vmg->anon_vma.
	 *
	 * However, when prev is compared to next, the merge should fail.
	 */

	INIT_LIST_HEAD(&vma_prev->anon_vma_chain);
	list_add(&dummy_anon_vma_chain1.same_vma, &vma_prev->anon_vma_chain);
	ASSERT_TRUE(list_is_singular(&vma_prev->anon_vma_chain));
	vma_prev->anon_vma = &dummy_anon_vma;
	ASSERT_TRUE(is_mergeable_anon_vma(NULL, vma_prev->anon_vma, vma_prev));

	INIT_LIST_HEAD(&vma_next->anon_vma_chain);
	list_add(&dummy_anon_vma_chain2.same_vma, &vma_next->anon_vma_chain);
	ASSERT_TRUE(list_is_singular(&vma_next->anon_vma_chain));
	vma_next->anon_vma = (struct anon_vma *)2;
	ASSERT_TRUE(is_mergeable_anon_vma(NULL, vma_next->anon_vma, vma_next));

	ASSERT_FALSE(is_mergeable_anon_vma(vma_prev->anon_vma, vma_next->anon_vma, NULL));

	vmg_set_range(&vmg, 0x3000, 0x7000, 3, flags);
	vmg.prev = vma_prev;
	vmg.vma = vma;

	ASSERT_EQ(merge_existing(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x7000);
	ASSERT_EQ(vma_prev->vm_pgoff, 0);
	ASSERT_TRUE(vma_write_started(vma_prev));
	ASSERT_FALSE(vma_write_started(vma_next));

	/* Clear down and reset. */
	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);

	/*
	 * Now consider the new VMA case. This is equivalent, only adding a new
	 * VMA in a gap between prev and next.
	 *
	 *    <-->
	 * 0123456789
	 * PPP****NNN
	 * ->
	 * 0123456789
	 * PPPPPPPNNN
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
	vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, flags);

	INIT_LIST_HEAD(&vma_prev->anon_vma_chain);
	list_add(&dummy_anon_vma_chain1.same_vma, &vma_prev->anon_vma_chain);
	vma_prev->anon_vma = (struct anon_vma *)1;

	INIT_LIST_HEAD(&vma_next->anon_vma_chain);
	list_add(&dummy_anon_vma_chain2.same_vma, &vma_next->anon_vma_chain);
	vma_next->anon_vma = (struct anon_vma *)2;

	vmg_set_range(&vmg, 0x3000, 0x7000, 3, flags);
	vmg.prev = vma_prev;

	ASSERT_EQ(merge_new(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x7000);
	ASSERT_EQ(vma_prev->vm_pgoff, 0);
	ASSERT_TRUE(vma_write_started(vma_prev));
	ASSERT_FALSE(vma_write_started(vma_next));

	/* Final cleanup. */
	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);

	return true;
}
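
/*
 * When a merge deletes or shrinks a VMA that has an anon_vma, that anon_vma
 * must be duplicated onto the VMA which absorbs the range.
 */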
static bool test_dup_anon_vma(void)
{
	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
	};
	struct anon_vma_chain dummy_anon_vma_chain = {
		.anon_vma = &dummy_anon_vma,
	};
	struct vm_area_struct *vma_prev, *vma_next, *vma;

	reset_dummy_anon_vma();

	/*
	 * Expanding a VMA to delete the next one duplicates next's anon_vma and
	 * assigns it to the expanded VMA.
	 *
	 * This covers new VMA merging, as these operations amount to a VMA
	 * expand.
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
	vma_next = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
	vma_next->anon_vma = &dummy_anon_vma;

	vmg_set_range(&vmg, 0, 0x5000, 0, flags);
	vmg.vma = vma_prev;
	vmg.next = vma_next;

	ASSERT_EQ(expand_existing(&vmg), 0);

	/* Will have been cloned. */
	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);

	/* Cleanup ready for next run. */
	cleanup_mm(&mm, &vmi);

	/*
	 * next has anon_vma, we assign to prev.
	 *
	 *         |<----->|
	 * |-------*********-------|
	 *   prev     vma     next
	 *  extend  delete   delete
	 */

	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, flags);

	/* Initialise avc so mergeability check passes. */
	INIT_LIST_HEAD(&vma_next->anon_vma_chain);
	list_add(&dummy_anon_vma_chain.same_vma, &vma_next->anon_vma_chain);

	vma_next->anon_vma = &dummy_anon_vma;
	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
	vmg.prev = vma_prev;
	vmg.vma = vma;

	ASSERT_EQ(merge_existing(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);

	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x8000);

	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);

	cleanup_mm(&mm, &vmi);

	/*
	 * vma has anon_vma, we assign to prev.
	 *
	 *         |<----->|
	 * |-------*********-------|
	 *   prev     vma     next
	 *  extend  delete   delete
	 */

	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, flags);

	vma->anon_vma = &dummy_anon_vma;
	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
	vmg.prev = vma_prev;
	vmg.vma = vma;

	ASSERT_EQ(merge_existing(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);

	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x8000);

	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);

	cleanup_mm(&mm, &vmi);

	/*
	 * vma has anon_vma, we assign to prev.
	 *
	 *         |<----->|
	 * |-------*************
	 *   prev       vma
	 *  extend  shrink/delete
	 */

	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x8000, 3, flags);

	vma->anon_vma = &dummy_anon_vma;
	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
	vmg.prev = vma_prev;
	vmg.vma = vma;

	ASSERT_EQ(merge_existing(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);

	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x5000);

	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);

	cleanup_mm(&mm, &vmi);

	/*
	 * vma has anon_vma, we assign to next.
	 *
	 *      |<----->|
	 * *************-------|
	 *      vma       next
	 * shrink/delete  extend
	 */

	vma = alloc_and_link_vma(&mm, 0, 0x5000, 0, flags);
	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, flags);

	vma->anon_vma = &dummy_anon_vma;
	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
	vmg.prev = vma;
	vmg.vma = vma;

	ASSERT_EQ(merge_existing(&vmg), vma_next);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);

	ASSERT_EQ(vma_next->vm_start, 0x3000);
	ASSERT_EQ(vma_next->vm_end, 0x8000);

	ASSERT_EQ(vma_next->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_next->anon_vma->was_cloned);

	cleanup_mm(&mm, &vmi);
	return true;
}

static bool test_vmi_prealloc_fail(void)
{
	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
	};
	struct vm_area_struct *vma_prev, *vma;

	/*
	 * We are merging vma into prev, with vma possessing an anon_vma, which
	 * will be duplicated. We cause the vmi preallocation to fail and assert
	 * the duplicated anon_vma is unlinked.
	 */

	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
	vma->anon_vma = &dummy_anon_vma;

	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
	vmg.prev = vma_prev;
	vmg.vma = vma;

	fail_prealloc = true;

	/* This will cause the merge to fail. */
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_ERROR_NOMEM);
	/* We will already have assigned the anon_vma. */
	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	/* And it was both cloned and unlinked. */
	ASSERT_TRUE(dummy_anon_vma.was_cloned);
	ASSERT_TRUE(dummy_anon_vma.was_unlinked);

	cleanup_mm(&mm, &vmi); /* Resets fail_prealloc too. */

	/*
	 * We repeat the same operation for expanding a VMA, which is what new
	 * VMA merging ultimately uses too. This asserts that unlinking is
	 * performed in this case too.
	 */

	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
	vma->anon_vma = &dummy_anon_vma;

	vmg_set_range(&vmg, 0, 0x5000, 3, flags);
	vmg.vma = vma_prev;
	vmg.next = vma;

	fail_prealloc = true;
	ASSERT_EQ(expand_existing(&vmg), -ENOMEM);
	ASSERT_EQ(vmg.state, VMA_MERGE_ERROR_NOMEM);

	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(dummy_anon_vma.was_cloned);
	ASSERT_TRUE(dummy_anon_vma.was_unlinked);

	cleanup_mm(&mm, &vmi);
	return true;
}

static bool test_merge_extend(void)
{
	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0x1000);
	struct vm_area_struct *vma;

	vma = alloc_and_link_vma(&mm, 0, 0x1000, 0, flags);
	alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, flags);

	/*
	 * Extend a VMA into the gap between itself and the following VMA.
	 * This should result in a merge.
	 *
	 *  <->
	 * *  *
	 *
	 */

	ASSERT_EQ(vma_merge_extend(&vmi, vma, 0x2000), vma);
	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0x4000);
	ASSERT_EQ(vma->vm_pgoff, 0);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 1);

	cleanup_mm(&mm, &vmi);
	return true;
}
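
/*
 * copy_vma() should allocate a fresh VMA when relocating into empty space,
 * but should return the existing VMA where the relocated range merges with it.
 */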
static bool test_copy_vma(void)
{
	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	bool need_locks = false;
	VMA_ITERATOR(vmi, &mm, 0);
	struct vm_area_struct *vma, *vma_new, *vma_next;

	/* Move backwards and do not merge. */

	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
	vma_new = copy_vma(&vma, 0, 0x2000, 0, &need_locks);

	ASSERT_NE(vma_new, vma);
	ASSERT_EQ(vma_new->vm_start, 0);
	ASSERT_EQ(vma_new->vm_end, 0x2000);
	ASSERT_EQ(vma_new->vm_pgoff, 0);

	cleanup_mm(&mm, &vmi);

	/* Move a VMA into position next to another and merge the two. */

	vma = alloc_and_link_vma(&mm, 0, 0x2000, 0, flags);
	vma_next = alloc_and_link_vma(&mm, 0x6000, 0x8000, 6, flags);
	vma_new = copy_vma(&vma, 0x4000, 0x2000, 4, &need_locks);

	ASSERT_EQ(vma_new, vma_next);

	cleanup_mm(&mm, &vmi);
	return true;
}

static bool test_expand_only_mode(void)
{
	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vm_area_struct *vma_prev, *vma;
	VMG_STATE(vmg, &mm, &vmi, 0x5000, 0x9000, flags, 5);

	/*
	 * Place a VMA prior to the one we're expanding so we assert that we do
	 * not erroneously try to traverse to the previous VMA even though we
	 * have, through the use of VMG_FLAG_JUST_EXPAND, indicated we do not
	 * need to do so.
	 */
	alloc_and_link_vma(&mm, 0, 0x2000, 0, flags);

	/*
	 * We will be positioned at the prev VMA, but looking to expand to
	 * 0x9000.
	 */
	vma_iter_set(&vmi, 0x3000);
	vma_prev = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
	vmg.prev = vma_prev;
	vmg.merge_flags = VMG_FLAG_JUST_EXPAND;

	vma = vma_merge_new_range(&vmg);
	ASSERT_NE(vma, NULL);
	ASSERT_EQ(vma, vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma->vm_start, 0x3000);
	ASSERT_EQ(vma->vm_end, 0x9000);
	ASSERT_EQ(vma->vm_pgoff, 3);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(vma_iter_addr(&vmi), 0x3000);

	cleanup_mm(&mm, &vmi);
	return true;
}

int main(void)
{
	int num_tests = 0, num_fail = 0;

	maple_tree_init();

#define TEST(name)							\
	do {								\
		num_tests++;						\
		if (!test_##name()) {					\
			num_fail++;					\
			fprintf(stderr, "Test " #name " FAILED\n");	\
		}							\
	} while (0)

	/* Very simple tests to kick the tyres. */
	TEST(simple_merge);
	TEST(simple_modify);
	TEST(simple_expand);
	TEST(simple_shrink);

	TEST(merge_new);
	TEST(vma_merge_special_flags);
	TEST(vma_merge_with_close);
	TEST(vma_merge_new_with_close);
	TEST(merge_existing);
	TEST(anon_vma_non_mergeable);
	TEST(dup_anon_vma);
	TEST(vmi_prealloc_fail);
	TEST(merge_extend);
	TEST(copy_vma);
	TEST(expand_only_mode);

#undef TEST

	printf("%d tests run, %d passed, %d failed.\n",
	       num_tests, num_tests - num_fail, num_fail);

	return num_fail == 0 ? EXIT_SUCCESS : EXIT_FAILURE;
}