// SPDX-License-Identifier: GPL-2.0-or-later

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#include "generated/bit-length.h"

#include "maple-shared.h"
#include "vma_internal.h"

/* Include so header guard set. */
#include "../../../mm/vma.h"

static bool fail_prealloc;

/* Then override vma_iter_prealloc() so we can choose to fail it. */
#define vma_iter_prealloc(vmi, vma)					\
	(fail_prealloc ? -ENOMEM : mas_preallocate(&(vmi)->mas, (vma), GFP_KERNEL))
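/*
 * Usage note: a test that wants to exercise the failure path simply sets
 * fail_prealloc = true before invoking a merge or expand helper (see
 * test_vmi_prealloc_fail()); cleanup_mm() resets the flag afterwards.
 */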
/*
 * Directly import the VMA implementation here. Our vma_internal.h wrapper
 * provides userland-equivalent functionality for everything vma.c uses.
 */
#include "../../../mm/vma.c"

const struct vm_operations_struct vma_dummy_vm_ops;
static struct anon_vma dummy_anon_vma;

#define ASSERT_TRUE(_expr)						\
	do {								\
		if (!(_expr)) {						\
			fprintf(stderr,					\
				"Assert FAILED at %s:%d:%s(): %s is FALSE.\n", \
				__FILE__, __LINE__, __FUNCTION__, #_expr); \
			return false;					\
		}							\
	} while (0)

#define ASSERT_FALSE(_expr) ASSERT_TRUE(!(_expr))
#define ASSERT_EQ(_val1, _val2) ASSERT_TRUE((_val1) == (_val2))
#define ASSERT_NE(_val1, _val2) ASSERT_TRUE((_val1) != (_val2))
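/*
 * Note that ASSERT_TRUE() and friends return false from the *calling*
 * function on failure, which is why every test_*() function below returns
 * bool and main() tallies results via the TEST() macro.
 */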
static struct task_struct __current;

struct task_struct *get_current(void)
{
	return &__current;
}

/* Helper function to simply allocate a VMA. */
static struct vm_area_struct *alloc_vma(struct mm_struct *mm,
					unsigned long start,
					unsigned long end,
					pgoff_t pgoff,
					vm_flags_t flags)
{
	struct vm_area_struct *ret = vm_area_alloc(mm);

	if (ret == NULL)
		return NULL;

	ret->vm_start = start;
	ret->vm_end = end;
	ret->vm_pgoff = pgoff;
	ret->__vm_flags = flags;

	return ret;
}
/* Helper function to allocate a VMA and link it to the tree. */
static struct vm_area_struct *alloc_and_link_vma(struct mm_struct *mm,
						 unsigned long start,
						 unsigned long end,
						 pgoff_t pgoff,
						 vm_flags_t flags)
{
	struct vm_area_struct *vma = alloc_vma(mm, start, end, pgoff, flags);

	if (vma == NULL)
		return NULL;

	if (vma_link(mm, vma)) {
		vm_area_free(vma);
		return NULL;
	}

	/*
	 * Reset this counter which we use to track whether writes have
	 * begun. Linking to the tree will have caused this to be incremented,
	 * which means we will get a false positive otherwise.
	 */
	vma->vm_lock_seq = -1;

	return vma;
}
/* Helper function which provides a wrapper around a merge new VMA operation. */
static struct vm_area_struct *merge_new(struct vma_merge_struct *vmg)
{
	/*
	 * For convenience, get prev and next VMAs, which the new VMA operation
	 * requires.
	 */
	vmg->next = vma_next(vmg->vmi);
	vmg->prev = vma_prev(vmg->vmi);
	vma_iter_next_range(vmg->vmi);

	return vma_merge_new_range(vmg);
}
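/*
 * The iterator dance above matters: vma_next()/vma_prev() leave the iterator
 * on the VMA preceding the proposed range, so vma_iter_next_range() steps it
 * back onto the range being merged before vma_merge_new_range() is invoked.
 */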
/*
 * Helper function which provides a wrapper around a merge existing VMA
 * operation.
 */
static struct vm_area_struct *merge_existing(struct vma_merge_struct *vmg)
{
	return vma_merge_existing_range(vmg);
}

/*
 * Helper function which provides a wrapper around the expansion of an existing
 * VMA.
 */
static int expand_existing(struct vma_merge_struct *vmg)
{
	return vma_expand(vmg);
}

/*
 * Helper function to reset merge state and the associated VMA iterator to a
 * specified new range.
 */
static void vmg_set_range(struct vma_merge_struct *vmg, unsigned long start,
			  unsigned long end, pgoff_t pgoff, vm_flags_t flags)
{
	vma_iter_set(vmg->vmi, start);

	vmg->prev = NULL;
	vmg->next = NULL;
	vmg->vma = NULL;

	vmg->start = start;
	vmg->end = end;
	vmg->pgoff = pgoff;
	vmg->flags = flags;
}
/*
 * Helper function to try to merge a new VMA.
 *
 * Update vmg and the iterator for it and try to merge, otherwise allocate a new
 * VMA, link it to the maple tree and return it.
 */
static struct vm_area_struct *try_merge_new_vma(struct mm_struct *mm,
						struct vma_merge_struct *vmg,
						unsigned long start, unsigned long end,
						pgoff_t pgoff, vm_flags_t flags,
						bool *was_merged)
{
	struct vm_area_struct *merged;

	vmg_set_range(vmg, start, end, pgoff, flags);

	merged = merge_new(vmg);
	if (merged) {
		*was_merged = true;
		ASSERT_EQ(vmg->state, VMA_MERGE_SUCCESS);
		return merged;
	}

	*was_merged = false;

	ASSERT_EQ(vmg->state, VMA_MERGE_NOMERGE);

	return alloc_and_link_vma(mm, start, end, pgoff, flags);
}
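/*
 * Typical use in the tests below - the caller passes a bool to learn whether
 * an actual merge occurred or a fresh VMA had to be allocated, e.g.:
 *
 *	vma_d = try_merge_new_vma(&mm, &vmg, 0x7000, 0x9000, 7, flags, &merged);
 *	ASSERT_FALSE(merged);
 */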
/*
 * Helper function to reset the dummy anon_vma to indicate it has not been
 * duplicated.
 */
static void reset_dummy_anon_vma(void)
{
	dummy_anon_vma.was_cloned = false;
	dummy_anon_vma.was_unlinked = false;
}

/*
 * Helper function to remove all VMAs and destroy the maple tree associated with
 * a virtual address space. Returns a count of VMAs in the tree.
 */
static int cleanup_mm(struct mm_struct *mm, struct vma_iterator *vmi)
{
	struct vm_area_struct *vma;
	int count = 0;

	fail_prealloc = false;
	reset_dummy_anon_vma();

	vma_iter_set(vmi, 0);
	for_each_vma(*vmi, vma) {
		vm_area_free(vma);
		count++;
	}

	mtree_destroy(&mm->mm_mt);
	mm->map_count = 0;
	return count;
}
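/*
 * Most tests finish with cleanup_mm() and assert on its return value to check
 * how many VMAs survived the operation under test, e.g.:
 *
 *	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
 */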
/* Helper function to determine if VMA has had vma_start_write() performed. */
static bool vma_write_started(struct vm_area_struct *vma)
{
	int seq = vma->vm_lock_seq;

	/* We reset after each check. */
	vma->vm_lock_seq = -1;

	/* The vma_start_write() stub simply increments this value. */
	return seq > -1;
}

/* Helper function providing a dummy vm_ops->close() method. */
static void dummy_close(struct vm_area_struct *)
{
}
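/*
 * Tests assign a vm_operations_struct containing dummy_close to a VMA when
 * they want it treated as having a vm_ops->close() hook; as described in
 * test_vma_merge_with_close(), a merge is never permitted to delete such a
 * VMA.
 */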
static bool test_simple_merge(void)
{
	struct vm_area_struct *vma;
	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	struct vm_area_struct *vma_left = alloc_vma(&mm, 0, 0x1000, 0, flags);
	struct vm_area_struct *vma_right = alloc_vma(&mm, 0x2000, 0x3000, 2, flags);
	VMA_ITERATOR(vmi, &mm, 0x1000);
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
		.start = 0x1000,
		.end = 0x2000,
		.flags = flags,
		.pgoff = 1,
	};

	ASSERT_FALSE(vma_link(&mm, vma_left));
	ASSERT_FALSE(vma_link(&mm, vma_right));

	vma = merge_new(&vmg);
	ASSERT_NE(vma, NULL);

	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0x3000);
	ASSERT_EQ(vma->vm_pgoff, 0);
	ASSERT_EQ(vma->vm_flags, flags);

	vm_area_free(vma);
	mtree_destroy(&mm.mm_mt);

	return true;
}
static bool test_simple_modify(void)
{
	struct vm_area_struct *vma;
	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	struct vm_area_struct *init_vma = alloc_vma(&mm, 0, 0x3000, 0, flags);
	VMA_ITERATOR(vmi, &mm, 0x1000);

	ASSERT_FALSE(vma_link(&mm, init_vma));

	/*
	 * The flags will not be changed, the vma_modify_flags() function
	 * performs the merge/split only.
	 */
	vma = vma_modify_flags(&vmi, init_vma, init_vma,
			       0x1000, 0x2000, VM_READ | VM_MAYREAD);
	ASSERT_NE(vma, NULL);
	/* We modify the provided VMA, and on split allocate new VMAs. */
	ASSERT_EQ(vma, init_vma);

	ASSERT_EQ(vma->vm_start, 0x1000);
	ASSERT_EQ(vma->vm_end, 0x2000);
	ASSERT_EQ(vma->vm_pgoff, 1);

	/*
	 * Now walk through the three split VMAs and make sure they are as
	 * expected.
	 */
	vma_iter_set(&vmi, 0);
	vma = vma_iter_load(&vmi);

	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0x1000);
	ASSERT_EQ(vma->vm_pgoff, 0);

	vm_area_free(vma);
	vma_iter_clear(&vmi);

	vma = vma_next(&vmi);

	ASSERT_EQ(vma->vm_start, 0x1000);
	ASSERT_EQ(vma->vm_end, 0x2000);
	ASSERT_EQ(vma->vm_pgoff, 1);

	vm_area_free(vma);
	vma_iter_clear(&vmi);

	vma = vma_next(&vmi);

	ASSERT_EQ(vma->vm_start, 0x2000);
	ASSERT_EQ(vma->vm_end, 0x3000);
	ASSERT_EQ(vma->vm_pgoff, 2);

	vm_area_free(vma);
	mtree_destroy(&mm.mm_mt);

	return true;
}
static bool test_simple_expand(void)
{
	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x1000, 0, flags);
	VMA_ITERATOR(vmi, &mm, 0);
	struct vma_merge_struct vmg = {
		.vmi = &vmi,
		.vma = vma,
		.start = 0,
		.end = 0x3000,
		.pgoff = 0,
	};

	ASSERT_FALSE(vma_link(&mm, vma));

	ASSERT_FALSE(expand_existing(&vmg));

	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0x3000);
	ASSERT_EQ(vma->vm_pgoff, 0);

	vm_area_free(vma);
	mtree_destroy(&mm.mm_mt);

	return true;
}
static bool test_simple_shrink(void)
{
	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x3000, 0, flags);
	VMA_ITERATOR(vmi, &mm, 0);

	ASSERT_FALSE(vma_link(&mm, vma));

	ASSERT_FALSE(vma_shrink(&vmi, vma, 0, 0x1000, 0));

	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0x1000);
	ASSERT_EQ(vma->vm_pgoff, 0);

	vm_area_free(vma);
	mtree_destroy(&mm.mm_mt);

	return true;
}
static bool test_merge_new(void)
{
	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
	};
	struct anon_vma_chain dummy_anon_vma_chain_a = {
		.anon_vma = &dummy_anon_vma,
	};
	struct anon_vma_chain dummy_anon_vma_chain_b = {
		.anon_vma = &dummy_anon_vma,
	};
	struct anon_vma_chain dummy_anon_vma_chain_c = {
		.anon_vma = &dummy_anon_vma,
	};
	struct anon_vma_chain dummy_anon_vma_chain_d = {
		.anon_vma = &dummy_anon_vma,
	};
	const struct vm_operations_struct vm_ops = {
		.close = dummy_close,
	};
	struct vm_area_struct *vma, *vma_a, *vma_b, *vma_c, *vma_d;
	bool merged;
	int count = 0;

	vma_a = alloc_and_link_vma(&mm, 0, 0x2000, 0, flags);
	ASSERT_NE(vma_a, NULL);
	/* We give each VMA a single avc so we can test anon_vma duplication. */
	INIT_LIST_HEAD(&vma_a->anon_vma_chain);
	list_add(&dummy_anon_vma_chain_a.same_vma, &vma_a->anon_vma_chain);

	vma_b = alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, flags);
	ASSERT_NE(vma_b, NULL);
	INIT_LIST_HEAD(&vma_b->anon_vma_chain);
	list_add(&dummy_anon_vma_chain_b.same_vma, &vma_b->anon_vma_chain);

	vma_c = alloc_and_link_vma(&mm, 0xb000, 0xc000, 0xb, flags);
	ASSERT_NE(vma_c, NULL);
	INIT_LIST_HEAD(&vma_c->anon_vma_chain);
	list_add(&dummy_anon_vma_chain_c.same_vma, &vma_c->anon_vma_chain);

	vma_d = try_merge_new_vma(&mm, &vmg, 0x7000, 0x9000, 7, flags, &merged);
	ASSERT_NE(vma_d, NULL);
	INIT_LIST_HEAD(&vma_d->anon_vma_chain);
	list_add(&dummy_anon_vma_chain_d.same_vma, &vma_d->anon_vma_chain);
	ASSERT_FALSE(merged);
	ASSERT_EQ(mm.map_count, 4);

	vma_a->vm_ops = &vm_ops; /* This should have no impact. */
	vma_b->anon_vma = &dummy_anon_vma;
	vma = try_merge_new_vma(&mm, &vmg, 0x2000, 0x3000, 2, flags, &merged);
	ASSERT_EQ(vma, vma_a);
	/* Merge with A, delete B. */
	ASSERT_TRUE(merged);
	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0x4000);
	ASSERT_EQ(vma->vm_pgoff, 0);
	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 3);

	/*
	 * Merge to PREVIOUS VMA.
	 */
	vma = try_merge_new_vma(&mm, &vmg, 0x4000, 0x5000, 4, flags, &merged);
	ASSERT_EQ(vma, vma_a);
	ASSERT_TRUE(merged);
	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0x5000);
	ASSERT_EQ(vma->vm_pgoff, 0);
	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 3);

	vma_d->anon_vma = &dummy_anon_vma;
	vma_d->vm_ops = &vm_ops; /* This should have no impact. */
	vma = try_merge_new_vma(&mm, &vmg, 0x6000, 0x7000, 6, flags, &merged);
	ASSERT_EQ(vma, vma_d);
	ASSERT_TRUE(merged);
	ASSERT_EQ(vma->vm_start, 0x6000);
	ASSERT_EQ(vma->vm_end, 0x9000);
	ASSERT_EQ(vma->vm_pgoff, 6);
	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 3);

	vma_d->vm_ops = NULL; /* This would otherwise degrade the merge. */
	vma = try_merge_new_vma(&mm, &vmg, 0x5000, 0x6000, 5, flags, &merged);
	ASSERT_EQ(vma, vma_a);
	/* Merge with A, delete D. */
	ASSERT_TRUE(merged);
	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0x9000);
	ASSERT_EQ(vma->vm_pgoff, 0);
	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 2);

	vma_c->anon_vma = &dummy_anon_vma;
	vma = try_merge_new_vma(&mm, &vmg, 0xa000, 0xb000, 0xa, flags, &merged);
	ASSERT_EQ(vma, vma_c);
	ASSERT_TRUE(merged);
	ASSERT_EQ(vma->vm_start, 0xa000);
	ASSERT_EQ(vma->vm_end, 0xc000);
	ASSERT_EQ(vma->vm_pgoff, 0xa);
	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 2);

	vma = try_merge_new_vma(&mm, &vmg, 0x9000, 0xa000, 0x9, flags, &merged);
	ASSERT_EQ(vma, vma_a);
	/* Extend A and delete C. */
	ASSERT_TRUE(merged);
	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0xc000);
	ASSERT_EQ(vma->vm_pgoff, 0);
	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 1);

	vma_iter_set(&vmi, 0);
	for_each_vma(vmi, vma) {
		ASSERT_NE(vma, NULL);
		ASSERT_EQ(vma->vm_start, 0);
		ASSERT_EQ(vma->vm_end, 0xc000);
		ASSERT_EQ(vma->vm_pgoff, 0);
		ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);

		vm_area_free(vma);
		count++;
	}

	/* Should only have one VMA left (though freed) after all is done. */
	ASSERT_EQ(count, 1);

	mtree_destroy(&mm.mm_mt);
	return true;
}
static bool test_vma_merge_special_flags(void)
{
	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
	};
	vm_flags_t special_flags[] = { VM_IO, VM_DONTEXPAND, VM_PFNMAP, VM_MIXEDMAP };
	vm_flags_t all_special_flags = 0;
	int i;
	struct vm_area_struct *vma_left, *vma;

	/* Make sure there aren't new VM_SPECIAL flags. */
	for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
		all_special_flags |= special_flags[i];
	}
	ASSERT_EQ(all_special_flags, VM_SPECIAL);

	vma_left = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
	ASSERT_NE(vma_left, NULL);

	/* 1. Set up new VMA with special flag that would otherwise merge. */

	/*
	 * This should merge if not for the VM_SPECIAL flag.
	 */
	vmg_set_range(&vmg, 0x3000, 0x4000, 3, flags);
	for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
		vm_flags_t special_flag = special_flags[i];

		vma_left->__vm_flags = flags | special_flag;
		vmg.flags = flags | special_flag;
		vma = merge_new(&vmg);
		ASSERT_EQ(vma, NULL);
		ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
	}

	/* 2. Modify VMA with special flag that would otherwise merge. */

	/*
	 * Create a VMA to modify.
	 */
	vma = alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, flags);
	ASSERT_NE(vma, NULL);
	vmg.vma = vma;

	for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
		vm_flags_t special_flag = special_flags[i];

		vma_left->__vm_flags = flags | special_flag;
		vmg.flags = flags | special_flag;
		vma = merge_existing(&vmg);
		ASSERT_EQ(vma, NULL);
		ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
	}

	cleanup_mm(&mm, &vmi);
	return true;
}
static bool test_vma_merge_with_close(void)
{
	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
	};
	const struct vm_operations_struct vm_ops = {
		.close = dummy_close,
	};
	struct vm_area_struct *vma_prev, *vma_next, *vma;

	/*
	 * When merging VMAs we are not permitted to remove any VMA that has a
	 * vm_ops->close() hook.
	 *
	 * Considering the two possible adjacent VMAs to which a VMA can be
	 * merged:
	 *
	 * [ prev ][ vma ][ next ]
	 *
	 * In no case will we need to delete prev. If the operation is
	 * mergeable, then prev will be extended with one or both of vma and
	 * next deleted.
	 *
	 * As a result, during initial mergeability checks, only
	 * can_vma_merge_before() (which implies the VMA being merged with is
	 * 'next' as shown above) bothers to check to see whether the next VMA
	 * has a vm_ops->close() callback that will need to be called when
	 * removed.
	 *
	 * If it does, then we cannot merge as the resources that the close()
	 * operation potentially clears down are tied only to the existing VMA
	 * range and we have no way of extending those to the newly merged one.
	 *
	 * We must consider two scenarios:
	 *
	 * A.
	 *
	 * vm_ops->close:  -        -      !NULL
	 *                [ prev ][ vma ][ next ]
	 *
	 * Where prev may or may not be present/mergeable.
	 *
	 * This is picked up by a specific check in can_vma_merge_before().
	 *
	 * B.
	 *
	 * vm_ops->close:  -       !NULL
	 *                [ prev ][ vma ]
	 *
	 * Where prev and vma are present and mergeable.
	 *
	 * This is picked up by a specific check in the modified VMA merge.
	 *
	 * IMPORTANT NOTE: We make the assumption that the following case:
	 *
	 * [ prev ][ vma ][ next ]
	 *
	 * Cannot occur, because vma->vm_ops being the same implies the same
	 * vma->vm_file, and therefore this would mean that next->vm_ops->close
	 * would be set too, and thus scenario A would pick this up.
	 */

	/*
	 * The only case of a new VMA merge that results in a VMA being deleted
	 * is one where both the previous and next VMAs are merged - in this
	 * instance the next VMA is deleted, and the previous VMA is extended.
	 *
	 * If we are unable to do so, we reduce the operation to simply
	 * extending the prev VMA and not merging next.
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, flags);
	vma_next->vm_ops = &vm_ops;

	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
	ASSERT_EQ(merge_new(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x5000);
	ASSERT_EQ(vma_prev->vm_pgoff, 0);

	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);

	/*
	 * When modifying an existing VMA there are further cases where we
	 * delete VMAs.
	 *
	 * In this instance, if vma has a close hook, the merge simply cannot
	 * proceed.
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
	vma->vm_ops = &vm_ops;

	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
	vmg.prev = vma_prev;
	vmg.vma = vma;

	/*
	 * The VMA being modified in a way that would otherwise merge should
	 * not be permitted to do so here, as it has a close hook.
	 */
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);

	/*
	 * This case is mirrored if merging with next.
	 *
	 * In this instance, if vma has a close hook, the merge simply cannot
	 * proceed.
	 */
	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, flags);
	vma->vm_ops = &vm_ops;

	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
	vmg.vma = vma;
	ASSERT_EQ(merge_existing(&vmg), NULL);
	/*
	 * Initially this is misapprehended as an out of memory report, as the
	 * close() check is handled in the same way as anon_vma duplication
	 * failures, however a subsequent patch resolves this.
	 */
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);

	/*
	 * Finally, we consider two variants of the case where we modify a VMA
	 * to merge with both the previous and next VMAs.
	 *
	 * The first variant is where vma has a close hook. In this instance, no
	 * merge can proceed.
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, flags);
	vma->vm_ops = &vm_ops;

	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
	vmg.prev = vma_prev;
	vmg.vma = vma;

	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	ASSERT_EQ(cleanup_mm(&mm, &vmi), 3);

	/*
	 * The second variant is where next has a close hook. In this instance,
	 * we reduce the operation to a merge between prev and vma.
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, flags);
	vma_next->vm_ops = &vm_ops;

	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
	vmg.prev = vma_prev;
	vmg.vma = vma;

	ASSERT_EQ(merge_existing(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x5000);
	ASSERT_EQ(vma_prev->vm_pgoff, 0);

	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);

	return true;
}
static bool test_vma_merge_new_with_close(void)
{
	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
	};
	struct vm_area_struct *vma_prev = alloc_and_link_vma(&mm, 0, 0x2000, 0, flags);
	struct vm_area_struct *vma_next = alloc_and_link_vma(&mm, 0x5000, 0x7000, 5, flags);
	const struct vm_operations_struct vm_ops = {
		.close = dummy_close,
	};
	struct vm_area_struct *vma;

	/*
	 * We should allow the partial merge of a proposed new VMA if the
	 * surrounding VMAs have vm_ops->close() hooks (but are otherwise
	 * compatible).
	 *
	 * Since the rule is to not DELETE a VMA with a close operation, this
	 * should be permitted, only rather than expanding A and deleting B, we
	 * should simply expand A and leave B intact, e.g.:
	 *
	 * |------------||-----|
	 */

	/* Give prev and next a vm_ops->close() hook. */
	vma_prev->vm_ops = &vm_ops;
	vma_next->vm_ops = &vm_ops;

	vmg_set_range(&vmg, 0x2000, 0x5000, 2, flags);
	vma = merge_new(&vmg);
	ASSERT_NE(vma, NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0x5000);
	ASSERT_EQ(vma->vm_pgoff, 0);
	ASSERT_EQ(vma->vm_ops, &vm_ops);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 2);

	cleanup_mm(&mm, &vmi);
	return true;
}
static bool test_merge_existing(void)
{
	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vm_area_struct *vma, *vma_prev, *vma_next;
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
	};
	const struct vm_operations_struct vm_ops = {
		.close = dummy_close,
	};

	/*
	 * Merge right case - partial span.
	 */
	vma = alloc_and_link_vma(&mm, 0x2000, 0x6000, 2, flags);
	vma->vm_ops = &vm_ops; /* This should have no impact. */
	vma_next = alloc_and_link_vma(&mm, 0x6000, 0x9000, 6, flags);
	vma_next->vm_ops = &vm_ops; /* This should have no impact. */
	vmg_set_range(&vmg, 0x3000, 0x6000, 3, flags);
	vmg.prev = vma;
	vmg.vma = vma;
	vma->anon_vma = &dummy_anon_vma;
	ASSERT_EQ(merge_existing(&vmg), vma_next);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma_next->vm_start, 0x3000);
	ASSERT_EQ(vma_next->vm_end, 0x9000);
	ASSERT_EQ(vma_next->vm_pgoff, 3);
	ASSERT_EQ(vma_next->anon_vma, &dummy_anon_vma);
	ASSERT_EQ(vma->vm_start, 0x2000);
	ASSERT_EQ(vma->vm_end, 0x3000);
	ASSERT_EQ(vma->vm_pgoff, 2);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_TRUE(vma_write_started(vma_next));
	ASSERT_EQ(mm.map_count, 2);

	/* Clear down and reset. */
	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);

	/*
	 * Merge right case - full span.
	 */
	vma = alloc_and_link_vma(&mm, 0x2000, 0x6000, 2, flags);
	vma_next = alloc_and_link_vma(&mm, 0x6000, 0x9000, 6, flags);
	vma_next->vm_ops = &vm_ops; /* This should have no impact. */
	vmg_set_range(&vmg, 0x2000, 0x6000, 2, flags);
	vmg.vma = vma;
	vma->anon_vma = &dummy_anon_vma;
	ASSERT_EQ(merge_existing(&vmg), vma_next);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma_next->vm_start, 0x2000);
	ASSERT_EQ(vma_next->vm_end, 0x9000);
	ASSERT_EQ(vma_next->vm_pgoff, 2);
	ASSERT_EQ(vma_next->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma_next));
	ASSERT_EQ(mm.map_count, 1);

	/* Clear down and reset. We should have deleted vma. */
	ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);

	/*
	 * Merge left case - partial span.
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
	vma_prev->vm_ops = &vm_ops; /* This should have no impact. */
	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags);
	vma->vm_ops = &vm_ops; /* This should have no impact. */
	vmg_set_range(&vmg, 0x3000, 0x6000, 3, flags);
	vmg.prev = vma_prev;
	vmg.vma = vma;
	vma->anon_vma = &dummy_anon_vma;

	ASSERT_EQ(merge_existing(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x6000);
	ASSERT_EQ(vma_prev->vm_pgoff, 0);
	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_EQ(vma->vm_start, 0x6000);
	ASSERT_EQ(vma->vm_end, 0x7000);
	ASSERT_EQ(vma->vm_pgoff, 6);
	ASSERT_TRUE(vma_write_started(vma_prev));
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 2);

	/* Clear down and reset. */
	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);

	/*
	 * Merge left case - full span.
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
	vma_prev->vm_ops = &vm_ops; /* This should have no impact. */
	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags);
	vmg_set_range(&vmg, 0x3000, 0x7000, 3, flags);
	vmg.prev = vma_prev;
	vmg.vma = vma;
	vma->anon_vma = &dummy_anon_vma;
	ASSERT_EQ(merge_existing(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x7000);
	ASSERT_EQ(vma_prev->vm_pgoff, 0);
	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma_prev));
	ASSERT_EQ(mm.map_count, 1);

	/* Clear down and reset. We should have deleted vma. */
	ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);

	/* Merge both sides - full span. */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
	vma_prev->vm_ops = &vm_ops; /* This should have no impact. */
	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags);
	vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, flags);
	vmg_set_range(&vmg, 0x3000, 0x7000, 3, flags);
	vmg.prev = vma_prev;
	vmg.vma = vma;
	vma->anon_vma = &dummy_anon_vma;
	ASSERT_EQ(merge_existing(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x9000);
	ASSERT_EQ(vma_prev->vm_pgoff, 0);
	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma_prev));
	ASSERT_EQ(mm.map_count, 1);

	/* Clear down and reset. We should have deleted vma and next. */
	ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);

	/*
	 * Non-merge ranges. The modified VMA merge operation assumes that the
	 * caller always specifies ranges within the input VMA so we need only
	 * examine these cases.
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x8000, 3, flags);
	vma_next = alloc_and_link_vma(&mm, 0x8000, 0xa000, 8, flags);

	vmg_set_range(&vmg, 0x4000, 0x5000, 4, flags);
	vmg.prev = vma;
	vmg.vma = vma;
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	vmg_set_range(&vmg, 0x5000, 0x6000, 5, flags);
	vmg.prev = vma;
	vmg.vma = vma;
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	vmg_set_range(&vmg, 0x6000, 0x7000, 6, flags);
	vmg.prev = vma;
	vmg.vma = vma;
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	vmg_set_range(&vmg, 0x4000, 0x7000, 4, flags);
	vmg.prev = vma;
	vmg.vma = vma;
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	vmg_set_range(&vmg, 0x4000, 0x6000, 4, flags);
	vmg.prev = vma;
	vmg.vma = vma;
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	vmg_set_range(&vmg, 0x5000, 0x6000, 5, flags);
	vmg.prev = vma;
	vmg.vma = vma;
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	ASSERT_EQ(cleanup_mm(&mm, &vmi), 3);

	return true;
}
static bool test_anon_vma_non_mergeable(void)
{
	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vm_area_struct *vma, *vma_prev, *vma_next;
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
	};
	struct anon_vma_chain dummy_anon_vma_chain1 = {
		.anon_vma = &dummy_anon_vma,
	};
	struct anon_vma_chain dummy_anon_vma_chain2 = {
		.anon_vma = &dummy_anon_vma,
	};

	/*
	 * In the case of modified VMA merge, merging both left and right VMAs
	 * but where prev and next have incompatible anon_vma objects, we revert
	 * to a merge of prev and VMA.
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags);
	vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, flags);

	/*
	 * Give both prev and next single anon_vma_chain fields, so they will
	 * merge with the NULL vmg->anon_vma.
	 *
	 * However, when prev is compared to next, the merge should fail.
	 */
	INIT_LIST_HEAD(&vma_prev->anon_vma_chain);
	list_add(&dummy_anon_vma_chain1.same_vma, &vma_prev->anon_vma_chain);
	ASSERT_TRUE(list_is_singular(&vma_prev->anon_vma_chain));
	vma_prev->anon_vma = &dummy_anon_vma;
	ASSERT_TRUE(is_mergeable_anon_vma(NULL, vma_prev->anon_vma, vma_prev));

	INIT_LIST_HEAD(&vma_next->anon_vma_chain);
	list_add(&dummy_anon_vma_chain2.same_vma, &vma_next->anon_vma_chain);
	ASSERT_TRUE(list_is_singular(&vma_next->anon_vma_chain));
	vma_next->anon_vma = (struct anon_vma *)2;
	ASSERT_TRUE(is_mergeable_anon_vma(NULL, vma_next->anon_vma, vma_next));

	ASSERT_FALSE(is_mergeable_anon_vma(vma_prev->anon_vma, vma_next->anon_vma, NULL));

	vmg_set_range(&vmg, 0x3000, 0x7000, 3, flags);
	vmg.prev = vma_prev;
	vmg.vma = vma;

	ASSERT_EQ(merge_existing(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x7000);
	ASSERT_EQ(vma_prev->vm_pgoff, 0);
	ASSERT_TRUE(vma_write_started(vma_prev));
	ASSERT_FALSE(vma_write_started(vma_next));

	/* Clear down and reset. */
	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);

	/*
	 * Now consider the new VMA case. This is equivalent, only adding a new
	 * VMA in a gap between prev and next.
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
	vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, flags);

	INIT_LIST_HEAD(&vma_prev->anon_vma_chain);
	list_add(&dummy_anon_vma_chain1.same_vma, &vma_prev->anon_vma_chain);
	vma_prev->anon_vma = (struct anon_vma *)1;

	INIT_LIST_HEAD(&vma_next->anon_vma_chain);
	list_add(&dummy_anon_vma_chain2.same_vma, &vma_next->anon_vma_chain);
	vma_next->anon_vma = (struct anon_vma *)2;

	vmg_set_range(&vmg, 0x3000, 0x7000, 3, flags);
	vmg.prev = vma_prev;

	ASSERT_EQ(merge_new(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x7000);
	ASSERT_EQ(vma_prev->vm_pgoff, 0);
	ASSERT_TRUE(vma_write_started(vma_prev));
	ASSERT_FALSE(vma_write_started(vma_next));

	/* Final cleanup. */
	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);

	return true;
}
static bool test_dup_anon_vma(void)
{
	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
	};
	struct anon_vma_chain dummy_anon_vma_chain = {
		.anon_vma = &dummy_anon_vma,
	};
	struct vm_area_struct *vma_prev, *vma_next, *vma;

	reset_dummy_anon_vma();

	/*
	 * Expanding a VMA to delete the next one duplicates next's anon_vma and
	 * assigns it to the expanded VMA.
	 *
	 * This covers new VMA merging, as these operations amount to a VMA
	 * expand.
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
	vma_next = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
	vma_next->anon_vma = &dummy_anon_vma;

	vmg_set_range(&vmg, 0, 0x5000, 0, flags);
	vmg.vma = vma_prev;
	vmg.next = vma_next;

	ASSERT_EQ(expand_existing(&vmg), 0);

	/* Will have been cloned. */
	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);

	/* Cleanup ready for next run. */
	cleanup_mm(&mm, &vmi);

	/*
	 * next has anon_vma, we assign to prev.
	 *
	 * |-------*********-------|
	 *  extend  delete  delete
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, flags);

	/* Initialise avc so mergeability check passes. */
	INIT_LIST_HEAD(&vma_next->anon_vma_chain);
	list_add(&dummy_anon_vma_chain.same_vma, &vma_next->anon_vma_chain);

	vma_next->anon_vma = &dummy_anon_vma;
	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
	vmg.prev = vma_prev;
	vmg.vma = vma;

	ASSERT_EQ(merge_existing(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);

	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x8000);

	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);

	cleanup_mm(&mm, &vmi);

	/*
	 * vma has anon_vma, we assign to prev.
	 *
	 * |-------*********-------|
	 *  extend  delete  delete
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, flags);

	vma->anon_vma = &dummy_anon_vma;
	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
	vmg.prev = vma_prev;
	vmg.vma = vma;

	ASSERT_EQ(merge_existing(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);

	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x8000);

	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);

	cleanup_mm(&mm, &vmi);

	/*
	 * vma has anon_vma, we assign to prev.
	 *
	 * |-------*************
	 *  extend  shrink/delete
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x8000, 3, flags);

	vma->anon_vma = &dummy_anon_vma;
	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
	vmg.prev = vma_prev;
	vmg.vma = vma;

	ASSERT_EQ(merge_existing(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);

	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x5000);

	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);

	cleanup_mm(&mm, &vmi);

	/*
	 * vma has anon_vma, we assign to next.
	 *
	 * *************-------|
	 *  shrink/delete extend
	 */
	vma = alloc_and_link_vma(&mm, 0, 0x5000, 0, flags);
	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, flags);

	vma->anon_vma = &dummy_anon_vma;
	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
	vmg.prev = vma;
	vmg.vma = vma;

	ASSERT_EQ(merge_existing(&vmg), vma_next);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);

	ASSERT_EQ(vma_next->vm_start, 0x3000);
	ASSERT_EQ(vma_next->vm_end, 0x8000);

	ASSERT_EQ(vma_next->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_next->anon_vma->was_cloned);

	cleanup_mm(&mm, &vmi);
	return true;
}
static bool test_vmi_prealloc_fail(void)
{
	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
	};
	struct vm_area_struct *vma_prev, *vma;

	/*
	 * We are merging vma into prev, with vma possessing an anon_vma, which
	 * will be duplicated. We cause the vmi preallocation to fail and assert
	 * the duplicated anon_vma is unlinked.
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
	vma->anon_vma = &dummy_anon_vma;

	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
	vmg.prev = vma_prev;
	vmg.vma = vma;

	fail_prealloc = true;

	/* This will cause the merge to fail. */
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_ERROR_NOMEM);
	/* We will already have assigned the anon_vma. */
	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	/* And it was both cloned and unlinked. */
	ASSERT_TRUE(dummy_anon_vma.was_cloned);
	ASSERT_TRUE(dummy_anon_vma.was_unlinked);

	cleanup_mm(&mm, &vmi); /* Resets fail_prealloc too. */

	/*
	 * We repeat the same operation for expanding a VMA, which is what new
	 * VMA merging ultimately uses too. This asserts that unlinking is
	 * performed in this case too.
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
	vma->anon_vma = &dummy_anon_vma;

	vmg_set_range(&vmg, 0, 0x5000, 3, flags);
	vmg.vma = vma_prev;
	vmg.next = vma;

	fail_prealloc = true;
	ASSERT_EQ(expand_existing(&vmg), -ENOMEM);
	ASSERT_EQ(vmg.state, VMA_MERGE_ERROR_NOMEM);

	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(dummy_anon_vma.was_cloned);
	ASSERT_TRUE(dummy_anon_vma.was_unlinked);

	cleanup_mm(&mm, &vmi);
	return true;
}
static bool test_merge_extend(void)
{
	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0x1000);
	struct vm_area_struct *vma;

	vma = alloc_and_link_vma(&mm, 0, 0x1000, 0, flags);
	alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, flags);

	/*
	 * Extend a VMA into the gap between itself and the following VMA.
	 * This should result in a merge.
	 */
	ASSERT_EQ(vma_merge_extend(&vmi, vma, 0x2000), vma);
	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0x4000);
	ASSERT_EQ(vma->vm_pgoff, 0);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 1);

	cleanup_mm(&mm, &vmi);
	return true;
}
static bool test_copy_vma(void)
{
	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	bool need_locks = false;
	VMA_ITERATOR(vmi, &mm, 0);
	struct vm_area_struct *vma, *vma_new, *vma_next;

	/* Move backwards and do not merge. */

	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
	vma_new = copy_vma(&vma, 0, 0x2000, 0, &need_locks);

	ASSERT_NE(vma_new, vma);
	ASSERT_EQ(vma_new->vm_start, 0);
	ASSERT_EQ(vma_new->vm_end, 0x2000);
	ASSERT_EQ(vma_new->vm_pgoff, 0);

	cleanup_mm(&mm, &vmi);

	/* Move a VMA into position next to another and merge the two. */

	vma = alloc_and_link_vma(&mm, 0, 0x2000, 0, flags);
	vma_next = alloc_and_link_vma(&mm, 0x6000, 0x8000, 6, flags);
	vma_new = copy_vma(&vma, 0x4000, 0x2000, 4, &need_locks);

	ASSERT_EQ(vma_new, vma_next);

	cleanup_mm(&mm, &vmi);
	return true;
}
static bool test_expand_only_mode(void)
{
	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vm_area_struct *vma_prev, *vma;
	VMG_STATE(vmg, &mm, &vmi, 0x5000, 0x9000, flags, 5);

	/*
	 * Place a VMA prior to the one we're expanding so we assert that we do
	 * not erroneously try to traverse to the previous VMA even though we
	 * have, through the use of VMG_FLAG_JUST_EXPAND, indicated we do not
	 * need to do so.
	 */
	alloc_and_link_vma(&mm, 0, 0x2000, 0, flags);

	/*
	 * We will be positioned at the prev VMA, but looking to expand to
	 * 0x9000.
	 */
	vma_iter_set(&vmi, 0x3000);
	vma_prev = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
	vmg.prev = vma_prev;
	vmg.merge_flags = VMG_FLAG_JUST_EXPAND;

	vma = vma_merge_new_range(&vmg);
	ASSERT_NE(vma, NULL);
	ASSERT_EQ(vma, vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma->vm_start, 0x3000);
	ASSERT_EQ(vma->vm_end, 0x9000);
	ASSERT_EQ(vma->vm_pgoff, 3);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(vma_iter_addr(&vmi), 0x3000);

	cleanup_mm(&mm, &vmi);
	return true;
}
int main(void)
{
	int num_tests = 0, num_fail = 0;

#define TEST(name)							\
	do {								\
		num_tests++;						\
		if (!test_##name()) {					\
			num_fail++;					\
			fprintf(stderr, "Test " #name " FAILED\n");	\
		}							\
	} while (0)
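	/*
	 * Each TEST(name) invocation below runs the corresponding test_*()
	 * function, bumping num_tests and, on failure, num_fail, so the
	 * summary printed at the end covers every test.
	 */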
	/* Very simple tests to kick the tyres. */
	TEST(simple_merge);
	TEST(simple_modify);
	TEST(simple_expand);
	TEST(simple_shrink);

	TEST(merge_new);
	TEST(vma_merge_special_flags);
	TEST(vma_merge_with_close);
	TEST(vma_merge_new_with_close);
	TEST(merge_existing);
	TEST(anon_vma_non_mergeable);
	TEST(dup_anon_vma);
	TEST(vmi_prealloc_fail);
	TEST(merge_extend);
	TEST(copy_vma);
	TEST(expand_only_mode);

#undef TEST

	printf("%d tests run, %d passed, %d failed.\n",
	       num_tests, num_tests - num_fail, num_fail);

	return num_fail == 0 ? EXIT_SUCCESS : EXIT_FAILURE;
}