/*
 * mm/interval_tree.c - interval tree for mapping->i_mmap
 *
 * Copyright (C) 2012, Michel Lespinasse <walken@google.com>
 *
 * This file is released under the GPL v2.
 */

#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/rmap.h>
#include <linux/interval_tree_generic.h>

static inline unsigned long vma_start_pgoff(struct vm_area_struct *v)
{
	return v->vm_pgoff;
}

static inline unsigned long vma_last_pgoff(struct vm_area_struct *v)
{
	return v->vm_pgoff + ((v->vm_end - v->vm_start) >> PAGE_SHIFT) - 1;
}
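
/*
 * The INTERVAL_TREE_DEFINE() invocation below instantiates the generic
 * interval tree template from <linux/interval_tree_generic.h> for VMAs:
 * it generates vma_interval_tree_insert(), vma_interval_tree_remove(),
 * vma_interval_tree_iter_first() and vma_interval_tree_iter_next(), keyed
 * on the [vma_start_pgoff, vma_last_pgoff] page offset range and kept
 * augmented via shared.rb_subtree_last.  The empty ITSTATIC argument
 * leaves the generated functions externally visible.
 */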
INTERVAL_TREE_DEFINE(struct vm_area_struct, shared.rb,
		     unsigned long, shared.rb_subtree_last,
		     vma_start_pgoff, vma_last_pgoff,, vma_interval_tree)

/* Insert node immediately after prev in the interval tree */
void vma_interval_tree_insert_after(struct vm_area_struct *node,
				    struct vm_area_struct *prev,
				    struct rb_root_cached *root)
{
	struct rb_node **link;
	struct vm_area_struct *parent;
	unsigned long last = vma_last_pgoff(node);

	VM_BUG_ON_VMA(vma_start_pgoff(node) != vma_start_pgoff(prev), node);

	if (!prev->shared.rb.rb_right) {
		/* prev has no right child: node becomes its right child */
		parent = prev;
		link = &prev->shared.rb.rb_right;
	} else {
		/*
		 * Otherwise descend to the leftmost node of prev's right
		 * subtree, updating rb_subtree_last on the way down so the
		 * augmented data stays correct for the new interval.
		 */
		parent = rb_entry(prev->shared.rb.rb_right,
				  struct vm_area_struct, shared.rb);
		if (parent->shared.rb_subtree_last < last)
			parent->shared.rb_subtree_last = last;
		while (parent->shared.rb.rb_left) {
			parent = rb_entry(parent->shared.rb.rb_left,
				struct vm_area_struct, shared.rb);
			if (parent->shared.rb_subtree_last < last)
				parent->shared.rb_subtree_last = last;
		}
		link = &parent->shared.rb.rb_left;
	}

	node->shared.rb_subtree_last = last;
	rb_link_node(&node->shared.rb, &parent->shared.rb, link);
	rb_insert_augmented(&node->shared.rb, &root->rb_root,
			    &vma_interval_tree_augment);
}

static inline unsigned long avc_start_pgoff(struct anon_vma_chain *avc)
{
	return vma_start_pgoff(avc->vma);
}

static inline unsigned long avc_last_pgoff(struct anon_vma_chain *avc)
{
	return vma_last_pgoff(avc->vma);
}
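
/*
 * For anon_vma chains the generated functions are made "static inline"
 * with a double-underscore prefix; the exported anon_vma_interval_tree_*
 * wrappers below add the CONFIG_DEBUG_VM_RB bookkeeping around them.
 */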
INTERVAL_TREE_DEFINE(struct anon_vma_chain, rb, unsigned long, rb_subtree_last,
		     avc_start_pgoff, avc_last_pgoff,
		     static inline, __anon_vma_interval_tree)

void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
				   struct rb_root_cached *root)
{
#ifdef CONFIG_DEBUG_VM_RB
	node->cached_vma_start = avc_start_pgoff(node);
	node->cached_vma_last = avc_last_pgoff(node);
#endif
	__anon_vma_interval_tree_insert(node, root);
}

void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
				   struct rb_root_cached *root)
{
	__anon_vma_interval_tree_remove(node, root);
}

struct anon_vma_chain *
anon_vma_interval_tree_iter_first(struct rb_root_cached *root,
				  unsigned long first, unsigned long last)
{
	return __anon_vma_interval_tree_iter_first(root, first, last);
}

struct anon_vma_chain *
anon_vma_interval_tree_iter_next(struct anon_vma_chain *node,
				 unsigned long first, unsigned long last)
{
	return __anon_vma_interval_tree_iter_next(node, first, last);
}
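
/*
 * With CONFIG_DEBUG_VM_RB, each chain caches its interval at insert time
 * so the verify helper can warn if the vma's pgoff range changed while the
 * chain was still linked into the tree.
 */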
#ifdef CONFIG_DEBUG_VM_RB
void anon_vma_interval_tree_verify(struct anon_vma_chain *node)
{
	WARN_ON_ONCE(node->cached_vma_start != avc_start_pgoff(node));
	WARN_ON_ONCE(node->cached_vma_last != avc_last_pgoff(node));
));