/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * vma_internal.h
 *
 * Header providing userland wrappers and shims for the functionality provided
 * by mm/vma_internal.h.
 *
 * We make the header guard the same as mm/vma_internal.h, so if this shim
 * header is included, it precludes the inclusion of the kernel one.
 */

#ifndef __MM_VMA_INTERNAL_H
#define __MM_VMA_INTERNAL_H

#define __private
#define __bitwise
#define __randomize_layout

#define CONFIG_PER_VMA_LOCK

#include <stdlib.h>

#include <linux/list.h>
#include <linux/maple_tree.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>
#define VM_WARN_ON(_expr) (WARN_ON(_expr))
#define VM_WARN_ON_ONCE(_expr) (WARN_ON_ONCE(_expr))
#define VM_BUG_ON(_expr) (BUG_ON(_expr))
#define VM_BUG_ON_VMA(_expr, _vma) (BUG_ON(_expr))

#define VM_NONE		0x00000000
#define VM_READ		0x00000001
#define VM_WRITE	0x00000002
#define VM_EXEC		0x00000004
#define VM_SHARED	0x00000008
#define VM_MAYREAD	0x00000010
#define VM_MAYWRITE	0x00000020
#define VM_GROWSDOWN	0x00000100
#define VM_PFNMAP	0x00000400
#define VM_LOCKED	0x00002000
#define VM_IO		0x00004000
#define VM_DONTEXPAND	0x00040000
#define VM_LOCKONFAULT	0x00080000
#define VM_ACCOUNT	0x00100000
#define VM_NORESERVE	0x00200000
#define VM_MIXEDMAP	0x10000000
#define VM_STACK	VM_GROWSDOWN
#define VM_SHADOW_STACK	VM_NONE
#define VM_SOFTDIRTY	0

#define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC)
#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)

/* This mask represents all the VMA flag bits used by mlock */
#define VM_LOCKED_MASK	(VM_LOCKED | VM_LOCKONFAULT)

/* VM is sealed, in vm_flags */
#define VM_SEALED	_BITUL(63)

#define FIRST_USER_ADDRESS	0UL
#define USER_PGTABLES_CEILING	0UL
#define vma_policy(vma) NULL

#define down_write_nest_lock(sem, nest_lock)

#define pgprot_val(x)	((x).pgprot)
#define __pgprot(x)	((pgprot_t) { (x) } )

#define for_each_vma(__vmi, __vma)					\
	while (((__vma) = vma_next(&(__vmi))) != NULL)

/* The MM code likes to work with exclusive end addresses */
#define for_each_vma_range(__vmi, __vma, __end)				\
	while (((__vma) = vma_find(&(__vmi), (__end))) != NULL)
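
/*
 * Illustrative sketch only (an assumption, not part of the shim): walking the
 * VMAs that intersect a range with an exclusive end address. VMA_ITERATOR()
 * and vma_find() are defined later in this header.
 *
 *	VMA_ITERATOR(vmi, mm, 0x1000);
 *	struct vm_area_struct *vma;
 *
 *	for_each_vma_range(vmi, vma, 0x3000)
 *		inspect(vma);	// hypothetical helper
 *
 * This visits every VMA overlapping [0x1000, 0x3000); a VMA starting at
 * 0x3000 itself is excluded.
 */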
#define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)

#define PHYS_PFN(x)	((unsigned long)((x) >> PAGE_SHIFT))

#define test_and_set_bit(nr, addr) __test_and_set_bit(nr, addr)
#define test_and_clear_bit(nr, addr) __test_and_clear_bit(nr, addr)

#define TASK_SIZE ((1ul << 47) - PAGE_SIZE)

#define AS_MM_ALL_LOCKS 2

/* We hardcode this for now. */
#define sysctl_max_map_count 0x1000000UL
#define pgoff_t unsigned long
typedef unsigned long	pgprotval_t;
typedef struct pgprot { pgprotval_t pgprot; } pgprot_t;
typedef unsigned long	vm_flags_t;
typedef __bitwise unsigned int vm_fault_t;
/*
 * The shared stubs do not implement this; it amounts to an fprintf(stderr, ...).
 */
#define pr_warn_once pr_err

typedef struct refcount_struct {
	atomic_t refs;
} refcount_t;
/*
 * Define the task command name length as an enum, so that it can be visible
 * to BPF programs.
 */
enum {
	TASK_COMM_LEN = 16,
};

struct task_struct {
	char comm[TASK_COMM_LEN];
	struct mm_struct *mm;
};

struct task_struct *get_current(void);

#define current get_current()
struct anon_vma {
	struct anon_vma *root;
	struct rb_root_cached rb_root;

	/* Test fields. */
	bool was_cloned;
	bool was_unlinked;
};

struct anon_vma_chain {
	struct anon_vma *anon_vma;
	struct list_head same_vma;
};

struct anon_vma_name {
	struct kref kref;
	/* The name needs to be at the end because it is dynamically sized. */
	char name[];
};
struct vma_iterator {
	struct ma_state mas;
};

#define VMA_ITERATOR(name, __mm, __addr)				\
	struct vma_iterator name = {					\
		.mas = {						\
			.tree = &(__mm)->mm_mt,				\
			.index = __addr,				\
			.node = NULL,					\
			.status = ma_start,				\
		},							\
	}
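
/*
 * Illustrative sketch (an assumption, not from the kernel sources): declaring
 * an iterator on the stack and walking every VMA in an mm from address 0.
 *
 *	VMA_ITERATOR(vmi, mm, 0);
 *	struct vm_area_struct *vma;
 *	unsigned long total = 0;
 *
 *	for_each_vma(vmi, vma)
 *		total += vma_pages(vma);
 */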
struct address_space {
	struct rb_root_cached	i_mmap;
	unsigned long		flags;
	atomic_t		i_mmap_writable;
};

struct vm_userfaultfd_ctx {};
struct mmu_gather {};
#define DEFINE_MUTEX(mutexname) \
	struct mutex mutexname = {}

struct mm_struct {
	struct maple_tree mm_mt;
	int map_count;			/* number of VMAs */
	unsigned long total_vm;		/* Total pages mapped */
	unsigned long locked_vm;	/* Pages that have PG_mlocked set */
	unsigned long data_vm;		/* VM_WRITE & ~VM_SHARED & ~VM_STACK */
	unsigned long exec_vm;		/* VM_EXEC & ~VM_WRITE & ~VM_STACK */
	unsigned long stack_vm;		/* VM_STACK */
};

struct vma_lock {
	struct rw_semaphore lock;
};

struct file {
	struct address_space	*f_mapping;
};
struct vm_area_struct {
	/* The first cache line has the info for VMA tree walking. */

	union {
		struct {
			/* VMA covers [vm_start; vm_end) addresses within mm */
			unsigned long vm_start;
			unsigned long vm_end;
		};
#ifdef CONFIG_PER_VMA_LOCK
		struct rcu_head vm_rcu;	/* Used for deferred freeing. */
#endif
	};

	struct mm_struct *vm_mm;	/* The address space we belong to. */
	pgprot_t vm_page_prot;		/* Access permissions of this VMA. */

	/*
	 * Flags, see mm.h.
	 * To modify use vm_flags_{init|reset|set|clear|mod} functions.
	 */
	union {
		const vm_flags_t vm_flags;
		vm_flags_t __private __vm_flags;
	};

#ifdef CONFIG_PER_VMA_LOCK
	/* Flag to indicate areas detached from the mm->mm_mt tree */
	bool detached;

	/*
	 * Can only be written (using WRITE_ONCE()) while holding both:
	 *  - mmap_lock (in write mode)
	 *  - vm_lock->lock (in write mode)
	 * Can be read reliably while holding one of:
	 *  - mmap_lock (in read or write mode)
	 *  - vm_lock->lock (in read or write mode)
	 * Can be read unreliably (using READ_ONCE()) for pessimistic bailout
	 * while holding nothing (except RCU to keep the VMA struct allocated).
	 *
	 * This sequence counter is explicitly allowed to overflow; sequence
	 * counter reuse can only lead to occasional unnecessary use of the
	 * slowpath.
	 */
	int vm_lock_seq;
	struct vma_lock *vm_lock;
#endif

	/*
	 * For areas with an address space and backing store,
	 * linkage into the address_space->i_mmap interval tree.
	 */
	struct {
		struct rb_node rb;
		unsigned long rb_subtree_last;
	} shared;

	/*
	 * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
	 * list, after a COW of one of the file pages. A MAP_SHARED vma
	 * can only be in the i_mmap tree. An anonymous MAP_PRIVATE, stack
	 * or brk vma (with NULL file) can only be in an anon_vma list.
	 */
	struct list_head anon_vma_chain; /* Serialized by mmap_lock &
					  * page_table_lock */
	struct anon_vma *anon_vma;	/* Serialized by page_table_lock */

	/* Function pointers to deal with this struct. */
	const struct vm_operations_struct *vm_ops;

	/* Information about our backing store: */
	unsigned long vm_pgoff;		/* Offset (within vm_file) in PAGE_SIZE
					   units */
	struct file *vm_file;		/* File we map to (can be NULL). */
	void *vm_private_data;		/* was vm_pte (shared mem) */

#ifdef CONFIG_ANON_VMA_NAME
	/*
	 * For private and shared anonymous mappings, a pointer to a null
	 * terminated string containing the name given to the vma, or NULL if
	 * unnamed. Serialized by mmap_lock. Use anon_vma_name to access.
	 */
	struct anon_vma_name *anon_name;
#endif
#ifdef CONFIG_SWAP
	atomic_long_t swap_readahead_info;
#endif
#ifndef CONFIG_MMU
	struct vm_region *vm_region;	/* NOMMU mapping region */
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
#endif
#ifdef CONFIG_NUMA_BALANCING
	struct vma_numab_state *numab_state;	/* NUMA Balancing state */
#endif
	struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
} __randomize_layout;
struct vm_operations_struct {
	void (*open)(struct vm_area_struct *area);
	/**
	 * @close: Called when the VMA is being removed from the MM.
	 * Context: User context. May sleep. Caller holds mmap_lock.
	 */
	void (*close)(struct vm_area_struct *area);
	/* Called any time before splitting to check if it's allowed */
	int (*may_split)(struct vm_area_struct *area, unsigned long addr);
	int (*mremap)(struct vm_area_struct *area);
	/*
	 * Called by mprotect() to make driver-specific permission
	 * checks before mprotect() is finalised. The VMA must not
	 * be modified. Returns 0 if mprotect() can proceed.
	 */
	int (*mprotect)(struct vm_area_struct *vma, unsigned long start,
			unsigned long end, unsigned long newflags);
	vm_fault_t (*fault)(struct vm_fault *vmf);
	vm_fault_t (*huge_fault)(struct vm_fault *vmf, unsigned int order);
	vm_fault_t (*map_pages)(struct vm_fault *vmf,
			pgoff_t start_pgoff, pgoff_t end_pgoff);
	unsigned long (*pagesize)(struct vm_area_struct *area);

	/*
	 * Notification that a previously read-only page is about to become
	 * writable; if an error is returned it will cause a SIGBUS.
	 */
	vm_fault_t (*page_mkwrite)(struct vm_fault *vmf);

	/* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */
	vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf);

	/*
	 * Called by access_process_vm() when get_user_pages() fails, typically
	 * for use by special VMAs. See also generic_access_phys() for a
	 * generic implementation useful for any iomem mapping.
	 */
	int (*access)(struct vm_area_struct *vma, unsigned long addr,
		      void *buf, int len, int write);

	/*
	 * Called by the /proc/PID/maps code to ask the vma whether it
	 * has a special name. Returning non-NULL will also cause this
	 * vma to be dumped unconditionally.
	 */
	const char *(*name)(struct vm_area_struct *vma);

	/*
	 * set_policy() op must add a reference to any non-NULL @new mempolicy
	 * to hold the policy upon return. Caller should pass NULL @new to
	 * remove a policy and fall back to surrounding context--i.e. do not
	 * install a MPOL_DEFAULT policy, nor the task or system default
	 * mempolicy.
	 */
	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);

	/*
	 * get_policy() op must add reference [mpol_get()] to any policy at
	 * (vma,addr) marked as MPOL_SHARED. The shared policy infrastructure
	 * in mm/mempolicy.c will do this automatically.
	 * get_policy() must NOT add a ref if the policy at (vma,addr) is not
	 * marked as MPOL_SHARED. vma policies are protected by the mmap_lock.
	 * If no [shared/vma] mempolicy exists at the addr, get_policy() op
	 * must return NULL--i.e., do not "fallback" to task or system default
	 * policy.
	 */
	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
					unsigned long addr, pgoff_t *ilx);

	/*
	 * Called by vm_normal_page() for special PTEs to find the
	 * page for @addr. This is useful if the default behavior
	 * (using pte_page()) would not find the correct page.
	 */
	struct page *(*find_special_page)(struct vm_area_struct *vma,
					  unsigned long addr);
};
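
/*
 * Illustrative sketch (an assumption, not part of this shim): a minimal
 * operations table for a mapping that refuses all faults. `my_fault' and
 * `my_vm_ops' are hypothetical names.
 *
 *	static vm_fault_t my_fault(struct vm_fault *vmf)
 *	{
 *		return VM_FAULT_SIGBUS;	// reject every fault
 *	}
 *
 *	static const struct vm_operations_struct my_vm_ops = {
 *		.fault = my_fault,
 *	};
 */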
static inline void vma_iter_invalidate(struct vma_iterator *vmi)
{
	mas_pause(&vmi->mas);
}
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	return __pgprot(pgprot_val(oldprot) | pgprot_val(newprot));
}
static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	return __pgprot(vm_flags);
}
static inline bool is_shared_maywrite(vm_flags_t vm_flags)
{
	return (vm_flags & (VM_SHARED | VM_MAYWRITE)) ==
		(VM_SHARED | VM_MAYWRITE);
}

static inline bool vma_is_shared_maywrite(struct vm_area_struct *vma)
{
	return is_shared_maywrite(vma->vm_flags);
}
static inline struct vm_area_struct *vma_next(struct vma_iterator *vmi)
{
	/*
	 * Uses mas_find() to get the first VMA when the iterator starts.
	 * Calling mas_next() could skip the first entry.
	 */
	return mas_find(&vmi->mas, ULONG_MAX);
}
static inline bool vma_lock_alloc(struct vm_area_struct *vma)
{
	vma->vm_lock = calloc(1, sizeof(struct vma_lock));

	if (!vma->vm_lock)
		return false;

	init_rwsem(&vma->vm_lock->lock);
	vma->vm_lock_seq = -1;

	return true;
}
static inline void vma_assert_write_locked(struct vm_area_struct *);
static inline void vma_mark_detached(struct vm_area_struct *vma, bool detached)
{
	/* When detaching vma should be write-locked */
	if (detached)
		vma_assert_write_locked(vma);
	vma->detached = detached;
}
extern const struct vm_operations_struct vma_dummy_vm_ops;

static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
{
	memset(vma, 0, sizeof(*vma));
	vma->vm_mm = mm;
	vma->vm_ops = &vma_dummy_vm_ops;
	INIT_LIST_HEAD(&vma->anon_vma_chain);
	vma_mark_detached(vma, false);
}
static inline struct vm_area_struct *vm_area_alloc(struct mm_struct *mm)
{
	struct vm_area_struct *vma = calloc(1, sizeof(struct vm_area_struct));

	if (!vma)
		return NULL;

	vma_init(vma, mm);
	if (!vma_lock_alloc(vma)) {
		free(vma);
		return NULL;
	}

	return vma;
}

static inline struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
{
	struct vm_area_struct *new = calloc(1, sizeof(struct vm_area_struct));

	if (!new)
		return NULL;

	memcpy(new, orig, sizeof(*new));
	if (!vma_lock_alloc(new)) {
		free(new);
		return NULL;
	}
	INIT_LIST_HEAD(&new->anon_vma_chain);

	return new;
}
/*
 * These are defined in vma.h, but sadly vm_stat_account() is referenced by
 * kernel/fork.c, so we have to make these broadly available there, and
 * temporarily define them here to resolve the dependency cycle.
 */

#define is_exec_mapping(flags) \
	((flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC)

#define is_stack_mapping(flags) \
	(((flags & VM_STACK) == VM_STACK) || (flags & VM_SHADOW_STACK))

#define is_data_mapping(flags) \
	((flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE)
static inline void vm_stat_account(struct mm_struct *mm, vm_flags_t flags,
				   long npages)
{
	WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm) + npages);

	if (is_exec_mapping(flags))
		mm->exec_vm += npages;
	else if (is_stack_mapping(flags))
		mm->stack_vm += npages;
	else if (is_data_mapping(flags))
		mm->data_vm += npages;
}

#undef is_exec_mapping
#undef is_stack_mapping
#undef is_data_mapping
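
/*
 * Illustrative sketch (an assumption): how the accounting above classifies a
 * typical PROT_READ|PROT_WRITE private mapping of four pages. Such a mapping
 * carries VM_READ | VM_WRITE, which matches is_data_mapping():
 *
 *	vm_stat_account(mm, VM_READ | VM_WRITE, 4);
 *	// mm->total_vm += 4 and mm->data_vm += 4
 *
 * An executable, non-writable mapping would instead be added to mm->exec_vm.
 */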
/* Currently stubbed but we may later wish to un-stub. */
static inline void vm_acct_memory(long pages);
static inline void vm_unacct_memory(long pages)
{
	vm_acct_memory(-pages);
}
static inline void mapping_allow_writable(struct address_space *mapping)
{
	atomic_inc(&mapping->i_mmap_writable);
}
static inline void vma_set_range(struct vm_area_struct *vma,
				 unsigned long start, unsigned long end,
				 pgoff_t pgoff)
{
	vma->vm_start = start;
	vma->vm_end = end;
	vma->vm_pgoff = pgoff;
}
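
/*
 * Illustrative sketch (an assumption): building a detached VMA for a test,
 * combining the allocation and range helpers above. vm_flags_init() is
 * defined later in this header.
 *
 *	struct vm_area_struct *vma = vm_area_alloc(mm);
 *
 *	if (vma) {
 *		vma_set_range(vma, 0x1000, 0x3000, 0);
 *		vm_flags_init(vma, VM_READ | VM_WRITE);
 *	}
 */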
static inline
struct vm_area_struct *vma_find(struct vma_iterator *vmi, unsigned long max)
{
	return mas_find(&vmi->mas, max - 1);
}
static inline int vma_iter_clear_gfp(struct vma_iterator *vmi,
				     unsigned long start, unsigned long end,
				     gfp_t gfp)
{
	__mas_set_range(&vmi->mas, start, end - 1);
	mas_store_gfp(&vmi->mas, NULL, gfp);
	if (unlikely(mas_is_err(&vmi->mas)))
		return -ENOMEM;

	return 0;
}
static inline void mmap_assert_locked(struct mm_struct *);
static inline struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
							    unsigned long start_addr,
							    unsigned long end_addr)
{
	unsigned long index = start_addr;

	mmap_assert_locked(mm);
	return mt_find(&mm->mm_mt, &index, end_addr - 1);
}
static inline
struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr)
{
	return mtree_load(&mm->mm_mt, addr);
}
static inline struct vm_area_struct *vma_prev(struct vma_iterator *vmi)
{
	return mas_prev(&vmi->mas, 0);
}
static inline void vma_iter_set(struct vma_iterator *vmi, unsigned long addr)
{
	mas_set(&vmi->mas, addr);
}
static inline bool vma_is_anonymous(struct vm_area_struct *vma)
{
	return !vma->vm_ops;
}
/* Defined in vma.h, so temporarily define here to avoid circular dependency. */
#define vma_iter_load(vmi) \
	mas_walk(&(vmi)->mas)
static inline struct vm_area_struct *
find_vma_prev(struct mm_struct *mm, unsigned long addr,
	      struct vm_area_struct **pprev)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, addr);

	vma = vma_iter_load(&vmi);
	*pprev = vma_prev(&vmi);
	if (!vma)
		vma = vma_next(&vmi);

	return vma;
}
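
/*
 * Illustrative sketch (an assumption): looking up a VMA together with its
 * predecessor.
 *
 *	struct vm_area_struct *prev, *vma;
 *
 *	vma = find_vma_prev(mm, addr, &prev);
 *	// vma is the VMA containing addr, or the next one, or NULL;
 *	// prev is the VMA immediately before it, or NULL.
 */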
static inline void vma_iter_init(struct vma_iterator *vmi,
				 struct mm_struct *mm, unsigned long addr)
{
	mas_init(&vmi->mas, &mm->mm_mt, addr);
}
/* Stubbed functions. */

static inline struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
						   struct vm_userfaultfd_ctx vm_ctx)
{
	return true;
}

static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
				    struct anon_vma_name *anon_name2)
{
	return true;
}
static inline void might_sleep(void)
{
}
static inline unsigned long vma_pages(struct vm_area_struct *vma)
{
	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
}
static inline void fput(struct file *)
{
}

static inline void mpol_put(struct mempolicy *)
{
}
static inline void vma_lock_free(struct vm_area_struct *vma)
{
	free(vma->vm_lock);
}

static inline void __vm_area_free(struct vm_area_struct *vma)
{
	vma_lock_free(vma);
	free(vma);
}

static inline void vm_area_free(struct vm_area_struct *vma)
{
	__vm_area_free(vma);
}
static inline void lru_add_drain(void)
{
}

static inline void tlb_gather_mmu(struct mmu_gather *, struct mm_struct *)
{
}

static inline void update_hiwater_rss(struct mm_struct *)
{
}

static inline void update_hiwater_vm(struct mm_struct *)
{
}

static inline void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
			      struct vm_area_struct *vma, unsigned long start_addr,
			      unsigned long end_addr, unsigned long tree_end,
			      bool mm_wr_locked)
{
	(void)tlb;
	(void)mas;
	(void)vma;
	(void)start_addr;
	(void)end_addr;
	(void)tree_end;
	(void)mm_wr_locked;
}
static inline void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
				 struct vm_area_struct *vma, unsigned long floor,
				 unsigned long ceiling, bool mm_wr_locked)
{
	(void)tlb;
	(void)mas;
	(void)vma;
	(void)floor;
	(void)ceiling;
	(void)mm_wr_locked;
}
static inline void mapping_unmap_writable(struct address_space *)
{
}

static inline void flush_dcache_mmap_lock(struct address_space *)
{
}

static inline void tlb_finish_mmu(struct mmu_gather *)
{
}

static inline struct file *get_file(struct file *f)
{
	return f;
}
static inline int vma_dup_policy(struct vm_area_struct *, struct vm_area_struct *)
{
	return 0;
}

static inline int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
{
	/* For testing purposes. We indicate that an anon_vma has been cloned. */
	if (src->anon_vma != NULL) {
		dst->anon_vma = src->anon_vma;
		dst->anon_vma->was_cloned = true;
	}

	return 0;
}
static inline void vma_start_write(struct vm_area_struct *vma)
{
	/* Used to indicate to tests that a write operation has begun. */
	vma->vm_lock_seq++;
}

static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
	(void)vma;
	(void)start;
	(void)end;
	(void)adjust_next;
}
static inline void vma_iter_free(struct vma_iterator *vmi)
{
	mas_destroy(&vmi->mas);
}

static inline
struct vm_area_struct *vma_iter_next_range(struct vma_iterator *vmi)
{
	return mas_next_range(&vmi->mas, ULONG_MAX);
}
static inline void vm_acct_memory(long pages)
{
	(void)pages;
}
static inline void vma_interval_tree_insert(struct vm_area_struct *,
					    struct rb_root_cached *)
{
}

static inline void vma_interval_tree_remove(struct vm_area_struct *,
					    struct rb_root_cached *)
{
}

static inline void flush_dcache_mmap_unlock(struct address_space *)
{
}

static inline void anon_vma_interval_tree_insert(struct anon_vma_chain *,
						 struct rb_root_cached *)
{
}

static inline void anon_vma_interval_tree_remove(struct anon_vma_chain *,
						 struct rb_root_cached *)
{
}

static inline void uprobe_mmap(struct vm_area_struct *)
{
}

static inline void uprobe_munmap(struct vm_area_struct *vma,
				 unsigned long start, unsigned long end)
{
	(void)vma;
	(void)start;
	(void)end;
}
static inline void i_mmap_lock_write(struct address_space *)
{
}

static inline void anon_vma_lock_write(struct anon_vma *)
{
}

static inline void vma_assert_write_locked(struct vm_area_struct *)
{
}

static inline void unlink_anon_vmas(struct vm_area_struct *vma)
{
	/* For testing purposes, indicate that the anon_vma was unlinked. */
	vma->anon_vma->was_unlinked = true;
}
static inline void anon_vma_unlock_write(struct anon_vma *)
{
}

static inline void i_mmap_unlock_write(struct address_space *)
{
}

static inline void anon_vma_merge(struct vm_area_struct *,
				  struct vm_area_struct *)
{
}

static inline int userfaultfd_unmap_prep(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 struct list_head *unmaps)
{
	(void)vma;
	(void)start;
	(void)end;
	(void)unmaps;

	return 0;
}
static inline void mmap_write_downgrade(struct mm_struct *)
{
}

static inline void mmap_read_unlock(struct mm_struct *)
{
}

static inline void mmap_write_unlock(struct mm_struct *)
{
}

static inline bool can_modify_mm(struct mm_struct *mm,
				 unsigned long start,
				 unsigned long end)
{
	(void)mm;
	(void)start;
	(void)end;

	return true;
}

static inline void arch_unmap(struct mm_struct *mm,
			      unsigned long start,
			      unsigned long end)
{
	(void)mm;
	(void)start;
	(void)end;
}
static inline void mmap_assert_locked(struct mm_struct *)
{
}

static inline bool mpol_equal(struct mempolicy *, struct mempolicy *)
{
	return true;
}

static inline void khugepaged_enter_vma(struct vm_area_struct *vma,
					unsigned long vm_flags)
{
	(void)vma;
	(void)vm_flags;
}

static inline bool mapping_can_writeback(struct address_space *)
{
	return true;
}

static inline bool is_vm_hugetlb_page(struct vm_area_struct *)
{
	return false;
}

static inline bool vma_soft_dirty_enabled(struct vm_area_struct *)
{
	return false;
}

static inline bool userfaultfd_wp(struct vm_area_struct *)
{
	return false;
}

static inline void mmap_assert_write_locked(struct mm_struct *)
{
}
*)
916 static inline void mutex_lock(struct mutex
*)
920 static inline void mutex_unlock(struct mutex
*)
924 static inline bool mutex_is_locked(struct mutex
*)
929 static inline bool signal_pending(void *)
934 static inline bool is_file_hugepages(struct file
*)
939 static inline int security_vm_enough_memory_mm(struct mm_struct
*, long)
944 static inline bool may_expand_vm(struct mm_struct
*, vm_flags_t
, unsigned long)
static inline void vm_flags_init(struct vm_area_struct *vma,
				 vm_flags_t flags)
{
	vma->__vm_flags = flags;
}

static inline void vm_flags_set(struct vm_area_struct *vma,
				vm_flags_t flags)
{
	vma_start_write(vma);
	vma->__vm_flags |= flags;
}

static inline void vm_flags_clear(struct vm_area_struct *vma,
				  vm_flags_t flags)
{
	vma_start_write(vma);
	vma->__vm_flags &= ~flags;
}
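
/*
 * Illustrative sketch (an assumption): typical flag manipulation. The set and
 * clear helpers write-lock the VMA via vma_start_write() before touching
 * __vm_flags, so callers need only hold the mmap_lock in write mode.
 *
 *	vm_flags_set(vma, VM_LOCKED);
 *	vm_flags_clear(vma, VM_LOCKED | VM_LOCKONFAULT);
 */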
static inline int call_mmap(struct file *, struct vm_area_struct *)
{
	return 0;
}

static inline int shmem_zero_setup(struct vm_area_struct *)
{
	return 0;
}

static inline void vma_set_anonymous(struct vm_area_struct *vma)
{
	vma->vm_ops = NULL;
}

static inline void ksm_add_vma(struct vm_area_struct *)
{
}

static inline void perf_event_mmap(struct vm_area_struct *)
{
}

static inline bool vma_is_dax(struct vm_area_struct *)
{
	return false;
}

static inline struct vm_area_struct *get_gate_vma(struct mm_struct *)
{
	return NULL;
}

bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);
/* Update vma->vm_page_prot to reflect vma->vm_flags. */
static inline void vma_set_page_prot(struct vm_area_struct *vma)
{
	unsigned long vm_flags = vma->vm_flags;
	pgprot_t vm_page_prot;

	/* testing: we inline vm_pgprot_modify() to avoid clash with vma.h. */
	vm_page_prot = pgprot_modify(vma->vm_page_prot, vm_get_page_prot(vm_flags));

	if (vma_wants_writenotify(vma, vm_page_prot)) {
		vm_flags &= ~VM_SHARED;
		/* testing: we inline vm_pgprot_modify() to avoid clash with vma.h. */
		vm_page_prot = pgprot_modify(vm_page_prot, vm_get_page_prot(vm_flags));
	}
	/* remove_protection_ptes reads vma->vm_page_prot without mmap_lock */
	WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
}
static inline bool arch_validate_flags(unsigned long)
{
	return true;
}

static inline void vma_close(struct vm_area_struct *)
{
}

static inline int mmap_file(struct file *, struct vm_area_struct *)
{
	return 0;
}

#endif	/* __MM_VMA_INTERNAL_H */