// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1994-1999  Linus Torvalds
 */

/*
 * The msync() system call.
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/syscalls.h>
#include <linux/sched.h>

/*
 * MS_SYNC syncs the entire file - including mappings.
 *
 * MS_ASYNC does not start I/O (it used to, up to 2.5.67).
 * Nor does it mark the relevant pages dirty (it used to, up to 2.6.17).
 * Now it doesn't do anything, since dirty pages are properly tracked.
 *
 * The application may now run fsync() to
 * write out the dirty pages and wait on the writeout and check the result.
 * Or the application may run fadvise(FADV_DONTNEED) against the fd to start
 * async writeout immediately.
 * So by _not_ starting I/O in MS_ASYNC we provide complete flexibility to
 * applications.
 */
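
/*
 * Illustrative sketch (not part of the kernel source): under the
 * semantics above, a userspace program that wants writeback of a
 * dirty shared mapping can use MS_SYNC, or fsync() on the backing
 * fd (the names fd, data and len below are hypothetical):
 *
 *	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *	memcpy(p, data, len);		// dirty the pages
 *	if (msync(p, len, MS_SYNC))	// synchronous writeout, or:
 *		perror("msync");
 *	if (fsync(fd))			// equivalent flush via the fd
 *		perror("fsync");
 */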
SYSCALL_DEFINE3(msync, unsigned long, start, size_t, len, int, flags)
{
	unsigned long end;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int unmapped_error = 0;
	int error = -EINVAL;

	start = untagged_addr(start);
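
	/*
	 * Note: untagged_addr() above masks off any pointer tag bits
	 * (e.g. the arm64 tagged-address top byte) so that the alignment
	 * check and the VMA lookups below operate on the real address.
	 */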
	if (flags & ~(MS_ASYNC | MS_INVALIDATE | MS_SYNC))
		goto out;
	if (offset_in_page(start))
		goto out;
	if ((flags & MS_ASYNC) && (flags & MS_SYNC))
		goto out;
	error = -ENOMEM;
	/* ~PAGE_MASK == PAGE_SIZE - 1, so this rounds len up to a whole page */
	len = (len + ~PAGE_MASK) & PAGE_MASK;
	end = start + len;
	if (end < start)
		goto out;
	error = 0;
	if (end == start)
		goto out;
	/*
	 * If the interval [start,end) covers some unmapped address ranges,
	 * just ignore them, but return -ENOMEM at the end.
	 */
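	/*
	 * Illustrative consequence (hypothetical caller): msync() over a
	 * range containing a munmap()ed hole still writes out the mapped
	 * parts but returns -ENOMEM, so userspace should treat -ENOMEM
	 * here as "synced, but the range had holes".
	 */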
	mmap_read_lock(mm);
	vma = find_vma(mm, start);
	for (;;) {
		struct file *file;
		loff_t fstart, fend;

		/* Still start < end. */
		error = -ENOMEM;
		if (!vma)
			goto out_unlock;
		/* Here start < vma->vm_end. */
		if (start < vma->vm_start) {
			start = vma->vm_start;
			if (start >= end)
				goto out_unlock;
			unmapped_error = -ENOMEM;
		}
		/* Here vma->vm_start <= start < vma->vm_end. */
		if ((flags & MS_INVALIDATE) &&
				(vma->vm_flags & VM_LOCKED)) {
			error = -EBUSY;
			goto out_unlock;
		}
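		/*
		 * Illustrative note (hypothetical userspace caller): the
		 * check above makes MS_INVALIDATE fail on locked pages:
		 *
		 *	mlock(p, len);
		 *	msync(p, len, MS_INVALIDATE);	// fails, errno == EBUSY
		 */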
		file = vma->vm_file;
		/*
		 * Translate the virtual range into byte offsets within the
		 * backing file: vm_pgoff is vm_start's offset into the file
		 * in pages, so shifting by PAGE_SHIFT converts it to bytes.
		 */
		fstart = (start - vma->vm_start) +
			 ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
		fend = fstart + (min(end, vma->vm_end) - start) - 1;
		start = vma->vm_end;
		if ((flags & MS_SYNC) && file &&
				(vma->vm_flags & VM_SHARED)) {
			get_file(file);
			/* drop mmap_lock across the potentially blocking fsync */
			mmap_read_unlock(mm);
			error = vfs_fsync_range(file, fstart, fend, 1);
			fput(file);
			if (error || start >= end)
				goto out;
			mmap_read_lock(mm);
			vma = find_vma(mm, start);
		} else {
			if (start >= end) {
				error = 0;
				goto out_unlock;
			}
			vma = find_vma(mm, vma->vm_end);
		}
	}
out_unlock:
	mmap_read_unlock(mm);
out:
	/* GNU "?:" shorthand: return error if non-zero, else unmapped_error */
	return error ? : unmapped_error;
}