// SPDX-License-Identifier: GPL-2.0
/*
 * mm/fadvise.c
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 11Jan2003	Andrew Morton
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/fadvise.h>
#include <linux/writeback.h>
#include <linux/syscalls.h>
#include <linux/swap.h>

#include <asm/unistd.h>

#include "internal.h"

/*
 * POSIX_FADV_WILLNEED could set PG_Referenced, and POSIX_FADV_NOREUSE could
 * deactivate the pages and clear PG_Referenced.
 */
int generic_fadvise(struct file *file, loff_t offset, loff_t len, int advice)
{
	struct inode *inode;
	struct address_space *mapping;
	struct backing_dev_info *bdi;
	loff_t endbyte;			/* inclusive */
	pgoff_t start_index;
	pgoff_t end_index;
	unsigned long nrpages;

	inode = file_inode(file);
	if (S_ISFIFO(inode->i_mode))
		return -ESPIPE;

	mapping = file->f_mapping;
	if (!mapping || len < 0)
		return -EINVAL;

	bdi = inode_to_bdi(mapping->host);

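	/*
	 * DAX mappings do their I/O directly and bypass the page cache, and
	 * an inode backed by the noop backing device has no readahead or
	 * writeback state to tune, so the advice below would have nothing
	 * to act on.
	 */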
	if (IS_DAX(inode) || (bdi == &noop_backing_dev_info)) {
		switch (advice) {
		case POSIX_FADV_NORMAL:
		case POSIX_FADV_RANDOM:
		case POSIX_FADV_SEQUENTIAL:
		case POSIX_FADV_WILLNEED:
		case POSIX_FADV_NOREUSE:
		case POSIX_FADV_DONTNEED:
			/* no bad return value, but ignore advice */
			break;
		default:
			return -EINVAL;
		}
		return 0;
	}

	/*
	 * Careful about overflows. Len == 0 means "as much as possible".  Use
	 * unsigned math because signed overflows are undefined and UBSan
	 * complains.
	 */
	endbyte = (u64)offset + (u64)len;
	if (!len || endbyte < len)
		endbyte = -1;
	else
		endbyte--;		/* inclusive */
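	/*
	 * E.g. offset 3000 with len 5000 yields endbyte 7999, while len 0,
	 * or an offset + len sum that overflows, yields endbyte -1, i.e.
	 * "through the end of the file".
	 */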

	switch (advice) {
	case POSIX_FADV_NORMAL:
		file->f_ra.ra_pages = bdi->ra_pages;
		spin_lock(&file->f_lock);
		file->f_mode &= ~FMODE_RANDOM;
		spin_unlock(&file->f_lock);
		break;
	case POSIX_FADV_RANDOM:
		spin_lock(&file->f_lock);
		file->f_mode |= FMODE_RANDOM;
		spin_unlock(&file->f_lock);
		break;
	case POSIX_FADV_SEQUENTIAL:
		file->f_ra.ra_pages = bdi->ra_pages * 2;
		spin_lock(&file->f_lock);
		file->f_mode &= ~FMODE_RANDOM;
		spin_unlock(&file->f_lock);
		break;
	case POSIX_FADV_WILLNEED:
		/* First and last PARTIAL page! */
		start_index = offset >> PAGE_SHIFT;
		end_index = endbyte >> PAGE_SHIFT;

		/* Careful about overflow on the "+1" */
		nrpages = end_index - start_index + 1;
		if (!nrpages)
			nrpages = ~0UL;
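
		/*
		 * Worked example: with 4K pages, offset 3000 and len 5000
		 * cover bytes 3000..7999, i.e. pages 0 and 1, so both
		 * partially covered pages are read ahead (nrpages == 2).
		 * With endbyte == -1 and start_index == 0 the "+1" wraps
		 * nrpages to 0, which is remapped to "as many as possible".
		 */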
		force_page_cache_readahead(mapping, file, start_index, nrpages);
		break;
	case POSIX_FADV_NOREUSE:
		break;
	case POSIX_FADV_DONTNEED:
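		/*
		 * Write back dirty pages first: the invalidation below can
		 * only drop clean pages, so cleaning them now makes it much
		 * more likely the range can actually be freed.  If the
		 * device is already write-congested, skip the writeback and
		 * let the invalidation pass leave dirty pages behind.
		 */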
		if (!inode_write_congested(mapping->host))
			__filemap_fdatawrite_range(mapping, offset, endbyte,
						   WB_SYNC_NONE);

		/*
		 * First and last FULL page! Partial pages are deliberately
		 * preserved on the expectation that it is better to preserve
		 * needed memory than to discard unneeded memory.
		 */
		start_index = (offset+(PAGE_SIZE-1)) >> PAGE_SHIFT;
		end_index = (endbyte >> PAGE_SHIFT);
		/*
		 * The page at end_index will be inclusively discarded by
		 * invalidate_mapping_pages(), so subtracting 1 from
		 * end_index means we will skip the last page.  But if endbyte
		 * is page aligned or is at the end of file, we should not skip
		 * that page - discarding the last page is safe enough.
		 */
		if ((endbyte & ~PAGE_MASK) != ~PAGE_MASK &&
				endbyte != inode->i_size - 1) {
			/*
			 * First page is tricky as 0 - 1 = -1, but pgoff_t
			 * is unsigned, so the end_index >= start_index
			 * check below would be true and we'll discard the
			 * whole file cache which is not what was asked.
			 */
			if (end_index == 0)
				break;

			end_index--;
		}

		if (end_index >= start_index) {
			unsigned long nr_pagevec = 0;

			/*
			 * It's common to FADV_DONTNEED right after
			 * the read or write that instantiates the
			 * pages, in which case there will be some
			 * sitting on the local LRU cache. Try to
			 * avoid the expensive remote drain and the
			 * second cache tree walk below by flushing
			 * them out right away.
			 */
			lru_add_drain();

			invalidate_mapping_pagevec(mapping,
						start_index, end_index,
						&nr_pagevec);

			/*
			 * If fewer pages were invalidated than expected then
			 * it is possible that some of the pages were on
			 * a per-cpu pagevec for a remote CPU. Drain all
			 * pagevecs and try again.
			 */
			if (nr_pagevec) {
				lru_add_drain_all();
				invalidate_mapping_pages(mapping, start_index,
						end_index);
			}
		}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL(generic_fadvise);

int vfs_fadvise(struct file *file, loff_t offset, loff_t len, int advice)
{
	if (file->f_op->fadvise)
		return file->f_op->fadvise(file, offset, len, advice);

	return generic_fadvise(file, offset, len, advice);
}
EXPORT_SYMBOL(vfs_fadvise);
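
/*
 * Illustrative sketch (hypothetical names, not part of this file): a
 * filesystem that wants to intercept advice supplies its own ->fadvise
 * handler and falls back to generic_fadvise() for everything it does
 * not treat specially:
 *
 *	static int myfs_fadvise(struct file *file, loff_t offset,
 *				loff_t len, int advice)
 *	{
 *		if (advice == POSIX_FADV_DONTNEED)
 *			pr_debug("myfs: dropping cached pages\n");
 *		return generic_fadvise(file, offset, len, advice);
 *	}
 *
 *	static const struct file_operations myfs_file_operations = {
 *		.fadvise	= myfs_fadvise,
 *	};
 */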

#ifdef CONFIG_ADVISE_SYSCALLS

int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
{
	struct fd f = fdget(fd);
	int ret;

	if (!f.file)
		return -EBADF;

	ret = vfs_fadvise(f.file, offset, len, advice);

	fdput(f);
	return ret;
}

SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice)
{
	return ksys_fadvise64_64(fd, offset, len, advice);
}

#ifdef __ARCH_WANT_SYS_FADVISE64
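
/*
 * Older ABI variant: fadvise64 takes a size_t length where fadvise64_64
 * takes a loff_t, so on architectures that still provide it the length
 * is simply widened on the way into ksys_fadvise64_64().
 */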

SYSCALL_DEFINE4(fadvise64, int, fd, loff_t, offset, size_t, len, int, advice)
{
	return ksys_fadvise64_64(fd, offset, len, advice);
}

#endif
#endif
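
/*
 * Userspace view (illustrative sketch): the syscalls above back the
 * POSIX posix_fadvise() wrapper in libc.  For example, to drop a file's
 * clean cached pages once a process is done with it:
 *
 *	#include <fcntl.h>
 *
 *	int drop_cache(int fd)
 *	{
 *		return posix_fadvise(fd, 0, 0, POSIX_FADV_DONTNEED);
 *	}
 *
 * A len of 0 means "through the end of the file", and posix_fadvise()
 * returns the error number directly instead of setting errno.
 */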