// SPDX-License-Identifier: GPL-2.0
/*
 * mm/fadvise.c
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 11Jan2003	Andrew Morton
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/fadvise.h>
#include <linux/writeback.h>
#include <linux/syscalls.h>
#include <linux/swap.h>

#include <asm/unistd.h>

#include "internal.h"

/*
 * POSIX_FADV_WILLNEED could set PG_Referenced, and POSIX_FADV_NOREUSE could
 * deactivate the pages and clear PG_Referenced.
 */
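
/*
 * generic_fadvise - default handling of the fadvise hints.
 * Returns 0 on success, -ESPIPE for FIFOs, and -EINVAL for an unusable
 * mapping, a negative length or an unknown advice value.  Typically reached
 * from userspace via the fadvise64/fadvise64_64 syscalls (what glibc's
 * posix_fadvise() ends up calling) unless the filesystem supplies its own
 * ->fadvise handler.
 */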
int generic_fadvise(struct file *file, loff_t offset, loff_t len, int advice)
{
        struct inode *inode;
        struct address_space *mapping;
        struct backing_dev_info *bdi;
        loff_t endbyte;                 /* inclusive */
        pgoff_t start_index;
        pgoff_t end_index;
        unsigned long nrpages;

        inode = file_inode(file);
        if (S_ISFIFO(inode->i_mode))
                return -ESPIPE;

        mapping = file->f_mapping;
        if (!mapping || len < 0)
                return -EINVAL;

        bdi = inode_to_bdi(mapping->host);
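
        /*
         * DAX mappings bypass the page cache and a noop backing device has
         * no readahead or writeback state to tune, so the advice below would
         * have nothing useful to act on: accept the known hints and do
         * nothing.
         */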
        if (IS_DAX(inode) || (bdi == &noop_backing_dev_info)) {
                switch (advice) {
                case POSIX_FADV_NORMAL:
                case POSIX_FADV_RANDOM:
                case POSIX_FADV_SEQUENTIAL:
                case POSIX_FADV_WILLNEED:
                case POSIX_FADV_NOREUSE:
                case POSIX_FADV_DONTNEED:
                        /* no bad return value, but ignore advice */
                        break;
                default:
                        return -EINVAL;
                }
                return 0;
        }

        /*
         * Careful about overflows. Len == 0 means "as much as possible". Use
         * unsigned math because signed overflows are undefined and UBSan
         * complains.
         */
        endbyte = (u64)offset + (u64)len;
        if (!len || endbyte < len)
                endbyte = -1;
        else
                endbyte--;              /* inclusive */
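
        /*
         * endbyte is now the last byte the advice applies to, with -1
         * standing in for "through the end of the file" when len was 0 or
         * the range overflowed.
         */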
        switch (advice) {
        case POSIX_FADV_NORMAL:
                file->f_ra.ra_pages = bdi->ra_pages;
                spin_lock(&file->f_lock);
                file->f_mode &= ~FMODE_RANDOM;
                spin_unlock(&file->f_lock);
                break;
        case POSIX_FADV_RANDOM:
                spin_lock(&file->f_lock);
                file->f_mode |= FMODE_RANDOM;
                spin_unlock(&file->f_lock);
                break;
        case POSIX_FADV_SEQUENTIAL:
                file->f_ra.ra_pages = bdi->ra_pages * 2;
                spin_lock(&file->f_lock);
                file->f_mode &= ~FMODE_RANDOM;
                spin_unlock(&file->f_lock);
                break;
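        /*
         * WILLNEED only kicks off readahead for the byte range; it does not
         * wait for the I/O to complete, so the pages may still be arriving
         * when the syscall returns.
         */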
        case POSIX_FADV_WILLNEED:
                /* First and last PARTIAL page! */
                start_index = offset >> PAGE_SHIFT;
                end_index = endbyte >> PAGE_SHIFT;

                /* Careful about overflow on the "+1" */
                nrpages = end_index - start_index + 1;
                if (!nrpages)
                        nrpages = ~0UL;

                force_page_cache_readahead(mapping, file, start_index, nrpages);
                break;
        case POSIX_FADV_NOREUSE:
                break;
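        /*
         * DONTNEED starts writeback for any dirty pages in the range and
         * then drops whatever clean pages it can; pages that cannot be
         * invalidated (still dirty, locked or mapped) are simply left in
         * place.
         */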
        case POSIX_FADV_DONTNEED:
                if (!inode_write_congested(mapping->host))
                        __filemap_fdatawrite_range(mapping, offset, endbyte,
                                                   WB_SYNC_NONE);

                /*
                 * First and last FULL page! Partial pages are deliberately
                 * preserved on the expectation that it is better to preserve
                 * needed memory than to discard unneeded memory.
                 */
                start_index = (offset+(PAGE_SIZE-1)) >> PAGE_SHIFT;
                end_index = (endbyte >> PAGE_SHIFT);
                /*
                 * The page at end_index will be inclusively discarded by
                 * invalidate_mapping_pages(), so subtracting 1 from
                 * end_index means we will skip the last page.  But if endbyte
                 * is page aligned or is at the end of file, we should not skip
                 * that page - discarding the last page is safe enough.
                 */
                if ((endbyte & ~PAGE_MASK) != ~PAGE_MASK &&
                                endbyte != inode->i_size - 1) {
                        /* First page is tricky as 0 - 1 = -1, but pgoff_t
                         * is unsigned, so the end_index >= start_index
                         * check below would be true and we'll discard the
                         * whole file cache which is not what was asked.
                         */
                        if (end_index == 0)
                                break;

                        end_index--;
                }

                if (end_index >= start_index) {
                        unsigned long nr_pagevec = 0;

                        /*
                         * It's common to FADV_DONTNEED right after
                         * the read or write that instantiates the
                         * pages, in which case there will be some
                         * sitting on the local LRU cache. Try to
                         * avoid the expensive remote drain and the
                         * second cache tree walk below by flushing
                         * them out right away.
                         */
                        lru_add_drain();

                        invalidate_mapping_pagevec(mapping,
                                                start_index, end_index,
                                                &nr_pagevec);

                        /*
                         * If fewer pages were invalidated than expected then
                         * it is possible that some of the pages were on
                         * a per-cpu pagevec for a remote CPU. Drain all
                         * pagevecs and try again.
                         */
                        if (nr_pagevec) {
                                lru_add_drain_all();
                                invalidate_mapping_pages(mapping, start_index,
                                                end_index);
                        }
                }
                break;
        default:
                return -EINVAL;
        }
        return 0;
}
EXPORT_SYMBOL(generic_fadvise);
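
/*
 * vfs_fadvise - hand the hint to the filesystem if it provides its own
 * ->fadvise file operation, otherwise fall back to generic_fadvise() above.
 */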
int vfs_fadvise(struct file *file, loff_t offset, loff_t len, int advice)
{
        if (file->f_op->fadvise)
                return file->f_op->fadvise(file, offset, len, advice);

        return generic_fadvise(file, offset, len, advice);
}
EXPORT_SYMBOL(vfs_fadvise);
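
/*
 * The syscall entry points below are only built when the kernel is
 * configured with CONFIG_ADVISE_SYSCALLS; the in-kernel helpers above are
 * always available.
 */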
#ifdef CONFIG_ADVISE_SYSCALLS

int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
{
        struct fd f = fdget(fd);
        int ret;

        if (!f.file)
                return -EBADF;

        ret = vfs_fadvise(f.file, offset, len, advice);

        fdput(f);
        return ret;
}
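
/*
 * Two syscall flavours exist: fadvise64_64 always takes a 64-bit length,
 * while the older fadvise64 entry point (only wired up on architectures
 * that define __ARCH_WANT_SYS_FADVISE64) takes a size_t length.  Both end
 * up in ksys_fadvise64_64() above.
 */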
SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice)
{
        return ksys_fadvise64_64(fd, offset, len, advice);
}

#ifdef __ARCH_WANT_SYS_FADVISE64

SYSCALL_DEFINE4(fadvise64, int, fd, loff_t, offset, size_t, len, int, advice)
{
        return ksys_fadvise64_64(fd, offset, len, advice);
}

#endif
#endif