/*
 * mm/readahead.c - address_space-level file readahead.
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 09Apr2002	Andrew Morton
 */

#include <linux/kernel.h>
#include <linux/dax.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/pagevec.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/file.h>
#include <linux/mm_inline.h>
#include <linux/blk-cgroup.h>
#include <linux/fadvise.h>

#include "internal.h"

/*
 * Initialise a struct file's readahead state.  Assumes that the caller has
 * memset *ra to zero.
 */
void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
{
        ra->ra_pages = inode_to_bdi(mapping->host)->ra_pages;
        ra->prev_pos = -1;
}
EXPORT_SYMBOL_GPL(file_ra_state_init);

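/*
 * Illustrative sketch (not part of this file): an open path that has just
 * allocated a zeroed struct file would typically set up readahead with
 * something like:
 *
 *	file_ra_state_init(&f->f_ra, f->f_mapping);
 */
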
/*
 * see if a page needs releasing upon read_cache_pages() failure
 * - the caller of read_cache_pages() may have set PG_private or PG_fscache
 *   before calling, such as the NFS fs marking pages that are cached locally
 *   on disk, thus we need to give the fs a chance to clean up in the event of
 *   an error
 */
static void read_cache_pages_invalidate_page(struct address_space *mapping,
                                             struct page *page)
{
        if (page_has_private(page)) {
                if (!trylock_page(page))
                        BUG();
                page->mapping = mapping;
                do_invalidatepage(page, 0, PAGE_SIZE);
                page->mapping = NULL;
                unlock_page(page);
        }
        put_page(page);
}

/*
 * release a list of pages, invalidating them first if need be
 */
static void read_cache_pages_invalidate_pages(struct address_space *mapping,
                                              struct list_head *pages)
{
        struct page *victim;

        while (!list_empty(pages)) {
                victim = lru_to_page(pages);
                list_del(&victim->lru);
                read_cache_pages_invalidate_page(mapping, victim);
        }
}

/**
 * read_cache_pages - populate an address space with some pages & start reads against them
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *   pages have their ->index populated and are otherwise uninitialised.
 * @filler: callback routine for filling a single page.
 * @data: private data for the callback routine.
 *
 * Hides the details of the LRU cache etc from the filesystems.
 */
int read_cache_pages(struct address_space *mapping, struct list_head *pages,
                        int (*filler)(void *, struct page *), void *data)
{
        struct page *page;
        int ret = 0;

        while (!list_empty(pages)) {
                page = lru_to_page(pages);
                list_del(&page->lru);
                if (add_to_page_cache_lru(page, mapping, page->index,
                                readahead_gfp_mask(mapping))) {
                        read_cache_pages_invalidate_page(mapping, page);
                        continue;
                }
                put_page(page);

                ret = filler(data, page);
                if (unlikely(ret)) {
                        read_cache_pages_invalidate_pages(mapping, pages);
                        break;
                }
                task_io_account_read(PAGE_SIZE);
        }
        return ret;
}
EXPORT_SYMBOL(read_cache_pages);

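/*
 * Illustrative sketch (not part of this file): a filesystem's ->readpages()
 * implementation can hand its page list to read_cache_pages() together with
 * a per-page filler callback.  The names my_filler, my_read_state and
 * my_start_page_read below are hypothetical:
 *
 *	static int my_filler(void *data, struct page *page)
 *	{
 *		struct my_read_state *state = data;
 *
 *		return my_start_page_read(state, page);
 *	}
 *
 *	...
 *	ret = read_cache_pages(mapping, pages, my_filler, &state);
 */
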
static int read_pages(struct address_space *mapping, struct file *filp,
                struct list_head *pages, unsigned int nr_pages, gfp_t gfp)
{
        struct blk_plug plug;
        unsigned page_idx;
        int ret;

        blk_start_plug(&plug);

        if (mapping->a_ops->readpages) {
                ret = mapping->a_ops->readpages(filp, mapping, pages, nr_pages);
                /* Clean up the remaining pages */
                put_pages_list(pages);
                goto out;
        }

        for (page_idx = 0; page_idx < nr_pages; page_idx++) {
                struct page *page = lru_to_page(pages);
                list_del(&page->lru);
                if (!add_to_page_cache_lru(page, mapping, page->index, gfp))
                        mapping->a_ops->readpage(filp, page);
                put_page(page);
        }
        ret = 0;

out:
        blk_finish_plug(&plug);

        return ret;
}

/*
 * __do_page_cache_readahead() actually reads a chunk of disk.  It allocates
 * the pages first, then submits them for I/O. This avoids the very bad
 * behaviour which would occur if page allocations are causing VM writeback.
 * We really don't want to intermingle reads and writes like that.
 *
 * Returns the number of pages requested, or the maximum amount of I/O allowed.
 */
unsigned int __do_page_cache_readahead(struct address_space *mapping,
                struct file *filp, pgoff_t offset, unsigned long nr_to_read,
                unsigned long lookahead_size)
{
        struct inode *inode = mapping->host;
        struct page *page;
        unsigned long end_index;        /* The last page we want to read */
        LIST_HEAD(page_pool);
        int page_idx;
        unsigned int nr_pages = 0;
        loff_t isize = i_size_read(inode);
        gfp_t gfp_mask = readahead_gfp_mask(mapping);

        if (isize == 0)
                goto out;

        end_index = ((isize - 1) >> PAGE_SHIFT);

        /*
         * Preallocate as many pages as we will need.
         */
        for (page_idx = 0; page_idx < nr_to_read; page_idx++) {
                pgoff_t page_offset = offset + page_idx;

                if (page_offset > end_index)
                        break;

                page = radix_tree_lookup(&mapping->i_pages, page_offset);
                if (page && !radix_tree_exceptional_entry(page)) {
                        /*
                         * Page already present?  Kick off the current batch of
                         * contiguous pages before continuing with the next
                         * batch.
                         */
                        if (nr_pages)
                                read_pages(mapping, filp, &page_pool, nr_pages,
                                                gfp_mask);
                        nr_pages = 0;
                        continue;
                }

                page = __page_cache_alloc(gfp_mask);
                if (!page)
                        break;
                page->index = page_offset;
                list_add(&page->lru, &page_pool);
                if (page_idx == nr_to_read - lookahead_size)
                        SetPageReadahead(page);
                nr_pages++;
        }

        /*
         * Now start the IO.  We ignore I/O errors - if the page is not
         * uptodate then the caller will launch readpage again, and
         * will then handle the error.
         */
        if (nr_pages)
                read_pages(mapping, filp, &page_pool, nr_pages, gfp_mask);
        BUG_ON(!list_empty(&page_pool));
out:
        return nr_pages;
}

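/*
 * For reference: ra_submit() (a small helper in mm/internal.h) feeds the
 * current readahead window into this function, roughly as:
 *
 *	return __do_page_cache_readahead(mapping, filp,
 *					 ra->start, ra->size, ra->async_size);
 *
 * so the PG_readahead marker lands async_size pages before the window end.
 */
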
/*
 * Chunk the readahead into 2 megabyte units, so that we don't pin too much
 * memory at once.
 */
int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
                               pgoff_t offset, unsigned long nr_to_read)
{
        struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
        struct file_ra_state *ra = &filp->f_ra;
        unsigned long max_pages;

        if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages))
                return -EINVAL;

        /*
         * If the request exceeds the readahead window, allow the read to
         * be up to the optimal hardware IO size
         */
        max_pages = max_t(unsigned long, bdi->io_pages, ra->ra_pages);
        nr_to_read = min(nr_to_read, max_pages);
        while (nr_to_read) {
                unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_SIZE;

                if (this_chunk > nr_to_read)
                        this_chunk = nr_to_read;
                __do_page_cache_readahead(mapping, filp, offset, this_chunk, 0);

                offset += this_chunk;
                nr_to_read -= this_chunk;
        }
        return 0;
}

/*
 * Set the initial window size, round to next power of 2 and square
 * for small size, x 4 for medium, and x 2 for large
 * for 128k (32 page) max ra
 * 1-8 page = 32k initial, > 8 page = 128k initial
 */
static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
{
        unsigned long newsize = roundup_pow_of_two(size);

        if (newsize <= max / 32)
                newsize = newsize * 4;
        else if (newsize <= max / 4)
                newsize = newsize * 2;
        else
                newsize = max;

        return newsize;
}

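/*
 * Worked example of the ramp-up above, with a 32-page (128 KiB) max:
 * a 1-page first read rounds to 1 <= max/32 and gets a 4-page initial
 * window; a 4-page read falls in the <= max/4 band and gets 8 pages;
 * a request that rounds up past max/4 jumps straight to max.
 */
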
/*
 * Get the previous window size, ramp it up, and
 * return it as the new window size.
 */
static unsigned long get_next_ra_size(struct file_ra_state *ra,
                                      unsigned long max)
{
        unsigned long cur = ra->size;
        unsigned long newsize;

        if (cur < max / 16)
                newsize = 4 * cur;
        else
                newsize = 2 * cur;

        return min(newsize, max);
}

/*
 * On-demand readahead design.
 *
 * The fields in struct file_ra_state represent the most-recently-executed
 * readahead attempt:
 *
 *                        |<----- async_size ---------|
 *     |------------------- size -------------------->|
 *     |==================#===========================|
 *     ^start             ^page marked with PG_readahead
 *
 * To overlap application thinking time and disk I/O time, we do
 * `readahead pipelining': Do not wait until the application consumed all
 * readahead pages and stalled on the missing page at readahead_index;
 * Instead, submit an asynchronous readahead I/O as soon as there are
 * only async_size pages left in the readahead window. Normally async_size
 * will be equal to size, for maximum pipelining.
 *
 * In interleaved sequential reads, concurrent streams on the same fd can
 * be invalidating each other's readahead state. So we flag the new readahead
 * page at (start+size-async_size) with PG_readahead, and use it as a
 * readahead indicator. The flag won't be set on already cached pages, to
 * avoid the readahead-for-nothing fuss, saving pointless page cache lookups.
 *
 * prev_pos tracks the last visited byte in the _previous_ read request.
 * It should be maintained by the caller, and will be used for detecting
 * small random reads. Note that the readahead algorithm checks loosely
 * for sequential patterns. Hence interleaved reads might be served as
 * sequential ones.
 *
 * There is a special-case: if the first page which the application tries to
 * read happens to be the first page of the file, it is assumed that a linear
 * read is about to happen and the window is immediately set to the initial
 * size based on the I/O request size and the max_readahead.
 *
 * The code ramps up the readahead size aggressively at first, but slows down
 * as it approaches max_readahead.
 */

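/*
 * Worked example of the pipelining above: suppose the current window is
 * start=100, size=32, async_size=32, so page 100 carries PG_readahead.
 * When the application reaches page 100 (the marker), ondemand_readahead()
 * sees offset == start + size - async_size, advances start to 132, ramps
 * size up via get_next_ra_size() and submits the new window asynchronously,
 * so by the time the application reaches page 132 that data is already in
 * flight or cached.
 */
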
/*
 * Count contiguously cached pages from @offset-1 to @offset-@max,
 * this count is a conservative estimation of
 *	- length of the sequential read sequence, or
 *	- thrashing threshold in memory tight systems
 */
static pgoff_t count_history_pages(struct address_space *mapping,
                                   pgoff_t offset, unsigned long max)
{
        pgoff_t head;

        rcu_read_lock();
        head = page_cache_prev_hole(mapping, offset - 1, max);
        rcu_read_unlock();

        return offset - 1 - head;
}

/*
 * page cache context based read-ahead
 */
static int try_context_readahead(struct address_space *mapping,
                                 struct file_ra_state *ra,
                                 pgoff_t offset,
                                 unsigned long req_size,
                                 unsigned long max)
{
        pgoff_t size;

        size = count_history_pages(mapping, offset, max);

        /*
         * not enough history pages:
         * it could be a random read
         */
        if (size <= req_size)
                return 0;

        /*
         * starts from beginning of file:
         * it is a strong indication of long-run stream (or whole-file-read)
         */
        if (size >= offset)
                size *= 2;

        ra->start = offset;
        ra->size = min(size + req_size, max);
        ra->async_size = 1;

        return 1;
}

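/*
 * Worked example of the context readahead above: a stream whose readahead
 * state was lost (say the fd was reopened) reads 4 pages at offset 20 while
 * pages 0-19 are still cached.  count_history_pages() reports 20 contiguous
 * history pages, more than the 4-page request; the run reaches back to the
 * start of the file, so the estimate is doubled and a full-size window is
 * restored instead of treating the read as random.
 */
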
/*
 * A minimal readahead algorithm for trivial sequential/random reads.
 */
static unsigned long
ondemand_readahead(struct address_space *mapping,
                   struct file_ra_state *ra, struct file *filp,
                   bool hit_readahead_marker, pgoff_t offset,
                   unsigned long req_size)
{
        struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
        unsigned long max_pages = ra->ra_pages;
        unsigned long add_pages;
        pgoff_t prev_offset;

        /*
         * If the request exceeds the readahead window, allow the read to
         * be up to the optimal hardware IO size
         */
        if (req_size > max_pages && bdi->io_pages > max_pages)
                max_pages = min(req_size, bdi->io_pages);

        /*
         * start of file
         */
        if (!offset)
                goto initial_readahead;

        /*
         * It's the expected callback offset, assume sequential access.
         * Ramp up sizes, and push forward the readahead window.
         */
        if ((offset == (ra->start + ra->size - ra->async_size) ||
             offset == (ra->start + ra->size))) {
                ra->start += ra->size;
                ra->size = get_next_ra_size(ra, max_pages);
                ra->async_size = ra->size;
                goto readit;
        }

        /*
         * Hit a marked page without valid readahead state.
         * E.g. interleaved reads.
         * Query the pagecache for async_size, which normally equals the
         * readahead size. Ramp it up and use it as the new readahead size.
         */
        if (hit_readahead_marker) {
                pgoff_t start;

                rcu_read_lock();
                start = page_cache_next_hole(mapping, offset + 1, max_pages);
                rcu_read_unlock();

                if (!start || start - offset > max_pages)
                        return 0;

                ra->start = start;
                ra->size = start - offset;      /* old async_size */
                ra->size += req_size;
                ra->size = get_next_ra_size(ra, max_pages);
                ra->async_size = ra->size;
                goto readit;
        }

        /*
         * oversize read
         */
        if (req_size > max_pages)
                goto initial_readahead;

        /*
         * sequential cache miss
         * trivial case: (offset - prev_offset) == 1
         * unaligned reads: (offset - prev_offset) == 0
         */
        prev_offset = (unsigned long long)ra->prev_pos >> PAGE_SHIFT;
        if (offset - prev_offset <= 1UL)
                goto initial_readahead;

        /*
         * Query the page cache and look for the traces (cached history pages)
         * that a sequential stream would leave behind.
         */
        if (try_context_readahead(mapping, ra, offset, req_size, max_pages))
                goto readit;

        /*
         * standalone, small random read
         * Read as is, and do not pollute the readahead state.
         */
        return __do_page_cache_readahead(mapping, filp, offset, req_size, 0);

initial_readahead:
        ra->start = offset;
        ra->size = get_init_ra_size(req_size, max_pages);
        ra->async_size = ra->size > req_size ? ra->size - req_size : ra->size;

readit:
        /*
         * Will this read hit the readahead marker made by itself?
         * If so, trigger the readahead marker hit now, and merge
         * the resulting next readahead window into the current one.
         * Take care of maximum IO pages as above.
         */
        if (offset == ra->start && ra->size == ra->async_size) {
                add_pages = get_next_ra_size(ra, max_pages);
                if (ra->size + add_pages <= max_pages) {
                        ra->async_size = add_pages;
                        ra->size += add_pages;
                } else {
                        ra->size = max_pages;
                        ra->async_size = max_pages >> 1;
                }
        }

        return ra_submit(ra, mapping, filp);
}

/**
 * page_cache_sync_readahead - generic file readahead
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @filp: passed on to ->readpage() and ->readpages()
 * @offset: start offset into @mapping, in pagecache page-sized units
 * @req_size: hint: total size of the read which the caller is performing in
 *            pagecache pages
 *
 * page_cache_sync_readahead() should be called when a cache miss happened:
 * it will submit the read.  The readahead logic may decide to piggyback more
 * pages onto the read request if access patterns suggest it will improve
 * performance.
 */
void page_cache_sync_readahead(struct address_space *mapping,
                               struct file_ra_state *ra, struct file *filp,
                               pgoff_t offset, unsigned long req_size)
{
        /* no read-ahead */
        if (!ra->ra_pages)
                return;

        if (blk_cgroup_congested())
                return;

        /* be dumb */
        if (filp && (filp->f_mode & FMODE_RANDOM)) {
                force_page_cache_readahead(mapping, filp, offset, req_size);
                return;
        }

        /* do read-ahead */
        ondemand_readahead(mapping, ra, filp, false, offset, req_size);
}
EXPORT_SYMBOL_GPL(page_cache_sync_readahead);

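/*
 * Note: the FMODE_RANDOM test above is what userspace toggles through
 * posix_fadvise(); an illustrative userspace sketch:
 *
 *	// skip window ramp-up, read only what was asked for
 *	posix_fadvise(fd, 0, 0, POSIX_FADV_RANDOM);
 *	// restore the normal heuristic
 *	posix_fadvise(fd, 0, 0, POSIX_FADV_NORMAL);
 */
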
/**
 * page_cache_async_readahead - file readahead for marked pages
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @filp: passed on to ->readpage() and ->readpages()
 * @page: the page at @offset which has the PG_readahead flag set
 * @offset: start offset into @mapping, in pagecache page-sized units
 * @req_size: hint: total size of the read which the caller is performing in
 *            pagecache pages
 *
 * page_cache_async_readahead() should be called when a page is used which
 * has the PG_readahead flag; this is a marker to suggest that the application
 * has used up enough of the readahead window that we should start pulling in
 * more pages.
 */
void
page_cache_async_readahead(struct address_space *mapping,
                           struct file_ra_state *ra, struct file *filp,
                           struct page *page, pgoff_t offset,
                           unsigned long req_size)
{
        /* no read-ahead */
        if (!ra->ra_pages)
                return;

        /*
         * Same bit is used for PG_readahead and PG_reclaim.
         */
        if (PageWriteback(page))
                return;

        ClearPageReadahead(page);

        /*
         * Defer asynchronous read-ahead on IO congestion.
         */
        if (inode_read_congested(mapping->host))
                return;

        if (blk_cgroup_congested())
                return;

        /* do read-ahead */
        ondemand_readahead(mapping, ra, filp, true, offset, req_size);
}
EXPORT_SYMBOL_GPL(page_cache_async_readahead);

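/*
 * Illustrative sketch (not part of this file) of how a buffered read path is
 * expected to drive the two entry points above: a cache miss triggers the
 * synchronous variant, and touching the PG_readahead marker triggers the
 * next asynchronous window while the application is still consuming the
 * current one:
 *
 *	page = find_get_page(mapping, index);
 *	if (!page)
 *		page_cache_sync_readahead(mapping, ra, filp, index,
 *					  last_index - index);
 *	else if (PageReadahead(page))
 *		page_cache_async_readahead(mapping, ra, filp, page, index,
 *					   last_index - index);
 */
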
ssize_t ksys_readahead(int fd, loff_t offset, size_t count)
{
        ssize_t ret;
        struct fd f;

        ret = -EBADF;
        f = fdget(fd);
        if (!f.file || !(f.file->f_mode & FMODE_READ))
                goto out;

        /*
         * The readahead() syscall is intended to run only on files
         * that can execute readahead. If readahead is not possible
         * on this file, then we must return -EINVAL.
         */
        ret = -EINVAL;
        if (!f.file->f_mapping || !f.file->f_mapping->a_ops ||
            !S_ISREG(file_inode(f.file)->i_mode))
                goto out;

        ret = vfs_fadvise(f.file, offset, count, POSIX_FADV_WILLNEED);
out:
        fdput(f);
        return ret;
}

SYSCALL_DEFINE3(readahead, int, fd, loff_t, offset, size_t, count)
{
        return ksys_readahead(fd, offset, count);
}
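
/*
 * Illustrative userspace sketch (not part of this file): the syscall wired
 * up above is typically used to warm the page cache before a file is read;
 * as the implementation shows, it has the same effect as fadvise with
 * POSIX_FADV_WILLNEED.  The file name is hypothetical:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <sys/stat.h>
 *
 *	int fd = open("data.bin", O_RDONLY);
 *	struct stat st;
 *
 *	fstat(fd, &st);
 *	readahead(fd, 0, st.st_size);
 */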