/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>

unsigned long invalidate_mapping_pages(struct address_space *mapping,
                                        pgoff_t start, pgoff_t end);

static inline void invalidate_remote_inode(struct inode *inode)
{
        if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
            S_ISLNK(inode->i_mode))
                invalidate_mapping_pages(inode->i_mapping, 0, -1);
}

int invalidate_inode_pages2(struct address_space *mapping);
int invalidate_inode_pages2_range(struct address_space *mapping,
                pgoff_t start, pgoff_t end);
int kiocb_invalidate_pages(struct kiocb *iocb, size_t count);
void kiocb_invalidate_post_direct_write(struct kiocb *iocb, size_t count);
int filemap_invalidate_pages(struct address_space *mapping,
                loff_t pos, loff_t end, bool nowait);

int write_inode_now(struct inode *, int sync);
int filemap_fdatawrite(struct address_space *);
int filemap_flush(struct address_space *);
int filemap_fdatawait_keep_errors(struct address_space *mapping);
int filemap_fdatawait_range(struct address_space *, loff_t lstart, loff_t lend);
int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
                loff_t start_byte, loff_t end_byte);
int filemap_invalidate_inode(struct inode *inode, bool flush,
                loff_t start, loff_t end);

static inline int filemap_fdatawait(struct address_space *mapping)
{
        return filemap_fdatawait_range(mapping, 0, LLONG_MAX);
}

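/*
 * Usage sketch (illustrative, not part of this header): because starting
 * write-out and waiting for it are split, a caller flushing several mappings
 * can kick off all the I/O first and only then wait, overlapping writeback:
 *
 *	int i, err = 0, ret;
 *
 *	for (i = 0; i < nr; i++)
 *		filemap_fdatawrite(mappings[i]);
 *	for (i = 0; i < nr; i++) {
 *		ret = filemap_fdatawait(mappings[i]);
 *		if (ret && !err)
 *			err = ret;
 *	}
 */
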
bool filemap_range_has_page(struct address_space *, loff_t lstart, loff_t lend);
int filemap_write_and_wait_range(struct address_space *mapping,
                loff_t lstart, loff_t lend);
int __filemap_fdatawrite_range(struct address_space *mapping,
                loff_t start, loff_t end, int sync_mode);
int filemap_fdatawrite_range(struct address_space *mapping,
                loff_t start, loff_t end);
int filemap_check_errors(struct address_space *mapping);
void __filemap_set_wb_err(struct address_space *mapping, int err);
int filemap_fdatawrite_wbc(struct address_space *mapping,
                struct writeback_control *wbc);
int kiocb_write_and_wait(struct kiocb *iocb, size_t count);

static inline int filemap_write_and_wait(struct address_space *mapping)
{
        return filemap_write_and_wait_range(mapping, 0, LLONG_MAX);
}

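/*
 * Example (an illustrative sketch, not part of this header): a minimal
 * ->fsync() implementation for a hypothetical "myfs" could be built
 * directly on filemap_write_and_wait_range():
 *
 *	static int myfs_fsync(struct file *file, loff_t start, loff_t end,
 *			      int datasync)
 *	{
 *		struct inode *inode = file_inode(file);
 *
 *		return filemap_write_and_wait_range(inode->i_mapping,
 *						    start, end);
 *	}
 */
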
/**
 * filemap_set_wb_err - set a writeback error on an address_space
 * @mapping: mapping in which to set writeback error
 * @err: error to be set in mapping
 *
 * When writeback fails in some way, we must record that error so that
 * userspace can be informed when fsync and the like are called.  We endeavor
 * to report errors on any file that was open at the time of the error.  Some
 * internal callers also need to know when writeback errors have occurred.
 *
 * When a writeback error occurs, most filesystems will want to call
 * filemap_set_wb_err to record the error in the mapping so that it will be
 * automatically reported whenever fsync is called on the file.
 */
static inline void filemap_set_wb_err(struct address_space *mapping, int err)
{
        /* Fastpath for common case of no error */
        if (unlikely(err))
                __filemap_set_wb_err(mapping, err);
}

/**
 * filemap_check_wb_err - has an error occurred since the mark was sampled?
 * @mapping: mapping to check for writeback errors
 * @since: previously-sampled errseq_t
 *
 * Grab the errseq_t value from the mapping, and see if it has changed "since"
 * the given value was sampled.
 *
 * If it has then report the latest error set, otherwise return 0.
 */
static inline int filemap_check_wb_err(struct address_space *mapping,
                                        errseq_t since)
{
        return errseq_check(&mapping->wb_err, since);
}

/**
 * filemap_sample_wb_err - sample the current errseq_t to test for later errors
 * @mapping: mapping to be sampled
 *
 * Writeback errors are always reported relative to a particular sample point
 * in the past.  This function provides those sample points.
 */
static inline errseq_t filemap_sample_wb_err(struct address_space *mapping)
{
        return errseq_sample(&mapping->wb_err);
}

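/*
 * Usage sketch (illustrative): internal callers that need to detect
 * writeback errors across an operation can bracket it with a sample and a
 * check; myfs_flush_range() here is a hypothetical flush helper:
 *
 *	errseq_t since = filemap_sample_wb_err(mapping);
 *
 *	myfs_flush_range(mapping, pos, end);
 *	return filemap_check_wb_err(mapping, since);
 */
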
/**
 * file_sample_sb_err - sample the current errseq_t to test for later errors
 * @file: file pointer to be sampled
 *
 * Grab the most current superblock-level errseq_t value for the given
 * struct file.
 */
static inline errseq_t file_sample_sb_err(struct file *file)
{
        return errseq_sample(&file->f_path.dentry->d_sb->s_wb_err);
}

/*
 * Flush file data before changing attributes.  Caller must hold any locks
 * required to prevent further writes to this file until we're done setting
 * flags.
 */
static inline int inode_drain_writes(struct inode *inode)
{
        inode_dio_wait(inode);
        return filemap_write_and_wait(inode->i_mapping);
}

static inline bool mapping_empty(struct address_space *mapping)
{
        return xa_empty(&mapping->i_pages);
}

/*
 * mapping_shrinkable - test if page cache state allows inode reclaim
 * @mapping: the page cache mapping
 *
 * This checks the mapping's cache state for the purpose of inode
 * reclaim and LRU management.
 *
 * The caller is expected to hold the i_lock, but is not required to
 * hold the i_pages lock, which usually protects cache state.  That's
 * because the i_lock and the list_lru lock that protect the inode and
 * its LRU state don't nest inside the irq-safe i_pages lock.
 *
 * Cache deletions are performed under the i_lock, which ensures that
 * when an inode goes empty, it will reliably get queued on the LRU.
 *
 * Cache additions do not acquire the i_lock and may race with this
 * check, in which case we'll report the inode as shrinkable when it
 * has cache pages.  This is okay: the shrinker also checks the
 * refcount and the referenced bit, which will be elevated or set in
 * the process of adding new cache pages to an inode.
 */
static inline bool mapping_shrinkable(struct address_space *mapping)
{
        void *head;

        /*
         * On highmem systems, there could be lowmem pressure from the
         * inodes before there is highmem pressure from the page
         * cache.  Make inodes shrinkable regardless of cache state.
         */
        if (IS_ENABLED(CONFIG_HIGHMEM))
                return true;

        /* Cache completely empty? Shrink away. */
        head = rcu_access_pointer(mapping->i_pages.xa_head);
        if (!head)
                return true;

        /*
         * The xarray stores single offset-0 entries directly in the
         * head pointer, which allows non-resident page cache entries
         * to escape the shadow shrinker's list of xarray nodes.  The
         * inode shrinker needs to pick them up under memory pressure.
         */
        if (!xa_is_node(head) && xa_is_value(head))
                return true;

        return false;
}

/*
 * Bits in mapping->flags.
 */
enum mapping_flags {
        AS_EIO          = 0,    /* IO error on async write */
        AS_ENOSPC       = 1,    /* ENOSPC on async write */
        AS_MM_ALL_LOCKS = 2,    /* under mm_take_all_locks() */
        AS_UNEVICTABLE  = 3,    /* e.g., ramdisk, SHM_LOCK */
        AS_EXITING      = 4,    /* final truncate in progress */
        /* writeback related tags are not used */
        AS_NO_WRITEBACK_TAGS = 5,
        AS_RELEASE_ALWAYS = 6,  /* Call ->release_folio(), even if no private data */
        AS_STABLE_WRITES = 7,   /* must wait for writeback before modifying
                                   folio contents */
        AS_INACCESSIBLE = 8,    /* Do not attempt direct R/W access to the mapping */
        /* Bits 16-25 are used for FOLIO_ORDER */
        AS_FOLIO_ORDER_BITS = 5,
        AS_FOLIO_ORDER_MIN = 16,
        AS_FOLIO_ORDER_MAX = AS_FOLIO_ORDER_MIN + AS_FOLIO_ORDER_BITS,
};

#define AS_FOLIO_ORDER_BITS_MASK ((1u << AS_FOLIO_ORDER_BITS) - 1)
#define AS_FOLIO_ORDER_MIN_MASK (AS_FOLIO_ORDER_BITS_MASK << AS_FOLIO_ORDER_MIN)
#define AS_FOLIO_ORDER_MAX_MASK (AS_FOLIO_ORDER_BITS_MASK << AS_FOLIO_ORDER_MAX)
#define AS_FOLIO_ORDER_MASK (AS_FOLIO_ORDER_MIN_MASK | AS_FOLIO_ORDER_MAX_MASK)

/**
 * mapping_set_error - record a writeback error in the address_space
 * @mapping: the mapping in which an error should be set
 * @error: the error to set in the mapping
 *
 * When writeback fails in some way, we must record that error so that
 * userspace can be informed when fsync and the like are called.  We endeavor
 * to report errors on any file that was open at the time of the error.  Some
 * internal callers also need to know when writeback errors have occurred.
 *
 * When a writeback error occurs, most filesystems will want to call
 * mapping_set_error to record the error in the mapping so that it can be
 * reported when the application calls fsync(2).
 */
static inline void mapping_set_error(struct address_space *mapping, int error)
{
        if (likely(!error))
                return;

        /* Record in wb_err for checkers using errseq_t based tracking */
        __filemap_set_wb_err(mapping, error);

        /* Record it in superblock */
        if (mapping->host)
                errseq_set(&mapping->host->i_sb->s_wb_err, error);

        /* Record it in flags for now, for legacy callers */
        if (error == -ENOSPC)
                set_bit(AS_ENOSPC, &mapping->flags);
        else
                set_bit(AS_EIO, &mapping->flags);
}

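/*
 * Example (an illustrative sketch, hypothetical myfs names): a writeback
 * completion handler records the error before ending writeback, so that a
 * later fsync() on the file can report it:
 *
 *	static void myfs_writeback_done(struct folio *folio, int err)
 *	{
 *		if (err)
 *			mapping_set_error(folio->mapping, err);
 *		folio_end_writeback(folio);
 *	}
 */
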
static inline void mapping_set_unevictable(struct address_space *mapping)
{
        set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
        clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline bool mapping_unevictable(struct address_space *mapping)
{
        return mapping && test_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_set_exiting(struct address_space *mapping)
{
        set_bit(AS_EXITING, &mapping->flags);
}

static inline int mapping_exiting(struct address_space *mapping)
{
        return test_bit(AS_EXITING, &mapping->flags);
}

static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
{
        set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline int mapping_use_writeback_tags(struct address_space *mapping)
{
        return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline bool mapping_release_always(const struct address_space *mapping)
{
        return test_bit(AS_RELEASE_ALWAYS, &mapping->flags);
}

static inline void mapping_set_release_always(struct address_space *mapping)
{
        set_bit(AS_RELEASE_ALWAYS, &mapping->flags);
}

static inline void mapping_clear_release_always(struct address_space *mapping)
{
        clear_bit(AS_RELEASE_ALWAYS, &mapping->flags);
}

static inline bool mapping_stable_writes(const struct address_space *mapping)
{
        return test_bit(AS_STABLE_WRITES, &mapping->flags);
}

static inline void mapping_set_stable_writes(struct address_space *mapping)
{
        set_bit(AS_STABLE_WRITES, &mapping->flags);
}

static inline void mapping_clear_stable_writes(struct address_space *mapping)
{
        clear_bit(AS_STABLE_WRITES, &mapping->flags);
}

static inline void mapping_set_inaccessible(struct address_space *mapping)
{
        /*
         * It's expected inaccessible mappings are also unevictable.  Compaction
         * migrate scanner (isolate_migratepages_block()) relies on this to
         * reduce page locking.
         */
        set_bit(AS_UNEVICTABLE, &mapping->flags);
        set_bit(AS_INACCESSIBLE, &mapping->flags);
}

static inline bool mapping_inaccessible(struct address_space *mapping)
{
        return test_bit(AS_INACCESSIBLE, &mapping->flags);
}

static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
{
        return mapping->gfp_mask;
}

/* Restricts the given gfp_mask to what the mapping allows. */
static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
                gfp_t gfp_mask)
{
        return mapping_gfp_mask(mapping) & gfp_mask;
}

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
        m->gfp_mask = mask;
}

/*
 * There are some parts of the kernel which assume that PMD entries
 * are exactly HPAGE_PMD_ORDER.  Those should be fixed, but until then,
 * limit the maximum allocation order to PMD size.  I'm not aware of any
 * assumptions about maximum order if THP are disabled, but 8 seems like
 * a good order (that's 1MB if you're using 4kB pages).
 */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define PREFERRED_MAX_PAGECACHE_ORDER   HPAGE_PMD_ORDER
#else
#define PREFERRED_MAX_PAGECACHE_ORDER   8
#endif

/*
 * xas_split_alloc() does not support arbitrary orders.  This implies no
 * 512MB THP on ARM64 with 64KB base page size.
 */
#define MAX_XAS_ORDER           (XA_CHUNK_SHIFT * 2 - 1)
#define MAX_PAGECACHE_ORDER     min(MAX_XAS_ORDER, PREFERRED_MAX_PAGECACHE_ORDER)

/*
 * mapping_max_folio_size_supported() - Check the max folio size supported
 *
 * The filesystem should call this function at mount time if there is a
 * requirement on the folio mapping size in the page cache.
 */
static inline size_t mapping_max_folio_size_supported(void)
{
        if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
                return 1U << (PAGE_SHIFT + MAX_PAGECACHE_ORDER);
        return PAGE_SIZE;
}

/**
 * mapping_set_folio_order_range() - Set the orders supported by a file.
 * @mapping: The address space of the file.
 * @min: Minimum folio order (between 0-MAX_PAGECACHE_ORDER inclusive).
 * @max: Maximum folio order (between @min-MAX_PAGECACHE_ORDER inclusive).
 *
 * The filesystem should call this function in its inode constructor to
 * indicate which base size (min) and maximum size (max) of folio the VFS
 * can use to cache the contents of the file.  This should only be used
 * if the filesystem needs special handling of folio sizes (ie there is
 * something the core cannot know).
 * Do not tune it based on, eg, i_size.
 *
 * Context: This should not be called while the inode is active as it
 * is non-atomic.
 */
static inline void mapping_set_folio_order_range(struct address_space *mapping,
                                                 unsigned int min,
                                                 unsigned int max)
{
        if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
                return;

        if (min > MAX_PAGECACHE_ORDER)
                min = MAX_PAGECACHE_ORDER;

        if (max > MAX_PAGECACHE_ORDER)
                max = MAX_PAGECACHE_ORDER;

        if (max < min)
                max = min;

        mapping->flags = (mapping->flags & ~AS_FOLIO_ORDER_MASK) |
                (min << AS_FOLIO_ORDER_MIN) | (max << AS_FOLIO_ORDER_MAX);
}

static inline void mapping_set_folio_min_order(struct address_space *mapping,
                                               unsigned int min)
{
        mapping_set_folio_order_range(mapping, min, MAX_PAGECACHE_ORDER);
}

/**
 * mapping_set_large_folios() - Indicate the file supports large folios.
 * @mapping: The address space of the file.
 *
 * The filesystem should call this function in its inode constructor to
 * indicate that the VFS can use large folios to cache the contents of
 * the file.
 *
 * Context: This should not be called while the inode is active as it
 * is non-atomic.
 */
static inline void mapping_set_large_folios(struct address_space *mapping)
{
        mapping_set_folio_order_range(mapping, 0, MAX_PAGECACHE_ORDER);
}

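/*
 * Example (an illustrative sketch): a filesystem typically picks its folio
 * order policy once, in its inode constructor.  A hypothetical "myfs" with
 * 16KiB blocks on 4KiB pages (a minimum order of 2) might do:
 *
 *	static void myfs_init_mapping(struct inode *inode)
 *	{
 *		if (myfs_has_large_blocks(inode))
 *			mapping_set_folio_min_order(inode->i_mapping, 2);
 *		else
 *			mapping_set_large_folios(inode->i_mapping);
 *	}
 *
 * where myfs_has_large_blocks() is a hypothetical predicate.
 */
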
static inline unsigned int
mapping_max_folio_order(const struct address_space *mapping)
{
        if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
                return 0;
        return (mapping->flags & AS_FOLIO_ORDER_MAX_MASK) >> AS_FOLIO_ORDER_MAX;
}

static inline unsigned int
mapping_min_folio_order(const struct address_space *mapping)
{
        if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
                return 0;
        return (mapping->flags & AS_FOLIO_ORDER_MIN_MASK) >> AS_FOLIO_ORDER_MIN;
}

static inline unsigned long
mapping_min_folio_nrpages(struct address_space *mapping)
{
        return 1UL << mapping_min_folio_order(mapping);
}

/**
 * mapping_align_index() - Align index for this mapping.
 * @mapping: The address_space.
 * @index: The page index.
 *
 * The index of a folio must be naturally aligned.  If you are adding a
 * new folio to the page cache and need to know what index to give it,
 * call this function.
 */
static inline pgoff_t mapping_align_index(struct address_space *mapping,
                                          pgoff_t index)
{
        return round_down(index, mapping_min_folio_nrpages(mapping));
}

/*
 * Large folio support currently depends on THP.  These dependencies are
 * being worked on but are not yet fixed.
 */
static inline bool mapping_large_folio_support(struct address_space *mapping)
{
        /* AS_FOLIO_ORDER is only reasonable for pagecache folios */
        VM_WARN_ONCE((unsigned long)mapping & PAGE_MAPPING_ANON,
                        "Anonymous mapping always supports large folio");

        return mapping_max_folio_order(mapping) > 0;
}

/* Return the maximum folio size for this pagecache mapping, in bytes. */
static inline size_t mapping_max_folio_size(const struct address_space *mapping)
{
        return PAGE_SIZE << mapping_max_folio_order(mapping);
}

static inline int filemap_nr_thps(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
        return atomic_read(&mapping->nr_thps);
#else
        return 0;
#endif
}

static inline void filemap_nr_thps_inc(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
        if (!mapping_large_folio_support(mapping))
                atomic_inc(&mapping->nr_thps);
#else
        WARN_ON_ONCE(mapping_large_folio_support(mapping) == 0);
#endif
}

static inline void filemap_nr_thps_dec(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
        if (!mapping_large_folio_support(mapping))
                atomic_dec(&mapping->nr_thps);
#else
        WARN_ON_ONCE(mapping_large_folio_support(mapping) == 0);
#endif
}

struct address_space *folio_mapping(struct folio *);
struct address_space *swapcache_mapping(struct folio *);

/**
 * folio_file_mapping - Find the mapping this folio belongs to.
 * @folio: The folio.
 *
 * For folios which are in the page cache, return the mapping that this
 * page belongs to.  Folios in the swap cache return the mapping of the
 * swap file or swap device where the data is stored.  This is different
 * from the mapping returned by folio_mapping().  The only reason to
 * use it is if, like NFS, you return 0 from ->activate_swapfile.
 *
 * Do not call this for folios which aren't in the page cache or swap cache.
 */
static inline struct address_space *folio_file_mapping(struct folio *folio)
{
        if (unlikely(folio_test_swapcache(folio)))
                return swapcache_mapping(folio);

        return folio->mapping;
}

/**
 * folio_flush_mapping - Find the file mapping this folio belongs to.
 * @folio: The folio.
 *
 * For folios which are in the page cache, return the mapping that this
 * page belongs to.  Anonymous folios return NULL, even if they're in
 * the swap cache.  Other kinds of folio also return NULL.
 *
 * This is ONLY used by architecture cache flushing code.  If you aren't
 * writing cache flushing code, you want either folio_mapping() or
 * folio_file_mapping().
 */
static inline struct address_space *folio_flush_mapping(struct folio *folio)
{
        if (unlikely(folio_test_swapcache(folio)))
                return NULL;

        return folio_mapping(folio);
}

static inline struct address_space *page_file_mapping(struct page *page)
{
        return folio_file_mapping(page_folio(page));
}

/**
 * folio_inode - Get the host inode for this folio.
 * @folio: The folio.
 *
 * For folios which are in the page cache, return the inode that this folio
 * belongs to.
 *
 * Do not call this for folios which aren't in the page cache.
 */
static inline struct inode *folio_inode(struct folio *folio)
{
        return folio->mapping->host;
}

/**
 * folio_attach_private - Attach private data to a folio.
 * @folio: Folio to attach data to.
 * @data: Data to attach to folio.
 *
 * Attaching private data to a folio increments the page's reference count.
 * The data must be detached before the folio will be freed.
 */
static inline void folio_attach_private(struct folio *folio, void *data)
{
        folio_get(folio);
        folio->private = data;
        folio_set_private(folio);
}

/**
 * folio_change_private - Change private data on a folio.
 * @folio: Folio to change the data on.
 * @data: Data to set on the folio.
 *
 * Change the private data attached to a folio and return the old
 * data.  The page must previously have had data attached and the data
 * must be detached before the folio will be freed.
 *
 * Return: Data that was previously attached to the folio.
 */
static inline void *folio_change_private(struct folio *folio, void *data)
{
        void *old = folio_get_private(folio);

        folio->private = data;
        return old;
}

/**
 * folio_detach_private - Detach private data from a folio.
 * @folio: Folio to detach data from.
 *
 * Removes the data that was previously attached to the folio and decrements
 * the refcount on the page.
 *
 * Return: Data that was attached to the folio.
 */
static inline void *folio_detach_private(struct folio *folio)
{
        void *data = folio_get_private(folio);

        if (!folio_test_private(folio))
                return NULL;
        folio_clear_private(folio);
        folio->private = NULL;
        folio_put(folio);

        return data;
}

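/*
 * Usage sketch (illustrative, hypothetical myfs type): per-folio filesystem
 * state is commonly carried in folio->private across its cache lifetime:
 *
 *	struct myfs_folio *mf = kzalloc(sizeof(*mf), GFP_NOFS);
 *
 *	folio_attach_private(folio, mf);	(takes a folio reference)
 *	...
 *	mf = folio_detach_private(folio);	(drops the reference)
 *	kfree(mf);
 */
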
static inline void attach_page_private(struct page *page, void *data)
{
        folio_attach_private(page_folio(page), data);
}

static inline void *detach_page_private(struct page *page)
{
        return folio_detach_private(page_folio(page));
}

#ifdef CONFIG_NUMA
struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order);
#else
static inline struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order)
{
        return folio_alloc_noprof(gfp, order);
}
#endif

#define filemap_alloc_folio(...)                                \
        alloc_hooks(filemap_alloc_folio_noprof(__VA_ARGS__))

static inline struct page *__page_cache_alloc(gfp_t gfp)
{
        return &filemap_alloc_folio(gfp, 0)->page;
}

static inline gfp_t readahead_gfp_mask(struct address_space *x)
{
        return mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN;
}

typedef int filler_t(struct file *, struct folio *);

pgoff_t page_cache_next_miss(struct address_space *mapping,
                             pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_miss(struct address_space *mapping,
                             pgoff_t index, unsigned long max_scan);

/**
 * typedef fgf_t - Flags for getting folios from the page cache.
 *
 * Most users of the page cache will not need to use these flags;
 * there are convenience functions such as filemap_get_folio() and
 * filemap_lock_folio().  For users which need more control over exactly
 * what is done with the folios, these flags to __filemap_get_folio()
 * are available.
 *
 * * %FGP_ACCESSED - The folio will be marked accessed.
 * * %FGP_LOCK - The folio is returned locked.
 * * %FGP_CREAT - If no folio is present then a new folio is allocated,
 *   added to the page cache and the VM's LRU list.  The folio is
 *   returned locked.
 * * %FGP_FOR_MMAP - The caller wants to do its own locking dance if the
 *   folio is already in cache.  If the folio was allocated, unlock it
 *   before returning so the caller can do the same dance.
 * * %FGP_WRITE - The folio will be written to by the caller.
 * * %FGP_NOFS - __GFP_FS will get cleared in gfp.
 * * %FGP_NOWAIT - Don't block on the folio lock.
 * * %FGP_STABLE - Wait for the folio to be stable (finished writeback)
 * * %FGP_WRITEBEGIN - The flags to use in a filesystem write_begin()
 *   implementation.
 */
typedef unsigned int __bitwise fgf_t;

#define FGP_ACCESSED            ((__force fgf_t)0x00000001)
#define FGP_LOCK                ((__force fgf_t)0x00000002)
#define FGP_CREAT               ((__force fgf_t)0x00000004)
#define FGP_WRITE               ((__force fgf_t)0x00000008)
#define FGP_NOFS                ((__force fgf_t)0x00000010)
#define FGP_NOWAIT              ((__force fgf_t)0x00000020)
#define FGP_FOR_MMAP            ((__force fgf_t)0x00000040)
#define FGP_STABLE              ((__force fgf_t)0x00000080)
#define FGF_GET_ORDER(fgf)      (((__force unsigned)fgf) >> 26) /* top 6 bits */

#define FGP_WRITEBEGIN          (FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE)

/**
 * fgf_set_order - Encode a length in the fgf_t flags.
 * @size: The suggested size of the folio to create.
 *
 * The caller of __filemap_get_folio() can use this to suggest a preferred
 * size for the folio that is created.  If there is already a folio at
 * the index, it will be returned, no matter what its size.  If a folio
 * is freshly created, it may be of a different size than requested
 * due to alignment constraints, memory pressure, or the presence of
 * other folios at nearby indices.
 */
static inline fgf_t fgf_set_order(size_t size)
{
        unsigned int shift = ilog2(size);

        if (shift <= PAGE_SHIFT)
                return 0;
        return (__force fgf_t)((shift - PAGE_SHIFT) << 26);
}

void *filemap_get_entry(struct address_space *mapping, pgoff_t index);
struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
                fgf_t fgp_flags, gfp_t gfp);
struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,
                fgf_t fgp_flags, gfp_t gfp);

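/*
 * Example (an illustrative sketch): a buffered-write path that wants a folio
 * large enough to cover @len bytes at @pos can combine FGP_WRITEBEGIN with
 * fgf_set_order():
 *
 *	fgf_t fgp = FGP_WRITEBEGIN | fgf_set_order(len);
 *	struct folio *folio;
 *
 *	folio = __filemap_get_folio(mapping, pos >> PAGE_SHIFT, fgp,
 *				    mapping_gfp_mask(mapping));
 *	if (IS_ERR(folio))
 *		return PTR_ERR(folio);
 */
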
/**
 * filemap_get_folio - Find and get a folio.
 * @mapping: The address_space to search.
 * @index: The page index.
 *
 * Looks up the page cache entry at @mapping & @index.  If a folio is
 * present, it is returned with an increased refcount.
 *
 * Return: A folio or ERR_PTR(-ENOENT) if there is no folio in the cache for
 * this index.  Will not return a shadow, swap or DAX entry.
 */
static inline struct folio *filemap_get_folio(struct address_space *mapping,
                                        pgoff_t index)
{
        return __filemap_get_folio(mapping, index, 0, 0);
}

/**
 * filemap_lock_folio - Find and lock a folio.
 * @mapping: The address_space to search.
 * @index: The page index.
 *
 * Looks up the page cache entry at @mapping & @index.  If a folio is
 * present, it is returned locked with an increased refcount.
 *
 * Context: May sleep.
 * Return: A folio or ERR_PTR(-ENOENT) if there is no folio in the cache for
 * this index.  Will not return a shadow, swap or DAX entry.
 */
static inline struct folio *filemap_lock_folio(struct address_space *mapping,
                                        pgoff_t index)
{
        return __filemap_get_folio(mapping, index, FGP_LOCK, 0);
}

/**
 * filemap_grab_folio - grab a folio from the page cache
 * @mapping: The address space to search
 * @index: The page index
 *
 * Looks up the page cache entry at @mapping & @index.  If no folio is found,
 * a new folio is created.  The folio is locked, marked as accessed, and
 * returned.
 *
 * Return: A found or created folio.  ERR_PTR(-ENOMEM) if no folio is found
 * and a new one cannot be created.
 */
static inline struct folio *filemap_grab_folio(struct address_space *mapping,
                                        pgoff_t index)
{
        return __filemap_get_folio(mapping, index,
                        FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
                        mapping_gfp_mask(mapping));
}

/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * Otherwise, %NULL is returned.
 */
static inline struct page *find_get_page(struct address_space *mapping,
                                        pgoff_t offset)
{
        return pagecache_get_page(mapping, offset, 0, 0);
}

static inline struct page *find_get_page_flags(struct address_space *mapping,
                                        pgoff_t offset, fgf_t fgp_flags)
{
        return pagecache_get_page(mapping, offset, fgp_flags, 0);
}

/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @index: the page index
 *
 * Looks up the page cache entry at @mapping & @index.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * Context: May sleep.
 * Return: A struct page or %NULL if there is no page in the cache for this
 * index.
 */
static inline struct page *find_lock_page(struct address_space *mapping,
                                        pgoff_t index)
{
        return pagecache_get_page(mapping, index, FGP_LOCK, 0);
}

/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @index: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Looks up the page cache slot at @mapping & @index.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the page is not present, a new page is allocated using @gfp_mask
 * and added to the page cache and the VM's LRU list.  The page is
 * returned locked and with an increased refcount.
 *
 * On memory exhaustion, %NULL is returned.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an
 * atomic allocation!
 */
static inline struct page *find_or_create_page(struct address_space *mapping,
                                        pgoff_t index, gfp_t gfp_mask)
{
        return pagecache_get_page(mapping, index,
                                        FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
                                        gfp_mask);
}

/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
                                pgoff_t index)
{
        return pagecache_get_page(mapping, index,
                        FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
                        mapping_gfp_mask(mapping));
}

extern pgoff_t __folio_swap_cache_index(struct folio *folio);

/**
 * folio_index - File index of a folio.
 * @folio: The folio.
 *
 * For a folio which is either in the page cache or the swap cache,
 * return its index within the address_space it belongs to.  If you know
 * the page is definitely in the page cache, you can look at the folio's
 * index directly.
 *
 * Return: The index (offset in units of pages) of a folio in its file.
 */
static inline pgoff_t folio_index(struct folio *folio)
{
        if (unlikely(folio_test_swapcache(folio)))
                return __folio_swap_cache_index(folio);
        return folio->index;
}

/**
 * folio_next_index - Get the index of the next folio.
 * @folio: The current folio.
 *
 * Return: The index of the folio which follows this folio in the file.
 */
static inline pgoff_t folio_next_index(struct folio *folio)
{
        return folio->index + folio_nr_pages(folio);
}

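/*
 * Usage sketch (illustrative): folio_next_index() lets a caller walk the
 * cached folios of a range without assuming any particular folio size.
 * This simple loop stops at the first hole:
 *
 *	pgoff_t index = start;
 *
 *	while (index <= end) {
 *		struct folio *folio = filemap_get_folio(mapping, index);
 *
 *		if (IS_ERR(folio))
 *			break;
 *		index = folio_next_index(folio);
 *		folio_put(folio);
 *	}
 */
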
/**
 * folio_file_page - The page for a particular index.
 * @folio: The folio which contains this index.
 * @index: The index we want to look up.
 *
 * Sometimes after looking up a folio in the page cache, we need to
 * obtain the specific page for an index (eg a page fault).
 *
 * Return: The page containing the file data for this index.
 */
static inline struct page *folio_file_page(struct folio *folio, pgoff_t index)
{
        return folio_page(folio, index & (folio_nr_pages(folio) - 1));
}

/**
 * folio_contains - Does this folio contain this index?
 * @folio: The folio.
 * @index: The page index within the file.
 *
 * Context: The caller should have the page locked in order to prevent
 * (eg) shmem from moving the page between the page cache and swap cache
 * and changing its index in the middle of the operation.
 * Return: true or false.
 */
static inline bool folio_contains(struct folio *folio, pgoff_t index)
{
        return index - folio_index(folio) < folio_nr_pages(folio);
}

/*
 * Given the page we found in the page cache, return the page corresponding
 * to this index in the file.
 */
static inline struct page *find_subpage(struct page *head, pgoff_t index)
{
        /* HugeTLBfs wants the head page regardless */
        if (PageHuge(head))
                return head;

        return head + (index & (thp_nr_pages(head) - 1));
}

unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start,
                pgoff_t end, struct folio_batch *fbatch);
unsigned filemap_get_folios_contig(struct address_space *mapping,
                pgoff_t *start, pgoff_t end, struct folio_batch *fbatch);
unsigned filemap_get_folios_tag(struct address_space *mapping, pgoff_t *start,
                pgoff_t end, xa_mark_t tag, struct folio_batch *fbatch);

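/*
 * Example (an illustrative sketch): the batched lookup functions are
 * typically driven in a loop with a folio_batch (see linux/pagevec.h);
 * myfs_process() is a hypothetical per-folio callback:
 *
 *	struct folio_batch fbatch;
 *	pgoff_t start = 0;
 *	unsigned int i;
 *
 *	folio_batch_init(&fbatch);
 *	while (filemap_get_folios(mapping, &start, end, &fbatch)) {
 *		for (i = 0; i < folio_batch_count(&fbatch); i++)
 *			myfs_process(fbatch.folios[i]);
 *		folio_batch_release(&fbatch);
 *		cond_resched();
 *	}
 */
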
struct page *grab_cache_page_write_begin(struct address_space *mapping,
                        pgoff_t index);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
                        pgoff_t index)
{
        return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

struct folio *read_cache_folio(struct address_space *, pgoff_t index,
                filler_t *filler, struct file *file);
struct folio *mapping_read_folio_gfp(struct address_space *, pgoff_t index,
                gfp_t gfp);
struct page *read_cache_page(struct address_space *, pgoff_t index,
                filler_t *filler, struct file *file);
extern struct page *read_cache_page_gfp(struct address_space *mapping,
                pgoff_t index, gfp_t gfp_mask);

static inline struct page *read_mapping_page(struct address_space *mapping,
                pgoff_t index, struct file *file)
{
        return read_cache_page(mapping, index, NULL, file);
}

static inline struct folio *read_mapping_folio(struct address_space *mapping,
                pgoff_t index, struct file *file)
{
        return read_cache_folio(mapping, index, NULL, file);
}

/**
 * page_pgoff - Calculate the logical page offset of this page.
 * @folio: The folio containing this page.
 * @page: The page which we need the offset of.
 *
 * For file pages, this is the offset from the beginning of the file
 * in units of PAGE_SIZE.  For anonymous pages, this is the offset from
 * the beginning of the anon_vma in units of PAGE_SIZE.  This will
 * return nonsense for KSM pages.
 *
 * Context: Caller must have a reference on the folio or otherwise
 * prevent it from being split or freed.
 *
 * Return: The offset in units of PAGE_SIZE.
 */
static inline pgoff_t page_pgoff(const struct folio *folio,
                const struct page *page)
{
        return folio->index + folio_page_idx(folio, page);
}

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
        return ((loff_t)page->index) << PAGE_SHIFT;
}

/**
 * folio_pos - Returns the byte position of this folio in its file.
 * @folio: The folio.
 */
static inline loff_t folio_pos(struct folio *folio)
{
        return page_offset(&folio->page);
}

/*
 * Get the offset in PAGE_SIZE (even for hugetlb folios).
 */
static inline pgoff_t folio_pgoff(struct folio *folio)
{
        return folio->index;
}

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
                                        unsigned long address)
{
        pgoff_t pgoff;

        pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
        pgoff += vma->vm_pgoff;
        return pgoff;
}

struct wait_page_key {
        struct folio *folio;
        int bit_nr;
        int page_match;
};

struct wait_page_queue {
        struct folio *folio;
        int bit_nr;
        wait_queue_entry_t wait;
};

static inline bool wake_page_match(struct wait_page_queue *wait_page,
                                  struct wait_page_key *key)
{
        if (wait_page->folio != key->folio)
                return false;
        key->page_match = 1;

        if (wait_page->bit_nr != key->bit_nr)
                return false;

        return true;
}

void __folio_lock(struct folio *folio);
int __folio_lock_killable(struct folio *folio);
vm_fault_t __folio_lock_or_retry(struct folio *folio, struct vm_fault *vmf);
void unlock_page(struct page *page);
void folio_unlock(struct folio *folio);

/**
 * folio_trylock() - Attempt to lock a folio.
 * @folio: The folio to attempt to lock.
 *
 * Sometimes it is undesirable to wait for a folio to be unlocked (eg
 * when the locks are being taken in the wrong order, or if making
 * progress through a batch of folios is more important than processing
 * them in order).  Usually folio_lock() is the correct function to call.
 *
 * Context: Any context.
 * Return: Whether the lock was successfully acquired.
 */
static inline bool folio_trylock(struct folio *folio)
{
        return likely(!test_and_set_bit_lock(PG_locked, folio_flags(folio, 0)));
}

/*
 * Return true if the page was successfully locked.
 */
static inline bool trylock_page(struct page *page)
{
        return folio_trylock(page_folio(page));
}

/**
 * folio_lock() - Lock this folio.
 * @folio: The folio to lock.
 *
 * The folio lock protects against many things, probably more than it
 * should.  It is primarily held while a folio is being brought uptodate,
 * either from its backing file or from swap.  It is also held while a
 * folio is being truncated from its address_space, so holding the lock
 * is sufficient to keep folio->mapping stable.
 *
 * The folio lock is also held while write() is modifying the page to
 * provide POSIX atomicity guarantees (as long as the write does not
 * cross a page boundary).  Other modifications to the data in the folio
 * do not hold the folio lock and can race with writes, eg DMA and stores
 * to mapped pages.
 *
 * Context: May sleep.  If you need to acquire the locks of two or
 * more folios, they must be in order of ascending index, if they are
 * in the same address_space.  If they are in different address_spaces,
 * acquire the lock of the folio which belongs to the address_space which
 * has the lowest address in memory first.
 */
static inline void folio_lock(struct folio *folio)
{
        might_sleep();
        if (!folio_trylock(folio))
                __folio_lock(folio);
}

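/*
 * Usage sketch (illustrative): because the lock keeps folio->mapping
 * stable, a common pattern is to lock a folio and then recheck that it was
 * not truncated while it was unlocked:
 *
 *	folio_lock(folio);
 *	if (folio->mapping != mapping) {
 *		folio_unlock(folio);
 *		goto retry;
 *	}
 */
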
/**
 * lock_page() - Lock the folio containing this page.
 * @page: The page to lock.
 *
 * See folio_lock() for a description of what the lock protects.
 * This is a legacy function and new code should probably use folio_lock()
 * instead.
 *
 * Context: May sleep.  Pages in the same folio share a lock, so do not
 * attempt to lock two pages which share a folio.
 */
static inline void lock_page(struct page *page)
{
        struct folio *folio;

        might_sleep();
        folio = page_folio(page);
        if (!folio_trylock(folio))
                __folio_lock(folio);
}

/**
 * folio_lock_killable() - Lock this folio, interruptible by a fatal signal.
 * @folio: The folio to lock.
 *
 * Attempts to lock the folio, like folio_lock(), except that the sleep
 * to acquire the lock is interruptible by a fatal signal.
 *
 * Context: May sleep; see folio_lock().
 * Return: 0 if the lock was acquired; -EINTR if a fatal signal was received.
 */
static inline int folio_lock_killable(struct folio *folio)
{
        might_sleep();
        if (!folio_trylock(folio))
                return __folio_lock_killable(folio);
        return 0;
}

/*
 * folio_lock_or_retry - Lock the folio, unless this would block and the
 * caller indicated that it can handle a retry.
 *
 * Return value and mmap_lock implications depend on flags; see
 * __folio_lock_or_retry().
 */
static inline vm_fault_t folio_lock_or_retry(struct folio *folio,
                struct vm_fault *vmf)
{
        might_sleep();
        if (!folio_trylock(folio))
                return __folio_lock_or_retry(folio, vmf);
        return 0;
}

/*
 * This is exported only for folio_wait_locked/folio_wait_writeback, etc.,
 * and should not be used directly.
 */
void folio_wait_bit(struct folio *folio, int bit_nr);
int folio_wait_bit_killable(struct folio *folio, int bit_nr);

/*
 * Wait for a folio to be unlocked.
 *
 * This must be called with the caller "holding" the folio,
 * ie with increased folio reference count so that the folio won't
 * go away during the wait.
 */
static inline void folio_wait_locked(struct folio *folio)
{
        if (folio_test_locked(folio))
                folio_wait_bit(folio, PG_locked);
}

static inline int folio_wait_locked_killable(struct folio *folio)
{
        if (!folio_test_locked(folio))
                return 0;
        return folio_wait_bit_killable(folio, PG_locked);
}

static inline void wait_on_page_locked(struct page *page)
{
        folio_wait_locked(page_folio(page));
}

void folio_end_read(struct folio *folio, bool success);
void wait_on_page_writeback(struct page *page);
void folio_wait_writeback(struct folio *folio);
int folio_wait_writeback_killable(struct folio *folio);
void end_page_writeback(struct page *page);
void folio_end_writeback(struct folio *folio);
void wait_for_stable_page(struct page *page);
void folio_wait_stable(struct folio *folio);
void __folio_mark_dirty(struct folio *folio, struct address_space *, int warn);
void folio_account_cleaned(struct folio *folio, struct bdi_writeback *wb);
void __folio_cancel_dirty(struct folio *folio);
static inline void folio_cancel_dirty(struct folio *folio)
{
        /* Avoid atomic ops, locking, etc. when not actually needed. */
        if (folio_test_dirty(folio))
                __folio_cancel_dirty(folio);
}
bool folio_clear_dirty_for_io(struct folio *folio);
bool clear_page_dirty_for_io(struct page *page);
void folio_invalidate(struct folio *folio, size_t offset, size_t length);
bool noop_dirty_folio(struct address_space *mapping, struct folio *folio);

#ifdef CONFIG_MIGRATION
int filemap_migrate_folio(struct address_space *mapping, struct folio *dst,
                struct folio *src, enum migrate_mode mode);
#else
#define filemap_migrate_folio NULL
#endif

void folio_end_private_2(struct folio *folio);
void folio_wait_private_2(struct folio *folio);
int folio_wait_private_2_killable(struct folio *folio);

/*
 * Add an arbitrary waiter to a page's wait queue.
 */
void folio_add_wait_queue(struct folio *folio, wait_queue_entry_t *waiter);

/*
 * Fault in userspace address range.
 */
size_t fault_in_writeable(char __user *uaddr, size_t size);
size_t fault_in_subpage_writeable(char __user *uaddr, size_t size);
size_t fault_in_safe_writeable(const char __user *uaddr, size_t size);
size_t fault_in_readable(const char __user *uaddr, size_t size);

int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
                pgoff_t index, gfp_t gfp);
int filemap_add_folio(struct address_space *mapping, struct folio *folio,
                pgoff_t index, gfp_t gfp);
void filemap_remove_folio(struct folio *folio);
void __filemap_remove_folio(struct folio *folio, void *shadow);
void replace_page_cache_folio(struct folio *old, struct folio *new);
void delete_from_page_cache_batch(struct address_space *mapping,
                                  struct folio_batch *fbatch);
bool filemap_release_folio(struct folio *folio, gfp_t gfp);
loff_t mapping_seek_hole_data(struct address_space *, loff_t start, loff_t end,
                int whence);

/* Must be non-static for BPF error injection */
int __filemap_add_folio(struct address_space *mapping, struct folio *folio,
                pgoff_t index, gfp_t gfp, void **shadowp);

bool filemap_range_has_writeback(struct address_space *mapping,
                                 loff_t start_byte, loff_t end_byte);

/**
 * filemap_range_needs_writeback - check if range potentially needs writeback
 * @mapping:    address space within which to check
 * @start_byte: offset in bytes where the range starts
 * @end_byte:   offset in bytes where the range ends (inclusive)
 *
 * Find at least one page in the range supplied, usually used to check if
 * direct writing in this range will trigger a writeback.  Used by O_DIRECT
 * read/write with IOCB_NOWAIT, to see if the caller needs to do
 * filemap_write_and_wait_range() before proceeding.
 *
 * Return: %true if the caller should do filemap_write_and_wait_range() before
 * doing O_DIRECT to a page in this range, %false otherwise.
 */
static inline bool filemap_range_needs_writeback(struct address_space *mapping,
                                                 loff_t start_byte,
                                                 loff_t end_byte)
{
        if (!mapping->nrpages)
                return false;
        if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
            !mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK))
                return false;
        return filemap_range_has_writeback(mapping, start_byte, end_byte);
}

/**
 * struct readahead_control - Describes a readahead request.
 *
 * A readahead request is for consecutive pages.  Filesystems which
 * implement the ->readahead method should call readahead_page() or
 * readahead_page_batch() in a loop and attempt to start I/O against
 * each page in the request.
 *
 * Most of the fields in this struct are private and should be accessed
 * by the functions below.
 *
 * @file: The file, used primarily by network filesystems for authentication.
 *	  May be NULL if invoked internally by the filesystem.
 * @mapping: Readahead this filesystem object.
 * @ra: File readahead state.  May be NULL.
 */
struct readahead_control {
        struct file *file;
        struct address_space *mapping;
        struct file_ra_state *ra;
/* private: use the readahead_* accessors instead */
        pgoff_t _index;
        unsigned int _nr_pages;
        unsigned int _batch_count;
        bool _workingset;
        unsigned long _pflags;
};

#define DEFINE_READAHEAD(ractl, f, r, m, i)                     \
        struct readahead_control ractl = {                      \
                .file = f,                                      \
                .mapping = m,                                   \
                .ra = r,                                        \
                ._index = i,                                    \
        }

#define VM_READAHEAD_PAGES      (SZ_128K / PAGE_SIZE)

void page_cache_ra_unbounded(struct readahead_control *,
                unsigned long nr_to_read, unsigned long lookahead_count);
void page_cache_sync_ra(struct readahead_control *, unsigned long req_count);
void page_cache_async_ra(struct readahead_control *, struct folio *,
                unsigned long req_count);
void readahead_expand(struct readahead_control *ractl,
                      loff_t new_start, size_t new_len);

/**
 * page_cache_sync_readahead - generic file readahead
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @file: Used by the filesystem for authentication.
 * @index: Index of first page to be read.
 * @req_count: Total number of pages being read by the caller.
 *
 * page_cache_sync_readahead() should be called when a cache miss happened:
 * it will submit the read.  The readahead logic may decide to piggyback more
 * pages onto the read request if access patterns suggest it will improve
 * performance.
 */
static inline
void page_cache_sync_readahead(struct address_space *mapping,
                struct file_ra_state *ra, struct file *file, pgoff_t index,
                unsigned long req_count)
{
        DEFINE_READAHEAD(ractl, file, ra, mapping, index);
        page_cache_sync_ra(&ractl, req_count);
}

/**
 * page_cache_async_readahead - file readahead for marked pages
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @file: Used by the filesystem for authentication.
 * @folio: The folio which triggered the readahead call.
 * @req_count: Total number of pages being read by the caller.
 *
 * page_cache_async_readahead() should be called when a page is used which
 * is marked as PageReadahead; this is a marker to suggest that the application
 * has used up enough of the readahead window that we should start pulling in
 * more pages.
 */
static inline
void page_cache_async_readahead(struct address_space *mapping,
                struct file_ra_state *ra, struct file *file,
                struct folio *folio, unsigned long req_count)
{
        DEFINE_READAHEAD(ractl, file, ra, mapping, folio->index);
        page_cache_async_ra(&ractl, folio, req_count);
}

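/*
 * Example (an illustrative sketch of the calling convention): a read path
 * issues a synchronous request on a cache miss and an asynchronous one when
 * it hits a folio that was marked for readahead:
 *
 *	struct folio *folio = filemap_get_folio(mapping, index);
 *
 *	if (IS_ERR(folio))
 *		page_cache_sync_readahead(mapping, ra, file, index,
 *					  req_count);
 *	else if (folio_test_readahead(folio))
 *		page_cache_async_readahead(mapping, ra, file, folio,
 *					   req_count);
 */
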
static inline struct folio *__readahead_folio(struct readahead_control *ractl)
{
        struct folio *folio;

        BUG_ON(ractl->_batch_count > ractl->_nr_pages);
        ractl->_nr_pages -= ractl->_batch_count;
        ractl->_index += ractl->_batch_count;

        if (!ractl->_nr_pages) {
                ractl->_batch_count = 0;
                return NULL;
        }

        folio = xa_load(&ractl->mapping->i_pages, ractl->_index);
        VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
        ractl->_batch_count = folio_nr_pages(folio);

        return folio;
}

/**
 * readahead_page - Get the next page to read.
 * @ractl: The current readahead request.
 *
 * Context: The page is locked and has an elevated refcount.  The caller
 * should decrease the refcount once the page has been submitted for I/O
 * and unlock the page once all I/O to that page has completed.
 * Return: A pointer to the next page, or %NULL if we are done.
 */
static inline struct page *readahead_page(struct readahead_control *ractl)
{
        struct folio *folio = __readahead_folio(ractl);

        return &folio->page;
}

/**
 * readahead_folio - Get the next folio to read.
 * @ractl: The current readahead request.
 *
 * Context: The folio is locked.  The caller should unlock the folio once
 * all I/O to that folio has completed.
 * Return: A pointer to the next folio, or %NULL if we are done.
 */
static inline struct folio *readahead_folio(struct readahead_control *ractl)
{
        struct folio *folio = __readahead_folio(ractl);

        if (folio)
                folio_put(folio);
        return folio;
}

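/*
 * Example (an illustrative sketch, hypothetical myfs names): a simple
 * ->readahead() implementation consumes the request one folio at a time:
 *
 *	static void myfs_readahead(struct readahead_control *ractl)
 *	{
 *		struct folio *folio;
 *
 *		while ((folio = readahead_folio(ractl)))
 *			myfs_read_folio_async(folio);
 *	}
 *
 * where myfs_read_folio_async() starts the I/O and eventually unlocks the
 * folio, e.g. via folio_end_read().
 */
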
static inline unsigned int __readahead_batch(struct readahead_control *rac,
                struct page **array, unsigned int array_sz)
{
        unsigned int i = 0;
        XA_STATE(xas, &rac->mapping->i_pages, 0);
        struct page *page;

        BUG_ON(rac->_batch_count > rac->_nr_pages);
        rac->_nr_pages -= rac->_batch_count;
        rac->_index += rac->_batch_count;
        rac->_batch_count = 0;

        xas_set(&xas, rac->_index);
        rcu_read_lock();
        xas_for_each(&xas, page, rac->_index + rac->_nr_pages - 1) {
                if (xas_retry(&xas, page))
                        continue;
                VM_BUG_ON_PAGE(!PageLocked(page), page);
                VM_BUG_ON_PAGE(PageTail(page), page);
                array[i++] = page;
                rac->_batch_count += thp_nr_pages(page);
                if (i == array_sz)
                        break;
        }
        rcu_read_unlock();

        return i;
}

/**
 * readahead_page_batch - Get a batch of pages to read.
 * @rac: The current readahead request.
 * @array: An array of pointers to struct page.
 *
 * Context: The pages are locked and have an elevated refcount.  The caller
 * should decrease the refcount once the page has been submitted for I/O
 * and unlock the page once all I/O to that page has completed.
 * Return: The number of pages placed in the array.  0 indicates the request
 * is complete.
 */
#define readahead_page_batch(rac, array)                        \
        __readahead_batch(rac, array, ARRAY_SIZE(array))

/**
 * readahead_pos - The byte offset into the file of this readahead request.
 * @rac: The readahead request.
 */
static inline loff_t readahead_pos(struct readahead_control *rac)
{
        return (loff_t)rac->_index * PAGE_SIZE;
}

/**
 * readahead_length - The number of bytes in this readahead request.
 * @rac: The readahead request.
 */
static inline size_t readahead_length(struct readahead_control *rac)
{
        return rac->_nr_pages * PAGE_SIZE;
}

/**
 * readahead_index - The index of the first page in this readahead request.
 * @rac: The readahead request.
 */
static inline pgoff_t readahead_index(struct readahead_control *rac)
{
        return rac->_index;
}

/**
 * readahead_count - The number of pages in this readahead request.
 * @rac: The readahead request.
 */
static inline unsigned int readahead_count(struct readahead_control *rac)
{
        return rac->_nr_pages;
}

/**
 * readahead_batch_length - The number of bytes in the current batch.
 * @rac: The readahead request.
 */
static inline size_t readahead_batch_length(struct readahead_control *rac)
{
        return rac->_batch_count * PAGE_SIZE;
}

static inline unsigned long dir_pages(struct inode *inode)
{
        return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
                               PAGE_SHIFT;
}

/**
 * folio_mkwrite_check_truncate - check if folio was truncated
 * @folio: the folio to check
 * @inode: the inode to check the folio against
 *
 * Return: the number of bytes in the folio up to EOF,
 * or -EFAULT if the folio was truncated.
 */
static inline ssize_t folio_mkwrite_check_truncate(struct folio *folio,
                                              struct inode *inode)
{
        loff_t size = i_size_read(inode);
        pgoff_t index = size >> PAGE_SHIFT;
        size_t offset = offset_in_folio(folio, size);

        if (!folio->mapping)
                return -EFAULT;

        /* folio is wholly inside EOF */
        if (folio_next_index(folio) - 1 < index)
                return folio_size(folio);
        /* folio is wholly past EOF */
        if (folio->index > index || !offset)
                return -EFAULT;
        /* folio is partially inside EOF */
        return offset;
}

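/*
 * Usage sketch (illustrative, hypothetical myfs names): a ->page_mkwrite()
 * handler locks the folio and bails out if the folio was truncated:
 *
 *	static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		struct folio *folio = page_folio(vmf->page);
 *		struct inode *inode = file_inode(vmf->vma->vm_file);
 *		ssize_t count;
 *
 *		folio_lock(folio);
 *		count = folio_mkwrite_check_truncate(folio, inode);
 *		if (count < 0) {
 *			folio_unlock(folio);
 *			return VM_FAULT_NOPAGE;
 *		}
 *		(writing up to count bytes is safe here)
 *		return VM_FAULT_LOCKED;
 *	}
 */
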
/**
 * page_mkwrite_check_truncate - check if page was truncated
 * @page: the page to check
 * @inode: the inode to check the page against
 *
 * Return: the number of bytes in the page up to EOF,
 * or -EFAULT if the page was truncated.
 */
static inline int page_mkwrite_check_truncate(struct page *page,
                                              struct inode *inode)
{
        loff_t size = i_size_read(inode);
        pgoff_t index = size >> PAGE_SHIFT;
        int offset = offset_in_page(size);

        if (page->mapping != inode->i_mapping)
                return -EFAULT;

        /* page is wholly inside EOF */
        if (page->index < index)
                return PAGE_SIZE;
        /* page is wholly past EOF */
        if (page->index > index || !offset)
                return -EFAULT;
        /* page is partially inside EOF */
        return offset;
}

/**
 * i_blocks_per_folio - How many blocks fit in this folio.
 * @inode: The inode which contains the blocks.
 * @folio: The folio.
 *
 * If the block size is larger than the size of this folio, return zero.
 *
 * Context: The caller should hold a refcount on the folio to prevent it
 * from being split.
 * Return: The number of filesystem blocks covered by this folio.
 */
static inline
unsigned int i_blocks_per_folio(struct inode *inode, struct folio *folio)
{
        return folio_size(folio) >> inode->i_blkbits;
}

#endif /* _LINUX_PAGEMAP_H */