// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ext4/readpage.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 * Copyright (C) 2015, Google, Inc.
 *
 * This was originally taken from fs/mpage.c
 *
 * The ext4_mpage_readpages() function here is intended to
 * replace mpage_readahead() in the general case, not just for
 * encrypted files.  It has some limitations (see below), where it
 * will fall back to block_read_full_page(), but these limitations
 * should only be hit when page_size != block_size.
 *
 * This will allow us to attach a callback function to support ext4
 * encryption.
 *
 * If anything unusual happens, such as:
 *
 * - encountering a page which has buffers
 * - encountering a page which has a non-hole after a hole
 * - encountering a page with non-contiguous blocks
 *
 * then this code just gives up and calls the buffer_head-based read function.
 * It does handle a page which has holes at the end - that is a common case:
 * the end-of-file on blocksize < PAGE_SIZE setups.
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/kdev_t.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/prefetch.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/cleancache.h>

#include "ext4.h"
#define NUM_PREALLOC_POST_READ_CTXS	128
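
/*
 * Slab cache and mempool for the bio_post_read_ctx structures below.  The
 * mempool preallocates NUM_PREALLOC_POST_READ_CTXS contexts so that
 * mempool_alloc() can always make forward progress under memory pressure.
 */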
static struct kmem_cache *bio_post_read_ctx_cache;
static mempool_t *bio_post_read_ctx_pool;
/* postprocessing steps for read bios */
enum bio_post_read_step {
	STEP_INITIAL = 0,
	STEP_DECRYPT,
	STEP_VERITY,
	STEP_MAX,
};
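
/*
 * A bio_post_read_ctx is attached to bio->bi_private when a read bio needs
 * post-processing (decryption and/or fs-verity verification).  cur_step
 * tracks progress through the enabled steps defined above.
 */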
struct bio_post_read_ctx {
	struct bio *bio;
	struct work_struct work;
	unsigned int cur_step;
	unsigned int enabled_steps;
};
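
/*
 * Complete all pages in a read bio: mark each page uptodate unless the bio
 * or a post-read step failed, unlock it, then free the attached ctx (if any)
 * and drop the bio reference.
 */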
static void __read_end_io(struct bio *bio)
{
	struct page *page;
	struct bio_vec *bv;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bv, bio, iter_all) {
		page = bv->bv_page;

		/* PG_error was set if any post_read step failed */
		if (bio->bi_status || PageError(page)) {
			ClearPageUptodate(page);
			/* will re-read again later */
			ClearPageError(page);
		} else {
			SetPageUptodate(page);
		}
		unlock_page(page);
	}
	if (bio->bi_private)
		mempool_free(bio->bi_private, bio_post_read_ctx_pool);
	bio_put(bio);
}
static void bio_post_read_processing(struct bio_post_read_ctx *ctx);
static void decrypt_work(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);

	fscrypt_decrypt_bio(ctx->bio);

	bio_post_read_processing(ctx);
}
static void verity_work(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);
	struct bio *bio = ctx->bio;

	/*
	 * fsverity_verify_bio() may call readpages() again, and although verity
	 * will be disabled for that, decryption may still be needed, causing
	 * another bio_post_read_ctx to be allocated.  So to guarantee that
	 * mempool_alloc() never deadlocks we must free the current ctx first.
	 * This is safe because verity is the last post-read step.
	 */
	BUILD_BUG_ON(STEP_VERITY + 1 != STEP_MAX);
	mempool_free(ctx, bio_post_read_ctx_pool);
	bio->bi_private = NULL;

	fsverity_verify_bio(bio);

	__read_end_io(bio);
}
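
/*
 * Advance to the next enabled post-read step for this bio (decrypt, then
 * verity), or finish the read with __read_end_io() when no steps remain.
 */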
static void bio_post_read_processing(struct bio_post_read_ctx *ctx)
{
	/*
	 * We use different work queues for decryption and for verity because
	 * verity may require reading metadata pages that need decryption, and
	 * we shouldn't recurse to the same workqueue.
	 */
	switch (++ctx->cur_step) {
	case STEP_DECRYPT:
		if (ctx->enabled_steps & (1 << STEP_DECRYPT)) {
			INIT_WORK(&ctx->work, decrypt_work);
			fscrypt_enqueue_decrypt_work(&ctx->work);
			return;
		}
		ctx->cur_step++;
		fallthrough;
	case STEP_VERITY:
		if (ctx->enabled_steps & (1 << STEP_VERITY)) {
			INIT_WORK(&ctx->work, verity_work);
			fsverity_enqueue_verify_work(&ctx->work);
			return;
		}
		ctx->cur_step++;
		fallthrough;
	default:
		__read_end_io(ctx->bio);
	}
}
static bool bio_post_read_required(struct bio *bio)
{
	return bio->bi_private && !bio->bi_status;
}
/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_page().
 *
 * Why is this?  If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard.  See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static void mpage_end_io(struct bio *bio)
{
	if (bio_post_read_required(bio)) {
		struct bio_post_read_ctx *ctx = bio->bi_private;

		ctx->cur_step = STEP_INITIAL;
		bio_post_read_processing(ctx);
		return;
	}
	__read_end_io(bio);
}
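
/*
 * Only pages that lie (at least partly) within i_size contain file data
 * that must be verified against the fs-verity Merkle tree; pages entirely
 * beyond EOF need no verification.
 */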
static inline bool ext4_need_verity(const struct inode *inode, pgoff_t idx)
{
	return fsverity_active(inode) &&
	       idx < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
}
static void ext4_set_bio_post_read_ctx(struct bio *bio,
				       const struct inode *inode,
				       pgoff_t first_idx)
{
	unsigned int post_read_steps = 0;

	if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode))
		post_read_steps |= 1 << STEP_DECRYPT;

	if (ext4_need_verity(inode, first_idx))
		post_read_steps |= 1 << STEP_VERITY;

	if (post_read_steps) {
		/* Due to the mempool, this never fails. */
		struct bio_post_read_ctx *ctx =
			mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);

		ctx->bio = bio;
		ctx->enabled_steps = post_read_steps;
		bio->bi_private = ctx;
	}
}
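
/*
 * Reads of fs-verity files may extend past i_size, since ext4 stores the
 * verity metadata beyond EOF; in that case allow reads up to s_maxbytes.
 * Otherwise the usual i_size limit applies.
 */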
static inline loff_t ext4_readpage_limit(struct inode *inode)
{
	if (IS_ENABLED(CONFIG_FS_VERITY) &&
	    (IS_VERITY(inode) || ext4_verity_in_progress(inode)))
		return inode->i_sb->s_maxbytes;

	return i_size_read(inode);
}
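
/*
 * Read pages either for readahead (rac != NULL, pages taken from the
 * readahead control) or for a single locked page (rac == NULL).
 */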
int ext4_mpage_readpages(struct inode *inode,
		struct readahead_control *rac, struct page *page)
{
	struct bio *bio = NULL;
	sector_t last_block_in_bio = 0;

	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t blocks[MAX_BUF_PER_PAGE];
	unsigned page_block;
	struct block_device *bdev = inode->i_sb->s_bdev;
	int length;
	unsigned relative_block = 0;
	struct ext4_map_blocks map;
	unsigned int nr_pages = rac ? readahead_count(rac) : 1;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;
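
	/*
	 * For each page, try to build one bio covering a contiguous run of
	 * blocks; pages with buffers or discontiguous blocks fall back to
	 * block_read_full_page() via the "confused" path below.
	 */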
	for (; nr_pages; nr_pages--) {
		int fully_mapped = 1;
		unsigned first_hole = blocks_per_page;

		if (rac) {
			page = readahead_page(rac);
			prefetchw(&page->flags);
		}

		if (page_has_buffers(page))
			goto confused;

		block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
		last_block = block_in_file + nr_pages * blocks_per_page;
		last_block_in_file = (ext4_readpage_limit(inode) +
				      blocksize - 1) >> blkbits;
		if (last_block > last_block_in_file)
			last_block = last_block_in_file;
		page_block = 0;
		/*
		 * Map blocks using the previous result first.
		 */
		if ((map.m_flags & EXT4_MAP_MAPPED) &&
		    block_in_file > map.m_lblk &&
		    block_in_file < (map.m_lblk + map.m_len)) {
			unsigned map_offset = block_in_file - map.m_lblk;
			unsigned last = map.m_len - map_offset;

			for (relative_block = 0; ; relative_block++) {
				if (relative_block == last) {
					map.m_flags &= ~EXT4_MAP_MAPPED;
					break;
				}
				if (page_block == blocks_per_page)
					break;
				blocks[page_block] = map.m_pblk + map_offset +
					relative_block;
				page_block++;
				block_in_file++;
			}
		}
		/*
		 * Then do more ext4_map_blocks() calls until we are
		 * done with this page.
		 */
		while (page_block < blocks_per_page) {
			if (block_in_file < last_block) {
				map.m_lblk = block_in_file;
				map.m_len = last_block - block_in_file;

				if (ext4_map_blocks(NULL, inode, &map, 0) < 0) {
				set_error_page:
					SetPageError(page);
					zero_user_segment(page, 0,
							  PAGE_SIZE);
					unlock_page(page);
					goto next_page;
				}
			}
			if ((map.m_flags & EXT4_MAP_MAPPED) == 0) {
				fully_mapped = 0;
				if (first_hole == blocks_per_page)
					first_hole = page_block;
				page_block++;
				block_in_file++;
				continue;
			}
			if (first_hole != blocks_per_page)
				goto confused;		/* hole -> non-hole */

			/* Contiguous blocks? */
			if (page_block && blocks[page_block-1] != map.m_pblk-1)
				goto confused;
			for (relative_block = 0; ; relative_block++) {
				if (relative_block == map.m_len) {
					map.m_flags &= ~EXT4_MAP_MAPPED;
					break;
				} else if (page_block == blocks_per_page)
					break;
				blocks[page_block] = map.m_pblk+relative_block;
				page_block++;
				block_in_file++;
			}
		}
		if (first_hole != blocks_per_page) {
			zero_user_segment(page, first_hole << blkbits,
					  PAGE_SIZE);
			if (first_hole == 0) {
				if (ext4_need_verity(inode, page->index) &&
				    !fsverity_verify_page(page))
					goto set_error_page;
				SetPageUptodate(page);
				unlock_page(page);
				goto next_page;
			}
		} else if (fully_mapped) {
			SetPageMappedToDisk(page);
		}
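
		/*
		 * A fully mapped single-block page may already be cached by
		 * cleancache; if the copy is found, mark the page uptodate
		 * and take the "confused" path, which just unlocks it.
		 */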
		if (fully_mapped && blocks_per_page == 1 &&
		    !PageUptodate(page) && cleancache_get_page(page) == 0) {
			SetPageUptodate(page);
			goto confused;
		}

		/*
		 * This page will go to BIO.  Do we need to send this
		 * BIO off first?
		 */
		if (bio && (last_block_in_bio != blocks[0] - 1)) {
		submit_and_realloc:
			submit_bio(bio);
			bio = NULL;
		}
		if (bio == NULL) {
			/*
			 * bio_alloc will _always_ be able to allocate a bio if
			 * __GFP_DIRECT_RECLAIM is set, see bio_alloc_bioset().
			 */
			bio = bio_alloc(GFP_KERNEL,
					min_t(int, nr_pages, BIO_MAX_PAGES));
			ext4_set_bio_post_read_ctx(bio, inode, page->index);
			bio_set_dev(bio, bdev);
			bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
			bio->bi_end_io = mpage_end_io;
			bio_set_op_attrs(bio, REQ_OP_READ,
						rac ? REQ_RAHEAD : 0);
		}
		length = first_hole << blkbits;
		if (bio_add_page(bio, page, length, 0) < length)
			goto submit_and_realloc;

		if (((map.m_flags & EXT4_MAP_BOUNDARY) &&
		     (relative_block == map.m_len)) ||
		    (first_hole != blocks_per_page)) {
			submit_bio(bio);
			bio = NULL;
		} else
			last_block_in_bio = blocks[blocks_per_page - 1];
		goto next_page;
	confused:
		if (bio) {
			submit_bio(bio);
			bio = NULL;
		}
		if (!PageUptodate(page))
			block_read_full_page(page, ext4_get_block);
		else
			unlock_page(page);
	next_page:
		if (rac)
			put_page(page);
	}
	if (bio)
		submit_bio(bio);
	return 0;
}
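
/*
 * Allocate the slab cache and mempool used for bio_post_read_ctx objects;
 * called once during ext4 initialization.
 */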
int __init ext4_init_post_read_processing(void)
{
	bio_post_read_ctx_cache =
		kmem_cache_create("ext4_bio_post_read_ctx",
				  sizeof(struct bio_post_read_ctx), 0, 0, NULL);
	if (!bio_post_read_ctx_cache)
		goto fail;
	bio_post_read_ctx_pool =
		mempool_create_slab_pool(NUM_PREALLOC_POST_READ_CTXS,
					 bio_post_read_ctx_cache);
	if (!bio_post_read_ctx_pool)
		goto fail_free_cache;
	return 0;

fail_free_cache:
	kmem_cache_destroy(bio_post_read_ctx_cache);
fail:
	return -ENOMEM;
}
void ext4_exit_post_read_processing(void)
{
	mempool_destroy(bio_post_read_ctx_pool);
	kmem_cache_destroy(bio_post_read_ctx_cache);
}