1 // SPDX-License-Identifier: GPL-2.0
/*
 * Utility functions for file contents encryption/decryption on
 * block device-based filesystems.
 *
 * Copyright (C) 2015, Google, Inc.
 * Copyright (C) 2015, Motorola Mobility
 */
10 #include <linux/pagemap.h>
11 #include <linux/module.h>
12 #include <linux/bio.h>
13 #include <linux/namei.h>
14 #include "fscrypt_private.h"
17 * fscrypt_decrypt_bio() - decrypt the contents of a bio
18 * @bio: the bio to decrypt
20 * Decrypt the contents of a "read" bio following successful completion of the
21 * underlying disk read. The bio must be reading a whole number of blocks of an
22 * encrypted file directly into the page cache. If the bio is reading the
23 * ciphertext into bounce pages instead of the page cache (for example, because
24 * the file is also compressed, so decompression is required after decryption),
25 * then this function isn't applicable. This function may sleep, so it must be
26 * called from a workqueue rather than from the bio's bi_end_io callback.
28 * Return: %true on success; %false on failure. On failure, bio->bi_status is
29 * also set to an error status.
31 bool fscrypt_decrypt_bio(struct bio
*bio
)
35 bio_for_each_folio_all(fi
, bio
) {
36 int err
= fscrypt_decrypt_pagecache_blocks(fi
.folio
, fi
.length
,
40 bio
->bi_status
= errno_to_blk_status(err
);
46 EXPORT_SYMBOL(fscrypt_decrypt_bio
);
48 static int fscrypt_zeroout_range_inline_crypt(const struct inode
*inode
,
49 pgoff_t lblk
, sector_t pblk
,
52 const unsigned int blockbits
= inode
->i_blkbits
;
53 const unsigned int blocks_per_page
= 1 << (PAGE_SHIFT
- blockbits
);
58 /* This always succeeds since __GFP_DIRECT_RECLAIM is set. */
59 bio
= bio_alloc(inode
->i_sb
->s_bdev
, BIO_MAX_VECS
, REQ_OP_WRITE
,
63 unsigned int blocks_this_page
= min(len
, blocks_per_page
);
64 unsigned int bytes_this_page
= blocks_this_page
<< blockbits
;
67 fscrypt_set_bio_crypt_ctx(bio
, inode
, lblk
, GFP_NOFS
);
68 bio
->bi_iter
.bi_sector
=
69 pblk
<< (blockbits
- SECTOR_SHIFT
);
71 ret
= bio_add_page(bio
, ZERO_PAGE(0), bytes_this_page
, 0);
72 if (WARN_ON_ONCE(ret
!= bytes_this_page
)) {
77 len
-= blocks_this_page
;
78 lblk
+= blocks_this_page
;
79 pblk
+= blocks_this_page
;
80 if (num_pages
== BIO_MAX_VECS
|| !len
||
81 !fscrypt_mergeable_bio(bio
, inode
, lblk
)) {
82 err
= submit_bio_wait(bio
);
85 bio_reset(bio
, inode
->i_sb
->s_bdev
, REQ_OP_WRITE
);
95 * fscrypt_zeroout_range() - zero out a range of blocks in an encrypted file
96 * @inode: the file's inode
97 * @lblk: the first file logical block to zero out
98 * @pblk: the first filesystem physical block to zero out
99 * @len: number of blocks to zero out
101 * Zero out filesystem blocks in an encrypted regular file on-disk, i.e. write
102 * ciphertext blocks which decrypt to the all-zeroes block. The blocks must be
103 * both logically and physically contiguous. It's also assumed that the
104 * filesystem only uses a single block device, ->s_bdev.
106 * Note that since each block uses a different IV, this involves writing a
107 * different ciphertext to each block; we can't simply reuse the same one.
109 * Return: 0 on success; -errno on failure.
111 int fscrypt_zeroout_range(const struct inode
*inode
, pgoff_t lblk
,
112 sector_t pblk
, unsigned int len
)
114 const struct fscrypt_inode_info
*ci
= inode
->i_crypt_info
;
115 const unsigned int du_bits
= ci
->ci_data_unit_bits
;
116 const unsigned int du_size
= 1U << du_bits
;
117 const unsigned int du_per_page_bits
= PAGE_SHIFT
- du_bits
;
118 const unsigned int du_per_page
= 1U << du_per_page_bits
;
119 u64 du_index
= (u64
)lblk
<< (inode
->i_blkbits
- du_bits
);
120 u64 du_remaining
= (u64
)len
<< (inode
->i_blkbits
- du_bits
);
121 sector_t sector
= pblk
<< (inode
->i_blkbits
- SECTOR_SHIFT
);
122 struct page
*pages
[16]; /* write up to 16 pages at a time */
123 unsigned int nr_pages
;
132 if (fscrypt_inode_uses_inline_crypto(inode
))
133 return fscrypt_zeroout_range_inline_crypt(inode
, lblk
, pblk
,
136 BUILD_BUG_ON(ARRAY_SIZE(pages
) > BIO_MAX_VECS
);
137 nr_pages
= min_t(u64
, ARRAY_SIZE(pages
),
138 (du_remaining
+ du_per_page
- 1) >> du_per_page_bits
);
141 * We need at least one page for ciphertext. Allocate the first one
142 * from a mempool, with __GFP_DIRECT_RECLAIM set so that it can't fail.
144 * Any additional page allocations are allowed to fail, as they only
145 * help performance, and waiting on the mempool for them could deadlock.
147 for (i
= 0; i
< nr_pages
; i
++) {
148 pages
[i
] = fscrypt_alloc_bounce_page(i
== 0 ? GFP_NOFS
:
149 GFP_NOWAIT
| __GFP_NOWARN
);
154 if (WARN_ON_ONCE(nr_pages
<= 0))
157 /* This always succeeds since __GFP_DIRECT_RECLAIM is set. */
158 bio
= bio_alloc(inode
->i_sb
->s_bdev
, nr_pages
, REQ_OP_WRITE
, GFP_NOFS
);
161 bio
->bi_iter
.bi_sector
= sector
;
166 err
= fscrypt_crypt_data_unit(ci
, FS_ENCRYPT
, du_index
,
167 ZERO_PAGE(0), pages
[i
],
173 sector
+= 1U << (du_bits
- SECTOR_SHIFT
);
176 if (offset
== PAGE_SIZE
|| du_remaining
== 0) {
177 ret
= bio_add_page(bio
, pages
[i
++], offset
, 0);
178 if (WARN_ON_ONCE(ret
!= offset
)) {
184 } while (i
!= nr_pages
&& du_remaining
!= 0);
186 err
= submit_bio_wait(bio
);
189 bio_reset(bio
, inode
->i_sb
->s_bdev
, REQ_OP_WRITE
);
190 } while (du_remaining
!= 0);
194 for (i
= 0; i
< nr_pages
; i
++)
195 fscrypt_free_bounce_page(pages
[i
]);
198 EXPORT_SYMBOL(fscrypt_zeroout_range
);