// SPDX-License-Identifier: GPL-2.0
/*
 * fs/ext4/verity.c: fs-verity support for ext4
 *
 * Copyright 2019 Google LLC
 */

/*
 * Implementation of fsverity_operations for ext4.
 *
 * ext4 stores the verity metadata (Merkle tree and fsverity_descriptor) past
 * the end of the file, starting at the first 64K boundary beyond i_size.  This
 * approach works because (a) verity files are readonly, and (b) pages fully
 * beyond i_size aren't visible to userspace but can be read/written internally
 * by ext4 with only some relatively small changes to ext4.  This approach
 * avoids having to depend on the EA_INODE feature and on rearchitecturing
 * ext4's xattr support to support paging multi-gigabyte xattrs into memory,
 * and to support encrypting xattrs.  Note that the verity metadata *must* be
 * encrypted when the file is, since it contains hashes of the plaintext data.
 *
 * Using a 64K boundary rather than a 4K one keeps things ready for
 * architectures with 64K pages, and it doesn't necessarily waste space on-disk
 * since there can be a hole between i_size and the start of the Merkle tree.
 */
#include <linux/quotaops.h>

#include "ext4.h"
#include "ext4_extents.h"
#include "ext4_jbd2.h"
32 static inline loff_t
ext4_verity_metadata_pos(const struct inode
*inode
)
34 return round_up(inode
->i_size
, 65536);
38 * Read some verity metadata from the inode. __vfs_read() can't be used because
39 * we need to read beyond i_size.
41 static int pagecache_read(struct inode
*inode
, void *buf
, size_t count
,
45 size_t n
= min_t(size_t, count
,
46 PAGE_SIZE
- offset_in_page(pos
));
50 page
= read_mapping_page(inode
->i_mapping
, pos
>> PAGE_SHIFT
,
55 addr
= kmap_atomic(page
);
56 memcpy(buf
, addr
+ offset_in_page(pos
), n
);
69 * Write some verity metadata to the inode for FS_IOC_ENABLE_VERITY.
70 * kernel_write() can't be used because the file descriptor is readonly.
72 static int pagecache_write(struct inode
*inode
, const void *buf
, size_t count
,
75 if (pos
+ count
> inode
->i_sb
->s_maxbytes
)
79 size_t n
= min_t(size_t, count
,
80 PAGE_SIZE
- offset_in_page(pos
));
86 res
= pagecache_write_begin(NULL
, inode
->i_mapping
, pos
, n
, 0,
91 addr
= kmap_atomic(page
);
92 memcpy(addr
+ offset_in_page(pos
), buf
, n
);
95 res
= pagecache_write_end(NULL
, inode
->i_mapping
, pos
, n
, n
,
109 static int ext4_begin_enable_verity(struct file
*filp
)
111 struct inode
*inode
= file_inode(filp
);
112 const int credits
= 2; /* superblock and inode for ext4_orphan_add() */
116 if (ext4_verity_in_progress(inode
))
120 * Since the file was opened readonly, we have to initialize the jbd
121 * inode and quotas here and not rely on ->open() doing it. This must
122 * be done before evicting the inline data.
125 err
= ext4_inode_attach_jinode(inode
);
129 err
= dquot_initialize(inode
);
133 err
= ext4_convert_inline_data(inode
);
137 if (!ext4_test_inode_flag(inode
, EXT4_INODE_EXTENTS
)) {
138 ext4_warning_inode(inode
,
139 "verity is only allowed on extent-based files");
144 * ext4 uses the last allocated block to find the verity descriptor, so
145 * we must remove any other blocks past EOF which might confuse things.
147 err
= ext4_truncate(inode
);
151 handle
= ext4_journal_start(inode
, EXT4_HT_INODE
, credits
);
153 return PTR_ERR(handle
);
155 err
= ext4_orphan_add(handle
, inode
);
157 ext4_set_inode_state(inode
, EXT4_STATE_VERITY_IN_PROGRESS
);
159 ext4_journal_stop(handle
);
164 * ext4 stores the verity descriptor beginning on the next filesystem block
165 * boundary after the Merkle tree. Then, the descriptor size is stored in the
166 * last 4 bytes of the last allocated filesystem block --- which is either the
167 * block in which the descriptor ends, or the next block after that if there
168 * weren't at least 4 bytes remaining.
170 * We can't simply store the descriptor in an xattr because it *must* be
171 * encrypted when ext4 encryption is used, but ext4 encryption doesn't encrypt
172 * xattrs. Also, if the descriptor includes a large signature blob it may be
173 * too large to store in an xattr without the EA_INODE feature.
175 static int ext4_write_verity_descriptor(struct inode
*inode
, const void *desc
,
176 size_t desc_size
, u64 merkle_tree_size
)
178 const u64 desc_pos
= round_up(ext4_verity_metadata_pos(inode
) +
179 merkle_tree_size
, i_blocksize(inode
));
180 const u64 desc_end
= desc_pos
+ desc_size
;
181 const __le32 desc_size_disk
= cpu_to_le32(desc_size
);
182 const u64 desc_size_pos
= round_up(desc_end
+ sizeof(desc_size_disk
),
183 i_blocksize(inode
)) -
184 sizeof(desc_size_disk
);
187 err
= pagecache_write(inode
, desc
, desc_size
, desc_pos
);
191 return pagecache_write(inode
, &desc_size_disk
, sizeof(desc_size_disk
),
195 static int ext4_end_enable_verity(struct file
*filp
, const void *desc
,
196 size_t desc_size
, u64 merkle_tree_size
)
198 struct inode
*inode
= file_inode(filp
);
199 const int credits
= 2; /* superblock and inode for ext4_orphan_del() */
205 /* Succeeded; write the verity descriptor. */
206 err
= ext4_write_verity_descriptor(inode
, desc
, desc_size
,
209 /* Write all pages before clearing VERITY_IN_PROGRESS. */
211 err
= filemap_write_and_wait(inode
->i_mapping
);
214 /* If we failed, truncate anything we wrote past i_size. */
215 if (desc
== NULL
|| err
)
216 ext4_truncate(inode
);
219 * We must always clean up by clearing EXT4_STATE_VERITY_IN_PROGRESS and
220 * deleting the inode from the orphan list, even if something failed.
221 * If everything succeeded, we'll also set the verity bit in the same
225 ext4_clear_inode_state(inode
, EXT4_STATE_VERITY_IN_PROGRESS
);
227 handle
= ext4_journal_start(inode
, EXT4_HT_INODE
, credits
);
228 if (IS_ERR(handle
)) {
229 ext4_orphan_del(NULL
, inode
);
230 return PTR_ERR(handle
);
233 err2
= ext4_orphan_del(handle
, inode
);
237 if (desc
!= NULL
&& !err
) {
238 struct ext4_iloc iloc
;
240 err
= ext4_reserve_inode_write(handle
, inode
, &iloc
);
243 ext4_set_inode_flag(inode
, EXT4_INODE_VERITY
);
244 ext4_set_inode_flags(inode
);
245 err
= ext4_mark_iloc_dirty(handle
, inode
, &iloc
);
248 ext4_journal_stop(handle
);
252 static int ext4_get_verity_descriptor_location(struct inode
*inode
,
253 size_t *desc_size_ret
,
256 struct ext4_ext_path
*path
;
257 struct ext4_extent
*last_extent
;
260 __le32 desc_size_disk
;
266 * Descriptor size is in last 4 bytes of last allocated block.
267 * See ext4_write_verity_descriptor().
270 if (!ext4_test_inode_flag(inode
, EXT4_INODE_EXTENTS
)) {
271 EXT4_ERROR_INODE(inode
, "verity file doesn't use extents");
272 return -EFSCORRUPTED
;
275 path
= ext4_find_extent(inode
, EXT_MAX_BLOCKS
- 1, NULL
, 0);
277 return PTR_ERR(path
);
279 last_extent
= path
[path
->p_depth
].p_ext
;
281 EXT4_ERROR_INODE(inode
, "verity file has no extents");
282 ext4_ext_drop_refs(path
);
284 return -EFSCORRUPTED
;
287 end_lblk
= le32_to_cpu(last_extent
->ee_block
) +
288 ext4_ext_get_actual_len(last_extent
);
289 desc_size_pos
= (u64
)end_lblk
<< inode
->i_blkbits
;
290 ext4_ext_drop_refs(path
);
293 if (desc_size_pos
< sizeof(desc_size_disk
))
295 desc_size_pos
-= sizeof(desc_size_disk
);
297 err
= pagecache_read(inode
, &desc_size_disk
, sizeof(desc_size_disk
),
301 desc_size
= le32_to_cpu(desc_size_disk
);
304 * The descriptor is stored just before the desc_size_disk, but starting
305 * on a filesystem block boundary.
308 if (desc_size
> INT_MAX
|| desc_size
> desc_size_pos
)
311 desc_pos
= round_down(desc_size_pos
- desc_size
, i_blocksize(inode
));
312 if (desc_pos
< ext4_verity_metadata_pos(inode
))
315 *desc_size_ret
= desc_size
;
316 *desc_pos_ret
= desc_pos
;
320 EXT4_ERROR_INODE(inode
, "verity file corrupted; can't find descriptor");
321 return -EFSCORRUPTED
;
324 static int ext4_get_verity_descriptor(struct inode
*inode
, void *buf
,
327 size_t desc_size
= 0;
331 err
= ext4_get_verity_descriptor_location(inode
, &desc_size
, &desc_pos
);
336 if (desc_size
> buf_size
)
338 err
= pagecache_read(inode
, buf
, desc_size
, desc_pos
);
346 * Prefetch some pages from the file's Merkle tree.
348 * This is basically a stripped-down version of __do_page_cache_readahead()
349 * which works on pages past i_size.
351 static void ext4_merkle_tree_readahead(struct address_space
*mapping
,
352 pgoff_t start_index
, unsigned long count
)
355 unsigned int nr_pages
= 0;
358 struct blk_plug plug
;
360 for (index
= start_index
; index
< start_index
+ count
; index
++) {
361 page
= xa_load(&mapping
->i_pages
, index
);
362 if (!page
|| xa_is_value(page
)) {
363 page
= __page_cache_alloc(readahead_gfp_mask(mapping
));
367 list_add(&page
->lru
, &pages
);
371 blk_start_plug(&plug
);
372 ext4_mpage_readpages(mapping
, &pages
, NULL
, nr_pages
, true);
373 blk_finish_plug(&plug
);
376 static struct page
*ext4_read_merkle_tree_page(struct inode
*inode
,
378 unsigned long num_ra_pages
)
382 index
+= ext4_verity_metadata_pos(inode
) >> PAGE_SHIFT
;
384 page
= find_get_page_flags(inode
->i_mapping
, index
, FGP_ACCESSED
);
385 if (!page
|| !PageUptodate(page
)) {
388 else if (num_ra_pages
> 1)
389 ext4_merkle_tree_readahead(inode
->i_mapping
, index
,
391 page
= read_mapping_page(inode
->i_mapping
, index
, NULL
);
396 static int ext4_write_merkle_tree_block(struct inode
*inode
, const void *buf
,
397 u64 index
, int log_blocksize
)
399 loff_t pos
= ext4_verity_metadata_pos(inode
) + (index
<< log_blocksize
);
401 return pagecache_write(inode
, buf
, 1 << log_blocksize
, pos
);
404 const struct fsverity_operations ext4_verityops
= {
405 .begin_enable_verity
= ext4_begin_enable_verity
,
406 .end_enable_verity
= ext4_end_enable_verity
,
407 .get_verity_descriptor
= ext4_get_verity_descriptor
,
408 .read_merkle_tree_page
= ext4_read_merkle_tree_page
,
409 .write_merkle_tree_block
= ext4_write_merkle_tree_block
,