// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Red Hat, Inc.
 *
 * Author: Mikulas Patocka <mpatocka@redhat.com>
 *
 * Based on Chromium dm-verity driver (C) 2011 The Chromium OS Authors
 *
 * In the file "/sys/module/dm_verity/parameters/prefetch_cluster" you can set
 * the default prefetch value. Data are read in "prefetch_cluster" chunks from
 * the hash device. Setting this greatly improves performance when data and
 * hash are on the same disk on different partitions on devices with poor
 * random access behavior.
 */
16 #include "dm-verity.h"
17 #include "dm-verity-fec.h"
18 #include "dm-verity-verify-sig.h"
20 #include <linux/module.h>
21 #include <linux/reboot.h>
22 #include <linux/scatterlist.h>
23 #include <linux/string.h>
24 #include <linux/jump_label.h>
25 #include <linux/security.h>
#define DM_MSG_PREFIX			"verity"

#define DM_VERITY_ENV_LENGTH		42
#define DM_VERITY_ENV_VAR_NAME		"DM_VERITY_ERR_BLOCK_NR"

#define DM_VERITY_DEFAULT_PREFETCH_SIZE	262144

#define DM_VERITY_MAX_CORRUPTED_ERRS	100

#define DM_VERITY_OPT_LOGGING		"ignore_corruption"
#define DM_VERITY_OPT_RESTART		"restart_on_corruption"
#define DM_VERITY_OPT_PANIC		"panic_on_corruption"
#define DM_VERITY_OPT_ERROR_RESTART	"restart_on_error"
#define DM_VERITY_OPT_ERROR_PANIC	"panic_on_error"
#define DM_VERITY_OPT_IGN_ZEROES	"ignore_zero_blocks"
#define DM_VERITY_OPT_AT_MOST_ONCE	"check_at_most_once"
#define DM_VERITY_OPT_TASKLET_VERIFY	"try_verify_in_tasklet"

#define DM_VERITY_OPTS_MAX		(5 + DM_VERITY_OPTS_FEC + \
					 DM_VERITY_ROOT_HASH_VERIFICATION_OPTS)
static unsigned int dm_verity_prefetch_cluster = DM_VERITY_DEFAULT_PREFETCH_SIZE;

module_param_named(prefetch_cluster, dm_verity_prefetch_cluster, uint, 0644);

static DEFINE_STATIC_KEY_FALSE(use_bh_wq_enabled);

/* Is at least one dm-verity instance using ahash_tfm instead of shash_tfm? */
static DEFINE_STATIC_KEY_FALSE(ahash_enabled);
struct dm_verity_prefetch_work {
	struct work_struct work;
	struct dm_verity *v;
	unsigned short ioprio;
	sector_t block;
	unsigned int n_blocks;
};
/*
 * Auxiliary structure appended to each dm-bufio buffer. If the value
 * hash_verified is nonzero, the hash of the block has been verified.
 *
 * The variable hash_verified is set to 0 when allocating the buffer, then
 * it can be changed to 1 and it is never reset to 0 again.
 *
 * There is no lock around this value; a race condition can at worst cause
 * multiple processes to verify the hash of the same buffer simultaneously
 * and write 1 to hash_verified simultaneously.
 * This condition is harmless, so we don't need locking.
 */
struct buffer_aux {
	int hash_verified;
};
/*
 * Initialize struct buffer_aux for a freshly created buffer.
 */
static void dm_bufio_alloc_callback(struct dm_buffer *buf)
{
	struct buffer_aux *aux = dm_bufio_get_aux_data(buf);

	aux->hash_verified = 0;
}
/*
 * Translate input sector number to the sector number on the target device.
 */
static sector_t verity_map_sector(struct dm_verity *v, sector_t bi_sector)
{
	return dm_target_offset(v->ti, bi_sector);
}
/*
 * Return hash position of a specified block at a specified tree level
 * (0 is the lowest level).
 * The lowest "hash_per_block_bits"-bits of the result denote hash position
 * inside a hash block. The remaining bits denote location of the hash block.
 */
static sector_t verity_position_at_level(struct dm_verity *v, sector_t block,
					 int level)
{
	return block >> (level * v->hash_per_block_bits);
}
static int verity_ahash_update(struct dm_verity *v, struct ahash_request *req,
			       const u8 *data, size_t len,
			       struct crypto_wait *wait)
{
	struct scatterlist sg;

	if (likely(!is_vmalloc_addr(data))) {
		sg_init_one(&sg, data, len);
		ahash_request_set_crypt(req, &sg, NULL, len);
		return crypto_wait_req(crypto_ahash_update(req), wait);
	}

	do {
		int r;
		size_t this_step = min_t(size_t, len, PAGE_SIZE - offset_in_page(data));

		flush_kernel_vmap_range((void *)data, this_step);
		sg_init_table(&sg, 1);
		sg_set_page(&sg, vmalloc_to_page(data), this_step, offset_in_page(data));
		ahash_request_set_crypt(req, &sg, NULL, this_step);
		r = crypto_wait_req(crypto_ahash_update(req), wait);
		if (unlikely(r))
			return r;
		data += this_step;
		len -= this_step;
	} while (len);

	return 0;
}
/*
 * Wrapper for crypto_ahash_init, which handles verity salting.
 */
static int verity_ahash_init(struct dm_verity *v, struct ahash_request *req,
			     struct crypto_wait *wait, bool may_sleep)
{
	int r;

	ahash_request_set_tfm(req, v->ahash_tfm);
	ahash_request_set_callback(req,
		may_sleep ? CRYPTO_TFM_REQ_MAY_SLEEP | CRYPTO_TFM_REQ_MAY_BACKLOG : 0,
		crypto_req_done, (void *)wait);
	crypto_init_wait(wait);

	r = crypto_wait_req(crypto_ahash_init(req), wait);

	if (unlikely(r < 0)) {
		DMERR("crypto_ahash_init failed: %d", r);
		return r;
	}

	if (likely(v->salt_size && (v->version >= 1)))
		r = verity_ahash_update(v, req, v->salt, v->salt_size, wait);

	return r;
}
static int verity_ahash_final(struct dm_verity *v, struct ahash_request *req,
			      u8 *digest, struct crypto_wait *wait)
{
	int r;

	if (unlikely(v->salt_size && (!v->version))) {
		r = verity_ahash_update(v, req, v->salt, v->salt_size, wait);
		if (r < 0) {
			DMERR("%s failed updating salt: %d", __func__, r);
			goto out;
		}
	}

	ahash_request_set_crypt(req, NULL, digest, 0);
	r = crypto_wait_req(crypto_ahash_final(req), wait);
out:
	return r;
}
int verity_hash(struct dm_verity *v, struct dm_verity_io *io,
		const u8 *data, size_t len, u8 *digest, bool may_sleep)
{
	int r;

	if (static_branch_unlikely(&ahash_enabled) && !v->shash_tfm) {
		struct ahash_request *req = verity_io_hash_req(v, io);
		struct crypto_wait wait;

		r = verity_ahash_init(v, req, &wait, may_sleep) ?:
		    verity_ahash_update(v, req, data, len, &wait) ?:
		    verity_ahash_final(v, req, digest, &wait);
	} else {
		struct shash_desc *desc = verity_io_hash_req(v, io);

		desc->tfm = v->shash_tfm;
		r = crypto_shash_import(desc, v->initial_hashstate) ?:
		    crypto_shash_finup(desc, data, len, digest);
	}
	if (unlikely(r))
		DMERR("Error hashing block: %d", r);
	return r;
}
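/*
 * Note: in the shash case above, each per-block hash starts from
 * v->initial_hashstate, which already contains the salt (see
 * verity_setup_salt_and_hashstate()), so only the block data itself is
 * hashed here. The ahash path instead feeds the salt through
 * verity_ahash_init()/verity_ahash_final(), matching the on-disk format
 * version (salt prepended for version >= 1, appended for version 0).
 */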
static void verity_hash_at_level(struct dm_verity *v, sector_t block, int level,
				 sector_t *hash_block, unsigned int *offset)
{
	sector_t position = verity_position_at_level(v, block, level);
	unsigned int idx;

	*hash_block = v->hash_level_block[level] + (position >> v->hash_per_block_bits);

	if (!offset)
		return;

	idx = position & ((1 << v->hash_per_block_bits) - 1);
	if (!v->version)
		*offset = idx * v->digest_size;
	else
		*offset = idx << (v->hash_dev_block_bits - v->hash_per_block_bits);
}
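/*
 * Illustrative example (not from the original source): with 4096-byte hash
 * blocks and a 32-byte digest, hash_per_block_bits = __fls(4096 / 32) = 7,
 * i.e. 128 hashes per hash block. For data block 1000 at level 0,
 * position = 1000, *hash_block = v->hash_level_block[0] + (1000 >> 7)
 * = hash_level_block[0] + 7, idx = 1000 & 127 = 104, and with format
 * version 1 the offset is 104 << (12 - 7) = 3328 bytes into that hash block.
 */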
/*
 * Handle verification errors.
 */
static int verity_handle_err(struct dm_verity *v, enum verity_block_type type,
			     unsigned long long block)
{
	char verity_env[DM_VERITY_ENV_LENGTH];
	char *envp[] = { verity_env, NULL };
	const char *type_str = "";
	struct mapped_device *md = dm_table_get_md(v->ti->table);

	/* Corruption should be visible in device status in all modes */
	v->hash_failed = true;

	if (v->corrupted_errs >= DM_VERITY_MAX_CORRUPTED_ERRS)
		goto out;

	v->corrupted_errs++;

	switch (type) {
	case DM_VERITY_BLOCK_TYPE_DATA:
		type_str = "data";
		break;
	case DM_VERITY_BLOCK_TYPE_METADATA:
		type_str = "metadata";
		break;
	default:
		BUG();
	}

	DMERR_LIMIT("%s: %s block %llu is corrupted", v->data_dev->name,
		    type_str, block);

	if (v->corrupted_errs == DM_VERITY_MAX_CORRUPTED_ERRS) {
		DMERR("%s: reached maximum errors", v->data_dev->name);
		dm_audit_log_target(DM_MSG_PREFIX, "max-corrupted-errors", v->ti, 0);
	}

	snprintf(verity_env, DM_VERITY_ENV_LENGTH, "%s=%d,%llu",
		 DM_VERITY_ENV_VAR_NAME, type, block);

	kobject_uevent_env(&disk_to_dev(dm_disk(md))->kobj, KOBJ_CHANGE, envp);

out:
	if (v->mode == DM_VERITY_MODE_LOGGING)
		return 0;

	if (v->mode == DM_VERITY_MODE_RESTART)
		kernel_restart("dm-verity device corrupted");

	if (v->mode == DM_VERITY_MODE_PANIC)
		panic("dm-verity device corrupted");

	return 1;
}
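/*
 * The uevent emitted above carries the corruption details in the environment
 * string "DM_VERITY_ERR_BLOCK_NR=<block type>,<block number>", so userspace
 * can react to corruption even when the configured mode only logs it.
 */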
/*
 * Verify hash of a metadata block pertaining to the specified data block
 * ("block" argument) at a specified level ("level" argument).
 *
 * On successful return, verity_io_want_digest(v, io) contains the hash value
 * for a lower tree level or for the data block (if we're at the lowest level).
 *
 * If "skip_unverified" is true, unverified buffer is skipped and 1 is returned.
 * If "skip_unverified" is false, unverified buffer is hashed and verified
 * against current value of verity_io_want_digest(v, io).
 */
static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io,
			       sector_t block, int level, bool skip_unverified,
			       u8 *want_digest)
{
	struct dm_buffer *buf;
	struct buffer_aux *aux;
	u8 *data;
	int r;
	sector_t hash_block;
	unsigned int offset;
	struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);

	verity_hash_at_level(v, block, level, &hash_block, &offset);

	if (static_branch_unlikely(&use_bh_wq_enabled) && io->in_bh) {
		data = dm_bufio_get(v->bufio, hash_block, &buf);
		if (data == NULL) {
			/*
			 * In tasklet and the hash was not in the bufio cache.
			 * Return early and resume execution from a work-queue
			 * to read the hash from disk.
			 */
			return -EAGAIN;
		}
	} else {
		data = dm_bufio_read_with_ioprio(v->bufio, hash_block,
						 &buf, bio_prio(bio));
	}

	if (IS_ERR(data))
		return PTR_ERR(data);

	aux = dm_bufio_get_aux_data(buf);

	if (!aux->hash_verified) {
		if (skip_unverified) {
			r = 1;
			goto release_ret_r;
		}

		r = verity_hash(v, io, data, 1 << v->hash_dev_block_bits,
				verity_io_real_digest(v, io), !io->in_bh);
		if (unlikely(r < 0))
			goto release_ret_r;

		if (likely(memcmp(verity_io_real_digest(v, io), want_digest,
				  v->digest_size) == 0))
			aux->hash_verified = 1;
		else if (static_branch_unlikely(&use_bh_wq_enabled) && io->in_bh) {
			/*
			 * Error handling code (FEC included) cannot be run in a
			 * tasklet since it may sleep, so fallback to work-queue.
			 */
			r = -EAGAIN;
			goto release_ret_r;
		} else if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_METADATA,
					     hash_block, data) == 0)
			aux->hash_verified = 1;
		else if (verity_handle_err(v,
					   DM_VERITY_BLOCK_TYPE_METADATA,
					   hash_block)) {
			io->had_mismatch = true;
			bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
			dm_audit_log_bio(DM_MSG_PREFIX, "verify-metadata", bio,
					 block, 0);
			r = -EIO;
			goto release_ret_r;
		}
	}

	data += offset;
	memcpy(want_digest, data, v->digest_size);
	r = 0;

release_ret_r:
	dm_bufio_release(buf);
	return r;
}
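/*
 * Summary of verity_verify_level() return values, as used by the callers
 * below: 0 means the wanted digest was copied out of a verified hash block,
 * 1 means skip_unverified was set and the buffer had not been verified yet,
 * -EAGAIN asks the caller to retry from the workqueue (BH path only), and
 * other negative values are hard errors.
 */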
/*
 * Find a hash for a given block, write it to digest and verify the integrity
 * of the hash tree if necessary.
 */
int verity_hash_for_block(struct dm_verity *v, struct dm_verity_io *io,
			  sector_t block, u8 *digest, bool *is_zero)
{
	int r = 0, i;

	if (likely(v->levels)) {
		/*
		 * First, we try to get the requested hash for
		 * the current block. If the hash block itself is
		 * verified, zero is returned. If it isn't, this
		 * function returns 1 and we fall back to whole
		 * chain verification.
		 */
		r = verity_verify_level(v, io, block, 0, true, digest);
		if (likely(r <= 0))
			goto out;
	}

	memcpy(digest, v->root_digest, v->digest_size);

	for (i = v->levels - 1; i >= 0; i--) {
		r = verity_verify_level(v, io, block, i, false, digest);
		if (unlikely(r))
			goto out;
	}
out:
	if (!r && v->zero_digest)
		*is_zero = !memcmp(v->zero_digest, digest, v->digest_size);
	else
		*is_zero = false;

	return r;
}
static noinline int verity_recheck(struct dm_verity *v, struct dm_verity_io *io,
				   sector_t cur_block, u8 *dest)
{
	struct page *page;
	void *buffer;
	int r;
	struct dm_io_request io_req;
	struct dm_io_region io_loc;

	page = mempool_alloc(&v->recheck_pool, GFP_NOIO);
	buffer = page_to_virt(page);

	io_req.bi_opf = REQ_OP_READ;
	io_req.mem.type = DM_IO_KMEM;
	io_req.mem.ptr.addr = buffer;
	io_req.notify.fn = NULL;
	io_req.client = v->io;
	io_loc.bdev = v->data_dev->bdev;
	io_loc.sector = cur_block << (v->data_dev_block_bits - SECTOR_SHIFT);
	io_loc.count = 1 << (v->data_dev_block_bits - SECTOR_SHIFT);
	r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
	if (unlikely(r))
		goto free_ret;

	r = verity_hash(v, io, buffer, 1 << v->data_dev_block_bits,
			verity_io_real_digest(v, io), true);
	if (unlikely(r))
		goto free_ret;

	if (memcmp(verity_io_real_digest(v, io),
		   verity_io_want_digest(v, io), v->digest_size)) {
		r = -EIO;
		goto free_ret;
	}

	memcpy(dest, buffer, 1 << v->data_dev_block_bits);
	r = 0;
free_ret:
	mempool_free(page, &v->recheck_pool);

	return r;
}
static int verity_handle_data_hash_mismatch(struct dm_verity *v,
					    struct dm_verity_io *io,
					    struct bio *bio, sector_t blkno,
					    u8 *data)
{
	if (static_branch_unlikely(&use_bh_wq_enabled) && io->in_bh) {
		/*
		 * Error handling code (FEC included) cannot be run in the
		 * BH workqueue, so fallback to a standard workqueue.
		 */
		return -EAGAIN;
	}
	if (verity_recheck(v, io, blkno, data) == 0) {
		if (v->validated_blocks)
			set_bit(blkno, v->validated_blocks);
		return 0;
	}
#if defined(CONFIG_DM_VERITY_FEC)
	if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_DATA, blkno,
			      data) == 0)
		return 0;
#endif
	if (bio->bi_opf & REQ_RAHEAD)
		return -EIO; /* Error correction failed; Just return error */

	if (verity_handle_err(v, DM_VERITY_BLOCK_TYPE_DATA, blkno)) {
		io->had_mismatch = true;
		dm_audit_log_bio(DM_MSG_PREFIX, "verify-data", bio, blkno, 0);
		return -EIO;
	}

	return 0;
}
/*
 * Verify one "dm_verity_io" structure.
 */
static int verity_verify_io(struct dm_verity_io *io)
{
	struct dm_verity *v = io->v;
	const unsigned int block_size = 1 << v->data_dev_block_bits;
	struct bvec_iter iter_copy;
	struct bvec_iter *iter;
	struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
	unsigned int b;

	if (static_branch_unlikely(&use_bh_wq_enabled) && io->in_bh) {
		/*
		 * Copy the iterator in case we need to restart
		 * verification in a work-queue.
		 */
		iter_copy = io->iter;
		iter = &iter_copy;
	} else
		iter = &io->iter;

	for (b = 0; b < io->n_blocks;
	     b++, bio_advance_iter(bio, iter, block_size)) {
		int r;
		sector_t cur_block = io->block + b;
		bool is_zero;
		struct bio_vec bv;
		void *data;

		if (v->validated_blocks && bio->bi_status == BLK_STS_OK &&
		    likely(test_bit(cur_block, v->validated_blocks)))
			continue;

		r = verity_hash_for_block(v, io, cur_block,
					  verity_io_want_digest(v, io),
					  &is_zero);
		if (unlikely(r < 0))
			return r;

		bv = bio_iter_iovec(bio, *iter);
		if (unlikely(bv.bv_len < block_size)) {
			/*
			 * Data block spans pages. This should not happen,
			 * since dm-verity sets dma_alignment to the data block
			 * size minus 1, and dm-verity also doesn't allow the
			 * data block size to be greater than PAGE_SIZE.
			 */
			DMERR_LIMIT("unaligned io (data block spans pages)");
			return -EIO;
		}

		data = bvec_kmap_local(&bv);

		if (is_zero) {
			/*
			 * If we expect a zero block, don't validate, just
			 * return zeros.
			 */
			memset(data, 0, block_size);
			kunmap_local(data);
			continue;
		}

		r = verity_hash(v, io, data, block_size,
				verity_io_real_digest(v, io), !io->in_bh);
		if (unlikely(r < 0)) {
			kunmap_local(data);
			return r;
		}

		if (likely(memcmp(verity_io_real_digest(v, io),
				  verity_io_want_digest(v, io), v->digest_size) == 0)) {
			if (v->validated_blocks)
				set_bit(cur_block, v->validated_blocks);
			kunmap_local(data);
			continue;
		}

		r = verity_handle_data_hash_mismatch(v, io, bio, cur_block,
						     data);
		kunmap_local(data);
		if (unlikely(r))
			return r;
	}

	return 0;
}
/*
 * Skip verity work in response to I/O error when system is shutting down.
 */
static inline bool verity_is_system_shutting_down(void)
{
	return system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF
		|| system_state == SYSTEM_RESTART;
}

static void restart_io_error(struct work_struct *w)
{
	kernel_restart("dm-verity device has I/O error");
}
/*
 * End one "io" structure with a given error.
 */
static void verity_finish_io(struct dm_verity_io *io, blk_status_t status)
{
	struct dm_verity *v = io->v;
	struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);

	bio->bi_end_io = io->orig_bi_end_io;
	bio->bi_status = status;

	if (!static_branch_unlikely(&use_bh_wq_enabled) || !io->in_bh)
		verity_fec_finish_io(io);

	if (unlikely(status != BLK_STS_OK) &&
	    unlikely(!(bio->bi_opf & REQ_RAHEAD)) &&
	    !io->had_mismatch &&
	    !verity_is_system_shutting_down()) {
		if (v->error_mode == DM_VERITY_MODE_PANIC) {
			panic("dm-verity device has I/O error");
		}
		if (v->error_mode == DM_VERITY_MODE_RESTART) {
			static DECLARE_WORK(restart_work, restart_io_error);

			queue_work(v->verify_wq, &restart_work);
			/*
			 * We deliberately don't call bio_endio here, because
			 * the machine will be restarted anyway.
			 */
			return;
		}
	}

	bio_endio(bio);
}
static void verity_work(struct work_struct *w)
{
	struct dm_verity_io *io = container_of(w, struct dm_verity_io, work);

	io->in_bh = false;

	verity_finish_io(io, errno_to_blk_status(verity_verify_io(io)));
}

static void verity_bh_work(struct work_struct *w)
{
	struct dm_verity_io *io = container_of(w, struct dm_verity_io, bh_work);
	int err;

	io->in_bh = true;
	err = verity_verify_io(io);
	if (err == -EAGAIN || err == -ENOMEM) {
		/* fallback to retrying with work-queue */
		INIT_WORK(&io->work, verity_work);
		queue_work(io->v->verify_wq, &io->work);
		return;
	}

	verity_finish_io(io, errno_to_blk_status(err));
}
static void verity_end_io(struct bio *bio)
{
	struct dm_verity_io *io = bio->bi_private;

	if (bio->bi_status &&
	    (!verity_fec_is_enabled(io->v) ||
	     verity_is_system_shutting_down() ||
	     (bio->bi_opf & REQ_RAHEAD))) {
		verity_finish_io(io, bio->bi_status);
		return;
	}

	if (static_branch_unlikely(&use_bh_wq_enabled) && io->v->use_bh_wq) {
		INIT_WORK(&io->bh_work, verity_bh_work);
		queue_work(system_bh_wq, &io->bh_work);
	} else {
		INIT_WORK(&io->work, verity_work);
		queue_work(io->v->verify_wq, &io->work);
	}
}
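/*
 * Note that bios which already failed at the block layer are completed
 * directly above when FEC cannot help (FEC disabled, shutdown in progress,
 * or read-ahead); everything else is bounced to the BH workqueue or to
 * verify_wq for verification.
 */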
/*
 * Prefetch buffers for the specified io.
 * The root buffer is not prefetched, it is assumed that it will be cached
 * all the time.
 */
static void verity_prefetch_io(struct work_struct *work)
{
	struct dm_verity_prefetch_work *pw =
		container_of(work, struct dm_verity_prefetch_work, work);
	struct dm_verity *v = pw->v;
	int i;

	for (i = v->levels - 2; i >= 0; i--) {
		sector_t hash_block_start;
		sector_t hash_block_end;

		verity_hash_at_level(v, pw->block, i, &hash_block_start, NULL);
		verity_hash_at_level(v, pw->block + pw->n_blocks - 1, i, &hash_block_end, NULL);

		if (!i) {
			unsigned int cluster = READ_ONCE(dm_verity_prefetch_cluster);

			cluster >>= v->data_dev_block_bits;
			if (unlikely(!cluster))
				goto no_prefetch_cluster;

			if (unlikely(cluster & (cluster - 1)))
				cluster = 1 << __fls(cluster);

			hash_block_start &= ~(sector_t)(cluster - 1);
			hash_block_end |= cluster - 1;
			if (unlikely(hash_block_end >= v->hash_blocks))
				hash_block_end = v->hash_blocks - 1;
		}
no_prefetch_cluster:
		dm_bufio_prefetch_with_ioprio(v->bufio, hash_block_start,
					      hash_block_end - hash_block_start + 1,
					      pw->ioprio);
	}

	kfree(pw);
}
static void verity_submit_prefetch(struct dm_verity *v, struct dm_verity_io *io,
				   unsigned short ioprio)
{
	sector_t block = io->block;
	unsigned int n_blocks = io->n_blocks;
	struct dm_verity_prefetch_work *pw;

	if (v->validated_blocks) {
		while (n_blocks && test_bit(block, v->validated_blocks)) {
			block++;
			n_blocks--;
		}
		while (n_blocks && test_bit(block + n_blocks - 1,
					    v->validated_blocks))
			n_blocks--;
		if (!n_blocks)
			return;
	}

	pw = kmalloc(sizeof(struct dm_verity_prefetch_work),
		     GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);

	if (!pw)
		return;

	INIT_WORK(&pw->work, verity_prefetch_io);
	pw->v = v;
	pw->block = block;
	pw->n_blocks = n_blocks;
	pw->ioprio = ioprio;
	queue_work(v->verify_wq, &pw->work);
}
/*
 * Bio map function. It allocates dm_verity_io structure and bio vector and
 * fills them. Then it issues prefetches and the I/O.
 */
static int verity_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_verity *v = ti->private;
	struct dm_verity_io *io;

	bio_set_dev(bio, v->data_dev->bdev);
	bio->bi_iter.bi_sector = verity_map_sector(v, bio->bi_iter.bi_sector);

	if (((unsigned int)bio->bi_iter.bi_sector | bio_sectors(bio)) &
	    ((1 << (v->data_dev_block_bits - SECTOR_SHIFT)) - 1)) {
		DMERR_LIMIT("unaligned io");
		return DM_MAPIO_KILL;
	}

	if (bio_end_sector(bio) >>
	    (v->data_dev_block_bits - SECTOR_SHIFT) > v->data_blocks) {
		DMERR_LIMIT("io out of range");
		return DM_MAPIO_KILL;
	}

	if (bio_data_dir(bio) == WRITE)
		return DM_MAPIO_KILL;

	io = dm_per_bio_data(bio, ti->per_io_data_size);
	io->v = v;
	io->orig_bi_end_io = bio->bi_end_io;
	io->block = bio->bi_iter.bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT);
	io->n_blocks = bio->bi_iter.bi_size >> v->data_dev_block_bits;
	io->had_mismatch = false;

	bio->bi_end_io = verity_end_io;
	bio->bi_private = io;
	io->iter = bio->bi_iter;

	verity_fec_init_io(io);

	verity_submit_prefetch(v, io, bio_prio(bio));

	submit_bio_noacct(bio);

	return DM_MAPIO_SUBMITTED;
}
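/*
 * io->block and io->n_blocks above are derived by shifting the bio's sector
 * and size by the data block size; this only holds because the earlier
 * alignment check rejected any bio that is not a whole number of data
 * blocks, so no rounding is lost in the conversion.
 */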
/*
 * Status: V (valid) or C (corruption found)
 */
static void verity_status(struct dm_target *ti, status_type_t type,
			  unsigned int status_flags, char *result, unsigned int maxlen)
{
	struct dm_verity *v = ti->private;
	unsigned int args = 0;
	unsigned int sz = 0;
	unsigned int x;

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%c", v->hash_failed ? 'C' : 'V');
		break;
	case STATUSTYPE_TABLE:
		DMEMIT("%u %s %s %u %u %llu %llu %s ",
		       v->version,
		       v->data_dev->name,
		       v->hash_dev->name,
		       1 << v->data_dev_block_bits,
		       1 << v->hash_dev_block_bits,
		       (unsigned long long)v->data_blocks,
		       (unsigned long long)v->hash_start,
		       v->alg_name);
		for (x = 0; x < v->digest_size; x++)
			DMEMIT("%02x", v->root_digest[x]);
		DMEMIT(" ");
		if (!v->salt_size)
			DMEMIT("-");
		else
			for (x = 0; x < v->salt_size; x++)
				DMEMIT("%02x", v->salt[x]);
		if (v->mode != DM_VERITY_MODE_EIO)
			args++;
		if (v->error_mode != DM_VERITY_MODE_EIO)
			args++;
		if (verity_fec_is_enabled(v))
			args += DM_VERITY_OPTS_FEC;
		if (v->zero_digest)
			args++;
		if (v->validated_blocks)
			args++;
		if (v->use_bh_wq)
			args++;
		if (v->signature_key_desc)
			args += DM_VERITY_ROOT_HASH_VERIFICATION_OPTS;
		if (!args)
			break;
		DMEMIT(" %u", args);
		if (v->mode != DM_VERITY_MODE_EIO) {
			DMEMIT(" ");
			switch (v->mode) {
			case DM_VERITY_MODE_LOGGING:
				DMEMIT(DM_VERITY_OPT_LOGGING);
				break;
			case DM_VERITY_MODE_RESTART:
				DMEMIT(DM_VERITY_OPT_RESTART);
				break;
			case DM_VERITY_MODE_PANIC:
				DMEMIT(DM_VERITY_OPT_PANIC);
				break;
			default:
				BUG();
			}
		}
		if (v->error_mode != DM_VERITY_MODE_EIO) {
			DMEMIT(" ");
			switch (v->error_mode) {
			case DM_VERITY_MODE_RESTART:
				DMEMIT(DM_VERITY_OPT_ERROR_RESTART);
				break;
			case DM_VERITY_MODE_PANIC:
				DMEMIT(DM_VERITY_OPT_ERROR_PANIC);
				break;
			default:
				BUG();
			}
		}
		if (v->zero_digest)
			DMEMIT(" " DM_VERITY_OPT_IGN_ZEROES);
		if (v->validated_blocks)
			DMEMIT(" " DM_VERITY_OPT_AT_MOST_ONCE);
		if (v->use_bh_wq)
			DMEMIT(" " DM_VERITY_OPT_TASKLET_VERIFY);
		sz = verity_fec_status_table(v, sz, result, maxlen);
		if (v->signature_key_desc)
			DMEMIT(" " DM_VERITY_ROOT_HASH_VERIFICATION_OPT_SIG_KEY
				" %s", v->signature_key_desc);
		break;

	case STATUSTYPE_IMA:
		DMEMIT_TARGET_NAME_VERSION(ti->type);
		DMEMIT(",hash_failed=%c", v->hash_failed ? 'C' : 'V');
		DMEMIT(",verity_version=%u", v->version);
		DMEMIT(",data_device_name=%s", v->data_dev->name);
		DMEMIT(",hash_device_name=%s", v->hash_dev->name);
		DMEMIT(",verity_algorithm=%s", v->alg_name);

		DMEMIT(",root_digest=");
		for (x = 0; x < v->digest_size; x++)
			DMEMIT("%02x", v->root_digest[x]);

		DMEMIT(",salt=");
		if (!v->salt_size)
			DMEMIT("-");
		else
			for (x = 0; x < v->salt_size; x++)
				DMEMIT("%02x", v->salt[x]);

		DMEMIT(",ignore_zero_blocks=%c", v->zero_digest ? 'y' : 'n');
		DMEMIT(",check_at_most_once=%c", v->validated_blocks ? 'y' : 'n');
		if (v->signature_key_desc)
			DMEMIT(",root_hash_sig_key_desc=%s", v->signature_key_desc);

		if (v->mode != DM_VERITY_MODE_EIO) {
			DMEMIT(",verity_mode=");
			switch (v->mode) {
			case DM_VERITY_MODE_LOGGING:
				DMEMIT(DM_VERITY_OPT_LOGGING);
				break;
			case DM_VERITY_MODE_RESTART:
				DMEMIT(DM_VERITY_OPT_RESTART);
				break;
			case DM_VERITY_MODE_PANIC:
				DMEMIT(DM_VERITY_OPT_PANIC);
				break;
			}
		}
		if (v->error_mode != DM_VERITY_MODE_EIO) {
			DMEMIT(",verity_error_mode=");
			switch (v->error_mode) {
			case DM_VERITY_MODE_RESTART:
				DMEMIT(DM_VERITY_OPT_ERROR_RESTART);
				break;
			case DM_VERITY_MODE_PANIC:
				DMEMIT(DM_VERITY_OPT_ERROR_PANIC);
				break;
			}
		}
		DMEMIT(";");
		break;
	}
}
static int verity_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
{
	struct dm_verity *v = ti->private;

	*bdev = v->data_dev->bdev;

	if (ti->len != bdev_nr_sectors(v->data_dev->bdev))
		return 1;
	return 0;
}
static int verity_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct dm_verity *v = ti->private;

	return fn(ti, v->data_dev, 0, ti->len, data);
}
static void verity_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct dm_verity *v = ti->private;

	if (limits->logical_block_size < 1 << v->data_dev_block_bits)
		limits->logical_block_size = 1 << v->data_dev_block_bits;

	if (limits->physical_block_size < 1 << v->data_dev_block_bits)
		limits->physical_block_size = 1 << v->data_dev_block_bits;

	limits->io_min = limits->logical_block_size;

	/*
	 * Similar to what dm-crypt does, opt dm-verity out of support for
	 * direct I/O that is aligned to less than the traditional direct I/O
	 * alignment requirement of logical_block_size. This prevents dm-verity
	 * data blocks from crossing pages, eliminating various edge cases.
	 */
	limits->dma_alignment = limits->logical_block_size - 1;
}
#ifdef CONFIG_SECURITY

static int verity_init_sig(struct dm_verity *v, const void *sig,
			   size_t sig_size)
{
	v->sig_size = sig_size;

	if (sig) {
		v->root_digest_sig = kmemdup(sig, v->sig_size, GFP_KERNEL);
		if (!v->root_digest_sig)
			return -ENOMEM;
	}

	return 0;
}

static void verity_free_sig(struct dm_verity *v)
{
	kfree(v->root_digest_sig);
}

#else

static inline int verity_init_sig(struct dm_verity *v, const void *sig,
				  size_t sig_size)
{
	return 0;
}

static inline void verity_free_sig(struct dm_verity *v)
{
}

#endif /* CONFIG_SECURITY */
static void verity_dtr(struct dm_target *ti)
{
	struct dm_verity *v = ti->private;

	if (v->verify_wq)
		destroy_workqueue(v->verify_wq);

	mempool_exit(&v->recheck_pool);

	if (v->io)
		dm_io_client_destroy(v->io);

	if (v->bufio)
		dm_bufio_client_destroy(v->bufio);

	kvfree(v->validated_blocks);
	kfree(v->salt);
	kfree(v->initial_hashstate);
	kfree(v->root_digest);
	kfree(v->zero_digest);
	verity_free_sig(v);

	if (v->ahash_tfm) {
		static_branch_dec(&ahash_enabled);
		crypto_free_ahash(v->ahash_tfm);
	} else {
		crypto_free_shash(v->shash_tfm);
	}

	kfree(v->alg_name);

	if (v->hash_dev)
		dm_put_device(ti, v->hash_dev);

	if (v->data_dev)
		dm_put_device(ti, v->data_dev);

	verity_fec_dtr(v);

	kfree(v->signature_key_desc);

	if (v->use_bh_wq)
		static_branch_dec(&use_bh_wq_enabled);

	kfree(v);

	dm_audit_log_dtr(DM_MSG_PREFIX, ti, 1);
}
static int verity_alloc_most_once(struct dm_verity *v)
{
	struct dm_target *ti = v->ti;

	/* the bitset can only handle INT_MAX blocks */
	if (v->data_blocks > INT_MAX) {
		ti->error = "device too large to use check_at_most_once";
		return -E2BIG;
	}

	v->validated_blocks = kvcalloc(BITS_TO_LONGS(v->data_blocks),
				       sizeof(unsigned long),
				       GFP_KERNEL);
	if (!v->validated_blocks) {
		ti->error = "failed to allocate bitset for check_at_most_once";
		return -ENOMEM;
	}

	return 0;
}
*v
)
1096 struct dm_verity_io
*io
;
1099 v
->zero_digest
= kmalloc(v
->digest_size
, GFP_KERNEL
);
1101 if (!v
->zero_digest
)
1104 io
= kmalloc(sizeof(*io
) + v
->hash_reqsize
, GFP_KERNEL
);
1107 return r
; /* verity_dtr will free zero_digest */
1109 zero_data
= kzalloc(1 << v
->data_dev_block_bits
, GFP_KERNEL
);
1114 r
= verity_hash(v
, io
, zero_data
, 1 << v
->data_dev_block_bits
,
1115 v
->zero_digest
, true);
1124 static inline bool verity_is_verity_mode(const char *arg_name
)
1126 return (!strcasecmp(arg_name
, DM_VERITY_OPT_LOGGING
) ||
1127 !strcasecmp(arg_name
, DM_VERITY_OPT_RESTART
) ||
1128 !strcasecmp(arg_name
, DM_VERITY_OPT_PANIC
));
1131 static int verity_parse_verity_mode(struct dm_verity
*v
, const char *arg_name
)
1136 if (!strcasecmp(arg_name
, DM_VERITY_OPT_LOGGING
))
1137 v
->mode
= DM_VERITY_MODE_LOGGING
;
1138 else if (!strcasecmp(arg_name
, DM_VERITY_OPT_RESTART
))
1139 v
->mode
= DM_VERITY_MODE_RESTART
;
1140 else if (!strcasecmp(arg_name
, DM_VERITY_OPT_PANIC
))
1141 v
->mode
= DM_VERITY_MODE_PANIC
;
1146 static inline bool verity_is_verity_error_mode(const char *arg_name
)
1148 return (!strcasecmp(arg_name
, DM_VERITY_OPT_ERROR_RESTART
) ||
1149 !strcasecmp(arg_name
, DM_VERITY_OPT_ERROR_PANIC
));
1152 static int verity_parse_verity_error_mode(struct dm_verity
*v
, const char *arg_name
)
1157 if (!strcasecmp(arg_name
, DM_VERITY_OPT_ERROR_RESTART
))
1158 v
->error_mode
= DM_VERITY_MODE_RESTART
;
1159 else if (!strcasecmp(arg_name
, DM_VERITY_OPT_ERROR_PANIC
))
1160 v
->error_mode
= DM_VERITY_MODE_PANIC
;
static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v,
				 struct dm_verity_sig_opts *verify_args,
				 bool only_modifier_opts)
{
	int r = 0;
	unsigned int argc;
	struct dm_target *ti = v->ti;
	const char *arg_name;

	static const struct dm_arg _args[] = {
		{0, DM_VERITY_OPTS_MAX, "Invalid number of feature args"},
	};

	r = dm_read_arg_group(_args, as, &argc, &ti->error);
	if (r)
		return -EINVAL;

	if (!argc)
		return 0;

	do {
		arg_name = dm_shift_arg(as);
		argc--;

		if (verity_is_verity_mode(arg_name)) {
			if (only_modifier_opts)
				continue;
			r = verity_parse_verity_mode(v, arg_name);
			if (r) {
				ti->error = "Conflicting error handling parameters";
				return r;
			}
			continue;

		} else if (verity_is_verity_error_mode(arg_name)) {
			if (only_modifier_opts)
				continue;
			r = verity_parse_verity_error_mode(v, arg_name);
			if (r) {
				ti->error = "Conflicting error handling parameters";
				return r;
			}
			continue;

		} else if (!strcasecmp(arg_name, DM_VERITY_OPT_IGN_ZEROES)) {
			if (only_modifier_opts)
				continue;
			r = verity_alloc_zero_digest(v);
			if (r) {
				ti->error = "Cannot allocate zero digest";
				return r;
			}
			continue;

		} else if (!strcasecmp(arg_name, DM_VERITY_OPT_AT_MOST_ONCE)) {
			if (only_modifier_opts)
				continue;
			r = verity_alloc_most_once(v);
			if (r)
				return r;
			continue;

		} else if (!strcasecmp(arg_name, DM_VERITY_OPT_TASKLET_VERIFY)) {
			v->use_bh_wq = true;
			static_branch_inc(&use_bh_wq_enabled);
			continue;

		} else if (verity_is_fec_opt_arg(arg_name)) {
			if (only_modifier_opts)
				continue;
			r = verity_fec_parse_opt_args(as, v, &argc, arg_name);
			if (r)
				return r;
			continue;

		} else if (verity_verify_is_sig_opt_arg(arg_name)) {
			if (only_modifier_opts)
				continue;
			r = verity_verify_sig_parse_opt_args(as, v,
							     verify_args,
							     &argc, arg_name);
			if (r)
				return r;
			continue;

		} else if (only_modifier_opts) {
			/*
			 * Ignore unrecognized opt, could easily be an extra
			 * argument to an option whose parsing was skipped.
			 * Normal parsing (@only_modifier_opts=false) will
			 * properly parse all options (and their extra args).
			 */
			continue;
		}

		DMERR("Unrecognized verity feature request: %s", arg_name);
		ti->error = "Unrecognized verity feature request";
		return -EINVAL;
	} while (argc && !r);

	return r;
}
static int verity_setup_hash_alg(struct dm_verity *v, const char *alg_name)
{
	struct dm_target *ti = v->ti;
	struct crypto_ahash *ahash;
	struct crypto_shash *shash = NULL;
	const char *driver_name;

	v->alg_name = kstrdup(alg_name, GFP_KERNEL);
	if (!v->alg_name) {
		ti->error = "Cannot allocate algorithm name";
		return -ENOMEM;
	}

	/*
	 * Allocate the hash transformation object that this dm-verity instance
	 * will use. The vast majority of dm-verity users use CPU-based
	 * hashing, so when possible use the shash API to minimize the crypto
	 * API overhead. If the ahash API resolves to a different driver
	 * (likely an off-CPU hardware offload), use ahash instead. Also use
	 * ahash if the obsolete dm-verity format with the appended salt is
	 * being used, so that quirk only needs to be handled in one place.
	 */
	ahash = crypto_alloc_ahash(alg_name, 0,
				   v->use_bh_wq ? CRYPTO_ALG_ASYNC : 0);
	if (IS_ERR(ahash)) {
		ti->error = "Cannot initialize hash function";
		return PTR_ERR(ahash);
	}
	driver_name = crypto_ahash_driver_name(ahash);
	if (v->version >= 1 /* salt prepended, not appended? */) {
		shash = crypto_alloc_shash(alg_name, 0, 0);
		if (!IS_ERR(shash) &&
		    strcmp(crypto_shash_driver_name(shash), driver_name) != 0) {
			/*
			 * ahash gave a different driver than shash, so probably
			 * this is a case of real hardware offload. Use ahash.
			 */
			crypto_free_shash(shash);
			shash = NULL;
		}
	}
	if (!IS_ERR_OR_NULL(shash)) {
		crypto_free_ahash(ahash);
		v->shash_tfm = shash;
		v->digest_size = crypto_shash_digestsize(shash);
		v->hash_reqsize = sizeof(struct shash_desc) +
				  crypto_shash_descsize(shash);
		DMINFO("%s using shash \"%s\"", alg_name, driver_name);
	} else {
		v->ahash_tfm = ahash;
		static_branch_inc(&ahash_enabled);
		v->digest_size = crypto_ahash_digestsize(ahash);
		v->hash_reqsize = sizeof(struct ahash_request) +
				  crypto_ahash_reqsize(ahash);
		DMINFO("%s using ahash \"%s\"", alg_name, driver_name);
	}
	if ((1 << v->hash_dev_block_bits) < v->digest_size * 2) {
		ti->error = "Digest size too big";
		return -EINVAL;
	}
	return 0;
}
static int verity_setup_salt_and_hashstate(struct dm_verity *v, const char *arg)
{
	struct dm_target *ti = v->ti;

	if (strcmp(arg, "-") != 0) {
		v->salt_size = strlen(arg) / 2;
		v->salt = kmalloc(v->salt_size, GFP_KERNEL);
		if (!v->salt) {
			ti->error = "Cannot allocate salt";
			return -ENOMEM;
		}
		if (strlen(arg) != v->salt_size * 2 ||
		    hex2bin(v->salt, arg, v->salt_size)) {
			ti->error = "Invalid salt";
			return -EINVAL;
		}
	}
	if (v->shash_tfm) {
		SHASH_DESC_ON_STACK(desc, v->shash_tfm);
		int r;

		/*
		 * Compute the pre-salted hash state that can be passed to
		 * crypto_shash_import() for each block later.
		 */
		v->initial_hashstate = kmalloc(
			crypto_shash_statesize(v->shash_tfm), GFP_KERNEL);
		if (!v->initial_hashstate) {
			ti->error = "Cannot allocate initial hash state";
			return -ENOMEM;
		}
		desc->tfm = v->shash_tfm;
		r = crypto_shash_init(desc) ?:
		    crypto_shash_update(desc, v->salt, v->salt_size) ?:
		    crypto_shash_export(desc, v->initial_hashstate);
		if (r) {
			ti->error = "Cannot set up initial hash state";
			return r;
		}
	}
	return 0;
}
/*
 * Target parameters:
 *	<version>	The current format is version 1.
 *			Vsn 0 is compatible with original Chromium OS releases.
 *	<data device>
 *	<hash device>
 *	<data block size>
 *	<hash block size>
 *	<the number of data blocks>
 *	<hash start block>
 *	<algorithm>
 *	<digest>
 *	<salt>		Hex string or "-" if no salt.
 */
static int verity_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dm_verity *v;
	struct dm_verity_sig_opts verify_args = {0};
	struct dm_arg_set as;
	unsigned int num;
	unsigned long long num_ll;
	int r;
	int i;
	sector_t hash_position;
	char dummy;
	char *root_hash_digest_to_validate;

	v = kzalloc(sizeof(struct dm_verity), GFP_KERNEL);
	if (!v) {
		ti->error = "Cannot allocate verity structure";
		return -ENOMEM;
	}
	ti->private = v;
	v->ti = ti;

	r = verity_fec_ctr_alloc(v);
	if (r)
		goto bad;

	if ((dm_table_get_mode(ti->table) & ~BLK_OPEN_READ)) {
		ti->error = "Device must be readonly";
		r = -EINVAL;
		goto bad;
	}

	if (argc < 10) {
		ti->error = "Not enough arguments";
		r = -EINVAL;
		goto bad;
	}

	/* Parse optional parameters that modify primary args */
	if (argc > 10) {
		as.argc = argc - 10;
		as.argv = argv + 10;
		r = verity_parse_opt_args(&as, v, &verify_args, true);
		if (r < 0)
			goto bad;
	}

	if (sscanf(argv[0], "%u%c", &num, &dummy) != 1 ||
	    num > 1) {
		ti->error = "Invalid version";
		r = -EINVAL;
		goto bad;
	}
	v->version = num;

	r = dm_get_device(ti, argv[1], BLK_OPEN_READ, &v->data_dev);
	if (r) {
		ti->error = "Data device lookup failed";
		goto bad;
	}

	r = dm_get_device(ti, argv[2], BLK_OPEN_READ, &v->hash_dev);
	if (r) {
		ti->error = "Hash device lookup failed";
		goto bad;
	}

	if (sscanf(argv[3], "%u%c", &num, &dummy) != 1 ||
	    !num || (num & (num - 1)) ||
	    num < bdev_logical_block_size(v->data_dev->bdev) ||
	    num > PAGE_SIZE) {
		ti->error = "Invalid data device block size";
		r = -EINVAL;
		goto bad;
	}
	v->data_dev_block_bits = __ffs(num);

	if (sscanf(argv[4], "%u%c", &num, &dummy) != 1 ||
	    !num || (num & (num - 1)) ||
	    num < bdev_logical_block_size(v->hash_dev->bdev) ||
	    num > INT_MAX) {
		ti->error = "Invalid hash device block size";
		r = -EINVAL;
		goto bad;
	}
	v->hash_dev_block_bits = __ffs(num);

	if (sscanf(argv[5], "%llu%c", &num_ll, &dummy) != 1 ||
	    (sector_t)(num_ll << (v->data_dev_block_bits - SECTOR_SHIFT))
	    >> (v->data_dev_block_bits - SECTOR_SHIFT) != num_ll) {
		ti->error = "Invalid data blocks";
		r = -EINVAL;
		goto bad;
	}
	v->data_blocks = num_ll;

	if (ti->len > (v->data_blocks << (v->data_dev_block_bits - SECTOR_SHIFT))) {
		ti->error = "Data device is too small";
		r = -EINVAL;
		goto bad;
	}

	if (sscanf(argv[6], "%llu%c", &num_ll, &dummy) != 1 ||
	    (sector_t)(num_ll << (v->hash_dev_block_bits - SECTOR_SHIFT))
	    >> (v->hash_dev_block_bits - SECTOR_SHIFT) != num_ll) {
		ti->error = "Invalid hash start";
		r = -EINVAL;
		goto bad;
	}
	v->hash_start = num_ll;

	r = verity_setup_hash_alg(v, argv[7]);
	if (r)
		goto bad;

	v->root_digest = kmalloc(v->digest_size, GFP_KERNEL);
	if (!v->root_digest) {
		ti->error = "Cannot allocate root digest";
		r = -ENOMEM;
		goto bad;
	}
	if (strlen(argv[8]) != v->digest_size * 2 ||
	    hex2bin(v->root_digest, argv[8], v->digest_size)) {
		ti->error = "Invalid root digest";
		r = -EINVAL;
		goto bad;
	}
	root_hash_digest_to_validate = argv[8];

	r = verity_setup_salt_and_hashstate(v, argv[9]);
	if (r)
		goto bad;

	argv += 10;
	argc -= 10;

	/* Optional parameters */
	if (argc) {
		as.argc = argc;
		as.argv = argv;
		r = verity_parse_opt_args(&as, v, &verify_args, false);
		if (r < 0)
			goto bad;
	}

	/* Root hash signature is a optional parameter*/
	r = verity_verify_root_hash(root_hash_digest_to_validate,
				    strlen(root_hash_digest_to_validate),
				    verify_args.sig,
				    verify_args.sig_size);
	if (r < 0) {
		ti->error = "Root hash verification failed";
		goto bad;
	}

	r = verity_init_sig(v, verify_args.sig, verify_args.sig_size);
	if (r < 0) {
		ti->error = "Cannot allocate root digest signature";
		goto bad;
	}

	v->hash_per_block_bits =
		__fls((1 << v->hash_dev_block_bits) / v->digest_size);

	v->levels = 0;
	if (v->data_blocks)
		while (v->hash_per_block_bits * v->levels < 64 &&
		       (unsigned long long)(v->data_blocks - 1) >>
		       (v->hash_per_block_bits * v->levels))
			v->levels++;

	if (v->levels > DM_VERITY_MAX_LEVELS) {
		ti->error = "Too many tree levels";
		r = -E2BIG;
		goto bad;
	}

	hash_position = v->hash_start;
	for (i = v->levels - 1; i >= 0; i--) {
		sector_t s;

		v->hash_level_block[i] = hash_position;
		s = (v->data_blocks + ((sector_t)1 << ((i + 1) * v->hash_per_block_bits)) - 1)
					>> ((i + 1) * v->hash_per_block_bits);
		if (hash_position + s < hash_position) {
			ti->error = "Hash device offset overflow";
			r = -E2BIG;
			goto bad;
		}
		hash_position += s;
	}
	v->hash_blocks = hash_position;

	r = mempool_init_page_pool(&v->recheck_pool, 1, 0);
	if (unlikely(r)) {
		ti->error = "Cannot allocate mempool";
		goto bad;
	}

	v->io = dm_io_client_create();
	if (IS_ERR(v->io)) {
		r = PTR_ERR(v->io);
		v->io = NULL;
		ti->error = "Cannot allocate dm io";
		goto bad;
	}

	v->bufio = dm_bufio_client_create(v->hash_dev->bdev,
		1 << v->hash_dev_block_bits, 1, sizeof(struct buffer_aux),
		dm_bufio_alloc_callback, NULL,
		v->use_bh_wq ? DM_BUFIO_CLIENT_NO_SLEEP : 0);
	if (IS_ERR(v->bufio)) {
		ti->error = "Cannot initialize dm-bufio";
		r = PTR_ERR(v->bufio);
		v->bufio = NULL;
		goto bad;
	}

	if (dm_bufio_get_device_size(v->bufio) < v->hash_blocks) {
		ti->error = "Hash device is too small";
		r = -E2BIG;
		goto bad;
	}

	/*
	 * Using WQ_HIGHPRI improves throughput and completion latency by
	 * reducing wait times when reading from a dm-verity device.
	 *
	 * Also as required for the "try_verify_in_tasklet" feature: WQ_HIGHPRI
	 * allows verify_wq to preempt softirq since verification in BH workqueue
	 * will fall-back to using it for error handling (or if the bufio cache
	 * doesn't have required hashes).
	 */
	v->verify_wq = alloc_workqueue("kverityd", WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!v->verify_wq) {
		ti->error = "Cannot allocate workqueue";
		r = -ENOMEM;
		goto bad;
	}

	ti->per_io_data_size = sizeof(struct dm_verity_io) + v->hash_reqsize;

	r = verity_fec_ctr(v);
	if (r)
		goto bad;

	ti->per_io_data_size = roundup(ti->per_io_data_size,
				       __alignof__(struct dm_verity_io));

	verity_verify_sig_opts_cleanup(&verify_args);

	dm_audit_log_ctr(DM_MSG_PREFIX, ti, 1);

	return 0;

bad:

	verity_verify_sig_opts_cleanup(&verify_args);
	dm_audit_log_ctr(DM_MSG_PREFIX, ti, 0);
	verity_dtr(ti);

	return r;
}
/*
 * Get the verity mode (error behavior) of a verity target.
 *
 * Returns the verity mode of the target, or -EINVAL if 'ti' is not a verity
 * target.
 */
int dm_verity_get_mode(struct dm_target *ti)
{
	struct dm_verity *v = ti->private;

	if (!dm_is_verity_target(ti))
		return -EINVAL;

	return v->mode;
}
/*
 * Get the root digest of a verity target.
 *
 * Returns a copy of the root digest; the caller is responsible for
 * freeing the memory of the digest.
 */
int dm_verity_get_root_digest(struct dm_target *ti, u8 **root_digest, unsigned int *digest_size)
{
	struct dm_verity *v = ti->private;

	if (!dm_is_verity_target(ti))
		return -EINVAL;

	*root_digest = kmemdup(v->root_digest, v->digest_size, GFP_KERNEL);
	if (*root_digest == NULL)
		return -ENOMEM;

	*digest_size = v->digest_size;

	return 0;
}
#ifdef CONFIG_SECURITY

#ifdef CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG

static int verity_security_set_signature(struct block_device *bdev,
					 struct dm_verity *v)
{
	/*
	 * If the dm-verity target is unsigned, v->root_digest_sig will
	 * be NULL, and the hook call is still required to let LSMs mark
	 * the device as unsigned. This information is crucial for LSMs to
	 * block operations such as execution on unsigned files.
	 */
	return security_bdev_setintegrity(bdev,
					  LSM_INT_DMVERITY_SIG_VALID,
					  v->root_digest_sig,
					  v->sig_size);
}

#else

static inline int verity_security_set_signature(struct block_device *bdev,
						struct dm_verity *v)
{
	return 0;
}

#endif /* CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG */

/*
 * Expose verity target's root hash and signature data to LSMs before resume.
 *
 * Returns 0 on success, or -ENOMEM if the system is out of memory.
 */
static int verity_preresume(struct dm_target *ti)
{
	struct block_device *bdev;
	struct dm_verity_digest root_digest;
	struct dm_verity *v;
	int r;

	v = ti->private;
	bdev = dm_disk(dm_table_get_md(ti->table))->part0;
	root_digest.digest = v->root_digest;
	root_digest.digest_len = v->digest_size;
	if (static_branch_unlikely(&ahash_enabled) && !v->shash_tfm)
		root_digest.alg = crypto_ahash_alg_name(v->ahash_tfm);
	else
		root_digest.alg = crypto_shash_alg_name(v->shash_tfm);

	r = security_bdev_setintegrity(bdev, LSM_INT_DMVERITY_ROOTHASH, &root_digest,
				       sizeof(root_digest));
	if (r)
		return r;

	r = verity_security_set_signature(bdev, v);
	if (r)
		goto bad;

	return 0;

bad:

	security_bdev_setintegrity(bdev, LSM_INT_DMVERITY_ROOTHASH, NULL, 0);

	return r;
}

#endif /* CONFIG_SECURITY */
static struct target_type verity_target = {
	.name		= "verity",
	/* Note: the LSMs depend on the singleton and immutable features */
	.features	= DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE,
	.version	= {1, 10, 0},
	.module		= THIS_MODULE,
	.ctr		= verity_ctr,
	.dtr		= verity_dtr,
	.map		= verity_map,
	.status		= verity_status,
	.prepare_ioctl	= verity_prepare_ioctl,
	.iterate_devices = verity_iterate_devices,
	.io_hints	= verity_io_hints,
#ifdef CONFIG_SECURITY
	.preresume	= verity_preresume,
#endif /* CONFIG_SECURITY */
};
module_dm(verity);
/*
 * Check whether a DM target is a verity target.
 */
bool dm_is_verity_target(struct dm_target *ti)
{
	return ti->type == &verity_target;
}

MODULE_AUTHOR("Mikulas Patocka <mpatocka@redhat.com>");
MODULE_AUTHOR("Mandeep Baines <msb@chromium.org>");
MODULE_AUTHOR("Will Drewry <wad@chromium.org>");
MODULE_DESCRIPTION(DM_NAME " target for transparent disk integrity checking");
MODULE_LICENSE("GPL");