// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2003 Jana Saout <jana@saout.de>
 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
 * Copyright (C) 2006-2020 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2013-2020 Milan Broz <gmazyland@gmail.com>
 *
 * This file is released under the GPL.
 */

#include <linux/completion.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/key.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/backing-dev.h>
#include <linux/atomic.h>
#include <linux/scatterlist.h>
#include <linux/rbtree.h>
#include <linux/ctype.h>
#include <linux/unaligned.h>
#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/skcipher.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <crypto/utils.h>
#include <linux/rtnetlink.h> /* for struct rtattr and RTA macros only */
#include <linux/key-type.h>
#include <keys/user-type.h>
#include <keys/encrypted-type.h>
#include <keys/trusted-type.h>

#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "crypt"

static DEFINE_IDA(workqueue_ida);
/*
 * context holding the current state of a multi-part conversion
 */
struct convert_context {
	struct completion restart;
	struct bio *bio_in;
	struct bio *bio_out;
	struct bvec_iter iter_in;
	struct bvec_iter iter_out;
	atomic_t cc_pending;
	u64 cc_sector;
	union {
		struct skcipher_request *req;
		struct aead_request *req_aead;
	} r;
	bool aead_recheck;
	bool aead_failed;
};

/*
 * per bio private data
 */
struct dm_crypt_io {
	struct crypt_config *cc;
	struct bio *base_bio;
	u8 *integrity_metadata;
	bool integrity_metadata_from_pool:1;

	struct work_struct work;

	struct convert_context ctx;

	atomic_t io_pending;
	blk_status_t error;
	sector_t sector;

	struct bvec_iter saved_bi_iter;

	struct rb_node rb_node;
} CRYPTO_MINALIGN_ATTR;

struct dm_crypt_request {
	struct convert_context *ctx;
	struct scatterlist sg_in[4];
	struct scatterlist sg_out[4];
	u64 iv_sector;
};

struct crypt_config;

struct crypt_iv_operations {
	int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
		   const char *opts);
	void (*dtr)(struct crypt_config *cc);
	int (*init)(struct crypt_config *cc);
	int (*wipe)(struct crypt_config *cc);
	int (*generator)(struct crypt_config *cc, u8 *iv,
			 struct dm_crypt_request *dmreq);
	int (*post)(struct crypt_config *cc, u8 *iv,
		    struct dm_crypt_request *dmreq);
};
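/*
 * An IV generator's ->ctr()/->dtr() hooks run at target construction and
 * destruction, ->init()/->wipe() when the key is set or wiped, ->generator()
 * right before a sector is en/decrypted and ->post() right after it, which
 * lets generators such as lmk, tcw and elephant also post-process the data.
 */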
struct iv_benbi_private {
	int shift;
};

#define LMK_SEED_SIZE 64 /* hash + 0 */
struct iv_lmk_private {
	struct crypto_shash *hash_tfm;
	u8 *seed;
};

#define TCW_WHITENING_SIZE 16
struct iv_tcw_private {
	struct crypto_shash *crc32_tfm;
	u8 *iv_seed;
	u8 *whitening;
};

#define ELEPHANT_MAX_KEY_SIZE 32
struct iv_elephant_private {
	struct crypto_skcipher *tfm;
};

/*
 * Crypt: maps a linear range of a block device
 * and encrypts / decrypts at the same time.
 */
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID,
	     DM_CRYPT_SAME_CPU, DM_CRYPT_HIGH_PRIORITY,
	     DM_CRYPT_NO_OFFLOAD, DM_CRYPT_NO_READ_WORKQUEUE,
	     DM_CRYPT_NO_WRITE_WORKQUEUE, DM_CRYPT_WRITE_INLINE };

enum cipher_flags {
	CRYPT_MODE_INTEGRITY_AEAD,	/* Use authenticated mode for cipher */
	CRYPT_IV_LARGE_SECTORS,		/* Calculate IV from sector_size, not 512B sectors */
	CRYPT_ENCRYPT_PREPROCESS,	/* Must preprocess data for encryption (elephant) */
	CRYPT_KEY_MAC_SIZE_SET,		/* The integrity_key_size option was used */
};
/*
 * The fields in here must be read only after initialization.
 */
struct crypt_config {
	struct dm_dev *dev;
	sector_t start;

	struct percpu_counter n_allocated_pages;

	struct workqueue_struct *io_queue;
	struct workqueue_struct *crypt_queue;

	spinlock_t write_thread_lock;
	struct task_struct *write_thread;
	struct rb_root write_tree;

	const struct crypt_iv_operations *iv_gen_ops;
	union {
		struct iv_benbi_private benbi;
		struct iv_lmk_private lmk;
		struct iv_tcw_private tcw;
		struct iv_elephant_private elephant;
	} iv_gen_private;
	u64 iv_offset;
	unsigned int iv_size;
	unsigned short sector_size;
	unsigned char sector_shift;

	union {
		struct crypto_skcipher **tfms;
		struct crypto_aead **tfms_aead;
	} cipher_tfm;
	unsigned int tfms_count;
	unsigned long cipher_flags;

	/*
	 * Layout of each crypto request:
	 *
	 *   struct skcipher_request
	 *      context
	 *      padding
	 *   struct dm_crypt_request
	 *      padding
	 *   IV
	 *
	 * The padding is added so that dm_crypt_request and the IV are
	 * correctly aligned.
	 */
	unsigned int dmreq_start;

	unsigned int per_bio_data_size;

	unsigned long flags;
	unsigned int key_size;
	unsigned int key_parts;      /* independent parts in key buffer */
	unsigned int key_extra_size; /* additional keys length */
	unsigned int key_mac_size;   /* MAC key size for authenc(...) */

	unsigned int integrity_tag_size;
	unsigned int integrity_iv_size;
	unsigned int used_tag_size;
	unsigned int tuple_size;

	/*
	 * pool for per bio private data, crypto requests,
	 * encryption requests/buffer pages and integrity tags
	 */
	unsigned int tag_pool_max_sectors;
	mempool_t tag_pool;
	mempool_t req_pool;
	mempool_t page_pool;

	struct bio_set bs;
	struct mutex bio_alloc_lock;

	u8 *authenc_key; /* space for keys in authenc() format (if used) */
	u8 key[] __counted_by(key_size);
};
#define MAX_TAG_SIZE	480
#define POOL_ENTRY_SIZE	512

static DEFINE_SPINLOCK(dm_crypt_clients_lock);
static unsigned int dm_crypt_clients_n;
static volatile unsigned long dm_crypt_pages_per_client;
#define DM_CRYPT_MEMORY_PERCENT			2
#define DM_CRYPT_MIN_PAGES_PER_CLIENT		(BIO_MAX_VECS * 16)
#define DM_CRYPT_DEFAULT_MAX_READ_SIZE		131072
#define DM_CRYPT_DEFAULT_MAX_WRITE_SIZE		131072
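/*
 * The constants above bound how many buffer pages dm-crypt may allocate:
 * roughly a DM_CRYPT_MEMORY_PERCENT share of system memory, with at least
 * DM_CRYPT_MIN_PAGES_PER_CLIENT per instance, is divided among the
 * dm_crypt_clients_n active instances (dm_crypt_pages_per_client) and the
 * per-instance usage is tracked in n_allocated_pages.
 */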
static unsigned int max_read_size = 0;
module_param(max_read_size, uint, 0644);
MODULE_PARM_DESC(max_read_size, "Maximum size of a read request");
static unsigned int max_write_size = 0;
module_param(max_write_size, uint, 0644);
MODULE_PARM_DESC(max_write_size, "Maximum size of a write request");
static unsigned get_max_request_size(struct crypt_config *cc, bool wrt)
{
	unsigned val, sector_align;

	val = !wrt ? READ_ONCE(max_read_size) : READ_ONCE(max_write_size);
	if (likely(!val))
		val = !wrt ? DM_CRYPT_DEFAULT_MAX_READ_SIZE : DM_CRYPT_DEFAULT_MAX_WRITE_SIZE;
	if (wrt || cc->used_tag_size) {
		if (unlikely(val > BIO_MAX_VECS << PAGE_SHIFT))
			val = BIO_MAX_VECS << PAGE_SHIFT;
	}
	sector_align = max(bdev_logical_block_size(cc->dev->bdev), (unsigned)cc->sector_size);
	val = round_down(val, sector_align);
	return val >> SECTOR_SHIFT;
}
static void crypt_endio(struct bio *clone);
static void kcryptd_queue_crypt(struct dm_crypt_io *io);
static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc,
					     struct scatterlist *sg);

static bool crypt_integrity_aead(struct crypt_config *cc);

/*
 * Use this to access cipher attributes that are independent of the key.
 */
static struct crypto_skcipher *any_tfm(struct crypt_config *cc)
{
	return cc->cipher_tfm.tfms[0];
}

static struct crypto_aead *any_tfm_aead(struct crypt_config *cc)
{
	return cc->cipher_tfm.tfms_aead[0];
}
/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * plain64: the initial vector is the 64-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * plain64be: the initial vector is the 64-bit big-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
 *
 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
 *        (needed for LRW-32-AES and possible other narrow block modes)
 *
 * null: the initial vector is always zero. Provides compatibility with
 *       obsolete loop_fish2 devices. Do not use for new devices.
 *
 * lmk:  Compatible implementation of the block chaining mode used
 *       by the Loop-AES block device encryption system
 *       designed by Jari Ruusu. See http://loop-aes.sourceforge.net/
 *       It operates on full 512 byte sectors and uses CBC
 *       with an IV derived from the sector number, the data and
 *       optionally extra IV seed.
 *       This means that after decryption the first block
 *       of sector must be tweaked according to decrypted data.
 *       Loop-AES can use three encryption schemes:
 *         version 1: is plain aes-cbc mode
 *         version 2: uses 64 multikey scheme with lmk IV generator
 *         version 3: the same as version 2 with additional IV seed
 *                    (it uses 65 keys, last key is used as IV seed)
 *
 * tcw:  Compatible implementation of the block chaining mode used
 *       by the TrueCrypt device encryption system (prior to version 4.1).
 *       For more info see: https://gitlab.com/cryptsetup/cryptsetup/wikis/TrueCryptOnDiskFormat
 *       It operates on full 512 byte sectors and uses CBC
 *       with an IV derived from initial key and the sector number.
 *       In addition, whitening value is applied on every sector, whitening
 *       is calculated from initial key, sector number and mixed using CRC32.
 *       Note that this encryption scheme is vulnerable to watermarking attacks
 *       and should be used for old compatible containers access only.
 *
 * eboiv: Encrypted byte-offset IV (used in Bitlocker in CBC mode)
 *        The IV is encrypted little-endian byte-offset (with the same key
 *        and cipher as the volume).
 *
 * elephant: The extended version of eboiv with additional Elephant diffuser
 *           used with Bitlocker CBC mode.
 *           This mode was used in older Windows systems
 *           https://download.microsoft.com/download/0/2/3/0238acaf-d3bf-4a6d-b3d6-0a0be4bbb36e/bitlockercipher200608.pdf
 */
static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);
	*(__le32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff);

	return 0;
}

static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
				struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);
	*(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);

	return 0;
}

static int crypt_iv_plain64be_gen(struct crypt_config *cc, u8 *iv,
				  struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);
	/* iv_size is at least of size u64; usually it is 16 bytes */
	*(__be64 *)&iv[cc->iv_size - sizeof(u64)] = cpu_to_be64(dmreq->iv_sector);

	return 0;
}

static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	/*
	 * ESSIV encryption of the IV is now handled by the crypto API,
	 * so just pass the plain sector number here.
	 */
	memset(iv, 0, cc->iv_size);
	*(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);

	return 0;
}
static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	unsigned int bs;
	int log;

	if (crypt_integrity_aead(cc))
		bs = crypto_aead_blocksize(any_tfm_aead(cc));
	else
		bs = crypto_skcipher_blocksize(any_tfm(cc));
	log = ilog2(bs);

	/*
	 * We need to calculate how far we must shift the sector count
	 * to get the cipher block count, we use this shift in _gen.
	 */
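	/*
	 * For example (values assumed for illustration): a 16-byte cipher
	 * block gives log = 4, so shift = 9 - 4 = 5 and sector N maps to the
	 * 1-based narrow-block count (N << 5) + 1 used by crypt_iv_benbi_gen().
	 */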
	if (1 << log != bs) {
		ti->error = "cypher blocksize is not a power of 2";
		return -EINVAL;
	}

	if (log > 9) {
		ti->error = "cypher blocksize is > 512";
		return -EINVAL;
	}

	cc->iv_gen_private.benbi.shift = 9 - log;

	return 0;
}

static void crypt_iv_benbi_dtr(struct crypt_config *cc)
{
}

static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	__be64 val;

	memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */

	val = cpu_to_be64(((u64)dmreq->iv_sector << cc->iv_gen_private.benbi.shift) + 1);
	put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));

	return 0;
}
static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);

	return 0;
}

static void crypt_iv_lmk_dtr(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	if (lmk->hash_tfm && !IS_ERR(lmk->hash_tfm))
		crypto_free_shash(lmk->hash_tfm);
	lmk->hash_tfm = NULL;

	kfree_sensitive(lmk->seed);
	lmk->seed = NULL;
}
static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti,
			    const char *opts)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	if (cc->sector_size != (1 << SECTOR_SHIFT)) {
		ti->error = "Unsupported sector size for LMK";
		return -EINVAL;
	}

	lmk->hash_tfm = crypto_alloc_shash("md5", 0,
					   CRYPTO_ALG_ALLOCATES_MEMORY);
	if (IS_ERR(lmk->hash_tfm)) {
		ti->error = "Error initializing LMK hash";
		return PTR_ERR(lmk->hash_tfm);
	}

	/* No seed in LMK version 2 */
	if (cc->key_parts == cc->tfms_count) {
		lmk->seed = NULL;
		return 0;
	}

	lmk->seed = kzalloc(LMK_SEED_SIZE, GFP_KERNEL);
	if (!lmk->seed) {
		crypt_iv_lmk_dtr(cc);
		ti->error = "Error kmallocing seed storage in LMK";
		return -ENOMEM;
	}

	return 0;
}
static int crypt_iv_lmk_init(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
	int subkey_size = cc->key_size / cc->key_parts;

	/* LMK seed is on the position of LMK_KEYS + 1 key */
	if (lmk->seed)
		memcpy(lmk->seed, cc->key + (cc->tfms_count * subkey_size),
		       crypto_shash_digestsize(lmk->hash_tfm));

	return 0;
}

static int crypt_iv_lmk_wipe(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	if (lmk->seed)
		memset(lmk->seed, 0, LMK_SEED_SIZE);

	return 0;
}
static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq,
			    u8 *data)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
	SHASH_DESC_ON_STACK(desc, lmk->hash_tfm);
	struct md5_state md5state;
	__le32 buf[4];
	int i, r;

	desc->tfm = lmk->hash_tfm;

	r = crypto_shash_init(desc);

	if (lmk->seed)
		r = crypto_shash_update(desc, lmk->seed, LMK_SEED_SIZE);

	/* Sector is always 512B, block size 16, add data of blocks 1-31 */
	r = crypto_shash_update(desc, data + 16, 16 * 31);

	/* Sector is cropped to 56 bits here */
	buf[0] = cpu_to_le32(dmreq->iv_sector & 0xFFFFFFFF);
	buf[1] = cpu_to_le32((((u64)dmreq->iv_sector >> 32) & 0x00FFFFFF) | 0x80000000);
	buf[2] = cpu_to_le32(4024);
	buf[3] = 0;
	r = crypto_shash_update(desc, (u8 *)buf, sizeof(buf));

	/* No MD5 padding here */
	r = crypto_shash_export(desc, &md5state);

	for (i = 0; i < MD5_HASH_WORDS; i++)
		__cpu_to_le32s(&md5state.hash[i]);
	memcpy(iv, &md5state.hash, cc->iv_size);

	return r;
}
static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq)
{
	struct scatterlist *sg;
	u8 *src;
	int r = 0;

	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
		sg = crypt_get_sg_data(cc, dmreq->sg_in);
		src = kmap_local_page(sg_page(sg));
		r = crypt_iv_lmk_one(cc, iv, dmreq, src + sg->offset);
		kunmap_local(src);
	} else
		memset(iv, 0, cc->iv_size);

	return r;
}

static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	struct scatterlist *sg;
	u8 *dst;
	int r;

	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE)
		return 0;

	sg = crypt_get_sg_data(cc, dmreq->sg_out);
	dst = kmap_local_page(sg_page(sg));
	r = crypt_iv_lmk_one(cc, iv, dmreq, dst + sg->offset);

	/* Tweak the first block of plaintext sector */
	if (!r)
		crypto_xor(dst + sg->offset, iv, cc->iv_size);

	kunmap_local(dst);
	return r;
}
static void crypt_iv_tcw_dtr(struct crypt_config *cc)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

	kfree_sensitive(tcw->iv_seed);
	tcw->iv_seed = NULL;
	kfree_sensitive(tcw->whitening);
	tcw->whitening = NULL;

	if (tcw->crc32_tfm && !IS_ERR(tcw->crc32_tfm))
		crypto_free_shash(tcw->crc32_tfm);
	tcw->crc32_tfm = NULL;
}
static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti,
			    const char *opts)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

	if (cc->sector_size != (1 << SECTOR_SHIFT)) {
		ti->error = "Unsupported sector size for TCW";
		return -EINVAL;
	}

	if (cc->key_size <= (cc->iv_size + TCW_WHITENING_SIZE)) {
		ti->error = "Wrong key size for TCW";
		return -EINVAL;
	}

	tcw->crc32_tfm = crypto_alloc_shash("crc32", 0,
					    CRYPTO_ALG_ALLOCATES_MEMORY);
	if (IS_ERR(tcw->crc32_tfm)) {
		ti->error = "Error initializing CRC32 in TCW";
		return PTR_ERR(tcw->crc32_tfm);
	}

	tcw->iv_seed = kzalloc(cc->iv_size, GFP_KERNEL);
	tcw->whitening = kzalloc(TCW_WHITENING_SIZE, GFP_KERNEL);
	if (!tcw->iv_seed || !tcw->whitening) {
		crypt_iv_tcw_dtr(cc);
		ti->error = "Error allocating seed storage in TCW";
		return -ENOMEM;
	}

	return 0;
}
static int crypt_iv_tcw_init(struct crypt_config *cc)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
	int key_offset = cc->key_size - cc->iv_size - TCW_WHITENING_SIZE;

	memcpy(tcw->iv_seed, &cc->key[key_offset], cc->iv_size);
	memcpy(tcw->whitening, &cc->key[key_offset + cc->iv_size],
	       TCW_WHITENING_SIZE);

	return 0;
}

static int crypt_iv_tcw_wipe(struct crypt_config *cc)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

	memset(tcw->iv_seed, 0, cc->iv_size);
	memset(tcw->whitening, 0, TCW_WHITENING_SIZE);

	return 0;
}
static int crypt_iv_tcw_whitening(struct crypt_config *cc,
				  struct dm_crypt_request *dmreq,
				  u8 *data)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
	__le64 sector = cpu_to_le64(dmreq->iv_sector);
	u8 buf[TCW_WHITENING_SIZE];
	SHASH_DESC_ON_STACK(desc, tcw->crc32_tfm);
	int i, r;

	/* xor whitening with sector number */
	crypto_xor_cpy(buf, tcw->whitening, (u8 *)&sector, 8);
	crypto_xor_cpy(&buf[8], tcw->whitening + 8, (u8 *)&sector, 8);

	/* calculate crc32 for every 32bit part and xor it */
	desc->tfm = tcw->crc32_tfm;
	for (i = 0; i < 4; i++) {
		r = crypto_shash_digest(desc, &buf[i * 4], 4, &buf[i * 4]);
		if (r)
			goto out;
	}
	crypto_xor(&buf[0], &buf[12], 4);
	crypto_xor(&buf[4], &buf[8], 4);

	/* apply whitening (8 bytes) to whole sector */
	for (i = 0; i < ((1 << SECTOR_SHIFT) / 8); i++)
		crypto_xor(data + i * 8, buf, 8);
out:
	memzero_explicit(buf, sizeof(buf));
	return r;
}
static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq)
{
	struct scatterlist *sg;
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
	__le64 sector = cpu_to_le64(dmreq->iv_sector);
	u8 *src;
	int r = 0;

	/* Remove whitening from ciphertext */
	if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
		sg = crypt_get_sg_data(cc, dmreq->sg_in);
		src = kmap_local_page(sg_page(sg));
		r = crypt_iv_tcw_whitening(cc, dmreq, src + sg->offset);
		kunmap_local(src);
	}

	/* Calculate IV */
	crypto_xor_cpy(iv, tcw->iv_seed, (u8 *)&sector, 8);
	if (cc->iv_size > 8)
		crypto_xor_cpy(&iv[8], tcw->iv_seed + 8, (u8 *)&sector,
			       cc->iv_size - 8);

	return r;
}
static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	struct scatterlist *sg;
	u8 *dst;
	int r;

	if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)
		return 0;

	/* Apply whitening on ciphertext */
	sg = crypt_get_sg_data(cc, dmreq->sg_out);
	dst = kmap_local_page(sg_page(sg));
	r = crypt_iv_tcw_whitening(cc, dmreq, dst + sg->offset);
	kunmap_local(dst);

	return r;
}
static int crypt_iv_random_gen(struct crypt_config *cc, u8 *iv,
			       struct dm_crypt_request *dmreq)
{
	/* Used only for writes, there must be an additional space to store IV */
	get_random_bytes(iv, cc->iv_size);
	return 0;
}

static int crypt_iv_eboiv_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	if (crypt_integrity_aead(cc)) {
		ti->error = "AEAD transforms not supported for EBOIV";
		return -EINVAL;
	}

	if (crypto_skcipher_blocksize(any_tfm(cc)) != cc->iv_size) {
		ti->error = "Block size of EBOIV cipher does not match IV size of block cipher";
		return -EINVAL;
	}

	return 0;
}
static int crypt_iv_eboiv_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	struct crypto_skcipher *tfm = any_tfm(cc);
	struct skcipher_request *req;
	struct scatterlist src, dst;
	DECLARE_CRYPTO_WAIT(wait);
	unsigned int reqsize;
	int err;
	u8 *buf;

	reqsize = sizeof(*req) + crypto_skcipher_reqsize(tfm);
	reqsize = ALIGN(reqsize, __alignof__(__le64));

	req = kmalloc(reqsize + cc->iv_size, GFP_NOIO);
	if (!req)
		return -ENOMEM;

	skcipher_request_set_tfm(req, tfm);

	buf = (u8 *)req + reqsize;
	memset(buf, 0, cc->iv_size);
	*(__le64 *)buf = cpu_to_le64(dmreq->iv_sector * cc->sector_size);

	sg_init_one(&src, page_address(ZERO_PAGE(0)), cc->iv_size);
	sg_init_one(&dst, iv, cc->iv_size);
	skcipher_request_set_crypt(req, &src, &dst, cc->iv_size, buf);
	skcipher_request_set_callback(req, 0, crypto_req_done, &wait);
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
	kfree_sensitive(req);

	return err;
}
static void crypt_iv_elephant_dtr(struct crypt_config *cc)
{
	struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;

	crypto_free_skcipher(elephant->tfm);
	elephant->tfm = NULL;
}

static int crypt_iv_elephant_ctr(struct crypt_config *cc, struct dm_target *ti,
				 const char *opts)
{
	struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
	int r;

	elephant->tfm = crypto_alloc_skcipher("ecb(aes)", 0,
					      CRYPTO_ALG_ALLOCATES_MEMORY);
	if (IS_ERR(elephant->tfm)) {
		r = PTR_ERR(elephant->tfm);
		elephant->tfm = NULL;
		return r;
	}

	r = crypt_iv_eboiv_ctr(cc, ti, NULL);
	if (r)
		crypt_iv_elephant_dtr(cc);
	return r;
}
static void diffuser_disk_to_cpu(u32 *d, size_t n)
{
#ifndef __LITTLE_ENDIAN
	int i;

	for (i = 0; i < n; i++)
		d[i] = le32_to_cpu((__le32)d[i]);
#endif
}

static void diffuser_cpu_to_disk(__le32 *d, size_t n)
{
#ifndef __LITTLE_ENDIAN
	int i;

	for (i = 0; i < n; i++)
		d[i] = cpu_to_le32((u32)d[i]);
#endif
}
static void diffuser_a_decrypt(u32 *d, size_t n)
{
	for (i = 0; i < 5; i++) {
		while (i1 < (n - 1)) {
			d[i1] += d[i2] ^ (d[i3] << 9 | d[i3] >> 23);

			d[i1] += d[i2] ^ d[i3];

			d[i1] += d[i2] ^ (d[i3] << 13 | d[i3] >> 19);

			d[i1] += d[i2] ^ d[i3];

static void diffuser_a_encrypt(u32 *d, size_t n)
{
	for (i = 0; i < 5; i++) {
			d[i1] -= d[i2] ^ d[i3];

			d[i1] -= d[i2] ^ (d[i3] << 13 | d[i3] >> 19);

			d[i1] -= d[i2] ^ d[i3];

			d[i1] -= d[i2] ^ (d[i3] << 9 | d[i3] >> 23);

static void diffuser_b_decrypt(u32 *d, size_t n)
{
	for (i = 0; i < 3; i++) {
		while (i1 < (n - 1)) {
			d[i1] += d[i2] ^ d[i3];

			d[i1] += d[i2] ^ (d[i3] << 10 | d[i3] >> 22);

			d[i1] += d[i2] ^ d[i3];

			d[i1] += d[i2] ^ (d[i3] << 25 | d[i3] >> 7);

static void diffuser_b_encrypt(u32 *d, size_t n)
{
	for (i = 0; i < 3; i++) {
			d[i1] -= d[i2] ^ (d[i3] << 25 | d[i3] >> 7);

			d[i1] -= d[i2] ^ d[i3];

			d[i1] -= d[i2] ^ (d[i3] << 10 | d[i3] >> 22);

			d[i1] -= d[i2] ^ d[i3];
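/*
 * crypt_iv_elephant() below implements the Bitlocker Elephant diffuser pass:
 * a 32-byte sector key is derived by ECB-AES-encrypting the sector byte
 * offset, the sector payload is copied into sg_out, diffusers A and B are
 * applied (or removed on reads), and the payload is XORed with the sector
 * key around the regular CBC en/decryption driven by the eboiv IV.
 */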
static int crypt_iv_elephant(struct crypt_config *cc, struct dm_crypt_request *dmreq)
{
	struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
	u8 *es, *ks, *data, *data2, *data_offset;
	struct skcipher_request *req;
	struct scatterlist *sg, *sg2, src, dst;
	DECLARE_CRYPTO_WAIT(wait);
	int i, r;

	req = skcipher_request_alloc(elephant->tfm, GFP_NOIO);
	es = kzalloc(16, GFP_NOIO); /* Key for AES */
	ks = kzalloc(32, GFP_NOIO); /* Elephant sector key */

	if (!req || !es || !ks) {
		r = -ENOMEM;
		goto out;
	}

	*(__le64 *)es = cpu_to_le64(dmreq->iv_sector * cc->sector_size);

	sg_init_one(&src, es, 16);
	sg_init_one(&dst, ks, 16);
	skcipher_request_set_crypt(req, &src, &dst, 16, NULL);
	skcipher_request_set_callback(req, 0, crypto_req_done, &wait);
	r = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
	if (r)
		goto out;

	sg_init_one(&dst, &ks[16], 16);
	r = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
	if (r)
		goto out;

	sg = crypt_get_sg_data(cc, dmreq->sg_out);
	data = kmap_local_page(sg_page(sg));
	data_offset = data + sg->offset;

	/* Cannot modify original bio, copy to sg_out and apply Elephant to it */
	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
		sg2 = crypt_get_sg_data(cc, dmreq->sg_in);
		data2 = kmap_local_page(sg_page(sg2));
		memcpy(data_offset, data2 + sg2->offset, cc->sector_size);
		kunmap_local(data2);
	}

	if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
		diffuser_disk_to_cpu((u32 *)data_offset, cc->sector_size / sizeof(u32));
		diffuser_b_decrypt((u32 *)data_offset, cc->sector_size / sizeof(u32));
		diffuser_a_decrypt((u32 *)data_offset, cc->sector_size / sizeof(u32));
		diffuser_cpu_to_disk((__le32 *)data_offset, cc->sector_size / sizeof(u32));
	}

	for (i = 0; i < (cc->sector_size / 32); i++)
		crypto_xor(data_offset + i * 32, ks, 32);

	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
		diffuser_disk_to_cpu((u32 *)data_offset, cc->sector_size / sizeof(u32));
		diffuser_a_encrypt((u32 *)data_offset, cc->sector_size / sizeof(u32));
		diffuser_b_encrypt((u32 *)data_offset, cc->sector_size / sizeof(u32));
		diffuser_cpu_to_disk((__le32 *)data_offset, cc->sector_size / sizeof(u32));
	}

	kunmap_local(data);
out:
	kfree_sensitive(ks);
	kfree_sensitive(es);
	skcipher_request_free(req);

	return r;
}
static int crypt_iv_elephant_gen(struct crypt_config *cc, u8 *iv,
				 struct dm_crypt_request *dmreq)
{
	int r;

	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
		r = crypt_iv_elephant(cc, dmreq);
		if (r)
			return r;
	}

	return crypt_iv_eboiv_gen(cc, iv, dmreq);
}

static int crypt_iv_elephant_post(struct crypt_config *cc, u8 *iv,
				  struct dm_crypt_request *dmreq)
{
	if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)
		return crypt_iv_elephant(cc, dmreq);

	return 0;
}

static int crypt_iv_elephant_init(struct crypt_config *cc)
{
	struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
	int key_offset = cc->key_size - cc->key_extra_size;

	return crypto_skcipher_setkey(elephant->tfm, &cc->key[key_offset], cc->key_extra_size);
}

static int crypt_iv_elephant_wipe(struct crypt_config *cc)
{
	struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
	u8 key[ELEPHANT_MAX_KEY_SIZE];

	memset(key, 0, cc->key_extra_size);
	return crypto_skcipher_setkey(elephant->tfm, key, cc->key_extra_size);
}
static const struct crypt_iv_operations crypt_iv_plain_ops = {
	.generator = crypt_iv_plain_gen
};

static const struct crypt_iv_operations crypt_iv_plain64_ops = {
	.generator = crypt_iv_plain64_gen
};

static const struct crypt_iv_operations crypt_iv_plain64be_ops = {
	.generator = crypt_iv_plain64be_gen
};

static const struct crypt_iv_operations crypt_iv_essiv_ops = {
	.generator = crypt_iv_essiv_gen
};

static const struct crypt_iv_operations crypt_iv_benbi_ops = {
	.ctr	   = crypt_iv_benbi_ctr,
	.dtr	   = crypt_iv_benbi_dtr,
	.generator = crypt_iv_benbi_gen
};

static const struct crypt_iv_operations crypt_iv_null_ops = {
	.generator = crypt_iv_null_gen
};

static const struct crypt_iv_operations crypt_iv_lmk_ops = {
	.ctr	   = crypt_iv_lmk_ctr,
	.dtr	   = crypt_iv_lmk_dtr,
	.init	   = crypt_iv_lmk_init,
	.wipe	   = crypt_iv_lmk_wipe,
	.generator = crypt_iv_lmk_gen,
	.post	   = crypt_iv_lmk_post
};

static const struct crypt_iv_operations crypt_iv_tcw_ops = {
	.ctr	   = crypt_iv_tcw_ctr,
	.dtr	   = crypt_iv_tcw_dtr,
	.init	   = crypt_iv_tcw_init,
	.wipe	   = crypt_iv_tcw_wipe,
	.generator = crypt_iv_tcw_gen,
	.post	   = crypt_iv_tcw_post
};

static const struct crypt_iv_operations crypt_iv_random_ops = {
	.generator = crypt_iv_random_gen
};

static const struct crypt_iv_operations crypt_iv_eboiv_ops = {
	.ctr	   = crypt_iv_eboiv_ctr,
	.generator = crypt_iv_eboiv_gen
};

static const struct crypt_iv_operations crypt_iv_elephant_ops = {
	.ctr	   = crypt_iv_elephant_ctr,
	.dtr	   = crypt_iv_elephant_dtr,
	.init	   = crypt_iv_elephant_init,
	.wipe	   = crypt_iv_elephant_wipe,
	.generator = crypt_iv_elephant_gen,
	.post	   = crypt_iv_elephant_post
};
1155 static bool crypt_integrity_aead(struct crypt_config
*cc
)
1157 return test_bit(CRYPT_MODE_INTEGRITY_AEAD
, &cc
->cipher_flags
);
1160 static bool crypt_integrity_hmac(struct crypt_config
*cc
)
1162 return crypt_integrity_aead(cc
) && cc
->key_mac_size
;
1165 /* Get sg containing data */
1166 static struct scatterlist
*crypt_get_sg_data(struct crypt_config
*cc
,
1167 struct scatterlist
*sg
)
1169 if (unlikely(crypt_integrity_aead(cc
)))
static int dm_crypt_integrity_io_alloc(struct dm_crypt_io *io, struct bio *bio)
{
	struct bio_integrity_payload *bip;
	unsigned int tag_len;
	int ret;

	if (!bio_sectors(bio) || !io->cc->tuple_size)
		return 0;

	bip = bio_integrity_alloc(bio, GFP_NOIO, 1);
	if (IS_ERR(bip))
		return PTR_ERR(bip);

	tag_len = io->cc->tuple_size * (bio_sectors(bio) >> io->cc->sector_shift);

	bip->bip_iter.bi_sector = io->cc->start + io->sector;

	ret = bio_integrity_add_page(bio, virt_to_page(io->integrity_metadata),
				     tag_len, offset_in_page(io->integrity_metadata));
	if (unlikely(ret != tag_len))
		return -ENOMEM;

	return 0;
}
static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti)
{
#ifdef CONFIG_BLK_DEV_INTEGRITY
	struct blk_integrity *bi = blk_get_integrity(cc->dev->bdev->bd_disk);
	struct mapped_device *md = dm_table_get_md(ti->table);

	/* We require an underlying device with non-PI metadata */
	if (!bi || bi->csum_type != BLK_INTEGRITY_CSUM_NONE) {
		ti->error = "Integrity profile not supported.";
		return -EINVAL;
	}

	if (bi->tuple_size < cc->used_tag_size) {
		ti->error = "Integrity profile tag size mismatch.";
		return -EINVAL;
	}
	cc->tuple_size = bi->tuple_size;
	if (1 << bi->interval_exp != cc->sector_size) {
		ti->error = "Integrity profile sector size mismatch.";
		return -EINVAL;
	}

	if (crypt_integrity_aead(cc)) {
		cc->integrity_tag_size = cc->used_tag_size - cc->integrity_iv_size;
		DMDEBUG("%s: Integrity AEAD, tag size %u, IV size %u.", dm_device_name(md),
		       cc->integrity_tag_size, cc->integrity_iv_size);

		if (crypto_aead_setauthsize(any_tfm_aead(cc), cc->integrity_tag_size)) {
			ti->error = "Integrity AEAD auth tag size is not supported.";
			return -EINVAL;
		}
	} else if (cc->integrity_iv_size)
		DMDEBUG("%s: Additional per-sector space %u bytes for IV.", dm_device_name(md),
			cc->integrity_iv_size);

	if ((cc->integrity_tag_size + cc->integrity_iv_size) > cc->tuple_size) {
		ti->error = "Not enough space for integrity tag in the profile.";
		return -EINVAL;
	}

	return 0;
#else
	ti->error = "Integrity profile not supported.";
	return -EINVAL;
#endif
}
static void crypt_convert_init(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct bio *bio_out, struct bio *bio_in,
			       sector_t sector)
{
	ctx->bio_in = bio_in;
	ctx->bio_out = bio_out;
	if (bio_in)
		ctx->iter_in = bio_in->bi_iter;
	if (bio_out)
		ctx->iter_out = bio_out->bi_iter;
	ctx->cc_sector = sector + cc->iv_offset;
	init_completion(&ctx->restart);
}
static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
					     void *req)
{
	return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
}

static void *req_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq)
{
	return (void *)((char *)dmreq - cc->dmreq_start);
}
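/*
 * The helpers below address the areas reserved behind each
 * struct dm_crypt_request inside one crypto request allocation: the working
 * IV, the original (unmodified) IV, the original little-endian sector number
 * used as AAD, and the index of the integrity tag for this sector.
 */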
static u8 *iv_of_dmreq(struct crypt_config *cc,
		       struct dm_crypt_request *dmreq)
{
	if (crypt_integrity_aead(cc))
		return (u8 *)ALIGN((unsigned long)(dmreq + 1),
			crypto_aead_alignmask(any_tfm_aead(cc)) + 1);

	return (u8 *)ALIGN((unsigned long)(dmreq + 1),
		crypto_skcipher_alignmask(any_tfm(cc)) + 1);
}

static u8 *org_iv_of_dmreq(struct crypt_config *cc,
			   struct dm_crypt_request *dmreq)
{
	return iv_of_dmreq(cc, dmreq) + cc->iv_size;
}

static __le64 *org_sector_of_dmreq(struct crypt_config *cc,
				   struct dm_crypt_request *dmreq)
{
	u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size + cc->iv_size;

	return (__le64 *) ptr;
}

static unsigned int *org_tag_of_dmreq(struct crypt_config *cc,
				      struct dm_crypt_request *dmreq)
{
	u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size +
		  cc->iv_size + sizeof(uint64_t);

	return (unsigned int *)ptr;
}

static void *tag_from_dmreq(struct crypt_config *cc,
			    struct dm_crypt_request *dmreq)
{
	struct convert_context *ctx = dmreq->ctx;
	struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);

	return &io->integrity_metadata[*org_tag_of_dmreq(cc, dmreq) *
		cc->tuple_size];
}

static void *iv_tag_from_dmreq(struct crypt_config *cc,
			       struct dm_crypt_request *dmreq)
{
	return tag_from_dmreq(cc, dmreq) + cc->integrity_tag_size;
}
static int crypt_convert_block_aead(struct crypt_config *cc,
				    struct convert_context *ctx,
				    struct aead_request *req,
				    unsigned int tag_offset)
{
	struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
	struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
	struct dm_crypt_request *dmreq;
	u8 *iv, *org_iv, *tag_iv, *tag;
	__le64 *sector;
	int r = 0;

	BUG_ON(cc->integrity_iv_size && cc->integrity_iv_size != cc->iv_size);

	/* Reject unexpected unaligned bio. */
	if (unlikely(bv_in.bv_len & (cc->sector_size - 1)))
		return -EIO;

	dmreq = dmreq_of_req(cc, req);
	dmreq->iv_sector = ctx->cc_sector;
	if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
		dmreq->iv_sector >>= cc->sector_shift;
	dmreq->ctx = ctx;

	*org_tag_of_dmreq(cc, dmreq) = tag_offset;

	sector = org_sector_of_dmreq(cc, dmreq);
	*sector = cpu_to_le64(ctx->cc_sector - cc->iv_offset);

	iv = iv_of_dmreq(cc, dmreq);
	org_iv = org_iv_of_dmreq(cc, dmreq);
	tag = tag_from_dmreq(cc, dmreq);
	tag_iv = iv_tag_from_dmreq(cc, dmreq);

	/*
	 * AEAD request:
	 *  |----- AAD -------|------ DATA -------|-- AUTH TAG --|
	 *  | (authenticated) | (auth+encryption) |              |
	 *  | sector_LE |  IV |  sector in/out    |  tag in/out  |
	 */
	sg_init_table(dmreq->sg_in, 4);
	sg_set_buf(&dmreq->sg_in[0], sector, sizeof(uint64_t));
	sg_set_buf(&dmreq->sg_in[1], org_iv, cc->iv_size);
	sg_set_page(&dmreq->sg_in[2], bv_in.bv_page, cc->sector_size, bv_in.bv_offset);
	sg_set_buf(&dmreq->sg_in[3], tag, cc->integrity_tag_size);

	sg_init_table(dmreq->sg_out, 4);
	sg_set_buf(&dmreq->sg_out[0], sector, sizeof(uint64_t));
	sg_set_buf(&dmreq->sg_out[1], org_iv, cc->iv_size);
	sg_set_page(&dmreq->sg_out[2], bv_out.bv_page, cc->sector_size, bv_out.bv_offset);
	sg_set_buf(&dmreq->sg_out[3], tag, cc->integrity_tag_size);

	if (cc->iv_gen_ops) {
		/* For READs use IV stored in integrity metadata */
		if (cc->integrity_iv_size && bio_data_dir(ctx->bio_in) != WRITE) {
			memcpy(org_iv, tag_iv, cc->iv_size);
		} else {
			r = cc->iv_gen_ops->generator(cc, org_iv, dmreq);
			if (r < 0)
				return r;
			/* Store generated IV in integrity metadata */
			if (cc->integrity_iv_size)
				memcpy(tag_iv, org_iv, cc->iv_size);
		}
		/* Working copy of IV, to be modified in crypto API */
		memcpy(iv, org_iv, cc->iv_size);
	}

	aead_request_set_ad(req, sizeof(uint64_t) + cc->iv_size);
	if (bio_data_dir(ctx->bio_in) == WRITE) {
		aead_request_set_crypt(req, dmreq->sg_in, dmreq->sg_out,
				       cc->sector_size, iv);
		r = crypto_aead_encrypt(req);
		if (cc->integrity_tag_size + cc->integrity_iv_size != cc->tuple_size)
			memset(tag + cc->integrity_tag_size + cc->integrity_iv_size, 0,
			       cc->tuple_size - (cc->integrity_tag_size + cc->integrity_iv_size));
	} else {
		aead_request_set_crypt(req, dmreq->sg_in, dmreq->sg_out,
				       cc->sector_size + cc->integrity_tag_size, iv);
		r = crypto_aead_decrypt(req);
	}

	if (r == -EBADMSG) {
		sector_t s = le64_to_cpu(*sector);

		ctx->aead_failed = true;
		if (ctx->aead_recheck) {
			DMERR_LIMIT("%pg: INTEGRITY AEAD ERROR, sector %llu",
				    ctx->bio_in->bi_bdev, s);
			dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead",
					 ctx->bio_in, s, 0);
		}
	}

	if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
		r = cc->iv_gen_ops->post(cc, org_iv, dmreq);

	bio_advance_iter(ctx->bio_in, &ctx->iter_in, cc->sector_size);
	bio_advance_iter(ctx->bio_out, &ctx->iter_out, cc->sector_size);

	return r;
}
static int crypt_convert_block_skcipher(struct crypt_config *cc,
					struct convert_context *ctx,
					struct skcipher_request *req,
					unsigned int tag_offset)
{
	struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
	struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
	struct scatterlist *sg_in, *sg_out;
	struct dm_crypt_request *dmreq;
	u8 *iv, *org_iv, *tag_iv;
	__le64 *sector;
	int r = 0;

	/* Reject unexpected unaligned bio. */
	if (unlikely(bv_in.bv_len & (cc->sector_size - 1)))
		return -EIO;

	dmreq = dmreq_of_req(cc, req);
	dmreq->iv_sector = ctx->cc_sector;
	if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
		dmreq->iv_sector >>= cc->sector_shift;
	dmreq->ctx = ctx;

	*org_tag_of_dmreq(cc, dmreq) = tag_offset;

	iv = iv_of_dmreq(cc, dmreq);
	org_iv = org_iv_of_dmreq(cc, dmreq);
	tag_iv = iv_tag_from_dmreq(cc, dmreq);

	sector = org_sector_of_dmreq(cc, dmreq);
	*sector = cpu_to_le64(ctx->cc_sector - cc->iv_offset);

	/* For skcipher we use only the first sg item */
	sg_in  = &dmreq->sg_in[0];
	sg_out = &dmreq->sg_out[0];

	sg_init_table(sg_in, 1);
	sg_set_page(sg_in, bv_in.bv_page, cc->sector_size, bv_in.bv_offset);

	sg_init_table(sg_out, 1);
	sg_set_page(sg_out, bv_out.bv_page, cc->sector_size, bv_out.bv_offset);

	if (cc->iv_gen_ops) {
		/* For READs use IV stored in integrity metadata */
		if (cc->integrity_iv_size && bio_data_dir(ctx->bio_in) != WRITE) {
			memcpy(org_iv, tag_iv, cc->integrity_iv_size);
		} else {
			r = cc->iv_gen_ops->generator(cc, org_iv, dmreq);
			if (r < 0)
				return r;
			/* Data can be already preprocessed in generator */
			if (test_bit(CRYPT_ENCRYPT_PREPROCESS, &cc->cipher_flags))
				sg_in = sg_out;
			/* Store generated IV in integrity metadata */
			if (cc->integrity_iv_size)
				memcpy(tag_iv, org_iv, cc->integrity_iv_size);
		}
		/* Working copy of IV, to be modified in crypto API */
		memcpy(iv, org_iv, cc->iv_size);
	}

	skcipher_request_set_crypt(req, sg_in, sg_out, cc->sector_size, iv);

	if (bio_data_dir(ctx->bio_in) == WRITE)
		r = crypto_skcipher_encrypt(req);
	else
		r = crypto_skcipher_decrypt(req);

	if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
		r = cc->iv_gen_ops->post(cc, org_iv, dmreq);

	bio_advance_iter(ctx->bio_in, &ctx->iter_in, cc->sector_size);
	bio_advance_iter(ctx->bio_out, &ctx->iter_out, cc->sector_size);

	return r;
}
static void kcryptd_async_done(void *async_req, int error);

static int crypt_alloc_req_skcipher(struct crypt_config *cc,
				    struct convert_context *ctx)
{
	unsigned int key_index = ctx->cc_sector & (cc->tfms_count - 1);

	if (!ctx->r.req) {
		ctx->r.req = mempool_alloc(&cc->req_pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO);
		if (!ctx->r.req)
			return -ENOMEM;
	}

	skcipher_request_set_tfm(ctx->r.req, cc->cipher_tfm.tfms[key_index]);

	/*
	 * Use REQ_MAY_BACKLOG so a cipher driver internally backlogs
	 * requests if driver request queue is full.
	 */
	skcipher_request_set_callback(ctx->r.req,
	    CRYPTO_TFM_REQ_MAY_BACKLOG,
	    kcryptd_async_done, dmreq_of_req(cc, ctx->r.req));

	return 0;
}

static int crypt_alloc_req_aead(struct crypt_config *cc,
				struct convert_context *ctx)
{
	if (!ctx->r.req_aead) {
		ctx->r.req_aead = mempool_alloc(&cc->req_pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO);
		if (!ctx->r.req_aead)
			return -ENOMEM;
	}

	aead_request_set_tfm(ctx->r.req_aead, cc->cipher_tfm.tfms_aead[0]);

	/*
	 * Use REQ_MAY_BACKLOG so a cipher driver internally backlogs
	 * requests if driver request queue is full.
	 */
	aead_request_set_callback(ctx->r.req_aead,
	    CRYPTO_TFM_REQ_MAY_BACKLOG,
	    kcryptd_async_done, dmreq_of_req(cc, ctx->r.req_aead));

	return 0;
}

static int crypt_alloc_req(struct crypt_config *cc,
			   struct convert_context *ctx)
{
	if (crypt_integrity_aead(cc))
		return crypt_alloc_req_aead(cc, ctx);

	return crypt_alloc_req_skcipher(cc, ctx);
}
static void crypt_free_req_skcipher(struct crypt_config *cc,
				    struct skcipher_request *req, struct bio *base_bio)
{
	struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);

	if ((struct skcipher_request *)(io + 1) != req)
		mempool_free(req, &cc->req_pool);
}

static void crypt_free_req_aead(struct crypt_config *cc,
				struct aead_request *req, struct bio *base_bio)
{
	struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);

	if ((struct aead_request *)(io + 1) != req)
		mempool_free(req, &cc->req_pool);
}

static void crypt_free_req(struct crypt_config *cc, void *req, struct bio *base_bio)
{
	if (crypt_integrity_aead(cc))
		crypt_free_req_aead(cc, req, base_bio);
	else
		crypt_free_req_skcipher(cc, req, base_bio);
}
/*
 * Encrypt / decrypt data from one bio to another one (can be the same one)
 */
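/*
 * crypt_convert() walks both bio iterators one cc->sector_size chunk at a
 * time, allocating a crypto request per chunk and dispatching it to the AEAD
 * or skcipher path. It returns 0 on success, BLK_STS_DEV_RESOURCE when a
 * backlogged request forces the caller to continue from a workqueue, and
 * BLK_STS_PROTECTION / BLK_STS_IOERR on integrity or crypto errors.
 */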
static blk_status_t crypt_convert(struct crypt_config *cc,
			 struct convert_context *ctx, bool atomic, bool reset_pending)
{
	unsigned int tag_offset = 0;
	unsigned int sector_step = cc->sector_size >> SECTOR_SHIFT;
	int r;

	/*
	 * if reset_pending is set we are dealing with the bio for the first time,
	 * else we're continuing to work on the previous bio, so don't mess with
	 * the cc_pending counter
	 */
	if (reset_pending)
		atomic_set(&ctx->cc_pending, 1);

	while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) {

		r = crypt_alloc_req(cc, ctx);
		if (r) {
			complete(&ctx->restart);
			return BLK_STS_DEV_RESOURCE;
		}

		atomic_inc(&ctx->cc_pending);

		if (crypt_integrity_aead(cc))
			r = crypt_convert_block_aead(cc, ctx, ctx->r.req_aead, tag_offset);
		else
			r = crypt_convert_block_skcipher(cc, ctx, ctx->r.req, tag_offset);

		switch (r) {
		/*
		 * The request was queued by a crypto driver
		 * but the driver request queue is full, let's wait.
		 */
		case -EBUSY:
			if (in_interrupt()) {
				if (try_wait_for_completion(&ctx->restart)) {
					/*
					 * we don't have to block to wait for completion,
					 * so proceed
					 */
				} else {
					/*
					 * we can't wait for completion without blocking
					 * exit and continue processing in a workqueue
					 */
					ctx->r.req = NULL;
					ctx->cc_sector += sector_step;
					tag_offset++;
					return BLK_STS_DEV_RESOURCE;
				}
			} else {
				wait_for_completion(&ctx->restart);
			}
			reinit_completion(&ctx->restart);
			fallthrough;
		/*
		 * The request is queued and processed asynchronously,
		 * completion function kcryptd_async_done() will be called.
		 */
		case -EINPROGRESS:
			ctx->r.req = NULL;
			ctx->cc_sector += sector_step;
			tag_offset++;
			continue;
		/*
		 * The request was already processed (synchronously).
		 */
		case 0:
			atomic_dec(&ctx->cc_pending);
			ctx->cc_sector += sector_step;
			tag_offset++;
			continue;
		/*
		 * There was a data integrity error.
		 */
		case -EBADMSG:
			atomic_dec(&ctx->cc_pending);
			return BLK_STS_PROTECTION;
		/*
		 * There was an error while processing the request.
		 */
		default:
			atomic_dec(&ctx->cc_pending);
			return BLK_STS_IOERR;
		}
	}

	return 0;
}
static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);

/*
 * Generate a new unfragmented bio with the given size
 * This should never violate the device limitations (but if it did then block
 * core should split the bio as needed).
 *
 * This function may be called concurrently. If we allocate from the mempool
 * concurrently, there is a possibility of deadlock. For example, if we have
 * mempool of 256 pages, two processes, each wanting 256, pages allocate from
 * the mempool concurrently, it may deadlock in a situation where both processes
 * have allocated 128 pages and the mempool is exhausted.
 *
 * In order to avoid this scenario we allocate the pages under a mutex.
 *
 * In order to not degrade performance with excessive locking, we try
 * non-blocking allocations without a mutex first but on failure we fallback
 * to blocking allocations with a mutex.
 *
 * In order to reduce allocation overhead, we try to allocate compound pages in
 * the first pass. If they are not available, we fall back to the mempool.
 */
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned int size)
{
	struct crypt_config *cc = io->cc;
	struct bio *clone;
	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM;
	unsigned int remaining_size;
	unsigned int order = MAX_PAGE_ORDER;

retry:
	if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
		mutex_lock(&cc->bio_alloc_lock);

	clone = bio_alloc_bioset(cc->dev->bdev, nr_iovecs, io->base_bio->bi_opf,
				 GFP_NOIO, &cc->bs);
	clone->bi_private = io;
	clone->bi_end_io = crypt_endio;
	clone->bi_ioprio = io->base_bio->bi_ioprio;

	remaining_size = size;

	while (remaining_size) {
		struct page *pages;
		unsigned size_to_add;
		unsigned remaining_order = __fls((remaining_size + PAGE_SIZE - 1) >> PAGE_SHIFT);
		order = min(order, remaining_order);

		while (order > 0) {
			if (unlikely(percpu_counter_read_positive(&cc->n_allocated_pages) +
					(1 << order) > dm_crypt_pages_per_client))
				goto decrease_order;
			pages = alloc_pages(gfp_mask
				| __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | __GFP_COMP,
				order);
			if (likely(pages != NULL)) {
				percpu_counter_add(&cc->n_allocated_pages, 1 << order);
				goto have_pages;
			}
decrease_order:
			order--;
		}

		pages = mempool_alloc(&cc->page_pool, gfp_mask);
		if (!pages) {
			crypt_free_buffer_pages(cc, clone);
			bio_put(clone);
			gfp_mask |= __GFP_DIRECT_RECLAIM;
			order = 0;
			goto retry;
		}

have_pages:
		size_to_add = min((unsigned)PAGE_SIZE << order, remaining_size);
		__bio_add_page(clone, pages, size_to_add, 0);
		remaining_size -= size_to_add;
	}

	/* Allocate space for integrity tags */
	if (dm_crypt_integrity_io_alloc(io, clone)) {
		crypt_free_buffer_pages(cc, clone);
		bio_put(clone);
		clone = NULL;
	}

	if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
		mutex_unlock(&cc->bio_alloc_lock);

	return clone;
}
static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
{
	struct folio_iter fi;

	if (clone->bi_vcnt > 0) { /* bio_for_each_folio_all crashes with an empty bio */
		bio_for_each_folio_all(fi, clone) {
			if (folio_test_large(fi.folio)) {
				percpu_counter_sub(&cc->n_allocated_pages,
						1 << folio_order(fi.folio));
				folio_put(fi.folio);
			} else {
				mempool_free(&fi.folio->page, &cc->page_pool);
			}
		}
	}
}
static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
			  struct bio *bio, sector_t sector)
{
	io->cc = cc;
	io->base_bio = bio;
	io->sector = sector;
	io->error = 0;
	io->ctx.aead_recheck = false;
	io->ctx.aead_failed = false;
	io->ctx.r.req = NULL;
	io->integrity_metadata = NULL;
	io->integrity_metadata_from_pool = false;
	atomic_set(&io->io_pending, 0);
}

static void crypt_inc_pending(struct dm_crypt_io *io)
{
	atomic_inc(&io->io_pending);
}
static void kcryptd_queue_read(struct dm_crypt_io *io);

/*
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 */
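/*
 * Note (summary of the recheck path below): if AEAD authentication failed on
 * a READ, the I/O is retried once with aead_recheck set, re-reading the
 * ciphertext into a private buffer, so that a failure which may have been
 * caused by the caller modifying its buffer in flight is not reported as a
 * real integrity error.
 */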
static void crypt_dec_pending(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;
	struct bio *base_bio = io->base_bio;
	blk_status_t error = io->error;

	if (!atomic_dec_and_test(&io->io_pending))
		return;

	if (likely(!io->ctx.aead_recheck) && unlikely(io->ctx.aead_failed) &&
	    cc->used_tag_size && bio_data_dir(base_bio) == READ) {
		io->ctx.aead_recheck = true;
		io->ctx.aead_failed = false;
		io->error = 0;
		kcryptd_queue_read(io);
		return;
	}

	if (io->ctx.r.req)
		crypt_free_req(cc, io->ctx.r.req, base_bio);

	if (unlikely(io->integrity_metadata_from_pool))
		mempool_free(io->integrity_metadata, &io->cc->tag_pool);
	else
		kfree(io->integrity_metadata);

	base_bio->bi_status = error;

	bio_endio(base_bio);
}
/*
 * kcryptd/kcryptd_io:
 *
 * Needed because it would be very unwise to do decryption in an
 * interrupt context.
 *
 * kcryptd performs the actual encryption or decryption.
 *
 * kcryptd_io performs the IO submission.
 *
 * They must be separated as otherwise the final stages could be
 * starved by new requests which can block in the first stages due
 * to memory allocation.
 *
 * The work is done per CPU global for all dm-crypt instances.
 * They should not depend on each other and do not block.
 */
static void crypt_endio(struct bio *clone)
{
	struct dm_crypt_io *io = clone->bi_private;
	struct crypt_config *cc = io->cc;
	unsigned int rw = bio_data_dir(clone);
	blk_status_t error = clone->bi_status;

	if (io->ctx.aead_recheck && !error) {
		kcryptd_queue_crypt(io);
		return;
	}

	/*
	 * free the processed pages
	 */
	if (rw == WRITE || io->ctx.aead_recheck)
		crypt_free_buffer_pages(cc, clone);

	bio_put(clone);

	if (rw == READ && !error) {
		kcryptd_queue_crypt(io);
		return;
	}

	if (unlikely(error))
		io->error = error;

	crypt_dec_pending(io);
}
#define CRYPT_MAP_READ_GFP GFP_NOWAIT
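/*
 * Reads are first attempted with a non-blocking GFP_NOWAIT clone allocation
 * (CRYPT_MAP_READ_GFP); if that fails, kcryptd_queue_read() defers the
 * submission to the io_queue workqueue, where GFP_NOIO may block.
 */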
static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
{
	struct crypt_config *cc = io->cc;
	struct bio *clone;

	if (io->ctx.aead_recheck) {
		if (!(gfp & __GFP_DIRECT_RECLAIM))
			return 1;
		crypt_inc_pending(io);
		clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
		if (unlikely(!clone)) {
			crypt_dec_pending(io);
			return 1;
		}
		clone->bi_iter.bi_sector = cc->start + io->sector;
		crypt_convert_init(cc, &io->ctx, clone, clone, io->sector);
		io->saved_bi_iter = clone->bi_iter;
		dm_submit_bio_remap(io->base_bio, clone);
		return 0;
	}

	/*
	 * We need the original biovec array in order to decrypt the whole bio
	 * data *afterwards* -- thanks to immutable biovecs we don't need to
	 * worry about the block layer modifying the biovec array; so leverage
	 * bio_alloc_clone().
	 */
	clone = bio_alloc_clone(cc->dev->bdev, io->base_bio, gfp, &cc->bs);
	if (!clone)
		return 1;

	clone->bi_private = io;
	clone->bi_end_io = crypt_endio;

	crypt_inc_pending(io);

	clone->bi_iter.bi_sector = cc->start + io->sector;

	if (dm_crypt_integrity_io_alloc(io, clone)) {
		crypt_dec_pending(io);
		bio_put(clone);
		return 1;
	}

	dm_submit_bio_remap(io->base_bio, clone);
	return 0;
}
static void kcryptd_io_read_work(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	crypt_inc_pending(io);
	if (kcryptd_io_read(io, GFP_NOIO))
		io->error = BLK_STS_RESOURCE;
	crypt_dec_pending(io);
}

static void kcryptd_queue_read(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;

	INIT_WORK(&io->work, kcryptd_io_read_work);
	queue_work(cc->io_queue, &io->work);
}

static void kcryptd_io_write(struct dm_crypt_io *io)
{
	struct bio *clone = io->ctx.bio_out;

	dm_submit_bio_remap(io->base_bio, clone);
}
#define crypt_io_from_node(node) rb_entry((node), struct dm_crypt_io, rb_node)
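/*
 * Encrypted writes are not submitted directly from kcryptd: they are sorted
 * by sector into cc->write_tree and handed to the dmcrypt_write thread below,
 * which submits them under a blk plug so the encrypted bios reach the lower
 * device roughly in offset order even when encryption completes out of order.
 */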
static int dmcrypt_write(void *data)
{
	struct crypt_config *cc = data;
	struct dm_crypt_io *io;

	while (1) {
		struct rb_root write_tree;
		struct blk_plug plug;

		spin_lock_irq(&cc->write_thread_lock);
continue_locked:

		if (!RB_EMPTY_ROOT(&cc->write_tree))
			goto pop_from_list;

		set_current_state(TASK_INTERRUPTIBLE);

		spin_unlock_irq(&cc->write_thread_lock);

		if (unlikely(kthread_should_stop())) {
			set_current_state(TASK_RUNNING);
			break;
		}

		schedule();

		spin_lock_irq(&cc->write_thread_lock);
		goto continue_locked;

pop_from_list:
		write_tree = cc->write_tree;
		cc->write_tree = RB_ROOT;
		spin_unlock_irq(&cc->write_thread_lock);

		BUG_ON(rb_parent(write_tree.rb_node));

		/*
		 * Note: we cannot walk the tree here with rb_next because
		 * the structures may be freed when kcryptd_io_write is called.
		 */
		blk_start_plug(&plug);
		do {
			io = crypt_io_from_node(rb_first(&write_tree));
			rb_erase(&io->rb_node, &write_tree);
			kcryptd_io_write(io);
			cond_resched();
		} while (!RB_EMPTY_ROOT(&write_tree));
		blk_finish_plug(&plug);
	}
	return 0;
}
static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
{
	struct bio *clone = io->ctx.bio_out;
	struct crypt_config *cc = io->cc;
	unsigned long flags;
	sector_t sector;
	struct rb_node **rbp, *parent;

	if (unlikely(io->error)) {
		crypt_free_buffer_pages(cc, clone);
		bio_put(clone);
		crypt_dec_pending(io);
		return;
	}

	/* crypt_convert should have filled the clone bio */
	BUG_ON(io->ctx.iter_out.bi_size);

	clone->bi_iter.bi_sector = cc->start + io->sector;

	if ((likely(!async) && test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) ||
	    test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags)) {
		dm_submit_bio_remap(io->base_bio, clone);
		return;
	}

	spin_lock_irqsave(&cc->write_thread_lock, flags);
	if (RB_EMPTY_ROOT(&cc->write_tree))
		wake_up_process(cc->write_thread);
	rbp = &cc->write_tree.rb_node;
	parent = NULL;
	sector = io->sector;
	while (*rbp) {
		parent = *rbp;
		if (sector < crypt_io_from_node(parent)->sector)
			rbp = &(*rbp)->rb_left;
		else
			rbp = &(*rbp)->rb_right;
	}
	rb_link_node(&io->rb_node, parent, rbp);
	rb_insert_color(&io->rb_node, &cc->write_tree);
	spin_unlock_irqrestore(&cc->write_thread_lock, flags);
}
static bool kcryptd_crypt_write_inline(struct crypt_config *cc,
				       struct convert_context *ctx)
{
	if (!test_bit(DM_CRYPT_WRITE_INLINE, &cc->flags))
		return false;

	/*
	 * Note: zone append writes (REQ_OP_ZONE_APPEND) do not have ordering
	 * constraints so they do not need to be issued inline by
	 * kcryptd_crypt_write_convert().
	 */
	switch (bio_op(ctx->bio_in)) {
	case REQ_OP_WRITE:
	case REQ_OP_WRITE_ZEROES:
		return true;
	default:
		return false;
	}
}
*work
)
2091 struct dm_crypt_io
*io
= container_of(work
, struct dm_crypt_io
, work
);
2092 struct crypt_config
*cc
= io
->cc
;
2093 struct convert_context
*ctx
= &io
->ctx
;
2095 sector_t sector
= io
->sector
;
2098 wait_for_completion(&ctx
->restart
);
2099 reinit_completion(&ctx
->restart
);
2101 r
= crypt_convert(cc
, &io
->ctx
, true, false);
2104 crypt_finished
= atomic_dec_and_test(&ctx
->cc_pending
);
2105 if (!crypt_finished
&& kcryptd_crypt_write_inline(cc
, ctx
)) {
2106 /* Wait for completion signaled by kcryptd_async_done() */
2107 wait_for_completion(&ctx
->restart
);
2111 /* Encryption was already finished, submit io now */
2112 if (crypt_finished
) {
2113 kcryptd_crypt_write_io_submit(io
, 0);
2114 io
->sector
= sector
;
2117 crypt_dec_pending(io
);
static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;
	struct convert_context *ctx = &io->ctx;
	struct bio *clone;
	int crypt_finished;
	sector_t sector = io->sector;
	blk_status_t r;

	/*
	 * Prevent io from disappearing until this function completes.
	 */
	crypt_inc_pending(io);
	crypt_convert_init(cc, ctx, NULL, io->base_bio, sector);

	clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
	if (unlikely(!clone)) {
		io->error = BLK_STS_IOERR;
		goto dec;
	}

	io->ctx.bio_out = clone;
	io->ctx.iter_out = clone->bi_iter;

	if (crypt_integrity_aead(cc)) {
		bio_copy_data(clone, io->base_bio);
		io->ctx.bio_in = clone;
		io->ctx.iter_in = clone->bi_iter;
	}

	sector += bio_sectors(clone);

	crypt_inc_pending(io);
	r = crypt_convert(cc, ctx,
			  test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags), true);
	/*
	 * Crypto API backlogged the request, because its queue was full
	 * and we're in softirq context, so continue from a workqueue
	 * (TODO: is it actually possible to be in softirq in the write path?)
	 */
	if (r == BLK_STS_DEV_RESOURCE) {
		INIT_WORK(&io->work, kcryptd_crypt_write_continue);
		queue_work(cc->crypt_queue, &io->work);
		return;
	}
	if (r)
		io->error = r;
	crypt_finished = atomic_dec_and_test(&ctx->cc_pending);
	if (!crypt_finished && kcryptd_crypt_write_inline(cc, ctx)) {
		/* Wait for completion signaled by kcryptd_async_done() */
		wait_for_completion(&ctx->restart);
		crypt_finished = 1;
	}

	/* Encryption was already finished, submit io now */
	if (crypt_finished) {
		kcryptd_crypt_write_io_submit(io, 0);
		io->sector = sector;
	}

dec:
	crypt_dec_pending(io);
}
static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
{
	if (io->ctx.aead_recheck) {
		if (!io->error) {
			io->ctx.bio_in->bi_iter = io->saved_bi_iter;
			bio_copy_data(io->base_bio, io->ctx.bio_in);
		}
		crypt_free_buffer_pages(io->cc, io->ctx.bio_in);
		bio_put(io->ctx.bio_in);
	}

	crypt_dec_pending(io);
}

static void kcryptd_crypt_read_continue(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
	struct crypt_config *cc = io->cc;
	blk_status_t r;

	wait_for_completion(&io->ctx.restart);
	reinit_completion(&io->ctx.restart);

	r = crypt_convert(cc, &io->ctx, true, false);
	if (r)
		io->error = r;

	if (atomic_dec_and_test(&io->ctx.cc_pending))
		kcryptd_crypt_read_done(io);

	crypt_dec_pending(io);
}
static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;
	blk_status_t r;

	crypt_inc_pending(io);

	if (io->ctx.aead_recheck) {
		io->ctx.cc_sector = io->sector + cc->iv_offset;
		r = crypt_convert(cc, &io->ctx,
				  test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true);
	} else {
		crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
				   io->sector);

		r = crypt_convert(cc, &io->ctx,
				  test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true);
	}
	/*
	 * Crypto API backlogged the request, because its queue was full
	 * and we're in softirq context, so continue from a workqueue
	 */
	if (r == BLK_STS_DEV_RESOURCE) {
		INIT_WORK(&io->work, kcryptd_crypt_read_continue);
		queue_work(cc->crypt_queue, &io->work);
		return;
	}
	if (r)
		io->error = r;

	if (atomic_dec_and_test(&io->ctx.cc_pending))
		kcryptd_crypt_read_done(io);

	crypt_dec_pending(io);
}
static void kcryptd_async_done(void *data, int error)
{
	struct dm_crypt_request *dmreq = data;
	struct convert_context *ctx = dmreq->ctx;
	struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
	struct crypt_config *cc = io->cc;

	/*
	 * A request from crypto driver backlog is going to be processed now,
	 * finish the completion and continue in crypt_convert().
	 * (Callback will be called for the second time for this request.)
	 */
	if (error == -EINPROGRESS) {
		complete(&ctx->restart);
		return;
	}

	if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
		error = cc->iv_gen_ops->post(cc, org_iv_of_dmreq(cc, dmreq), dmreq);

	if (error == -EBADMSG) {
		sector_t s = le64_to_cpu(*org_sector_of_dmreq(cc, dmreq));

		ctx->aead_failed = true;
		if (ctx->aead_recheck) {
			DMERR_LIMIT("%pg: INTEGRITY AEAD ERROR, sector %llu",
				    ctx->bio_in->bi_bdev, s);
			dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead",
					 ctx->bio_in, s, 0);
		}
		io->error = BLK_STS_PROTECTION;
	} else if (error < 0)
		io->error = BLK_STS_IOERR;

	crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio);

	if (!atomic_dec_and_test(&ctx->cc_pending))
		return;

	/*
	 * The request is fully completed: for inline writes, let
	 * kcryptd_crypt_write_convert() do the IO submission.
	 */
	if (bio_data_dir(io->base_bio) == READ) {
		kcryptd_crypt_read_done(io);
		return;
	}

	if (kcryptd_crypt_write_inline(cc, ctx)) {
		complete(&ctx->restart);
		return;
	}

	kcryptd_crypt_write_io_submit(io, 1);
}

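/*
 * Completion semantics above: -EINPROGRESS is not a final status, it is the
 * crypto layer announcing that a backlogged request has entered its queue,
 * so this callback fires a second time with the real result.  -EBADMSG means
 * the AEAD authentication tag failed to verify: the offending sector is
 * logged and audited on the recheck pass (ctx->aead_recheck) and the bio
 * error becomes BLK_STS_PROTECTION; other negative errors map to
 * BLK_STS_IOERR.
 */
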
static void kcryptd_crypt(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_convert(io);
	else
		kcryptd_crypt_write_convert(io);
}

static void kcryptd_queue_crypt(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;

	if ((bio_data_dir(io->base_bio) == READ && test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags)) ||
	    (bio_data_dir(io->base_bio) == WRITE && test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags))) {
		/*
		 * in_hardirq(): Crypto API's skcipher_walk_first() refuses to work in hard IRQ context.
		 * irqs_disabled(): the kernel may run some IO completion from the idle thread, but
		 * it is being executed with irqs disabled.
		 */
		if (in_hardirq() || irqs_disabled()) {
			INIT_WORK(&io->work, kcryptd_crypt);
			queue_work(system_bh_wq, &io->work);
			return;
		} else {
			kcryptd_crypt(&io->work);
			return;
		}
	}

	INIT_WORK(&io->work, kcryptd_crypt);
	queue_work(cc->crypt_queue, &io->work);
}

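/*
 * With no_read_workqueue / no_write_workqueue set, the conversion runs
 * directly in the caller's context whenever that is safe; completions that
 * arrive in hard IRQ context or with interrupts disabled are deferred to the
 * system BH workqueue instead, because the skcipher walk cannot run there.
 * Otherwise the io is queued to the per-device cc->crypt_queue as usual.
 */
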
static void crypt_free_tfms_aead(struct crypt_config *cc)
{
	if (!cc->cipher_tfm.tfms_aead)
		return;

	if (cc->cipher_tfm.tfms_aead[0] && !IS_ERR(cc->cipher_tfm.tfms_aead[0])) {
		crypto_free_aead(cc->cipher_tfm.tfms_aead[0]);
		cc->cipher_tfm.tfms_aead[0] = NULL;
	}

	kfree(cc->cipher_tfm.tfms_aead);
	cc->cipher_tfm.tfms_aead = NULL;
}

static void crypt_free_tfms_skcipher(struct crypt_config *cc)
{
	unsigned int i;

	if (!cc->cipher_tfm.tfms)
		return;

	for (i = 0; i < cc->tfms_count; i++)
		if (cc->cipher_tfm.tfms[i] && !IS_ERR(cc->cipher_tfm.tfms[i])) {
			crypto_free_skcipher(cc->cipher_tfm.tfms[i]);
			cc->cipher_tfm.tfms[i] = NULL;
		}

	kfree(cc->cipher_tfm.tfms);
	cc->cipher_tfm.tfms = NULL;
}

static void crypt_free_tfms(struct crypt_config *cc)
{
	if (crypt_integrity_aead(cc))
		crypt_free_tfms_aead(cc);
	else
		crypt_free_tfms_skcipher(cc);
}

static int crypt_alloc_tfms_skcipher(struct crypt_config *cc, char *ciphermode)
{
	unsigned int i;
	int err;

	cc->cipher_tfm.tfms = kcalloc(cc->tfms_count,
				      sizeof(struct crypto_skcipher *),
				      GFP_KERNEL);
	if (!cc->cipher_tfm.tfms)
		return -ENOMEM;

	for (i = 0; i < cc->tfms_count; i++) {
		cc->cipher_tfm.tfms[i] = crypto_alloc_skcipher(ciphermode, 0,
						CRYPTO_ALG_ALLOCATES_MEMORY);
		if (IS_ERR(cc->cipher_tfm.tfms[i])) {
			err = PTR_ERR(cc->cipher_tfm.tfms[i]);
			crypt_free_tfms(cc);
			return err;
		}
	}

	/*
	 * dm-crypt performance can vary greatly depending on which crypto
	 * algorithm implementation is used.  Help people debug performance
	 * problems by logging the ->cra_driver_name.
	 */
	DMDEBUG_LIMIT("%s using implementation \"%s\"", ciphermode,
		      crypto_skcipher_alg(any_tfm(cc))->base.cra_driver_name);
	return 0;
}

static int crypt_alloc_tfms_aead(struct crypt_config *cc, char *ciphermode)
{
	int err;

	cc->cipher_tfm.tfms = kmalloc(sizeof(struct crypto_aead *), GFP_KERNEL);
	if (!cc->cipher_tfm.tfms)
		return -ENOMEM;

	cc->cipher_tfm.tfms_aead[0] = crypto_alloc_aead(ciphermode, 0,
						CRYPTO_ALG_ALLOCATES_MEMORY);
	if (IS_ERR(cc->cipher_tfm.tfms_aead[0])) {
		err = PTR_ERR(cc->cipher_tfm.tfms_aead[0]);
		crypt_free_tfms(cc);
		return err;
	}

	DMDEBUG_LIMIT("%s using implementation \"%s\"", ciphermode,
		      crypto_aead_alg(any_tfm_aead(cc))->base.cra_driver_name);
	return 0;
}

static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
{
	if (crypt_integrity_aead(cc))
		return crypt_alloc_tfms_aead(cc, ciphermode);
	else
		return crypt_alloc_tfms_skcipher(cc, ciphermode);
}

static unsigned int crypt_subkey_size(struct crypt_config *cc)
{
	return (cc->key_size - cc->key_extra_size) >> ilog2(cc->tfms_count);
}

static unsigned int crypt_authenckey_size(struct crypt_config *cc)
{
	return crypt_subkey_size(cc) + RTA_SPACE(sizeof(struct crypto_authenc_key_param));
}

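/*
 * Illustrative sizing: with tfms_count = 2, key_size = 128 and
 * key_extra_size = 0, crypt_subkey_size() returns 128 >> ilog2(2) = 64 bytes
 * per transform; crypt_authenckey_size() adds RTA_SPACE(...) on top of that
 * for the rtattr header consumed by the authenc() template.
 */
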
/*
 * If AEAD is composed like authenc(hmac(sha256),xts(aes)), the key must be
 * passed to the crypto API in a special (rtattr-encoded) format.
 * This function converts cc->key to that format.
 */
static void crypt_copy_authenckey(char *p, const void *key,
				  unsigned int enckeylen, unsigned int authkeylen)
{
	struct crypto_authenc_key_param *param;
	struct rtattr *rta;

	rta = (struct rtattr *)p;
	param = RTA_DATA(rta);
	param->enckeylen = cpu_to_be32(enckeylen);
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	p += RTA_SPACE(sizeof(*param));
	memcpy(p, key + enckeylen, authkeylen);
	p += authkeylen;
	memcpy(p, key, enckeylen);
}

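/*
 * The buffer produced above has the layout expected by the authenc()
 * template's setkey():
 *
 *   | rtattr (CRYPTO_AUTHENC_KEYA_PARAM) + enckeylen (be32) | auth key | enc key |
 *
 * i.e. the authentication (HMAC) key precedes the encryption key, with the
 * encryption key length announced in the leading rtattr parameter.
 */
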
static int crypt_setkey(struct crypt_config *cc)
{
	unsigned int subkey_size;
	int err = 0, i, r;

	/* Ignore extra keys (which are used for IV etc) */
	subkey_size = crypt_subkey_size(cc);

	if (crypt_integrity_hmac(cc)) {
		if (subkey_size < cc->key_mac_size)
			return -EINVAL;

		crypt_copy_authenckey(cc->authenc_key, cc->key,
				      subkey_size - cc->key_mac_size,
				      cc->key_mac_size);
	}

	for (i = 0; i < cc->tfms_count; i++) {
		if (crypt_integrity_hmac(cc))
			r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i],
				cc->authenc_key, crypt_authenckey_size(cc));
		else if (crypt_integrity_aead(cc))
			r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i],
					       cc->key + (i * subkey_size),
					       subkey_size);
		else
			r = crypto_skcipher_setkey(cc->cipher_tfm.tfms[i],
						   cc->key + (i * subkey_size),
						   subkey_size);
		if (r)
			err = r;
	}

	if (crypt_integrity_hmac(cc))
		memzero_explicit(cc->authenc_key, crypt_authenckey_size(cc));

	return err;
}

static bool contains_whitespace(const char *str)
{
	while (*str)
		if (isspace(*str++))
			return true;
	return false;
}

static int set_key_user(struct crypt_config *cc, struct key *key)
{
	const struct user_key_payload *ukp;

	ukp = user_key_payload_locked(key);
	if (!ukp)
		return -EKEYREVOKED;

	if (cc->key_size != ukp->datalen)
		return -EINVAL;

	memcpy(cc->key, ukp->data, cc->key_size);

	return 0;
}

static int set_key_encrypted(struct crypt_config *cc, struct key *key)
{
	const struct encrypted_key_payload *ekp;

	ekp = key->payload.data[0];
	if (!ekp)
		return -EKEYREVOKED;

	if (cc->key_size != ekp->decrypted_datalen)
		return -EINVAL;

	memcpy(cc->key, ekp->decrypted_data, cc->key_size);

	return 0;
}

static int set_key_trusted(struct crypt_config *cc, struct key *key)
{
	const struct trusted_key_payload *tkp;

	tkp = key->payload.data[0];
	if (!tkp)
		return -EKEYREVOKED;

	if (cc->key_size != tkp->key_len)
		return -EINVAL;

	memcpy(cc->key, tkp->key, cc->key_size);

	return 0;
}

static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string)
{
	char *new_key_string, *key_desc;
	int ret;
	struct key_type *type;
	struct key *key;
	int (*set_key)(struct crypt_config *cc, struct key *key);

	/*
	 * Reject key_string with whitespace. dm core currently lacks code for
	 * proper whitespace escaping in arguments on DM_TABLE_STATUS path.
	 */
	if (contains_whitespace(key_string)) {
		DMERR("whitespace chars not allowed in key string");
		return -EINVAL;
	}

	/* look for next ':' separating key_type from key_description */
	key_desc = strchr(key_string, ':');
	if (!key_desc || key_desc == key_string || !strlen(key_desc + 1))
		return -EINVAL;

	if (!strncmp(key_string, "logon:", key_desc - key_string + 1)) {
		type = &key_type_logon;
		set_key = set_key_user;
	} else if (!strncmp(key_string, "user:", key_desc - key_string + 1)) {
		type = &key_type_user;
		set_key = set_key_user;
	} else if (IS_ENABLED(CONFIG_ENCRYPTED_KEYS) &&
		   !strncmp(key_string, "encrypted:", key_desc - key_string + 1)) {
		type = &key_type_encrypted;
		set_key = set_key_encrypted;
	} else if (IS_ENABLED(CONFIG_TRUSTED_KEYS) &&
		   !strncmp(key_string, "trusted:", key_desc - key_string + 1)) {
		type = &key_type_trusted;
		set_key = set_key_trusted;
	} else {
		return -EINVAL;
	}

	new_key_string = kstrdup(key_string, GFP_KERNEL);
	if (!new_key_string)
		return -ENOMEM;

	key = request_key(type, key_desc + 1, NULL);
	if (IS_ERR(key)) {
		ret = PTR_ERR(key);
		goto free_new_key_string;
	}

	down_read(&key->sem);
	ret = set_key(cc, key);
	up_read(&key->sem);
	key_put(key);
	if (ret < 0)
		goto free_new_key_string;

	/* clear the flag since following operations may invalidate previously valid key */
	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);

	ret = crypt_setkey(cc);
	if (ret)
		goto free_new_key_string;

	set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
	kfree_sensitive(cc->key_string);
	cc->key_string = new_key_string;
	return 0;

free_new_key_string:
	kfree_sensitive(new_key_string);
	return ret;
}

static int get_key_size(char **key_string)
{
	char *colon, dummy;
	int ret;

	if (*key_string[0] != ':')
		return strlen(*key_string) >> 1;

	/* look for next ':' in key string */
	colon = strpbrk(*key_string + 1, ":");
	if (!colon)
		return -EINVAL;

	if (sscanf(*key_string + 1, "%u%c", &ret, &dummy) != 2 || dummy != ':')
		return -EINVAL;

	*key_string = colon;

	/* remaining key string should be :<logon|user>:<key_desc> */

	return ret;
}

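/*
 * Example (illustrative) keyring references accepted in the key field:
 *   ":32:logon:my_volume_key"     - 32-byte key held in a logon-type key
 *   ":64:user:backup_volume_key"  - 64-byte key held in a user-type key
 * get_key_size() parses the "32"/"64" part and leaves *key_string pointing
 * at ":logon:..." / ":user:..." for crypt_set_keyring_key().
 */
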
#else

static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string)
{
	return -EINVAL;
}

static int get_key_size(char **key_string)
{
	return (*key_string[0] == ':') ? -EINVAL : (int)(strlen(*key_string) >> 1);
}

#endif /* CONFIG_KEYS */

static int crypt_set_key(struct crypt_config *cc, char *key)
{
	int r = -EINVAL;
	int key_string_len = strlen(key);

	/* Hyphen (which gives a key_size of zero) means there is no key. */
	if (!cc->key_size && strcmp(key, "-"))
		goto out;

	/* ':' means the key is in kernel keyring, short-circuit normal key processing */
	if (key[0] == ':') {
		r = crypt_set_keyring_key(cc, key + 1);
		goto out;
	}

	/* clear the flag since following operations may invalidate previously valid key */
	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);

	/* wipe references to any kernel keyring key */
	kfree_sensitive(cc->key_string);
	cc->key_string = NULL;

	/* Decode key from its hex representation. */
	if (cc->key_size && hex2bin(cc->key, key, cc->key_size) < 0)
		goto out;

	r = crypt_setkey(cc);
	if (!r)
		set_bit(DM_CRYPT_KEY_VALID, &cc->flags);

out:
	/* Hex key string not needed after here, so wipe it. */
	memset(key, '0', key_string_len);

	return r;
}

static int crypt_wipe_key(struct crypt_config *cc)
{
	int r;

	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
	get_random_bytes(&cc->key, cc->key_size);

	/* Wipe IV private keys */
	if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
		r = cc->iv_gen_ops->wipe(cc);
		if (r)
			return r;
	}

	kfree_sensitive(cc->key_string);
	cc->key_string = NULL;
	r = crypt_setkey(cc);
	memset(&cc->key, 0, cc->key_size * sizeof(u8));

	return r;
}

static void crypt_calculate_pages_per_client(void)
{
	unsigned long pages = (totalram_pages() - totalhigh_pages()) * DM_CRYPT_MEMORY_PERCENT / 100;

	if (!dm_crypt_clients_n)
		return;

	pages /= dm_crypt_clients_n;
	if (pages < DM_CRYPT_MIN_PAGES_PER_CLIENT)
		pages = DM_CRYPT_MIN_PAGES_PER_CLIENT;
	dm_crypt_pages_per_client = pages;
}

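/*
 * Illustrative sizing (assuming DM_CRYPT_MEMORY_PERCENT is 2): on a machine
 * with 8 GiB of lowmem and four dm-crypt clients, each client may hold about
 * (8 GiB * 2%) / 4 = ~41 MiB of bounce pages, never less than the
 * DM_CRYPT_MIN_PAGES_PER_CLIENT floor.
 */
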
static void *crypt_page_alloc(gfp_t gfp_mask, void *pool_data)
{
	struct crypt_config *cc = pool_data;
	struct page *page;

	/*
	 * Note, percpu_counter_read_positive() may over (and under) estimate
	 * the current usage by at most (batch - 1) * num_online_cpus() pages,
	 * but avoids potential spinlock contention of an exact result.
	 */
	if (unlikely(percpu_counter_read_positive(&cc->n_allocated_pages) >= dm_crypt_pages_per_client) &&
	    likely(gfp_mask & __GFP_NORETRY))
		return NULL;

	page = alloc_page(gfp_mask);
	if (likely(page != NULL))
		percpu_counter_add(&cc->n_allocated_pages, 1);

	return page;
}

static void crypt_page_free(void *page, void *pool_data)
{
	struct crypt_config *cc = pool_data;

	__free_page(page);
	percpu_counter_sub(&cc->n_allocated_pages, 1);
}

static void crypt_dtr(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	ti->private = NULL;

	if (!cc)
		return;

	if (cc->write_thread)
		kthread_stop(cc->write_thread);

	if (cc->io_queue)
		destroy_workqueue(cc->io_queue);
	if (cc->crypt_queue)
		destroy_workqueue(cc->crypt_queue);

	if (cc->workqueue_id)
		ida_free(&workqueue_ida, cc->workqueue_id);

	crypt_free_tfms(cc);

	bioset_exit(&cc->bs);

	mempool_exit(&cc->page_pool);
	mempool_exit(&cc->req_pool);
	mempool_exit(&cc->tag_pool);

	WARN_ON(percpu_counter_sum(&cc->n_allocated_pages) != 0);
	percpu_counter_destroy(&cc->n_allocated_pages);

	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);

	if (cc->dev)
		dm_put_device(ti, cc->dev);

	kfree_sensitive(cc->cipher_string);
	kfree_sensitive(cc->key_string);
	kfree_sensitive(cc->cipher_auth);
	kfree_sensitive(cc->authenc_key);

	mutex_destroy(&cc->bio_alloc_lock);

	/* Must zero key material before freeing */
	kfree_sensitive(cc);

	spin_lock(&dm_crypt_clients_lock);
	WARN_ON(!dm_crypt_clients_n);
	dm_crypt_clients_n--;
	crypt_calculate_pages_per_client();
	spin_unlock(&dm_crypt_clients_lock);

	dm_audit_log_dtr(DM_MSG_PREFIX, ti, 1);
}

static int crypt_ctr_ivmode(struct dm_target *ti, const char *ivmode)
{
	struct crypt_config *cc = ti->private;

	if (crypt_integrity_aead(cc))
		cc->iv_size = crypto_aead_ivsize(any_tfm_aead(cc));
	else
		cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc));

	if (cc->iv_size)
		/* at least a 64 bit sector number should fit in our buffer */
		cc->iv_size = max(cc->iv_size,
				  (unsigned int)(sizeof(u64) / sizeof(u8)));
	else if (ivmode) {
		DMWARN("Selected cipher does not support IVs");
		ivmode = NULL;
	}

	/* Choose ivmode, see comments at iv code. */
	if (ivmode == NULL)
		cc->iv_gen_ops = NULL;
	else if (strcmp(ivmode, "plain") == 0)
		cc->iv_gen_ops = &crypt_iv_plain_ops;
	else if (strcmp(ivmode, "plain64") == 0)
		cc->iv_gen_ops = &crypt_iv_plain64_ops;
	else if (strcmp(ivmode, "plain64be") == 0)
		cc->iv_gen_ops = &crypt_iv_plain64be_ops;
	else if (strcmp(ivmode, "essiv") == 0)
		cc->iv_gen_ops = &crypt_iv_essiv_ops;
	else if (strcmp(ivmode, "benbi") == 0)
		cc->iv_gen_ops = &crypt_iv_benbi_ops;
	else if (strcmp(ivmode, "null") == 0)
		cc->iv_gen_ops = &crypt_iv_null_ops;
	else if (strcmp(ivmode, "eboiv") == 0)
		cc->iv_gen_ops = &crypt_iv_eboiv_ops;
	else if (strcmp(ivmode, "elephant") == 0) {
		cc->iv_gen_ops = &crypt_iv_elephant_ops;
		cc->key_parts = 2;
		cc->key_extra_size = cc->key_size / 2;
		if (cc->key_extra_size > ELEPHANT_MAX_KEY_SIZE)
			return -EINVAL;
		set_bit(CRYPT_ENCRYPT_PREPROCESS, &cc->cipher_flags);
	} else if (strcmp(ivmode, "lmk") == 0) {
		cc->iv_gen_ops = &crypt_iv_lmk_ops;
		/*
		 * Versions 2 and 3 are recognised according
		 * to the length of the provided multi-key string.
		 * If present (version 3), the last key is used as the IV seed.
		 * All keys (including the IV seed) are always the same size.
		 */
		if (cc->key_size % cc->key_parts) {
			cc->key_parts++;
			cc->key_extra_size = cc->key_size / cc->key_parts;
		}
	} else if (strcmp(ivmode, "tcw") == 0) {
		cc->iv_gen_ops = &crypt_iv_tcw_ops;
		cc->key_parts += 2; /* IV + whitening */
		cc->key_extra_size = cc->iv_size + TCW_WHITENING_SIZE;
	} else if (strcmp(ivmode, "random") == 0) {
		cc->iv_gen_ops = &crypt_iv_random_ops;
		/* Need storage space in integrity fields. */
		cc->integrity_iv_size = cc->iv_size;
	} else {
		ti->error = "Invalid IV mode";
		return -EINVAL;
	}

	return 0;
}

/*
 * Workaround to parse HMAC algorithm from AEAD crypto API spec.
 * The HMAC is needed to calculate the tag size (HMAC digest size).
 * This should probably be done by crypto API calls (once available...)
 */
static int crypt_ctr_auth_cipher(struct crypt_config *cc, char *cipher_api)
{
	char *start, *end, *mac_alg = NULL;
	struct crypto_ahash *mac;

	if (!strstarts(cipher_api, "authenc("))
		return 0;

	start = strchr(cipher_api, '(');
	end = strchr(cipher_api, ',');
	if (!start || !end || ++start > end)
		return -EINVAL;

	mac_alg = kmemdup_nul(start, end - start, GFP_KERNEL);
	if (!mac_alg)
		return -ENOMEM;

	mac = crypto_alloc_ahash(mac_alg, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
	kfree(mac_alg);

	if (IS_ERR(mac))
		return PTR_ERR(mac);

	if (!test_bit(CRYPT_KEY_MAC_SIZE_SET, &cc->cipher_flags))
		cc->key_mac_size = crypto_ahash_digestsize(mac);
	crypto_free_ahash(mac);

	cc->authenc_key = kmalloc(crypt_authenckey_size(cc), GFP_KERNEL);
	if (!cc->authenc_key)
		return -ENOMEM;

	return 0;
}

static int crypt_ctr_cipher_new(struct dm_target *ti, char *cipher_in, char *key,
				char **ivmode, char **ivopts)
{
	struct crypt_config *cc = ti->private;
	char *tmp, *cipher_api, buf[CRYPTO_MAX_ALG_NAME];
	int ret = -EINVAL;

	cc->tfms_count = 1;

	/*
	 * New format (capi: prefix)
	 * capi:cipher_api_spec-iv:ivopts
	 */
	tmp = &cipher_in[strlen("capi:")];

	/* Separate IV options if present, it can contain another '-' in hash name */
	*ivopts = strrchr(tmp, ':');
	if (*ivopts) {
		**ivopts = '\0';
		(*ivopts)++;
	}
	/* Parse IV mode */
	*ivmode = strrchr(tmp, '-');
	if (*ivmode) {
		**ivmode = '\0';
		(*ivmode)++;
	}
	/* The rest is crypto API spec */
	cipher_api = tmp;

	/* Alloc AEAD, can be used only in new format. */
	if (crypt_integrity_aead(cc)) {
		ret = crypt_ctr_auth_cipher(cc, cipher_api);
		if (ret < 0) {
			ti->error = "Invalid AEAD cipher spec";
			return ret;
		}
	}

	if (*ivmode && !strcmp(*ivmode, "lmk"))
		cc->tfms_count = 64;

	if (*ivmode && !strcmp(*ivmode, "essiv")) {
		if (!*ivopts) {
			ti->error = "Digest algorithm missing for ESSIV mode";
			return -EINVAL;
		}
		ret = snprintf(buf, CRYPTO_MAX_ALG_NAME, "essiv(%s,%s)",
			       cipher_api, *ivopts);
		if (ret < 0 || ret >= CRYPTO_MAX_ALG_NAME) {
			ti->error = "Cannot allocate cipher string";
			return -ENOMEM;
		}
		cipher_api = buf;
	}

	cc->key_parts = cc->tfms_count;

	/* Allocate cipher */
	ret = crypt_alloc_tfms(cc, cipher_api);
	if (ret < 0) {
		ti->error = "Error allocating crypto tfm";
		return ret;
	}

	if (crypt_integrity_aead(cc))
		cc->iv_size = crypto_aead_ivsize(any_tfm_aead(cc));
	else
		cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc));

	return 0;
}

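/*
 * Example (illustrative) "capi:" specifications parsed above:
 *   "capi:xts(aes)-plain64"
 *   "capi:cbc(aes)-essiv:sha256"   (built into "essiv(cbc(aes),sha256)")
 *   "capi:authenc(hmac(sha256),xts(aes))-random"
 * The cipher part is handed verbatim to the crypto API; only the trailing
 * "-iv[:ivopts]" suffix is interpreted by dm-crypt itself.
 */
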
static int crypt_ctr_cipher_old(struct dm_target *ti, char *cipher_in, char *key,
				char **ivmode, char **ivopts)
{
	struct crypt_config *cc = ti->private;
	char *tmp, *cipher, *chainmode, *keycount;
	char *cipher_api = NULL;
	int ret = -EINVAL;
	char dummy;

	if (strchr(cipher_in, '(') || crypt_integrity_aead(cc)) {
		ti->error = "Bad cipher specification";
		return -EINVAL;
	}

	/*
	 * Legacy dm-crypt cipher specification
	 * cipher[:keycount]-mode-iv:ivopts
	 */
	tmp = cipher_in;
	keycount = strsep(&tmp, "-");
	cipher = strsep(&keycount, ":");

	if (!keycount)
		cc->tfms_count = 1;
	else if (sscanf(keycount, "%u%c", &cc->tfms_count, &dummy) != 1 ||
		 !is_power_of_2(cc->tfms_count)) {
		ti->error = "Bad cipher key count specification";
		return -EINVAL;
	}
	cc->key_parts = cc->tfms_count;

	chainmode = strsep(&tmp, "-");
	*ivmode = strsep(&tmp, ":");
	*ivopts = tmp;

	/*
	 * For compatibility with the original dm-crypt mapping format, if
	 * only the cipher name is supplied, use cbc-plain.
	 */
	if (!chainmode || (!strcmp(chainmode, "plain") && !*ivmode)) {
		chainmode = "cbc";
		*ivmode = "plain";
	}

	if (strcmp(chainmode, "ecb") && !*ivmode) {
		ti->error = "IV mechanism required";
		return -EINVAL;
	}

	cipher_api = kmalloc(CRYPTO_MAX_ALG_NAME, GFP_KERNEL);
	if (!cipher_api)
		goto bad_mem;

	if (*ivmode && !strcmp(*ivmode, "essiv")) {
		if (!*ivopts) {
			ti->error = "Digest algorithm missing for ESSIV mode";
			kfree(cipher_api);
			return -EINVAL;
		}
		ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
			       "essiv(%s(%s),%s)", chainmode, cipher, *ivopts);
	} else {
		ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
			       "%s(%s)", chainmode, cipher);
	}
	if (ret < 0 || ret >= CRYPTO_MAX_ALG_NAME) {
		kfree(cipher_api);
		goto bad_mem;
	}

	/* Allocate cipher */
	ret = crypt_alloc_tfms(cc, cipher_api);
	if (ret < 0) {
		ti->error = "Error allocating crypto tfm";
		kfree(cipher_api);
		return ret;
	}
	kfree(cipher_api);

	return 0;
bad_mem:
	ti->error = "Cannot allocate cipher strings";
	return -ENOMEM;
}

static int crypt_ctr_cipher(struct dm_target *ti, char *cipher_in, char *key)
{
	struct crypt_config *cc = ti->private;
	char *ivmode = NULL, *ivopts = NULL;
	int ret;

	cc->cipher_string = kstrdup(cipher_in, GFP_KERNEL);
	if (!cc->cipher_string) {
		ti->error = "Cannot allocate cipher strings";
		return -ENOMEM;
	}

	if (strstarts(cipher_in, "capi:"))
		ret = crypt_ctr_cipher_new(ti, cipher_in, key, &ivmode, &ivopts);
	else
		ret = crypt_ctr_cipher_old(ti, cipher_in, key, &ivmode, &ivopts);
	if (ret)
		return ret;

	/* Initialize IV */
	ret = crypt_ctr_ivmode(ti, ivmode);
	if (ret < 0)
		return ret;

	/* Initialize and set key */
	ret = crypt_set_key(cc, key);
	if (ret < 0) {
		ti->error = "Error decoding and setting key";
		return ret;
	}

	/* Allocate IV */
	if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) {
		ret = cc->iv_gen_ops->ctr(cc, ti, ivopts);
		if (ret < 0) {
			ti->error = "Error creating IV";
			return ret;
		}
	}

	/* Initialize IV (set keys for ESSIV etc) */
	if (cc->iv_gen_ops && cc->iv_gen_ops->init) {
		ret = cc->iv_gen_ops->init(cc);
		if (ret < 0) {
			ti->error = "Error initialising IV";
			return ret;
		}
	}

	/* wipe the kernel key payload copy */
	if (cc->key_string)
		memset(cc->key, 0, cc->key_size * sizeof(u8));

	return ret;
}

static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct crypt_config *cc = ti->private;
	struct dm_arg_set as;
	static const struct dm_arg _args[] = {
		{0, 9, "Invalid number of feature args"},
	};
	unsigned int opt_params, val;
	const char *opt_string, *sval;
	char dummy;
	int ret;

	/* Optional parameters */
	as.argc = argc;
	as.argv = argv;

	ret = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
	if (ret)
		return ret;

	while (opt_params--) {
		opt_string = dm_shift_arg(&as);
		if (!opt_string) {
			ti->error = "Not enough feature arguments";
			return -EINVAL;
		}

		if (!strcasecmp(opt_string, "allow_discards"))
			ti->num_discard_bios = 1;

		else if (!strcasecmp(opt_string, "same_cpu_crypt"))
			set_bit(DM_CRYPT_SAME_CPU, &cc->flags);
		else if (!strcasecmp(opt_string, "high_priority"))
			set_bit(DM_CRYPT_HIGH_PRIORITY, &cc->flags);

		else if (!strcasecmp(opt_string, "submit_from_crypt_cpus"))
			set_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
		else if (!strcasecmp(opt_string, "no_read_workqueue"))
			set_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags);
		else if (!strcasecmp(opt_string, "no_write_workqueue"))
			set_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags);
		else if (sscanf(opt_string, "integrity:%u:", &val) == 1) {
			if (val == 0 || val > MAX_TAG_SIZE) {
				ti->error = "Invalid integrity arguments";
				return -EINVAL;
			}
			cc->used_tag_size = val;
			sval = strchr(opt_string + strlen("integrity:"), ':') + 1;
			if (!strcasecmp(sval, "aead")) {
				set_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags);
			} else if (strcasecmp(sval, "none")) {
				ti->error = "Unknown integrity profile";
				return -EINVAL;
			}

			cc->cipher_auth = kstrdup(sval, GFP_KERNEL);
			if (!cc->cipher_auth)
				return -ENOMEM;
		} else if (sscanf(opt_string, "integrity_key_size:%u%c", &val, &dummy) == 1) {
			if (!val) {
				ti->error = "Invalid integrity_key_size argument";
				return -EINVAL;
			}
			cc->key_mac_size = val;
			set_bit(CRYPT_KEY_MAC_SIZE_SET, &cc->cipher_flags);
		} else if (sscanf(opt_string, "sector_size:%hu%c", &cc->sector_size, &dummy) == 1) {
			if (cc->sector_size < (1 << SECTOR_SHIFT) ||
			    cc->sector_size > 4096 ||
			    (cc->sector_size & (cc->sector_size - 1))) {
				ti->error = "Invalid feature value for sector_size";
				return -EINVAL;
			}
			if (ti->len & ((cc->sector_size >> SECTOR_SHIFT) - 1)) {
				ti->error = "Device size is not multiple of sector_size feature";
				return -EINVAL;
			}
			cc->sector_shift = __ffs(cc->sector_size) - SECTOR_SHIFT;
		} else if (!strcasecmp(opt_string, "iv_large_sectors"))
			set_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
		else {
			ti->error = "Invalid feature arguments";
			return -EINVAL;
		}
	}

	return 0;
}

#ifdef CONFIG_BLK_DEV_ZONED
static int crypt_report_zones(struct dm_target *ti,
		struct dm_report_zones_args *args, unsigned int nr_zones)
{
	struct crypt_config *cc = ti->private;

	return dm_report_zones(cc->dev->bdev, cc->start,
			cc->start + dm_target_offset(ti, args->next_sector),
			args, nr_zones);
}
#else
#define crypt_report_zones NULL
#endif

/*
 * Construct an encryption mapping:
 * <cipher> [<key>|:<key_size>:<user|logon>:<key_description>] <iv_offset> <dev_path> <start>
 */
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct crypt_config *cc;
	const char *devname = dm_table_device_name(ti->table);
	int key_size, wq_id;
	unsigned int align_mask;
	unsigned int common_wq_flags;
	unsigned long long tmpll;
	int ret;
	size_t iv_size_padding, additional_req_size;
	char dummy;

	if (argc < 5) {
		ti->error = "Not enough arguments";
		return -EINVAL;
	}

	key_size = get_key_size(&argv[1]);
	if (key_size < 0) {
		ti->error = "Cannot parse key size";
		return -EINVAL;
	}

	cc = kzalloc(struct_size(cc, key, key_size), GFP_KERNEL);
	if (!cc) {
		ti->error = "Cannot allocate encryption context";
		return -ENOMEM;
	}
	cc->key_size = key_size;
	cc->sector_size = (1 << SECTOR_SHIFT);
	cc->sector_shift = 0;

	ti->private = cc;

	spin_lock(&dm_crypt_clients_lock);
	dm_crypt_clients_n++;
	crypt_calculate_pages_per_client();
	spin_unlock(&dm_crypt_clients_lock);

	ret = percpu_counter_init(&cc->n_allocated_pages, 0, GFP_KERNEL);
	if (ret < 0)
		goto bad;

	/* Optional parameters need to be read before cipher constructor */
	if (argc > 5) {
		ret = crypt_ctr_optional(ti, argc - 5, &argv[5]);
		if (ret)
			goto bad;
	}

	ret = crypt_ctr_cipher(ti, argv[0], argv[1]);
	if (ret < 0)
		goto bad;

	if (crypt_integrity_aead(cc)) {
		cc->dmreq_start = sizeof(struct aead_request);
		cc->dmreq_start += crypto_aead_reqsize(any_tfm_aead(cc));
		align_mask = crypto_aead_alignmask(any_tfm_aead(cc));
	} else {
		cc->dmreq_start = sizeof(struct skcipher_request);
		cc->dmreq_start += crypto_skcipher_reqsize(any_tfm(cc));
		align_mask = crypto_skcipher_alignmask(any_tfm(cc));
	}
	cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request));

	if (align_mask < CRYPTO_MINALIGN) {
		/* Allocate the padding exactly */
		iv_size_padding = -(cc->dmreq_start + sizeof(struct dm_crypt_request))
				& align_mask;
	} else {
		/*
		 * If the cipher requires greater alignment than kmalloc
		 * alignment, we don't know the exact position of the
		 * initialization vector. We must assume worst case.
		 */
		iv_size_padding = align_mask;
	}

	/*  ...| IV + padding | original IV | original sec. number | bio tag offset | */
	additional_req_size = sizeof(struct dm_crypt_request) +
		iv_size_padding + cc->iv_size +
		cc->iv_size +
		sizeof(uint64_t) +
		sizeof(unsigned int);

	ret = mempool_init_kmalloc_pool(&cc->req_pool, MIN_IOS, cc->dmreq_start + additional_req_size);
	if (ret) {
		ti->error = "Cannot allocate crypt request mempool";
		goto bad;
	}

	cc->per_bio_data_size = ti->per_io_data_size =
		ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start + additional_req_size,
		      ARCH_DMA_MINALIGN);

	ret = mempool_init(&cc->page_pool, BIO_MAX_VECS, crypt_page_alloc, crypt_page_free, cc);
	if (ret) {
		ti->error = "Cannot allocate page mempool";
		goto bad;
	}

	ret = bioset_init(&cc->bs, MIN_IOS, 0, BIOSET_NEED_BVECS);
	if (ret) {
		ti->error = "Cannot allocate crypt bioset";
		goto bad;
	}

	mutex_init(&cc->bio_alloc_lock);

	ret = -EINVAL;
	if ((sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) ||
	    (tmpll & ((cc->sector_size >> SECTOR_SHIFT) - 1))) {
		ti->error = "Invalid iv_offset sector";
		goto bad;
	}
	cc->iv_offset = tmpll;

	ret = dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev);
	if (ret) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	ret = -EINVAL;
	if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1 || tmpll != (sector_t)tmpll) {
		ti->error = "Invalid device sector";
		goto bad;
	}
	cc->start = tmpll;

	if (bdev_is_zoned(cc->dev->bdev)) {
		/*
		 * For zoned block devices, we need to preserve the issuer write
		 * ordering. To do so, disable write workqueues and force inline
		 * encryption completion.
		 */
		set_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags);
		set_bit(DM_CRYPT_WRITE_INLINE, &cc->flags);

		/*
		 * All zone append writes to a zone of a zoned block device will
		 * have the same BIO sector, the start of the zone. When the
		 * cipher IV mode uses sector values, all data targeting a
		 * zone will be encrypted using the first sector numbers of the
		 * zone. This will not result in write errors but will
		 * cause most reads to fail as reads will use the sector values
		 * for the actual data locations, resulting in IV mismatch.
		 * To avoid this problem, ask DM core to emulate zone append
		 * operations with regular writes.
		 */
		DMDEBUG("Zone append operations will be emulated");
		ti->emulate_zone_append = true;
	}

	if (crypt_integrity_aead(cc) || cc->integrity_iv_size) {
		ret = crypt_integrity_ctr(cc, ti);
		if (ret)
			goto bad;

		cc->tag_pool_max_sectors = POOL_ENTRY_SIZE / cc->tuple_size;
		if (!cc->tag_pool_max_sectors)
			cc->tag_pool_max_sectors = 1;

		ret = mempool_init_kmalloc_pool(&cc->tag_pool, MIN_IOS,
			cc->tag_pool_max_sectors * cc->tuple_size);
		if (ret) {
			ti->error = "Cannot allocate integrity tags mempool";
			goto bad;
		}

		cc->tag_pool_max_sectors <<= cc->sector_shift;
	}

	wq_id = ida_alloc_min(&workqueue_ida, 1, GFP_KERNEL);
	if (wq_id < 0) {
		ti->error = "Couldn't get workqueue id";
		ret = wq_id;
		goto bad;
	}
	cc->workqueue_id = wq_id;

	ret = -ENOMEM;
	common_wq_flags = WQ_MEM_RECLAIM | WQ_SYSFS;
	if (test_bit(DM_CRYPT_HIGH_PRIORITY, &cc->flags))
		common_wq_flags |= WQ_HIGHPRI;

	cc->io_queue = alloc_workqueue("kcryptd_io-%s-%d", common_wq_flags, 1, devname, wq_id);
	if (!cc->io_queue) {
		ti->error = "Couldn't create kcryptd io queue";
		goto bad;
	}

	if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags)) {
		cc->crypt_queue = alloc_workqueue("kcryptd-%s-%d",
						  common_wq_flags | WQ_CPU_INTENSIVE,
						  1, devname, wq_id);
	} else {
		/*
		 * While crypt_queue is certainly CPU intensive, the use of
		 * WQ_CPU_INTENSIVE is meaningless with WQ_UNBOUND.
		 */
		cc->crypt_queue = alloc_workqueue("kcryptd-%s-%d",
						  common_wq_flags | WQ_UNBOUND,
						  num_online_cpus(), devname, wq_id);
	}
	if (!cc->crypt_queue) {
		ti->error = "Couldn't create kcryptd queue";
		goto bad;
	}

	spin_lock_init(&cc->write_thread_lock);
	cc->write_tree = RB_ROOT;

	cc->write_thread = kthread_run(dmcrypt_write, cc, "dmcrypt_write/%s", devname);
	if (IS_ERR(cc->write_thread)) {
		ret = PTR_ERR(cc->write_thread);
		cc->write_thread = NULL;
		ti->error = "Couldn't spawn write thread";
		goto bad;
	}
	if (test_bit(DM_CRYPT_HIGH_PRIORITY, &cc->flags))
		set_user_nice(cc->write_thread, MIN_NICE);

	ti->num_flush_bios = 1;
	ti->limit_swap_bios = true;
	ti->accounts_remapped_io = true;

	dm_audit_log_ctr(DM_MSG_PREFIX, ti, 1);
	return 0;

bad:
	dm_audit_log_ctr(DM_MSG_PREFIX, ti, 0);
	crypt_dtr(ti);
	return ret;
}

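/*
 * Example (illustrative) table line using a hypothetical hex key on
 * /dev/sdb, matching the constructor arguments parsed above:
 *
 *   0 417792 crypt aes-xts-plain64 <hex-key> 0 /dev/sdb 0 1 allow_discards
 *
 * i.e. <cipher> <key> <iv_offset> <dev_path> <start>, followed by the
 * optional feature arguments handled by crypt_ctr_optional().
 */
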
static int crypt_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_crypt_io *io;
	struct crypt_config *cc = ti->private;
	unsigned max_sectors;

	/*
	 * If bio is REQ_PREFLUSH or REQ_OP_DISCARD, just bypass crypt queues.
	 * - for REQ_PREFLUSH device-mapper core ensures that no IO is in-flight
	 * - for REQ_OP_DISCARD caller must use flush if IO ordering matters
	 */
	if (unlikely(bio->bi_opf & REQ_PREFLUSH ||
	    bio_op(bio) == REQ_OP_DISCARD)) {
		bio_set_dev(bio, cc->dev->bdev);
		if (bio_sectors(bio))
			bio->bi_iter.bi_sector = cc->start +
				dm_target_offset(ti, bio->bi_iter.bi_sector);
		return DM_MAPIO_REMAPPED;
	}

	/*
	 * Check if bio is too large, split as needed.
	 */
	max_sectors = get_max_request_size(cc, bio_data_dir(bio) == WRITE);
	if (unlikely(bio_sectors(bio) > max_sectors))
		dm_accept_partial_bio(bio, max_sectors);

	/*
	 * Ensure that bio is a multiple of internal sector encryption size
	 * and is aligned to this size as defined in IO hints.
	 */
	if (unlikely((bio->bi_iter.bi_sector & ((cc->sector_size >> SECTOR_SHIFT) - 1)) != 0))
		return DM_MAPIO_KILL;

	if (unlikely(bio->bi_iter.bi_size & (cc->sector_size - 1)))
		return DM_MAPIO_KILL;

	io = dm_per_bio_data(bio, cc->per_bio_data_size);
	crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));

	if (cc->tuple_size) {
		unsigned int tag_len = cc->tuple_size * (bio_sectors(bio) >> cc->sector_shift);

		if (unlikely(tag_len > KMALLOC_MAX_SIZE))
			io->integrity_metadata = NULL;
		else
			io->integrity_metadata = kmalloc(tag_len, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);

		if (unlikely(!io->integrity_metadata)) {
			if (bio_sectors(bio) > cc->tag_pool_max_sectors)
				dm_accept_partial_bio(bio, cc->tag_pool_max_sectors);
			io->integrity_metadata = mempool_alloc(&cc->tag_pool, GFP_NOIO);
			io->integrity_metadata_from_pool = true;
		}
	}

	if (crypt_integrity_aead(cc))
		io->ctx.r.req_aead = (struct aead_request *)(io + 1);
	else
		io->ctx.r.req = (struct skcipher_request *)(io + 1);

	if (bio_data_dir(io->base_bio) == READ) {
		if (kcryptd_io_read(io, CRYPT_MAP_READ_GFP))
			kcryptd_queue_read(io);
	} else
		kcryptd_queue_crypt(io);

	return DM_MAPIO_SUBMITTED;
}

static char hex2asc(unsigned char c)
{
	return c + '0' + ((unsigned int)(9 - c) >> 4 & 0x27);
}

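/*
 * hex2asc() is a branchless nibble-to-ASCII conversion: for c in 0..9 the
 * expression (9 - c) stays non-negative, the shift yields 0 and the result
 * is '0' + c; for c in 10..15 the subtraction wraps, the shifted value masks
 * to 0x27 (39) and the result becomes 'a' + (c - 10).
 */
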
static void crypt_status(struct dm_target *ti, status_type_t type,
			 unsigned int status_flags, char *result, unsigned int maxlen)
{
	struct crypt_config *cc = ti->private;
	unsigned int i, sz = 0;
	int num_feature_args = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%s ", cc->cipher_string);

		if (cc->key_size > 0) {
			if (cc->key_string)
				DMEMIT(":%u:%s", cc->key_size, cc->key_string);
			else {
				for (i = 0; i < cc->key_size; i++) {
					DMEMIT("%c%c", hex2asc(cc->key[i] >> 4),
					       hex2asc(cc->key[i] & 0xf));
				}
			}
		} else
			DMEMIT("-");

		DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
				cc->dev->name, (unsigned long long)cc->start);

		num_feature_args += !!ti->num_discard_bios;
		num_feature_args += test_bit(DM_CRYPT_SAME_CPU, &cc->flags);
		num_feature_args += test_bit(DM_CRYPT_HIGH_PRIORITY, &cc->flags);
		num_feature_args += test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
		num_feature_args += test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags);
		num_feature_args += test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags);
		num_feature_args += !!cc->used_tag_size;
		num_feature_args += cc->sector_size != (1 << SECTOR_SHIFT);
		num_feature_args += test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
		num_feature_args += test_bit(CRYPT_KEY_MAC_SIZE_SET, &cc->cipher_flags);
		if (num_feature_args) {
			DMEMIT(" %d", num_feature_args);
			if (ti->num_discard_bios)
				DMEMIT(" allow_discards");
			if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
				DMEMIT(" same_cpu_crypt");
			if (test_bit(DM_CRYPT_HIGH_PRIORITY, &cc->flags))
				DMEMIT(" high_priority");
			if (test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags))
				DMEMIT(" submit_from_crypt_cpus");
			if (test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags))
				DMEMIT(" no_read_workqueue");
			if (test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags))
				DMEMIT(" no_write_workqueue");
			if (cc->used_tag_size)
				DMEMIT(" integrity:%u:%s", cc->used_tag_size, cc->cipher_auth);
			if (cc->sector_size != (1 << SECTOR_SHIFT))
				DMEMIT(" sector_size:%d", cc->sector_size);
			if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
				DMEMIT(" iv_large_sectors");
			if (test_bit(CRYPT_KEY_MAC_SIZE_SET, &cc->cipher_flags))
				DMEMIT(" integrity_key_size:%u", cc->key_mac_size);
		}
		break;

	case STATUSTYPE_IMA:
		DMEMIT_TARGET_NAME_VERSION(ti->type);
		DMEMIT(",allow_discards=%c", ti->num_discard_bios ? 'y' : 'n');
		DMEMIT(",same_cpu_crypt=%c", test_bit(DM_CRYPT_SAME_CPU, &cc->flags) ? 'y' : 'n');
		DMEMIT(",high_priority=%c", test_bit(DM_CRYPT_HIGH_PRIORITY, &cc->flags) ? 'y' : 'n');
		DMEMIT(",submit_from_crypt_cpus=%c", test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags) ?
		       'y' : 'n');
		DMEMIT(",no_read_workqueue=%c", test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags) ?
		       'y' : 'n');
		DMEMIT(",no_write_workqueue=%c", test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags) ?
		       'y' : 'n');
		DMEMIT(",iv_large_sectors=%c", test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags) ?
		       'y' : 'n');

		if (cc->used_tag_size)
			DMEMIT(",integrity_tag_size=%u,cipher_auth=%s",
			       cc->used_tag_size, cc->cipher_auth);
		if (cc->sector_size != (1 << SECTOR_SHIFT))
			DMEMIT(",sector_size=%d", cc->sector_size);
		if (cc->cipher_string)
			DMEMIT(",cipher_string=%s", cc->cipher_string);

		DMEMIT(",key_size=%u", cc->key_size);
		DMEMIT(",key_parts=%u", cc->key_parts);
		DMEMIT(",key_extra_size=%u", cc->key_extra_size);
		DMEMIT(",key_mac_size=%u", cc->key_mac_size);
		break;
	}
}

static void crypt_postsuspend(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

static int crypt_preresume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
		DMERR("aborting resume - crypt key is not set.");
		return -EAGAIN;
	}

	return 0;
}

static void crypt_resume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

/* Message interface
 *	key set <key>
 *	key wipe
 */
static int crypt_message(struct dm_target *ti, unsigned int argc, char **argv,
			 char *result, unsigned int maxlen)
{
	struct crypt_config *cc = ti->private;
	int key_size, ret = -EINVAL;

	if (argc < 2)
		goto error;

	if (!strcasecmp(argv[0], "key")) {
		if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
			DMWARN("not suspended during key manipulation.");
			return -EINVAL;
		}
		if (argc == 3 && !strcasecmp(argv[1], "set")) {
			/* The key size may not be changed. */
			key_size = get_key_size(&argv[2]);
			if (key_size < 0 || cc->key_size != key_size) {
				memset(argv[2], '0', strlen(argv[2]));
				return -EINVAL;
			}

			ret = crypt_set_key(cc, argv[2]);
			if (ret)
				return ret;
			if (cc->iv_gen_ops && cc->iv_gen_ops->init)
				ret = cc->iv_gen_ops->init(cc);
			/* wipe the kernel key payload copy */
			if (cc->key_string)
				memset(cc->key, 0, cc->key_size * sizeof(u8));
			return ret;
		}
		if (argc == 2 && !strcasecmp(argv[1], "wipe"))
			return crypt_wipe_key(cc);
	}

error:
	DMWARN("unrecognised message received.");
	return -EINVAL;
}

static int crypt_iterate_devices(struct dm_target *ti,
				 iterate_devices_callout_fn fn, void *data)
{
	struct crypt_config *cc = ti->private;

	return fn(ti, cc->dev, cc->start, ti->len, data);
}

static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct crypt_config *cc = ti->private;

	limits->logical_block_size =
		max_t(unsigned int, limits->logical_block_size, cc->sector_size);
	limits->physical_block_size =
		max_t(unsigned int, limits->physical_block_size, cc->sector_size);
	limits->io_min = max_t(unsigned int, limits->io_min, cc->sector_size);
	limits->dma_alignment = limits->logical_block_size - 1;
}

static struct target_type crypt_target = {
	.name   = "crypt",
	.version = {1, 28, 0},
	.module = THIS_MODULE,
	.ctr    = crypt_ctr,
	.dtr    = crypt_dtr,
	.features = DM_TARGET_ZONED_HM,
	.report_zones = crypt_report_zones,
	.map    = crypt_map,
	.status = crypt_status,
	.postsuspend = crypt_postsuspend,
	.preresume = crypt_preresume,
	.resume = crypt_resume,
	.message = crypt_message,
	.iterate_devices = crypt_iterate_devices,
	.io_hints = crypt_io_hints,
};
module_dm(crypt);

MODULE_AUTHOR("Jana Saout <jana@saout.de>");
MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
MODULE_LICENSE("GPL");