// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 Google LLC
 */

/*
 * Refer to Documentation/block/inline-encryption.rst for detailed explanation.
 */

#define pr_fmt(fmt) "blk-crypto: " fmt

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-crypto-profile.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <linux/slab.h>

#include "blk-crypto-internal.h"
const struct blk_crypto_mode blk_crypto_modes[] = {
	[BLK_ENCRYPTION_MODE_AES_256_XTS] = {
		.name = "AES-256-XTS",
		.cipher_str = "xts(aes)",
		.keysize = 64,
		.ivsize = 16,
	},
	[BLK_ENCRYPTION_MODE_AES_128_CBC_ESSIV] = {
		.name = "AES-128-CBC-ESSIV",
		.cipher_str = "essiv(cbc(aes),sha256)",
		.keysize = 16,
		.ivsize = 16,
	},
	[BLK_ENCRYPTION_MODE_ADIANTUM] = {
		.name = "Adiantum",
		.cipher_str = "adiantum(xchacha12,aes)",
		.keysize = 32,
		.ivsize = 32,
	},
	[BLK_ENCRYPTION_MODE_SM4_XTS] = {
		.name = "SM4-XTS",
		.cipher_str = "xts(sm4)",
		.keysize = 32,
		.ivsize = 16,
	},
};
/*
 * This number needs to be at least (the number of threads doing IO
 * concurrently) * (maximum recursive depth of a bio), so that we don't
 * deadlock on crypt_ctx allocations. The default is chosen to be the same
 * as the default number of post read contexts in both EXT4 and F2FS.
 */
static int num_prealloc_crypt_ctxs = 128;

module_param(num_prealloc_crypt_ctxs, int, 0444);
MODULE_PARM_DESC(num_prealloc_crypt_ctxs,
		 "Number of bio crypto contexts to preallocate");
static struct kmem_cache *bio_crypt_ctx_cache;
static mempool_t *bio_crypt_ctx_pool;
static int __init bio_crypt_ctx_init(void)
{
	size_t i;

	bio_crypt_ctx_cache = KMEM_CACHE(bio_crypt_ctx, 0);
	if (!bio_crypt_ctx_cache)
		goto out_no_mem;

	bio_crypt_ctx_pool = mempool_create_slab_pool(num_prealloc_crypt_ctxs,
						      bio_crypt_ctx_cache);
	if (!bio_crypt_ctx_pool)
		goto out_no_mem;

	/* This is assumed in various places. */
	BUILD_BUG_ON(BLK_ENCRYPTION_MODE_INVALID != 0);

	/* Sanity check that no algorithm exceeds the defined limits. */
	for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++) {
		BUG_ON(blk_crypto_modes[i].keysize > BLK_CRYPTO_MAX_KEY_SIZE);
		BUG_ON(blk_crypto_modes[i].ivsize > BLK_CRYPTO_MAX_IV_SIZE);
	}

	return 0;
out_no_mem:
	panic("Failed to allocate mem for bio crypt ctxs\n");
}
subsys_initcall(bio_crypt_ctx_init);
void bio_crypt_set_ctx(struct bio *bio, const struct blk_crypto_key *key,
		       const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE], gfp_t gfp_mask)
{
	struct bio_crypt_ctx *bc;

	/*
	 * The caller must use a gfp_mask that contains __GFP_DIRECT_RECLAIM so
	 * that the mempool_alloc() can't fail.
	 */
	WARN_ON_ONCE(!(gfp_mask & __GFP_DIRECT_RECLAIM));

	bc = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);

	bc->bc_key = key;
	memcpy(bc->bc_dun, dun, sizeof(bc->bc_dun));

	bio->bi_crypt_context = bc;
}
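/*
 * Illustrative caller sketch (hypothetical names, not part of this file): a
 * filesystem that has already set up "key" with blk_crypto_init_key() might
 * attach it to a bio like this, using a logical block number "lblk_num" as
 * the DUN. GFP_NOIO includes __GFP_DIRECT_RECLAIM, so the allocation above
 * cannot fail:
 *
 *	u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE] = { lblk_num };
 *
 *	bio_crypt_set_ctx(bio, key, dun, GFP_NOIO);
 *	submit_bio(bio);
 */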
void __bio_crypt_free_ctx(struct bio *bio)
{
	mempool_free(bio->bi_crypt_context, bio_crypt_ctx_pool);
	bio->bi_crypt_context = NULL;
}
int __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask)
{
	dst->bi_crypt_context = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
	if (!dst->bi_crypt_context)
		return -ENOMEM;
	*dst->bi_crypt_context = *src->bi_crypt_context;
	return 0;
}
/* Increments @dun by @inc, treating @dun as a multi-limb integer. */
void bio_crypt_dun_increment(u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
			     unsigned int inc)
{
	int i;

	for (i = 0; inc && i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++) {
		dun[i] += inc;
		/*
		 * If the addition in this limb overflowed, then we need to
		 * carry 1 into the next limb. Else the carry is 0.
		 */
		if (dun[i] < inc)
			inc = 1;
		else
			inc = 0;
	}
}
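/*
 * Worked example (illustrative): if dun[0] == U64_MAX and dun[1] == 5,
 * incrementing by 1 overflows limb 0 (dun[0] becomes 0, which is < 1), so a
 * carry of 1 propagates into limb 1 and dun[1] becomes 6; the loop then stops
 * because the remaining carry is 0.
 */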
void __bio_crypt_advance(struct bio *bio, unsigned int bytes)
{
	struct bio_crypt_ctx *bc = bio->bi_crypt_context;

	bio_crypt_dun_increment(bc->bc_dun,
				bytes >> bc->bc_key->data_unit_size_bits);
}
/*
 * Returns true if @bc->bc_dun plus @bytes converted to data units is equal to
 * @next_dun, treating the DUNs as multi-limb integers.
 */
bool bio_crypt_dun_is_contiguous(const struct bio_crypt_ctx *bc,
				 unsigned int bytes,
				 const u64 next_dun[BLK_CRYPTO_DUN_ARRAY_SIZE])
{
	int i;
	unsigned int carry = bytes >> bc->bc_key->data_unit_size_bits;

	for (i = 0; i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++) {
		if (bc->bc_dun[i] + carry != next_dun[i])
			return false;
		/*
		 * If the addition in this limb overflowed, then we need to
		 * carry 1 into the next limb. Else the carry is 0.
		 */
		if ((bc->bc_dun[i] + carry) < carry)
			carry = 1;
		else
			carry = 0;
	}

	/* If the DUN wrapped through 0, don't treat it as contiguous. */
	return carry == 0;
}
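/*
 * Illustrative check (hypothetical numbers, not from this file): a context
 * whose bc_dun[0] is 100 and whose bio covers 8 data units is contiguous with
 * a following context whose DUN starts at 108, but not with one starting at
 * 109.
 */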
/*
 * Checks that two bio crypt contexts are compatible - i.e. that
 * they are mergeable except for data_unit_num continuity.
 */
static bool bio_crypt_ctx_compatible(struct bio_crypt_ctx *bc1,
				     struct bio_crypt_ctx *bc2)
{
	if (!bc1)
		return !bc2;

	return bc2 && bc1->bc_key == bc2->bc_key;
}
bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio)
{
	return bio_crypt_ctx_compatible(rq->crypt_ctx, bio->bi_crypt_context);
}
/*
 * Checks that two bio crypt contexts are compatible, and also
 * that their data_unit_nums are continuous (and can hence be merged)
 * in the order @bc1 followed by @bc2.
 */
bool bio_crypt_ctx_mergeable(struct bio_crypt_ctx *bc1, unsigned int bc1_bytes,
			     struct bio_crypt_ctx *bc2)
{
	if (!bio_crypt_ctx_compatible(bc1, bc2))
		return false;

	return !bc1 || bio_crypt_dun_is_contiguous(bc1, bc1_bytes, bc2->bc_dun);
}
/* Check that all I/O segments are data unit aligned. */
static bool bio_crypt_check_alignment(struct bio *bio)
{
	const unsigned int data_unit_size =
		bio->bi_crypt_context->bc_key->crypto_cfg.data_unit_size;
	struct bvec_iter iter;
	struct bio_vec bv;

	bio_for_each_segment(bv, bio, iter) {
		if (!IS_ALIGNED(bv.bv_len | bv.bv_offset, data_unit_size))
			return false;
	}

	return true;
}
blk_status_t __blk_crypto_rq_get_keyslot(struct request *rq)
{
	return blk_crypto_get_keyslot(rq->q->crypto_profile,
				      rq->crypt_ctx->bc_key,
				      &rq->crypt_keyslot);
}
void __blk_crypto_rq_put_keyslot(struct request *rq)
{
	blk_crypto_put_keyslot(rq->crypt_keyslot);
	rq->crypt_keyslot = NULL;
}
void __blk_crypto_free_request(struct request *rq)
{
	/* The keyslot, if one was needed, should have been released earlier. */
	if (WARN_ON_ONCE(rq->crypt_keyslot))
		__blk_crypto_rq_put_keyslot(rq);

	mempool_free(rq->crypt_ctx, bio_crypt_ctx_pool);
	rq->crypt_ctx = NULL;
}
/**
 * __blk_crypto_bio_prep - Prepare bio for inline encryption
 *
 * @bio_ptr: pointer to original bio pointer
 *
 * If the bio crypt context provided for the bio is supported by the underlying
 * device's inline encryption hardware, do nothing.
 *
 * Otherwise, try to perform en/decryption for this bio by falling back to the
 * kernel crypto API. When the crypto API fallback is used for encryption,
 * blk-crypto may choose to split the bio into 2 - the first one that will
 * continue to be processed and the second one that will be resubmitted via
 * submit_bio_noacct. A bounce bio will be allocated to encrypt the contents
 * of the aforementioned "first one", and *bio_ptr will be updated to this
 * bounce bio.
 *
 * Caller must ensure bio has bio_crypt_ctx.
 *
 * Return: true on success; false on error (and bio->bi_status will be set
 *	   appropriately, and bio_endio() will have been called so bio
 *	   submission should abort).
 */
bool __blk_crypto_bio_prep(struct bio **bio_ptr)
{
	struct bio *bio = *bio_ptr;
	const struct blk_crypto_key *bc_key = bio->bi_crypt_context->bc_key;

	/* Error if bio has no data. */
	if (WARN_ON_ONCE(!bio_has_data(bio))) {
		bio->bi_status = BLK_STS_IOERR;
		goto fail;
	}

	if (!bio_crypt_check_alignment(bio)) {
		bio->bi_status = BLK_STS_IOERR;
		goto fail;
	}

	/*
	 * Success if device supports the encryption context, or if we succeeded
	 * in falling back to the crypto API.
	 */
	if (blk_crypto_config_supported_natively(bio->bi_bdev,
						 &bc_key->crypto_cfg))
		return true;
	if (blk_crypto_fallback_bio_prep(bio_ptr))
		return true;
fail:
	bio_endio(*bio_ptr);
	return false;
}
int __blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
			     gfp_t gfp_mask)
{
	if (!rq->crypt_ctx) {
		rq->crypt_ctx = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
		if (!rq->crypt_ctx)
			return -ENOMEM;
	}
	*rq->crypt_ctx = *bio->bi_crypt_context;
	return 0;
}
/**
 * blk_crypto_init_key() - Prepare a key for use with blk-crypto
 * @blk_key: Pointer to the blk_crypto_key to initialize.
 * @raw_key: Pointer to the raw key. Must be the correct length for the chosen
 *	     @crypto_mode; see blk_crypto_modes[].
 * @crypto_mode: identifier for the encryption algorithm to use
 * @dun_bytes: number of bytes that will be used to specify the DUN when this
 *	       key is used
 * @data_unit_size: the data unit size to use for en/decryption
 *
 * Return: 0 on success, -errno on failure. The caller is responsible for
 *	   zeroizing both blk_key and raw_key when done with them.
 */
int blk_crypto_init_key(struct blk_crypto_key *blk_key, const u8 *raw_key,
			enum blk_crypto_mode_num crypto_mode,
			unsigned int dun_bytes,
			unsigned int data_unit_size)
{
	const struct blk_crypto_mode *mode;

	memset(blk_key, 0, sizeof(*blk_key));

	if (crypto_mode >= ARRAY_SIZE(blk_crypto_modes))
		return -EINVAL;

	mode = &blk_crypto_modes[crypto_mode];
	if (mode->keysize == 0)
		return -EINVAL;

	if (dun_bytes == 0 || dun_bytes > mode->ivsize)
		return -EINVAL;

	if (!is_power_of_2(data_unit_size))
		return -EINVAL;

	blk_key->crypto_cfg.crypto_mode = crypto_mode;
	blk_key->crypto_cfg.dun_bytes = dun_bytes;
	blk_key->crypto_cfg.data_unit_size = data_unit_size;
	blk_key->data_unit_size_bits = ilog2(data_unit_size);
	blk_key->size = mode->keysize;
	memcpy(blk_key->raw, raw_key, mode->keysize);

	return 0;
}
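/*
 * Illustrative caller sketch (hypothetical values, not from this file): an
 * upper layer holding a 64-byte raw key might prepare it for AES-256-XTS with
 * an 8-byte DUN and 4096-byte data units like this:
 *
 *	struct blk_crypto_key blk_key;
 *	int err;
 *
 *	err = blk_crypto_init_key(&blk_key, raw_key,
 *				  BLK_ENCRYPTION_MODE_AES_256_XTS,
 *				  8, 4096);
 */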
bool blk_crypto_config_supported_natively(struct block_device *bdev,
					  const struct blk_crypto_config *cfg)
{
	return __blk_crypto_cfg_supported(bdev_get_queue(bdev)->crypto_profile,
					  cfg);
}
/*
 * Check if bios with @cfg can be en/decrypted by blk-crypto (i.e. either the
 * block_device it's submitted to supports inline crypto, or the
 * blk-crypto-fallback is enabled and supports the cfg).
 */
bool blk_crypto_config_supported(struct block_device *bdev,
				 const struct blk_crypto_config *cfg)
{
	return IS_ENABLED(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) ||
	       blk_crypto_config_supported_natively(bdev, cfg);
}
/**
 * blk_crypto_start_using_key() - Start using a blk_crypto_key on a device
 * @bdev: block device to operate on
 * @key: A key to use on the device
 *
 * Upper layers must call this function to ensure that either the hardware
 * supports the key's crypto settings, or the crypto API fallback has transforms
 * for the needed mode allocated and ready to go. This function may allocate
 * an skcipher, and *should not* be called from the data path, since that might
 * cause a deadlock.
 *
 * Return: 0 on success; -ENOPKG if the hardware doesn't support the key and
 *	   blk-crypto-fallback is either disabled or the needed algorithm
 *	   is disabled in the crypto API; or another -errno code.
 */
int blk_crypto_start_using_key(struct block_device *bdev,
			       const struct blk_crypto_key *key)
{
	if (blk_crypto_config_supported_natively(bdev, &key->crypto_cfg))
		return 0;
	return blk_crypto_fallback_start_using_mode(key->crypto_cfg.crypto_mode);
}
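/*
 * Hedged usage sketch (hypothetical caller, not from this file): after
 * blk_crypto_init_key(), this is called once per block device before any
 * encrypted I/O is submitted to it, outside the data path:
 *
 *	err = blk_crypto_start_using_key(bdev, &blk_key);
 *	if (err)
 *		return err;	 (e.g. -ENOPKG: no HW support and no fallback)
 */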
/**
 * blk_crypto_evict_key() - Evict a blk_crypto_key from a block_device
 * @bdev: a block_device on which I/O using the key may have been done
 * @key: the key to evict
 *
 * For a given block_device, this function removes the given blk_crypto_key from
 * the keyslot management structures and evicts it from any underlying hardware
 * keyslot(s) or blk-crypto-fallback keyslot it may have been programmed into.
 *
 * Upper layers must call this before freeing the blk_crypto_key. It must be
 * called for every block_device the key may have been used on. The key must no
 * longer be in use by any I/O when this function is called.
 *
 * Context: May sleep.
 */
void blk_crypto_evict_key(struct block_device *bdev,
			  const struct blk_crypto_key *key)
{
	struct request_queue *q = bdev_get_queue(bdev);
	int err;

	if (blk_crypto_config_supported_natively(bdev, &key->crypto_cfg))
		err = __blk_crypto_evict_key(q->crypto_profile, key);
	else
		err = blk_crypto_fallback_evict_key(key);
	/*
	 * An error can only occur here if the key failed to be evicted from a
	 * keyslot (due to a hardware or driver issue) or is allegedly still in
	 * use by I/O (due to a kernel bug). Even in these cases, the key is
	 * still unlinked from the keyslot management structures, and the caller
	 * is allowed and expected to free it right away. There's nothing
	 * callers can do to handle errors, so just log them and return void.
	 */
	if (err)
		pr_warn_ratelimited("%pg: error %d evicting key\n", bdev, err);
}
EXPORT_SYMBOL_GPL(blk_crypto_evict_key);
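/*
 * Teardown sketch (illustrative, hypothetical caller): before freeing a key,
 * an upper layer evicts it from every block device it may have been used on
 * and then zeroizes it:
 *
 *	blk_crypto_evict_key(bdev, &blk_key);
 *	memzero_explicit(&blk_key, sizeof(blk_key));
 */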