/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright 2019 Google LLC
 */

#ifndef __LINUX_BLK_CRYPTO_INTERNAL_H
#define __LINUX_BLK_CRYPTO_INTERNAL_H

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Represents a crypto mode supported by blk-crypto */
struct blk_crypto_mode {
	const char *cipher_str; /* crypto API name (for fallback case) */
	unsigned int keysize; /* key size in bytes */
	unsigned int ivsize; /* iv size in bytes */
};

extern const struct blk_crypto_mode blk_crypto_modes[];

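/*
 * Note: blk_crypto_modes[] is indexed by enum blk_crypto_mode_num from
 * <linux/blk-crypto.h>; the table itself is defined in blk-crypto.c.
 */
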
#ifdef CONFIG_BLK_INLINE_ENCRYPTION

void bio_crypt_dun_increment(u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
			     unsigned int inc);

bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio);

bool bio_crypt_ctx_mergeable(struct bio_crypt_ctx *bc1, unsigned int bc1_bytes,
			     struct bio_crypt_ctx *bc2);

static inline bool bio_crypt_ctx_back_mergeable(struct request *req,
						struct bio *bio)
{
	return bio_crypt_ctx_mergeable(req->crypt_ctx, blk_rq_bytes(req),
				       bio->bi_crypt_context);
}

static inline bool bio_crypt_ctx_front_mergeable(struct request *req,
						 struct bio *bio)
{
	return bio_crypt_ctx_mergeable(bio->bi_crypt_context,
				       bio->bi_iter.bi_size, req->crypt_ctx);
}

static inline bool bio_crypt_ctx_merge_rq(struct request *req,
					  struct request *next)
{
	return bio_crypt_ctx_mergeable(req->crypt_ctx, blk_rq_bytes(req),
				       next->crypt_ctx);
}

static inline void blk_crypto_rq_set_defaults(struct request *rq)
{
	rq->crypt_ctx = NULL;
	rq->crypt_keyslot = NULL;
}

static inline bool blk_crypto_rq_is_encrypted(struct request *rq)
{
	return rq->crypt_ctx;
}

#else /* CONFIG_BLK_INLINE_ENCRYPTION */

static inline bool bio_crypt_rq_ctx_compatible(struct request *rq,
					       struct bio *bio)
{
	return true;
}

static inline bool bio_crypt_ctx_front_mergeable(struct request *req,
						 struct bio *bio)
{
	return true;
}

static inline bool bio_crypt_ctx_back_mergeable(struct request *req,
						struct bio *bio)
{
	return true;
}

static inline bool bio_crypt_ctx_merge_rq(struct request *req,
					  struct request *next)
{
	return true;
}

static inline void blk_crypto_rq_set_defaults(struct request *rq) { }

static inline bool blk_crypto_rq_is_encrypted(struct request *rq)
{
	return false;
}

#endif /* CONFIG_BLK_INLINE_ENCRYPTION */

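/*
 * The helpers below follow a common pattern: a cheap inline check for a
 * crypt context (or an encrypted request), with the real work done by the
 * out-of-line __blk_crypto_*() / __bio_crypt_*() functions in blk-crypto.c.
 */
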
void __bio_crypt_advance(struct bio *bio, unsigned int bytes);
static inline void bio_crypt_advance(struct bio *bio, unsigned int bytes)
{
	if (bio_has_crypt_ctx(bio))
		__bio_crypt_advance(bio, bytes);
}

void __bio_crypt_free_ctx(struct bio *bio);
static inline void bio_crypt_free_ctx(struct bio *bio)
{
	if (bio_has_crypt_ctx(bio))
		__bio_crypt_free_ctx(bio);
}

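/*
 * On a front merge the bio becomes the new start of the request, so the
 * request's data unit number (DUN) must be reset to the bio's DUN.
 */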
static inline void bio_crypt_do_front_merge(struct request *rq,
					    struct bio *bio)
{
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	if (bio_has_crypt_ctx(bio))
		memcpy(rq->crypt_ctx->bc_dun, bio->bi_crypt_context->bc_dun,
		       sizeof(rq->crypt_ctx->bc_dun));
#endif
}

bool __blk_crypto_bio_prep(struct bio **bio_ptr);
static inline bool blk_crypto_bio_prep(struct bio **bio_ptr)
{
	if (bio_has_crypt_ctx(*bio_ptr))
		return __blk_crypto_bio_prep(bio_ptr);
	return true;
}

blk_status_t __blk_crypto_init_request(struct request *rq);
static inline blk_status_t blk_crypto_init_request(struct request *rq)
{
	if (blk_crypto_rq_is_encrypted(rq))
		return __blk_crypto_init_request(rq);
	return BLK_STS_OK;
}

void __blk_crypto_free_request(struct request *rq);
static inline void blk_crypto_free_request(struct request *rq)
{
	if (blk_crypto_rq_is_encrypted(rq))
		__blk_crypto_free_request(rq);
}

int __blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
			     gfp_t gfp_mask);
/**
 * blk_crypto_rq_bio_prep - Prepare a request's crypt_ctx when its first bio
 *			    is inserted
 * @rq: The request to prepare
 * @bio: The first bio being inserted into the request
 * @gfp_mask: Memory allocation flags
 *
 * Return: 0 on success, -ENOMEM if out of memory.  -ENOMEM is only possible if
 *	   @gfp_mask doesn't include %__GFP_DIRECT_RECLAIM.
 */
static inline int blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
					 gfp_t gfp_mask)
{
	if (bio_has_crypt_ctx(bio))
		return __blk_crypto_rq_bio_prep(rq, bio, gfp_mask);
	return 0;
}

/**
 * blk_crypto_insert_cloned_request - Prepare a cloned request to be inserted
 *				      into a request queue.
 * @rq: the request being queued
 *
 * Return: BLK_STS_OK on success, nonzero on error.
 */
static inline blk_status_t blk_crypto_insert_cloned_request(struct request *rq)
{
	if (blk_crypto_rq_is_encrypted(rq))
		return blk_crypto_init_request(rq);
	return BLK_STS_OK;
}

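/*
 * The blk-crypto-fallback entry points below are only available when
 * CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK is set; otherwise the stubs report
 * that the crypto API fallback is unavailable and fail the bio.
 */
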
#ifdef CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK

int blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num);

bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr);

int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key);

#else /* CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK */

static inline int
blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num)
{
	pr_warn_once("crypto API fallback is disabled\n");
	return -ENOPKG;
}

static inline bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr)
{
	pr_warn_once("crypto API fallback disabled; failing request.\n");
	(*bio_ptr)->bi_status = BLK_STS_NOTSUPP;
	return false;
}

static inline int
blk_crypto_fallback_evict_key(const struct blk_crypto_key *key)
{
	return 0;
}

#endif /* CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK */

#endif /* __LINUX_BLK_CRYPTO_INTERNAL_H */