// SPDX-License-Identifier: GPL-2.0-only
/*
 * CQHCI crypto engine (inline encryption) support
 *
 * Copyright 2020 Google LLC
 */
#include <linux/blk-crypto.h>
#include <linux/blk-crypto-profile.h>
#include <linux/mmc/host.h>

#include "cqhci-crypto.h"
/* Map from blk-crypto modes to CQHCI crypto algorithm IDs and key sizes */
static const struct cqhci_crypto_alg_entry {
	enum cqhci_crypto_alg alg;
	enum cqhci_crypto_key_size key_size;
} cqhci_crypto_algs[BLK_ENCRYPTION_MODE_MAX] = {
	[BLK_ENCRYPTION_MODE_AES_256_XTS] = {
		.alg = CQHCI_CRYPTO_ALG_AES_XTS,
		.key_size = CQHCI_CRYPTO_KEY_SIZE_256,
	},
};
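/*
 * Note: entries for blk-crypto modes not listed above remain zero-initialized,
 * i.e. their key_size is CQHCI_CRYPTO_KEY_SIZE_INVALID.  The BUILD_BUG_ON
 * checks below rely on that value being 0.
 */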
static inline struct cqhci_host *
cqhci_host_from_crypto_profile(struct blk_crypto_profile *profile)
{
	struct mmc_host *mmc =
		container_of(profile, struct mmc_host, crypto_profile);

	return mmc->cqe_private;
}
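/*
 * Program a crypto configuration into a slot.  Note the write ordering below:
 * dword 16 (which contains the CFGE bit) is zeroed first and written last, so
 * the key material is only ever updated while the slot is disabled, and the
 * new configuration takes effect with the final single-register write.
 */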
static int cqhci_crypto_program_key(struct cqhci_host *cq_host,
				    const union cqhci_crypto_cfg_entry *cfg,
				    int slot)
{
	u32 slot_offset = cq_host->crypto_cfg_register + slot * sizeof(*cfg);
	int i;

	/* Hosts that need a nonstandard mechanism supply a program_key op. */
	if (cq_host->ops->program_key)
		return cq_host->ops->program_key(cq_host, cfg, slot);

	/* Clear CFGE */
	cqhci_writel(cq_host, 0, slot_offset + 16 * sizeof(cfg->reg_val[0]));

	/* Write the key */
	for (i = 0; i < 16; i++) {
		cqhci_writel(cq_host, le32_to_cpu(cfg->reg_val[i]),
			     slot_offset + i * sizeof(cfg->reg_val[0]));
	}

	/* Write dword 17 */
	cqhci_writel(cq_host, le32_to_cpu(cfg->reg_val[17]),
		     slot_offset + 17 * sizeof(cfg->reg_val[0]));

	/* Write dword 16, which includes the new value of CFGE */
	cqhci_writel(cq_host, le32_to_cpu(cfg->reg_val[16]),
		     slot_offset + 16 * sizeof(cfg->reg_val[0]));
	return 0;
}
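/*
 * blk-crypto ->keyslot_program hook: translate a blk_crypto_key into a CQHCI
 * crypto configuration entry and program it into the given keyslot.
 */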
static int cqhci_crypto_keyslot_program(struct blk_crypto_profile *profile,
					const struct blk_crypto_key *key,
					unsigned int slot)
{
	struct cqhci_host *cq_host = cqhci_host_from_crypto_profile(profile);
	const union cqhci_crypto_cap_entry *ccap_array =
		cq_host->crypto_cap_array;
	const struct cqhci_crypto_alg_entry *alg =
			&cqhci_crypto_algs[key->crypto_cfg.crypto_mode];
	u8 data_unit_mask = key->crypto_cfg.data_unit_size / 512;
	int i;
	int cap_idx = -1;
	union cqhci_crypto_cfg_entry cfg = {};
	int err;

	/*
	 * Find a hardware capability matching the key's algorithm, key size,
	 * and data unit size.
	 */
	BUILD_BUG_ON(CQHCI_CRYPTO_KEY_SIZE_INVALID != 0);
	for (i = 0; i < cq_host->crypto_capabilities.num_crypto_cap; i++) {
		if (ccap_array[i].algorithm_id == alg->alg &&
		    ccap_array[i].key_size == alg->key_size &&
		    (ccap_array[i].sdus_mask & data_unit_mask)) {
			cap_idx = i;
			break;
		}
	}
	if (WARN_ON(cap_idx < 0))
		return -EOPNOTSUPP;

	cfg.data_unit_size = data_unit_mask;
	cfg.crypto_cap_idx = cap_idx;
	cfg.config_enable = CQHCI_CRYPTO_CONFIGURATION_ENABLE;

	if (ccap_array[cap_idx].algorithm_id == CQHCI_CRYPTO_ALG_AES_XTS) {
		/* In XTS mode, the blk_crypto_key's size is already doubled */
		memcpy(cfg.crypto_key, key->raw, key->size/2);
		memcpy(cfg.crypto_key + CQHCI_CRYPTO_KEY_MAX_SIZE/2,
		       key->raw + key->size/2, key->size/2);
	} else {
		memcpy(cfg.crypto_key, key->raw, key->size);
	}

	err = cqhci_crypto_program_key(cq_host, &cfg, slot);

	/* Don't leave a copy of the key material on the stack. */
	memzero_explicit(&cfg, sizeof(cfg));
	return err;
}
static int cqhci_crypto_clear_keyslot(struct cqhci_host *cq_host, int slot)
{
	/*
	 * Clear the crypto cfg on the device.  Clearing CFGE
	 * might not be sufficient, so just clear the entire cfg.
	 */
	union cqhci_crypto_cfg_entry cfg = {};

	return cqhci_crypto_program_key(cq_host, &cfg, slot);
}
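/*
 * blk-crypto ->keyslot_evict hook.  The key being evicted doesn't need to be
 * referenced here; zeroing out the slot's entire configuration is sufficient.
 */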
static int cqhci_crypto_keyslot_evict(struct blk_crypto_profile *profile,
				      const struct blk_crypto_key *key,
				      unsigned int slot)
{
	struct cqhci_host *cq_host = cqhci_host_from_crypto_profile(profile);

	return cqhci_crypto_clear_keyslot(cq_host, slot);
}
/*
 * The keyslot management operations for CQHCI crypto.
 *
 * Note that the block layer ensures that these are never called while the host
 * controller is runtime-suspended.  However, the CQE won't necessarily be
 * "enabled" when these are called, i.e. CQHCI_ENABLE might not be set in the
 * CQHCI_CFG register.  But the hardware allows that.
 */
static const struct blk_crypto_ll_ops cqhci_crypto_ops = {
	.keyslot_program	= cqhci_crypto_keyslot_program,
	.keyslot_evict		= cqhci_crypto_keyslot_evict,
};
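/*
 * Find the blk-crypto mode that corresponds to the given hardware capability,
 * if any.  Since cqhci_crypto_algs[] is indexed by blk_crypto_mode_num, the
 * index of a matching entry is itself the mode number.
 */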
static enum blk_crypto_mode_num
cqhci_find_blk_crypto_mode(union cqhci_crypto_cap_entry cap)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cqhci_crypto_algs); i++) {
		BUILD_BUG_ON(CQHCI_CRYPTO_KEY_SIZE_INVALID != 0);
		if (cqhci_crypto_algs[i].alg == cap.algorithm_id &&
		    cqhci_crypto_algs[i].key_size == cap.key_size)
			return i;
	}
	return BLK_ENCRYPTION_MODE_INVALID;
}
/**
 * cqhci_crypto_init - initialize CQHCI crypto support
 * @cq_host: a cqhci host
 *
 * If the driver previously set MMC_CAP2_CRYPTO and the CQE declares
 * CQHCI_CAP_CS, initialize the crypto support.  This involves reading the
 * crypto capability registers, initializing the blk_crypto_profile, clearing
 * all keyslots, and enabling 128-bit task descriptors.
 *
 * Return: 0 if crypto was initialized or isn't supported; whether
 *	   MMC_CAP2_CRYPTO remains set indicates which one of those cases it is.
 *	   Also can return a negative errno value on unexpected error.
 */
int cqhci_crypto_init(struct cqhci_host *cq_host)
{
	struct mmc_host *mmc = cq_host->mmc;
	struct device *dev = mmc_dev(mmc);
	struct blk_crypto_profile *profile = &mmc->crypto_profile;
	unsigned int num_keyslots;
	unsigned int cap_idx;
	enum blk_crypto_mode_num blk_mode_num;
	unsigned int slot;
	int err = 0;

	if (!(mmc->caps2 & MMC_CAP2_CRYPTO) ||
	    !(cqhci_readl(cq_host, CQHCI_CAP) & CQHCI_CAP_CS))
		goto out;
	cq_host->crypto_capabilities.reg_val =
			cpu_to_le32(cqhci_readl(cq_host, CQHCI_CCAP));

	/* The crypto config array pointer is in units of 256 bytes. */
	cq_host->crypto_cfg_register =
		(u32)cq_host->crypto_capabilities.config_array_ptr * 0x100;

	cq_host->crypto_cap_array =
		devm_kcalloc(dev, cq_host->crypto_capabilities.num_crypto_cap,
			     sizeof(cq_host->crypto_cap_array[0]), GFP_KERNEL);
	if (!cq_host->crypto_cap_array) {
		err = -ENOMEM;
		goto out;
	}
	/*
	 * CCAP.CFGC is off by one, so the actual number of crypto
	 * configurations (a.k.a. keyslots) is CCAP.CFGC + 1.
	 */
	num_keyslots = cq_host->crypto_capabilities.config_count + 1;

	err = devm_blk_crypto_profile_init(dev, profile, num_keyslots);
	if (err)
		goto out;

	profile->ll_ops = cqhci_crypto_ops;
	profile->dev = dev;
	/* Unfortunately, CQHCI crypto only supports 32 DUN bits. */
	profile->max_dun_bytes_supported = 4;
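	/*
	 * Bit i of a capability's SDUS mask indicates support for a data unit
	 * size of 512 << i bytes, so multiplying the mask by 512 below yields
	 * the bitmask-of-byte-sizes format that profile->modes_supported[]
	 * expects.
	 */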
	/*
	 * Cache all the crypto capabilities and advertise the supported crypto
	 * modes and data unit sizes to the block layer.
	 */
	for (cap_idx = 0; cap_idx < cq_host->crypto_capabilities.num_crypto_cap;
	     cap_idx++) {
		cq_host->crypto_cap_array[cap_idx].reg_val =
			cpu_to_le32(cqhci_readl(cq_host,
						CQHCI_CRYPTOCAP +
						cap_idx * sizeof(__le32)));
		blk_mode_num = cqhci_find_blk_crypto_mode(
					cq_host->crypto_cap_array[cap_idx]);
		if (blk_mode_num == BLK_ENCRYPTION_MODE_INVALID)
			continue;
		profile->modes_supported[blk_mode_num] |=
			cq_host->crypto_cap_array[cap_idx].sdus_mask * 512;
	}
	/* Clear all the keyslots so that we start in a known state. */
	for (slot = 0; slot < num_keyslots; slot++)
		cqhci_crypto_clear_keyslot(cq_host, slot);
	/* CQHCI crypto requires the use of 128-bit task descriptors. */
	cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
	return 0;

out:
	mmc->caps2 &= ~MMC_CAP2_CRYPTO;
	return err;
}