// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Cryptographic Coprocessor (CCP) crypto API support
 *
 * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/ccp.h>
#include <linux/scatterlist.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/akcipher.h>

#include "ccp-crypto.h"

MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0.0");
MODULE_DESCRIPTION("AMD Cryptographic Coprocessor crypto API support");

static unsigned int aes_disable;
module_param(aes_disable, uint, 0444);
MODULE_PARM_DESC(aes_disable, "Disable use of AES - any non-zero value");

static unsigned int sha_disable;
module_param(sha_disable, uint, 0444);
MODULE_PARM_DESC(sha_disable, "Disable use of SHA - any non-zero value");

static unsigned int des3_disable;
module_param(des3_disable, uint, 0444);
MODULE_PARM_DESC(des3_disable, "Disable use of 3DES - any non-zero value");

static unsigned int rsa_disable;
module_param(rsa_disable, uint, 0444);
MODULE_PARM_DESC(rsa_disable, "Disable use of RSA - any non-zero value");

/* List heads for the supported algorithms */
static LIST_HEAD(hash_algs);
static LIST_HEAD(skcipher_algs);
static LIST_HEAD(aead_algs);
static LIST_HEAD(akcipher_algs);

/* For any tfm, requests for that tfm must be returned in the order
 * received.  With multiple queues available, the CCP can process more
 * than one cmd at a time.  Therefore we must maintain a cmd list to ensure
 * the proper ordering of requests on a given tfm.
 */
struct ccp_crypto_queue {
	struct list_head cmds;
	struct list_head *backlog;
	unsigned int cmd_count;
};

#define CCP_CRYPTO_MAX_QLEN	100

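/*
 * Queueing policy, in brief: while fewer than CCP_CRYPTO_MAX_QLEN cmds
 * are queued, new cmds are accepted (and submitted if no cmd for the
 * same tfm is already in flight).  Once the limit is hit, a cmd that
 * sets CCP_CMD_MAY_BACKLOG is still queued but its caller sees -EBUSY;
 * anything else is rejected with -ENOSPC.  See ccp_crypto_enqueue_cmd()
 * below.
 */
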
static struct ccp_crypto_queue req_queue;
static DEFINE_SPINLOCK(req_queue_lock);

struct ccp_crypto_cmd {
	struct list_head entry;

	struct ccp_cmd *cmd;

	/* Save the crypto_tfm and crypto_async_request addresses
	 * separately to avoid any reference to a possibly invalid
	 * crypto_async_request structure after invoking the request
	 * callback
	 */
	struct crypto_async_request *req;
	struct crypto_tfm *tfm;

	/* Used for held command processing to determine state */
	int ret;
};

/* -EINPROGRESS and -EBUSY indicate an in-flight or backlogged cmd and
 * count as success for submission purposes.
 */
static inline bool ccp_crypto_success(int err)
{
	if (err && (err != -EINPROGRESS) && (err != -EBUSY))
		return false;

	return true;
}

static struct ccp_crypto_cmd *ccp_crypto_cmd_complete(
	struct ccp_crypto_cmd *crypto_cmd, struct ccp_crypto_cmd **backlog)
{
	struct ccp_crypto_cmd *held = NULL, *tmp;
	unsigned long flags;

	*backlog = NULL;

	spin_lock_irqsave(&req_queue_lock, flags);

	/* Held cmds will be after the current cmd in the queue so start
	 * searching for a cmd with a matching tfm for submission.
	 */
	tmp = crypto_cmd;
	list_for_each_entry_continue(tmp, &req_queue.cmds, entry) {
		if (crypto_cmd->tfm != tmp->tfm)
			continue;
		held = tmp;
		break;
	}

	/* Process the backlog:
	 *   Because cmds can be executed from any point in the cmd list
	 *   special precautions have to be taken when handling the backlog.
	 */
	if (req_queue.backlog != &req_queue.cmds) {
		/* Skip over this cmd if it is the next backlog cmd */
		if (req_queue.backlog == &crypto_cmd->entry)
			req_queue.backlog = crypto_cmd->entry.next;

		*backlog = container_of(req_queue.backlog,
					struct ccp_crypto_cmd, entry);
		req_queue.backlog = req_queue.backlog->next;

		/* Skip over this cmd if it is now the next backlog cmd */
		if (req_queue.backlog == &crypto_cmd->entry)
			req_queue.backlog = crypto_cmd->entry.next;
	}

	/* Remove the cmd entry from the list of cmds */
	req_queue.cmd_count--;
	list_del(&crypto_cmd->entry);

	spin_unlock_irqrestore(&req_queue_lock, flags);

	return held;
}

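/*
 * Completion callback registered with the CCP for every cmd.  On final
 * completion it promotes the next backlogged request (reporting
 * -EINPROGRESS to its caller), runs the ctx completion callback for the
 * finished request, and then submits the next held cmd for the same tfm.
 */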
static void ccp_crypto_complete(void *data, int err)
{
	struct ccp_crypto_cmd *crypto_cmd = data;
	struct ccp_crypto_cmd *held, *next, *backlog;
	struct crypto_async_request *req = crypto_cmd->req;
	struct ccp_ctx *ctx = crypto_tfm_ctx_dma(req->tfm);
	int ret;

	if (err == -EINPROGRESS) {
		/* Only propagate the -EINPROGRESS if necessary */
		if (crypto_cmd->ret == -EBUSY) {
			crypto_cmd->ret = -EINPROGRESS;
			crypto_request_complete(req, -EINPROGRESS);
		}

		return;
	}

	/* Operation has completed - update the queue before invoking
	 * the completion callbacks and retrieve the next cmd (cmd with
	 * a matching tfm) that can be submitted to the CCP.
	 */
	held = ccp_crypto_cmd_complete(crypto_cmd, &backlog);
	if (backlog) {
		backlog->ret = -EINPROGRESS;
		crypto_request_complete(backlog->req, -EINPROGRESS);
	}

	/* Transition the state from -EBUSY to -EINPROGRESS first */
	if (crypto_cmd->ret == -EBUSY)
		crypto_request_complete(req, -EINPROGRESS);

	/* Completion callbacks */
	ret = err;
	if (ctx->complete)
		ret = ctx->complete(req, ret);
	crypto_request_complete(req, ret);

	/* Submit the next cmd */
	while (held) {
		/* Since we have already queued the cmd, we must indicate that
		 * we can backlog so as not to "lose" this request.
		 */
		held->cmd->flags |= CCP_CMD_MAY_BACKLOG;
		ret = ccp_enqueue_cmd(held->cmd);
		if (ccp_crypto_success(ret))
			break;

		/* Error occurred, report it and get the next entry */
		ctx = crypto_tfm_ctx_dma(held->req->tfm);
		if (ctx->complete)
			ret = ctx->complete(held->req, ret);
		crypto_request_complete(held->req, ret);

		next = ccp_crypto_cmd_complete(held, &backlog);
		if (backlog) {
			backlog->ret = -EINPROGRESS;
			crypto_request_complete(backlog->req, -EINPROGRESS);
		}

		kfree(held);
		held = next;
	}

	kfree(crypto_cmd);
}

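/*
 * Queue a cmd for (eventual) submission to the CCP.  Returns
 * -EINPROGRESS if the cmd was queued or submitted, -EBUSY if it was
 * parked on the backlog, or an error such as -ENOSPC if it could not
 * be queued at all, in which case crypto_cmd is freed before returning.
 */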
static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd)
{
	struct ccp_crypto_cmd *active = NULL, *tmp;
	unsigned long flags;
	bool free_cmd = true;
	int ret;

	spin_lock_irqsave(&req_queue_lock, flags);

	/* Check if the cmd can/should be queued */
	if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) {
		if (!(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG)) {
			ret = -ENOSPC;
			goto e_lock;
		}
	}

	/* Look for an entry with the same tfm.  If there is a cmd
	 * with the same tfm in the list then the current cmd cannot
	 * be submitted to the CCP yet.
	 */
	list_for_each_entry(tmp, &req_queue.cmds, entry) {
		if (crypto_cmd->tfm != tmp->tfm)
			continue;
		active = tmp;
		break;
	}

	ret = -EINPROGRESS;
	if (!active) {
		ret = ccp_enqueue_cmd(crypto_cmd->cmd);
		if (!ccp_crypto_success(ret))
			goto e_lock;	/* Error, don't queue it */
	}

	if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) {
		ret = -EBUSY;
		if (req_queue.backlog == &req_queue.cmds)
			req_queue.backlog = &crypto_cmd->entry;
	}
	crypto_cmd->ret = ret;

	req_queue.cmd_count++;
	list_add_tail(&crypto_cmd->entry, &req_queue.cmds);

	/* Ownership of crypto_cmd has passed to the queue */
	free_cmd = false;

e_lock:
	spin_unlock_irqrestore(&req_queue_lock, flags);

	if (free_cmd)
		kfree(crypto_cmd);

	return ret;
}

/**
 * ccp_crypto_enqueue_request - queue a crypto async request for processing
 *				by the CCP
 *
 * @req: crypto_async_request struct to be processed
 * @cmd: ccp_cmd struct to be sent to the CCP
 */
int ccp_crypto_enqueue_request(struct crypto_async_request *req,
			       struct ccp_cmd *cmd)
{
	struct ccp_crypto_cmd *crypto_cmd;
	gfp_t gfp;

	gfp = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;

	crypto_cmd = kzalloc(sizeof(*crypto_cmd), gfp);
	if (!crypto_cmd)
		return -ENOMEM;

	/* The tfm pointer must be saved and not referenced from the
	 * crypto_async_request (req) pointer because it is used after
	 * the completion callback for the request and the req pointer
	 * might not be valid anymore.
	 */
	crypto_cmd->cmd = cmd;
	crypto_cmd->req = req;
	crypto_cmd->tfm = req->tfm;

	cmd->callback = ccp_crypto_complete;
	cmd->data = crypto_cmd;

	if (req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
		cmd->flags |= CCP_CMD_MAY_BACKLOG;
	else
		cmd->flags &= ~CCP_CMD_MAY_BACKLOG;

	return ccp_crypto_enqueue_cmd(crypto_cmd);
}

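/*
 * Illustrative usage of ccp_crypto_enqueue_request() (a sketch, not code
 * from this file): an algorithm implementation fills in the ccp_cmd
 * embedded in its request context and hands the request off here.
 * Abbreviated from the AES support in ccp-crypto-aes.c:
 *
 *	INIT_LIST_HEAD(&rctx->cmd.entry);
 *	rctx->cmd.engine = CCP_ENGINE_AES;
 *	... set the engine-specific rctx->cmd.u.aes fields ...
 *	ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
 */
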
struct scatterlist *ccp_crypto_sg_table_add(struct sg_table *table,
					    struct scatterlist *sg_add)
{
	struct scatterlist *sg, *sg_last = NULL;

	/* Find the first unused entry in the table */
	for (sg = table->sgl; sg; sg = sg_next(sg))
		if (!sg_page(sg))
			break;
	if (WARN_ON(!sg))
		return NULL;

	/* Copy the sg_add entries into the unused table entries */
	for (; sg && sg_add; sg = sg_next(sg), sg_add = sg_next(sg_add)) {
		sg_set_page(sg, sg_page(sg_add), sg_add->length,
			    sg_add->offset);
		sg_last = sg;
	}
	if (WARN_ON(sg_add))
		return NULL;

	return sg_last;
}

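/*
 * Note: callers (e.g. the AES-CMAC and SHA support) use
 * ccp_crypto_sg_table_add() to append the entries of several
 * scatterlists to one pre-allocated sg_table so that a single combined
 * scatterlist can be handed to the CCP.
 */
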
static int ccp_register_algs(void)
{
	int ret;

	if (!aes_disable) {
		ret = ccp_register_aes_algs(&skcipher_algs);
		if (ret)
			return ret;

		ret = ccp_register_aes_cmac_algs(&hash_algs);
		if (ret)
			return ret;

		ret = ccp_register_aes_xts_algs(&skcipher_algs);
		if (ret)
			return ret;

		ret = ccp_register_aes_aeads(&aead_algs);
		if (ret)
			return ret;
	}

	if (!des3_disable) {
		ret = ccp_register_des3_algs(&skcipher_algs);
		if (ret)
			return ret;
	}

	if (!sha_disable) {
		ret = ccp_register_sha_algs(&hash_algs);
		if (ret)
			return ret;
	}

	if (!rsa_disable) {
		ret = ccp_register_rsa_algs(&akcipher_algs);
		if (ret)
			return ret;
	}

	return 0;
}

static void ccp_unregister_algs(void)
{
	struct ccp_crypto_ahash_alg *ahash_alg, *ahash_tmp;
	struct ccp_crypto_skcipher_alg *ablk_alg, *ablk_tmp;
	struct ccp_crypto_aead *aead_alg, *aead_tmp;
	struct ccp_crypto_akcipher_alg *akc_alg, *akc_tmp;

	list_for_each_entry_safe(ahash_alg, ahash_tmp, &hash_algs, entry) {
		crypto_unregister_ahash(&ahash_alg->alg);
		list_del(&ahash_alg->entry);
		kfree(ahash_alg);
	}

	list_for_each_entry_safe(ablk_alg, ablk_tmp, &skcipher_algs, entry) {
		crypto_unregister_skcipher(&ablk_alg->alg);
		list_del(&ablk_alg->entry);
		kfree(ablk_alg);
	}

	list_for_each_entry_safe(aead_alg, aead_tmp, &aead_algs, entry) {
		crypto_unregister_aead(&aead_alg->alg);
		list_del(&aead_alg->entry);
		kfree(aead_alg);
	}

	list_for_each_entry_safe(akc_alg, akc_tmp, &akcipher_algs, entry) {
		crypto_unregister_akcipher(&akc_alg->alg);
		list_del(&akc_alg->entry);
		kfree(akc_alg);
	}
}

static int __init ccp_crypto_init(void)
{
	int ret;

	ret = ccp_present();
	if (ret) {
		pr_err("Cannot load: there are no available CCPs\n");
		return ret;
	}

	INIT_LIST_HEAD(&req_queue.cmds);
	req_queue.backlog = &req_queue.cmds;
	req_queue.cmd_count = 0;

	ret = ccp_register_algs();
	if (ret)
		ccp_unregister_algs();

	return ret;
}

static void __exit ccp_crypto_exit(void)
{
	ccp_unregister_algs();
}

module_init(ccp_crypto_init);
module_exit(ccp_crypto_exit);