/*
 * AMD Cryptographic Coprocessor (CCP) crypto API support
 *
 * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/ccp.h>
#include <linux/scatterlist.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/akcipher.h>

#include "ccp-crypto.h"

MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0.0");
MODULE_DESCRIPTION("AMD Cryptographic Coprocessor crypto API support");
static unsigned int aes_disable;
module_param(aes_disable, uint, 0444);
MODULE_PARM_DESC(aes_disable, "Disable use of AES - any non-zero value");

static unsigned int sha_disable;
module_param(sha_disable, uint, 0444);
MODULE_PARM_DESC(sha_disable, "Disable use of SHA - any non-zero value");

static unsigned int des3_disable;
module_param(des3_disable, uint, 0444);
MODULE_PARM_DESC(des3_disable, "Disable use of 3DES - any non-zero value");

static unsigned int rsa_disable;
module_param(rsa_disable, uint, 0444);
MODULE_PARM_DESC(rsa_disable, "Disable use of RSA - any non-zero value");

/* List heads for the supported algorithms */
static LIST_HEAD(hash_algs);
static LIST_HEAD(cipher_algs);
static LIST_HEAD(aead_algs);
static LIST_HEAD(akcipher_algs);
/* For any tfm, requests for that tfm must be returned in the order
 * received.  With multiple queues available, the CCP can process more
 * than one cmd at a time.  Therefore we must maintain a cmd list to ensure
 * the proper ordering of requests on a given tfm.
 */
struct ccp_crypto_queue {
	struct list_head cmds;
	struct list_head *backlog;
	unsigned int cmd_count;
};

#define CCP_CRYPTO_MAX_QLEN	100

static struct ccp_crypto_queue req_queue;
static spinlock_t req_queue_lock;
struct ccp_crypto_cmd {
	struct list_head entry;

	struct ccp_cmd *cmd;

	/* Save the crypto_tfm and crypto_async_request addresses
	 * separately to avoid any reference to a possibly invalid
	 * crypto_async_request structure after invoking the request
	 * callback
	 */
	struct crypto_async_request *req;
	struct crypto_tfm *tfm;

	/* Used for held command processing to determine state */
	int ret;
};

struct ccp_crypto_cpu {
	struct work_struct work;
	struct completion completion;
	struct ccp_crypto_cmd *crypto_cmd;
	int err;
};
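/* A cmd is treated as accepted by the CCP if submission returned 0,
 * -EINPROGRESS or -EBUSY (queued or backlogged).
 */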
static inline bool ccp_crypto_success(int err)
{
	if (err && (err != -EINPROGRESS) && (err != -EBUSY))
		return false;

	return true;
}
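/* Remove a completed cmd from the request queue.  Returns the next queued
 * cmd that uses the same tfm (now eligible for submission), if any, and
 * reports the cmd that moves off the backlog through @backlog.
 */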
static struct ccp_crypto_cmd *ccp_crypto_cmd_complete(
	struct ccp_crypto_cmd *crypto_cmd, struct ccp_crypto_cmd **backlog)
{
	struct ccp_crypto_cmd *held = NULL, *tmp;
	unsigned long flags;

	*backlog = NULL;

	spin_lock_irqsave(&req_queue_lock, flags);

	/* Held cmds will be after the current cmd in the queue so start
	 * searching for a cmd with a matching tfm for submission.
	 */
	tmp = crypto_cmd;
	list_for_each_entry_continue(tmp, &req_queue.cmds, entry) {
		if (crypto_cmd->tfm != tmp->tfm)
			continue;
		held = tmp;
		break;
	}

	/* Process the backlog:
	 *   Because cmds can be executed from any point in the cmd list
	 *   special precautions have to be taken when handling the backlog.
	 */
	if (req_queue.backlog != &req_queue.cmds) {
		/* Skip over this cmd if it is the next backlog cmd */
		if (req_queue.backlog == &crypto_cmd->entry)
			req_queue.backlog = crypto_cmd->entry.next;

		*backlog = container_of(req_queue.backlog,
					struct ccp_crypto_cmd, entry);
		req_queue.backlog = req_queue.backlog->next;

		/* Skip over this cmd if it is now the next backlog cmd */
		if (req_queue.backlog == &crypto_cmd->entry)
			req_queue.backlog = crypto_cmd->entry.next;
	}

	/* Remove the cmd entry from the list of cmds */
	req_queue.cmd_count--;
	list_del(&crypto_cmd->entry);

	spin_unlock_irqrestore(&req_queue_lock, flags);

	return held;
}
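/* Completion callback invoked by the CCP driver when a cmd finishes.
 * Propagates the result to the crypto API request, notifies any cmd
 * leaving the backlog, and submits the next cmd queued for the same tfm.
 */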
static void ccp_crypto_complete(void *data, int err)
{
	struct ccp_crypto_cmd *crypto_cmd = data;
	struct ccp_crypto_cmd *held, *next, *backlog;
	struct crypto_async_request *req = crypto_cmd->req;
	struct ccp_ctx *ctx = crypto_tfm_ctx(req->tfm);
	int ret;

	if (err == -EINPROGRESS) {
		/* Only propagate the -EINPROGRESS if necessary */
		if (crypto_cmd->ret == -EBUSY) {
			crypto_cmd->ret = -EINPROGRESS;
			req->complete(req, -EINPROGRESS);
		}

		return;
	}

	/* Operation has completed - update the queue before invoking
	 * the completion callbacks and retrieve the next cmd (cmd with
	 * a matching tfm) that can be submitted to the CCP.
	 */
	held = ccp_crypto_cmd_complete(crypto_cmd, &backlog);
	if (backlog) {
		backlog->ret = -EINPROGRESS;
		backlog->req->complete(backlog->req, -EINPROGRESS);
	}

	/* Transition the state from -EBUSY to -EINPROGRESS first */
	if (crypto_cmd->ret == -EBUSY)
		req->complete(req, -EINPROGRESS);

	/* Completion callbacks */
	ret = err;
	if (ctx->complete)
		ret = ctx->complete(req, ret);
	req->complete(req, ret);

	/* Submit the next cmd */
	while (held) {
		/* Since we have already queued the cmd, we must indicate that
		 * we can backlog so as not to "lose" this request.
		 */
		held->cmd->flags |= CCP_CMD_MAY_BACKLOG;
		ret = ccp_enqueue_cmd(held->cmd);
		if (ccp_crypto_success(ret))
			break;

		/* Error occurred, report it and get the next entry */
		ctx = crypto_tfm_ctx(held->req->tfm);
		if (ctx->complete)
			ret = ctx->complete(held->req, ret);
		held->req->complete(held->req, ret);

		next = ccp_crypto_cmd_complete(held, &backlog);
		if (backlog) {
			backlog->ret = -EINPROGRESS;
			backlog->req->complete(backlog->req, -EINPROGRESS);
		}

		kfree(held);

		held = next;
	}

	kfree(crypto_cmd);
}
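/* Queue a cmd for ordered processing.  The cmd is handed to the CCP only
 * if no other cmd for the same tfm is already pending; otherwise it is
 * held on the list until the active cmd for that tfm completes.
 */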
static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd)
{
	struct ccp_crypto_cmd *active = NULL, *tmp;
	unsigned long flags;
	bool free_cmd = true;
	int ret;

	spin_lock_irqsave(&req_queue_lock, flags);

	/* Check if the cmd can/should be queued */
	if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) {
		if (!(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG)) {
			ret = -ENOSPC;
			goto e_lock;
		}
	}

	/* Look for an entry with the same tfm.  If there is a cmd
	 * with the same tfm in the list then the current cmd cannot
	 * be submitted to the CCP yet.
	 */
	list_for_each_entry(tmp, &req_queue.cmds, entry) {
		if (crypto_cmd->tfm != tmp->tfm)
			continue;
		active = tmp;
		break;
	}

	ret = -EINPROGRESS;
	if (!active) {
		ret = ccp_enqueue_cmd(crypto_cmd->cmd);
		if (!ccp_crypto_success(ret))
			goto e_lock;	/* Error, don't queue it */
	}

	if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) {
		ret = -EBUSY;
		if (req_queue.backlog == &req_queue.cmds)
			req_queue.backlog = &crypto_cmd->entry;
	}
	crypto_cmd->ret = ret;

	req_queue.cmd_count++;
	list_add_tail(&crypto_cmd->entry, &req_queue.cmds);

	free_cmd = false;

e_lock:
	spin_unlock_irqrestore(&req_queue_lock, flags);

	if (free_cmd)
		kfree(crypto_cmd);

	return ret;
}
/**
 * ccp_crypto_enqueue_request - queue a crypto async request for processing
 *				by the CCP
 *
 * @req: crypto_async_request struct to be processed
 * @cmd: ccp_cmd struct to be sent to the CCP
 */
int ccp_crypto_enqueue_request(struct crypto_async_request *req,
			       struct ccp_cmd *cmd)
{
	struct ccp_crypto_cmd *crypto_cmd;
	gfp_t gfp;

	gfp = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;

	crypto_cmd = kzalloc(sizeof(*crypto_cmd), gfp);
	if (!crypto_cmd)
		return -ENOMEM;

	/* The tfm pointer must be saved and not referenced from the
	 * crypto_async_request (req) pointer because it is used after
	 * completion callback for the request and the req pointer
	 * might not be valid anymore.
	 */
	crypto_cmd->cmd = cmd;
	crypto_cmd->req = req;
	crypto_cmd->tfm = req->tfm;

	cmd->callback = ccp_crypto_complete;
	cmd->data = crypto_cmd;

	if (req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
		cmd->flags |= CCP_CMD_MAY_BACKLOG;
	else
		cmd->flags &= ~CCP_CMD_MAY_BACKLOG;

	return ccp_crypto_enqueue_cmd(crypto_cmd);
}
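/* Fill the first unused entries of @table with the pages described by
 * @sg_add, returning the last scatterlist entry that was populated.
 */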
struct scatterlist *ccp_crypto_sg_table_add(struct sg_table *table,
					    struct scatterlist *sg_add)
{
	struct scatterlist *sg, *sg_last = NULL;

	/* Find the first unused entry in the table */
	for (sg = table->sgl; sg; sg = sg_next(sg))
		if (!sg_page(sg))
			break;
	if (WARN_ON(!sg))
		return NULL;

	for (; sg && sg_add; sg = sg_next(sg), sg_add = sg_next(sg_add)) {
		sg_set_page(sg, sg_page(sg_add), sg_add->length,
			    sg_add->offset);
		sg_last = sg;
	}
	if (WARN_ON(sg_add))
		return NULL;

	return sg_last;
}
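/* Register the supported algorithm families with the crypto API, honoring
 * the module parameters that disable individual families.
 */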
static int ccp_register_algs(void)
{
	int ret;

	if (!aes_disable) {
		ret = ccp_register_aes_algs(&cipher_algs);
		if (ret)
			return ret;

		ret = ccp_register_aes_cmac_algs(&hash_algs);
		if (ret)
			return ret;

		ret = ccp_register_aes_xts_algs(&cipher_algs);
		if (ret)
			return ret;

		ret = ccp_register_aes_aeads(&aead_algs);
		if (ret)
			return ret;
	}

	if (!des3_disable) {
		ret = ccp_register_des3_algs(&cipher_algs);
		if (ret)
			return ret;
	}

	if (!sha_disable) {
		ret = ccp_register_sha_algs(&hash_algs);
		if (ret)
			return ret;
	}

	if (!rsa_disable) {
		ret = ccp_register_rsa_algs(&akcipher_algs);
		if (ret)
			return ret;
	}

	return 0;
}
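/* Unregister and free every algorithm instance registered above */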
static void ccp_unregister_algs(void)
{
	struct ccp_crypto_ahash_alg *ahash_alg, *ahash_tmp;
	struct ccp_crypto_ablkcipher_alg *ablk_alg, *ablk_tmp;
	struct ccp_crypto_aead *aead_alg, *aead_tmp;
	struct ccp_crypto_akcipher_alg *akc_alg, *akc_tmp;

	list_for_each_entry_safe(ahash_alg, ahash_tmp, &hash_algs, entry) {
		crypto_unregister_ahash(&ahash_alg->alg);
		list_del(&ahash_alg->entry);
		kfree(ahash_alg);
	}

	list_for_each_entry_safe(ablk_alg, ablk_tmp, &cipher_algs, entry) {
		crypto_unregister_alg(&ablk_alg->alg);
		list_del(&ablk_alg->entry);
		kfree(ablk_alg);
	}

	list_for_each_entry_safe(aead_alg, aead_tmp, &aead_algs, entry) {
		crypto_unregister_aead(&aead_alg->alg);
		list_del(&aead_alg->entry);
		kfree(aead_alg);
	}

	list_for_each_entry_safe(akc_alg, akc_tmp, &akcipher_algs, entry) {
		crypto_unregister_akcipher(&akc_alg->alg);
		list_del(&akc_alg->entry);
		kfree(akc_alg);
	}
}
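/* Module init: make sure a CCP device is available, then set up the
 * shared request queue and register the algorithms.
 */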
static int ccp_crypto_init(void)
{
	int ret;

	ret = ccp_present();
	if (ret)
		return ret;

	spin_lock_init(&req_queue_lock);
	INIT_LIST_HEAD(&req_queue.cmds);
	req_queue.backlog = &req_queue.cmds;
	req_queue.cmd_count = 0;

	ret = ccp_register_algs();
	if (ret)
		ccp_unregister_algs();

	return ret;
}
static void ccp_crypto_exit(void)
{
	ccp_unregister_algs();
}

module_init(ccp_crypto_init);
module_exit(ccp_crypto_exit);