drivers/crypto/ccp/ccp-crypto-main.c

/*
 * AMD Cryptographic Coprocessor (CCP) crypto API support
 *
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/ccp.h>
#include <linux/scatterlist.h>
#include <crypto/internal/hash.h>

#include "ccp-crypto.h"

MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0.0");
MODULE_DESCRIPTION("AMD Cryptographic Coprocessor crypto API support");

/* List heads for the supported algorithms */
static LIST_HEAD(hash_algs);
static LIST_HEAD(cipher_algs);

/* For any tfm, requests for that tfm on the same CPU must be returned
 * in the order received.  With multiple queues available, the CCP can
 * process more than one cmd at a time.  Therefore we must maintain
 * a cmd list to ensure the proper ordering of requests on a given tfm/cpu
 * combination.
 */
struct ccp_crypto_cpu_queue {
	struct list_head cmds;
	struct list_head *backlog;
	unsigned int cmd_count;
};
#define CCP_CRYPTO_MAX_QLEN	50

struct ccp_crypto_percpu_queue {
	struct ccp_crypto_cpu_queue __percpu *cpu_queue;
};
static struct ccp_crypto_percpu_queue req_queue;

struct ccp_crypto_cmd {
	struct list_head entry;

	struct ccp_cmd *cmd;

	/* Save the crypto_tfm and crypto_async_request addresses
	 * separately to avoid any reference to a possibly invalid
	 * crypto_async_request structure after invoking the request
	 * callback
	 */
	struct crypto_async_request *req;
	struct crypto_tfm *tfm;

	/* Used for held command processing to determine state */
	int ret;

	int cpu;
};

struct ccp_crypto_cpu {
	struct work_struct work;
	struct completion completion;
	struct ccp_crypto_cmd *crypto_cmd;
	int err;
};
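
/* A cmd submission is treated as successful if the CCP accepted the cmd
 * outright or reported it as in progress or backlogged.
 */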
static inline bool ccp_crypto_success(int err)
{
	if (err && (err != -EINPROGRESS) && (err != -EBUSY))
		return false;

	return true;
}

/*
 * ccp_crypto_cmd_complete must be called while running on the appropriate
 * cpu and the caller must have done a get_cpu to disable preemption
 */
static struct ccp_crypto_cmd *ccp_crypto_cmd_complete(
	struct ccp_crypto_cmd *crypto_cmd, struct ccp_crypto_cmd **backlog)
{
	struct ccp_crypto_cpu_queue *cpu_queue;
	struct ccp_crypto_cmd *held = NULL, *tmp;

	*backlog = NULL;

	cpu_queue = this_cpu_ptr(req_queue.cpu_queue);

	/* Held cmds will be after the current cmd in the queue so start
	 * searching for a cmd with a matching tfm for submission.
	 */
	tmp = crypto_cmd;
	list_for_each_entry_continue(tmp, &cpu_queue->cmds, entry) {
		if (crypto_cmd->tfm != tmp->tfm)
			continue;
		held = tmp;
		break;
	}

	/* Process the backlog:
	 *   Because cmds can be executed from any point in the cmd list
	 *   special precautions have to be taken when handling the backlog.
	 */
	if (cpu_queue->backlog != &cpu_queue->cmds) {
		/* Skip over this cmd if it is the next backlog cmd */
		if (cpu_queue->backlog == &crypto_cmd->entry)
			cpu_queue->backlog = crypto_cmd->entry.next;

		*backlog = container_of(cpu_queue->backlog,
					struct ccp_crypto_cmd, entry);
		cpu_queue->backlog = cpu_queue->backlog->next;

		/* Skip over this cmd if it is now the next backlog cmd */
		if (cpu_queue->backlog == &crypto_cmd->entry)
			cpu_queue->backlog = crypto_cmd->entry.next;
	}

	/* Remove the cmd entry from the list of cmds */
	cpu_queue->cmd_count--;
	list_del(&crypto_cmd->entry);

	return held;
}
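
/* Worker run on the CPU where the cmd was originally queued: invoke the
 * request completion callbacks, update the per-cpu queue and submit the
 * next held cmd (one using the same tfm) to the CCP.
 */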
static void ccp_crypto_complete_on_cpu(struct work_struct *work)
{
	struct ccp_crypto_cpu *cpu_work =
		container_of(work, struct ccp_crypto_cpu, work);
	struct ccp_crypto_cmd *crypto_cmd = cpu_work->crypto_cmd;
	struct ccp_crypto_cmd *held, *next, *backlog;
	struct crypto_async_request *req = crypto_cmd->req;
	struct ccp_ctx *ctx = crypto_tfm_ctx(req->tfm);
	int cpu, ret;

	cpu = get_cpu();

	if (cpu_work->err == -EINPROGRESS) {
		/* Only propagate the -EINPROGRESS if necessary */
		if (crypto_cmd->ret == -EBUSY) {
			crypto_cmd->ret = -EINPROGRESS;
			req->complete(req, -EINPROGRESS);
		}

		goto e_cpu;
	}

	/* Operation has completed - update the queue before invoking
	 * the completion callbacks and retrieve the next cmd (cmd with
	 * a matching tfm) that can be submitted to the CCP.
	 */
	held = ccp_crypto_cmd_complete(crypto_cmd, &backlog);
	if (backlog) {
		backlog->ret = -EINPROGRESS;
		backlog->req->complete(backlog->req, -EINPROGRESS);
	}

	/* Transition the state from -EBUSY to -EINPROGRESS first */
	if (crypto_cmd->ret == -EBUSY)
		req->complete(req, -EINPROGRESS);

	/* Completion callbacks */
	ret = cpu_work->err;
	if (ctx->complete)
		ret = ctx->complete(req, ret);
	req->complete(req, ret);

	/* Submit the next cmd */
	while (held) {
		ret = ccp_enqueue_cmd(held->cmd);
		if (ccp_crypto_success(ret))
			break;

		/* Error occurred, report it and get the next entry */
		held->req->complete(held->req, ret);

		next = ccp_crypto_cmd_complete(held, &backlog);
		if (backlog) {
			backlog->ret = -EINPROGRESS;
			backlog->req->complete(backlog->req, -EINPROGRESS);
		}

		kfree(held);
		held = next;
	}

	kfree(crypto_cmd);

e_cpu:
	put_cpu();

	complete(&cpu_work->completion);
}
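
/* Completion callback invoked by the CCP driver.  The real work is pushed
 * to the CPU the cmd was queued on and this routine waits for it so the
 * callback remains synchronous from the caller's point of view.
 */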
static void ccp_crypto_complete(void *data, int err)
{
	struct ccp_crypto_cmd *crypto_cmd = data;
	struct ccp_crypto_cpu cpu_work;

	INIT_WORK(&cpu_work.work, ccp_crypto_complete_on_cpu);
	init_completion(&cpu_work.completion);
	cpu_work.crypto_cmd = crypto_cmd;
	cpu_work.err = err;

	schedule_work_on(crypto_cmd->cpu, &cpu_work.work);

	/* Keep the completion call synchronous */
	wait_for_completion(&cpu_work.completion);
}
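
/* Add a cmd to the current CPU's queue, submitting it to the CCP right
 * away unless a cmd using the same tfm is already queued; a full queue
 * results in -EBUSY and, when permitted, backlogging of the cmd.
 */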
static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd)
{
	struct ccp_crypto_cpu_queue *cpu_queue;
	struct ccp_crypto_cmd *active = NULL, *tmp;
	int cpu, ret;

	cpu = get_cpu();
	crypto_cmd->cpu = cpu;

	cpu_queue = this_cpu_ptr(req_queue.cpu_queue);

	/* Check if the cmd can/should be queued */
	if (cpu_queue->cmd_count >= CCP_CRYPTO_MAX_QLEN) {
		ret = -EBUSY;
		if (!(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG))
			goto e_cpu;
	}

	/* Look for an entry with the same tfm.  If there is a cmd
	 * with the same tfm in the list for this cpu then the current
	 * cmd cannot be submitted to the CCP yet.
	 */
	list_for_each_entry(tmp, &cpu_queue->cmds, entry) {
		if (crypto_cmd->tfm != tmp->tfm)
			continue;
		active = tmp;
		break;
	}

	ret = -EINPROGRESS;
	if (!active) {
		ret = ccp_enqueue_cmd(crypto_cmd->cmd);
		if (!ccp_crypto_success(ret))
			goto e_cpu;
	}

	if (cpu_queue->cmd_count >= CCP_CRYPTO_MAX_QLEN) {
		ret = -EBUSY;
		if (cpu_queue->backlog == &cpu_queue->cmds)
			cpu_queue->backlog = &crypto_cmd->entry;
	}
	crypto_cmd->ret = ret;

	cpu_queue->cmd_count++;
	list_add_tail(&crypto_cmd->entry, &cpu_queue->cmds);

e_cpu:
	put_cpu();

	return ret;
}

/**
 * ccp_crypto_enqueue_request - queue a crypto async request for processing
 *				by the CCP
 *
 * @req: crypto_async_request struct to be processed
 * @cmd: ccp_cmd struct to be sent to the CCP
 */
int ccp_crypto_enqueue_request(struct crypto_async_request *req,
			       struct ccp_cmd *cmd)
{
	struct ccp_crypto_cmd *crypto_cmd;
	gfp_t gfp;
	int ret;

	gfp = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;

	crypto_cmd = kzalloc(sizeof(*crypto_cmd), gfp);
	if (!crypto_cmd)
		return -ENOMEM;

	/* The tfm pointer must be saved and not referenced from the
	 * crypto_async_request (req) pointer because it is used after
	 * the completion callback for the request and the req pointer
	 * might not be valid anymore.
	 */
	crypto_cmd->cmd = cmd;
	crypto_cmd->req = req;
	crypto_cmd->tfm = req->tfm;

	cmd->callback = ccp_crypto_complete;
	cmd->data = crypto_cmd;

	if (req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
		cmd->flags |= CCP_CMD_MAY_BACKLOG;
	else
		cmd->flags &= ~CCP_CMD_MAY_BACKLOG;

	ret = ccp_crypto_enqueue_cmd(crypto_cmd);
	if (!ccp_crypto_success(ret))
		kfree(crypto_cmd);

	return ret;
}
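
/* Copy the entries of sg_add into the first unused (no page set) slots of
 * table->sgl and return the last scatterlist entry that was filled in.
 */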
struct scatterlist *ccp_crypto_sg_table_add(struct sg_table *table,
					    struct scatterlist *sg_add)
{
	struct scatterlist *sg, *sg_last = NULL;

	for (sg = table->sgl; sg; sg = sg_next(sg))
		if (!sg_page(sg))
			break;
	BUG_ON(!sg);

	for (; sg && sg_add; sg = sg_next(sg), sg_add = sg_next(sg_add)) {
		sg_set_page(sg, sg_page(sg_add), sg_add->length,
			    sg_add->offset);
		sg_last = sg;
	}
	BUG_ON(sg_add);

	return sg_last;
}
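
/* Register the AES, AES CMAC, AES XTS and SHA implementations provided by
 * this driver with the crypto API.
 */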
static int ccp_register_algs(void)
{
	int ret;

	ret = ccp_register_aes_algs(&cipher_algs);
	if (ret)
		return ret;

	ret = ccp_register_aes_cmac_algs(&hash_algs);
	if (ret)
		return ret;

	ret = ccp_register_aes_xts_algs(&cipher_algs);
	if (ret)
		return ret;

	ret = ccp_register_sha_algs(&hash_algs);
	if (ret)
		return ret;

	return 0;
}
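
/* Unregister and free every hash and cipher algorithm that was registered */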
static void ccp_unregister_algs(void)
{
	struct ccp_crypto_ahash_alg *ahash_alg, *ahash_tmp;
	struct ccp_crypto_ablkcipher_alg *ablk_alg, *ablk_tmp;

	list_for_each_entry_safe(ahash_alg, ahash_tmp, &hash_algs, entry) {
		crypto_unregister_ahash(&ahash_alg->alg);
		list_del(&ahash_alg->entry);
		kfree(ahash_alg);
	}

	list_for_each_entry_safe(ablk_alg, ablk_tmp, &cipher_algs, entry) {
		crypto_unregister_alg(&ablk_alg->alg);
		list_del(&ablk_alg->entry);
		kfree(ablk_alg);
	}
}
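
/* Allocate and initialize the per-cpu cmd queues */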
static int ccp_init_queues(void)
{
	struct ccp_crypto_cpu_queue *cpu_queue;
	int cpu;

	req_queue.cpu_queue = alloc_percpu(struct ccp_crypto_cpu_queue);
	if (!req_queue.cpu_queue)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(req_queue.cpu_queue, cpu);
		INIT_LIST_HEAD(&cpu_queue->cmds);
		cpu_queue->backlog = &cpu_queue->cmds;
		cpu_queue->cmd_count = 0;
	}

	return 0;
}
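
/* Free the per-cpu cmd queues; every queue must already be empty */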
static void ccp_fini_queue(void)
{
	struct ccp_crypto_cpu_queue *cpu_queue;
	int cpu;

	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(req_queue.cpu_queue, cpu);
		BUG_ON(!list_empty(&cpu_queue->cmds));
	}
	free_percpu(req_queue.cpu_queue);
}
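
/* Module initialization: set up the per-cpu queues and register the
 * supported algorithms, tearing everything back down on failure.
 */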
static int ccp_crypto_init(void)
{
	int ret;

	ret = ccp_init_queues();
	if (ret)
		return ret;

	ret = ccp_register_algs();
	if (ret) {
		ccp_unregister_algs();
		ccp_fini_queue();
	}

	return ret;
}
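
/* Module cleanup: unregister all algorithms and free the per-cpu queues */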
static void ccp_crypto_exit(void)
{
	ccp_unregister_algs();
	ccp_fini_queue();
}

module_init(ccp_crypto_init);
module_exit(ccp_crypto_exit);