// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handle async block request by crypto hardware engine.
 *
 * Copyright (C) 2016 Linaro, Inc.
 *
 * Author: Baolin Wang <baolin.wang@linaro.org>
 */

#include <linux/err.h>
#include <linux/delay.h>
#include <crypto/engine.h>
#include <uapi/linux/sched/types.h>
#include "internal.h"

#define CRYPTO_ENGINE_MAX_QLEN 10
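/*
 * CRYPTO_ENGINE_MAX_QLEN is the default depth of the software request
 * queue; crypto_engine_alloc_init() below passes it to
 * crypto_engine_alloc_init_and_set(). Drivers that need a deeper queue
 * use crypto_engine_alloc_init_and_set() directly with their own @qlen.
 */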
/**
 * crypto_finalize_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
static void crypto_finalize_request(struct crypto_engine *engine,
				    struct crypto_async_request *req, int err)
{
	unsigned long flags;
	bool finalize_req = false;
	int ret;
	struct crypto_engine_ctx *enginectx;

	/*
	 * If hardware cannot enqueue more requests
	 * and the retry mechanism is not supported,
	 * make sure we are completing the current request.
	 */
	if (!engine->retry_support) {
		spin_lock_irqsave(&engine->queue_lock, flags);
		if (engine->cur_req == req) {
			finalize_req = true;
			engine->cur_req = NULL;
		}
		spin_unlock_irqrestore(&engine->queue_lock, flags);
	}

	if (finalize_req || engine->retry_support) {
		enginectx = crypto_tfm_ctx(req->tfm);
		if (enginectx->op.prepare_request &&
		    enginectx->op.unprepare_request) {
			ret = enginectx->op.unprepare_request(engine, req);
			if (ret)
				dev_err(engine->dev, "failed to unprepare request\n");
		}
	}
	req->complete(req, err);

	kthread_queue_work(engine->kworker, &engine->pump_requests);
}
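/*
 * Note: crypto_finalize_request() is internal to the engine. Drivers
 * complete requests through the typed wrappers further below
 * (crypto_finalize_aead_request() and friends), typically from their
 * hardware-completion interrupt or callback. Completing a request also
 * re-queues the pump work so the next queued request gets dispatched.
 */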
/**
 * crypto_pump_requests - dequeue one request from engine queue to process
 * @engine: the hardware engine
 * @in_kthread: true if we are in the context of the request pump thread
 *
 * This function checks if there is any request in the engine queue that
 * needs processing and, if so, calls out to the driver to initialize the
 * hardware and handle each request.
 */
static void crypto_pump_requests(struct crypto_engine *engine,
				 bool in_kthread)
{
	struct crypto_async_request *async_req, *backlog;
	unsigned long flags;
	bool was_busy = false;
	int ret;
	struct crypto_engine_ctx *enginectx;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/* Make sure we are not already running a request */
	if (!engine->retry_support && engine->cur_req)
		goto out;

	/* If another context is idling then defer */
	if (engine->idling) {
		kthread_queue_work(engine->kworker, &engine->pump_requests);
		goto out;
	}

	/* Check if the engine queue is idle */
	if (!crypto_queue_len(&engine->queue) || !engine->running) {
		if (!engine->busy)
			goto out;

		/* Only do teardown in the thread */
		if (!in_kthread) {
			kthread_queue_work(engine->kworker,
					   &engine->pump_requests);
			goto out;
		}

		engine->busy = false;
		engine->idling = true;
		spin_unlock_irqrestore(&engine->queue_lock, flags);

		if (engine->unprepare_crypt_hardware &&
		    engine->unprepare_crypt_hardware(engine))
			dev_err(engine->dev, "failed to unprepare crypt hardware\n");

		spin_lock_irqsave(&engine->queue_lock, flags);
		engine->idling = false;
		goto out;
	}

start_request:
	/* Get the first request from the engine queue to handle */
	backlog = crypto_get_backlog(&engine->queue);
	async_req = crypto_dequeue_request(&engine->queue);
	if (!async_req)
		goto out;

	/*
	 * If hardware doesn't support the retry mechanism,
	 * keep track of the request we are processing now.
	 * We'll need it on completion (crypto_finalize_request).
	 */
	if (!engine->retry_support)
		engine->cur_req = async_req;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	if (engine->busy)
		was_busy = true;
	else
		engine->busy = true;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	/* By now we have successfully dequeued a request to process */
	if (!was_busy && engine->prepare_crypt_hardware) {
		ret = engine->prepare_crypt_hardware(engine);
		if (ret) {
			dev_err(engine->dev, "failed to prepare crypt hardware\n");
			goto req_err_2;
		}
	}

	enginectx = crypto_tfm_ctx(async_req->tfm);

	if (enginectx->op.prepare_request) {
		ret = enginectx->op.prepare_request(engine, async_req);
		if (ret) {
			dev_err(engine->dev, "failed to prepare request: %d\n",
				ret);
			goto req_err_2;
		}
	}
	if (!enginectx->op.do_one_request) {
		dev_err(engine->dev, "failed to do request\n");
		ret = -EINVAL;
		goto req_err_1;
	}

	ret = enginectx->op.do_one_request(engine, async_req);

	/* Request unsuccessfully executed by hardware */
	if (ret < 0) {
		/*
		 * If hardware queue is full (-ENOSPC), requeue request
		 * regardless of backlog flag.
		 * Otherwise, unprepare and complete the request.
		 */
		if (!engine->retry_support ||
		    (ret != -ENOSPC)) {
			dev_err(engine->dev,
				"Failed to do one request from queue: %d\n",
				ret);
			goto req_err_1;
		}
		/*
		 * If retry mechanism is supported,
		 * unprepare current request and
		 * enqueue it back into crypto-engine queue.
		 */
		if (enginectx->op.unprepare_request) {
			ret = enginectx->op.unprepare_request(engine,
							      async_req);
			if (ret)
				dev_err(engine->dev,
					"failed to unprepare request\n");
		}
		spin_lock_irqsave(&engine->queue_lock, flags);
		/*
		 * If hardware was unable to execute request, enqueue it
		 * back in front of crypto-engine queue, to keep the order
		 * of requests.
		 */
		crypto_enqueue_request_head(&engine->queue, async_req);

		kthread_queue_work(engine->kworker, &engine->pump_requests);
		goto out;
	}

	goto retry;

req_err_1:
	if (enginectx->op.unprepare_request) {
		ret = enginectx->op.unprepare_request(engine, async_req);
		if (ret)
			dev_err(engine->dev, "failed to unprepare request\n");
	}

req_err_2:
	async_req->complete(async_req, ret);

retry:
	/* If retry mechanism is supported, send new requests to engine */
	if (engine->retry_support) {
		spin_lock_irqsave(&engine->queue_lock, flags);
		goto start_request;
	}
	return;

out:
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	/*
	 * Batching requests is possible only if
	 * hardware can enqueue multiple requests.
	 */
	if (engine->do_batch_requests) {
		ret = engine->do_batch_requests(engine);
		if (ret)
			dev_err(engine->dev, "failed to do batch requests: %d\n",
				ret);
	}

	return;
}
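/*
 * Sketch of the do_one_request() contract enforced by the pump loop
 * above (illustrative only; the driver-side names my_dev and
 * my_start_hw are hypothetical):
 *
 *	static int my_do_one_request(struct crypto_engine *engine, void *areq)
 *	{
 *		struct skcipher_request *req =
 *			container_of(areq, struct skcipher_request, base);
 *
 *		// Kick off the hardware and return; the request is
 *		// completed later via crypto_finalize_skcipher_request()
 *		// from the completion interrupt. With retry_support set,
 *		// returning -ENOSPC makes the pump requeue the request at
 *		// the head of the queue instead of failing it.
 *		return my_start_hw(my_dev, req);
 *	}
 */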
static void crypto_pump_work(struct kthread_work *work)
{
	struct crypto_engine *engine =
		container_of(work, struct crypto_engine, pump_requests);

	crypto_pump_requests(engine, true);
}
/**
 * crypto_transfer_request - transfer the new request into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be added to the engine queue
 * @need_pump: whether to kick the pump thread if the engine is not busy
 */
static int crypto_transfer_request(struct crypto_engine *engine,
				   struct crypto_async_request *req,
				   bool need_pump)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (!engine->running) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -ESHUTDOWN;
	}

	ret = crypto_enqueue_request(&engine->queue, req);

	if (!engine->busy && need_pump)
		kthread_queue_work(engine->kworker, &engine->pump_requests);

	spin_unlock_irqrestore(&engine->queue_lock, flags);
	return ret;
}
/**
 * crypto_transfer_request_to_engine - transfer one request into the
 * engine queue and kick the pump
 * @engine: the hardware engine
 * @req: the request that needs to be added to the engine queue
 */
static int crypto_transfer_request_to_engine(struct crypto_engine *engine,
					     struct crypto_async_request *req)
{
	return crypto_transfer_request(engine, req, true);
}
/**
 * crypto_transfer_aead_request_to_engine - transfer one aead_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be added to the engine queue
 */
int crypto_transfer_aead_request_to_engine(struct crypto_engine *engine,
					   struct aead_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_aead_request_to_engine);

/**
 * crypto_transfer_akcipher_request_to_engine - transfer one akcipher_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be added to the engine queue
 */
int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine,
					       struct akcipher_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_akcipher_request_to_engine);

/**
 * crypto_transfer_hash_request_to_engine - transfer one ahash_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be added to the engine queue
 */
int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
					   struct ahash_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);

/**
 * crypto_transfer_skcipher_request_to_engine - transfer one skcipher_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be added to the engine queue
 */
int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine,
					       struct skcipher_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_skcipher_request_to_engine);
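/*
 * Illustrative use of the transfer helpers (hypothetical driver code;
 * my_engine is assumed to be the driver's crypto_engine set up at probe
 * time): the algorithm entry point does no work itself, it only queues
 * the request to the engine.
 *
 *	static int my_skcipher_encrypt(struct skcipher_request *req)
 *	{
 *		return crypto_transfer_skcipher_request_to_engine(my_engine,
 *								  req);
 *	}
 *
 * The return value is normally -EINPROGRESS (or -EBUSY for a request
 * accepted onto the backlog), which the entry point propagates to its
 * caller.
 */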
/**
 * crypto_finalize_aead_request - finalize one aead_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_aead_request(struct crypto_engine *engine,
				  struct aead_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_aead_request);

/**
 * crypto_finalize_akcipher_request - finalize one akcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_akcipher_request(struct crypto_engine *engine,
				      struct akcipher_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_akcipher_request);

/**
 * crypto_finalize_hash_request - finalize one ahash_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_hash_request(struct crypto_engine *engine,
				  struct ahash_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);

/**
 * crypto_finalize_skcipher_request - finalize one skcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_skcipher_request(struct crypto_engine *engine,
				      struct skcipher_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_skcipher_request);
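/*
 * Illustrative completion path (hypothetical driver code; my_dev and its
 * fields are assumptions): once the hardware finishes the request that
 * do_one_request() started, the driver finalizes it, e.g. from its
 * interrupt handler. This completes the request and kicks the pump for
 * the next one.
 *
 *	static irqreturn_t my_irq_handler(int irq, void *data)
 *	{
 *		struct my_dev *dd = data;
 *
 *		crypto_finalize_skcipher_request(dd->engine, dd->req, 0);
 *		return IRQ_HANDLED;
 *	}
 */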
/**
 * crypto_engine_start - start the hardware engine
 * @engine: the hardware engine that needs to be started
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_start(struct crypto_engine *engine)
{
	unsigned long flags;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (engine->running || engine->busy) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -EBUSY;
	}

	engine->running = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	kthread_queue_work(engine->kworker, &engine->pump_requests);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_start);
/**
 * crypto_engine_stop - stop the hardware engine
 * @engine: the hardware engine that needs to be stopped
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_stop(struct crypto_engine *engine)
{
	unsigned long flags;
	unsigned int limit = 500;
	int ret = 0;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/*
	 * If the engine queue is not empty or the engine is busy, wait
	 * (up to 500 * 20ms = 10 seconds) for the pending requests to be
	 * pumped out of the engine queue.
	 */
	while ((crypto_queue_len(&engine->queue) || engine->busy) && limit--) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		msleep(20);
		spin_lock_irqsave(&engine->queue_lock, flags);
	}

	if (crypto_queue_len(&engine->queue) || engine->busy)
		ret = -EBUSY;
	else
		engine->running = false;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (ret)
		dev_warn(engine->dev, "could not stop engine\n");

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_stop);
/**
 * crypto_engine_alloc_init_and_set - allocate crypto hardware engine structure
 * and initialize it by setting the maximum number of entries in the software
 * crypto-engine queue.
 * @dev: the device the hardware engine is attached to
 * @retry_support: whether hardware has support for retry mechanism
 * @cbk_do_batch: pointer to a callback function to be invoked when executing
 *                a batch of requests.
 *                This has the form:
 *                callback(struct crypto_engine *engine)
 *                where:
 *                @engine: the crypto engine structure.
 * @rt: whether this queue is set to run as a realtime task
 * @qlen: maximum size of the crypto-engine queue
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
						       bool retry_support,
						       int (*cbk_do_batch)(struct crypto_engine *engine),
						       bool rt, int qlen)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO / 2 };
	struct crypto_engine *engine;

	if (!dev)
		return NULL;

	engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return NULL;

	engine->dev = dev;
	engine->rt = rt;
	engine->running = false;
	engine->busy = false;
	engine->idling = false;
	engine->retry_support = retry_support;
	engine->priv_data = dev;
	/*
	 * Batching requests is possible only if
	 * hardware has support for retry mechanism.
	 */
	engine->do_batch_requests = retry_support ? cbk_do_batch : NULL;

	snprintf(engine->name, sizeof(engine->name),
		 "%s-engine", dev_name(dev));

	crypto_init_queue(&engine->queue, qlen);
	spin_lock_init(&engine->queue_lock);

	engine->kworker = kthread_create_worker(0, "%s", engine->name);
	if (IS_ERR(engine->kworker)) {
		dev_err(dev, "failed to create crypto request pump task\n");
		return NULL;
	}
	kthread_init_work(&engine->pump_requests, crypto_pump_work);

	if (engine->rt) {
		dev_info(dev, "will run requests pump with realtime priority\n");
		sched_setscheduler(engine->kworker->task, SCHED_FIFO, &param);
	}

	return engine;
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init_and_set);
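/*
 * Typical setup at driver probe time (illustrative sketch; dd,
 * my_do_batch and the error label are hypothetical):
 *
 *	dd->engine = crypto_engine_alloc_init_and_set(dev, true,
 *						      my_do_batch,
 *						      false, 64);
 *	if (!dd->engine)
 *		return -ENOMEM;
 *
 *	ret = crypto_engine_start(dd->engine);
 *	if (ret)
 *		goto err_engine;
 *
 * Drivers that need neither batching nor the retry mechanism usually
 * call crypto_engine_alloc_init() below instead.
 */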
/**
 * crypto_engine_alloc_init - allocate crypto hardware engine structure and
 * initialize it.
 * @dev: the device the hardware engine is attached to
 * @rt: whether this queue is set to run as a realtime task
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
{
	return crypto_engine_alloc_init_and_set(dev, false, NULL, rt,
						CRYPTO_ENGINE_MAX_QLEN);
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);
/**
 * crypto_engine_exit - free the resources of hardware engine when exit
 * @engine: the hardware engine that needs to be freed
 *
 * Return 0 for success.
 */
int crypto_engine_exit(struct crypto_engine *engine)
{
	int ret;

	ret = crypto_engine_stop(engine);
	if (ret)
		return ret;

	kthread_destroy_worker(engine->kworker);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_exit);
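/*
 * The teardown counterpart at driver remove time is a single call
 * (illustrative; dd is hypothetical):
 *
 *	crypto_engine_exit(dd->engine);
 *
 * which waits for pending requests to drain, stops the queue, then
 * destroys the kworker.
 */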
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto hardware engine framework");