crypto/crypto_engine.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handle async block request by crypto hardware engine.
 *
 * Copyright (C) 2016 Linaro, Inc.
 *
 * Author: Baolin Wang <baolin.wang@linaro.org>
 */

#include <linux/err.h>
#include <linux/delay.h>
#include <crypto/engine.h>
#include <uapi/linux/sched/types.h>
#include "internal.h"

#define CRYPTO_ENGINE_MAX_QLEN 10
/**
 * crypto_finalize_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
static void crypto_finalize_request(struct crypto_engine *engine,
                                    struct crypto_async_request *req, int err)
{
        unsigned long flags;
        bool finalize_cur_req = false;
        int ret;
        struct crypto_engine_ctx *enginectx;

        spin_lock_irqsave(&engine->queue_lock, flags);
        if (engine->cur_req == req)
                finalize_cur_req = true;
        spin_unlock_irqrestore(&engine->queue_lock, flags);

        if (finalize_cur_req) {
                enginectx = crypto_tfm_ctx(req->tfm);
                if (engine->cur_req_prepared &&
                    enginectx->op.unprepare_request) {
                        ret = enginectx->op.unprepare_request(engine, req);
                        if (ret)
                                dev_err(engine->dev, "failed to unprepare request\n");
                }
                spin_lock_irqsave(&engine->queue_lock, flags);
                engine->cur_req = NULL;
                engine->cur_req_prepared = false;
                spin_unlock_irqrestore(&engine->queue_lock, flags);
        }

        req->complete(req, err);

        kthread_queue_work(engine->kworker, &engine->pump_requests);
}
/**
 * crypto_pump_requests - dequeue one request from engine queue to process
 * @engine: the hardware engine
 * @in_kthread: true if we are in the context of the request pump thread
 *
 * This function checks if there is any request in the engine queue that
 * needs processing and, if so, calls out to the driver to initialize the
 * hardware and handle each request.
 */
static void crypto_pump_requests(struct crypto_engine *engine,
                                 bool in_kthread)
{
        struct crypto_async_request *async_req, *backlog;
        unsigned long flags;
        bool was_busy = false;
        int ret;
        struct crypto_engine_ctx *enginectx;

        spin_lock_irqsave(&engine->queue_lock, flags);

        /* Make sure we are not already running a request */
        if (engine->cur_req)
                goto out;

        /* If another context is idling then defer */
        if (engine->idling) {
                kthread_queue_work(engine->kworker, &engine->pump_requests);
                goto out;
        }

        /* Check if the engine queue is idle */
        if (!crypto_queue_len(&engine->queue) || !engine->running) {
                if (!engine->busy)
                        goto out;

                /* Only do teardown in the thread */
                if (!in_kthread) {
                        kthread_queue_work(engine->kworker,
                                           &engine->pump_requests);
                        goto out;
                }

                engine->busy = false;
                engine->idling = true;
                spin_unlock_irqrestore(&engine->queue_lock, flags);

                if (engine->unprepare_crypt_hardware &&
                    engine->unprepare_crypt_hardware(engine))
                        dev_err(engine->dev, "failed to unprepare crypt hardware\n");

                spin_lock_irqsave(&engine->queue_lock, flags);
                engine->idling = false;
                goto out;
        }

        /* Get the first request from the engine queue to handle */
        backlog = crypto_get_backlog(&engine->queue);
        async_req = crypto_dequeue_request(&engine->queue);
        if (!async_req)
                goto out;

        engine->cur_req = async_req;
        if (backlog)
                backlog->complete(backlog, -EINPROGRESS);

        if (engine->busy)
                was_busy = true;
        else
                engine->busy = true;

        spin_unlock_irqrestore(&engine->queue_lock, flags);

        /* At this point we have successfully dequeued a request to process */
        if (!was_busy && engine->prepare_crypt_hardware) {
                ret = engine->prepare_crypt_hardware(engine);
                if (ret) {
                        dev_err(engine->dev, "failed to prepare crypt hardware\n");
                        goto req_err;
                }
        }

        enginectx = crypto_tfm_ctx(async_req->tfm);

        if (enginectx->op.prepare_request) {
                ret = enginectx->op.prepare_request(engine, async_req);
                if (ret) {
                        dev_err(engine->dev, "failed to prepare request: %d\n",
                                ret);
                        goto req_err;
                }
                engine->cur_req_prepared = true;
        }
        if (!enginectx->op.do_one_request) {
                dev_err(engine->dev, "failed to do request\n");
                ret = -EINVAL;
                goto req_err;
        }
        ret = enginectx->op.do_one_request(engine, async_req);
        if (ret) {
                dev_err(engine->dev, "Failed to do one request from queue: %d\n", ret);
                goto req_err;
        }
        return;

req_err:
        crypto_finalize_request(engine, async_req, ret);
        return;

out:
        spin_unlock_irqrestore(&engine->queue_lock, flags);
}
static void crypto_pump_work(struct kthread_work *work)
{
        struct crypto_engine *engine =
                container_of(work, struct crypto_engine, pump_requests);

        crypto_pump_requests(engine, true);
}
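/*
 * Engine state summary, as implemented above: cur_req points at the
 * request the hardware currently owns, busy is true while the hardware
 * is processing, and idling is true only for the short window in which
 * crypto_pump_requests() tears the hardware down after the queue runs
 * empty. Every transition of these fields happens under queue_lock.
 */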
/**
 * crypto_transfer_request - transfer the new request into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 * @need_pump: true to kick the request pump if the engine is not busy
 */
static int crypto_transfer_request(struct crypto_engine *engine,
                                   struct crypto_async_request *req,
                                   bool need_pump)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&engine->queue_lock, flags);

        if (!engine->running) {
                spin_unlock_irqrestore(&engine->queue_lock, flags);
                return -ESHUTDOWN;
        }

        ret = crypto_enqueue_request(&engine->queue, req);

        if (!engine->busy && need_pump)
                kthread_queue_work(engine->kworker, &engine->pump_requests);

        spin_unlock_irqrestore(&engine->queue_lock, flags);
        return ret;
}
/**
 * crypto_transfer_request_to_engine - transfer one request into the
 * engine queue and kick the request pump
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
static int crypto_transfer_request_to_engine(struct crypto_engine *engine,
                                             struct crypto_async_request *req)
{
        return crypto_transfer_request(engine, req, true);
}
/**
 * crypto_transfer_ablkcipher_request_to_engine - transfer one
 * ablkcipher_request into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 * TODO: Remove this function when skcipher conversion is finished
 */
int crypto_transfer_ablkcipher_request_to_engine(struct crypto_engine *engine,
                                                 struct ablkcipher_request *req)
{
        return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_ablkcipher_request_to_engine);
/**
 * crypto_transfer_aead_request_to_engine - transfer one aead_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_aead_request_to_engine(struct crypto_engine *engine,
                                           struct aead_request *req)
{
        return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_aead_request_to_engine);
/**
 * crypto_transfer_akcipher_request_to_engine - transfer one akcipher_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine,
                                               struct akcipher_request *req)
{
        return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_akcipher_request_to_engine);
/**
 * crypto_transfer_hash_request_to_engine - transfer one ahash_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
                                           struct ahash_request *req)
{
        return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);
/**
 * crypto_transfer_skcipher_request_to_engine - transfer one skcipher_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine,
                                               struct skcipher_request *req)
{
        return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_skcipher_request_to_engine);
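/*
 * Usage sketch (hypothetical driver code, not part of this file): a
 * driver typically hands work to the engine from its skcipher .encrypt
 * callback and returns the enqueue status, which is -EINPROGRESS (or
 * -EBUSY for a backlogged request) when the request was accepted:
 *
 *	static int my_skcipher_encrypt(struct skcipher_request *req)
 *	{
 *		struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 *		struct my_dev *dd = my_get_dev(crypto_skcipher_ctx(tfm));
 *
 *		return crypto_transfer_skcipher_request_to_engine(dd->engine,
 *								  req);
 *	}
 *
 * struct my_dev and my_get_dev() stand in for driver-private state.
 */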
/**
 * crypto_finalize_ablkcipher_request - finalize one ablkcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 * TODO: Remove this function when skcipher conversion is finished
 */
void crypto_finalize_ablkcipher_request(struct crypto_engine *engine,
                                        struct ablkcipher_request *req, int err)
{
        return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_ablkcipher_request);
/**
 * crypto_finalize_aead_request - finalize one aead_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_aead_request(struct crypto_engine *engine,
                                  struct aead_request *req, int err)
{
        return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_aead_request);
/**
 * crypto_finalize_akcipher_request - finalize one akcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_akcipher_request(struct crypto_engine *engine,
                                      struct akcipher_request *req, int err)
{
        return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_akcipher_request);
/**
 * crypto_finalize_hash_request - finalize one ahash_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_hash_request(struct crypto_engine *engine,
                                  struct ahash_request *req, int err)
{
        return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);
/**
 * crypto_finalize_skcipher_request - finalize one skcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_skcipher_request(struct crypto_engine *engine,
                                      struct skcipher_request *req, int err)
{
        return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_skcipher_request);
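/*
 * Usage sketch (hypothetical driver code, not part of this file): when
 * the hardware signals completion, the driver hands the request back so
 * the engine can run unprepare_request(), call the request's completion
 * callback and pump the next queued request:
 *
 *	static void my_done_tasklet(unsigned long data)
 *	{
 *		struct my_dev *dd = (struct my_dev *)data;
 *		int err = my_read_status(dd);
 *
 *		crypto_finalize_skcipher_request(dd->engine, dd->req, err);
 *	}
 *
 * struct my_dev, my_read_status() and dd->req stand in for
 * driver-private state.
 */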
/**
 * crypto_engine_start - start the hardware engine
 * @engine: the hardware engine that needs to be started
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_start(struct crypto_engine *engine)
{
        unsigned long flags;

        spin_lock_irqsave(&engine->queue_lock, flags);

        if (engine->running || engine->busy) {
                spin_unlock_irqrestore(&engine->queue_lock, flags);
                return -EBUSY;
        }

        engine->running = true;
        spin_unlock_irqrestore(&engine->queue_lock, flags);

        kthread_queue_work(engine->kworker, &engine->pump_requests);

        return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_start);
/**
 * crypto_engine_stop - stop the hardware engine
 * @engine: the hardware engine that needs to be stopped
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_stop(struct crypto_engine *engine)
{
        unsigned long flags;
        unsigned int limit = 500;
        int ret = 0;

        spin_lock_irqsave(&engine->queue_lock, flags);

        /*
         * If the engine queue is not empty or the engine is busy, wait
         * for a while (up to 500 * 20ms = 10 seconds) so the pump can
         * drain the remaining requests from the engine queue.
         */
        while ((crypto_queue_len(&engine->queue) || engine->busy) && limit--) {
                spin_unlock_irqrestore(&engine->queue_lock, flags);
                msleep(20);
                spin_lock_irqsave(&engine->queue_lock, flags);
        }

        if (crypto_queue_len(&engine->queue) || engine->busy)
                ret = -EBUSY;
        else
                engine->running = false;

        spin_unlock_irqrestore(&engine->queue_lock, flags);

        if (ret)
                dev_warn(engine->dev, "could not stop engine\n");

        return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_stop);
/**
 * crypto_engine_alloc_init - allocate crypto hardware engine structure and
 * initialize it.
 * @dev: the device attached to the hardware engine
 * @rt: whether the request pump is set to run as a realtime task
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
{
        struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
        struct crypto_engine *engine;

        if (!dev)
                return NULL;

        engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
        if (!engine)
                return NULL;

        engine->dev = dev;
        engine->rt = rt;
        engine->running = false;
        engine->busy = false;
        engine->idling = false;
        engine->cur_req_prepared = false;
        engine->priv_data = dev;
        snprintf(engine->name, sizeof(engine->name),
                 "%s-engine", dev_name(dev));

        crypto_init_queue(&engine->queue, CRYPTO_ENGINE_MAX_QLEN);
        spin_lock_init(&engine->queue_lock);

        engine->kworker = kthread_create_worker(0, "%s", engine->name);
        if (IS_ERR(engine->kworker)) {
                dev_err(dev, "failed to create crypto request pump task\n");
                return NULL;
        }
        kthread_init_work(&engine->pump_requests, crypto_pump_work);

        if (engine->rt) {
                dev_info(dev, "will run requests pump with realtime priority\n");
                sched_setscheduler(engine->kworker->task, SCHED_FIFO, &param);
        }

        return engine;
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);
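/*
 * Usage sketch (hypothetical driver code, not part of this file): a
 * typical probe() allocates the engine, optionally installs the
 * hardware-wide prepare/unprepare hooks, and starts the pump:
 *
 *	dd->engine = crypto_engine_alloc_init(dev, true);
 *	if (!dd->engine)
 *		return -ENOMEM;
 *
 *	dd->engine->prepare_crypt_hardware = my_prepare_hw;
 *	ret = crypto_engine_start(dd->engine);
 *	if (ret) {
 *		crypto_engine_exit(dd->engine);
 *		return ret;
 *	}
 *
 * The per-request callbacks (prepare_request, unprepare_request,
 * do_one_request) live in the struct crypto_engine_ctx at the start of
 * the tfm context, not in the engine itself; dd and my_prepare_hw are
 * placeholders.
 */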
/**
 * crypto_engine_exit - free the resources of hardware engine when exit
 * @engine: the hardware engine that needs to be freed
 *
 * Return 0 for success.
 */
int crypto_engine_exit(struct crypto_engine *engine)
{
        int ret;

        ret = crypto_engine_stop(engine);
        if (ret)
                return ret;

        kthread_destroy_worker(engine->kworker);

        return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_exit);
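/*
 * Usage sketch (hypothetical driver code, not part of this file): the
 * matching remove() path simply unwinds the probe sketch above:
 *
 *	crypto_engine_exit(dd->engine);
 *
 * The engine structure itself is devm-allocated, so it is freed
 * together with the device.
 */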
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto hardware engine framework");