// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handle async block request by crypto hardware engine.
 *
 * Copyright (C) 2016 Linaro, Inc.
 *
 * Author: Baolin Wang <baolin.wang@linaro.org>
 */

#include <linux/err.h>
#include <linux/delay.h>
#include <crypto/engine.h>
#include <uapi/linux/sched/types.h>
#include "internal.h"

#define CRYPTO_ENGINE_MAX_QLEN 10
/**
 * crypto_finalize_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
static void crypto_finalize_request(struct crypto_engine *engine,
				    struct crypto_async_request *req, int err)
{
	unsigned long flags;
	bool finalize_req = false;
	int ret;
	struct crypto_engine_ctx *enginectx;

	/*
	 * If hardware cannot enqueue more requests
	 * and retry mechanism is not supported
	 * make sure we are completing the current request
	 */
	if (!engine->retry_support) {
		spin_lock_irqsave(&engine->queue_lock, flags);
		if (engine->cur_req == req) {
			finalize_req = true;
			engine->cur_req = NULL;
		}
		spin_unlock_irqrestore(&engine->queue_lock, flags);
	}

	if (finalize_req || engine->retry_support) {
		enginectx = crypto_tfm_ctx(req->tfm);
		if (enginectx->op.prepare_request &&
		    enginectx->op.unprepare_request) {
			ret = enginectx->op.unprepare_request(engine, req);
			if (ret)
				dev_err(engine->dev, "failed to unprepare request\n");
		}
	}
	req->complete(req, err);

	kthread_queue_work(engine->kworker, &engine->pump_requests);
}
/**
 * crypto_pump_requests - dequeue one request from engine queue to process
 * @engine: the hardware engine
 * @in_kthread: true if we are in the context of the request pump thread
 *
 * This function checks if there is any request in the engine queue that
 * needs processing and, if so, calls out to the driver to initialize the
 * hardware and handle each request.
 */
static void crypto_pump_requests(struct crypto_engine *engine,
				 bool in_kthread)
{
	struct crypto_async_request *async_req, *backlog;
	unsigned long flags;
	bool was_busy = false;
	int ret;
	struct crypto_engine_ctx *enginectx;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/* Make sure we are not already running a request */
	if (!engine->retry_support && engine->cur_req)
		goto out;

	/* If another context is idling then defer */
	if (engine->idling) {
		kthread_queue_work(engine->kworker, &engine->pump_requests);
		goto out;
	}

	/* Check if the engine queue is idle */
	if (!crypto_queue_len(&engine->queue) || !engine->running) {
		if (!engine->busy)
			goto out;

		/* Only do teardown in the thread */
		if (!in_kthread) {
			kthread_queue_work(engine->kworker,
					   &engine->pump_requests);
			goto out;
		}

		engine->busy = false;
		engine->idling = true;
		spin_unlock_irqrestore(&engine->queue_lock, flags);

		if (engine->unprepare_crypt_hardware &&
		    engine->unprepare_crypt_hardware(engine))
			dev_err(engine->dev, "failed to unprepare crypt hardware\n");

		spin_lock_irqsave(&engine->queue_lock, flags);
		engine->idling = false;
		goto out;
	}

start_request:
	/* Get the first request from the engine queue to handle */
	backlog = crypto_get_backlog(&engine->queue);
	async_req = crypto_dequeue_request(&engine->queue);
	if (!async_req)
		goto out;

	/*
	 * If hardware doesn't support the retry mechanism,
	 * keep track of the request we are processing now.
	 * We'll need it on completion (crypto_finalize_request).
	 */
	if (!engine->retry_support)
		engine->cur_req = async_req;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	if (engine->busy)
		was_busy = true;
	else
		engine->busy = true;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	/* At this point we have successfully dequeued a request to process */
	if (!was_busy && engine->prepare_crypt_hardware) {
		ret = engine->prepare_crypt_hardware(engine);
		if (ret) {
			dev_err(engine->dev, "failed to prepare crypt hardware\n");
			goto req_err_2;
		}
	}

	enginectx = crypto_tfm_ctx(async_req->tfm);

	if (enginectx->op.prepare_request) {
		ret = enginectx->op.prepare_request(engine, async_req);
		if (ret) {
			dev_err(engine->dev, "failed to prepare request: %d\n",
				ret);
			goto req_err_2;
		}
	}
	if (!enginectx->op.do_one_request) {
		dev_err(engine->dev, "failed to do request\n");
		ret = -EINVAL;
		goto req_err_1;
	}

	ret = enginectx->op.do_one_request(engine, async_req);

	/* Request unsuccessfully executed by hardware */
	if (ret < 0) {
		/*
		 * If hardware queue is full (-ENOSPC), requeue request
		 * regardless of backlog flag.
		 * Otherwise, unprepare and complete the request.
		 */
		if (!engine->retry_support ||
		    (ret != -ENOSPC)) {
			dev_err(engine->dev,
				"Failed to do one request from queue: %d\n",
				ret);
			goto req_err_1;
		}
		/*
		 * If retry mechanism is supported,
		 * unprepare current request and
		 * enqueue it back into crypto-engine queue.
		 */
		if (enginectx->op.unprepare_request) {
			ret = enginectx->op.unprepare_request(engine,
							      async_req);
			if (ret)
				dev_err(engine->dev,
					"failed to unprepare request\n");
		}
		spin_lock_irqsave(&engine->queue_lock, flags);
		/*
		 * If hardware was unable to execute request, enqueue it
		 * back in front of crypto-engine queue, to keep the order
		 * of requests.
		 */
		crypto_enqueue_request_head(&engine->queue, async_req);

		kthread_queue_work(engine->kworker, &engine->pump_requests);
		goto out;
	}

	goto retry;

req_err_1:
	if (enginectx->op.unprepare_request) {
		ret = enginectx->op.unprepare_request(engine, async_req);
		if (ret)
			dev_err(engine->dev, "failed to unprepare request\n");
	}

req_err_2:
	async_req->complete(async_req, ret);

retry:
	/* If retry mechanism is supported, send new requests to engine */
	if (engine->retry_support) {
		spin_lock_irqsave(&engine->queue_lock, flags);
		goto start_request;
	}
	return;

out:
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	/*
	 * Batching requests is possible only if
	 * the hardware can enqueue multiple requests
	 */
	if (engine->do_batch_requests) {
		ret = engine->do_batch_requests(engine);
		if (ret)
			dev_err(engine->dev, "failed to do batch requests: %d\n",
				ret);
	}
}
static void crypto_pump_work(struct kthread_work *work)
{
	struct crypto_engine *engine =
		container_of(work, struct crypto_engine, pump_requests);

	crypto_pump_requests(engine, true);
}
/**
 * crypto_transfer_request - transfer the new request into the engine queue
 * @engine: the hardware engine
 * @req: the request to be listed into the engine queue
 * @need_pump: whether to queue the request pump work
 */
static int crypto_transfer_request(struct crypto_engine *engine,
				   struct crypto_async_request *req,
				   bool need_pump)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (!engine->running) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -ESHUTDOWN;
	}

	ret = crypto_enqueue_request(&engine->queue, req);

	if (!engine->busy && need_pump)
		kthread_queue_work(engine->kworker, &engine->pump_requests);

	spin_unlock_irqrestore(&engine->queue_lock, flags);
	return ret;
}
/**
 * crypto_transfer_request_to_engine - transfer one request into the
 * engine queue
 * @engine: the hardware engine
 * @req: the request to be listed into the engine queue
 */
static int crypto_transfer_request_to_engine(struct crypto_engine *engine,
					     struct crypto_async_request *req)
{
	return crypto_transfer_request(engine, req, true);
}
/**
 * crypto_transfer_aead_request_to_engine - transfer one aead_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request to be listed into the engine queue
 */
int crypto_transfer_aead_request_to_engine(struct crypto_engine *engine,
					   struct aead_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_aead_request_to_engine);
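/*
 * Illustrative sketch (not part of this file): an algorithm's ->encrypt()
 * handler typically just hands the request to the engine. The "foo" names
 * and the ctx->engine field are hypothetical.
 *
 *	static int foo_aead_encrypt(struct aead_request *req)
 *	{
 *		struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 *		struct foo_tfm_ctx *ctx = crypto_aead_ctx(tfm);
 *
 *		// Queue the request; the pump thread will later call the
 *		// driver's do_one_request() for it.
 *		return crypto_transfer_aead_request_to_engine(ctx->engine, req);
 *	}
 */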
/**
 * crypto_transfer_akcipher_request_to_engine - transfer one akcipher_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request to be listed into the engine queue
 */
int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine,
					       struct akcipher_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_akcipher_request_to_engine);
/**
 * crypto_transfer_hash_request_to_engine - transfer one ahash_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request to be listed into the engine queue
 */
int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
					   struct ahash_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);
/**
 * crypto_transfer_skcipher_request_to_engine - transfer one skcipher_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request to be listed into the engine queue
 */
int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine,
					       struct skcipher_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_skcipher_request_to_engine);
/**
 * crypto_finalize_aead_request - finalize one aead_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_aead_request(struct crypto_engine *engine,
				  struct aead_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_aead_request);
/**
 * crypto_finalize_akcipher_request - finalize one akcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_akcipher_request(struct crypto_engine *engine,
				      struct akcipher_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_akcipher_request);
/**
 * crypto_finalize_hash_request - finalize one ahash_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_hash_request(struct crypto_engine *engine,
				  struct ahash_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);
/**
 * crypto_finalize_skcipher_request - finalize one skcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_skcipher_request(struct crypto_engine *engine,
				      struct skcipher_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_skcipher_request);
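/*
 * Illustrative sketch (not part of this file): a driver usually calls one
 * of the crypto_finalize_*_request() helpers from its completion interrupt
 * once the hardware is done with a request. The "foo" names and the fields
 * of struct foo_dev are hypothetical.
 *
 *	static irqreturn_t foo_done_irq(int irq, void *data)
 *	{
 *		struct foo_dev *fdev = data;
 *
 *		// Completes fdev->req and kicks the pump so the next
 *		// queued request gets processed.
 *		crypto_finalize_skcipher_request(fdev->engine, fdev->req, 0);
 *		return IRQ_HANDLED;
 *	}
 */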
/**
 * crypto_engine_start - start the hardware engine
 * @engine: the hardware engine to be started
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_start(struct crypto_engine *engine)
{
	unsigned long flags;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (engine->running || engine->busy) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -EBUSY;
	}

	engine->running = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	kthread_queue_work(engine->kworker, &engine->pump_requests);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_start);
/**
 * crypto_engine_stop - stop the hardware engine
 * @engine: the hardware engine to be stopped
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_stop(struct crypto_engine *engine)
{
	unsigned long flags;
	unsigned int limit = 500;
	int ret = 0;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/*
	 * If the engine queue is not empty or the engine is busy, wait
	 * for a while so the queued requests can be pumped.
	 */
	while ((crypto_queue_len(&engine->queue) || engine->busy) && limit--) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		msleep(20);
		spin_lock_irqsave(&engine->queue_lock, flags);
	}

	if (crypto_queue_len(&engine->queue) || engine->busy)
		ret = -EBUSY;
	else
		engine->running = false;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (ret)
		dev_warn(engine->dev, "could not stop engine\n");

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_stop);
/**
 * crypto_engine_alloc_init_and_set - allocate crypto hardware engine structure
 * and initialize it by setting the maximum number of entries in the software
 * crypto-engine queue.
 * @dev: the device attached to one hardware engine
 * @retry_support: whether hardware has support for retry mechanism
 * @cbk_do_batch: pointer to a callback function to be invoked when executing
 *                a batch of requests.
 *                This has the form:
 *                callback(struct crypto_engine *engine)
 *                where:
 *                @engine: the crypto engine structure.
 * @rt: whether this queue is set to run as a realtime task
 * @qlen: maximum size of the crypto-engine queue
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
						       bool retry_support,
						       int (*cbk_do_batch)(struct crypto_engine *engine),
						       bool rt, int qlen)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO / 2 };
	struct crypto_engine *engine;

	if (!dev)
		return NULL;

	engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return NULL;

	engine->dev = dev;
	engine->rt = rt;
	engine->running = false;
	engine->busy = false;
	engine->idling = false;
	engine->retry_support = retry_support;
	engine->priv_data = dev;
	/*
	 * Batching requests is possible only if
	 * the hardware has support for the retry mechanism.
	 */
	engine->do_batch_requests = retry_support ? cbk_do_batch : NULL;

	snprintf(engine->name, sizeof(engine->name),
		 "%s-engine", dev_name(dev));

	crypto_init_queue(&engine->queue, qlen);
	spin_lock_init(&engine->queue_lock);

	engine->kworker = kthread_create_worker(0, "%s", engine->name);
	if (IS_ERR(engine->kworker)) {
		dev_err(dev, "failed to create crypto request pump task\n");
		return NULL;
	}
	kthread_init_work(&engine->pump_requests, crypto_pump_work);

	if (engine->rt) {
		dev_info(dev, "will run requests pump with realtime priority\n");
		sched_setscheduler(engine->kworker->task, SCHED_FIFO, &param);
	}

	return engine;
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init_and_set);
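/*
 * Illustrative sketch (not part of this file): probe-time setup for a
 * hypothetical device whose hardware queue can hold several requests, so
 * it opts into retry support and batching. All "foo" names, the qlen of
 * 128 and the doorbell semantics are assumptions for the example.
 *
 *	static int foo_batch(struct crypto_engine *engine)
 *	{
 *		struct foo_dev *fdev = dev_get_drvdata(engine->dev);
 *
 *		// Ring the doorbell once for everything submitted so far.
 *		return foo_hw_kick(fdev);
 *	}
 *
 *	engine = crypto_engine_alloc_init_and_set(&pdev->dev, true,
 *						  foo_batch, false, 128);
 *	if (!engine)
 *		return -ENOMEM;
 *	ret = crypto_engine_start(engine);
 */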
/**
 * crypto_engine_alloc_init - allocate crypto hardware engine structure and
 * initialize it.
 * @dev: the device attached to one hardware engine
 * @rt: whether this queue is set to run as a realtime task
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
{
	return crypto_engine_alloc_init_and_set(dev, false, NULL, rt,
						CRYPTO_ENGINE_MAX_QLEN);
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);
/**
 * crypto_engine_exit - free the resources of the hardware engine on exit
 * @engine: the hardware engine to be freed
 *
 * Return 0 for success.
 */
int crypto_engine_exit(struct crypto_engine *engine)
{
	int ret;

	ret = crypto_engine_stop(engine);
	if (ret)
		return ret;

	kthread_destroy_worker(engine->kworker);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_exit);
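/*
 * Illustrative sketch (not part of this file): a minimal probe/remove
 * lifecycle for a driver using the defaults. The "foo" names are
 * hypothetical.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct foo_dev *fdev = ...;
 *
 *		fdev->engine = crypto_engine_alloc_init(&pdev->dev, true);
 *		if (!fdev->engine)
 *			return -ENOMEM;
 *		return crypto_engine_start(fdev->engine);
 *	}
 *
 *	static int foo_remove(struct platform_device *pdev)
 *	{
 *		struct foo_dev *fdev = platform_get_drvdata(pdev);
 *
 *		// Stops the pump (waiting for pending requests to drain)
 *		// and destroys the kworker.
 *		crypto_engine_exit(fdev->engine);
 *		return 0;
 *	}
 */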
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto hardware engine framework");