/*
 * Handle async block request by crypto hardware engine.
 *
 * Copyright (C) 2016 Linaro, Inc.
 *
 * Author: Baolin Wang <baolin.wang@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */
#include <linux/err.h>
#include <linux/delay.h>
#include <crypto/engine.h>
#include <crypto/internal/hash.h>
#include <uapi/linux/sched/types.h>
#include "internal.h"
#define CRYPTO_ENGINE_MAX_QLEN 10
/**
 * crypto_pump_requests - dequeue one request from engine queue to process
 * @engine: the hardware engine
 * @in_kthread: true if we are in the context of the request pump thread
 *
 * This function checks if there is any request in the engine queue that
 * needs processing and, if so, calls out to the driver to initialize the
 * hardware and handle each request.
 */
static void crypto_pump_requests(struct crypto_engine *engine,
                                 bool in_kthread)
{
        struct crypto_async_request *async_req, *backlog;
        struct ahash_request *hreq;
        struct ablkcipher_request *breq;
        unsigned long flags;
        bool was_busy = false;
        int ret, rtype;

        spin_lock_irqsave(&engine->queue_lock, flags);

        /* Make sure we are not already running a request */
        if (engine->cur_req)
                goto out;

        /* If another context is idling then defer */
        if (engine->idling) {
                kthread_queue_work(engine->kworker, &engine->pump_requests);
                goto out;
        }

        /* Check if the engine queue is idle */
        if (!crypto_queue_len(&engine->queue) || !engine->running) {
                if (!engine->busy)
                        goto out;

                /* Only do teardown in the thread */
                if (!in_kthread) {
                        kthread_queue_work(engine->kworker,
                                           &engine->pump_requests);
                        goto out;
                }

                engine->busy = false;
                engine->idling = true;
                spin_unlock_irqrestore(&engine->queue_lock, flags);

                if (engine->unprepare_crypt_hardware &&
                    engine->unprepare_crypt_hardware(engine))
                        pr_err("failed to unprepare crypt hardware\n");

                spin_lock_irqsave(&engine->queue_lock, flags);
                engine->idling = false;
                goto out;
        }

        /* Get the first request from the engine queue to handle */
        backlog = crypto_get_backlog(&engine->queue);
        async_req = crypto_dequeue_request(&engine->queue);
        if (!async_req)
                goto out;

        engine->cur_req = async_req;
        if (backlog)
                backlog->complete(backlog, -EINPROGRESS);

        if (engine->busy)
                was_busy = true;
        else
                engine->busy = true;

        spin_unlock_irqrestore(&engine->queue_lock, flags);

        rtype = crypto_tfm_alg_type(engine->cur_req->tfm);
        /* By now we have successfully dequeued a request to process */
        if (!was_busy && engine->prepare_crypt_hardware) {
                ret = engine->prepare_crypt_hardware(engine);
                if (ret) {
                        pr_err("failed to prepare crypt hardware\n");
                        goto req_err;
                }
        }

        switch (rtype) {
        case CRYPTO_ALG_TYPE_AHASH:
                hreq = ahash_request_cast(engine->cur_req);
                if (engine->prepare_hash_request) {
                        ret = engine->prepare_hash_request(engine, hreq);
                        if (ret) {
                                pr_err("failed to prepare request: %d\n", ret);
                                goto req_err;
                        }
                        engine->cur_req_prepared = true;
                }
                ret = engine->hash_one_request(engine, hreq);
                if (ret) {
                        pr_err("failed to hash one request from queue\n");
                        goto req_err;
                }
                return;
        case CRYPTO_ALG_TYPE_ABLKCIPHER:
                breq = ablkcipher_request_cast(engine->cur_req);
                if (engine->prepare_cipher_request) {
                        ret = engine->prepare_cipher_request(engine, breq);
                        if (ret) {
                                pr_err("failed to prepare request: %d\n", ret);
                                goto req_err;
                        }
                        engine->cur_req_prepared = true;
                }
                ret = engine->cipher_one_request(engine, breq);
                if (ret) {
                        pr_err("failed to cipher one request from queue\n");
                        goto req_err;
                }
                return;
        default:
                pr_err("failed to prepare request of unknown type\n");
                return;
        }

req_err:
        switch (rtype) {
        case CRYPTO_ALG_TYPE_AHASH:
                hreq = ahash_request_cast(engine->cur_req);
                crypto_finalize_hash_request(engine, hreq, ret);
                break;
        case CRYPTO_ALG_TYPE_ABLKCIPHER:
                breq = ablkcipher_request_cast(engine->cur_req);
                crypto_finalize_cipher_request(engine, breq, ret);
                break;
        }
        return;

out:
        spin_unlock_irqrestore(&engine->queue_lock, flags);
}
static void crypto_pump_work(struct kthread_work *work)
{
        struct crypto_engine *engine =
                container_of(work, struct crypto_engine, pump_requests);

        crypto_pump_requests(engine, true);
}
/**
 * crypto_transfer_cipher_request - transfer the new request into the
 * engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 * @need_pump: true to kick the request pump if the engine is not busy
 */
int crypto_transfer_cipher_request(struct crypto_engine *engine,
                                   struct ablkcipher_request *req,
                                   bool need_pump)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&engine->queue_lock, flags);

        if (!engine->running) {
                spin_unlock_irqrestore(&engine->queue_lock, flags);
                return -ESHUTDOWN;
        }

        ret = ablkcipher_enqueue_request(&engine->queue, req);

        if (!engine->busy && need_pump)
                kthread_queue_work(engine->kworker, &engine->pump_requests);

        spin_unlock_irqrestore(&engine->queue_lock, flags);
        return ret;
}
EXPORT_SYMBOL_GPL(crypto_transfer_cipher_request);
/**
 * crypto_transfer_cipher_request_to_engine - transfer one request into the
 * engine queue and kick the request pump
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_cipher_request_to_engine(struct crypto_engine *engine,
                                             struct ablkcipher_request *req)
{
        return crypto_transfer_cipher_request(engine, req, true);
}
EXPORT_SYMBOL_GPL(crypto_transfer_cipher_request_to_engine);
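
/*
 * Example usage (an illustrative sketch, not part of this file; all
 * my_dev_* names are hypothetical): a driver built on this framework
 * typically implements its ablkcipher .encrypt/.decrypt handlers by
 * handing the request to the engine queue and returning immediately,
 * letting the pump thread feed the hardware:
 *
 *      static int my_dev_aes_encrypt(struct ablkcipher_request *req)
 *      {
 *              struct my_dev *dd = my_dev_from_request(req);
 *
 *              return crypto_transfer_cipher_request_to_engine(dd->engine,
 *                                                              req);
 *      }
 *
 * The value returned to the crypto API is whatever the enqueue yielded,
 * normally -EINPROGRESS (or -EBUSY for a backlogged request).
 */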
/**
 * crypto_transfer_hash_request - transfer the new request into the
 * engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 * @need_pump: true to kick the request pump if the engine is not busy
 */
int crypto_transfer_hash_request(struct crypto_engine *engine,
                                 struct ahash_request *req, bool need_pump)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&engine->queue_lock, flags);

        if (!engine->running) {
                spin_unlock_irqrestore(&engine->queue_lock, flags);
                return -ESHUTDOWN;
        }

        ret = ahash_enqueue_request(&engine->queue, req);

        if (!engine->busy && need_pump)
                kthread_queue_work(engine->kworker, &engine->pump_requests);

        spin_unlock_irqrestore(&engine->queue_lock, flags);
        return ret;
}
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request);
/**
 * crypto_transfer_hash_request_to_engine - transfer one request into the
 * engine queue and kick the request pump
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
                                           struct ahash_request *req)
{
        return crypto_transfer_hash_request(engine, req, true);
}
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);
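
/*
 * Example usage (sketch, hypothetical names): the ahash side is
 * symmetrical; a driver's .digest or .final handler queues the request
 * the same way:
 *
 *      static int my_dev_sha_digest(struct ahash_request *req)
 *      {
 *              struct my_dev *dd = my_dev_from_ahash_request(req);
 *
 *              return crypto_transfer_hash_request_to_engine(dd->engine, req);
 *      }
 */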
/**
 * crypto_finalize_cipher_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_cipher_request(struct crypto_engine *engine,
                                    struct ablkcipher_request *req, int err)
{
        unsigned long flags;
        bool finalize_cur_req = false;
        int ret;

        spin_lock_irqsave(&engine->queue_lock, flags);
        if (engine->cur_req == &req->base)
                finalize_cur_req = true;
        spin_unlock_irqrestore(&engine->queue_lock, flags);

        if (finalize_cur_req) {
                if (engine->cur_req_prepared &&
                    engine->unprepare_cipher_request) {
                        ret = engine->unprepare_cipher_request(engine, req);
                        if (ret)
                                pr_err("failed to unprepare request\n");
                }
                spin_lock_irqsave(&engine->queue_lock, flags);
                engine->cur_req = NULL;
                engine->cur_req_prepared = false;
                spin_unlock_irqrestore(&engine->queue_lock, flags);
        }

        req->base.complete(&req->base, err);

        kthread_queue_work(engine->kworker, &engine->pump_requests);
}
EXPORT_SYMBOL_GPL(crypto_finalize_cipher_request);
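
/*
 * Example usage (sketch, hypothetical names): a driver calls this from
 * its completion path, e.g. the interrupt handler that fires once the
 * hardware has finished a transfer. Finalizing completes the request
 * back to the crypto API and kicks the pump for the next one:
 *
 *      static irqreturn_t my_dev_irq(int irq, void *data)
 *      {
 *              struct my_dev *dd = data;
 *              int err = my_dev_read_status(dd) ? -EIO : 0;
 *
 *              crypto_finalize_cipher_request(dd->engine, dd->cur_req, err);
 *              return IRQ_HANDLED;
 *      }
 */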
/**
 * crypto_finalize_hash_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_hash_request(struct crypto_engine *engine,
                                  struct ahash_request *req, int err)
{
        unsigned long flags;
        bool finalize_cur_req = false;
        int ret;

        spin_lock_irqsave(&engine->queue_lock, flags);
        if (engine->cur_req == &req->base)
                finalize_cur_req = true;
        spin_unlock_irqrestore(&engine->queue_lock, flags);

        if (finalize_cur_req) {
                if (engine->cur_req_prepared &&
                    engine->unprepare_hash_request) {
                        ret = engine->unprepare_hash_request(engine, req);
                        if (ret)
                                pr_err("failed to unprepare request\n");
                }
                spin_lock_irqsave(&engine->queue_lock, flags);
                engine->cur_req = NULL;
                engine->cur_req_prepared = false;
                spin_unlock_irqrestore(&engine->queue_lock, flags);
        }

        req->base.complete(&req->base, err);

        kthread_queue_work(engine->kworker, &engine->pump_requests);
}
EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);
/**
 * crypto_engine_start - start the hardware engine
 * @engine: the hardware engine that needs to be started
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_start(struct crypto_engine *engine)
{
        unsigned long flags;

        spin_lock_irqsave(&engine->queue_lock, flags);

        if (engine->running || engine->busy) {
                spin_unlock_irqrestore(&engine->queue_lock, flags);
                return -EBUSY;
        }

        engine->running = true;
        spin_unlock_irqrestore(&engine->queue_lock, flags);

        kthread_queue_work(engine->kworker, &engine->pump_requests);

        return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_start);
/**
 * crypto_engine_stop - stop the hardware engine
 * @engine: the hardware engine that needs to be stopped
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_stop(struct crypto_engine *engine)
{
        unsigned long flags;
        unsigned int limit = 500;
        int ret = 0;

        spin_lock_irqsave(&engine->queue_lock, flags);

        /*
         * If the engine queue is not empty or the engine is busy, we need
         * to wait a while for the pump thread to drain the queued requests.
         */
        while ((crypto_queue_len(&engine->queue) || engine->busy) && limit--) {
                spin_unlock_irqrestore(&engine->queue_lock, flags);
                msleep(20);
                spin_lock_irqsave(&engine->queue_lock, flags);
        }

        if (crypto_queue_len(&engine->queue) || engine->busy)
                ret = -EBUSY;
        else
                engine->running = false;

        spin_unlock_irqrestore(&engine->queue_lock, flags);

        if (ret)
                pr_warn("could not stop engine\n");

        return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_stop);
/**
 * crypto_engine_alloc_init - allocate crypto hardware engine structure and
 * initialize it
 * @dev: the device attached with one hardware engine
 * @rt: whether this queue is set to run as a realtime task
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
{
        struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
        struct crypto_engine *engine;

        if (!dev)
                return NULL;

        engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
        if (!engine)
                return NULL;

        engine->rt = rt;
        engine->running = false;
        engine->busy = false;
        engine->idling = false;
        engine->cur_req_prepared = false;
        engine->priv_data = dev;
        snprintf(engine->name, sizeof(engine->name),
                 "%s-engine", dev_name(dev));

        crypto_init_queue(&engine->queue, CRYPTO_ENGINE_MAX_QLEN);
        spin_lock_init(&engine->queue_lock);

        engine->kworker = kthread_create_worker(0, "%s", engine->name);
        if (IS_ERR(engine->kworker)) {
                dev_err(dev, "failed to create crypto request pump task\n");
                return NULL;
        }
        kthread_init_work(&engine->pump_requests, crypto_pump_work);

        if (engine->rt) {
                dev_info(dev, "will run requests pump with realtime priority\n");
                sched_setscheduler(engine->kworker->task, SCHED_FIFO, &param);
        }

        return engine;
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);
/**
 * crypto_engine_exit - free the resources of hardware engine when exit
 * @engine: the hardware engine that needs to be freed
 *
 * Return 0 for success.
 */
int crypto_engine_exit(struct crypto_engine *engine)
{
        int ret;

        ret = crypto_engine_stop(engine);
        if (ret)
                return ret;

        kthread_destroy_worker(engine->kworker);

        return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_exit);
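
/*
 * Example lifecycle (sketch; the my_dev_* names and callbacks are
 * hypothetical): a driver usually allocates and starts the engine at
 * probe time, binds its per-request callbacks, and tears everything
 * down on remove:
 *
 *      static int my_dev_probe(struct platform_device *pdev)
 *      {
 *              struct my_dev *dd;
 *
 *              dd = devm_kzalloc(&pdev->dev, sizeof(*dd), GFP_KERNEL);
 *              if (!dd)
 *                      return -ENOMEM;
 *
 *              dd->engine = crypto_engine_alloc_init(&pdev->dev, true);
 *              if (!dd->engine)
 *                      return -ENOMEM;
 *
 *              dd->engine->prepare_cipher_request = my_dev_prepare_req;
 *              dd->engine->cipher_one_request = my_dev_one_req;
 *              platform_set_drvdata(pdev, dd);
 *
 *              return crypto_engine_start(dd->engine);
 *      }
 *
 *      static int my_dev_remove(struct platform_device *pdev)
 *      {
 *              struct my_dev *dd = platform_get_drvdata(pdev);
 *
 *              return crypto_engine_exit(dd->engine);
 *      }
 */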
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto hardware engine framework");