treewide: remove redundant IS_ERR() before error code check
[linux/fpc-iii.git] drivers/crypto/qce/skcipher.c (blob 4217b745f1242206edb6ad37b318ab933ddf54e8)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
 */

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <crypto/aes.h>
#include <crypto/internal/des.h>
#include <crypto/internal/skcipher.h>

#include "cipher.h"

static LIST_HEAD(skcipher_algs);
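/*
 * DMA completion callback for an skcipher transfer: tear down the DMA
 * channel, unmap the source and destination scatterlists, read back the
 * updated IV from the result buffer and complete the asynchronous request.
 */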
static void qce_skcipher_done(void *data)
{
	struct crypto_async_request *async_req = data;
	struct skcipher_request *req = skcipher_request_cast(async_req);
	struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
	struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req));
	struct qce_device *qce = tmpl->qce;
	struct qce_result_dump *result_buf = qce->dma.result_buf;
	enum dma_data_direction dir_src, dir_dst;
	u32 status;
	int error;
	bool diff_dst;

	diff_dst = (req->src != req->dst) ? true : false;
	dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
	dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;

	error = qce_dma_terminate_all(&qce->dma);
	if (error)
		dev_dbg(qce->dev, "skcipher dma termination error (%d)\n",
			error);

	if (diff_dst)
		dma_unmap_sg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src);
	dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);

	sg_free_table(&rctx->dst_tbl);

	error = qce_check_status(qce, &status);
	if (error < 0)
		dev_dbg(qce->dev, "skcipher operation error (%x)\n", status);

	memcpy(rctx->iv, result_buf->encr_cntr_iv, rctx->ivsize);
	qce->async_req_done(tmpl->qce, error);
}
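/*
 * Prepare and start one skcipher request: build a destination scatter/gather
 * table that also holds the result buffer, map source and destination for
 * DMA, program the DMA descriptors and kick off the crypto engine.
 */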
static int
qce_skcipher_async_req_handle(struct crypto_async_request *async_req)
{
	struct skcipher_request *req = skcipher_request_cast(async_req);
	struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req));
	struct qce_device *qce = tmpl->qce;
	enum dma_data_direction dir_src, dir_dst;
	struct scatterlist *sg;
	bool diff_dst;
	gfp_t gfp;
	int ret;

	rctx->iv = req->iv;
	rctx->ivsize = crypto_skcipher_ivsize(skcipher);
	rctx->cryptlen = req->cryptlen;

	diff_dst = (req->src != req->dst) ? true : false;
	dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
	dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;

	rctx->src_nents = sg_nents_for_len(req->src, req->cryptlen);
	if (diff_dst)
		rctx->dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
	else
		rctx->dst_nents = rctx->src_nents;
	if (rctx->src_nents < 0) {
		dev_err(qce->dev, "Invalid numbers of src SG.\n");
		return rctx->src_nents;
	}
	if (rctx->dst_nents < 0) {
		dev_err(qce->dev, "Invalid numbers of dst SG.\n");
		return rctx->dst_nents;
	}

	rctx->dst_nents += 1;

	gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
						GFP_KERNEL : GFP_ATOMIC;

	ret = sg_alloc_table(&rctx->dst_tbl, rctx->dst_nents, gfp);
	if (ret)
		return ret;

	sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);

	sg = qce_sgtable_add(&rctx->dst_tbl, req->dst, rctx->dst_nents - 1);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto error_free;
	}

	sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg, 1);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto error_free;
	}

	sg_mark_end(sg);
	rctx->dst_sg = rctx->dst_tbl.sgl;

	ret = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
	if (ret < 0)
		goto error_free;

	if (diff_dst) {
		ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, dir_src);
		if (ret < 0)
			goto error_unmap_dst;
		rctx->src_sg = req->src;
	} else {
		rctx->src_sg = rctx->dst_sg;
	}

	ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, rctx->src_nents,
			       rctx->dst_sg, rctx->dst_nents,
			       qce_skcipher_done, async_req);
	if (ret)
		goto error_unmap_src;

	qce_dma_issue_pending(&qce->dma);

	ret = qce_start(async_req, tmpl->crypto_alg_type, req->cryptlen, 0);
	if (ret)
		goto error_terminate;

	return 0;

error_terminate:
	qce_dma_terminate_all(&qce->dma);
error_unmap_src:
	if (diff_dst)
		dma_unmap_sg(qce->dev, req->src, rctx->src_nents, dir_src);
error_unmap_dst:
	dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
error_free:
	sg_free_table(&rctx->dst_tbl);
	return ret;
}
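/*
 * AES setkey: the engine handles 128- and 256-bit keys directly; any other
 * size (notably AES-192) is passed down to the sync skcipher fallback.
 */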
static int qce_skcipher_setkey(struct crypto_skcipher *ablk, const u8 *key,
			       unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(ablk);
	struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	unsigned long flags = to_cipher_tmpl(ablk)->alg_flags;
	int ret;

	if (!key || !keylen)
		return -EINVAL;

	switch (IS_XTS(flags) ? keylen >> 1 : keylen) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_256:
		break;
	default:
		goto fallback;
	}

	ctx->enc_keylen = keylen;
	memcpy(ctx->enc_key, key, keylen);
	return 0;
fallback:
	ret = crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
	if (!ret)
		ctx->enc_keylen = keylen;
	return ret;
}
static int qce_des_setkey(struct crypto_skcipher *ablk, const u8 *key,
			  unsigned int keylen)
{
	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(ablk);
	int err;

	err = verify_skcipher_des_key(ablk, key);
	if (err)
		return err;

	ctx->enc_keylen = keylen;
	memcpy(ctx->enc_key, key, keylen);
	return 0;
}

static int qce_des3_setkey(struct crypto_skcipher *ablk, const u8 *key,
			   unsigned int keylen)
{
	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(ablk);
	int err;

	err = verify_skcipher_des3_key(ablk, key);
	if (err)
		return err;

	ctx->enc_keylen = keylen;
	memcpy(ctx->enc_key, key, keylen);
	return 0;
}
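/*
 * Common encrypt/decrypt path: AES requests with key sizes the hardware
 * does not support are processed synchronously through the fallback cipher,
 * everything else is queued to the QCE engine.
 */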
static int qce_skcipher_crypt(struct skcipher_request *req, int encrypt)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
	struct qce_alg_template *tmpl = to_cipher_tmpl(tfm);
	int keylen;
	int ret;

	rctx->flags = tmpl->alg_flags;
	rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;
	keylen = IS_XTS(rctx->flags) ? ctx->enc_keylen >> 1 : ctx->enc_keylen;

	if (IS_AES(rctx->flags) && keylen != AES_KEYSIZE_128 &&
	    keylen != AES_KEYSIZE_256) {
		SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);

		skcipher_request_set_sync_tfm(subreq, ctx->fallback);
		skcipher_request_set_callback(subreq, req->base.flags,
					      NULL, NULL);
		skcipher_request_set_crypt(subreq, req->src, req->dst,
					   req->cryptlen, req->iv);
		ret = encrypt ? crypto_skcipher_encrypt(subreq) :
				crypto_skcipher_decrypt(subreq);
		skcipher_request_zero(subreq);
		return ret;
	}

	return tmpl->qce->async_req_enqueue(tmpl->qce, &req->base);
}
static int qce_skcipher_encrypt(struct skcipher_request *req)
{
	return qce_skcipher_crypt(req, 1);
}

static int qce_skcipher_decrypt(struct skcipher_request *req)
{
	return qce_skcipher_crypt(req, 0);
}

static int qce_skcipher_init(struct crypto_skcipher *tfm)
{
	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	memset(ctx, 0, sizeof(*ctx));
	crypto_skcipher_set_reqsize(tfm, sizeof(struct qce_cipher_reqctx));

	return 0;
}
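/*
 * AES transforms also allocate a sync skcipher fallback, looked up by the
 * algorithm name, for the key sizes the hardware cannot process.
 */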
static int qce_skcipher_init_fallback(struct crypto_skcipher *tfm)
{
	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	qce_skcipher_init(tfm);
	ctx->fallback = crypto_alloc_sync_skcipher(crypto_tfm_alg_name(&tfm->base),
						   0, CRYPTO_ALG_NEED_FALLBACK);
	return PTR_ERR_OR_ZERO(ctx->fallback);
}

static void qce_skcipher_exit(struct crypto_skcipher *tfm)
{
	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_sync_skcipher(ctx->fallback);
}
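/* Static description of one algorithm variant exposed by this driver. */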
struct qce_skcipher_def {
	unsigned long flags;
	const char *name;
	const char *drv_name;
	unsigned int blocksize;
	unsigned int chunksize;
	unsigned int ivsize;
	unsigned int min_keysize;
	unsigned int max_keysize;
};
static const struct qce_skcipher_def skcipher_def[] = {
	{
		.flags = QCE_ALG_AES | QCE_MODE_ECB,
		.name = "ecb(aes)",
		.drv_name = "ecb-aes-qce",
		.blocksize = AES_BLOCK_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
	},
	{
		.flags = QCE_ALG_AES | QCE_MODE_CBC,
		.name = "cbc(aes)",
		.drv_name = "cbc-aes-qce",
		.blocksize = AES_BLOCK_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
	},
	{
		.flags = QCE_ALG_AES | QCE_MODE_CTR,
		.name = "ctr(aes)",
		.drv_name = "ctr-aes-qce",
		.blocksize = 1,
		.chunksize = AES_BLOCK_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
	},
	{
		.flags = QCE_ALG_AES | QCE_MODE_XTS,
		.name = "xts(aes)",
		.drv_name = "xts-aes-qce",
		.blocksize = AES_BLOCK_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.min_keysize = AES_MIN_KEY_SIZE * 2,
		.max_keysize = AES_MAX_KEY_SIZE * 2,
	},
	{
		.flags = QCE_ALG_DES | QCE_MODE_ECB,
		.name = "ecb(des)",
		.drv_name = "ecb-des-qce",
		.blocksize = DES_BLOCK_SIZE,
		.ivsize = 0,
		.min_keysize = DES_KEY_SIZE,
		.max_keysize = DES_KEY_SIZE,
	},
	{
		.flags = QCE_ALG_DES | QCE_MODE_CBC,
		.name = "cbc(des)",
		.drv_name = "cbc-des-qce",
		.blocksize = DES_BLOCK_SIZE,
		.ivsize = DES_BLOCK_SIZE,
		.min_keysize = DES_KEY_SIZE,
		.max_keysize = DES_KEY_SIZE,
	},
	{
		.flags = QCE_ALG_3DES | QCE_MODE_ECB,
		.name = "ecb(des3_ede)",
		.drv_name = "ecb-3des-qce",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.ivsize = 0,
		.min_keysize = DES3_EDE_KEY_SIZE,
		.max_keysize = DES3_EDE_KEY_SIZE,
	},
	{
		.flags = QCE_ALG_3DES | QCE_MODE_CBC,
		.name = "cbc(des3_ede)",
		.drv_name = "cbc-3des-qce",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.ivsize = DES3_EDE_BLOCK_SIZE,
		.min_keysize = DES3_EDE_KEY_SIZE,
		.max_keysize = DES3_EDE_KEY_SIZE,
	},
};
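/*
 * Allocate and fill a template for one algorithm definition, wire up the
 * skcipher callbacks and register it with the crypto API; AES entries also
 * advertise CRYPTO_ALG_NEED_FALLBACK and use the fallback-aware init/exit.
 */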
static int qce_skcipher_register_one(const struct qce_skcipher_def *def,
				     struct qce_device *qce)
{
	struct qce_alg_template *tmpl;
	struct skcipher_alg *alg;
	int ret;

	tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
	if (!tmpl)
		return -ENOMEM;

	alg = &tmpl->alg.skcipher;

	snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
	snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 def->drv_name);

	alg->base.cra_blocksize = def->blocksize;
	alg->chunksize = def->chunksize;
	alg->ivsize = def->ivsize;
	alg->min_keysize = def->min_keysize;
	alg->max_keysize = def->max_keysize;
	alg->setkey = IS_3DES(def->flags) ? qce_des3_setkey :
		      IS_DES(def->flags) ? qce_des_setkey :
		      qce_skcipher_setkey;
	alg->encrypt = qce_skcipher_encrypt;
	alg->decrypt = qce_skcipher_decrypt;

	alg->base.cra_priority = 300;
	alg->base.cra_flags = CRYPTO_ALG_ASYNC |
			      CRYPTO_ALG_KERN_DRIVER_ONLY;
	alg->base.cra_ctxsize = sizeof(struct qce_cipher_ctx);
	alg->base.cra_alignmask = 0;
	alg->base.cra_module = THIS_MODULE;

	if (IS_AES(def->flags)) {
		alg->base.cra_flags |= CRYPTO_ALG_NEED_FALLBACK;
		alg->init = qce_skcipher_init_fallback;
		alg->exit = qce_skcipher_exit;
	} else {
		alg->init = qce_skcipher_init;
	}

	INIT_LIST_HEAD(&tmpl->entry);
	tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_SKCIPHER;
	tmpl->alg_flags = def->flags;
	tmpl->qce = qce;

	ret = crypto_register_skcipher(alg);
	if (ret) {
		kfree(tmpl);
		dev_err(qce->dev, "%s registration failed\n", alg->base.cra_name);
		return ret;
	}

	list_add_tail(&tmpl->entry, &skcipher_algs);
	dev_dbg(qce->dev, "%s is registered\n", alg->base.cra_name);
	return 0;
}
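/* Unregister and free every algorithm previously added to skcipher_algs. */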
static void qce_skcipher_unregister(struct qce_device *qce)
{
	struct qce_alg_template *tmpl, *n;

	list_for_each_entry_safe(tmpl, n, &skcipher_algs, entry) {
		crypto_unregister_skcipher(&tmpl->alg.skcipher);
		list_del(&tmpl->entry);
		kfree(tmpl);
	}
}

static int qce_skcipher_register(struct qce_device *qce)
{
	int ret, i;

	for (i = 0; i < ARRAY_SIZE(skcipher_def); i++) {
		ret = qce_skcipher_register_one(&skcipher_def[i], qce);
		if (ret)
			goto err;
	}

	return 0;
err:
	qce_skcipher_unregister(qce);
	return ret;
}

const struct qce_algo_ops skcipher_ops = {
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.register_algs = qce_skcipher_register,
	.unregister_algs = qce_skcipher_unregister,
	.async_req_handle = qce_skcipher_async_req_handle,
};