treewide: remove redundant IS_ERR() before error code check
[linux/fpc-iii.git] / drivers/crypto/rockchip/rk3288_crypto_skcipher.c
blob 4a75c8e1fa6c1993479745529eba732cf2eae8b8

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Crypto acceleration support for Rockchip RK3288
 *
 * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd
 *
 * Author: Zain Wang <zain.wang@rock-chips.com>
 *
 * Some ideas are from marvell-cesa.c and s5p-sss.c driver.
 */

#include "rk3288_crypto.h"

#define RK_CRYPTO_DEC			BIT(0)

static void rk_crypto_complete(struct crypto_async_request *base, int err)
{
	if (base->complete)
		base->complete(base, err);
}

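/*
 * The engine only processes whole cipher blocks: reject any request
 * whose length is not a multiple of the transform's alignment size
 * before it is queued.
 */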
static int rk_handle_req(struct rk_crypto_info *dev,
			 struct skcipher_request *req)
{
	if (!IS_ALIGNED(req->cryptlen, dev->align_size))
		return -EINVAL;
	else
		return dev->enqueue(dev, &req->base);
}

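/*
 * Validate the AES key length (128/192/256 bits) and copy the key
 * straight into the engine's AES key registers.
 */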
static int rk_aes_setkey(struct crypto_skcipher *cipher,
			 const u8 *key, unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
	struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm);

	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256)
		return -EINVAL;
	ctx->keylen = keylen;
	memcpy_toio(ctx->dev->reg + RK_CRYPTO_AES_KEY_0, key, keylen);
	return 0;
}

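/*
 * The generic DES helpers check the key (weak keys for DES, degenerate
 * keys for 3DES) before it is loaded into the TDES key registers.
 */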
static int rk_des_setkey(struct crypto_skcipher *cipher,
			 const u8 *key, unsigned int keylen)
{
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
	int err;

	err = verify_skcipher_des_key(cipher, key);
	if (err)
		return err;

	ctx->keylen = keylen;
	memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen);
	return 0;
}

static int rk_tdes_setkey(struct crypto_skcipher *cipher,
			  const u8 *key, unsigned int keylen)
{
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
	int err;

	err = verify_skcipher_des3_key(cipher, key);
	if (err)
		return err;

	ctx->keylen = keylen;
	memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen);
	return 0;
}

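/*
 * Each encrypt/decrypt entry point below only selects the mode bits for
 * the hardware control register and enqueues the request; the actual
 * processing happens asynchronously from the device queue.
 */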
static int rk_aes_ecb_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

	ctx->mode = RK_CRYPTO_AES_ECB_MODE;
	return rk_handle_req(dev, req);
}

static int rk_aes_ecb_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

	ctx->mode = RK_CRYPTO_AES_ECB_MODE | RK_CRYPTO_DEC;
	return rk_handle_req(dev, req);
}

static int rk_aes_cbc_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

	ctx->mode = RK_CRYPTO_AES_CBC_MODE;
	return rk_handle_req(dev, req);
}

static int rk_aes_cbc_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

	ctx->mode = RK_CRYPTO_AES_CBC_MODE | RK_CRYPTO_DEC;
	return rk_handle_req(dev, req);
}

static int rk_des_ecb_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

	ctx->mode = 0;
	return rk_handle_req(dev, req);
}

static int rk_des_ecb_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

	ctx->mode = RK_CRYPTO_DEC;
	return rk_handle_req(dev, req);
}

static int rk_des_cbc_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

	ctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC;
	return rk_handle_req(dev, req);
}

static int rk_des_cbc_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

	ctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC | RK_CRYPTO_DEC;
	return rk_handle_req(dev, req);
}

static int rk_des3_ede_ecb_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

	ctx->mode = RK_CRYPTO_TDES_SELECT;
	return rk_handle_req(dev, req);
}

static int rk_des3_ede_ecb_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

	ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_DEC;
	return rk_handle_req(dev, req);
}

static int rk_des3_ede_cbc_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

	ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC;
	return rk_handle_req(dev, req);
}

static int rk_des3_ede_cbc_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

	ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC |
		    RK_CRYPTO_DEC;
	return rk_handle_req(dev, req);
}

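/*
 * Program the control, byte-swap, IV and interrupt registers for the
 * current request. DES/3DES and AES use separate register blocks, told
 * apart here by the transform's block size.
 */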
static void rk_ablk_hw_init(struct rk_crypto_info *dev)
{
	struct skcipher_request *req =
		skcipher_request_cast(dev->async_req);
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
	u32 ivsize, block, conf_reg = 0;

	block = crypto_tfm_alg_blocksize(tfm);
	ivsize = crypto_skcipher_ivsize(cipher);

	if (block == DES_BLOCK_SIZE) {
		ctx->mode |= RK_CRYPTO_TDES_FIFO_MODE |
			     RK_CRYPTO_TDES_BYTESWAP_KEY |
			     RK_CRYPTO_TDES_BYTESWAP_IV;
		CRYPTO_WRITE(dev, RK_CRYPTO_TDES_CTRL, ctx->mode);
		memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0, req->iv, ivsize);
		conf_reg = RK_CRYPTO_DESSEL;
	} else {
		ctx->mode |= RK_CRYPTO_AES_FIFO_MODE |
			     RK_CRYPTO_AES_KEY_CHANGE |
			     RK_CRYPTO_AES_BYTESWAP_KEY |
			     RK_CRYPTO_AES_BYTESWAP_IV;
		if (ctx->keylen == AES_KEYSIZE_192)
			ctx->mode |= RK_CRYPTO_AES_192BIT_key;
		else if (ctx->keylen == AES_KEYSIZE_256)
			ctx->mode |= RK_CRYPTO_AES_256BIT_key;
		CRYPTO_WRITE(dev, RK_CRYPTO_AES_CTRL, ctx->mode);
		memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0, req->iv, ivsize);
	}
	conf_reg |= RK_CRYPTO_BYTESWAP_BTFIFO |
		    RK_CRYPTO_BYTESWAP_BRFIFO;
	CRYPTO_WRITE(dev, RK_CRYPTO_CONF, conf_reg);
	CRYPTO_WRITE(dev, RK_CRYPTO_INTENA,
		     RK_CRYPTO_BCDMA_ERR_ENA | RK_CRYPTO_BCDMA_DONE_ENA);
}

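/*
 * Kick off one DMA transfer. The length register takes 32-bit words,
 * hence the division by four; RK_CRYPTO_BLOCK_START is written together
 * with the same bit shifted into the upper half-word, which carries the
 * write-enable mask for the control register.
 */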
static void crypto_dma_start(struct rk_crypto_info *dev)
{
	CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAS, dev->addr_in);
	CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAL, dev->count / 4);
	CRYPTO_WRITE(dev, RK_CRYPTO_BTDMAS, dev->addr_out);
	CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, RK_CRYPTO_BLOCK_START |
		     _SBF(RK_CRYPTO_BLOCK_START, 16));
}

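/*
 * Prepare the next chunk of the request: for decryption the last
 * ciphertext block is saved first, because an in-place operation would
 * overwrite it before it can serve as the next IV. Then the
 * scatterlists are mapped and the DMA is started.
 */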
static int rk_set_data_start(struct rk_crypto_info *dev)
{
	int err;
	struct skcipher_request *req =
		skcipher_request_cast(dev->async_req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	u32 ivsize = crypto_skcipher_ivsize(tfm);
	u8 *src_last_blk = page_address(sg_page(dev->sg_src)) +
		dev->sg_src->offset + dev->sg_src->length - ivsize;

	/* Store the IV that needs to be updated in chain mode, and update
	 * the IV buffer to contain the next IV for decryption mode.
	 */
	if (ctx->mode & RK_CRYPTO_DEC) {
		memcpy(ctx->iv, src_last_blk, ivsize);
		sg_pcopy_to_buffer(dev->first, dev->src_nents, req->iv,
				   ivsize, dev->total - ivsize);
	}

	err = dev->load_data(dev, dev->sg_src, dev->sg_dst);
	if (!err)
		crypto_dma_start(dev);
	return err;
}

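/*
 * Begin a new request: capture the scatterlist geometry, then program
 * the hardware and start the first chunk under the device lock.
 */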
static int rk_ablk_start(struct rk_crypto_info *dev)
{
	struct skcipher_request *req =
		skcipher_request_cast(dev->async_req);
	unsigned long flags;
	int err = 0;

	dev->left_bytes = req->cryptlen;
	dev->total = req->cryptlen;
	dev->sg_src = req->src;
	dev->first = req->src;
	dev->src_nents = sg_nents(req->src);
	dev->sg_dst = req->dst;
	dev->dst_nents = sg_nents(req->dst);
	dev->aligned = 1;

	spin_lock_irqsave(&dev->lock, flags);
	rk_ablk_hw_init(dev);
	err = rk_set_data_start(dev);
	spin_unlock_irqrestore(&dev->lock, flags);
	return err;
}

static void rk_iv_copyback(struct rk_crypto_info *dev)
{
	struct skcipher_request *req =
		skcipher_request_cast(dev->async_req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	u32 ivsize = crypto_skcipher_ivsize(tfm);

	/* Update the IV buffer to contain the next IV for encryption mode. */
	if (!(ctx->mode & RK_CRYPTO_DEC)) {
		if (dev->aligned) {
			memcpy(req->iv, sg_virt(dev->sg_dst) +
				dev->sg_dst->length - ivsize, ivsize);
		} else {
			memcpy(req->iv, dev->addr_vir +
				dev->count - ivsize, ivsize);
		}
	}
}

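/*
 * Reload the IV registers between chunks: for decryption it is the
 * ciphertext block saved in rk_set_data_start(), for encryption the
 * last output block just produced.
 */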
static void rk_update_iv(struct rk_crypto_info *dev)
{
	struct skcipher_request *req =
		skcipher_request_cast(dev->async_req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	u32 ivsize = crypto_skcipher_ivsize(tfm);
	u8 *new_iv = NULL;

	if (ctx->mode & RK_CRYPTO_DEC) {
		new_iv = ctx->iv;
	} else {
		new_iv = page_address(sg_page(dev->sg_dst)) +
			 dev->sg_dst->offset + dev->sg_dst->length - ivsize;
	}

	if (ivsize == DES_BLOCK_SIZE)
		memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0, new_iv, ivsize);
	else if (ivsize == AES_BLOCK_SIZE)
		memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0, new_iv, ivsize);
}

/*
 * Completion path for one DMA chunk. Returns 0 when the chunk finished
 * cleanly (and the next one, if any, was started), or a negative error
 * code on failure.
 */
static int rk_ablk_rx(struct rk_crypto_info *dev)
{
	int err = 0;
	struct skcipher_request *req =
		skcipher_request_cast(dev->async_req);

	dev->unload_data(dev);
	if (!dev->aligned) {
		if (!sg_pcopy_from_buffer(req->dst, dev->dst_nents,
					  dev->addr_vir, dev->count,
					  dev->total - dev->left_bytes -
					  dev->count)) {
			err = -EINVAL;
			goto out_rx;
		}
	}
	if (dev->left_bytes) {
		rk_update_iv(dev);
		if (dev->aligned) {
			if (sg_is_last(dev->sg_src)) {
				dev_err(dev->dev, "[%s:%d] Lack of data\n",
					__func__, __LINE__);
				err = -ENOMEM;
				goto out_rx;
			}
			dev->sg_src = sg_next(dev->sg_src);
			dev->sg_dst = sg_next(dev->sg_dst);
		}
		err = rk_set_data_start(dev);
	} else {
		rk_iv_copyback(dev);
		/* the whole calculation finished without any error */
		dev->complete(dev->async_req, 0);
		tasklet_schedule(&dev->queue_task);
	}
out_rx:
	return err;
}

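/*
 * Each transform allocates one bounce page for unaligned data and keeps
 * the device clocks enabled until the transform is destroyed.
 */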
static int rk_ablk_init_tfm(struct crypto_skcipher *tfm)
{
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct rk_crypto_tmp *algt;

	algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher);

	ctx->dev = algt->dev;
	ctx->dev->align_size = crypto_tfm_alg_alignmask(crypto_skcipher_tfm(tfm)) + 1;
	ctx->dev->start = rk_ablk_start;
	ctx->dev->update = rk_ablk_rx;
	ctx->dev->complete = rk_crypto_complete;
	ctx->dev->addr_vir = (char *)__get_free_page(GFP_KERNEL);

	return ctx->dev->addr_vir ? ctx->dev->enable_clk(ctx->dev) : -ENOMEM;
}

static void rk_ablk_exit_tfm(struct crypto_skcipher *tfm)
{
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	free_page((unsigned long)ctx->dev->addr_vir);
	ctx->dev->disable_clk(ctx->dev);
}

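/*
 * The algorithm templates below are picked up and registered with the
 * crypto API by the rk3288 core driver at probe time.
 */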
struct rk_crypto_tmp rk_ecb_aes_alg = {
	.type = ALG_TYPE_CIPHER,
	.alg.skcipher = {
		.base.cra_name = "ecb(aes)",
		.base.cra_driver_name = "ecb-aes-rk",
		.base.cra_priority = 300,
		.base.cra_flags = CRYPTO_ALG_ASYNC,
		.base.cra_blocksize = AES_BLOCK_SIZE,
		.base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
		.base.cra_alignmask = 0x0f,
		.base.cra_module = THIS_MODULE,

		.init = rk_ablk_init_tfm,
		.exit = rk_ablk_exit_tfm,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.setkey = rk_aes_setkey,
		.encrypt = rk_aes_ecb_encrypt,
		.decrypt = rk_aes_ecb_decrypt,
	}
};

struct rk_crypto_tmp rk_cbc_aes_alg = {
	.type = ALG_TYPE_CIPHER,
	.alg.skcipher = {
		.base.cra_name = "cbc(aes)",
		.base.cra_driver_name = "cbc-aes-rk",
		.base.cra_priority = 300,
		.base.cra_flags = CRYPTO_ALG_ASYNC,
		.base.cra_blocksize = AES_BLOCK_SIZE,
		.base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
		.base.cra_alignmask = 0x0f,
		.base.cra_module = THIS_MODULE,

		.init = rk_ablk_init_tfm,
		.exit = rk_ablk_exit_tfm,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.setkey = rk_aes_setkey,
		.encrypt = rk_aes_cbc_encrypt,
		.decrypt = rk_aes_cbc_decrypt,
	}
};

struct rk_crypto_tmp rk_ecb_des_alg = {
	.type = ALG_TYPE_CIPHER,
	.alg.skcipher = {
		.base.cra_name = "ecb(des)",
		.base.cra_driver_name = "ecb-des-rk",
		.base.cra_priority = 300,
		.base.cra_flags = CRYPTO_ALG_ASYNC,
		.base.cra_blocksize = DES_BLOCK_SIZE,
		.base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
		.base.cra_alignmask = 0x07,
		.base.cra_module = THIS_MODULE,

		.init = rk_ablk_init_tfm,
		.exit = rk_ablk_exit_tfm,
		.min_keysize = DES_KEY_SIZE,
		.max_keysize = DES_KEY_SIZE,
		.setkey = rk_des_setkey,
		.encrypt = rk_des_ecb_encrypt,
		.decrypt = rk_des_ecb_decrypt,
	}
};

struct rk_crypto_tmp rk_cbc_des_alg = {
	.type = ALG_TYPE_CIPHER,
	.alg.skcipher = {
		.base.cra_name = "cbc(des)",
		.base.cra_driver_name = "cbc-des-rk",
		.base.cra_priority = 300,
		.base.cra_flags = CRYPTO_ALG_ASYNC,
		.base.cra_blocksize = DES_BLOCK_SIZE,
		.base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
		.base.cra_alignmask = 0x07,
		.base.cra_module = THIS_MODULE,

		.init = rk_ablk_init_tfm,
		.exit = rk_ablk_exit_tfm,
		.min_keysize = DES_KEY_SIZE,
		.max_keysize = DES_KEY_SIZE,
		.ivsize = DES_BLOCK_SIZE,
		.setkey = rk_des_setkey,
		.encrypt = rk_des_cbc_encrypt,
		.decrypt = rk_des_cbc_decrypt,
	}
};

struct rk_crypto_tmp rk_ecb_des3_ede_alg = {
	.type = ALG_TYPE_CIPHER,
	.alg.skcipher = {
		.base.cra_name = "ecb(des3_ede)",
		.base.cra_driver_name = "ecb-des3-ede-rk",
		.base.cra_priority = 300,
		.base.cra_flags = CRYPTO_ALG_ASYNC,
		.base.cra_blocksize = DES_BLOCK_SIZE,
		.base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
		.base.cra_alignmask = 0x07,
		.base.cra_module = THIS_MODULE,

		.init = rk_ablk_init_tfm,
		.exit = rk_ablk_exit_tfm,
		.min_keysize = DES3_EDE_KEY_SIZE,
		.max_keysize = DES3_EDE_KEY_SIZE,
		.setkey = rk_tdes_setkey,
		.encrypt = rk_des3_ede_ecb_encrypt,
		.decrypt = rk_des3_ede_ecb_decrypt,
	}
};

struct rk_crypto_tmp rk_cbc_des3_ede_alg = {
	.type = ALG_TYPE_CIPHER,
	.alg.skcipher = {
		.base.cra_name = "cbc(des3_ede)",
		.base.cra_driver_name = "cbc-des3-ede-rk",
		.base.cra_priority = 300,
		.base.cra_flags = CRYPTO_ALG_ASYNC,
		.base.cra_blocksize = DES_BLOCK_SIZE,
		.base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
		.base.cra_alignmask = 0x07,
		.base.cra_module = THIS_MODULE,

		.init = rk_ablk_init_tfm,
		.exit = rk_ablk_exit_tfm,
		.min_keysize = DES3_EDE_KEY_SIZE,
		.max_keysize = DES3_EDE_KEY_SIZE,
		.ivsize = DES_BLOCK_SIZE,
		.setkey = rk_tdes_setkey,
		.encrypt = rk_des3_ede_cbc_encrypt,
		.decrypt = rk_des3_ede_cbc_decrypt,
	}
};