drivers/crypto/rockchip/rk3288_crypto_ahash.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Crypto acceleration support for Rockchip RK3288
 *
 * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd
 *
 * Author: Zain Wang <zain.wang@rock-chips.com>
 *
 * Some ideas are from marvell/cesa.c and s5p-sss.c driver.
 */
#include <linux/device.h>
#include "rk3288_crypto.h"
/*
 * The hardware cannot hash a zero-length message, so return the
 * well-known digest of the empty message instead.
 */
static int zero_message_process(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	int rk_digest_size = crypto_ahash_digestsize(tfm);

	switch (rk_digest_size) {
	case SHA1_DIGEST_SIZE:
		memcpy(req->result, sha1_zero_message_hash, rk_digest_size);
		break;
	case SHA256_DIGEST_SIZE:
		memcpy(req->result, sha256_zero_message_hash, rk_digest_size);
		break;
	case MD5_DIGEST_SIZE:
		memcpy(req->result, md5_zero_message_hash, rk_digest_size);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
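
/* Report completion (or an error) of an offloaded request to its caller. */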
static void rk_ahash_crypto_complete(struct crypto_async_request *base, int err)
{
	if (base->complete)
		base->complete(base, err);
}
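
/*
 * Prepare the hash engine for a new request: flush the hash unit, clear
 * the digest output registers, enable and acknowledge the hash-receive
 * DMA interrupts, select the hash mode plus output byte swapping, and
 * program the total message length.
 */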
static void rk_ahash_reg_init(struct rk_crypto_info *dev)
{
	struct ahash_request *req = ahash_request_cast(dev->async_req);
	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
	int reg_status = 0;

	reg_status = CRYPTO_READ(dev, RK_CRYPTO_CTRL) |
		     RK_CRYPTO_HASH_FLUSH | _SBF(0xffff, 16);
	CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, reg_status);

	reg_status = CRYPTO_READ(dev, RK_CRYPTO_CTRL);
	reg_status &= (~RK_CRYPTO_HASH_FLUSH);
	reg_status |= _SBF(0xffff, 16);
	CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, reg_status);

	memset_io(dev->reg + RK_CRYPTO_HASH_DOUT_0, 0, 32);

	CRYPTO_WRITE(dev, RK_CRYPTO_INTENA, RK_CRYPTO_HRDMA_ERR_ENA |
					    RK_CRYPTO_HRDMA_DONE_ENA);

	CRYPTO_WRITE(dev, RK_CRYPTO_INTSTS, RK_CRYPTO_HRDMA_ERR_INT |
					    RK_CRYPTO_HRDMA_DONE_INT);

	CRYPTO_WRITE(dev, RK_CRYPTO_HASH_CTRL, rctx->mode |
					       RK_CRYPTO_HASH_SWAP_DO);

	CRYPTO_WRITE(dev, RK_CRYPTO_CONF, RK_CRYPTO_BYTESWAP_HRFIFO |
					  RK_CRYPTO_BYTESWAP_BRFIFO |
					  RK_CRYPTO_BYTESWAP_BTFIFO);

	CRYPTO_WRITE(dev, RK_CRYPTO_HASH_MSG_LEN, dev->total);
}
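
/*
 * The init/update/final/finup/export/import hooks are delegated to the
 * software fallback tfm; only whole-message digest() is offloaded to the
 * hardware engine.
 */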
static int rk_ahash_init(struct ahash_request *req)
{
	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_init(&rctx->fallback_req);
}
static int rk_ahash_update(struct ahash_request *req)
{
	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = req->nbytes;
	rctx->fallback_req.src = req->src;

	return crypto_ahash_update(&rctx->fallback_req);
}
static int rk_ahash_final(struct ahash_request *req)
{
	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.result = req->result;

	return crypto_ahash_final(&rctx->fallback_req);
}
static int rk_ahash_finup(struct ahash_request *req)
{
	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;

	rctx->fallback_req.nbytes = req->nbytes;
	rctx->fallback_req.src = req->src;
	rctx->fallback_req.result = req->result;

	return crypto_ahash_finup(&rctx->fallback_req);
}
static int rk_ahash_import(struct ahash_request *req, const void *in)
{
	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_import(&rctx->fallback_req, in);
}
static int rk_ahash_export(struct ahash_request *req, void *out)
{
	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_export(&rctx->fallback_req, out);
}
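
/*
 * Hardware entry point: empty messages get the precomputed digest,
 * everything else is queued for asynchronous processing by the engine.
 */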
static int rk_ahash_digest(struct ahash_request *req)
{
	struct rk_ahash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct rk_crypto_info *dev = tctx->dev;

	if (!req->nbytes)
		return zero_message_process(req);
	else
		return dev->enqueue(dev, &req->base);
}
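
/* Start hash-receive DMA; the length register counts 32-bit words. */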
static void crypto_ahash_dma_start(struct rk_crypto_info *dev)
{
	CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAS, dev->addr_in);
	CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAL, (dev->count + 3) / 4);
	CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, RK_CRYPTO_HASH_START |
					  (RK_CRYPTO_HASH_START << 16));
}
static int rk_ahash_set_data_start(struct rk_crypto_info *dev)
{
	int err;

	err = dev->load_data(dev, dev->sg_src, NULL);
	if (!err)
		crypto_ahash_dma_start(dev);
	return err;
}
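
/*
 * Invoked through dev->start when a queued request is picked up: capture
 * the request state in the device, select the hash mode from the digest
 * size and kick off the first DMA transfer.
 */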
static int rk_ahash_start(struct rk_crypto_info *dev)
{
	struct ahash_request *req = ahash_request_cast(dev->async_req);
	struct crypto_ahash *tfm;
	struct rk_ahash_rctx *rctx;

	dev->total = req->nbytes;
	dev->left_bytes = req->nbytes;
	dev->aligned = 0;
	dev->align_size = 4;
	dev->sg_dst = NULL;
	dev->sg_src = req->src;
	dev->first = req->src;
	dev->src_nents = sg_nents(req->src);
	rctx = ahash_request_ctx(req);
	rctx->mode = 0;

	tfm = crypto_ahash_reqtfm(req);
	switch (crypto_ahash_digestsize(tfm)) {
	case SHA1_DIGEST_SIZE:
		rctx->mode = RK_CRYPTO_HASH_SHA1;
		break;
	case SHA256_DIGEST_SIZE:
		rctx->mode = RK_CRYPTO_HASH_SHA256;
		break;
	case MD5_DIGEST_SIZE:
		rctx->mode = RK_CRYPTO_HASH_MD5;
		break;
	default:
		return -EINVAL;
	}

	rk_ahash_reg_init(dev);
	return rk_ahash_set_data_start(dev);
}
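
/*
 * Invoked through dev->update after each DMA chunk completes: unload the
 * finished chunk, start the next scatterlist entry if data is left,
 * otherwise wait for the engine to finish and copy out the digest.
 */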
static int rk_ahash_crypto_rx(struct rk_crypto_info *dev)
{
	int err = 0;
	struct ahash_request *req = ahash_request_cast(dev->async_req);
	struct crypto_ahash *tfm;

	dev->unload_data(dev);
	if (dev->left_bytes) {
		if (dev->aligned) {
			if (sg_is_last(dev->sg_src)) {
				dev_warn(dev->dev, "[%s:%d], Lack of data\n",
					 __func__, __LINE__);
				err = -ENOMEM;
				goto out_rx;
			}
			dev->sg_src = sg_next(dev->sg_src);
		}
		err = rk_ahash_set_data_start(dev);
	} else {
		/*
		 * The engine needs some time to finish processing after the
		 * last DMA transfer; how long depends on the size of that
		 * last chunk, so a fixed delay cannot be used. Polling every
		 * 10us keeps the loop from hammering the register while still
		 * reacting quickly once the hash is ready.
		 */
		while (!CRYPTO_READ(dev, RK_CRYPTO_HASH_STS))
			udelay(10);

		tfm = crypto_ahash_reqtfm(req);
		memcpy_fromio(req->result, dev->reg + RK_CRYPTO_HASH_DOUT_0,
			      crypto_ahash_digestsize(tfm));
		dev->complete(dev->async_req, 0);
		tasklet_schedule(&dev->queue_task);
	}

out_rx:
	return err;
}
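
/*
 * Per-tfm setup: allocate a bounce page for unaligned data, wire up the
 * device callbacks, allocate the software fallback, size the request
 * context to hold the fallback's request, and enable the clocks.
 */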
static int rk_cra_hash_init(struct crypto_tfm *tfm)
{
	struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm);
	struct rk_crypto_tmp *algt;
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);

	const char *alg_name = crypto_tfm_alg_name(tfm);

	algt = container_of(alg, struct rk_crypto_tmp, alg.hash);

	tctx->dev = algt->dev;
	tctx->dev->addr_vir = (void *)__get_free_page(GFP_KERNEL);
	if (!tctx->dev->addr_vir) {
		dev_err(tctx->dev->dev, "failed to allocate a page for addr_vir\n");
		return -ENOMEM;
	}
	tctx->dev->start = rk_ahash_start;
	tctx->dev->update = rk_ahash_crypto_rx;
	tctx->dev->complete = rk_ahash_crypto_complete;

	/* software fallback used for the init/update/final/finup paths */
	tctx->fallback_tfm = crypto_alloc_ahash(alg_name, 0,
						CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(tctx->fallback_tfm)) {
		dev_err(tctx->dev->dev, "Could not load fallback driver.\n");
		return PTR_ERR(tctx->fallback_tfm);
	}
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct rk_ahash_rctx) +
				 crypto_ahash_reqsize(tctx->fallback_tfm));

	return tctx->dev->enable_clk(tctx->dev);
}
static void rk_cra_hash_exit(struct crypto_tfm *tfm)
{
	struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm);

	free_page((unsigned long)tctx->dev->addr_vir);
	tctx->dev->disable_clk(tctx->dev);
}
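
/*
 * Algorithm templates below are registered with the crypto API by the
 * core rk3288 crypto driver at probe time.
 */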
struct rk_crypto_tmp rk_ahash_sha1 = {
	.type = ALG_TYPE_HASH,
	.alg.hash = {
		.init = rk_ahash_init,
		.update = rk_ahash_update,
		.final = rk_ahash_final,
		.finup = rk_ahash_finup,
		.export = rk_ahash_export,
		.import = rk_ahash_import,
		.digest = rk_ahash_digest,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct sha1_state),
			.base = {
				.cra_name = "sha1",
				.cra_driver_name = "rk-sha1",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct rk_ahash_ctx),
				.cra_alignmask = 3,
				.cra_init = rk_cra_hash_init,
				.cra_exit = rk_cra_hash_exit,
				.cra_module = THIS_MODULE,
			}
		}
	}
};
struct rk_crypto_tmp rk_ahash_sha256 = {
	.type = ALG_TYPE_HASH,
	.alg.hash = {
		.init = rk_ahash_init,
		.update = rk_ahash_update,
		.final = rk_ahash_final,
		.finup = rk_ahash_finup,
		.export = rk_ahash_export,
		.import = rk_ahash_import,
		.digest = rk_ahash_digest,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct sha256_state),
			.base = {
				.cra_name = "sha256",
				.cra_driver_name = "rk-sha256",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct rk_ahash_ctx),
				.cra_alignmask = 3,
				.cra_init = rk_cra_hash_init,
				.cra_exit = rk_cra_hash_exit,
				.cra_module = THIS_MODULE,
			}
		}
	}
};
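
/* MD5 shares SHA-1's 64-byte block size, hence SHA1_BLOCK_SIZE below. */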
struct rk_crypto_tmp rk_ahash_md5 = {
	.type = ALG_TYPE_HASH,
	.alg.hash = {
		.init = rk_ahash_init,
		.update = rk_ahash_update,
		.final = rk_ahash_final,
		.finup = rk_ahash_finup,
		.export = rk_ahash_export,
		.import = rk_ahash_import,
		.digest = rk_ahash_digest,
		.halg = {
			.digestsize = MD5_DIGEST_SIZE,
			.statesize = sizeof(struct md5_state),
			.base = {
				.cra_name = "md5",
				.cra_driver_name = "rk-md5",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct rk_ahash_ctx),
				.cra_alignmask = 3,
				.cra_init = rk_cra_hash_init,
				.cra_exit = rk_cra_hash_exit,
				.cra_module = THIS_MODULE,
			}
		}
	}
};