// SPDX-License-Identifier: GPL-2.0
/*
 * sun8i-ce-hash.c - hardware cryptographic offloader for
 * Allwinner H3/A64/H5/H2+/H6/R40 SoC
 *
 * Copyright (C) 2015-2020 Corentin Labbe <clabbe@baylibre.com>
 *
 * This file adds support for MD5 and SHA1/SHA224/SHA256/SHA384/SHA512.
 *
 * The datasheet can be found in Documentation/arm/sunxi.rst
 */
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <crypto/internal/hash.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/md5.h>
#include "sun8i-ce.h"
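
/*
 * TFM-level init: bind the transform to its CE device, register the
 * crypto_engine callback (sun8i_ce_hash_run) and allocate a software
 * fallback ahash with the same algorithm name.  The fallback handles
 * requests the engine cannot process (see sun8i_ce_hash_need_fallback())
 * and, since export()/import() below simply delegate to it, the
 * advertised statesize is raised to at least the fallback's value.
 */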
int sun8i_ce_hash_crainit(struct crypto_tfm *tfm)
{
	struct sun8i_ce_hash_tfm_ctx *op = crypto_tfm_ctx(tfm);
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
	struct sun8i_ce_alg_template *algt;
	int err;

	memset(op, 0, sizeof(struct sun8i_ce_hash_tfm_ctx));

	algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
	op->ce = algt->ce;

	op->enginectx.op.do_one_request = sun8i_ce_hash_run;
	op->enginectx.op.prepare_request = NULL;
	op->enginectx.op.unprepare_request = NULL;

	/* FALLBACK */
	op->fallback_tfm = crypto_alloc_ahash(crypto_tfm_alg_name(tfm), 0,
					      CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(op->fallback_tfm)) {
		dev_err(algt->ce->dev, "Fallback driver could not be loaded\n");
		return PTR_ERR(op->fallback_tfm);
	}

	if (algt->alg.hash.halg.statesize < crypto_ahash_statesize(op->fallback_tfm))
		algt->alg.hash.halg.statesize = crypto_ahash_statesize(op->fallback_tfm);

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct sun8i_ce_hash_reqctx) +
				 crypto_ahash_reqsize(op->fallback_tfm));

	dev_info(op->ce->dev, "Fallback for %s is %s\n",
		 crypto_tfm_alg_driver_name(tfm),
		 crypto_tfm_alg_driver_name(&op->fallback_tfm->base));
	err = pm_runtime_get_sync(op->ce->dev);
	if (err < 0)
		goto error_pm;
	return 0;
error_pm:
	pm_runtime_put_noidle(op->ce->dev);
	crypto_free_ahash(op->fallback_tfm);
	return err;
}

void sun8i_ce_hash_craexit(struct crypto_tfm *tfm)
{
	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_tfm_ctx(tfm);

	crypto_free_ahash(tfmctx->fallback_tfm);
	pm_runtime_put_sync_suspend(tfmctx->ce->dev);
}
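
/*
 * init/update/final/finup/export/import are not accelerated: they are
 * forwarded verbatim to the software fallback.  Only digest() (further
 * below) may run on the Crypto Engine, as the hardware appears to only
 * support one-shot hashing of a complete, well-aligned request.
 */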
int sun8i_ce_hash_init(struct ahash_request *areq)
{
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	memset(rctx, 0, sizeof(struct sun8i_ce_hash_reqctx));

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_init(&rctx->fallback_req);
}

int sun8i_ce_hash_export(struct ahash_request *areq, void *out)
{
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_export(&rctx->fallback_req, out);
}

int sun8i_ce_hash_import(struct ahash_request *areq, const void *in)
{
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_import(&rctx->fallback_req, in);
}

int sun8i_ce_hash_final(struct ahash_request *areq)
{
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct sun8i_ce_alg_template *algt;
#endif

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags &
				       CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.result = areq->result;

#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
	algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
	algt->stat_fb++;
#endif

	return crypto_ahash_final(&rctx->fallback_req);
}

int sun8i_ce_hash_update(struct ahash_request *areq)
{
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags &
				       CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = areq->nbytes;
	rctx->fallback_req.src = areq->src;

	return crypto_ahash_update(&rctx->fallback_req);
}

int sun8i_ce_hash_finup(struct ahash_request *areq)
{
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct sun8i_ce_alg_template *algt;
#endif

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags &
				       CRYPTO_TFM_REQ_MAY_SLEEP;

	rctx->fallback_req.nbytes = areq->nbytes;
	rctx->fallback_req.src = areq->src;
	rctx->fallback_req.result = areq->result;
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
	algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
	algt->stat_fb++;
#endif

	return crypto_ahash_finup(&rctx->fallback_req);
}
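
/*
 * Run a complete digest on the software fallback.  Used whenever the
 * request does not fit the engine's constraints (see
 * sun8i_ce_hash_need_fallback() below).
 */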
static int sun8i_ce_hash_digest_fb(struct ahash_request *areq)
{
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct sun8i_ce_alg_template *algt;
#endif

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags &
				       CRYPTO_TFM_REQ_MAY_SLEEP;

	rctx->fallback_req.nbytes = areq->nbytes;
	rctx->fallback_req.src = areq->src;
	rctx->fallback_req.result = areq->result;
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
	algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
	algt->stat_fb++;
#endif

	return crypto_ahash_digest(&rctx->fallback_req);
}
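
/*
 * Decide whether a request must go to the software fallback.  The task
 * descriptor expresses lengths in 32-bit words and has a fixed number of
 * source slots, so the engine path requires:
 *  - a non-empty request,
 *  - at most MAX_SG - 1 source SG entries (one slot is reserved for the
 *    padding block),
 *  - every SG entry sized as a multiple of 4 bytes and 4-byte aligned.
 */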
static bool sun8i_ce_hash_need_fallback(struct ahash_request *areq)
{
	struct scatterlist *sg;

	if (areq->nbytes == 0)
		return true;
	/* we need to reserve one SG for the padding */
	if (sg_nents(areq->src) > MAX_SG - 1)
		return true;
	sg = areq->src;
	while (sg) {
		if (sg->length % 4 || !IS_ALIGNED(sg->offset, sizeof(u32)))
			return true;
		sg = sg_next(sg);
	}
	return false;
}
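
/*
 * Entry point for digest(): re-check the hardware constraints, pick a
 * flow (channel) on the CE and queue the request on that flow's
 * crypto_engine; the actual work is done asynchronously in
 * sun8i_ce_hash_run().
 */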
int sun8i_ce_hash_digest(struct ahash_request *areq)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct sun8i_ce_alg_template *algt;
	struct sun8i_ce_dev *ce;
	struct crypto_engine *engine;
	struct scatterlist *sg;
	int nr_sgs, e, i;

	if (sun8i_ce_hash_need_fallback(areq))
		return sun8i_ce_hash_digest_fb(areq);

	nr_sgs = sg_nents(areq->src);
	if (nr_sgs > MAX_SG - 1)
		return sun8i_ce_hash_digest_fb(areq);

	for_each_sg(areq->src, sg, nr_sgs, i) {
		if (sg->length % 4 || !IS_ALIGNED(sg->offset, sizeof(u32)))
			return sun8i_ce_hash_digest_fb(areq);
	}

	algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
	ce = algt->ce;

	e = sun8i_ce_get_engine_number(ce);
	rctx->flow = e;
	engine = ce->chanlist[e].engine;

	return crypto_transfer_hash_request_to_engine(engine, areq);
}
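
/*
 * crypto_engine callback: build and run one CE task for the request.
 * The source scatterlist is mapped into the task's source slots, a final
 * slot carries the MD5/SHA padding (0x80 marker, zero fill and the
 * message bit length) built in a bounce buffer, and the destination slot
 * receives the raw digest, which is then copied back to areq->result.
 */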
int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
{
	struct ahash_request *areq = container_of(breq, struct ahash_request, base);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct sun8i_ce_alg_template *algt;
	struct sun8i_ce_dev *ce;
	struct sun8i_ce_flow *chan;
	struct ce_task *cet;
	struct scatterlist *sg;
	int nr_sgs, flow, err;
	unsigned int len;
	u32 common;
	u64 byte_count;
	__le32 *bf;
	void *buf = NULL;
	int j, i, todo;
	int nbw = 0;
	u64 fill, min_fill;
	__be64 *bebits;
	__le64 *lebits;
	void *result = NULL;
	u64 bs;
	int digestsize;
	dma_addr_t addr_res, addr_pad;

	algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
	ce = algt->ce;

	bs = algt->alg.hash.halg.base.cra_blocksize;
	digestsize = algt->alg.hash.halg.digestsize;
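	/*
	 * SHA-224 and SHA-384 are truncated forms: the engine presumably
	 * writes out the full SHA-256/SHA-512 sized state, so size the
	 * result buffer for the parent digest and only copy
	 * halg.digestsize bytes back to the caller at the end.
	 */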
	if (digestsize == SHA224_DIGEST_SIZE)
		digestsize = SHA256_DIGEST_SIZE;
	if (digestsize == SHA384_DIGEST_SIZE)
		digestsize = SHA512_DIGEST_SIZE;

	/* the padding could be up to two blocks. */
	buf = kzalloc(bs * 2, GFP_KERNEL | GFP_DMA);
	if (!buf) {
		err = -ENOMEM;
		goto theend;
	}
	bf = (__le32 *)buf;

	result = kzalloc(digestsize, GFP_KERNEL | GFP_DMA);
	if (!result) {
		err = -ENOMEM;
		goto theend;
	}

	flow = rctx->flow;
	chan = &ce->chanlist[flow];

#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
	algt->stat_req++;
#endif
	dev_dbg(ce->dev, "%s %s len=%d\n", __func__, crypto_tfm_alg_name(areq->base.tfm), areq->nbytes);

	cet = chan->tl;
	memset(cet, 0, sizeof(struct ce_task));

	cet->t_id = cpu_to_le32(flow);
	common = ce->variant->alg_hash[algt->ce_algo_id];
	common |= CE_COMM_INT;
	cet->t_common_ctl = cpu_to_le32(common);

	cet->t_sym_ctl = 0;
	cet->t_asym_ctl = 0;
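
	/*
	 * Map the source scatterlist for the device and fill the task's
	 * source slots; hardware lengths are in 32-bit words, hence the
	 * division by 4 (need_fallback() already guaranteed 4-byte
	 * multiples).
	 */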
	nr_sgs = dma_map_sg(ce->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE);
	if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
		dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
		err = -EINVAL;
		goto theend;
	}

	len = areq->nbytes;
	for_each_sg(areq->src, sg, nr_sgs, i) {
		cet->t_src[i].addr = cpu_to_le32(sg_dma_address(sg));
		todo = min(len, sg_dma_len(sg));
		cet->t_src[i].len = cpu_to_le32(todo / 4);
		len -= todo;
	}
	if (len > 0) {
		dev_err(ce->dev, "remaining len %d\n", len);
		err = -EINVAL;
		goto theend;
	}
	addr_res = dma_map_single(ce->dev, result, digestsize, DMA_FROM_DEVICE);
	cet->t_dst[0].addr = cpu_to_le32(addr_res);
	cet->t_dst[0].len = cpu_to_le32(digestsize / 4);
	if (dma_mapping_error(ce->dev, addr_res)) {
		dev_err(ce->dev, "DMA map dest\n");
		err = -EINVAL;
		goto theend;
	}
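
	/*
	 * Build the MD5/SHA padding in the bounce buffer: a 0x80 marker
	 * word, zero words up to the last block, then the message length
	 * in bits (little-endian 64-bit for MD5, big-endian 64-bit for
	 * SHA-1/224/256 and big-endian 128-bit for SHA-384/512).  j counts
	 * the padding in 32-bit words.
	 *
	 * Illustrative example (SHA-1, bs = 64): for byte_count = 20,
	 * fill = 44 and min_fill = 12, so 8 zero words are skipped and the
	 * bit length lands in bf[9..10]; 20 + 11 * 4 = 64 bytes total, one
	 * full block.
	 */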
	byte_count = areq->nbytes;
	j = 0;
	bf[j++] = cpu_to_le32(0x80);

	if (bs == 64) {
		fill = 64 - (byte_count % 64);
		min_fill = 2 * sizeof(u32) + (nbw ? 0 : sizeof(u32));
	} else {
		fill = 128 - (byte_count % 128);
		min_fill = 4 * sizeof(u32) + (nbw ? 0 : sizeof(u32));
	}

	if (fill < min_fill)
		fill += bs;

	j += (fill - min_fill) / sizeof(u32);

	switch (algt->ce_algo_id) {
	case CE_ID_HASH_MD5:
		lebits = (__le64 *)&bf[j];
		*lebits = cpu_to_le64(byte_count << 3);
		j += 2;
		break;
	case CE_ID_HASH_SHA1:
	case CE_ID_HASH_SHA224:
	case CE_ID_HASH_SHA256:
		bebits = (__be64 *)&bf[j];
		*bebits = cpu_to_be64(byte_count << 3);
		j += 2;
		break;
	case CE_ID_HASH_SHA384:
	case CE_ID_HASH_SHA512:
		bebits = (__be64 *)&bf[j];
		*bebits = cpu_to_be64(byte_count >> 61);
		j += 2;
		bebits = (__be64 *)&bf[j];
		*bebits = cpu_to_be64(byte_count << 3);
		j += 2;
		break;
	}
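
	/*
	 * The padding goes in as one extra source slot right after the
	 * data; this is the slot reserved by the MAX_SG - 1 check in
	 * sun8i_ce_hash_need_fallback().
	 */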
	addr_pad = dma_map_single(ce->dev, buf, j * 4, DMA_TO_DEVICE);
	cet->t_src[i].addr = cpu_to_le32(addr_pad);
	cet->t_src[i].len = cpu_to_le32(j);
	if (dma_mapping_error(ce->dev, addr_pad)) {
		dev_err(ce->dev, "DMA error on padding SG\n");
		err = -EINVAL;
		goto theend;
	}
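
	/*
	 * Total transfer length (payload plus padding): some variants
	 * program it in bits, the others in 32-bit words, as indicated by
	 * the hash_t_dlen_in_bits variant flag.
	 */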
	if (ce->variant->hash_t_dlen_in_bits)
		cet->t_dlen = cpu_to_le32((areq->nbytes + j * 4) * 8);
	else
		cet->t_dlen = cpu_to_le32(areq->nbytes / 4 + j);

	chan->timeout = areq->nbytes;

	err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(areq->base.tfm));

	dma_unmap_single(ce->dev, addr_pad, j * 4, DMA_TO_DEVICE);
	dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
	dma_unmap_single(ce->dev, addr_res, digestsize, DMA_FROM_DEVICE);

	memcpy(areq->result, result, algt->alg.hash.halg.digestsize);
theend:
	kfree(buf);
	kfree(result);
	crypto_finalize_hash_request(engine, breq, err);
	return 0;
}