// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * sun4i-ss-hash.c - hardware cryptographic accelerator for Allwinner A20 SoC
 *
 * Copyright (C) 2013-2015 Corentin LABBE <clabbe.montjoie@gmail.com>
 *
 * This file adds support for MD5 and SHA1.
 *
 * You can find the datasheet in Documentation/arm/sunxi.rst
 */
#include "sun4i-ss.h"
#include <linux/scatterlist.h>

/* This is a totally arbitrary value */
#define SS_TIMEOUT 100
int sun4i_hash_crainit(struct crypto_tfm *tfm)
{
	struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
	struct sun4i_ss_alg_template *algt;
	int err;

	memset(op, 0, sizeof(struct sun4i_tfm_ctx));

	algt = container_of(alg, struct sun4i_ss_alg_template, alg.hash);
	op->ss = algt->ss;

	err = pm_runtime_get_sync(op->ss->dev);
	if (err < 0)
		return err;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct sun4i_req_ctx));
	return 0;
}
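/*
 * sun4i_hash_craexit: release the tfm context
 *
 * Drops the runtime PM reference taken in sun4i_hash_crainit(), so the
 * SS device may be powered down once no hash tfm is alive.
 */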
void sun4i_hash_craexit(struct crypto_tfm *tfm)
{
	struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);

	pm_runtime_put(op->ss->dev);
}
/* sun4i_hash_init: initialize request context */
int sun4i_hash_init(struct ahash_request *areq)
{
	struct sun4i_req_ctx *op = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct sun4i_ss_alg_template *algt;

	memset(op, 0, sizeof(struct sun4i_req_ctx));

	algt = container_of(alg, struct sun4i_ss_alg_template, alg.hash);
	op->mode = algt->mode;

	return 0;
}
int sun4i_hash_export_md5(struct ahash_request *areq, void *out)
{
	struct sun4i_req_ctx *op = ahash_request_ctx(areq);
	struct md5_state *octx = out;
	int i;

	octx->byte_count = op->byte_count + op->len;

	memcpy(octx->block, op->buf, op->len);

	if (op->byte_count) {
		for (i = 0; i < 4; i++)
			octx->hash[i] = op->hash[i];
	} else {
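		/*
		 * Nothing was hashed yet: export the standard MD5 init
		 * values. The SHA1_H0..SHA1_H3 constants happen to be the
		 * same words as the MD5 initial state (0x67452301,
		 * 0xefcdab89, 0x98badcfe, 0x10325476), so reusing them
		 * here is numerically correct.
		 */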
		octx->hash[0] = SHA1_H0;
		octx->hash[1] = SHA1_H1;
		octx->hash[2] = SHA1_H2;
		octx->hash[3] = SHA1_H3;
	}

	return 0;
}
int sun4i_hash_import_md5(struct ahash_request *areq, const void *in)
{
	struct sun4i_req_ctx *op = ahash_request_ctx(areq);
	const struct md5_state *ictx = in;
	int i;

	sun4i_hash_init(areq);

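	/*
	 * Split the imported count: the multiple-of-64 part was already
	 * hashed by the engine (its digest is in ictx->hash), while the
	 * low 6 bits are the tail bytes still waiting in the block buffer.
	 */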
	op->byte_count = ictx->byte_count & ~0x3F;
	op->len = ictx->byte_count & 0x3F;

	memcpy(op->buf, ictx->block, op->len);

	for (i = 0; i < 4; i++)
		op->hash[i] = ictx->hash[i];

	return 0;
}
int sun4i_hash_export_sha1(struct ahash_request *areq, void *out)
{
	struct sun4i_req_ctx *op = ahash_request_ctx(areq);
	struct sha1_state *octx = out;
	int i;

	octx->count = op->byte_count + op->len;

	memcpy(octx->buffer, op->buf, op->len);

	if (op->byte_count) {
		for (i = 0; i < 5; i++)
			octx->state[i] = op->hash[i];
	} else {
		octx->state[0] = SHA1_H0;
		octx->state[1] = SHA1_H1;
		octx->state[2] = SHA1_H2;
		octx->state[3] = SHA1_H3;
		octx->state[4] = SHA1_H4;
	}

	return 0;
}
int sun4i_hash_import_sha1(struct ahash_request *areq, const void *in)
{
	struct sun4i_req_ctx *op = ahash_request_ctx(areq);
	const struct sha1_state *ictx = in;
	int i;

	sun4i_hash_init(areq);

	op->byte_count = ictx->count & ~0x3F;
	op->len = ictx->count & 0x3F;

	memcpy(op->buf, ictx->buffer, op->len);

	for (i = 0; i < 5; i++)
		op->hash[i] = ictx->state[i];

	return 0;
}
#define SS_HASH_UPDATE 1
#define SS_HASH_FINAL 2
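/*
 * op->flags is a bitmask of the two values above: update() sets only
 * SS_HASH_UPDATE, final() only SS_HASH_FINAL, and finup()/digest() set
 * both so that sun4i_hash() processes the data and pads in one pass.
 */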
/*
 * sun4i_hash_update: update hash engine
 *
 * Could be used for both SHA1 and MD5
 * Write data in 32-bit steps and push them to the SS.
 *
 * Since we cannot leave partial data and hash state in the engine,
 * we need to get the hash state at the end of this function.
 * We can get the hash state every 64 bytes.
 *
 * So the first work is to get the number of bytes to write to SS modulo 64.
 * The extra bytes will go to a temporary buffer op->buf storing op->len bytes.
 *
 * So at the beginning of update():
 *	if op->len + areq->nbytes < 64
 *	=> all data will be written to the wait buffer (op->buf) and end=0
 *	if not, write all data from op->buf to the device and set end to
 *	complete to 64 bytes
 *
 * example 1:
 *	update1 60 bytes => op->len=60
 *	update2 60 bytes => need one more word to have 64 bytes
 *	end=4
 *	so write all data from op->buf and one word of the SGs
 *	write the remaining data to op->buf
 *	final state: op->len=56
 */
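/*
 * example 2 (derived from the rules above):
 *	a single update of 130 bytes with an empty wait buffer
 *	=> end = ((130 + 0) / 64) * 64 = 128
 *	128 bytes go to the device, the last 2 bytes go to op->buf
 *	final state: op->len=2
 */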
static int sun4i_hash(struct ahash_request *areq)
{
	/*
	 * i is the total bytes read from SGs, to be compared to areq->nbytes
	 * i is important because we cannot rely on SG length since the sum of
	 * SG->length could be greater than areq->nbytes
	 *
	 * end is the position when we need to stop writing to the device,
	 * to be compared to i
	 *
	 * in_i: advancement in the current SG
	 */
	unsigned int i = 0, end, fill, min_fill, nwait, nbw = 0, j = 0, todo;
	unsigned int in_i = 0;
	u32 spaces, rx_cnt = SS_RX_DEFAULT, bf[32] = {0}, v, ivmode = 0;
	struct sun4i_req_ctx *op = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun4i_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
	struct sun4i_ss_ctx *ss = tfmctx->ss;
	struct scatterlist *in_sg = areq->src;
	struct sg_mapping_iter mi;
	int in_r, err = 0;
	size_t copied = 0;
	__le32 wb = 0;
	dev_dbg(ss->dev, "%s %s bc=%llu len=%u mode=%x wl=%u h0=%0x",
		__func__, crypto_tfm_alg_name(areq->base.tfm),
		op->byte_count, areq->nbytes, op->mode,
		op->len, op->hash[0]);

	if (unlikely(!areq->nbytes) && !(op->flags & SS_HASH_FINAL))
		return 0;
	/* protect against overflow */
	if (unlikely(areq->nbytes > UINT_MAX - op->len)) {
		dev_err(ss->dev, "Cannot process too large request\n");
		return -EINVAL;
	}
	if (op->len + areq->nbytes < 64 && !(op->flags & SS_HASH_FINAL)) {
		/* linearize data to op->buf */
		copied = sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
					    op->buf + op->len, areq->nbytes, 0);
		op->len += copied;
		return 0;
	}
	spin_lock_bh(&ss->slock);

	/*
	 * if some data have been processed before,
	 * we need to restore the partial hash state
	 */
	if (op->byte_count) {
		ivmode = SS_IV_ARBITRARY;
		for (i = 0; i < crypto_ahash_digestsize(tfm) / 4; i++)
			writel(op->hash[i], ss->base + SS_IV0 + i * 4);
	}
	/* Enable the device */
	writel(op->mode | SS_ENABLED | ivmode, ss->base + SS_CTL);
	if (!(op->flags & SS_HASH_UPDATE))
		goto hash_final;
	/* start of handling data */
	if (!(op->flags & SS_HASH_FINAL)) {
		end = ((areq->nbytes + op->len) / 64) * 64 - op->len;
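		/*
		 * e.g. op->len=60 and areq->nbytes=130:
		 * end = ((130 + 60) / 64) * 64 - 60 = 68, so 60 + 68 = 128
		 * bytes (two blocks) go to the device and the remaining
		 * 130 - 68 = 62 bytes wait in op->buf.
		 */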
		if (end > areq->nbytes || areq->nbytes - end > 63) {
			dev_err(ss->dev, "ERROR: Bound error %u %u\n",
				end, areq->nbytes);
			err = -EINVAL;
			goto release_ss;
		}
	} else {
		/* Since we have the flag final, we can go up to modulo 4 */
		if (areq->nbytes < 4)
			end = 0;
		else
			end = ((areq->nbytes + op->len) / 4) * 4 - op->len;
	}
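	/*
	 * e.g. in the final pass, op->len=0 and areq->nbytes=7 gives end=4:
	 * one word is written here and the trailing 3 bytes are folded into
	 * the padding block by the hash_final code below.
	 */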
	/* TODO if SGlen % 4 and !op->len then DMA */
	i = 1;
	while (in_sg && i == 1) {
		if (in_sg->length % 4)
			i = 0;
		in_sg = sg_next(in_sg);
	}
	if (i == 1 && !op->len && areq->nbytes)
		dev_dbg(ss->dev, "We can DMA\n");
	i = 0;
	sg_miter_start(&mi, areq->src, sg_nents(areq->src),
		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
	sg_miter_next(&mi);
	in_i = 0;

	do {
		/*
		 * we need to linearize in two cases:
		 * - the buffer is already used
		 * - the SG does not have enough bytes remaining ( < 4)
		 */
		if (op->len || (mi.length - in_i) < 4) {
			/*
			 * if we have entered here we have two reasons to stop
			 * - the buffer is full
			 * - we reach the end
			 */
			while (op->len < 64 && i < end) {
				/* how many bytes we can read from current SG */
				in_r = min(end - i, 64 - op->len);
				in_r = min_t(size_t, mi.length - in_i, in_r);
				memcpy(op->buf + op->len, mi.addr + in_i, in_r);
				op->len += in_r;
				i += in_r;
				in_i += in_r;
				if (in_i == mi.length) {
					sg_miter_next(&mi);
					in_i = 0;
				}
			}
			if (op->len > 3 && !(op->len % 4)) {
				/* write buf to the device */
				writesl(ss->base + SS_RXFIFO, op->buf,
					op->len / 4);
				op->byte_count += op->len;
				op->len = 0;
			}
		}
		if (mi.length - in_i > 3 && i < end) {
			/* how many bytes we can read from current SG */
			in_r = min_t(size_t, mi.length - in_i, areq->nbytes - i);
			in_r = min_t(size_t, ((mi.length - in_i) / 4) * 4, in_r);
			/* how many bytes we can write to the device */
			todo = min3((u32)(end - i) / 4, rx_cnt, (u32)in_r / 4);
			writesl(ss->base + SS_RXFIFO, mi.addr + in_i, todo);
			op->byte_count += todo * 4;
			i += todo * 4;
			in_i += todo * 4;
			rx_cnt -= todo;
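			/*
			 * rx_cnt tracks the free 32-bit slots of the RX
			 * FIFO; once it is exhausted, re-read the free
			 * space counter from SS_FCSR before writing more.
			 */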
			if (!rx_cnt) {
				spaces = readl(ss->base + SS_FCSR);
				rx_cnt = SS_RXFIFO_SPACES(spaces);
			}
			if (in_i == mi.length) {
				sg_miter_next(&mi);
				in_i = 0;
			}
		}
	} while (i < end);
	/*
	 * Now we have written to the device all that we can,
	 * store the remaining bytes in op->buf
	 */
	if ((areq->nbytes - i) < 64) {
		while (i < areq->nbytes && in_i < mi.length && op->len < 64) {
			/* how many bytes we can read from current SG */
			in_r = min(areq->nbytes - i, 64 - op->len);
			in_r = min_t(size_t, mi.length - in_i, in_r);
			memcpy(op->buf + op->len, mi.addr + in_i, in_r);
			op->len += in_r;
			i += in_r;
			in_i += in_r;
			if (in_i == mi.length) {
				sg_miter_next(&mi);
				in_i = 0;
			}
		}
	}

	sg_miter_stop(&mi);
	/*
	 * End of data process
	 * Now if we have the flag final go to the finalize part
	 * If not, store the partial hash
	 */
	if (op->flags & SS_HASH_FINAL)
		goto hash_final;
	writel(op->mode | SS_ENABLED | SS_DATA_END, ss->base + SS_CTL);

	i = 0;
	do {
		v = readl(ss->base + SS_CTL);
		i++;
	} while (i < SS_TIMEOUT && (v & SS_DATA_END));
	if (unlikely(i >= SS_TIMEOUT)) {
		dev_err_ratelimited(ss->dev,
				    "ERROR: hash end timeout %d>%d ctl=%x len=%u\n",
				    i, SS_TIMEOUT, v, areq->nbytes);
		err = -EIO;
		goto release_ss;
	}
	/*
	 * The datasheet isn't very clear about when to retrieve the digest. The
	 * bit SS_DATA_END is cleared when the engine has processed the data and
	 * when the digest is computed *but* it doesn't mean the digest is
	 * available in the digest registers. Hence the delay to be sure we can
	 * read it.
	 */
	ndelay(1);

	/* save the partial hash so the next update can restore it */
	for (i = 0; i < crypto_ahash_digestsize(tfm) / 4; i++)
		op->hash[i] = readl(ss->base + SS_MD0 + i * 4);

	goto release_ss;
/*
 * hash_final: finalize hashing operation
 *
 * If we have some remaining bytes, we write them.
 * Then ask the SS for finalizing the hashing operation
 *
 * I do not check RX FIFO size in this function since the size is 32
 * after each enabling and this function never writes more than 32 words.
 * If we come from the update part, we cannot have more than
 * 3 remaining bytes to write and SS is fast enough to not care about it.
 */
hash_final:
	/* write the remaining words of the wait buffer */
	if (op->len) {
		nwait = op->len / 4;
		if (nwait) {
			writesl(ss->base + SS_RXFIFO, op->buf, nwait);
			op->byte_count += 4 * nwait;
		}
		nbw = op->len - 4 * nwait;
		if (nbw) {
			wb = cpu_to_le32(*(u32 *)(op->buf + nwait * 4));
			/* keep only the nbw valid tail bytes */
			wb &= GENMASK((nbw * 8) - 1, 0);

			op->byte_count += nbw;
		}
	}
	/* append the 0x80 padding byte right after the last data byte */
	wb |= ((1 << 7) << (nbw * 8));
	bf[j++] = le32_to_cpu(wb);
	/*
	 * number of spaces to pad to obtain 64 bytes, minus 8 (the size)
	 * minus 4 (the final '1')
	 * I take the operations from other MD5/SHA1 implementations
	 */
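	/*
	 * Worked example: op->byte_count=3, so nbw=3 and one word holding
	 * the 3 tail bytes plus 0x80 is already queued in bf (j=1).
	 * fill = 64 - 3 = 61, min_fill = 8 (nbw != 0),
	 * j += (61 - 8) / 4 = 13 zero words, then 8 bytes of bit length:
	 * 4 + 52 + 8 = 61 bytes pad the 3 data bytes to a full 64-byte block.
	 */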
	/* last block size */
	fill = 64 - (op->byte_count % 64);
	min_fill = 2 * sizeof(u32) + (nbw ? 0 : sizeof(u32));

	/* if we can't fill all data, jump to the next 64 block */
	if (fill < min_fill)
		fill += 64;

	j += (fill - min_fill) / sizeof(u32);
	/* write the length of data: big-endian for SHA1, little-endian for MD5 */
	if (op->mode == SS_OP_SHA1) {
		__be64 *bits = (__be64 *)&bf[j];
		*bits = cpu_to_be64(op->byte_count << 3);
		j += 2;
	} else {
		__le64 *bits = (__le64 *)&bf[j];
		*bits = cpu_to_le64(op->byte_count << 3);
		j += 2;
	}
	writesl(ss->base + SS_RXFIFO, bf, j);
	/* Tell the SS to stop the hashing */
	writel(op->mode | SS_ENABLED | SS_DATA_END, ss->base + SS_CTL);
	/*
	 * Wait for SS to finish the hash.
	 * The timeout could happen only in case of bad overclocking
	 * or driver bug.
	 */
	i = 0;
	do {
		v = readl(ss->base + SS_CTL);
		i++;
	} while (i < SS_TIMEOUT && (v & SS_DATA_END));
	if (unlikely(i >= SS_TIMEOUT)) {
		dev_err_ratelimited(ss->dev,
				    "ERROR: hash end timeout %d>%d ctl=%x len=%u\n",
				    i, SS_TIMEOUT, v, areq->nbytes);
		err = -EIO;
		goto release_ss;
	}
	/*
	 * The datasheet isn't very clear about when to retrieve the digest. The
	 * bit SS_DATA_END is cleared when the engine has processed the data and
	 * when the digest is computed *but* it doesn't mean the digest is
	 * available in the digest registers. Hence the delay to be sure we can
	 * read it.
	 */
	ndelay(1);
	/* Get the hash from the device */
	if (op->mode == SS_OP_SHA1) {
		for (i = 0; i < 5; i++) {
			/* some variants expose the SHA1 digest words in BE */
			if (ss->variant->sha1_in_be)
				v = cpu_to_le32(readl(ss->base + SS_MD0 + i * 4));
			else
				v = cpu_to_be32(readl(ss->base + SS_MD0 + i * 4));
			memcpy(areq->result + i * 4, &v, 4);
		}
	} else {
		for (i = 0; i < 4; i++) {
			v = cpu_to_le32(readl(ss->base + SS_MD0 + i * 4));
			memcpy(areq->result + i * 4, &v, 4);
		}
	}
release_ss:
	writel(0, ss->base + SS_CTL);
	spin_unlock_bh(&ss->slock);
	return err;
}
int sun4i_hash_final(struct ahash_request *areq)
{
	struct sun4i_req_ctx *op = ahash_request_ctx(areq);

	op->flags = SS_HASH_FINAL;
	return sun4i_hash(areq);
}
int sun4i_hash_update(struct ahash_request *areq)
{
	struct sun4i_req_ctx *op = ahash_request_ctx(areq);

	op->flags = SS_HASH_UPDATE;
	return sun4i_hash(areq);
}
/* sun4i_hash_finup: finalize hashing operation after an update */
int sun4i_hash_finup(struct ahash_request *areq)
{
	struct sun4i_req_ctx *op = ahash_request_ctx(areq);

	op->flags = SS_HASH_UPDATE | SS_HASH_FINAL;
	return sun4i_hash(areq);
}
/* combo of init/update/final functions */
int sun4i_hash_digest(struct ahash_request *areq)
{
	int err;
	struct sun4i_req_ctx *op = ahash_request_ctx(areq);

	err = sun4i_hash_init(areq);
	if (err)
		return err;

	op->flags = SS_HASH_UPDATE | SS_HASH_FINAL;
	return sun4i_hash(areq);
}