/*
 * Support for SAHARA cryptographic accelerator.
 *
 * Copyright (c) 2014 Steffen Trumtrar <s.trumtrar@pengutronix.de>
 * Copyright (c) 2013 Vista Silicon S.L.
 * Author: Javier Martin <javier.martin@vista-silicon.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * Based on omap-aes.c and tegra-aes.c
 */
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>

#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#define SHA_BUFFER_LEN		PAGE_SIZE
#define SAHARA_MAX_SHA_BLOCK_SIZE	SHA256_BLOCK_SIZE

#define SAHARA_NAME "sahara"
#define SAHARA_VERSION_3	3
#define SAHARA_VERSION_4	4
#define SAHARA_TIMEOUT_MS	1000
#define SAHARA_MAX_HW_DESC	2
#define SAHARA_MAX_HW_LINK	20

#define FLAGS_MODE_MASK		0x000f
#define FLAGS_ENCRYPT		BIT(0)
#define FLAGS_CBC		BIT(1)
#define FLAGS_NEW_KEY		BIT(3)

#define SAHARA_HDR_BASE			0x00800000
#define SAHARA_HDR_SKHA_ALG_AES		0
#define SAHARA_HDR_SKHA_OP_ENC		(1 << 2)
#define SAHARA_HDR_SKHA_MODE_ECB	(0 << 3)
#define SAHARA_HDR_SKHA_MODE_CBC	(1 << 3)
#define SAHARA_HDR_FORM_DATA		(5 << 16)
#define SAHARA_HDR_FORM_KEY		(8 << 16)
#define SAHARA_HDR_LLO			(1 << 24)
#define SAHARA_HDR_CHA_SKHA		(1 << 28)
#define SAHARA_HDR_CHA_MDHA		(2 << 28)
#define SAHARA_HDR_PARITY_BIT		(1 << 31)

#define SAHARA_HDR_MDHA_SET_MODE_MD_KEY	0x20880000
#define SAHARA_HDR_MDHA_SET_MODE_HASH	0x208D0000
#define SAHARA_HDR_MDHA_HASH		0xA0850000
#define SAHARA_HDR_MDHA_STORE_DIGEST	0x20820000
#define SAHARA_HDR_MDHA_ALG_SHA1	0
#define SAHARA_HDR_MDHA_ALG_MD5		1
#define SAHARA_HDR_MDHA_ALG_SHA256	2
#define SAHARA_HDR_MDHA_ALG_SHA224	3
#define SAHARA_HDR_MDHA_PDATA		(1 << 2)
#define SAHARA_HDR_MDHA_HMAC		(1 << 3)
#define SAHARA_HDR_MDHA_INIT		(1 << 5)
#define SAHARA_HDR_MDHA_IPAD		(1 << 6)
#define SAHARA_HDR_MDHA_OPAD		(1 << 7)
#define SAHARA_HDR_MDHA_SWAP		(1 << 8)
#define SAHARA_HDR_MDHA_MAC_FULL	(1 << 9)
#define SAHARA_HDR_MDHA_SSL		(1 << 10)

/* SAHARA can only process one request at a time */
#define SAHARA_QUEUE_LENGTH	1

#define SAHARA_REG_VERSION	0x00
#define SAHARA_REG_DAR		0x04
#define SAHARA_REG_CONTROL	0x08
#define		SAHARA_CONTROL_SET_THROTTLE(x)	(((x) & 0xff) << 24)
#define		SAHARA_CONTROL_SET_MAXBURST(x)	(((x) & 0xff) << 16)
#define		SAHARA_CONTROL_RNG_AUTORSD	(1 << 7)
#define		SAHARA_CONTROL_ENABLE_INT	(1 << 4)
#define SAHARA_REG_CMD		0x0C
#define		SAHARA_CMD_RESET		(1 << 0)
#define		SAHARA_CMD_CLEAR_INT		(1 << 8)
#define		SAHARA_CMD_CLEAR_ERR		(1 << 9)
#define		SAHARA_CMD_SINGLE_STEP		(1 << 10)
#define		SAHARA_CMD_MODE_BATCH		(1 << 16)
#define		SAHARA_CMD_MODE_DEBUG		(1 << 18)
#define	SAHARA_REG_STATUS	0x10
#define		SAHARA_STATUS_GET_STATE(x)	((x) & 0x7)
#define			SAHARA_STATE_IDLE	0
#define			SAHARA_STATE_BUSY	1
#define			SAHARA_STATE_ERR	2
#define			SAHARA_STATE_FAULT	3
#define			SAHARA_STATE_COMPLETE	4
#define			SAHARA_STATE_COMP_FLAG	(1 << 2)
#define		SAHARA_STATUS_DAR_FULL		(1 << 3)
#define		SAHARA_STATUS_ERROR		(1 << 4)
#define		SAHARA_STATUS_SECURE		(1 << 5)
#define		SAHARA_STATUS_FAIL		(1 << 6)
#define		SAHARA_STATUS_INIT		(1 << 7)
#define		SAHARA_STATUS_RNG_RESEED	(1 << 8)
#define		SAHARA_STATUS_ACTIVE_RNG	(1 << 9)
#define		SAHARA_STATUS_ACTIVE_MDHA	(1 << 10)
#define		SAHARA_STATUS_ACTIVE_SKHA	(1 << 11)
#define		SAHARA_STATUS_MODE_BATCH	(1 << 16)
#define		SAHARA_STATUS_MODE_DEDICATED	(1 << 17)
#define		SAHARA_STATUS_MODE_DEBUG	(1 << 18)
#define		SAHARA_STATUS_GET_ISTATE(x)	(((x) >> 24) & 0xff)
#define SAHARA_REG_ERRSTATUS	0x14
#define		SAHARA_ERRSTATUS_GET_SOURCE(x)	((x) & 0xf)
#define			SAHARA_ERRSOURCE_CHA	14
#define			SAHARA_ERRSOURCE_DMA	15
#define		SAHARA_ERRSTATUS_DMA_DIR	(1 << 8)
#define		SAHARA_ERRSTATUS_GET_DMASZ(x)	(((x) >> 9) & 0x3)
#define		SAHARA_ERRSTATUS_GET_DMASRC(x)	(((x) >> 13) & 0x7)
#define		SAHARA_ERRSTATUS_GET_CHASRC(x)	(((x) >> 16) & 0xfff)
#define		SAHARA_ERRSTATUS_GET_CHAERR(x)	(((x) >> 28) & 0x3)
#define SAHARA_REG_FADDR	0x18
#define SAHARA_REG_CDAR		0x1C
#define SAHARA_REG_IDAR		0x20
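
/*
 * Sketch of the programming model used below (derived from this file, not
 * from the SAHARA reference manual): the driver keeps two hardware
 * descriptors and up to SAHARA_MAX_HW_LINK link entries in DMA-coherent
 * memory.  For AES, descriptor 0 loads the key (and IV in CBC mode) and
 * descriptor 1 points at link chains describing the input and output
 * scatterlists.  The engine is started by writing the physical address of
 * the first descriptor to SAHARA_REG_DAR; completion or failure is then
 * reported through SAHARA_REG_STATUS/SAHARA_REG_ERRSTATUS in the IRQ
 * handler.
 */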
struct sahara_hw_desc {
	u32	hdr;
	u32	len1;
	u32	p1;
	u32	len2;
	u32	p2;
	u32	next;
};

struct sahara_hw_link {
	u32	len;
	u32	p;
	u32	next;
};

struct sahara_ctx {
	unsigned long flags;

	/* AES-specific context */
	int keylen;
	u8 key[AES_KEYSIZE_128];
	struct crypto_ablkcipher *fallback;

	/* SHA-specific context */
	struct crypto_shash *shash_fallback;
};

struct sahara_aes_reqctx {
	unsigned long mode;
};
/**
 * struct sahara_sha_reqctx - private data per request
 * @buf: holds data for requests smaller than block_size
 * @rembuf: used to prepare one block_size-aligned request
 * @context: hw-specific context for request. Digest is extracted from this
 * @mode: specifies what type of hw-descriptor needs to be built
 * @digest_size: length of digest for this request
 * @context_size: length of hw-context for this request.
 *                Always digest_size + 4
 * @buf_cnt: number of bytes saved in buf
 * @sg_in_idx: number of hw links
 * @in_sg: scatterlist for input data
 * @in_sg_chain: scatterlists for chained input data
 * @in_sg_chained: specifies if chained scatterlists are used or not
 * @total: total number of bytes for transfer
 * @last: is this the last block
 * @first: is this the first block
 * @active: inside a transfer
 */
struct sahara_sha_reqctx {
	u8			buf[SAHARA_MAX_SHA_BLOCK_SIZE];
	u8			rembuf[SAHARA_MAX_SHA_BLOCK_SIZE];
	u8			context[SHA256_DIGEST_SIZE + 4];
	struct mutex		mutex;
	unsigned int		mode;
	unsigned int		digest_size;
	unsigned int		context_size;
	unsigned int		buf_cnt;
	unsigned int		sg_in_idx;
	struct scatterlist	*in_sg;
	struct scatterlist	in_sg_chain[2];
	bool			in_sg_chained;
	size_t			total;
	unsigned int		last;
	unsigned int		first;
	unsigned int		active;
};
struct sahara_dev {
	struct device		*device;
	unsigned int		version;
	void __iomem		*regs_base;
	struct clk		*clk_ipg;
	struct clk		*clk_ahb;
	struct mutex		queue_mutex;
	struct task_struct	*kthread;
	struct completion	dma_completion;

	struct sahara_ctx	*ctx;
	spinlock_t		lock;
	struct crypto_queue	queue;
	unsigned long		flags;

	struct sahara_hw_desc	*hw_desc[SAHARA_MAX_HW_DESC];
	dma_addr_t		hw_phys_desc[SAHARA_MAX_HW_DESC];

	u8			*key_base;
	dma_addr_t		key_phys_base;

	u8			*iv_base;
	dma_addr_t		iv_phys_base;

	u8			*context_base;
	dma_addr_t		context_phys_base;

	struct sahara_hw_link	*hw_link[SAHARA_MAX_HW_LINK];
	dma_addr_t		hw_phys_link[SAHARA_MAX_HW_LINK];

	size_t			total;
	struct scatterlist	*in_sg;
	unsigned int		nb_in_sg;
	struct scatterlist	*out_sg;
	unsigned int		nb_out_sg;

	u32			error;
};
static struct sahara_dev *dev_ptr;
static inline void sahara_write(struct sahara_dev *dev, u32 data, u32 reg)
{
	writel(data, dev->regs_base + reg);
}

static inline unsigned int sahara_read(struct sahara_dev *dev, u32 reg)
{
	return readl(dev->regs_base + reg);
}
static u32 sahara_aes_key_hdr(struct sahara_dev *dev)
{
	u32 hdr = SAHARA_HDR_BASE | SAHARA_HDR_SKHA_ALG_AES |
		SAHARA_HDR_FORM_KEY | SAHARA_HDR_LLO |
		SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;

	if (dev->flags & FLAGS_CBC) {
		hdr |= SAHARA_HDR_SKHA_MODE_CBC;
		hdr ^= SAHARA_HDR_PARITY_BIT;
	}

	if (dev->flags & FLAGS_ENCRYPT) {
		hdr |= SAHARA_HDR_SKHA_OP_ENC;
		hdr ^= SAHARA_HDR_PARITY_BIT;
	}

	return hdr;
}
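
/*
 * Note on the parity juggling above: as the hweight_long() check in
 * sahara_sha_init_hdr() below suggests, descriptor headers are kept at odd
 * parity.  The base header value already has an odd number of bits set, so
 * every time a single mode bit (CBC, encrypt) is ORed in, the parity bit is
 * toggled to keep the total bit count odd.
 */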
static u32 sahara_aes_data_link_hdr(struct sahara_dev *dev)
{
	return SAHARA_HDR_BASE | SAHARA_HDR_FORM_DATA |
	       SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
}
/* Count the scatterlist entries needed to cover 'total' bytes */
static int sahara_sg_length(struct scatterlist *sg,
			    unsigned int total)
{
	struct scatterlist *sg_list = sg;
	unsigned int len;
	int sg_nb = 0;

	while (total && sg_list) {
		len = min(sg_list->length, total);

		sg_nb++;
		total -= len;

		sg_list = sg_next(sg_list);
	}

	return sg_nb;
}
static char *sahara_err_src[16] = {
	"No error",
	"Header error",
	"Descriptor length error",
	"Descriptor length or pointer error",
	"Link length error",
	"Link pointer error",
	"Input buffer error",
	"Output buffer error",
	"Output buffer starvation",
	"Internal state fault",
	"General descriptor problem",
	"Reserved",
	"Descriptor address error",
	"Link address error",
	"CHA error",
	"DMA error"
};
static char *sahara_err_dmasize[4] = {
	"Byte transfer",
	"Half-word transfer",
	"Word transfer",
	"Reserved"
};
static char *sahara_err_dmasrc[8] = {
	"No error",
	"AHB bus error",
	"Internal IP bus error",
	"Parity error",
	"DMA crosses 256 byte boundary",
	"DMA is busy",
	"Reserved",
	"DMA HW error"
};
static char *sahara_cha_errsrc[12] = {
	"Input buffer non-empty",
	"Illegal address",
	"Illegal mode",
	"Illegal data size",
	"Illegal key size",
	"Write during processing",
	"CTX read during processing",
	"HW error",
	"Input buffer disabled/underflow",
	"Output buffer disabled/overflow",
	"DES key parity error",
	"Reserved"
};
static char *sahara_cha_err[4] = { "No error", "SKHA", "MDHA", "RNG" };
static void sahara_decode_error(struct sahara_dev *dev, unsigned int error)
{
	u8 source = SAHARA_ERRSTATUS_GET_SOURCE(error);
	u16 chasrc = ffs(SAHARA_ERRSTATUS_GET_CHASRC(error));

	dev_err(dev->device, "%s: Error Register = 0x%08x\n", __func__, error);

	dev_err(dev->device, " - %s.\n", sahara_err_src[source]);

	if (source == SAHARA_ERRSOURCE_DMA) {
		if (error & SAHARA_ERRSTATUS_DMA_DIR)
			dev_err(dev->device, " * DMA read.\n");
		else
			dev_err(dev->device, " * DMA write.\n");

		dev_err(dev->device, " * %s.\n",
			sahara_err_dmasize[SAHARA_ERRSTATUS_GET_DMASZ(error)]);
		dev_err(dev->device, " * %s.\n",
			sahara_err_dmasrc[SAHARA_ERRSTATUS_GET_DMASRC(error)]);
	} else if (source == SAHARA_ERRSOURCE_CHA) {
		dev_err(dev->device, " * %s.\n",
			sahara_cha_errsrc[chasrc]);
		dev_err(dev->device, " * %s.\n",
			sahara_cha_err[SAHARA_ERRSTATUS_GET_CHAERR(error)]);
	}
	dev_err(dev->device, "\n");
}
static char *sahara_state[4] = { "Idle", "Busy", "Error", "HW Fault" };

static void sahara_decode_status(struct sahara_dev *dev, unsigned int status)
{
	u8 state;

	if (!IS_ENABLED(DEBUG))
		return;

	state = SAHARA_STATUS_GET_STATE(status);

	dev_dbg(dev->device, "%s: Status Register = 0x%08x\n",
		__func__, status);

	dev_dbg(dev->device, " - State = %d:\n", state);
	if (state & SAHARA_STATE_COMP_FLAG)
		dev_dbg(dev->device, " * Descriptor completed. IRQ pending.\n");

	dev_dbg(dev->device, " * %s.\n",
		sahara_state[state & ~SAHARA_STATE_COMP_FLAG]);

	if (status & SAHARA_STATUS_DAR_FULL)
		dev_dbg(dev->device, " - DAR Full.\n");
	if (status & SAHARA_STATUS_ERROR)
		dev_dbg(dev->device, " - Error.\n");
	if (status & SAHARA_STATUS_SECURE)
		dev_dbg(dev->device, " - Secure.\n");
	if (status & SAHARA_STATUS_FAIL)
		dev_dbg(dev->device, " - Fail.\n");
	if (status & SAHARA_STATUS_RNG_RESEED)
		dev_dbg(dev->device, " - RNG Reseed Request.\n");
	if (status & SAHARA_STATUS_ACTIVE_RNG)
		dev_dbg(dev->device, " - RNG Active.\n");
	if (status & SAHARA_STATUS_ACTIVE_MDHA)
		dev_dbg(dev->device, " - MDHA Active.\n");
	if (status & SAHARA_STATUS_ACTIVE_SKHA)
		dev_dbg(dev->device, " - SKHA Active.\n");

	if (status & SAHARA_STATUS_MODE_BATCH)
		dev_dbg(dev->device, " - Batch Mode.\n");
	else if (status & SAHARA_STATUS_MODE_DEDICATED)
		dev_dbg(dev->device, " - Dedicated Mode.\n");
	else if (status & SAHARA_STATUS_MODE_DEBUG)
		dev_dbg(dev->device, " - Debug Mode.\n");

	dev_dbg(dev->device, " - Internal state = 0x%02x\n",
		SAHARA_STATUS_GET_ISTATE(status));

	dev_dbg(dev->device, "Current DAR: 0x%08x\n",
		sahara_read(dev, SAHARA_REG_CDAR));
	dev_dbg(dev->device, "Initial DAR: 0x%08x\n\n",
		sahara_read(dev, SAHARA_REG_IDAR));
}
static void sahara_dump_descriptors(struct sahara_dev *dev)
{
	int i;

	if (!IS_ENABLED(DEBUG))
		return;

	for (i = 0; i < SAHARA_MAX_HW_DESC; i++) {
		dev_dbg(dev->device, "Descriptor (%d) (0x%08x):\n",
			i, dev->hw_phys_desc[i]);
		dev_dbg(dev->device, "\thdr = 0x%08x\n", dev->hw_desc[i]->hdr);
		dev_dbg(dev->device, "\tlen1 = %u\n", dev->hw_desc[i]->len1);
		dev_dbg(dev->device, "\tp1 = 0x%08x\n", dev->hw_desc[i]->p1);
		dev_dbg(dev->device, "\tlen2 = %u\n", dev->hw_desc[i]->len2);
		dev_dbg(dev->device, "\tp2 = 0x%08x\n", dev->hw_desc[i]->p2);
		dev_dbg(dev->device, "\tnext = 0x%08x\n",
			dev->hw_desc[i]->next);
	}

	dev_dbg(dev->device, "\n");
}
static void sahara_dump_links(struct sahara_dev *dev)
{
	int i;

	if (!IS_ENABLED(DEBUG))
		return;

	for (i = 0; i < SAHARA_MAX_HW_LINK; i++) {
		dev_dbg(dev->device, "Link (%d) (0x%08x):\n",
			i, dev->hw_phys_link[i]);
		dev_dbg(dev->device, "\tlen = %u\n", dev->hw_link[i]->len);
		dev_dbg(dev->device, "\tp = 0x%08x\n", dev->hw_link[i]->p);
		dev_dbg(dev->device, "\tnext = 0x%08x\n",
			dev->hw_link[i]->next);
	}

	dev_dbg(dev->device, "\n");
}
static int sahara_hw_descriptor_create(struct sahara_dev *dev)
{
	struct sahara_ctx *ctx = dev->ctx;
	struct scatterlist *sg;
	int ret;
	int i, j;
	int idx = 0;

	/* Copy new key if necessary */
	if (ctx->flags & FLAGS_NEW_KEY) {
		memcpy(dev->key_base, ctx->key, ctx->keylen);
		ctx->flags &= ~FLAGS_NEW_KEY;

		if (dev->flags & FLAGS_CBC) {
			dev->hw_desc[idx]->len1 = AES_BLOCK_SIZE;
			dev->hw_desc[idx]->p1 = dev->iv_phys_base;
		} else {
			dev->hw_desc[idx]->len1 = 0;
			dev->hw_desc[idx]->p1 = 0;
		}
		dev->hw_desc[idx]->len2 = ctx->keylen;
		dev->hw_desc[idx]->p2 = dev->key_phys_base;
		dev->hw_desc[idx]->next = dev->hw_phys_desc[1];

		dev->hw_desc[idx]->hdr = sahara_aes_key_hdr(dev);

		idx++;
	}

	dev->nb_in_sg = sahara_sg_length(dev->in_sg, dev->total);
	dev->nb_out_sg = sahara_sg_length(dev->out_sg, dev->total);
	if ((dev->nb_in_sg + dev->nb_out_sg) > SAHARA_MAX_HW_LINK) {
		dev_err(dev->device, "not enough hw links (%d)\n",
			dev->nb_in_sg + dev->nb_out_sg);
		return -EINVAL;
	}

	ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
			 DMA_TO_DEVICE);
	if (ret != dev->nb_in_sg) {
		dev_err(dev->device, "couldn't map in sg\n");
		goto unmap_in;
	}
	ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg,
			 DMA_FROM_DEVICE);
	if (ret != dev->nb_out_sg) {
		dev_err(dev->device, "couldn't map out sg\n");
		goto unmap_out;
	}

	/* Create input links */
	dev->hw_desc[idx]->p1 = dev->hw_phys_link[0];
	sg = dev->in_sg;
	for (i = 0; i < dev->nb_in_sg; i++) {
		dev->hw_link[i]->len = sg->length;
		dev->hw_link[i]->p = sg->dma_address;
		if (i == (dev->nb_in_sg - 1)) {
			dev->hw_link[i]->next = 0;
		} else {
			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
			sg = sg_next(sg);
		}
	}

	/* Create output links */
	dev->hw_desc[idx]->p2 = dev->hw_phys_link[i];
	sg = dev->out_sg;
	for (j = i; j < dev->nb_out_sg + i; j++) {
		dev->hw_link[j]->len = sg->length;
		dev->hw_link[j]->p = sg->dma_address;
		if (j == (dev->nb_out_sg + i - 1)) {
			dev->hw_link[j]->next = 0;
		} else {
			dev->hw_link[j]->next = dev->hw_phys_link[j + 1];
			sg = sg_next(sg);
		}
	}

	/* Fill remaining fields of hw_desc[1] */
	dev->hw_desc[idx]->hdr = sahara_aes_data_link_hdr(dev);
	dev->hw_desc[idx]->len1 = dev->total;
	dev->hw_desc[idx]->len2 = dev->total;
	dev->hw_desc[idx]->next = 0;

	sahara_dump_descriptors(dev);
	sahara_dump_links(dev);

	/* Kick the engine: it fetches the descriptor chain from here */
	sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);

	return 0;

unmap_out:
	dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
		     DMA_FROM_DEVICE);
unmap_in:
	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
		     DMA_TO_DEVICE);

	return -EINVAL;
}
static int sahara_aes_process(struct ablkcipher_request *req)
{
	struct sahara_dev *dev = dev_ptr;
	struct sahara_ctx *ctx;
	struct sahara_aes_reqctx *rctx;
	int ret;
	unsigned long timeout;

	/* Request is ready to be dispatched by the device */
	dev_dbg(dev->device,
		"dispatch request (nbytes=%d, src=%p, dst=%p)\n",
		req->nbytes, req->src, req->dst);

	/* assign new request to device */
	dev->total = req->nbytes;
	dev->in_sg = req->src;
	dev->out_sg = req->dst;

	rctx = ablkcipher_request_ctx(req);
	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
	rctx->mode &= FLAGS_MODE_MASK;
	dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode;

	if ((dev->flags & FLAGS_CBC) && req->info)
		memcpy(dev->iv_base, req->info, AES_KEYSIZE_128);

	/* assign new context to device */
	dev->ctx = ctx;

	reinit_completion(&dev->dma_completion);

	ret = sahara_hw_descriptor_create(dev);
	if (ret)
		return ret;

	timeout = wait_for_completion_timeout(&dev->dma_completion,
				msecs_to_jiffies(SAHARA_TIMEOUT_MS));
	if (!timeout) {
		dev_err(dev->device, "AES timeout\n");
		return -ETIMEDOUT;
	}

	dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
		     DMA_FROM_DEVICE);
	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
		     DMA_TO_DEVICE);

	return 0;
}
static int sahara_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			     unsigned int keylen)
{
	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	int ret;

	ctx->keylen = keylen;

	/* SAHARA only supports 128bit keys */
	if (keylen == AES_KEYSIZE_128) {
		memcpy(ctx->key, key, keylen);
		ctx->flags |= FLAGS_NEW_KEY;
		return 0;
	}

	if (keylen != AES_KEYSIZE_128 &&
	    keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256)
		return -EINVAL;

	/*
	 * The requested key size is not supported by HW, do a fallback.
	 */
	ctx->fallback->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	ctx->fallback->base.crt_flags |=
		(tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	ret = crypto_ablkcipher_setkey(ctx->fallback, key, keylen);
	if (ret) {
		struct crypto_tfm *tfm_aux = crypto_ablkcipher_tfm(tfm);

		tfm_aux->crt_flags &= ~CRYPTO_TFM_RES_MASK;
		tfm_aux->crt_flags |=
			(ctx->fallback->base.crt_flags & CRYPTO_TFM_RES_MASK);
	}

	return ret;
}
static int sahara_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
	struct sahara_aes_reqctx *rctx = ablkcipher_request_ctx(req);
	struct sahara_dev *dev = dev_ptr;
	int err = 0;

	dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
		req->nbytes, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));

	if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
		dev_err(dev->device,
			"request size is not exact amount of AES blocks\n");
		return -EINVAL;
	}

	rctx->mode = mode;

	mutex_lock(&dev->queue_mutex);
	err = ablkcipher_enqueue_request(&dev->queue, req);
	mutex_unlock(&dev->queue_mutex);

	wake_up_process(dev->kthread);

	return err;
}
static int sahara_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm =
		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
		crypto_ablkcipher_reqtfm(req));
	int err;

	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
		ablkcipher_request_set_tfm(req, ctx->fallback);
		err = crypto_ablkcipher_encrypt(req);
		ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
		return err;
	}

	return sahara_aes_crypt(req, FLAGS_ENCRYPT);
}

static int sahara_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm =
		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
		crypto_ablkcipher_reqtfm(req));
	int err;

	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
		ablkcipher_request_set_tfm(req, ctx->fallback);
		err = crypto_ablkcipher_decrypt(req);
		ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
		return err;
	}

	return sahara_aes_crypt(req, 0);
}

static int sahara_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm =
		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
		crypto_ablkcipher_reqtfm(req));
	int err;

	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
		ablkcipher_request_set_tfm(req, ctx->fallback);
		err = crypto_ablkcipher_encrypt(req);
		ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
		return err;
	}

	return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
}

static int sahara_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm =
		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
		crypto_ablkcipher_reqtfm(req));
	int err;

	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
		ablkcipher_request_set_tfm(req, ctx->fallback);
		err = crypto_ablkcipher_decrypt(req);
		ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
		return err;
	}

	return sahara_aes_crypt(req, FLAGS_CBC);
}
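
/*
 * The four entry points above share the same pattern: only 128-bit keys are
 * handled by the SAHARA unit itself.  For 192/256-bit keys the request is
 * temporarily re-targeted at the ablkcipher fallback allocated in
 * sahara_aes_cra_init() and processed entirely in software.
 */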
static int sahara_aes_cra_init(struct crypto_tfm *tfm)
{
	const char *name = crypto_tfm_alg_name(tfm);
	struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->fallback = crypto_alloc_ablkcipher(name, 0,
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback)) {
		pr_err("Error allocating fallback algo %s\n", name);
		return PTR_ERR(ctx->fallback);
	}

	tfm->crt_ablkcipher.reqsize = sizeof(struct sahara_aes_reqctx);

	return 0;
}

static void sahara_aes_cra_exit(struct crypto_tfm *tfm)
{
	struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->fallback)
		crypto_free_ablkcipher(ctx->fallback);
	ctx->fallback = NULL;
}
static u32 sahara_sha_init_hdr(struct sahara_dev *dev,
			       struct sahara_sha_reqctx *rctx)
{
	u32 hdr = rctx->mode;

	if (rctx->first) {
		hdr |= SAHARA_HDR_MDHA_SET_MODE_HASH;
		hdr |= SAHARA_HDR_MDHA_INIT;
	} else {
		hdr |= SAHARA_HDR_MDHA_SET_MODE_MD_KEY;
	}

	if (rctx->last)
		hdr |= SAHARA_HDR_MDHA_PDATA;

	/* keep the header at odd parity */
	if (hweight_long(hdr) % 2 == 0)
		hdr |= SAHARA_HDR_PARITY_BIT;

	return hdr;
}
static int sahara_sha_hw_links_create(struct sahara_dev *dev,
				      struct sahara_sha_reqctx *rctx,
				      int start)
{
	struct scatterlist *sg;
	int ret;
	int i;

	dev->in_sg = rctx->in_sg;

	dev->nb_in_sg = sahara_sg_length(dev->in_sg, rctx->total);
	if ((dev->nb_in_sg) > SAHARA_MAX_HW_LINK) {
		dev_err(dev->device, "not enough hw links (%d)\n",
			dev->nb_in_sg + dev->nb_out_sg);
		return -EINVAL;
	}

	if (rctx->in_sg_chained) {
		i = start;
		sg = dev->in_sg;
		while (sg) {
			ret = dma_map_sg(dev->device, sg, 1,
					 DMA_TO_DEVICE);
			if (!ret)
				return -EFAULT;

			dev->hw_link[i]->len = sg->length;
			dev->hw_link[i]->p = sg->dma_address;
			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
			sg = sg_next(sg);
			i += 1;
		}
		dev->hw_link[i-1]->next = 0;
	} else {
		sg = dev->in_sg;
		ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
				 DMA_TO_DEVICE);
		if (!ret)
			return -EFAULT;

		for (i = start; i < dev->nb_in_sg + start; i++) {
			dev->hw_link[i]->len = sg->length;
			dev->hw_link[i]->p = sg->dma_address;
			if (i == (dev->nb_in_sg + start - 1)) {
				dev->hw_link[i]->next = 0;
			} else {
				dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
				sg = sg_next(sg);
			}
		}
	}

	return i;
}
static int sahara_sha_hw_data_descriptor_create(struct sahara_dev *dev,
						struct sahara_sha_reqctx *rctx,
						struct ahash_request *req,
						int index)
{
	unsigned result_len;
	int i = index;

	if (rctx->first)
		/* Create initial descriptor: #8*/
		dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
	else
		/* Create hash descriptor: #10. Must follow #6. */
		dev->hw_desc[index]->hdr = SAHARA_HDR_MDHA_HASH;

	dev->hw_desc[index]->len1 = rctx->total;
	if (dev->hw_desc[index]->len1 == 0) {
		/* if len1 is 0, p1 must be 0, too */
		dev->hw_desc[index]->p1 = 0;
		rctx->sg_in_idx = 0;
	} else {
		/* Create input links */
		dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
		i = sahara_sha_hw_links_create(dev, rctx, index);

		rctx->sg_in_idx = index;
		if (i < 0)
			return i;
	}

	dev->hw_desc[index]->p2 = dev->hw_phys_link[i];

	/* Save the context for the next operation */
	result_len = rctx->context_size;
	dev->hw_link[i]->p = dev->context_phys_base;

	dev->hw_link[i]->len = result_len;
	dev->hw_desc[index]->len2 = result_len;

	dev->hw_link[i]->next = 0;

	return 0;
}
/*
 * Load descriptor aka #6
 *
 * To load a previously saved context back to the MDHA unit
 */
static int sahara_sha_hw_context_descriptor_create(struct sahara_dev *dev,
						   struct sahara_sha_reqctx *rctx,
						   struct ahash_request *req,
						   int index)
{
	dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);

	dev->hw_desc[index]->len1 = rctx->context_size;
	dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
	dev->hw_desc[index]->len2 = 0;
	dev->hw_desc[index]->p2 = 0;

	dev->hw_link[index]->len = rctx->context_size;
	dev->hw_link[index]->p = dev->context_phys_base;
	dev->hw_link[index]->next = 0;

	return 0;
}
static int sahara_walk_and_recalc(struct scatterlist *sg, unsigned int nbytes)
{
	if (!sg || !sg->length)
		return nbytes;

	while (nbytes && sg) {
		if (nbytes <= sg->length) {
			sg->length = nbytes;
			sg_mark_end(sg);
			break;
		}
		nbytes -= sg->length;
		sg = sg_next(sg);
	}

	return nbytes;
}
static int sahara_sha_prepare_request(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
	unsigned int hash_later;
	unsigned int block_size;
	unsigned int len;

	block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	/* append bytes from previous operation */
	len = rctx->buf_cnt + req->nbytes;

	/* only the last transfer can be padded in hardware */
	if (!rctx->last && (len < block_size)) {
		/* too few data, save for next operation */
		scatterwalk_map_and_copy(rctx->buf + rctx->buf_cnt, req->src,
					 0, req->nbytes, 0);
		rctx->buf_cnt += req->nbytes;

		return 0;
	}

	/* add data from previous operation first */
	if (rctx->buf_cnt)
		memcpy(rctx->rembuf, rctx->buf, rctx->buf_cnt);

	/* data must always be a multiple of block_size */
	hash_later = rctx->last ? 0 : len & (block_size - 1);
	if (hash_later) {
		unsigned int offset = req->nbytes - hash_later;
		/* Save remaining bytes for later use */
		scatterwalk_map_and_copy(rctx->buf, req->src, offset,
					 hash_later, 0);
	}

	/* nbytes should now be multiple of blocksize */
	req->nbytes = req->nbytes - hash_later;

	sahara_walk_and_recalc(req->src, req->nbytes);

	/* have data from previous operation and current */
	if (rctx->buf_cnt && req->nbytes) {
		sg_init_table(rctx->in_sg_chain, 2);
		sg_set_buf(rctx->in_sg_chain, rctx->rembuf, rctx->buf_cnt);

		scatterwalk_sg_chain(rctx->in_sg_chain, 2, req->src);

		rctx->total = req->nbytes + rctx->buf_cnt;
		rctx->in_sg = rctx->in_sg_chain;

		rctx->in_sg_chained = true;
		req->src = rctx->in_sg_chain;
	/* only data from previous operation */
	} else if (rctx->buf_cnt) {
		if (req->src)
			rctx->in_sg = req->src;
		else
			rctx->in_sg = rctx->in_sg_chain;
		/* buf was copied into rembuf above */
		sg_init_one(rctx->in_sg, rctx->rembuf, rctx->buf_cnt);
		rctx->total = rctx->buf_cnt;
		rctx->in_sg_chained = false;
	/* no data from previous operation */
	} else {
		rctx->in_sg = req->src;
		rctx->total = req->nbytes;
		req->src = rctx->in_sg;
		rctx->in_sg_chained = false;
	}

	/* on next call, we only have the remaining data in the buffer */
	rctx->buf_cnt = hash_later;

	return -EINPROGRESS;
}
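
/*
 * Worked example for the bookkeeping above (SHA-256, block_size = 64): with
 * 10 bytes left over in rctx->buf from the previous update and a new
 * 100-byte request, len = 110 and hash_later = 110 & 63 = 46.  The last 46
 * bytes of req->src are stashed in rctx->buf for the next call, req->nbytes
 * is trimmed to 54, and the chained scatterlist feeds 10 + 54 = 64 bytes
 * (exactly one block) to the hardware.
 */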
static void sahara_sha_unmap_sg(struct sahara_dev *dev,
				struct sahara_sha_reqctx *rctx)
{
	struct scatterlist *sg;

	if (rctx->in_sg_chained) {
		sg = dev->in_sg;
		while (sg) {
			dma_unmap_sg(dev->device, sg, 1, DMA_TO_DEVICE);
			sg = sg_next(sg);
		}
	} else {
		dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
			     DMA_TO_DEVICE);
	}
}
*req
)
1052 struct sahara_dev
*dev
= dev_ptr
;
1053 struct sahara_sha_reqctx
*rctx
= ahash_request_ctx(req
);
1055 unsigned long timeout
;
1057 ret
= sahara_sha_prepare_request(req
);
1062 sahara_sha_hw_data_descriptor_create(dev
, rctx
, req
, 0);
1063 dev
->hw_desc
[0]->next
= 0;
1066 memcpy(dev
->context_base
, rctx
->context
, rctx
->context_size
);
1068 sahara_sha_hw_context_descriptor_create(dev
, rctx
, req
, 0);
1069 dev
->hw_desc
[0]->next
= dev
->hw_phys_desc
[1];
1070 sahara_sha_hw_data_descriptor_create(dev
, rctx
, req
, 1);
1071 dev
->hw_desc
[1]->next
= 0;
1074 sahara_dump_descriptors(dev
);
1075 sahara_dump_links(dev
);
1077 reinit_completion(&dev
->dma_completion
);
1079 sahara_write(dev
, dev
->hw_phys_desc
[0], SAHARA_REG_DAR
);
1081 timeout
= wait_for_completion_timeout(&dev
->dma_completion
,
1082 msecs_to_jiffies(SAHARA_TIMEOUT_MS
));
1084 dev_err(dev
->device
, "SHA timeout\n");
1088 if (rctx
->sg_in_idx
)
1089 sahara_sha_unmap_sg(dev
, rctx
);
1091 memcpy(rctx
->context
, dev
->context_base
, rctx
->context_size
);
1094 memcpy(req
->result
, rctx
->context
, rctx
->digest_size
);
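
/*
 * All transformations funnel through the single kthread below, which matches
 * SAHARA_QUEUE_LENGTH == 1: the engine runs one descriptor chain at a time,
 * the thread sleeps until woken by an enqueue, and completion of the DMA is
 * signalled from the IRQ handler via dev->dma_completion.
 */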
static int sahara_queue_manage(void *data)
{
	struct sahara_dev *dev = (struct sahara_dev *)data;
	struct crypto_async_request *async_req;
	struct crypto_async_request *backlog;
	int ret = 0;

	do {
		__set_current_state(TASK_INTERRUPTIBLE);

		mutex_lock(&dev->queue_mutex);
		backlog = crypto_get_backlog(&dev->queue);
		async_req = crypto_dequeue_request(&dev->queue);
		mutex_unlock(&dev->queue_mutex);

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		if (async_req) {
			if (crypto_tfm_alg_type(async_req->tfm) ==
			    CRYPTO_ALG_TYPE_AHASH) {
				struct ahash_request *req =
					ahash_request_cast(async_req);

				ret = sahara_sha_process(req);
			} else {
				struct ablkcipher_request *req =
					ablkcipher_request_cast(async_req);

				ret = sahara_aes_process(req);
			}

			async_req->complete(async_req, ret);

			continue;
		}

		schedule();
	} while (!kthread_should_stop());

	return 0;
}
static int sahara_sha_enqueue(struct ahash_request *req, int last)
{
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
	struct sahara_dev *dev = dev_ptr;
	int ret;

	if (!req->nbytes && !last)
		return 0;

	mutex_lock(&rctx->mutex);
	rctx->last = last;

	if (!rctx->active) {
		rctx->active = 1;
		rctx->first = 1;
	}

	mutex_lock(&dev->queue_mutex);
	ret = crypto_enqueue_request(&dev->queue, &req->base);
	mutex_unlock(&dev->queue_mutex);

	wake_up_process(dev->kthread);
	mutex_unlock(&rctx->mutex);

	return ret;
}
*req
)
1171 struct crypto_ahash
*tfm
= crypto_ahash_reqtfm(req
);
1172 struct sahara_sha_reqctx
*rctx
= ahash_request_ctx(req
);
1174 memset(rctx
, 0, sizeof(*rctx
));
1176 switch (crypto_ahash_digestsize(tfm
)) {
1177 case SHA1_DIGEST_SIZE
:
1178 rctx
->mode
|= SAHARA_HDR_MDHA_ALG_SHA1
;
1179 rctx
->digest_size
= SHA1_DIGEST_SIZE
;
1181 case SHA256_DIGEST_SIZE
:
1182 rctx
->mode
|= SAHARA_HDR_MDHA_ALG_SHA256
;
1183 rctx
->digest_size
= SHA256_DIGEST_SIZE
;
1189 rctx
->context_size
= rctx
->digest_size
+ 4;
1192 mutex_init(&rctx
->mutex
);
static int sahara_sha_update(struct ahash_request *req)
{
	return sahara_sha_enqueue(req, 0);
}

static int sahara_sha_final(struct ahash_request *req)
{
	req->nbytes = 0;
	return sahara_sha_enqueue(req, 1);
}

static int sahara_sha_finup(struct ahash_request *req)
{
	return sahara_sha_enqueue(req, 1);
}

static int sahara_sha_digest(struct ahash_request *req)
{
	sahara_sha_init(req);

	return sahara_sha_finup(req);
}
static int sahara_sha_export(struct ahash_request *req, void *out)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct sahara_ctx *ctx = crypto_ahash_ctx(ahash);
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);

	memcpy(out, ctx, sizeof(struct sahara_ctx));
	memcpy(out + sizeof(struct sahara_sha_reqctx), rctx,
	       sizeof(struct sahara_sha_reqctx));

	return 0;
}

static int sahara_sha_import(struct ahash_request *req, const void *in)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct sahara_ctx *ctx = crypto_ahash_ctx(ahash);
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);

	memcpy(ctx, in, sizeof(struct sahara_ctx));
	memcpy(rctx, in + sizeof(struct sahara_sha_reqctx),
	       sizeof(struct sahara_sha_reqctx));

	return 0;
}
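
/*
 * Note on the export/import layout: the second memcpy() in both helpers
 * places the request context at offset sizeof(struct sahara_sha_reqctx)
 * rather than sizeof(struct sahara_ctx).  Export and import mirror each
 * other, so the state round-trips correctly, but this layout only works
 * while sizeof(struct sahara_ctx) <= sizeof(struct sahara_sha_reqctx).
 */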
static int sahara_sha_cra_init(struct crypto_tfm *tfm)
{
	const char *name = crypto_tfm_alg_name(tfm);
	struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->shash_fallback = crypto_alloc_shash(name, 0,
					CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->shash_fallback)) {
		pr_err("Error allocating fallback algo %s\n", name);
		return PTR_ERR(ctx->shash_fallback);
	}
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct sahara_sha_reqctx) +
				 SHA_BUFFER_LEN + SHA256_BLOCK_SIZE);

	return 0;
}

static void sahara_sha_cra_exit(struct crypto_tfm *tfm)
{
	struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->shash_fallback);
	ctx->shash_fallback = NULL;
}
static struct crypto_alg aes_algs[] = {
{
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "sahara-ecb-aes",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct sahara_ctx),
	.cra_alignmask		= 0x0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= sahara_aes_cra_init,
	.cra_exit		= sahara_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= sahara_aes_setkey,
		.encrypt	= sahara_aes_ecb_encrypt,
		.decrypt	= sahara_aes_ecb_decrypt,
	}
}, {
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "sahara-cbc-aes",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct sahara_ctx),
	.cra_alignmask		= 0x0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= sahara_aes_cra_init,
	.cra_exit		= sahara_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= sahara_aes_setkey,
		.encrypt	= sahara_aes_cbc_encrypt,
		.decrypt	= sahara_aes_cbc_decrypt,
	}
}
};
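
/*
 * With cra_priority set to 300, these entries take precedence over the
 * generic software ecb(aes)/cbc(aes) implementations (which register with a
 * lower priority), so callers that allocate an async ablkcipher for
 * "cbc(aes)" are normally routed to this driver once it is loaded.
 */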
static struct ahash_alg sha_v3_algs[] = {
{
	.init		= sahara_sha_init,
	.update		= sahara_sha_update,
	.final		= sahara_sha_final,
	.finup		= sahara_sha_finup,
	.digest		= sahara_sha_digest,
	.export		= sahara_sha_export,
	.import		= sahara_sha_import,
	.halg.digestsize	= SHA1_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "sha1",
		.cra_driver_name	= "sahara-sha1",
		.cra_priority		= 300,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct sahara_ctx),
		.cra_alignmask		= 0,
		.cra_module		= THIS_MODULE,
		.cra_init		= sahara_sha_cra_init,
		.cra_exit		= sahara_sha_cra_exit,
	}
},
};
static struct ahash_alg sha_v4_algs[] = {
{
	.init		= sahara_sha_init,
	.update		= sahara_sha_update,
	.final		= sahara_sha_final,
	.finup		= sahara_sha_finup,
	.digest		= sahara_sha_digest,
	.export		= sahara_sha_export,
	.import		= sahara_sha_import,
	.halg.digestsize	= SHA256_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "sha256",
		.cra_driver_name	= "sahara-sha256",
		.cra_priority		= 300,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA256_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct sahara_ctx),
		.cra_alignmask		= 0,
		.cra_module		= THIS_MODULE,
		.cra_init		= sahara_sha_cra_init,
		.cra_exit		= sahara_sha_cra_exit,
	}
},
};
static irqreturn_t sahara_irq_handler(int irq, void *data)
{
	struct sahara_dev *dev = (struct sahara_dev *)data;
	unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS);
	unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);

	sahara_write(dev, SAHARA_CMD_CLEAR_INT | SAHARA_CMD_CLEAR_ERR,
		     SAHARA_REG_CMD);

	sahara_decode_status(dev, stat);

	if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_BUSY) {
		return IRQ_NONE;
	} else if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_COMPLETE) {
		dev->error = 0;
	} else {
		sahara_decode_error(dev, err);
		dev->error = -EINVAL;
	}

	complete(&dev->dma_completion);

	return IRQ_HANDLED;
}
static int sahara_register_algs(struct sahara_dev *dev)
{
	int err;
	unsigned int i, j, k, l;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
		INIT_LIST_HEAD(&aes_algs[i].cra_list);
		err = crypto_register_alg(&aes_algs[i]);
		if (err)
			goto err_aes_algs;
	}

	for (k = 0; k < ARRAY_SIZE(sha_v3_algs); k++) {
		err = crypto_register_ahash(&sha_v3_algs[k]);
		if (err)
			goto err_sha_v3_algs;
	}

	if (dev->version > SAHARA_VERSION_3)
		for (l = 0; l < ARRAY_SIZE(sha_v4_algs); l++) {
			err = crypto_register_ahash(&sha_v4_algs[l]);
			if (err)
				goto err_sha_v4_algs;
		}

	return 0;

err_sha_v4_algs:
	for (j = 0; j < l; j++)
		crypto_unregister_ahash(&sha_v4_algs[j]);

err_sha_v3_algs:
	for (j = 0; j < k; j++)
		crypto_unregister_ahash(&sha_v3_algs[j]);

err_aes_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_alg(&aes_algs[j]);

	return err;
}
static void sahara_unregister_algs(struct sahara_dev *dev)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
		crypto_unregister_alg(&aes_algs[i]);

	for (i = 0; i < ARRAY_SIZE(sha_v3_algs); i++)
		crypto_unregister_ahash(&sha_v3_algs[i]);

	if (dev->version > SAHARA_VERSION_3)
		for (i = 0; i < ARRAY_SIZE(sha_v4_algs); i++)
			crypto_unregister_ahash(&sha_v4_algs[i]);
}
static struct platform_device_id sahara_platform_ids[] = {
	{ .name = "sahara-imx27" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(platform, sahara_platform_ids);

static struct of_device_id sahara_dt_ids[] = {
	{ .compatible = "fsl,imx53-sahara" },
	{ .compatible = "fsl,imx27-sahara" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sahara_dt_ids);
static int sahara_probe(struct platform_device *pdev)
{
	struct sahara_dev *dev;
	struct resource *res;
	u32 version;
	int irq;
	int err;
	int i;

	dev = devm_kzalloc(&pdev->dev, sizeof(struct sahara_dev), GFP_KERNEL);
	if (dev == NULL) {
		dev_err(&pdev->dev, "unable to alloc data struct.\n");
		return -ENOMEM;
	}

	dev->device = &pdev->dev;
	platform_set_drvdata(pdev, dev);

	/* Get the base address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dev->regs_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(dev->regs_base))
		return PTR_ERR(dev->regs_base);

	/* Get the IRQ */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "failed to get irq resource\n");
		return irq;
	}

	err = devm_request_irq(&pdev->dev, irq, sahara_irq_handler,
			       0, dev_name(&pdev->dev), dev);
	if (err) {
		dev_err(&pdev->dev, "failed to request irq\n");
		return err;
	}

	/* clocks */
	dev->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(dev->clk_ipg)) {
		dev_err(&pdev->dev, "Could not get ipg clock\n");
		return PTR_ERR(dev->clk_ipg);
	}

	dev->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(dev->clk_ahb)) {
		dev_err(&pdev->dev, "Could not get ahb clock\n");
		return PTR_ERR(dev->clk_ahb);
	}

	/* Allocate HW descriptors */
	dev->hw_desc[0] = dma_alloc_coherent(&pdev->dev,
			SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
			&dev->hw_phys_desc[0], GFP_KERNEL);
	if (!dev->hw_desc[0]) {
		dev_err(&pdev->dev, "Could not allocate hw descriptors\n");
		return -ENOMEM;
	}
	dev->hw_desc[1] = dev->hw_desc[0] + 1;
	dev->hw_phys_desc[1] = dev->hw_phys_desc[0] +
				sizeof(struct sahara_hw_desc);

	/* Allocate space for iv and key */
	dev->key_base = dma_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
				&dev->key_phys_base, GFP_KERNEL);
	if (!dev->key_base) {
		dev_err(&pdev->dev, "Could not allocate memory for key\n");
		err = -ENOMEM;
		goto err_key;
	}
	dev->iv_base = dev->key_base + AES_KEYSIZE_128;
	dev->iv_phys_base = dev->key_phys_base + AES_KEYSIZE_128;

	/* Allocate space for context: largest digest + message length field */
	dev->context_base = dma_alloc_coherent(&pdev->dev,
					SHA256_DIGEST_SIZE + 4,
					&dev->context_phys_base, GFP_KERNEL);
	if (!dev->context_base) {
		dev_err(&pdev->dev, "Could not allocate memory for MDHA context\n");
		err = -ENOMEM;
		goto err_key;
	}

	/* Allocate space for HW links */
	dev->hw_link[0] = dma_alloc_coherent(&pdev->dev,
			SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
			&dev->hw_phys_link[0], GFP_KERNEL);
	if (!dev->hw_link[0]) {
		dev_err(&pdev->dev, "Could not allocate hw links\n");
		err = -ENOMEM;
		goto err_link;
	}
	for (i = 1; i < SAHARA_MAX_HW_LINK; i++) {
		dev->hw_phys_link[i] = dev->hw_phys_link[i - 1] +
					sizeof(struct sahara_hw_link);
		dev->hw_link[i] = dev->hw_link[i - 1] + 1;
	}

	crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH);

	spin_lock_init(&dev->lock);
	mutex_init(&dev->queue_mutex);

	dev_ptr = dev;

	dev->kthread = kthread_run(sahara_queue_manage, dev, "sahara_crypto");
	if (IS_ERR(dev->kthread)) {
		err = PTR_ERR(dev->kthread);
		goto err_link;
	}

	init_completion(&dev->dma_completion);

	clk_prepare_enable(dev->clk_ipg);
	clk_prepare_enable(dev->clk_ahb);

	version = sahara_read(dev, SAHARA_REG_VERSION);
	if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx27-sahara")) {
		if (version != SAHARA_VERSION_3)
			err = -ENODEV;
	} else if (of_device_is_compatible(pdev->dev.of_node,
			"fsl,imx53-sahara")) {
		if (((version >> 8) & 0xff) != SAHARA_VERSION_4)
			err = -ENODEV;
		version = (version >> 8) & 0xff;
	}
	if (err == -ENODEV) {
		dev_err(&pdev->dev, "SAHARA version %d not supported\n",
			version);
		goto err_algs;
	}

	dev->version = version;

	sahara_write(dev, SAHARA_CMD_RESET | SAHARA_CMD_MODE_BATCH,
		     SAHARA_REG_CMD);
	sahara_write(dev, SAHARA_CONTROL_SET_THROTTLE(0) |
			SAHARA_CONTROL_SET_MAXBURST(8) |
			SAHARA_CONTROL_RNG_AUTORSD |
			SAHARA_CONTROL_ENABLE_INT,
			SAHARA_REG_CONTROL);

	err = sahara_register_algs(dev);
	if (err)
		goto err_algs;

	dev_info(&pdev->dev, "SAHARA version %d initialized\n", version);

	return 0;

err_algs:
	dma_free_coherent(&pdev->dev,
			  SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
			  dev->hw_link[0], dev->hw_phys_link[0]);
	clk_disable_unprepare(dev->clk_ipg);
	clk_disable_unprepare(dev->clk_ahb);
	kthread_stop(dev->kthread);
	dev_ptr = NULL;
err_link:
	dma_free_coherent(&pdev->dev,
			  2 * AES_KEYSIZE_128,
			  dev->key_base, dev->key_phys_base);
	dma_free_coherent(&pdev->dev,
			  SHA256_DIGEST_SIZE + 4,
			  dev->context_base, dev->context_phys_base);
err_key:
	dma_free_coherent(&pdev->dev,
			  SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
			  dev->hw_desc[0], dev->hw_phys_desc[0]);

	return err;
}
static int sahara_remove(struct platform_device *pdev)
{
	struct sahara_dev *dev = platform_get_drvdata(pdev);

	dma_free_coherent(&pdev->dev,
			  SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
			  dev->hw_link[0], dev->hw_phys_link[0]);
	dma_free_coherent(&pdev->dev,
			  2 * AES_KEYSIZE_128,
			  dev->key_base, dev->key_phys_base);
	dma_free_coherent(&pdev->dev,
			  SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
			  dev->hw_desc[0], dev->hw_phys_desc[0]);

	kthread_stop(dev->kthread);

	sahara_unregister_algs(dev);

	clk_disable_unprepare(dev->clk_ipg);
	clk_disable_unprepare(dev->clk_ahb);

	dev_ptr = NULL;

	return 0;
}
static struct platform_driver sahara_driver = {
	.probe		= sahara_probe,
	.remove		= sahara_remove,
	.driver		= {
		.name	= SAHARA_NAME,
		.of_match_table = sahara_dt_ids,
	},
	.id_table = sahara_platform_ids,
};

module_platform_driver(sahara_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>");
MODULE_AUTHOR("Steffen Trumtrar <s.trumtrar@pengutronix.de>");
MODULE_DESCRIPTION("SAHARA2 HW crypto accelerator");