// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Freescale i.MX23/i.MX28 Data Co-Processor driver
 *
 * Copyright (C) 2013 Marek Vasut <marex@denx.de>
 */

#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/stmp_device.h>
#include <linux/clk.h>
#include <soc/fsl/dcp.h>

#include <crypto/aes.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#define DCP_MAX_CHANS	4
#define DCP_BUF_SZ	PAGE_SIZE
#define DCP_SHA_PAY_SZ	64

#define DCP_ALIGNMENT	64
/*
 * Null hashes to align with hardware behavior on imx6sl and imx6ull;
 * these are flipped for consistency with hardware output.
 */
static const uint8_t sha1_null_hash[] =
	"\x09\x07\xd8\xaf\x90\x18\x60\x95\xef\xbf"
	"\x55\x32\x0d\x4b\x6b\x5e\xee\xa3\x39\xda";

static const uint8_t sha256_null_hash[] =
	"\x55\xb8\x52\x78\x1b\x99\x95\xa4"
	"\x4c\x93\x9b\x64\xe4\x41\xae\x27"
	"\x24\xb9\x6f\x99\xc8\xf4\xfb\x9a"
	"\x14\x1c\xfc\x98\x42\xc4\xb0\xe3";
/* DCP DMA descriptor. */
struct dcp_dma_desc {
	uint32_t	next_cmd_addr;
	uint32_t	control0;
	uint32_t	control1;
	uint32_t	source;
	uint32_t	destination;
	uint32_t	size;
	uint32_t	payload;
	uint32_t	status;
};
/* Coherent aligned block for bounce buffering. */
struct dcp_coherent_block {
	uint8_t			aes_in_buf[DCP_BUF_SZ];
	uint8_t			aes_out_buf[DCP_BUF_SZ];
	uint8_t			sha_in_buf[DCP_BUF_SZ];
	uint8_t			sha_out_buf[DCP_SHA_PAY_SZ];

	uint8_t			aes_key[2 * AES_KEYSIZE_128];

	struct dcp_dma_desc	desc[DCP_MAX_CHANS];
};
struct dcp {
	struct device			*dev;
	void __iomem			*base;

	uint32_t			caps;

	struct dcp_coherent_block	*coh;

	struct completion		completion[DCP_MAX_CHANS];
	spinlock_t			lock[DCP_MAX_CHANS];
	struct task_struct		*thread[DCP_MAX_CHANS];
	struct crypto_queue		queue[DCP_MAX_CHANS];
	struct clk			*dcp_clk;
};
enum dcp_chan {
	DCP_CHAN_HASH_SHA	= 0,
	DCP_CHAN_CRYPTO		= 1,
};
struct dcp_async_ctx {
	/* Common context */
	enum dcp_chan	chan;
	uint32_t	fill;

	/* SHA Hash-specific context */
	struct mutex			mutex;
	uint32_t			alg;
	unsigned int			hot:1;

	/* Crypto-specific context */
	struct crypto_skcipher		*fallback;
	unsigned int			key_len;
	uint8_t				key[AES_KEYSIZE_128];
	bool				key_referenced;
};
struct dcp_aes_req_ctx {
	int	enc, ecb;
	struct skcipher_request fallback_req;	// keep at the end
};
struct dcp_sha_req_ctx {
	int	init;
	int	fini;
};
struct dcp_export_state {
	struct dcp_sha_req_ctx req_ctx;
	struct dcp_async_ctx async_ctx;
};
/*
 * There can only be one instance of the MXS DCP due to the
 * design of the Linux Crypto API.
 */
static struct dcp *global_sdcp;
/* DCP register layout. */
#define MXS_DCP_CTRL				0x00
#define MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES	(1 << 23)
#define MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING	(1 << 22)

#define MXS_DCP_STAT				0x10
#define MXS_DCP_STAT_CLR			0x18
#define MXS_DCP_STAT_IRQ_MASK			0xf

#define MXS_DCP_CHANNELCTRL			0x20
#define MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK	0xff

#define MXS_DCP_CAPABILITY1			0x40
#define MXS_DCP_CAPABILITY1_SHA256		(4 << 16)
#define MXS_DCP_CAPABILITY1_SHA1		(1 << 16)
#define MXS_DCP_CAPABILITY1_AES128		(1 << 0)

#define MXS_DCP_CONTEXT				0x50

#define MXS_DCP_CH_N_CMDPTR(n)			(0x100 + ((n) * 0x40))

#define MXS_DCP_CH_N_SEMA(n)			(0x110 + ((n) * 0x40))

#define MXS_DCP_CH_N_STAT(n)			(0x120 + ((n) * 0x40))
#define MXS_DCP_CH_N_STAT_CLR(n)		(0x128 + ((n) * 0x40))

/* DMA descriptor bits. */
#define MXS_DCP_CONTROL0_HASH_TERM		(1 << 13)
#define MXS_DCP_CONTROL0_HASH_INIT		(1 << 12)
#define MXS_DCP_CONTROL0_PAYLOAD_KEY		(1 << 11)
#define MXS_DCP_CONTROL0_OTP_KEY		(1 << 10)
#define MXS_DCP_CONTROL0_CIPHER_ENCRYPT		(1 << 8)
#define MXS_DCP_CONTROL0_CIPHER_INIT		(1 << 9)
#define MXS_DCP_CONTROL0_ENABLE_HASH		(1 << 6)
#define MXS_DCP_CONTROL0_ENABLE_CIPHER		(1 << 5)
#define MXS_DCP_CONTROL0_DECR_SEMAPHORE		(1 << 1)
#define MXS_DCP_CONTROL0_INTERRUPT		(1 << 0)

#define MXS_DCP_CONTROL1_HASH_SELECT_SHA256	(2 << 16)
#define MXS_DCP_CONTROL1_HASH_SELECT_SHA1	(0 << 16)
#define MXS_DCP_CONTROL1_CIPHER_MODE_CBC	(1 << 4)
#define MXS_DCP_CONTROL1_CIPHER_MODE_ECB	(0 << 4)
#define MXS_DCP_CONTROL1_CIPHER_SELECT_AES128	(0 << 0)

#define MXS_DCP_CONTROL1_KEY_SELECT_SHIFT	8
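/*
 * Kick off a DMA transfer on one DCP channel: clear the channel status,
 * point the channel command pointer at the (already filled) descriptor,
 * then bump the channel semaphore and wait for the completion IRQ.
 */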
static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = actx->chan;
	uint32_t stat;
	unsigned long ret;
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
	dma_addr_t desc_phys = dma_map_single(sdcp->dev, desc, sizeof(*desc),
					      DMA_TO_DEVICE);
	int dma_err;

	dma_err = dma_mapping_error(sdcp->dev, desc_phys);
	if (dma_err)
		return dma_err;

	reinit_completion(&sdcp->completion[chan]);

	/* Clear status register. */
	writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(chan));

	/* Load the DMA descriptor. */
	writel(desc_phys, sdcp->base + MXS_DCP_CH_N_CMDPTR(chan));

	/* Increment the semaphore to start the DMA transfer. */
	writel(1, sdcp->base + MXS_DCP_CH_N_SEMA(chan));

	ret = wait_for_completion_timeout(&sdcp->completion[chan],
					  msecs_to_jiffies(1000));
	if (!ret) {
		dev_err(sdcp->dev, "Channel %i timeout (DCP_STAT=0x%08x)\n",
			chan, readl(sdcp->base + MXS_DCP_STAT));
		return -ETIMEDOUT;
	}

	stat = readl(sdcp->base + MXS_DCP_CH_N_STAT(chan));
	if (stat & 0xff) {
		dev_err(sdcp->dev, "Channel %i error (CH_STAT=0x%08x)\n",
			chan, stat);
		return -EINVAL;
	}

	dma_unmap_single(sdcp->dev, desc_phys, sizeof(*desc), DMA_TO_DEVICE);

	return 0;
}
/*
 * Encryption (AES128)
 */
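/*
 * Run one AES128 operation on the data currently staged in the coherent
 * bounce buffers. The key (or a key-slot reference for paes) is passed
 * through the descriptor payload or the control1 KEY_SELECT field.
 */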
static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
			   struct skcipher_request *req, int init)
{
	dma_addr_t key_phys = 0;
	dma_addr_t src_phys, dst_phys;
	struct dcp *sdcp = global_sdcp;
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
	bool key_referenced = actx->key_referenced;
	int ret;

	if (!key_referenced) {
		key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
					  2 * AES_KEYSIZE_128, DMA_TO_DEVICE);
		ret = dma_mapping_error(sdcp->dev, key_phys);
		if (ret)
			return ret;
	}

	src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
				  DCP_BUF_SZ, DMA_TO_DEVICE);
	ret = dma_mapping_error(sdcp->dev, src_phys);
	if (ret)
		goto err_src;

	dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
				  DCP_BUF_SZ, DMA_FROM_DEVICE);
	ret = dma_mapping_error(sdcp->dev, dst_phys);
	if (ret)
		goto err_dst;

	if (actx->fill % AES_BLOCK_SIZE) {
		dev_err(sdcp->dev, "Invalid block size!\n");
		ret = -EINVAL;
		goto aes_done_run;
	}

	/* Fill in the DMA descriptor. */
	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
			 MXS_DCP_CONTROL0_INTERRUPT |
			 MXS_DCP_CONTROL0_ENABLE_CIPHER;

	if (key_referenced)
		/* Set OTP key bit to select the key via KEY_SELECT. */
		desc->control0 |= MXS_DCP_CONTROL0_OTP_KEY;
	else
		/* Payload contains the key. */
		desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY;

	if (rctx->enc)
		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_ENCRYPT;
	if (init)
		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_INIT;

	desc->control1 = MXS_DCP_CONTROL1_CIPHER_SELECT_AES128;

	if (rctx->ecb)
		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_ECB;
	else
		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_CBC;

	if (key_referenced)
		desc->control1 |= sdcp->coh->aes_key[0] << MXS_DCP_CONTROL1_KEY_SELECT_SHIFT;

	desc->next_cmd_addr = 0;
	desc->source = src_phys;
	desc->destination = dst_phys;
	desc->size = actx->fill;
	desc->payload = key_phys;
	desc->status = 0;

	ret = mxs_dcp_start_dma(actx);

aes_done_run:
	dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE);
err_dst:
	dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
err_src:
	if (!key_referenced)
		dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
				 DMA_TO_DEVICE);

	return ret;
}
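/*
 * Walk the source scatterlist, copy it piecewise into the DMA-coherent
 * input buffer and run the engine every time the buffer fills up (or on
 * the last chunk), copying the results back into the destination
 * scatterlist as we go.
 */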
static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
{
	struct dcp *sdcp = global_sdcp;

	struct skcipher_request *req = skcipher_request_cast(arq);
	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);

	struct scatterlist *dst = req->dst;
	struct scatterlist *src = req->src;
	int dst_nents = sg_nents(dst);

	const int out_off = DCP_BUF_SZ;
	uint8_t *in_buf = sdcp->coh->aes_in_buf;
	uint8_t *out_buf = sdcp->coh->aes_out_buf;

	uint32_t dst_off = 0;
	uint8_t *src_buf = NULL;
	uint32_t last_out_len = 0;

	uint8_t *key = sdcp->coh->aes_key;

	int ret = 0;
	unsigned int i, len, clen, tlen = 0;
	int init = 0;
	bool limit_hit = false;

	actx->fill = 0;

	/* Copy the key from the temporary location. */
	memcpy(key, actx->key, actx->key_len);

	if (!rctx->ecb) {
		/* Copy the CBC IV just past the key. */
		memcpy(key + AES_KEYSIZE_128, req->iv, AES_KEYSIZE_128);
		/* CBC needs the INIT set. */
		init = 1;
	} else {
		memset(key + AES_KEYSIZE_128, 0, AES_KEYSIZE_128);
	}

	for_each_sg(req->src, src, sg_nents(req->src), i) {
		src_buf = sg_virt(src);
		len = sg_dma_len(src);
		tlen += len;
		limit_hit = tlen > req->cryptlen;

		if (limit_hit)
			len = req->cryptlen - (tlen - len);

		do {
			if (actx->fill + len > out_off)
				clen = out_off - actx->fill;
			else
				clen = len;

			memcpy(in_buf + actx->fill, src_buf, clen);
			len -= clen;
			src_buf += clen;
			actx->fill += clen;

			/*
			 * If we filled the buffer or this is the last SG,
			 * submit the buffer.
			 */
			if (actx->fill == out_off || sg_is_last(src) ||
			    limit_hit) {
				ret = mxs_dcp_run_aes(actx, req, init);
				if (ret)
					return ret;
				init = 0;

				sg_pcopy_from_buffer(dst, dst_nents, out_buf,
						     actx->fill, dst_off);
				dst_off += actx->fill;
				last_out_len = actx->fill;
				actx->fill = 0;
			}
		} while (len);

		if (limit_hit)
			break;
	}

	/* Copy the IV for CBC for chaining */
	if (!rctx->ecb) {
		if (rctx->enc)
			memcpy(req->iv, out_buf + (last_out_len - AES_BLOCK_SIZE),
			       AES_BLOCK_SIZE);
		else
			memcpy(req->iv, in_buf + (last_out_len - AES_BLOCK_SIZE),
			       AES_BLOCK_SIZE);
	}

	return ret;
}
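/*
 * Per-channel worker thread for the crypto channel: sleeps until woken by
 * the enqueue path, then drains the crypto_queue one request at a time.
 */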
static int dcp_chan_thread_aes(void *data)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = DCP_CHAN_CRYPTO;

	struct crypto_async_request *backlog;
	struct crypto_async_request *arq;

	int ret;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock(&sdcp->lock[chan]);
		backlog = crypto_get_backlog(&sdcp->queue[chan]);
		arq = crypto_dequeue_request(&sdcp->queue[chan]);
		spin_unlock(&sdcp->lock[chan]);

		if (!backlog && !arq) {
			schedule();
			continue;
		}

		set_current_state(TASK_RUNNING);

		if (backlog)
			crypto_request_complete(backlog, -EINPROGRESS);

		if (arq) {
			ret = mxs_dcp_aes_block_crypt(arq);
			crypto_request_complete(arq, ret);
		}
	}

	return 0;
}
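/*
 * Hand a request off to the software fallback skcipher. This is used for
 * key sizes the DCP cannot handle (anything other than AES-128).
 */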
static int mxs_dcp_block_fallback(struct skcipher_request *req, int enc)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
	struct dcp_async_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	skcipher_request_set_callback(&rctx->fallback_req, req->base.flags,
				      req->base.complete, req->base.data);
	skcipher_request_set_crypt(&rctx->fallback_req, req->src, req->dst,
				   req->cryptlen, req->iv);

	if (enc)
		ret = crypto_skcipher_encrypt(&rctx->fallback_req);
	else
		ret = crypto_skcipher_decrypt(&rctx->fallback_req);

	return ret;
}
static int mxs_dcp_aes_enqueue(struct skcipher_request *req, int enc, int ecb)
{
	struct dcp *sdcp = global_sdcp;
	struct crypto_async_request *arq = &req->base;
	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
	int ret;

	if (unlikely(actx->key_len != AES_KEYSIZE_128 && !actx->key_referenced))
		return mxs_dcp_block_fallback(req, enc);

	rctx->enc = enc;
	rctx->ecb = ecb;
	actx->chan = DCP_CHAN_CRYPTO;

	spin_lock(&sdcp->lock[actx->chan]);
	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
	spin_unlock(&sdcp->lock[actx->chan]);

	wake_up_process(sdcp->thread[actx->chan]);

	return ret;
}
static int mxs_dcp_aes_ecb_decrypt(struct skcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 0, 1);
}

static int mxs_dcp_aes_ecb_encrypt(struct skcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 1, 1);
}

static int mxs_dcp_aes_cbc_decrypt(struct skcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 0, 0);
}

static int mxs_dcp_aes_cbc_encrypt(struct skcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 1, 0);
}
static int mxs_dcp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			      unsigned int len)
{
	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);

	/*
	 * AES 128 is supported by the hardware, store key into temporary
	 * buffer and exit. We must use the temporary buffer here, since
	 * there can still be an operation in progress.
	 */
	actx->key_len = len;
	actx->key_referenced = false;
	if (len == AES_KEYSIZE_128) {
		memcpy(actx->key, key, len);
		return 0;
	}

	/*
	 * If the requested AES key size is not supported by the hardware,
	 * but is supported by in-kernel software implementation, we use
	 * software fallback.
	 */
	crypto_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(actx->fallback,
				  tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(actx->fallback, key, len);
}
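/*
 * "paes" keys are not key material but references to one of the DCP
 * hardware key slots (or the OTP/UNIQUE key); only the slot selector is
 * stored and key_referenced is flagged so the descriptor uses KEY_SELECT.
 */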
static int mxs_dcp_aes_setrefkey(struct crypto_skcipher *tfm, const u8 *key,
				 unsigned int len)
{
	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);

	if (len != DCP_PAES_KEYSIZE)
		return -EINVAL;

	switch (key[0]) {
	case DCP_PAES_KEY_SLOT0:
	case DCP_PAES_KEY_SLOT1:
	case DCP_PAES_KEY_SLOT2:
	case DCP_PAES_KEY_SLOT3:
	case DCP_PAES_KEY_UNIQUE:
	case DCP_PAES_KEY_OTP:
		memcpy(actx->key, key, len);
		actx->key_len = len;
		actx->key_referenced = true;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int mxs_dcp_aes_fallback_init_tfm(struct crypto_skcipher *tfm)
{
	const char *name = crypto_tfm_alg_name(crypto_skcipher_tfm(tfm));
	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *blk;

	blk = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(blk))
		return PTR_ERR(blk);

	actx->fallback = blk;
	crypto_skcipher_set_reqsize(tfm, sizeof(struct dcp_aes_req_ctx) +
					 crypto_skcipher_reqsize(blk));
	return 0;
}
static void mxs_dcp_aes_fallback_exit_tfm(struct crypto_skcipher *tfm)
{
	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(actx->fallback);
}
static int mxs_dcp_paes_init_tfm(struct crypto_skcipher *tfm)
{
	crypto_skcipher_set_reqsize(tfm, sizeof(struct dcp_aes_req_ctx));

	return 0;
}
/*
 * Hashing (SHA1/SHA256)
 */
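/*
 * Run one hashing pass over the data staged in sha_in_buf. HASH_INIT is
 * set on the first block of a stream, HASH_TERM on the last one, in which
 * case the digest is written to sha_out_buf.
 */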
static int mxs_dcp_run_sha(struct ahash_request *req)
{
	struct dcp *sdcp = global_sdcp;
	int ret;

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];

	dma_addr_t digest_phys = 0;
	dma_addr_t buf_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_in_buf,
					     DCP_BUF_SZ, DMA_TO_DEVICE);

	ret = dma_mapping_error(sdcp->dev, buf_phys);
	if (ret)
		return ret;

	/* Fill in the DMA descriptor. */
	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
			 MXS_DCP_CONTROL0_INTERRUPT |
			 MXS_DCP_CONTROL0_ENABLE_HASH;
	if (rctx->init)
		desc->control0 |= MXS_DCP_CONTROL0_HASH_INIT;

	desc->control1 = actx->alg;
	desc->next_cmd_addr = 0;
	desc->source = buf_phys;
	desc->destination = 0;
	desc->size = actx->fill;
	desc->payload = 0;
	desc->status = 0;

	/*
	 * Align driver with hw behavior when generating null hashes
	 */
	if (rctx->init && rctx->fini && desc->size == 0) {
		struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
		const uint8_t *sha_buf =
			(actx->alg == MXS_DCP_CONTROL1_HASH_SELECT_SHA1) ?
			sha1_null_hash : sha256_null_hash;
		memcpy(sdcp->coh->sha_out_buf, sha_buf, halg->digestsize);
		ret = 0;
		goto done_run;
	}

	/* Set HASH_TERM bit for last transfer block. */
	if (rctx->fini) {
		digest_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_out_buf,
					     DCP_SHA_PAY_SZ, DMA_FROM_DEVICE);
		ret = dma_mapping_error(sdcp->dev, digest_phys);
		if (ret)
			goto done_run;

		desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM;
		desc->payload = digest_phys;
	}

	ret = mxs_dcp_start_dma(actx);

	if (rctx->fini)
		dma_unmap_single(sdcp->dev, digest_phys, DCP_SHA_PAY_SZ,
				 DMA_FROM_DEVICE);

done_run:
	dma_unmap_single(sdcp->dev, buf_phys, DCP_BUF_SZ, DMA_TO_DEVICE);

	return ret;
}
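/*
 * Copy the request scatterlist into the coherent SHA input buffer and
 * submit it to the engine in DCP_BUF_SZ chunks. On the final chunk the
 * digest is read back and byte-reversed into req->result.
 */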
static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
{
	struct dcp *sdcp = global_sdcp;

	struct ahash_request *req = ahash_request_cast(arq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);

	uint8_t *in_buf = sdcp->coh->sha_in_buf;
	uint8_t *out_buf = sdcp->coh->sha_out_buf;

	struct scatterlist *src;

	unsigned int i, len, clen, oft = 0;
	int ret;

	int fin = rctx->fini;
	if (fin)
		rctx->fini = 0;

	src = req->src;
	len = req->nbytes;

	while (len) {
		if (actx->fill + len > DCP_BUF_SZ)
			clen = DCP_BUF_SZ - actx->fill;
		else
			clen = len;

		scatterwalk_map_and_copy(in_buf + actx->fill, src, oft, clen,
					 0);

		len -= clen;
		oft += clen;
		actx->fill += clen;

		/*
		 * If we filled the buffer and still have some
		 * more data, submit the buffer.
		 */
		if (len && actx->fill == DCP_BUF_SZ) {
			ret = mxs_dcp_run_sha(req);
			if (ret)
				return ret;
			actx->fill = 0;
		}
	}

	if (fin) {
		rctx->fini = 1;

		/* Submit whatever is left. */
		if (!req->result)
			return -EINVAL;

		ret = mxs_dcp_run_sha(req);
		if (ret)
			return ret;

		actx->fill = 0;

		/* For some reason the result is flipped */
		for (i = 0; i < halg->digestsize; i++)
			req->result[i] = out_buf[halg->digestsize - i - 1];
	}

	return 0;
}
static int dcp_chan_thread_sha(void *data)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = DCP_CHAN_HASH_SHA;

	struct crypto_async_request *backlog;
	struct crypto_async_request *arq;

	int ret;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock(&sdcp->lock[chan]);
		backlog = crypto_get_backlog(&sdcp->queue[chan]);
		arq = crypto_dequeue_request(&sdcp->queue[chan]);
		spin_unlock(&sdcp->lock[chan]);

		if (!backlog && !arq) {
			schedule();
			continue;
		}

		set_current_state(TASK_RUNNING);

		if (backlog)
			crypto_request_complete(backlog, -EINPROGRESS);

		if (arq) {
			ret = dcp_sha_req_to_buf(arq);
			crypto_request_complete(arq, ret);
		}
	}

	return 0;
}
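/*
 * Begin a new hashing session: pick SHA1 or SHA256 from the algorithm
 * name and bind the session to the hashing channel.
 */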
static int dcp_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);

	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);

	/*
	 * Start hashing session. The code below only inits the
	 * hashing session context, nothing more.
	 */
	memset(actx, 0, sizeof(*actx));

	if (strcmp(halg->base.cra_name, "sha1") == 0)
		actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA1;
	else
		actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA256;

	actx->fill = 0;
	actx->hot = 0;
	actx->chan = DCP_CHAN_HASH_SHA;

	mutex_init(&actx->mutex);

	return 0;
}
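/*
 * Common enqueue path for update/final/finup: mark whether this is the
 * trailing request, queue it on the hashing channel and wake the worker.
 */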
static int dcp_sha_update_fx(struct ahash_request *req, int fini)
{
	struct dcp *sdcp = global_sdcp;

	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);

	int ret;

	/*
	 * Ignore requests that have no data in them and are not
	 * the trailing requests in the stream of requests.
	 */
	if (!req->nbytes && !fini)
		return 0;

	mutex_lock(&actx->mutex);

	rctx->fini = fini;

	if (!actx->hot) {
		actx->hot = 1;
		rctx->init = 1;
	}

	spin_lock(&sdcp->lock[actx->chan]);
	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
	spin_unlock(&sdcp->lock[actx->chan]);

	wake_up_process(sdcp->thread[actx->chan]);
	mutex_unlock(&actx->mutex);

	return ret;
}
static int dcp_sha_update(struct ahash_request *req)
{
	return dcp_sha_update_fx(req, 0);
}
static int dcp_sha_final(struct ahash_request *req)
{
	ahash_request_set_crypt(req, NULL, req->result, 0);
	req->nbytes = 0;
	return dcp_sha_update_fx(req, 1);
}
static int dcp_sha_finup(struct ahash_request *req)
{
	return dcp_sha_update_fx(req, 1);
}
static int dcp_sha_digest(struct ahash_request *req)
{
	int ret;

	ret = dcp_sha_init(req);
	if (ret)
		return ret;

	return dcp_sha_finup(req);
}
static int dcp_sha_import(struct ahash_request *req, const void *in)
{
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	const struct dcp_export_state *export = in;

	memset(rctx, 0, sizeof(struct dcp_sha_req_ctx));
	memset(actx, 0, sizeof(struct dcp_async_ctx));
	memcpy(rctx, &export->req_ctx, sizeof(struct dcp_sha_req_ctx));
	memcpy(actx, &export->async_ctx, sizeof(struct dcp_async_ctx));

	return 0;
}
static int dcp_sha_export(struct ahash_request *req, void *out)
{
	struct dcp_sha_req_ctx *rctx_state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx_state = crypto_ahash_ctx(tfm);
	struct dcp_export_state *export = out;

	memcpy(&export->req_ctx, rctx_state, sizeof(struct dcp_sha_req_ctx));
	memcpy(&export->async_ctx, actx_state, sizeof(struct dcp_async_ctx));

	return 0;
}
static int dcp_sha_cra_init(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct dcp_sha_req_ctx));
	return 0;
}

static void dcp_sha_cra_exit(struct crypto_tfm *tfm)
{
}
/* AES 128 ECB and AES 128 CBC */
static struct skcipher_alg dcp_aes_algs[] = {
	{
		.base.cra_name		= "ecb(aes)",
		.base.cra_driver_name	= "ecb-aes-dcp",
		.base.cra_priority	= 400,
		.base.cra_alignmask	= 15,
		.base.cra_flags		= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.base.cra_blocksize	= AES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct dcp_async_ctx),
		.base.cra_module	= THIS_MODULE,

		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
		.setkey			= mxs_dcp_aes_setkey,
		.encrypt		= mxs_dcp_aes_ecb_encrypt,
		.decrypt		= mxs_dcp_aes_ecb_decrypt,
		.init			= mxs_dcp_aes_fallback_init_tfm,
		.exit			= mxs_dcp_aes_fallback_exit_tfm,
	}, {
		.base.cra_name		= "cbc(aes)",
		.base.cra_driver_name	= "cbc-aes-dcp",
		.base.cra_priority	= 400,
		.base.cra_alignmask	= 15,
		.base.cra_flags		= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.base.cra_blocksize	= AES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct dcp_async_ctx),
		.base.cra_module	= THIS_MODULE,

		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
		.setkey			= mxs_dcp_aes_setkey,
		.encrypt		= mxs_dcp_aes_cbc_encrypt,
		.decrypt		= mxs_dcp_aes_cbc_decrypt,
		.ivsize			= AES_BLOCK_SIZE,
		.init			= mxs_dcp_aes_fallback_init_tfm,
		.exit			= mxs_dcp_aes_fallback_exit_tfm,
	}, {
		.base.cra_name		= "ecb(paes)",
		.base.cra_driver_name	= "ecb-paes-dcp",
		.base.cra_priority	= 401,
		.base.cra_alignmask	= 15,
		.base.cra_flags		= CRYPTO_ALG_ASYNC | CRYPTO_ALG_INTERNAL,
		.base.cra_blocksize	= AES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct dcp_async_ctx),
		.base.cra_module	= THIS_MODULE,

		.min_keysize		= DCP_PAES_KEYSIZE,
		.max_keysize		= DCP_PAES_KEYSIZE,
		.setkey			= mxs_dcp_aes_setrefkey,
		.encrypt		= mxs_dcp_aes_ecb_encrypt,
		.decrypt		= mxs_dcp_aes_ecb_decrypt,
		.init			= mxs_dcp_paes_init_tfm,
	}, {
		.base.cra_name		= "cbc(paes)",
		.base.cra_driver_name	= "cbc-paes-dcp",
		.base.cra_priority	= 401,
		.base.cra_alignmask	= 15,
		.base.cra_flags		= CRYPTO_ALG_ASYNC | CRYPTO_ALG_INTERNAL,
		.base.cra_blocksize	= AES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct dcp_async_ctx),
		.base.cra_module	= THIS_MODULE,

		.min_keysize		= DCP_PAES_KEYSIZE,
		.max_keysize		= DCP_PAES_KEYSIZE,
		.setkey			= mxs_dcp_aes_setrefkey,
		.encrypt		= mxs_dcp_aes_cbc_encrypt,
		.decrypt		= mxs_dcp_aes_cbc_decrypt,
		.ivsize			= AES_BLOCK_SIZE,
		.init			= mxs_dcp_paes_init_tfm,
	},
};
static struct ahash_alg dcp_sha1_alg = {
	.init	= dcp_sha_init,
	.update	= dcp_sha_update,
	.final	= dcp_sha_final,
	.finup	= dcp_sha_finup,
	.digest	= dcp_sha_digest,
	.import	= dcp_sha_import,
	.export	= dcp_sha_export,
	.halg	= {
		.digestsize	= SHA1_DIGEST_SIZE,
		.statesize	= sizeof(struct dcp_export_state),
		.base		= {
			.cra_name		= "sha1",
			.cra_driver_name	= "sha1-dcp",
			.cra_priority		= 400,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA1_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
			.cra_module		= THIS_MODULE,
			.cra_init		= dcp_sha_cra_init,
			.cra_exit		= dcp_sha_cra_exit,
		},
	},
};
static struct ahash_alg dcp_sha256_alg = {
	.init	= dcp_sha_init,
	.update	= dcp_sha_update,
	.final	= dcp_sha_final,
	.finup	= dcp_sha_finup,
	.digest	= dcp_sha_digest,
	.import	= dcp_sha_import,
	.export	= dcp_sha_export,
	.halg	= {
		.digestsize	= SHA256_DIGEST_SIZE,
		.statesize	= sizeof(struct dcp_export_state),
		.base		= {
			.cra_name		= "sha256",
			.cra_driver_name	= "sha256-dcp",
			.cra_priority		= 400,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA256_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
			.cra_module		= THIS_MODULE,
			.cra_init		= dcp_sha_cra_init,
			.cra_exit		= dcp_sha_cra_exit,
		},
	},
};
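/*
 * One IRQ handler serves both DCP interrupt lines: read and clear the
 * status bits and complete the per-channel completions that finished.
 */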
static irqreturn_t mxs_dcp_irq(int irq, void *context)
{
	struct dcp *sdcp = context;
	uint32_t stat;
	int i;

	stat = readl(sdcp->base + MXS_DCP_STAT);
	stat &= MXS_DCP_STAT_IRQ_MASK;
	if (!stat)
		return IRQ_NONE;

	/* Clear the interrupts. */
	writel(stat, sdcp->base + MXS_DCP_STAT_CLR);

	/* Complete the DMA requests that finished. */
	for (i = 0; i < DCP_MAX_CHANS; i++)
		if (stat & (1 << i))
			complete(&sdcp->completion[i]);

	return IRQ_HANDLED;
}
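/*
 * Probe: map registers, claim both IRQs, allocate the coherent bounce
 * block, reset and configure the DCP, start the per-channel worker
 * threads and register the algorithms the hardware advertises in
 * CAPABILITY1.
 */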
static int mxs_dcp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dcp *sdcp = NULL;
	int i, ret;
	int dcp_vmi_irq, dcp_irq;

	if (global_sdcp) {
		dev_err(dev, "Only one DCP instance allowed!\n");
		return -ENODEV;
	}

	dcp_vmi_irq = platform_get_irq(pdev, 0);
	if (dcp_vmi_irq < 0)
		return dcp_vmi_irq;

	dcp_irq = platform_get_irq(pdev, 1);
	if (dcp_irq < 0)
		return dcp_irq;

	sdcp = devm_kzalloc(dev, sizeof(*sdcp), GFP_KERNEL);
	if (!sdcp)
		return -ENOMEM;

	sdcp->dev = dev;
	sdcp->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(sdcp->base))
		return PTR_ERR(sdcp->base);

	ret = devm_request_irq(dev, dcp_vmi_irq, mxs_dcp_irq, 0,
			       "dcp-vmi-irq", sdcp);
	if (ret) {
		dev_err(dev, "Failed to claim DCP VMI IRQ!\n");
		return ret;
	}

	ret = devm_request_irq(dev, dcp_irq, mxs_dcp_irq, 0,
			       "dcp-irq", sdcp);
	if (ret) {
		dev_err(dev, "Failed to claim DCP IRQ!\n");
		return ret;
	}

	/* Allocate coherent helper block. */
	sdcp->coh = devm_kzalloc(dev, sizeof(*sdcp->coh) + DCP_ALIGNMENT,
				 GFP_KERNEL);
	if (!sdcp->coh)
		return -ENOMEM;

	/* Re-align the structure so it fits the DCP constraints. */
	sdcp->coh = PTR_ALIGN(sdcp->coh, DCP_ALIGNMENT);

	/* DCP clock is optional, only used on some SOCs */
	sdcp->dcp_clk = devm_clk_get_optional_enabled(dev, "dcp");
	if (IS_ERR(sdcp->dcp_clk))
		return PTR_ERR(sdcp->dcp_clk);

	/* Restart the DCP block. */
	ret = stmp_reset_block(sdcp->base);
	if (ret) {
		dev_err(dev, "Failed reset\n");
		return ret;
	}

	/* Initialize control register. */
	writel(MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES |
	       MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING | 0xf,
	       sdcp->base + MXS_DCP_CTRL);

	/* Enable all DCP DMA channels. */
	writel(MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK,
	       sdcp->base + MXS_DCP_CHANNELCTRL);

	/*
	 * We do not enable context switching. Give the context buffer a
	 * pointer to an illegal address so if context switching is
	 * inadvertently enabled, the DCP will return an error instead of
	 * trashing good memory. The DCP DMA cannot access ROM, so any ROM
	 * address will do.
	 */
	writel(0xffff0000, sdcp->base + MXS_DCP_CONTEXT);
	for (i = 0; i < DCP_MAX_CHANS; i++)
		writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(i));
	writel(0xffffffff, sdcp->base + MXS_DCP_STAT_CLR);

	global_sdcp = sdcp;

	platform_set_drvdata(pdev, sdcp);

	for (i = 0; i < DCP_MAX_CHANS; i++) {
		spin_lock_init(&sdcp->lock[i]);
		init_completion(&sdcp->completion[i]);
		crypto_init_queue(&sdcp->queue[i], 50);
	}

	/* Create the SHA and AES handler threads. */
	sdcp->thread[DCP_CHAN_HASH_SHA] = kthread_run(dcp_chan_thread_sha,
						      NULL, "mxs_dcp_chan/sha");
	if (IS_ERR(sdcp->thread[DCP_CHAN_HASH_SHA])) {
		dev_err(dev, "Error starting SHA thread!\n");
		ret = PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]);
		return ret;
	}

	sdcp->thread[DCP_CHAN_CRYPTO] = kthread_run(dcp_chan_thread_aes,
						    NULL, "mxs_dcp_chan/aes");
	if (IS_ERR(sdcp->thread[DCP_CHAN_CRYPTO])) {
		dev_err(dev, "Error starting crypto thread!\n");
		ret = PTR_ERR(sdcp->thread[DCP_CHAN_CRYPTO]);
		goto err_destroy_sha_thread;
	}

	/* Register the various crypto algorithms. */
	sdcp->caps = readl(sdcp->base + MXS_DCP_CAPABILITY1);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128) {
		ret = crypto_register_skciphers(dcp_aes_algs,
						ARRAY_SIZE(dcp_aes_algs));
		if (ret) {
			/* Failed to register algorithm. */
			dev_err(dev, "Failed to register AES crypto!\n");
			goto err_destroy_aes_thread;
		}
	}

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1) {
		ret = crypto_register_ahash(&dcp_sha1_alg);
		if (ret) {
			dev_err(dev, "Failed to register %s hash!\n",
				dcp_sha1_alg.halg.base.cra_name);
			goto err_unregister_aes;
		}
	}

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256) {
		ret = crypto_register_ahash(&dcp_sha256_alg);
		if (ret) {
			dev_err(dev, "Failed to register %s hash!\n",
				dcp_sha256_alg.halg.base.cra_name);
			goto err_unregister_sha1;
		}
	}

	return 0;

err_unregister_sha1:
	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
		crypto_unregister_ahash(&dcp_sha1_alg);

err_unregister_aes:
	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
		crypto_unregister_skciphers(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));

err_destroy_aes_thread:
	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);

err_destroy_sha_thread:
	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);

	return ret;
}
static void mxs_dcp_remove(struct platform_device *pdev)
{
	struct dcp *sdcp = platform_get_drvdata(pdev);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256)
		crypto_unregister_ahash(&dcp_sha256_alg);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
		crypto_unregister_ahash(&dcp_sha1_alg);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
		crypto_unregister_skciphers(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));

	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);

	platform_set_drvdata(pdev, NULL);

	global_sdcp = NULL;
}
static const struct of_device_id mxs_dcp_dt_ids[] = {
	{ .compatible = "fsl,imx23-dcp", .data = NULL, },
	{ .compatible = "fsl,imx28-dcp", .data = NULL, },
	{ /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, mxs_dcp_dt_ids);
static struct platform_driver mxs_dcp_driver = {
	.probe	= mxs_dcp_probe,
	.remove_new = mxs_dcp_remove,
	.driver	= {
		.name		= "mxs-dcp",
		.of_match_table	= mxs_dcp_dt_ids,
	},
};

module_platform_driver(mxs_dcp_driver);
MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
MODULE_DESCRIPTION("Freescale MXS DCP Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:mxs-dcp");