/*
 * Freescale i.MX23/i.MX28 Data Co-Processor driver
 *
 * Copyright (C) 2013 Marek Vasut <marex@denx.de>
 *
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/stmp_device.h>
#include <linux/clk.h>

#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#define DCP_MAX_CHANS	4
#define DCP_BUF_SZ	PAGE_SIZE
#define DCP_SHA_PAY_SZ	64

#define DCP_ALIGNMENT	64
/*
 * Null hashes to align with hw behavior on imx6sl and ull
 * these are flipped for consistency with hw output
 */
static const uint8_t sha1_null_hash[] =
	"\x09\x07\xd8\xaf\x90\x18\x60\x95\xef\xbf"
	"\x55\x32\x0d\x4b\x6b\x5e\xee\xa3\x39\xda";
static const uint8_t sha256_null_hash[] =
	"\x55\xb8\x52\x78\x1b\x99\x95\xa4"
	"\x4c\x93\x9b\x64\xe4\x41\xae\x27"
	"\x24\xb9\x6f\x99\xc8\xf4\xfb\x9a"
	"\x14\x1c\xfc\x98\x42\xc4\xb0\xe3";
/* DCP DMA descriptor. */
struct dcp_dma_desc {
	uint32_t	next_cmd_addr;
	uint32_t	control0;
	uint32_t	control1;
	uint32_t	source;
	uint32_t	destination;
	uint32_t	size;
	uint32_t	payload;
	uint32_t	status;
};
/* Coherent aligned block for bounce buffering. */
struct dcp_coherent_block {
	uint8_t			aes_in_buf[DCP_BUF_SZ];
	uint8_t			aes_out_buf[DCP_BUF_SZ];
	uint8_t			sha_in_buf[DCP_BUF_SZ];
	uint8_t			sha_out_buf[DCP_SHA_PAY_SZ];

	uint8_t			aes_key[2 * AES_KEYSIZE_128];

	struct dcp_dma_desc	desc[DCP_MAX_CHANS];
};
struct dcp {
	struct device			*dev;
	void __iomem			*base;

	uint32_t			caps;

	struct dcp_coherent_block	*coh;

	struct completion		completion[DCP_MAX_CHANS];
	spinlock_t			lock[DCP_MAX_CHANS];
	struct task_struct		*thread[DCP_MAX_CHANS];
	struct crypto_queue		queue[DCP_MAX_CHANS];
	struct clk			*dcp_clk;
};
enum dcp_chan {
	DCP_CHAN_HASH_SHA	= 0,
	DCP_CHAN_CRYPTO,
};
struct dcp_async_ctx {
	/* Common context */
	enum dcp_chan			chan;
	uint32_t			fill;

	/* SHA Hash-specific context */
	struct mutex			mutex;
	uint32_t			alg;
	int				hot;

	/* Crypto-specific context */
	struct crypto_sync_skcipher	*fallback;
	unsigned int			key_len;
	uint8_t				key[AES_KEYSIZE_128];
};
struct dcp_aes_req_ctx {
	int	enc;
	int	ecb;
};
struct dcp_sha_req_ctx {
	int	init;
	int	fini;
};
struct dcp_export_state {
	struct dcp_sha_req_ctx	req_ctx;
	struct dcp_async_ctx	async_ctx;
};
/*
 * There can only ever be one instance of the MXS DCP due to the
 * design of the Linux Crypto API.
 */
static struct dcp *global_sdcp;
/* DCP register layout. */
#define MXS_DCP_CTRL				0x00
#define MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES	(1 << 23)
#define MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING	(1 << 22)

#define MXS_DCP_STAT				0x10
#define MXS_DCP_STAT_CLR			0x18
#define MXS_DCP_STAT_IRQ_MASK			0xf

#define MXS_DCP_CHANNELCTRL			0x20
#define MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK	0xff

#define MXS_DCP_CAPABILITY1			0x40
#define MXS_DCP_CAPABILITY1_SHA256		(4 << 16)
#define MXS_DCP_CAPABILITY1_SHA1		(1 << 16)
#define MXS_DCP_CAPABILITY1_AES128		(1 << 0)

#define MXS_DCP_CONTEXT				0x50

#define MXS_DCP_CH_N_CMDPTR(n)			(0x100 + ((n) * 0x40))
#define MXS_DCP_CH_N_SEMA(n)			(0x110 + ((n) * 0x40))
#define MXS_DCP_CH_N_STAT(n)			(0x120 + ((n) * 0x40))
#define MXS_DCP_CH_N_STAT_CLR(n)		(0x128 + ((n) * 0x40))

/* DMA descriptor bits. */
#define MXS_DCP_CONTROL0_HASH_TERM		(1 << 13)
#define MXS_DCP_CONTROL0_HASH_INIT		(1 << 12)
#define MXS_DCP_CONTROL0_PAYLOAD_KEY		(1 << 11)
#define MXS_DCP_CONTROL0_CIPHER_ENCRYPT		(1 << 8)
#define MXS_DCP_CONTROL0_CIPHER_INIT		(1 << 9)
#define MXS_DCP_CONTROL0_ENABLE_HASH		(1 << 6)
#define MXS_DCP_CONTROL0_ENABLE_CIPHER		(1 << 5)
#define MXS_DCP_CONTROL0_DECR_SEMAPHORE		(1 << 1)
#define MXS_DCP_CONTROL0_INTERRUPT		(1 << 0)

#define MXS_DCP_CONTROL1_HASH_SELECT_SHA256	(2 << 16)
#define MXS_DCP_CONTROL1_HASH_SELECT_SHA1	(0 << 16)
#define MXS_DCP_CONTROL1_CIPHER_MODE_CBC	(1 << 4)
#define MXS_DCP_CONTROL1_CIPHER_MODE_ECB	(0 << 4)
#define MXS_DCP_CONTROL1_CIPHER_SELECT_AES128	(0 << 0)
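
/*
 * Push the DMA descriptor of the given context to its DCP channel,
 * kick the channel semaphore and sleep until the interrupt handler
 * signals the per-channel completion (or the one second timeout expires).
 */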
static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = actx->chan;
	uint32_t stat;
	unsigned long ret;
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];

	dma_addr_t desc_phys = dma_map_single(sdcp->dev, desc, sizeof(*desc),
					      DMA_TO_DEVICE);

	reinit_completion(&sdcp->completion[chan]);

	/* Clear status register. */
	writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(chan));

	/* Load the DMA descriptor. */
	writel(desc_phys, sdcp->base + MXS_DCP_CH_N_CMDPTR(chan));

	/* Increment the semaphore to start the DMA transfer. */
	writel(1, sdcp->base + MXS_DCP_CH_N_SEMA(chan));

	ret = wait_for_completion_timeout(&sdcp->completion[chan],
					  msecs_to_jiffies(1000));
	if (!ret) {
		dev_err(sdcp->dev, "Channel %i timeout (DCP_STAT=0x%08x)\n",
			chan, readl(sdcp->base + MXS_DCP_STAT));
		return -ETIMEDOUT;
	}

	stat = readl(sdcp->base + MXS_DCP_CH_N_STAT(chan));
	if (stat & 0xff) {
		dev_err(sdcp->dev, "Channel %i error (CH_STAT=0x%08x)\n",
			chan, stat);
		return -EINVAL;
	}

	dma_unmap_single(sdcp->dev, desc_phys, sizeof(*desc), DMA_TO_DEVICE);

	return 0;
}
/*
 * Encryption (AES128)
 */
static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
			   struct ablkcipher_request *req, int init)
{
	struct dcp *sdcp = global_sdcp;
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
	struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
	int ret;

	dma_addr_t key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
					     2 * AES_KEYSIZE_128,
					     DMA_TO_DEVICE);
	dma_addr_t src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
					     DCP_BUF_SZ, DMA_TO_DEVICE);
	dma_addr_t dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
					     DCP_BUF_SZ, DMA_FROM_DEVICE);

	if (actx->fill % AES_BLOCK_SIZE) {
		dev_err(sdcp->dev, "Invalid block size!\n");
		ret = -EINVAL;
		goto aes_done_run;
	}

	/* Fill in the DMA descriptor. */
	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
			 MXS_DCP_CONTROL0_INTERRUPT |
			 MXS_DCP_CONTROL0_ENABLE_CIPHER;

	/* Payload contains the key. */
	desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY;

	if (rctx->enc)
		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_ENCRYPT;
	if (init)
		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_INIT;

	desc->control1 = MXS_DCP_CONTROL1_CIPHER_SELECT_AES128;

	if (rctx->ecb)
		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_ECB;
	else
		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_CBC;

	desc->next_cmd_addr = 0;
	desc->source = src_phys;
	desc->destination = dst_phys;
	desc->size = actx->fill;
	desc->payload = key_phys;
	desc->status = 0;

	ret = mxs_dcp_start_dma(actx);

aes_done_run:
	dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
			 DMA_TO_DEVICE);
	dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
	dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE);

	return ret;
}
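
/*
 * Walk the source scatterlist, bounce the data through the coherent
 * aes_in_buf/aes_out_buf buffers in DCP_BUF_SZ chunks and copy the
 * results back into the destination scatterlist.
 */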
static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
{
	struct dcp *sdcp = global_sdcp;
	int ret = 0;
	struct ablkcipher_request *req = ablkcipher_request_cast(arq);
	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
	struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);

	struct scatterlist *dst = req->dst;
	struct scatterlist *src = req->src;
	const int nents = sg_nents(req->src);

	const int out_off = DCP_BUF_SZ;
	uint8_t *in_buf = sdcp->coh->aes_in_buf;
	uint8_t *out_buf = sdcp->coh->aes_out_buf;

	uint8_t *out_tmp, *src_buf, *dst_buf = NULL;
	uint32_t dst_off = 0;
	uint32_t last_out_len = 0;

	uint8_t *key = sdcp->coh->aes_key;

	int split = 0;
	unsigned int i, len, clen, rem = 0, tlen = 0;
	int init = 0;
	bool limit_hit = false;

	actx->fill = 0;

	/* Copy the key from the temporary location. */
	memcpy(key, actx->key, actx->key_len);

	if (!rctx->ecb) {
		/* Copy the CBC IV just past the key. */
		memcpy(key + AES_KEYSIZE_128, req->info, AES_KEYSIZE_128);
		/* CBC needs the INIT set. */
		init = 1;
	} else {
		memset(key + AES_KEYSIZE_128, 0, AES_KEYSIZE_128);
	}

	for_each_sg(req->src, src, nents, i) {
		src_buf = sg_virt(src);
		len = sg_dma_len(src);
		tlen += len;
		limit_hit = tlen > req->nbytes;

		if (limit_hit)
			len = req->nbytes - (tlen - len);

		do {
			if (actx->fill + len > out_off)
				clen = out_off - actx->fill;
			else
				clen = len;

			memcpy(in_buf + actx->fill, src_buf, clen);
			len -= clen;
			src_buf += clen;
			actx->fill += clen;

			/*
			 * If we filled the buffer or this is the last SG,
			 * submit the buffer.
			 */
			if (actx->fill == out_off || sg_is_last(src) ||
			    limit_hit) {
				ret = mxs_dcp_run_aes(actx, req, init);
				if (ret)
					return ret;
				init = 0;

				out_tmp = out_buf;
				last_out_len = actx->fill;
				while (dst && actx->fill) {
					if (!split) {
						dst_buf = sg_virt(dst);
						dst_off = 0;
					}
					rem = min(sg_dma_len(dst) - dst_off,
						  actx->fill);

					memcpy(dst_buf + dst_off, out_tmp, rem);
					out_tmp += rem;
					dst_off += rem;
					actx->fill -= rem;

					if (dst_off == sg_dma_len(dst)) {
						dst = sg_next(dst);
						split = 0;
					} else {
						split = 1;
					}
				}
			}
		} while (len);

		if (limit_hit)
			break;
	}

	/* Copy the IV for CBC for chaining */
	if (!rctx->ecb) {
		if (rctx->enc)
			memcpy(req->info, out_buf+(last_out_len-AES_BLOCK_SIZE),
			       AES_BLOCK_SIZE);
		else
			memcpy(req->info, in_buf+(last_out_len-AES_BLOCK_SIZE),
			       AES_BLOCK_SIZE);
	}

	return ret;
}
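
/*
 * Per-channel worker thread for the AES channel: it sleeps until
 * mxs_dcp_aes_enqueue() wakes it up, then drains the crypto queue.
 */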
static int dcp_chan_thread_aes(void *data)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = DCP_CHAN_CRYPTO;

	struct crypto_async_request *backlog;
	struct crypto_async_request *arq;

	int ret;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock(&sdcp->lock[chan]);
		backlog = crypto_get_backlog(&sdcp->queue[chan]);
		arq = crypto_dequeue_request(&sdcp->queue[chan]);
		spin_unlock(&sdcp->lock[chan]);

		if (!backlog && !arq) {
			schedule();
			continue;
		}

		set_current_state(TASK_RUNNING);

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		if (arq) {
			ret = mxs_dcp_aes_block_crypt(arq);
			arq->complete(arq, ret);
		}
	}

	return 0;
}
static int mxs_dcp_block_fallback(struct ablkcipher_request *req, int enc)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct dcp_async_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
	int ret;

	skcipher_request_set_sync_tfm(subreq, ctx->fallback);
	skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst,
				   req->nbytes, req->info);

	if (enc)
		ret = crypto_skcipher_encrypt(subreq);
	else
		ret = crypto_skcipher_decrypt(subreq);

	skcipher_request_zero(subreq);

	return ret;
}
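
/*
 * Requests with key sizes other than AES-128 are handed to the software
 * fallback; everything else is queued for the DCP crypto channel thread.
 */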
static int mxs_dcp_aes_enqueue(struct ablkcipher_request *req, int enc, int ecb)
{
	struct dcp *sdcp = global_sdcp;
	struct crypto_async_request *arq = &req->base;
	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
	struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
	int ret;

	if (unlikely(actx->key_len != AES_KEYSIZE_128))
		return mxs_dcp_block_fallback(req, enc);

	rctx->enc = enc;
	rctx->ecb = ecb;
	actx->chan = DCP_CHAN_CRYPTO;

	spin_lock(&sdcp->lock[actx->chan]);
	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
	spin_unlock(&sdcp->lock[actx->chan]);

	wake_up_process(sdcp->thread[actx->chan]);

	return ret;
}
static int mxs_dcp_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 0, 1);
}

static int mxs_dcp_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 1, 1);
}

static int mxs_dcp_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 0, 0);
}

static int mxs_dcp_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 1, 0);
}
static int mxs_dcp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			      unsigned int len)
{
	struct dcp_async_ctx *actx = crypto_ablkcipher_ctx(tfm);
	int ret;

	/*
	 * AES 128 is supported by the hardware, store key into temporary
	 * buffer and exit. We must use the temporary buffer here, since
	 * there can still be an operation in progress.
	 */
	if (len == AES_KEYSIZE_128) {
		memcpy(actx->key, key, len);
		actx->key_len = len;
		return 0;
	}

	/*
	 * If the requested AES key size is not supported by the hardware,
	 * but is supported by in-kernel software implementation, we use
	 * software fallback.
	 */
	crypto_sync_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK);
	crypto_sync_skcipher_set_flags(actx->fallback,
				       tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	ret = crypto_sync_skcipher_setkey(actx->fallback, key, len);
	if (!ret)
		return 0;

	tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK;
	tfm->base.crt_flags |= crypto_sync_skcipher_get_flags(actx->fallback) &
			       CRYPTO_TFM_RES_MASK;

	return ret;
}
static int mxs_dcp_aes_fallback_init(struct crypto_tfm *tfm)
{
	const char *name = crypto_tfm_alg_name(tfm);
	struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);
	struct crypto_sync_skcipher *blk;

	blk = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(blk))
		return PTR_ERR(blk);

	actx->fallback = blk;
	tfm->crt_ablkcipher.reqsize = sizeof(struct dcp_aes_req_ctx);
	return 0;
}

static void mxs_dcp_aes_fallback_exit(struct crypto_tfm *tfm)
{
	struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);

	crypto_free_sync_skcipher(actx->fallback);
}
/*
 * Hashing (SHA1/SHA256)
 */
static int mxs_dcp_run_sha(struct ahash_request *req)
{
	struct dcp *sdcp = global_sdcp;
	int ret;

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];

	dma_addr_t digest_phys = 0;
	dma_addr_t buf_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_in_buf,
					     DCP_BUF_SZ, DMA_TO_DEVICE);

	/* Fill in the DMA descriptor. */
	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
			 MXS_DCP_CONTROL0_INTERRUPT |
			 MXS_DCP_CONTROL0_ENABLE_HASH;
	if (rctx->init)
		desc->control0 |= MXS_DCP_CONTROL0_HASH_INIT;

	desc->control1 = actx->alg;
	desc->next_cmd_addr = 0;
	desc->source = buf_phys;
	desc->destination = 0;
	desc->size = actx->fill;
	desc->payload = 0;
	desc->status = 0;

	/*
	 * Align driver with hw behavior when generating null hashes
	 */
	if (rctx->init && rctx->fini && desc->size == 0) {
		struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
		const uint8_t *sha_buf =
			(actx->alg == MXS_DCP_CONTROL1_HASH_SELECT_SHA1) ?
			sha1_null_hash : sha256_null_hash;
		memcpy(sdcp->coh->sha_out_buf, sha_buf, halg->digestsize);
		ret = 0;
		goto done_run;
	}

	/* Set HASH_TERM bit for last transfer block. */
	if (rctx->fini) {
		digest_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_out_buf,
					     DCP_SHA_PAY_SZ, DMA_FROM_DEVICE);
		desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM;
		desc->payload = digest_phys;
	}

	ret = mxs_dcp_start_dma(actx);

	if (rctx->fini)
		dma_unmap_single(sdcp->dev, digest_phys, DCP_SHA_PAY_SZ,
				 DMA_FROM_DEVICE);

done_run:
	dma_unmap_single(sdcp->dev, buf_phys, DCP_BUF_SZ, DMA_TO_DEVICE);

	return ret;
}
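
/*
 * Copy the request scatterlist into the coherent sha_in_buf bounce
 * buffer and submit it to the hardware in DCP_BUF_SZ chunks; on the
 * final chunk the (byte-reversed) digest is copied into req->result.
 */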
static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
{
	struct dcp *sdcp = global_sdcp;

	struct ahash_request *req = ahash_request_cast(arq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
	const int nents = sg_nents(req->src);

	uint8_t *in_buf = sdcp->coh->sha_in_buf;
	uint8_t *out_buf = sdcp->coh->sha_out_buf;

	uint8_t *src_buf;

	struct scatterlist *src;

	unsigned int i, len, clen;
	int ret;

	int fin = rctx->fini;
	if (fin)
		rctx->fini = 0;

	for_each_sg(req->src, src, nents, i) {
		src_buf = sg_virt(src);
		len = sg_dma_len(src);

		do {
			if (actx->fill + len > DCP_BUF_SZ)
				clen = DCP_BUF_SZ - actx->fill;
			else
				clen = len;

			memcpy(in_buf + actx->fill, src_buf, clen);
			len -= clen;
			src_buf += clen;
			actx->fill += clen;

			/*
			 * If we filled the buffer and still have some
			 * more data, submit the buffer.
			 */
			if (len && actx->fill == DCP_BUF_SZ) {
				ret = mxs_dcp_run_sha(req);
				if (ret)
					return ret;
				actx->fill = 0;
				rctx->init = 0;
			}
		} while (len);
	}

	if (fin) {
		rctx->fini = 1;

		/* Submit whatever is left. */
		if (!req->result)
			return -EINVAL;

		ret = mxs_dcp_run_sha(req);
		if (ret)
			return ret;

		actx->fill = 0;

		/* For some reason the result is flipped */
		for (i = 0; i < halg->digestsize; i++)
			req->result[i] = out_buf[halg->digestsize - i - 1];
	}

	return 0;
}
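
/* Per-channel worker thread for the SHA channel, mirroring the AES one. */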
static int dcp_chan_thread_sha(void *data)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = DCP_CHAN_HASH_SHA;

	struct crypto_async_request *backlog;
	struct crypto_async_request *arq;

	struct dcp_sha_req_ctx *rctx;

	struct ahash_request *req;
	int ret;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock(&sdcp->lock[chan]);
		backlog = crypto_get_backlog(&sdcp->queue[chan]);
		arq = crypto_dequeue_request(&sdcp->queue[chan]);
		spin_unlock(&sdcp->lock[chan]);

		if (!backlog && !arq) {
			schedule();
			continue;
		}

		set_current_state(TASK_RUNNING);

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		if (arq) {
			req = ahash_request_cast(arq);
			rctx = ahash_request_ctx(req);

			ret = dcp_sha_req_to_buf(arq);

			arq->complete(arq, ret);
		}
	}

	return 0;
}
static int dcp_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);

	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);

	/*
	 * Start hashing session. The code below only inits the
	 * hashing session context, nothing more.
	 */
	memset(actx, 0, sizeof(*actx));

	if (strcmp(halg->base.cra_name, "sha1") == 0)
		actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA1;
	else
		actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA256;

	actx->chan = DCP_CHAN_HASH_SHA;

	mutex_init(&actx->mutex);

	return 0;
}
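
/*
 * Common helper for update/final/finup: enqueue the request on the SHA
 * channel and wake the worker thread; actx->mutex serializes submissions
 * from the same transform.
 */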
static int dcp_sha_update_fx(struct ahash_request *req, int fini)
{
	struct dcp *sdcp = global_sdcp;

	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);

	int ret;

	/*
	 * Ignore requests that have no data in them and are not
	 * the trailing requests in the stream of requests.
	 */
	if (!req->nbytes && !fini)
		return 0;

	mutex_lock(&actx->mutex);

	rctx->fini = fini;

	if (!actx->hot) {
		actx->hot = 1;
		rctx->init = 1;
	}

	spin_lock(&sdcp->lock[actx->chan]);
	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
	spin_unlock(&sdcp->lock[actx->chan]);

	wake_up_process(sdcp->thread[actx->chan]);
	mutex_unlock(&actx->mutex);

	return ret;
}
static int dcp_sha_update(struct ahash_request *req)
{
	return dcp_sha_update_fx(req, 0);
}

static int dcp_sha_final(struct ahash_request *req)
{
	ahash_request_set_crypt(req, NULL, req->result, 0);
	return dcp_sha_update_fx(req, 1);
}

static int dcp_sha_finup(struct ahash_request *req)
{
	return dcp_sha_update_fx(req, 1);
}

static int dcp_sha_digest(struct ahash_request *req)
{
	int ret;

	ret = dcp_sha_init(req);
	if (ret)
		return ret;

	return dcp_sha_finup(req);
}
static int dcp_sha_import(struct ahash_request *req, const void *in)
{
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	const struct dcp_export_state *export = in;

	memset(rctx, 0, sizeof(struct dcp_sha_req_ctx));
	memset(actx, 0, sizeof(struct dcp_async_ctx));
	memcpy(rctx, &export->req_ctx, sizeof(struct dcp_sha_req_ctx));
	memcpy(actx, &export->async_ctx, sizeof(struct dcp_async_ctx));

	return 0;
}
static int dcp_sha_export(struct ahash_request *req, void *out)
{
	struct dcp_sha_req_ctx *rctx_state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx_state = crypto_ahash_ctx(tfm);
	struct dcp_export_state *export = out;

	memcpy(&export->req_ctx, rctx_state, sizeof(struct dcp_sha_req_ctx));
	memcpy(&export->async_ctx, actx_state, sizeof(struct dcp_async_ctx));

	return 0;
}
static int dcp_sha_cra_init(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct dcp_sha_req_ctx));
	return 0;
}

static void dcp_sha_cra_exit(struct crypto_tfm *tfm)
{
}
/* AES 128 ECB and AES 128 CBC */
static struct crypto_alg dcp_aes_algs[] = {
	{
		.cra_name		= "ecb(aes)",
		.cra_driver_name	= "ecb-aes-dcp",
		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.cra_init		= mxs_dcp_aes_fallback_init,
		.cra_exit		= mxs_dcp_aes_fallback_exit,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct dcp_async_ctx),
		.cra_type		= &crypto_ablkcipher_type,
		.cra_module		= THIS_MODULE,
		.cra_u	= {
			.ablkcipher = {
				.min_keysize	= AES_MIN_KEY_SIZE,
				.max_keysize	= AES_MAX_KEY_SIZE,
				.setkey		= mxs_dcp_aes_setkey,
				.encrypt	= mxs_dcp_aes_ecb_encrypt,
				.decrypt	= mxs_dcp_aes_ecb_decrypt
			},
		},
	}, {
		.cra_name		= "cbc(aes)",
		.cra_driver_name	= "cbc-aes-dcp",
		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.cra_init		= mxs_dcp_aes_fallback_init,
		.cra_exit		= mxs_dcp_aes_fallback_exit,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct dcp_async_ctx),
		.cra_type		= &crypto_ablkcipher_type,
		.cra_module		= THIS_MODULE,
		.cra_u = {
			.ablkcipher = {
				.min_keysize	= AES_MIN_KEY_SIZE,
				.max_keysize	= AES_MAX_KEY_SIZE,
				.setkey		= mxs_dcp_aes_setkey,
				.encrypt	= mxs_dcp_aes_cbc_encrypt,
				.decrypt	= mxs_dcp_aes_cbc_decrypt,
				.ivsize		= AES_BLOCK_SIZE,
			},
		},
	},
};
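
/* SHA1, registered only when MXS_DCP_CAPABILITY1 reports SHA1 support. */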
static struct ahash_alg dcp_sha1_alg = {
	.init	= dcp_sha_init,
	.update	= dcp_sha_update,
	.final	= dcp_sha_final,
	.finup	= dcp_sha_finup,
	.digest	= dcp_sha_digest,
	.import	= dcp_sha_import,
	.export	= dcp_sha_export,
	.halg	= {
		.digestsize	= SHA1_DIGEST_SIZE,
		.statesize	= sizeof(struct dcp_export_state),
		.base		= {
			.cra_name		= "sha1",
			.cra_driver_name	= "sha1-dcp",
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA1_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
			.cra_module		= THIS_MODULE,
			.cra_init		= dcp_sha_cra_init,
			.cra_exit		= dcp_sha_cra_exit,
		},
	},
};
static struct ahash_alg dcp_sha256_alg = {
	.init	= dcp_sha_init,
	.update	= dcp_sha_update,
	.final	= dcp_sha_final,
	.finup	= dcp_sha_finup,
	.digest	= dcp_sha_digest,
	.import	= dcp_sha_import,
	.export	= dcp_sha_export,
	.halg	= {
		.digestsize	= SHA256_DIGEST_SIZE,
		.statesize	= sizeof(struct dcp_export_state),
		.base		= {
			.cra_name		= "sha256",
			.cra_driver_name	= "sha256-dcp",
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA256_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
			.cra_module		= THIS_MODULE,
			.cra_init		= dcp_sha_cra_init,
			.cra_exit		= dcp_sha_cra_exit,
		},
	},
};
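
/*
 * Both DCP interrupt lines are wired to this handler; it acknowledges the
 * per-channel status bits and completes the matching channel completions.
 */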
static irqreturn_t mxs_dcp_irq(int irq, void *context)
{
	struct dcp *sdcp = context;
	uint32_t stat;
	int i;

	stat = readl(sdcp->base + MXS_DCP_STAT);
	stat &= MXS_DCP_STAT_IRQ_MASK;
	if (!stat)
		return IRQ_NONE;

	/* Clear the interrupts. */
	writel(stat, sdcp->base + MXS_DCP_STAT_CLR);

	/* Complete the DMA requests that finished. */
	for (i = 0; i < DCP_MAX_CHANS; i++)
		if (stat & (1 << i))
			complete(&sdcp->completion[i]);

	return IRQ_HANDLED;
}
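
/*
 * Probe: map the registers, grab the IRQs and optional clock, reset the
 * block, start the two channel threads and register whatever algorithms
 * the capability register advertises.
 */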
static int mxs_dcp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dcp *sdcp = NULL;
	int i, ret;
	struct resource *iores;
	int dcp_vmi_irq, dcp_irq;

	if (global_sdcp) {
		dev_err(dev, "Only one DCP instance allowed!\n");
		return -ENODEV;
	}

	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dcp_vmi_irq = platform_get_irq(pdev, 0);
	if (dcp_vmi_irq < 0) {
		dev_err(dev, "Failed to get IRQ: (%d)!\n", dcp_vmi_irq);
		return dcp_vmi_irq;
	}

	dcp_irq = platform_get_irq(pdev, 1);
	if (dcp_irq < 0) {
		dev_err(dev, "Failed to get IRQ: (%d)!\n", dcp_irq);
		return dcp_irq;
	}

	sdcp = devm_kzalloc(dev, sizeof(*sdcp), GFP_KERNEL);
	if (!sdcp)
		return -ENOMEM;

	sdcp->dev = dev;
	sdcp->base = devm_ioremap_resource(dev, iores);
	if (IS_ERR(sdcp->base))
		return PTR_ERR(sdcp->base);

	ret = devm_request_irq(dev, dcp_vmi_irq, mxs_dcp_irq, 0,
			       "dcp-vmi-irq", sdcp);
	if (ret) {
		dev_err(dev, "Failed to claim DCP VMI IRQ!\n");
		return ret;
	}

	ret = devm_request_irq(dev, dcp_irq, mxs_dcp_irq, 0,
			       "dcp-irq", sdcp);
	if (ret) {
		dev_err(dev, "Failed to claim DCP IRQ!\n");
		return ret;
	}

	/* Allocate coherent helper block. */
	sdcp->coh = devm_kzalloc(dev, sizeof(*sdcp->coh) + DCP_ALIGNMENT,
				 GFP_KERNEL);
	if (!sdcp->coh)
		return -ENOMEM;

	/* Re-align the structure so it fits the DCP constraints. */
	sdcp->coh = PTR_ALIGN(sdcp->coh, DCP_ALIGNMENT);

	/* DCP clock is optional, only used on some SOCs */
	sdcp->dcp_clk = devm_clk_get(dev, "dcp");
	if (IS_ERR(sdcp->dcp_clk)) {
		if (sdcp->dcp_clk != ERR_PTR(-ENOENT))
			return PTR_ERR(sdcp->dcp_clk);
		sdcp->dcp_clk = NULL;
	}
	ret = clk_prepare_enable(sdcp->dcp_clk);
	if (ret)
		return ret;

	/* Restart the DCP block. */
	ret = stmp_reset_block(sdcp->base);
	if (ret) {
		dev_err(dev, "Failed reset\n");
		goto err_disable_unprepare_clk;
	}

	/* Initialize control register. */
	writel(MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES |
	       MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING | 0xf,
	       sdcp->base + MXS_DCP_CTRL);

	/* Enable all DCP DMA channels. */
	writel(MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK,
	       sdcp->base + MXS_DCP_CHANNELCTRL);

	/*
	 * We do not enable context switching. Give the context buffer a
	 * pointer to an illegal address so if context switching is
	 * inadvertently enabled, the DCP will return an error instead of
	 * trashing good memory. The DCP DMA cannot access ROM, so any ROM
	 * address will do.
	 */
	writel(0xffff0000, sdcp->base + MXS_DCP_CONTEXT);
	for (i = 0; i < DCP_MAX_CHANS; i++)
		writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(i));
	writel(0xffffffff, sdcp->base + MXS_DCP_STAT_CLR);

	global_sdcp = sdcp;

	platform_set_drvdata(pdev, sdcp);

	for (i = 0; i < DCP_MAX_CHANS; i++) {
		spin_lock_init(&sdcp->lock[i]);
		init_completion(&sdcp->completion[i]);
		crypto_init_queue(&sdcp->queue[i], 50);
	}

	/* Create the SHA and AES handler threads. */
	sdcp->thread[DCP_CHAN_HASH_SHA] = kthread_run(dcp_chan_thread_sha,
						      NULL, "mxs_dcp_chan/sha");
	if (IS_ERR(sdcp->thread[DCP_CHAN_HASH_SHA])) {
		dev_err(dev, "Error starting SHA thread!\n");
		ret = PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]);
		goto err_disable_unprepare_clk;
	}

	sdcp->thread[DCP_CHAN_CRYPTO] = kthread_run(dcp_chan_thread_aes,
						    NULL, "mxs_dcp_chan/aes");
	if (IS_ERR(sdcp->thread[DCP_CHAN_CRYPTO])) {
		dev_err(dev, "Error starting Crypto thread!\n");
		ret = PTR_ERR(sdcp->thread[DCP_CHAN_CRYPTO]);
		goto err_destroy_sha_thread;
	}

	/* Register the various crypto algorithms. */
	sdcp->caps = readl(sdcp->base + MXS_DCP_CAPABILITY1);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128) {
		ret = crypto_register_algs(dcp_aes_algs,
					   ARRAY_SIZE(dcp_aes_algs));
		if (ret) {
			/* Failed to register algorithm. */
			dev_err(dev, "Failed to register AES crypto!\n");
			goto err_destroy_aes_thread;
		}
	}

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1) {
		ret = crypto_register_ahash(&dcp_sha1_alg);
		if (ret) {
			dev_err(dev, "Failed to register %s hash!\n",
				dcp_sha1_alg.halg.base.cra_name);
			goto err_unregister_aes;
		}
	}

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256) {
		ret = crypto_register_ahash(&dcp_sha256_alg);
		if (ret) {
			dev_err(dev, "Failed to register %s hash!\n",
				dcp_sha256_alg.halg.base.cra_name);
			goto err_unregister_sha1;
		}
	}

	return 0;

err_unregister_sha1:
	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
		crypto_unregister_ahash(&dcp_sha1_alg);

err_unregister_aes:
	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
		crypto_unregister_algs(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));

err_destroy_aes_thread:
	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);

err_destroy_sha_thread:
	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);

err_disable_unprepare_clk:
	clk_disable_unprepare(sdcp->dcp_clk);

	return ret;
}
static int mxs_dcp_remove(struct platform_device *pdev)
{
	struct dcp *sdcp = platform_get_drvdata(pdev);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256)
		crypto_unregister_ahash(&dcp_sha256_alg);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
		crypto_unregister_ahash(&dcp_sha1_alg);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
		crypto_unregister_algs(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));

	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);

	clk_disable_unprepare(sdcp->dcp_clk);

	platform_set_drvdata(pdev, NULL);

	global_sdcp = NULL;

	return 0;
}
static const struct of_device_id mxs_dcp_dt_ids[] = {
	{ .compatible = "fsl,imx23-dcp", .data = NULL, },
	{ .compatible = "fsl,imx28-dcp", .data = NULL, },
	{ /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, mxs_dcp_dt_ids);
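
/*
 * Illustrative device tree node only (the unit address and interrupt
 * numbers below are assumptions; the real values come from the SoC dtsi).
 * Note that probe expects the VMI interrupt first and the DCP interrupt
 * second:
 *
 *	dcp: crypto@80028000 {
 *		compatible = "fsl,imx28-dcp";
 *		reg = <0x80028000 0x2000>;
 *		interrupts = <52 53>;
 *	};
 */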
static struct platform_driver mxs_dcp_driver = {
	.probe	= mxs_dcp_probe,
	.remove	= mxs_dcp_remove,
	.driver	= {
		.name		= "mxs-dcp",
		.of_match_table	= mxs_dcp_dt_ids,
	},
};

module_platform_driver(mxs_dcp_driver);
MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
MODULE_DESCRIPTION("Freescale MXS DCP Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:mxs-dcp");