/*
 * drivers/crypto/tegra-aes.c
 *
 * Driver for NVIDIA Tegra AES hardware engine residing inside the
 * Bit Stream Engine for Video (BSEV) hardware block.
 *
 * The engine is programmed through commands that travel via a command
 * queue residing between the CPU and the BSEV block. The BSEV engine has
 * an internal RAM (VRAM) where the final input plaintext, keys and the IV
 * have to be copied before starting the encrypt/decrypt operation.
 *
 * Copyright (c) 2010, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/workqueue.h>

#include <crypto/scatterwalk.h>
#include <crypto/aes.h>
#include <crypto/internal/rng.h>

#include "tegra-aes.h"
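/*
 * Mode and state flags. FLAGS_MODE_MASK selects the bits of a request's mode
 * (encrypt, CBC, OFB, RNG) that get copied into dd->flags for the current
 * operation; the remaining flags track context and driver state such as
 * FLAGS_NEW_KEY, FLAGS_FAST and FLAGS_BUSY.
 */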
#define FLAGS_MODE_MASK		0x00FF
#define FLAGS_ENCRYPT		BIT(0)
#define FLAGS_CBC		BIT(1)
#define FLAGS_GIV		BIT(2)
#define FLAGS_RNG		BIT(3)
#define FLAGS_OFB		BIT(4)
#define FLAGS_NEW_KEY		BIT(5)
#define FLAGS_NEW_IV		BIT(6)
#define FLAGS_INIT		BIT(7)
#define FLAGS_FAST		BIT(8)
#define FLAGS_BUSY		9
/*
 * Defines the maximum number of bytes the AES engine processes in one go,
 * which takes about 1 msec. The engine spends roughly 176 cycles per
 * 16-byte block, i.e. 11 cycles/byte. If the CPU limits its use of the BSE
 * to 1 msec, about 216K AVP/BSE cycles are available, in which the engine
 * can process 216K/11 ~= 19KB. Based on this, AES_HW_DMA_BUFFER_SIZE_BYTES
 * is set to 16KB.
 */
#define AES_HW_DMA_BUFFER_SIZE_BYTES 0x4000
/*
 * The key table length is 64 bytes
 * (this includes the first up to 32 bytes of key, 16 bytes of original
 * initial vector and 16 bytes of updated initial vector).
 */
#define AES_HW_KEY_TABLE_LENGTH_BYTES 64
/*
 * The memory being used is divided as follows:
 * 1. Key - 32 bytes
 * 2. Original IV - 16 bytes
 * 3. Updated IV - 16 bytes
 * 4. Key schedule - 256 bytes
 *
 * 1+2+3 constitute the hw key table.
 */
#define AES_HW_IV_SIZE 16
#define AES_HW_KEYSCHEDULE_LEN 256
#define AES_IVKEY_SIZE (AES_HW_KEY_TABLE_LENGTH_BYTES + AES_HW_KEYSCHEDULE_LEN)
/* Define commands required for AES operation */
enum {
	CMD_BLKSTARTENGINE = 0x0E,
	CMD_DMACOMPLETE = 0x11,
};

/* Define sub-commands */
enum {
	SUBCMD_VRAM_SEL = 0x1,
	SUBCMD_CRYPTO_TABLE_SEL = 0x3,
	SUBCMD_KEY_TABLE_SEL = 0x8,
};
/* memdma_vd command */
#define MEMDMA_DIR_DTOVRAM	0 /* sdram -> vram */
#define MEMDMA_DIR_VTODRAM	1 /* vram -> sdram */
#define MEMDMA_DIR_SHIFT	25
#define MEMDMA_NUM_WORDS_SHIFT	12
/* command queue bit shifts */
enum {
	CMDQ_KEYTABLEADDR_SHIFT = 0,
	CMDQ_KEYTABLEID_SHIFT = 17,
	CMDQ_VRAMSEL_SHIFT = 23,
	CMDQ_TABLESEL_SHIFT = 24,
	CMDQ_OPCODE_SHIFT = 26,
};
/*
 * The secure key slot contains a unique secure key generated
 * and loaded by the bootloader. This slot is marked as non-accessible
 * to the kernel.
 */
#define SSK_SLOT_NUM		4

#define AES_NR_KEYSLOTS		8
#define TEGRA_AES_QUEUE_LENGTH	50
#define DEFAULT_RNG_BLK_SZ	16

/* The command queue depth */
#define AES_HW_MAX_ICQ_LENGTH	5
struct tegra_aes_slot {
	struct list_head node;
	int slot_num;
};

static struct tegra_aes_slot ssk = {
	.slot_num = SSK_SLOT_NUM,
};
struct tegra_aes_reqctx {
	unsigned long mode;
};
struct tegra_aes_dev {
	struct device *dev;
	void __iomem *io_base;
	dma_addr_t ivkey_phys_base;
	void __iomem *ivkey_base;
	struct clk *aes_clk;
	struct tegra_aes_ctx *ctx;
	int irq;
	unsigned long flags;
	struct completion op_complete;
	void *buf_in;
	dma_addr_t dma_buf_in;
	void *buf_out;
	dma_addr_t dma_buf_out;
	u8 *iv;
	u8 dt[DEFAULT_RNG_BLK_SZ];
	int ivlen;
	spinlock_t lock;
	struct crypto_queue queue;
	struct tegra_aes_slot *slots;
	struct ablkcipher_request *req;
	size_t total;
	struct scatterlist *in_sg;
	struct scatterlist *out_sg;
};

static struct tegra_aes_dev *aes_dev;
struct tegra_aes_ctx {
	struct tegra_aes_dev *dd;
	unsigned long flags;
	struct tegra_aes_slot *slot;
	u8 key[AES_MAX_KEY_SIZE];
	size_t keylen;
};
static struct tegra_aes_ctx rng_ctx = {
	.flags = FLAGS_NEW_KEY,
	.keylen = AES_KEYSIZE_128,
};
/* keep registered devices data here */
static struct list_head dev_list;
static DEFINE_SPINLOCK(list_lock);
static DEFINE_MUTEX(aes_lock);

static void aes_workqueue_handler(struct work_struct *work);
static DECLARE_WORK(aes_work, aes_workqueue_handler);
static struct workqueue_struct *aes_wq;
static inline u32 aes_readl(struct tegra_aes_dev *dd, u32 offset)
{
	return readl(dd->io_base + offset);
}

static inline void aes_writel(struct tegra_aes_dev *dd, u32 val, u32 offset)
{
	writel(val, dd->io_base + offset);
}
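/*
 * Program one crypt operation through the internal command queue (ICQ):
 * clear and enable the relevant interrupts, build the command words (DMA
 * setup, source address, block-start-engine with the block count, DMA
 * complete), route SDRAM accesses through the AHB, select the cipher mode
 * in SECURE_INPUT_SELECT, then push the commands and wait up to 150 ms for
 * the completion raised by the interrupt handler before issuing the final
 * command word.
 */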
static int aes_start_crypt(struct tegra_aes_dev *dd, u32 in_addr, u32 out_addr,
			   int nblocks, int mode, bool upd_iv)
{
	u32 cmdq[AES_HW_MAX_ICQ_LENGTH];
	int i, eng_busy, icq_empty, ret;
	u32 value;

	/* reset all the interrupt bits */
	aes_writel(dd, 0xFFFFFFFF, TEGRA_AES_INTR_STATUS);

	/* enable error, dma xfer complete interrupts */
	aes_writel(dd, 0x33, TEGRA_AES_INT_ENB);

	cmdq[0] = CMD_DMASETUP << CMDQ_OPCODE_SHIFT;
	cmdq[1] = in_addr;
	cmdq[2] = CMD_BLKSTARTENGINE << CMDQ_OPCODE_SHIFT | (nblocks-1);
	cmdq[3] = CMD_DMACOMPLETE << CMDQ_OPCODE_SHIFT;

	value = aes_readl(dd, TEGRA_AES_CMDQUE_CONTROL);
	/* access SDRAM through AHB */
	value &= ~TEGRA_AES_CMDQ_CTRL_SRC_STM_SEL_FIELD;
	value &= ~TEGRA_AES_CMDQ_CTRL_DST_STM_SEL_FIELD;
	value |= TEGRA_AES_CMDQ_CTRL_SRC_STM_SEL_FIELD |
		 TEGRA_AES_CMDQ_CTRL_DST_STM_SEL_FIELD |
		 TEGRA_AES_CMDQ_CTRL_ICMDQEN_FIELD;
	aes_writel(dd, value, TEGRA_AES_CMDQUE_CONTROL);
	dev_dbg(dd->dev, "cmd_q_ctrl=0x%x", value);

	value = (0x1 << TEGRA_AES_SECURE_INPUT_ALG_SEL_SHIFT) |
		((dd->ctx->keylen * 8) <<
			TEGRA_AES_SECURE_INPUT_KEY_LEN_SHIFT) |
		((u32)upd_iv << TEGRA_AES_SECURE_IV_SELECT_SHIFT);

	if (mode & FLAGS_CBC) {
		value |= ((((mode & FLAGS_ENCRYPT) ? 2 : 3)
				<< TEGRA_AES_SECURE_XOR_POS_SHIFT) |
			(((mode & FLAGS_ENCRYPT) ? 2 : 3)
				<< TEGRA_AES_SECURE_VCTRAM_SEL_SHIFT) |
			((mode & FLAGS_ENCRYPT) ? 1 : 0)
				<< TEGRA_AES_SECURE_CORE_SEL_SHIFT);
	} else if (mode & FLAGS_OFB) {
		value |= ((TEGRA_AES_SECURE_XOR_POS_FIELD) |
			(2 << TEGRA_AES_SECURE_INPUT_SEL_SHIFT) |
			(TEGRA_AES_SECURE_CORE_SEL_FIELD));
	} else if (mode & FLAGS_RNG) {
		value |= (((mode & FLAGS_ENCRYPT) ? 1 : 0)
				<< TEGRA_AES_SECURE_CORE_SEL_SHIFT |
			TEGRA_AES_SECURE_RNG_ENB_FIELD);
	} else {
		value |= (((mode & FLAGS_ENCRYPT) ? 1 : 0)
				<< TEGRA_AES_SECURE_CORE_SEL_SHIFT);
	}

	dev_dbg(dd->dev, "secure_in_sel=0x%x", value);
	aes_writel(dd, value, TEGRA_AES_SECURE_INPUT_SELECT);

	aes_writel(dd, out_addr, TEGRA_AES_SECURE_DEST_ADDR);
	reinit_completion(&dd->op_complete);

	for (i = 0; i < AES_HW_MAX_ICQ_LENGTH - 1; i++) {
		do {
			value = aes_readl(dd, TEGRA_AES_INTR_STATUS);
			eng_busy = value & TEGRA_AES_ENGINE_BUSY_FIELD;
			icq_empty = value & TEGRA_AES_ICQ_EMPTY_FIELD;
		} while (eng_busy && !icq_empty);
		aes_writel(dd, cmdq[i], TEGRA_AES_ICMDQUE_WR);
	}

	ret = wait_for_completion_timeout(&dd->op_complete,
					  msecs_to_jiffies(150));
	if (!ret) {
		dev_err(dd->dev, "timed out (0x%x)\n",
			aes_readl(dd, TEGRA_AES_INTR_STATUS));
		return -ETIMEDOUT;
	}

	aes_writel(dd, cmdq[AES_HW_MAX_ICQ_LENGTH - 1], TEGRA_AES_ICMDQUE_WR);

	return 0;
}
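/*
 * Key-slot bookkeeping: free hardware key slots are kept on dev_list under
 * list_lock. The SSK slot is never returned to the free list since it holds
 * the secure key loaded by the bootloader.
 */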
static void aes_release_key_slot(struct tegra_aes_slot *slot)
{
	if (slot->slot_num == SSK_SLOT_NUM)
		return;

	spin_lock(&list_lock);
	list_add_tail(&slot->node, &dev_list);
	spin_unlock(&list_lock);
}
static struct tegra_aes_slot *aes_find_key_slot(void)
{
	struct tegra_aes_slot *slot = NULL;
	struct list_head *new_head;
	int empty;

	spin_lock(&list_lock);
	empty = list_empty(&dev_list);
	if (!empty) {
		slot = list_entry(&dev_list, struct tegra_aes_slot, node);
		new_head = dev_list.next;
		dev_list.next = new_head->next;
		dev_list.prev = NULL;
	}
	spin_unlock(&list_lock);

	return slot;
}
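/*
 * Load the current context's key into the hardware: enable key schedule
 * generation, point SECURE_CONFIG at the chosen key slot, DMA the iv/key
 * buffer from SDRAM into VRAM with a MEMDMAVD command, then issue a
 * SETTABLE command so the engine latches the key table into its internal
 * registers. When the SSK slot is selected the key is already present in
 * hardware and only the slot selection is programmed.
 */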
static int aes_set_key(struct tegra_aes_dev *dd)
{
	u32 value, cmdq[2];
	struct tegra_aes_ctx *ctx = dd->ctx;
	int eng_busy, icq_empty, dma_busy;
	bool use_ssk = false;

	/* use ssk? */
	if (!dd->ctx->slot) {
		dev_dbg(dd->dev, "using ssk");
		dd->ctx->slot = &ssk;
		use_ssk = true;
	}

	/* enable key schedule generation in hardware */
	value = aes_readl(dd, TEGRA_AES_SECURE_CONFIG_EXT);
	value &= ~TEGRA_AES_SECURE_KEY_SCH_DIS_FIELD;
	aes_writel(dd, value, TEGRA_AES_SECURE_CONFIG_EXT);

	/* select the key slot */
	value = aes_readl(dd, TEGRA_AES_SECURE_CONFIG);
	value &= ~TEGRA_AES_SECURE_KEY_INDEX_FIELD;
	value |= (ctx->slot->slot_num << TEGRA_AES_SECURE_KEY_INDEX_SHIFT);
	aes_writel(dd, value, TEGRA_AES_SECURE_CONFIG);

	if (use_ssk)
		return 0;

	/* copy the key table from sdram to vram */
	cmdq[0] = CMD_MEMDMAVD << CMDQ_OPCODE_SHIFT |
		MEMDMA_DIR_DTOVRAM << MEMDMA_DIR_SHIFT |
		AES_HW_KEY_TABLE_LENGTH_BYTES / sizeof(u32) <<
			MEMDMA_NUM_WORDS_SHIFT;
	cmdq[1] = (u32)dd->ivkey_phys_base;

	aes_writel(dd, cmdq[0], TEGRA_AES_ICMDQUE_WR);
	aes_writel(dd, cmdq[1], TEGRA_AES_ICMDQUE_WR);

	do {
		value = aes_readl(dd, TEGRA_AES_INTR_STATUS);
		eng_busy = value & TEGRA_AES_ENGINE_BUSY_FIELD;
		icq_empty = value & TEGRA_AES_ICQ_EMPTY_FIELD;
		dma_busy = value & TEGRA_AES_DMA_BUSY_FIELD;
	} while (eng_busy && !icq_empty && dma_busy);

	/* settable command to get key into internal registers */
	value = CMD_SETTABLE << CMDQ_OPCODE_SHIFT |
		SUBCMD_CRYPTO_TABLE_SEL << CMDQ_TABLESEL_SHIFT |
		SUBCMD_VRAM_SEL << CMDQ_VRAMSEL_SHIFT |
		(SUBCMD_KEY_TABLE_SEL | ctx->slot->slot_num) <<
			CMDQ_KEYTABLEID_SHIFT;
	aes_writel(dd, value, TEGRA_AES_ICMDQUE_WR);

	do {
		value = aes_readl(dd, TEGRA_AES_INTR_STATUS);
		eng_busy = value & TEGRA_AES_ENGINE_BUSY_FIELD;
		icq_empty = value & TEGRA_AES_ICQ_EMPTY_FIELD;
	} while (eng_busy && !icq_empty);

	return 0;
}
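/*
 * Pull the next ablkcipher request off the queue, program a new key and IV
 * into the engine when needed, then walk the source/destination
 * scatterlists in chunks of at most AES_HW_DMA_BUFFER_SIZE_BYTES,
 * DMA-mapping each pair of entries and running aes_start_crypt() on them.
 * The aes_lock mutex serializes access to the hardware.
 */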
static int tegra_aes_handle_req(struct tegra_aes_dev *dd)
{
	struct crypto_async_request *async_req, *backlog;
	struct crypto_ablkcipher *tfm;
	struct tegra_aes_ctx *ctx;
	struct tegra_aes_reqctx *rctx;
	struct ablkcipher_request *req;
	unsigned long flags;
	int dma_max = AES_HW_DMA_BUFFER_SIZE_BYTES;
	int ret = 0, nblocks, total;
	int count = 0;
	dma_addr_t addr_in, addr_out;
	struct scatterlist *in_sg, *out_sg;

	spin_lock_irqsave(&dd->lock, flags);
	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (!async_req)
		clear_bit(FLAGS_BUSY, &dd->flags);
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return -ENODATA;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ablkcipher_request_cast(async_req);

	dev_dbg(dd->dev, "%s: get new req\n", __func__);

	if (!req->src || !req->dst)
		return -EINVAL;

	/* take mutex to access the aes hw */
	mutex_lock(&aes_lock);

	/* assign new request to device */
	dd->req = req;
	dd->total = req->nbytes;
	dd->in_sg = req->src;
	dd->out_sg = req->dst;

	in_sg = dd->in_sg;
	out_sg = dd->out_sg;
	total = dd->total;

	tfm = crypto_ablkcipher_reqtfm(req);
	rctx = ablkcipher_request_ctx(req);
	ctx = crypto_ablkcipher_ctx(tfm);
	rctx->mode &= FLAGS_MODE_MASK;
	dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;

	dd->iv = (u8 *)req->info;
	dd->ivlen = crypto_ablkcipher_ivsize(tfm);

	/* assign new context to device */
	dd->ctx = ctx;

	if (ctx->flags & FLAGS_NEW_KEY) {
		memcpy(dd->ivkey_base, ctx->key, ctx->keylen);
		memset(dd->ivkey_base + ctx->keylen, 0,
		       AES_HW_KEY_TABLE_LENGTH_BYTES - ctx->keylen);
		aes_set_key(dd);
		ctx->flags &= ~FLAGS_NEW_KEY;
	}

	if (((dd->flags & FLAGS_CBC) || (dd->flags & FLAGS_OFB)) && dd->iv) {
		/* set iv to the aes hw slot
		 * Hw generates updated iv only after iv is set in slot.
		 * So the key and iv are passed asynchronously.
		 */
		memcpy(dd->buf_in, dd->iv, dd->ivlen);

		ret = aes_start_crypt(dd, (u32)dd->dma_buf_in,
				      dd->dma_buf_out, 1, FLAGS_CBC, false);
		if (ret < 0) {
			dev_err(dd->dev, "aes_start_crypt fail(%d)\n", ret);
			goto out;
		}
	}

	while (total) {
		dev_dbg(dd->dev, "remain: %d\n", total);
		ret = dma_map_sg(dd->dev, in_sg, 1, DMA_TO_DEVICE);
		if (!ret) {
			dev_err(dd->dev, "dma_map_sg() error\n");
			goto out;
		}

		ret = dma_map_sg(dd->dev, out_sg, 1, DMA_FROM_DEVICE);
		if (!ret) {
			dev_err(dd->dev, "dma_map_sg() error\n");
			dma_unmap_sg(dd->dev, dd->in_sg,
				     1, DMA_TO_DEVICE);
			goto out;
		}

		addr_in = sg_dma_address(in_sg);
		addr_out = sg_dma_address(out_sg);
		dd->flags |= FLAGS_FAST;
		count = min_t(int, sg_dma_len(in_sg), dma_max);
		WARN_ON(sg_dma_len(in_sg) != sg_dma_len(out_sg));
		nblocks = DIV_ROUND_UP(count, AES_BLOCK_SIZE);

		ret = aes_start_crypt(dd, addr_in, addr_out, nblocks,
				      dd->flags, true);

		dma_unmap_sg(dd->dev, out_sg, 1, DMA_FROM_DEVICE);
		dma_unmap_sg(dd->dev, in_sg, 1, DMA_TO_DEVICE);
		if (ret < 0) {
			dev_err(dd->dev, "aes_start_crypt fail(%d)\n", ret);
			goto out;
		}
		dd->flags &= ~FLAGS_FAST;

		dev_dbg(dd->dev, "out: copied %d\n", count);
		total -= count;

		in_sg = sg_next(in_sg);
		out_sg = sg_next(out_sg);
		WARN_ON(((total != 0) && (!in_sg || !out_sg)));
	}

out:
	mutex_unlock(&aes_lock);

	if (dd->req->base.complete)
		dd->req->base.complete(&dd->req->base, ret);

	dev_dbg(dd->dev, "%s: exit\n", __func__);
	return ret;
}
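/*
 * crypto API setkey entry point: validate the key length, reserve a free
 * hardware key slot, cache the key in the context and mark it FLAGS_NEW_KEY
 * so it is programmed into the engine on the next request.
 */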
static int tegra_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct tegra_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct tegra_aes_dev *dd = aes_dev;
	struct tegra_aes_slot *key_slot;

	if ((keylen != AES_KEYSIZE_128) && (keylen != AES_KEYSIZE_192) &&
	    (keylen != AES_KEYSIZE_256)) {
		dev_err(dd->dev, "unsupported key size\n");
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	dev_dbg(dd->dev, "keylen: %d\n", keylen);

	key_slot = aes_find_key_slot();
	if (!key_slot) {
		dev_err(dd->dev, "no empty slot\n");
		return -ENOMEM;
	}

	ctx->slot = key_slot;

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;

	ctx->flags |= FLAGS_NEW_KEY;
	dev_dbg(dd->dev, "done\n");
	return 0;
}
static void aes_workqueue_handler(struct work_struct *work)
{
	struct tegra_aes_dev *dd = aes_dev;
	int ret;

	ret = clk_prepare_enable(dd->aes_clk);
	if (ret)
		BUG_ON("clock enable failed");

	/* empty the crypto queue and then return */
	do {
		ret = tegra_aes_handle_req(dd);
	} while (!ret);

	clk_disable_unprepare(dd->aes_clk);
}
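/*
 * Interrupt handler: acknowledge any error bits in INTR_STATUS and complete
 * dd->op_complete once the engine reports it is no longer busy. Interrupts
 * that arrive with no request in flight are treated as spurious.
 */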
static irqreturn_t aes_irq(int irq, void *dev_id)
{
	struct tegra_aes_dev *dd = (struct tegra_aes_dev *)dev_id;
	u32 value = aes_readl(dd, TEGRA_AES_INTR_STATUS);
	int busy = test_bit(FLAGS_BUSY, &dd->flags);

	if (!busy) {
		dev_dbg(dd->dev, "spurious interrupt\n");
		return IRQ_NONE;
	}

	dev_dbg(dd->dev, "irq_stat: 0x%x\n", value);
	if (value & TEGRA_AES_INT_ERROR_MASK)
		aes_writel(dd, TEGRA_AES_INT_ERROR_MASK, TEGRA_AES_INTR_STATUS);

	if (!(value & TEGRA_AES_ENGINE_BUSY_FIELD))
		complete(&dd->op_complete);

	return IRQ_HANDLED;
}
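/*
 * Common entry point for all cipher modes: record the requested mode in the
 * per-request context, enqueue the request, and kick the workqueue if the
 * engine is not already busy.
 */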
static int tegra_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
	struct tegra_aes_reqctx *rctx = ablkcipher_request_ctx(req);
	struct tegra_aes_dev *dd = aes_dev;
	unsigned long flags;
	int err = 0;
	int busy;

	dev_dbg(dd->dev, "nbytes: %d, enc: %d, cbc: %d, ofb: %d\n",
		req->nbytes, !!(mode & FLAGS_ENCRYPT),
		!!(mode & FLAGS_CBC), !!(mode & FLAGS_OFB));

	rctx->mode = mode;

	spin_lock_irqsave(&dd->lock, flags);
	err = ablkcipher_enqueue_request(&dd->queue, req);
	busy = test_and_set_bit(FLAGS_BUSY, &dd->flags);
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!busy)
		queue_work(aes_wq, &aes_work);

	return err;
}
static int tegra_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	return tegra_aes_crypt(req, FLAGS_ENCRYPT);
}

static int tegra_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	return tegra_aes_crypt(req, 0);
}

static int tegra_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	return tegra_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
}

static int tegra_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	return tegra_aes_crypt(req, FLAGS_CBC);
}

static int tegra_aes_ofb_encrypt(struct ablkcipher_request *req)
{
	return tegra_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_OFB);
}

static int tegra_aes_ofb_decrypt(struct ablkcipher_request *req)
{
	return tegra_aes_crypt(req, FLAGS_OFB);
}
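/*
 * RNG read for the "ansi_cprng" backend: with aes_lock held and the engine
 * clock enabled, run the current DT vector through the engine's RNG path,
 * copy the output to the caller and advance the DT counter.
 */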
static int tegra_aes_get_random(struct crypto_rng *tfm, u8 *rdata,
				unsigned int dlen)
{
	struct tegra_aes_dev *dd = aes_dev;
	struct tegra_aes_ctx *ctx = &rng_ctx;
	int ret, i;
	u8 *dest = rdata, *dt = dd->dt;

	/* take mutex to access the aes hw */
	mutex_lock(&aes_lock);

	ret = clk_prepare_enable(dd->aes_clk);
	if (ret) {
		mutex_unlock(&aes_lock);
		return ret;
	}

	dd->ctx = ctx;
	dd->flags = FLAGS_ENCRYPT | FLAGS_RNG;

	memcpy(dd->buf_in, dt, DEFAULT_RNG_BLK_SZ);

	ret = aes_start_crypt(dd, (u32)dd->dma_buf_in,
			      (u32)dd->dma_buf_out, 1, dd->flags, true);
	if (ret < 0) {
		dev_err(dd->dev, "aes_start_crypt fail(%d)\n", ret);
		dlen = ret;
		goto out;
	}

	memcpy(dest, dd->buf_out, dlen);

	for (i = DEFAULT_RNG_BLK_SZ - 1; i >= 0; i--) {
		dt[i] += 1;
		if (dt[i] != 0)
			break;
	}

out:
	clk_disable_unprepare(dd->aes_clk);
	mutex_unlock(&aes_lock);

	dev_dbg(dd->dev, "%s: done\n", __func__);
	return dlen;
}
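/*
 * RNG reset: reserve a key slot, load the 128-bit key portion of the seed
 * into the iv/key buffer, run one encryption to prime the engine with the
 * seed's IV, and initialise the DT vector either from the extra seed bytes
 * or from get_random_bytes().
 */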
static int tegra_aes_rng_reset(struct crypto_rng *tfm, u8 *seed,
			       unsigned int slen)
{
	struct tegra_aes_dev *dd = aes_dev;
	struct tegra_aes_ctx *ctx = &rng_ctx;
	struct tegra_aes_slot *key_slot;
	int ret = 0;
	u8 tmp[16]; /* 16 bytes = 128 bits of entropy */
	u8 *dt;

	if (!ctx || !dd) {
		pr_err("ctx=0x%x, dd=0x%x\n",
		       (unsigned int)ctx, (unsigned int)dd);
		return -EINVAL;
	}

	if (slen < (DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128)) {
		dev_err(dd->dev, "seed size invalid");
		return -EINVAL;
	}

	/* take mutex to access the aes hw */
	mutex_lock(&aes_lock);

	key_slot = aes_find_key_slot();
	if (!key_slot) {
		dev_err(dd->dev, "no empty slot\n");
		mutex_unlock(&aes_lock);
		return -ENOMEM;
	}

	ctx->slot = key_slot;
	dd->ctx = ctx;
	dd->iv = seed;
	dd->ivlen = slen;
	ctx->keylen = AES_KEYSIZE_128;
	ctx->flags |= FLAGS_NEW_KEY;

	/* copy the key to the key slot */
	memcpy(dd->ivkey_base, seed + DEFAULT_RNG_BLK_SZ, AES_KEYSIZE_128);
	memset(dd->ivkey_base + AES_KEYSIZE_128, 0,
	       AES_HW_KEY_TABLE_LENGTH_BYTES - AES_KEYSIZE_128);

	dd->flags = FLAGS_ENCRYPT | FLAGS_RNG;

	ret = clk_prepare_enable(dd->aes_clk);
	if (ret) {
		mutex_unlock(&aes_lock);
		return ret;
	}

	aes_set_key(dd);

	/* set seed to the aes hw slot */
	memcpy(dd->buf_in, dd->iv, DEFAULT_RNG_BLK_SZ);
	ret = aes_start_crypt(dd, (u32)dd->dma_buf_in,
			      dd->dma_buf_out, 1, FLAGS_CBC, false);
	if (ret < 0) {
		dev_err(dd->dev, "aes_start_crypt fail(%d)\n", ret);
		goto out;
	}

	if (dd->ivlen >= (2 * DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128)) {
		dt = dd->iv + DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128;
	} else {
		get_random_bytes(tmp, sizeof(tmp));
		dt = tmp;
	}
	memcpy(dd->dt, dt, DEFAULT_RNG_BLK_SZ);

out:
	clk_disable_unprepare(dd->aes_clk);
	mutex_unlock(&aes_lock);

	dev_dbg(dd->dev, "%s: done\n", __func__);
	return ret;
}
static int tegra_aes_cra_init(struct crypto_tfm *tfm)
{
	tfm->crt_ablkcipher.reqsize = sizeof(struct tegra_aes_reqctx);

	return 0;
}

static void tegra_aes_cra_exit(struct crypto_tfm *tfm)
{
	struct tegra_aes_ctx *ctx =
		crypto_ablkcipher_ctx((struct crypto_ablkcipher *)tfm);

	if (ctx && ctx->slot)
		aes_release_key_slot(ctx->slot);
}
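/*
 * Algorithms exposed to the crypto API: asynchronous ECB/CBC/OFB block
 * ciphers plus an "ansi_cprng" RNG backend. cra_priority, cra_ctxsize,
 * cra_module and the init/exit hooks of each entry are filled in at probe
 * time.
 */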
static struct crypto_alg algs[] = {
	{
		.cra_name = "ecb(aes)",
		.cra_driver_name = "ecb-aes-tegra",
		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_type = &crypto_ablkcipher_type,
		.cra_u.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = tegra_aes_setkey,
			.encrypt = tegra_aes_ecb_encrypt,
			.decrypt = tegra_aes_ecb_decrypt,
		},
	}, {
		.cra_name = "cbc(aes)",
		.cra_driver_name = "cbc-aes-tegra",
		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_type = &crypto_ablkcipher_type,
		.cra_u.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_MIN_KEY_SIZE,
			.setkey = tegra_aes_setkey,
			.encrypt = tegra_aes_cbc_encrypt,
			.decrypt = tegra_aes_cbc_decrypt,
		},
	}, {
		.cra_name = "ofb(aes)",
		.cra_driver_name = "ofb-aes-tegra",
		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_type = &crypto_ablkcipher_type,
		.cra_u.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_MIN_KEY_SIZE,
			.setkey = tegra_aes_setkey,
			.encrypt = tegra_aes_ofb_encrypt,
			.decrypt = tegra_aes_ofb_decrypt,
		},
	}, {
		.cra_name = "ansi_cprng",
		.cra_driver_name = "rng-aes-tegra",
		.cra_flags = CRYPTO_ALG_TYPE_RNG,
		.cra_ctxsize = sizeof(struct tegra_aes_ctx),
		.cra_type = &crypto_rng_type,
		.cra_u.rng = {
			.rng_make_random = tegra_aes_get_random,
			.rng_reset = tegra_aes_rng_reset,
			.seedsize = AES_KEYSIZE_128 + (2 * DEFAULT_RNG_BLK_SZ),
		},
	}
};
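/*
 * Probe: map the BSEV register space, acquire and configure the "vde"
 * clock, allocate the coherent iv/key and DMA bounce buffers, set up the
 * workqueue, IRQ and free key-slot list, then register the algorithms
 * above.
 */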
static int tegra_aes_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct tegra_aes_dev *dd;
	struct resource *res;
	int err = -ENOMEM, i = 0, j;

	dd = devm_kzalloc(dev, sizeof(struct tegra_aes_dev), GFP_KERNEL);
	if (dd == NULL) {
		dev_err(dev, "unable to alloc data struct.\n");
		return err;
	}
	dd->dev = dev;
	platform_set_drvdata(pdev, dd);

	dd->slots = devm_kzalloc(dev, sizeof(struct tegra_aes_slot) *
				 AES_NR_KEYSLOTS, GFP_KERNEL);
	if (dd->slots == NULL) {
		dev_err(dev, "unable to alloc slot struct.\n");
		goto out;
	}

	spin_lock_init(&dd->lock);
	crypto_init_queue(&dd->queue, TEGRA_AES_QUEUE_LENGTH);

	/* Get the module base address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "invalid resource type: base\n");
		err = -ENODEV;
		goto out;
	}

	if (!devm_request_mem_region(&pdev->dev, res->start,
				     resource_size(res),
				     dev_name(&pdev->dev))) {
		dev_err(&pdev->dev, "Couldn't request MEM resource\n");
		err = -EBUSY;
		goto out;
	}

	dd->io_base = devm_ioremap(dev, res->start, resource_size(res));
	if (!dd->io_base) {
		dev_err(dev, "can't ioremap register space\n");
		err = -ENOMEM;
		goto out;
	}

	/* Initialize the vde clock */
	dd->aes_clk = devm_clk_get(dev, "vde");
	if (IS_ERR(dd->aes_clk)) {
		dev_err(dev, "iclock initialization failed.\n");
		err = -ENODEV;
		goto out;
	}

	err = clk_set_rate(dd->aes_clk, ULONG_MAX);
	if (err) {
		dev_err(dd->dev, "iclk set_rate fail(%d)\n", err);
		goto out;
	}

	/*
	 * The following contiguous memory is allocated as follows -
	 * - hardware key table
	 * - key schedule
	 */
	dd->ivkey_base = dma_alloc_coherent(dev, AES_HW_KEY_TABLE_LENGTH_BYTES,
					    &dd->ivkey_phys_base,
					    GFP_KERNEL);
	if (!dd->ivkey_base) {
		dev_err(dev, "can not allocate iv/key buffer\n");
		err = -ENOMEM;
		goto out;
	}

	dd->buf_in = dma_alloc_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
					&dd->dma_buf_in, GFP_KERNEL);
	if (!dd->buf_in) {
		dev_err(dev, "can not allocate dma-in buffer\n");
		err = -ENOMEM;
		goto out;
	}

	dd->buf_out = dma_alloc_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
					 &dd->dma_buf_out, GFP_KERNEL);
	if (!dd->buf_out) {
		dev_err(dev, "can not allocate dma-out buffer\n");
		err = -ENOMEM;
		goto out;
	}

	init_completion(&dd->op_complete);
	aes_wq = alloc_workqueue("tegra_aes_wq", WQ_HIGHPRI | WQ_UNBOUND, 1);
	if (!aes_wq) {
		dev_err(dev, "alloc_workqueue failed\n");
		err = -ENOMEM;
		goto out;
	}

	/* get the irq */
	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		dev_err(dev, "invalid resource type: irq\n");
		err = -ENODEV;
		goto out;
	}
	dd->irq = res->start;

	err = devm_request_irq(dev, dd->irq, aes_irq, IRQF_TRIGGER_HIGH |
			       IRQF_SHARED, "tegra-aes", dd);
	if (err) {
		dev_err(dev, "request_irq failed\n");
		goto out;
	}

	mutex_init(&aes_lock);
	INIT_LIST_HEAD(&dev_list);

	spin_lock_init(&list_lock);
	spin_lock(&list_lock);
	for (i = 0; i < AES_NR_KEYSLOTS; i++) {
		if (i == SSK_SLOT_NUM)
			continue;
		dd->slots[i].slot_num = i;
		INIT_LIST_HEAD(&dd->slots[i].node);
		list_add_tail(&dd->slots[i].node, &dev_list);
	}
	spin_unlock(&list_lock);

	aes_dev = dd;
	for (i = 0; i < ARRAY_SIZE(algs); i++) {
		algs[i].cra_priority = 300;
		algs[i].cra_ctxsize = sizeof(struct tegra_aes_ctx);
		algs[i].cra_module = THIS_MODULE;
		algs[i].cra_init = tegra_aes_cra_init;
		algs[i].cra_exit = tegra_aes_cra_exit;

		err = crypto_register_alg(&algs[i]);
		if (err)
			goto out_alg;
	}

	dev_info(dev, "registered");
	return 0;

out_alg:
	for (j = 0; j < i; j++)
		crypto_unregister_alg(&algs[j]);
out:
	if (dd->ivkey_base)
		dma_free_coherent(dev, AES_HW_KEY_TABLE_LENGTH_BYTES,
				  dd->ivkey_base, dd->ivkey_phys_base);
	if (dd->buf_in)
		dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
				  dd->buf_in, dd->dma_buf_in);
	if (dd->buf_out)
		dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
				  dd->buf_out, dd->dma_buf_out);
	if (aes_wq)
		destroy_workqueue(aes_wq);
	spin_lock(&list_lock);
	list_del(&dev_list);
	spin_unlock(&list_lock);

	dev_err(dev, "%s: initialization failed.\n", __func__);
	return err;
}
static int tegra_aes_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct tegra_aes_dev *dd = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < ARRAY_SIZE(algs); i++)
		crypto_unregister_alg(&algs[i]);

	cancel_work_sync(&aes_work);
	destroy_workqueue(aes_wq);
	spin_lock(&list_lock);
	list_del(&dev_list);
	spin_unlock(&list_lock);

	dma_free_coherent(dev, AES_HW_KEY_TABLE_LENGTH_BYTES,
			  dd->ivkey_base, dd->ivkey_phys_base);
	dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
			  dd->buf_in, dd->dma_buf_in);
	dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
			  dd->buf_out, dd->dma_buf_out);
	aes_dev = NULL;

	return 0;
}
static struct of_device_id tegra_aes_of_match[] = {
	{ .compatible = "nvidia,tegra20-aes", },
	{ .compatible = "nvidia,tegra30-aes", },
	{ },
};

static struct platform_driver tegra_aes_driver = {
	.probe  = tegra_aes_probe,
	.remove = tegra_aes_remove,
	.driver = {
		.name   = "tegra-aes",
		.owner  = THIS_MODULE,
		.of_match_table = tegra_aes_of_match,
	},
};

module_platform_driver(tegra_aes_driver);
MODULE_DESCRIPTION("Tegra AES/OFB/CPRNG hw acceleration support.");
MODULE_AUTHOR("NVIDIA Corporation");
MODULE_LICENSE("GPL v2");