/*
 * drivers/crypto/tegra-aes.c
 *
 * Driver for NVIDIA Tegra AES hardware engine residing inside the
 * Bit Stream Engine for Video (BSEV) hardware block.
 *
 * The programming sequence for this engine is done with the help
 * of commands which travel via a command queue residing between the
 * CPU and the BSEV block. The BSEV engine has an internal RAM (VRAM)
 * where the final input plaintext, keys and the IV have to be copied
 * before starting the encrypt/decrypt operation.
 *
 * Copyright (c) 2010, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/workqueue.h>

#include <crypto/scatterwalk.h>
#include <crypto/aes.h>
#include <crypto/internal/rng.h>

#include "tegra-aes.h"

#define FLAGS_MODE_MASK		0x00FF
#define FLAGS_ENCRYPT		BIT(0)
#define FLAGS_CBC		BIT(1)
#define FLAGS_GIV		BIT(2)
#define FLAGS_RNG		BIT(3)
#define FLAGS_OFB		BIT(4)
#define FLAGS_NEW_KEY		BIT(5)
#define FLAGS_NEW_IV		BIT(6)
#define FLAGS_INIT		BIT(7)
#define FLAGS_FAST		BIT(8)
#define FLAGS_BUSY		9

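/*
 * How the mode flags combine for the request entry points below (derived
 * from the tegra_aes_*_encrypt/_decrypt wrappers in this file, listed here
 * purely for reference):
 *
 *   ecb(aes) encrypt: FLAGS_ENCRYPT
 *   ecb(aes) decrypt: 0
 *   cbc(aes) encrypt: FLAGS_ENCRYPT | FLAGS_CBC
 *   cbc(aes) decrypt: FLAGS_CBC
 *   ofb(aes) encrypt: FLAGS_ENCRYPT | FLAGS_OFB
 *   ofb(aes) decrypt: FLAGS_OFB
 *   ansi_cprng:       FLAGS_ENCRYPT | FLAGS_RNG
 */
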
/*
 * Defines the AES engine's maximum processing size in bytes for one go,
 * which takes about 1 msec.  The AES engine spends about 176 cycles per
 * 16 bytes, i.e. ~11 cycles/byte.  The CPU may use the BSE for up to 1 msec
 * at a time, which gives about 216K AVP/BSE cycles; in that window AES can
 * process about 216K/11 ~= 19KB.  Based on this,
 * AES_HW_DMA_BUFFER_SIZE_BYTES is set to 16KB.
 */
#define AES_HW_DMA_BUFFER_SIZE_BYTES 0x4000

/*
 * The key table length is 64 bytes
 * (This includes first up to 32 bytes key + 16 bytes original initial vector
 * and 16 bytes updated initial vector)
 */
#define AES_HW_KEY_TABLE_LENGTH_BYTES 64

/*
 * The memory being used is divided as follows:
 * 1. Key - 32 bytes
 * 2. Original IV - 16 bytes
 * 3. Updated IV - 16 bytes
 * 4. Key schedule - 256 bytes
 *
 * 1+2+3 constitute the hw key table.
 */
#define AES_HW_IV_SIZE 16
#define AES_HW_KEYSCHEDULE_LEN 256
#define AES_IVKEY_SIZE (AES_HW_KEY_TABLE_LENGTH_BYTES + AES_HW_KEYSCHEDULE_LEN)

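/*
 * Resulting layout of the iv/key buffer, as implied by the sizes above
 * (offsets are illustrative, derived from this file rather than from
 * hardware documentation):
 *
 *   byte   0 -  31 : key (up to 32 bytes, zero padded)
 *   byte  32 -  47 : original IV
 *   byte  48 -  63 : updated IV
 *   byte  64 - 319 : key schedule
 *
 * The first AES_HW_KEY_TABLE_LENGTH_BYTES (64) form the hw key table that
 * is DMAed into VRAM; the whole region is AES_IVKEY_SIZE bytes.
 */
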
/* Define commands required for AES operation */
enum {
	CMD_BLKSTARTENGINE = 0x0E,
	CMD_DMASETUP = 0x10,
	CMD_DMACOMPLETE = 0x11,
	CMD_SETTABLE = 0x15,
	CMD_MEMDMAVD = 0x22,
};

/* Define sub-commands */
enum {
	SUBCMD_VRAM_SEL = 0x1,
	SUBCMD_CRYPTO_TABLE_SEL = 0x3,
	SUBCMD_KEY_TABLE_SEL = 0x8,
};

/* memdma_vd command */
#define MEMDMA_DIR_DTOVRAM	0 /* sdram -> vram */
#define MEMDMA_DIR_VTODRAM	1 /* vram -> sdram */
#define MEMDMA_DIR_SHIFT	25
#define MEMDMA_NUM_WORDS_SHIFT	12

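/*
 * Example: the memdma_vd command word used by aes_set_key() below to copy
 * the 64-byte key table from SDRAM into VRAM is composed as (sketch):
 *
 *   cmd = CMD_MEMDMAVD << CMDQ_OPCODE_SHIFT |
 *         MEMDMA_DIR_DTOVRAM << MEMDMA_DIR_SHIFT |
 *         (AES_HW_KEY_TABLE_LENGTH_BYTES / sizeof(u32)) <<
 *               MEMDMA_NUM_WORDS_SHIFT;
 *
 * i.e. direction in bit 25 and the transfer length in 32-bit words starting
 * at bit 12; the word that follows it in the ICQ is the source address.
 */
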
/* command queue bit shifts */
enum {
	CMDQ_KEYTABLEADDR_SHIFT = 0,
	CMDQ_KEYTABLEID_SHIFT = 17,
	CMDQ_VRAMSEL_SHIFT = 23,
	CMDQ_TABLESEL_SHIFT = 24,
	CMDQ_OPCODE_SHIFT = 26,
};

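/*
 * Example of a complete ICQ command word built from these shifts, modelled
 * on the SETTABLE key-load command issued in aes_set_key() (sketch only):
 *
 *   value = CMD_SETTABLE << CMDQ_OPCODE_SHIFT |
 *           SUBCMD_CRYPTO_TABLE_SEL << CMDQ_TABLESEL_SHIFT |
 *           SUBCMD_VRAM_SEL << CMDQ_VRAMSEL_SHIFT |
 *           (SUBCMD_KEY_TABLE_SEL | slot_num) << CMDQ_KEYTABLEID_SHIFT;
 *
 * opcode in bits 26+, table select at bit 24, VRAM select at bit 23 and the
 * key table id (key slot number) starting at bit 17.
 */
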
/*
 * The secure key slot contains a unique secure key generated
 * and loaded by the bootloader. This slot is marked as non-accessible
 * to the kernel.
 */
#define SSK_SLOT_NUM		4

#define AES_NR_KEYSLOTS		8
#define TEGRA_AES_QUEUE_LENGTH	50
#define DEFAULT_RNG_BLK_SZ	16

/* The command queue depth */
#define AES_HW_MAX_ICQ_LENGTH	5

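/*
 * aes_start_crypt() below fills a cmdq[AES_HW_MAX_ICQ_LENGTH] array with
 * the sequence DMASETUP, source address, BLKSTARTENGINE (carrying the
 * block count) and DMACOMPLETE.  All but the last queue entry are written
 * to the ICQ before waiting for the transfer-complete interrupt, and the
 * final entry is written once the interrupt has fired.
 */
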
struct tegra_aes_slot {
	struct list_head node;
	int slot_num;
};

static struct tegra_aes_slot ssk = {
	.slot_num = SSK_SLOT_NUM,
};

struct tegra_aes_reqctx {
	unsigned long mode;
};

struct tegra_aes_dev {
	struct device *dev;
	void __iomem *io_base;
	dma_addr_t ivkey_phys_base;
	void __iomem *ivkey_base;
	struct clk *aes_clk;
	struct tegra_aes_ctx *ctx;
	int irq;
	unsigned long flags;
	struct completion op_complete;
	u32 *buf_in;
	dma_addr_t dma_buf_in;
	u32 *buf_out;
	dma_addr_t dma_buf_out;
	u8 *iv;
	u8 dt[DEFAULT_RNG_BLK_SZ];
	int ivlen;
	u64 ctr;
	spinlock_t lock;
	struct crypto_queue queue;
	struct tegra_aes_slot *slots;
	struct ablkcipher_request *req;
	size_t total;
	struct scatterlist *in_sg;
	struct scatterlist *out_sg;
};

static struct tegra_aes_dev *aes_dev;

struct tegra_aes_ctx {
	struct tegra_aes_dev *dd;
	unsigned long flags;
	struct tegra_aes_slot *slot;
	u8 key[AES_MAX_KEY_SIZE];
	size_t keylen;
};

static struct tegra_aes_ctx rng_ctx = {
	.flags = FLAGS_NEW_KEY,
	.keylen = AES_KEYSIZE_128,
};

/* keep registered devices data here */
static struct list_head dev_list;
static DEFINE_SPINLOCK(list_lock);
static DEFINE_MUTEX(aes_lock);

static void aes_workqueue_handler(struct work_struct *work);
static DECLARE_WORK(aes_work, aes_workqueue_handler);
static struct workqueue_struct *aes_wq;

extern unsigned long long tegra_chip_uid(void);

static inline u32 aes_readl(struct tegra_aes_dev *dd, u32 offset)
{
	return readl(dd->io_base + offset);
}

static inline void aes_writel(struct tegra_aes_dev *dd, u32 val, u32 offset)
{
	writel(val, dd->io_base + offset);
}

static int aes_start_crypt(struct tegra_aes_dev *dd, u32 in_addr, u32 out_addr,
	int nblocks, int mode, bool upd_iv)
{
	u32 cmdq[AES_HW_MAX_ICQ_LENGTH];
	int i, eng_busy, icq_empty, ret;
	u32 value;

	/* reset all the interrupt bits */
	aes_writel(dd, 0xFFFFFFFF, TEGRA_AES_INTR_STATUS);

	/* enable error, dma xfer complete interrupts */
	aes_writel(dd, 0x33, TEGRA_AES_INT_ENB);

	cmdq[0] = CMD_DMASETUP << CMDQ_OPCODE_SHIFT;
	cmdq[1] = in_addr;
	cmdq[2] = CMD_BLKSTARTENGINE << CMDQ_OPCODE_SHIFT | (nblocks-1);
	cmdq[3] = CMD_DMACOMPLETE << CMDQ_OPCODE_SHIFT;

	value = aes_readl(dd, TEGRA_AES_CMDQUE_CONTROL);
	/* access SDRAM through AHB */
	value &= ~TEGRA_AES_CMDQ_CTRL_SRC_STM_SEL_FIELD;
	value &= ~TEGRA_AES_CMDQ_CTRL_DST_STM_SEL_FIELD;
	value |= TEGRA_AES_CMDQ_CTRL_SRC_STM_SEL_FIELD |
		 TEGRA_AES_CMDQ_CTRL_DST_STM_SEL_FIELD |
		 TEGRA_AES_CMDQ_CTRL_ICMDQEN_FIELD;
	aes_writel(dd, value, TEGRA_AES_CMDQUE_CONTROL);
	dev_dbg(dd->dev, "cmd_q_ctrl=0x%x", value);

	value = (0x1 << TEGRA_AES_SECURE_INPUT_ALG_SEL_SHIFT) |
		((dd->ctx->keylen * 8) <<
			TEGRA_AES_SECURE_INPUT_KEY_LEN_SHIFT) |
		((u32)upd_iv << TEGRA_AES_SECURE_IV_SELECT_SHIFT);

	if (mode & FLAGS_CBC) {
		value |= ((((mode & FLAGS_ENCRYPT) ? 2 : 3)
				<< TEGRA_AES_SECURE_XOR_POS_SHIFT) |
			(((mode & FLAGS_ENCRYPT) ? 2 : 3)
				<< TEGRA_AES_SECURE_VCTRAM_SEL_SHIFT) |
			((mode & FLAGS_ENCRYPT) ? 1 : 0)
				<< TEGRA_AES_SECURE_CORE_SEL_SHIFT);
	} else if (mode & FLAGS_OFB) {
		value |= ((TEGRA_AES_SECURE_XOR_POS_FIELD) |
			(2 << TEGRA_AES_SECURE_INPUT_SEL_SHIFT) |
			(TEGRA_AES_SECURE_CORE_SEL_FIELD));
	} else if (mode & FLAGS_RNG) {
		value |= (((mode & FLAGS_ENCRYPT) ? 1 : 0)
				<< TEGRA_AES_SECURE_CORE_SEL_SHIFT |
			TEGRA_AES_SECURE_RNG_ENB_FIELD);
	} else {
		value |= (((mode & FLAGS_ENCRYPT) ? 1 : 0)
				<< TEGRA_AES_SECURE_CORE_SEL_SHIFT);
	}

	dev_dbg(dd->dev, "secure_in_sel=0x%x", value);
	aes_writel(dd, value, TEGRA_AES_SECURE_INPUT_SELECT);

	aes_writel(dd, out_addr, TEGRA_AES_SECURE_DEST_ADDR);
	INIT_COMPLETION(dd->op_complete);

	for (i = 0; i < AES_HW_MAX_ICQ_LENGTH - 1; i++) {
		do {
			value = aes_readl(dd, TEGRA_AES_INTR_STATUS);
			eng_busy = value & TEGRA_AES_ENGINE_BUSY_FIELD;
			icq_empty = value & TEGRA_AES_ICQ_EMPTY_FIELD;
		} while (eng_busy && !icq_empty);
		aes_writel(dd, cmdq[i], TEGRA_AES_ICMDQUE_WR);
	}

	ret = wait_for_completion_timeout(&dd->op_complete,
					  msecs_to_jiffies(150));
	if (ret == 0) {
		dev_err(dd->dev, "timed out (0x%x)\n",
			aes_readl(dd, TEGRA_AES_INTR_STATUS));
		return -ETIMEDOUT;
	}

	aes_writel(dd, cmdq[AES_HW_MAX_ICQ_LENGTH - 1], TEGRA_AES_ICMDQUE_WR);
	return 0;
}

static void aes_release_key_slot(struct tegra_aes_slot *slot)
{
	if (slot->slot_num == SSK_SLOT_NUM)
		return;

	spin_lock(&list_lock);
	list_add_tail(&slot->node, &dev_list);
	spin_unlock(&list_lock);
}

static struct tegra_aes_slot *aes_find_key_slot(void)
{
	struct tegra_aes_slot *slot = NULL;
	struct list_head *new_head;
	int empty;

	spin_lock(&list_lock);
	empty = list_empty(&dev_list);
	if (!empty) {
		slot = list_entry(&dev_list, struct tegra_aes_slot, node);
		new_head = dev_list.next;
		list_del(&dev_list);
		dev_list.next = new_head->next;
		dev_list.prev = NULL;
	}
	spin_unlock(&list_lock);

	return slot;
}

static int aes_set_key(struct tegra_aes_dev *dd)
{
	u32 value, cmdq[2];
	struct tegra_aes_ctx *ctx = dd->ctx;
	int eng_busy, icq_empty, dma_busy;
	bool use_ssk = false;

	/* use ssk? */
	if (!dd->ctx->slot) {
		dev_dbg(dd->dev, "using ssk");
		dd->ctx->slot = &ssk;
		use_ssk = true;
	}

	/* enable key schedule generation in hardware */
	value = aes_readl(dd, TEGRA_AES_SECURE_CONFIG_EXT);
	value &= ~TEGRA_AES_SECURE_KEY_SCH_DIS_FIELD;
	aes_writel(dd, value, TEGRA_AES_SECURE_CONFIG_EXT);

	/* select the key slot */
	value = aes_readl(dd, TEGRA_AES_SECURE_CONFIG);
	value &= ~TEGRA_AES_SECURE_KEY_INDEX_FIELD;
	value |= (ctx->slot->slot_num << TEGRA_AES_SECURE_KEY_INDEX_SHIFT);
	aes_writel(dd, value, TEGRA_AES_SECURE_CONFIG);

	if (use_ssk)
		return 0;

	/* copy the key table from sdram to vram */
	cmdq[0] = CMD_MEMDMAVD << CMDQ_OPCODE_SHIFT |
		MEMDMA_DIR_DTOVRAM << MEMDMA_DIR_SHIFT |
		AES_HW_KEY_TABLE_LENGTH_BYTES / sizeof(u32) <<
			MEMDMA_NUM_WORDS_SHIFT;
	cmdq[1] = (u32)dd->ivkey_phys_base;

	aes_writel(dd, cmdq[0], TEGRA_AES_ICMDQUE_WR);
	aes_writel(dd, cmdq[1], TEGRA_AES_ICMDQUE_WR);

	do {
		value = aes_readl(dd, TEGRA_AES_INTR_STATUS);
		eng_busy = value & TEGRA_AES_ENGINE_BUSY_FIELD;
		icq_empty = value & TEGRA_AES_ICQ_EMPTY_FIELD;
		dma_busy = value & TEGRA_AES_DMA_BUSY_FIELD;
	} while (eng_busy && !icq_empty && dma_busy);

	/* issue the SETTABLE command to load the key into internal registers */
	value = CMD_SETTABLE << CMDQ_OPCODE_SHIFT |
		SUBCMD_CRYPTO_TABLE_SEL << CMDQ_TABLESEL_SHIFT |
		SUBCMD_VRAM_SEL << CMDQ_VRAMSEL_SHIFT |
		(SUBCMD_KEY_TABLE_SEL | ctx->slot->slot_num) <<
			CMDQ_KEYTABLEID_SHIFT;
	aes_writel(dd, value, TEGRA_AES_ICMDQUE_WR);
	do {
		value = aes_readl(dd, TEGRA_AES_INTR_STATUS);
		eng_busy = value & TEGRA_AES_ENGINE_BUSY_FIELD;
		icq_empty = value & TEGRA_AES_ICQ_EMPTY_FIELD;
	} while (eng_busy && !icq_empty);

	return 0;
}

static int tegra_aes_handle_req(struct tegra_aes_dev *dd)
{
	struct crypto_async_request *async_req, *backlog;
	struct crypto_ablkcipher *tfm;
	struct tegra_aes_ctx *ctx;
	struct tegra_aes_reqctx *rctx;
	struct ablkcipher_request *req;
	unsigned long flags;
	int dma_max = AES_HW_DMA_BUFFER_SIZE_BYTES;
	int ret = 0, nblocks, total;
	int count = 0;
	dma_addr_t addr_in, addr_out;
	struct scatterlist *in_sg, *out_sg;

	spin_lock_irqsave(&dd->lock, flags);
	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (!async_req)
		clear_bit(FLAGS_BUSY, &dd->flags);
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return -ENODATA;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ablkcipher_request_cast(async_req);

	dev_dbg(dd->dev, "%s: get new req\n", __func__);

	if (!req->src || !req->dst)
		return -EINVAL;

	/* take mutex to access the aes hw */
	mutex_lock(&aes_lock);

	/* assign new request to device */
	dd->req = req;
	dd->total = req->nbytes;
	dd->in_sg = req->src;
	dd->out_sg = req->dst;

	in_sg = dd->in_sg;
	out_sg = dd->out_sg;
	total = dd->total;

	tfm = crypto_ablkcipher_reqtfm(req);
	rctx = ablkcipher_request_ctx(req);
	ctx = crypto_ablkcipher_ctx(tfm);
	rctx->mode &= FLAGS_MODE_MASK;
	dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;

	dd->iv = (u8 *)req->info;
	dd->ivlen = crypto_ablkcipher_ivsize(tfm);

	/* assign new context to device */
	ctx->dd = dd;
	dd->ctx = ctx;

	if (ctx->flags & FLAGS_NEW_KEY) {
		memcpy(dd->ivkey_base, ctx->key, ctx->keylen);
		memset(dd->ivkey_base + ctx->keylen, 0,
			AES_HW_KEY_TABLE_LENGTH_BYTES - ctx->keylen);
		aes_set_key(dd);
		ctx->flags &= ~FLAGS_NEW_KEY;
	}

	if (((dd->flags & FLAGS_CBC) || (dd->flags & FLAGS_OFB)) && dd->iv) {
		/* set iv to the aes hw slot
		 * Hw generates updated iv only after iv is set in slot.
		 * So key and iv is passed asynchronously.
		 */
		memcpy(dd->buf_in, dd->iv, dd->ivlen);

		ret = aes_start_crypt(dd, (u32)dd->dma_buf_in,
				      dd->dma_buf_out, 1, FLAGS_CBC, false);
		if (ret < 0) {
			dev_err(dd->dev, "aes_start_crypt fail(%d)\n", ret);
			goto out;
		}
	}

	while (total) {
		dev_dbg(dd->dev, "remain: %d\n", total);
		ret = dma_map_sg(dd->dev, in_sg, 1, DMA_TO_DEVICE);
		if (!ret) {
			dev_err(dd->dev, "dma_map_sg() error\n");
			goto out;
		}

		ret = dma_map_sg(dd->dev, out_sg, 1, DMA_FROM_DEVICE);
		if (!ret) {
			dev_err(dd->dev, "dma_map_sg() error\n");
			dma_unmap_sg(dd->dev, dd->in_sg,
				1, DMA_TO_DEVICE);
			goto out;
		}

		addr_in = sg_dma_address(in_sg);
		addr_out = sg_dma_address(out_sg);
		dd->flags |= FLAGS_FAST;
		count = min_t(int, sg_dma_len(in_sg), dma_max);
		WARN_ON(sg_dma_len(in_sg) != sg_dma_len(out_sg));
		nblocks = DIV_ROUND_UP(count, AES_BLOCK_SIZE);

		ret = aes_start_crypt(dd, addr_in, addr_out, nblocks,
			dd->flags, true);

		dma_unmap_sg(dd->dev, out_sg, 1, DMA_FROM_DEVICE);
		dma_unmap_sg(dd->dev, in_sg, 1, DMA_TO_DEVICE);
		if (ret < 0) {
			dev_err(dd->dev, "aes_start_crypt fail(%d)\n", ret);
			goto out;
		}
		dd->flags &= ~FLAGS_FAST;

		dev_dbg(dd->dev, "out: copied %d\n", count);
		total -= count;
		in_sg = sg_next(in_sg);
		out_sg = sg_next(out_sg);
		WARN_ON(((total != 0) && (!in_sg || !out_sg)));
	}

out:
	mutex_unlock(&aes_lock);

	if (dd->req->base.complete)
		dd->req->base.complete(&dd->req->base, ret);

	dev_dbg(dd->dev, "%s: exit\n", __func__);
	return ret;
}

static int tegra_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct tegra_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct tegra_aes_dev *dd = aes_dev;
	struct tegra_aes_slot *key_slot;

	if ((keylen != AES_KEYSIZE_128) && (keylen != AES_KEYSIZE_192) &&
		(keylen != AES_KEYSIZE_256)) {
		dev_err(dd->dev, "unsupported key size\n");
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	dev_dbg(dd->dev, "keylen: %d\n", keylen);

	ctx->dd = dd;

	if (key) {
		if (!ctx->slot) {
			key_slot = aes_find_key_slot();
			if (!key_slot) {
				dev_err(dd->dev, "no empty slot\n");
				return -ENOMEM;
			}

			ctx->slot = key_slot;
		}

		memcpy(ctx->key, key, keylen);
		ctx->keylen = keylen;
	}

	ctx->flags |= FLAGS_NEW_KEY;
	dev_dbg(dd->dev, "done\n");
	return 0;
}

static void aes_workqueue_handler(struct work_struct *work)
{
	struct tegra_aes_dev *dd = aes_dev;
	int ret;

	ret = clk_prepare_enable(dd->aes_clk);
	if (ret)
		BUG_ON("clock enable failed");

	/* empty the crypto queue and then return */
	do {
		ret = tegra_aes_handle_req(dd);
	} while (!ret);

	clk_disable_unprepare(dd->aes_clk);
}

static irqreturn_t aes_irq(int irq, void *dev_id)
{
	struct tegra_aes_dev *dd = (struct tegra_aes_dev *)dev_id;
	u32 value = aes_readl(dd, TEGRA_AES_INTR_STATUS);
	int busy = test_bit(FLAGS_BUSY, &dd->flags);

	if (!busy) {
		dev_dbg(dd->dev, "spurious interrupt\n");
		return IRQ_NONE;
	}

	dev_dbg(dd->dev, "irq_stat: 0x%x\n", value);
	if (value & TEGRA_AES_INT_ERROR_MASK)
		aes_writel(dd, TEGRA_AES_INT_ERROR_MASK, TEGRA_AES_INTR_STATUS);

	if (!(value & TEGRA_AES_ENGINE_BUSY_FIELD))
		complete(&dd->op_complete);
	else
		return IRQ_NONE;

	return IRQ_HANDLED;
}

static int tegra_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
	struct tegra_aes_reqctx *rctx = ablkcipher_request_ctx(req);
	struct tegra_aes_dev *dd = aes_dev;
	unsigned long flags;
	int err = 0;
	int busy;

	dev_dbg(dd->dev, "nbytes: %d, enc: %d, cbc: %d, ofb: %d\n",
		req->nbytes, !!(mode & FLAGS_ENCRYPT),
		!!(mode & FLAGS_CBC), !!(mode & FLAGS_OFB));

	rctx->mode = mode;

	spin_lock_irqsave(&dd->lock, flags);
	err = ablkcipher_enqueue_request(&dd->queue, req);
	busy = test_and_set_bit(FLAGS_BUSY, &dd->flags);
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!busy)
		queue_work(aes_wq, &aes_work);

	return err;
}

static int tegra_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	return tegra_aes_crypt(req, FLAGS_ENCRYPT);
}

static int tegra_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	return tegra_aes_crypt(req, 0);
}

static int tegra_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	return tegra_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
}

static int tegra_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	return tegra_aes_crypt(req, FLAGS_CBC);
}

static int tegra_aes_ofb_encrypt(struct ablkcipher_request *req)
{
	return tegra_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_OFB);
}

static int tegra_aes_ofb_decrypt(struct ablkcipher_request *req)
{
	return tegra_aes_crypt(req, FLAGS_OFB);
}

static int tegra_aes_get_random(struct crypto_rng *tfm, u8 *rdata,
				unsigned int dlen)
{
	struct tegra_aes_dev *dd = aes_dev;
	struct tegra_aes_ctx *ctx = &rng_ctx;
	int ret, i;
	u8 *dest = rdata, *dt = dd->dt;

	/* take mutex to access the aes hw */
	mutex_lock(&aes_lock);

	ret = clk_prepare_enable(dd->aes_clk);
	if (ret) {
		mutex_unlock(&aes_lock);
		return ret;
	}

	ctx->dd = dd;
	dd->ctx = ctx;
	dd->flags = FLAGS_ENCRYPT | FLAGS_RNG;

	memcpy(dd->buf_in, dt, DEFAULT_RNG_BLK_SZ);

	ret = aes_start_crypt(dd, (u32)dd->dma_buf_in,
			      (u32)dd->dma_buf_out, 1, dd->flags, true);
	if (ret < 0) {
		dev_err(dd->dev, "aes_start_crypt fail(%d)\n", ret);
		dlen = ret;
		goto out;
	}

	memcpy(dest, dd->buf_out, dlen);

	/* update the DT counter */
	for (i = DEFAULT_RNG_BLK_SZ - 1; i >= 0; i--) {
		dt[i] += 1;
		if (dt[i] != 0)
			break;
	}

out:
	clk_disable_unprepare(dd->aes_clk);
	mutex_unlock(&aes_lock);

	dev_dbg(dd->dev, "%s: done\n", __func__);
	return dlen;
}

static int tegra_aes_rng_reset(struct crypto_rng *tfm, u8 *seed,
			       unsigned int slen)
{
	struct tegra_aes_dev *dd = aes_dev;
	struct tegra_aes_ctx *ctx = &rng_ctx;
	struct tegra_aes_slot *key_slot;
	struct timespec ts;
	int ret = 0;
	u64 nsec, tmp[2];
	u8 *dt;

	if (!ctx || !dd) {
		dev_err(dd->dev, "ctx=0x%x, dd=0x%x\n",
			(unsigned int)ctx, (unsigned int)dd);
		return -EINVAL;
	}

	if (slen < (DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128)) {
		dev_err(dd->dev, "seed size invalid");
		return -ENOMEM;
	}

	/* take mutex to access the aes hw */
	mutex_lock(&aes_lock);

	if (!ctx->slot) {
		key_slot = aes_find_key_slot();
		if (!key_slot) {
			dev_err(dd->dev, "no empty slot\n");
			mutex_unlock(&aes_lock);
			return -ENOMEM;
		}
		ctx->slot = key_slot;
	}

	ctx->dd = dd;
	dd->ctx = ctx;
	dd->ctr = 0;

	ctx->keylen = AES_KEYSIZE_128;
	ctx->flags |= FLAGS_NEW_KEY;

	/* copy the key to the key slot */
	memcpy(dd->ivkey_base, seed + DEFAULT_RNG_BLK_SZ, AES_KEYSIZE_128);
	memset(dd->ivkey_base + AES_KEYSIZE_128, 0,
		AES_HW_KEY_TABLE_LENGTH_BYTES - AES_KEYSIZE_128);

	dd->iv = seed;
	dd->ivlen = slen;

	dd->flags = FLAGS_ENCRYPT | FLAGS_RNG;

	ret = clk_prepare_enable(dd->aes_clk);
	if (ret) {
		mutex_unlock(&aes_lock);
		return ret;
	}

	aes_set_key(dd);

	/* set seed to the aes hw slot */
	memcpy(dd->buf_in, dd->iv, DEFAULT_RNG_BLK_SZ);
	ret = aes_start_crypt(dd, (u32)dd->dma_buf_in,
			      dd->dma_buf_out, 1, FLAGS_CBC, false);
	if (ret < 0) {
		dev_err(dd->dev, "aes_start_crypt fail(%d)\n", ret);
		goto out;
	}

	if (dd->ivlen >= (2 * DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128)) {
		dt = dd->iv + DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128;
	} else {
		getnstimeofday(&ts);
		nsec = timespec_to_ns(&ts);
		do_div(nsec, 1000);
		nsec ^= dd->ctr << 56;
		dd->ctr++;
		tmp[0] = nsec;
		tmp[1] = tegra_chip_uid();
		dt = (u8 *)tmp;
	}
	memcpy(dd->dt, dt, DEFAULT_RNG_BLK_SZ);

out:
	clk_disable_unprepare(dd->aes_clk);
	mutex_unlock(&aes_lock);

	dev_dbg(dd->dev, "%s: done\n", __func__);
	return ret;
}

static int tegra_aes_cra_init(struct crypto_tfm *tfm)
{
	tfm->crt_ablkcipher.reqsize = sizeof(struct tegra_aes_reqctx);

	return 0;
}

void tegra_aes_cra_exit(struct crypto_tfm *tfm)
{
	struct tegra_aes_ctx *ctx =
		crypto_ablkcipher_ctx((struct crypto_ablkcipher *)tfm);

	if (ctx && ctx->slot)
		aes_release_key_slot(ctx->slot);
}

static struct crypto_alg algs[] = {
	{
		.cra_name = "ecb(aes)",
		.cra_driver_name = "ecb-aes-tegra",
		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_type = &crypto_ablkcipher_type,
		.cra_u.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = tegra_aes_setkey,
			.encrypt = tegra_aes_ecb_encrypt,
			.decrypt = tegra_aes_ecb_decrypt,
		},
	}, {
		.cra_name = "cbc(aes)",
		.cra_driver_name = "cbc-aes-tegra",
		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_type = &crypto_ablkcipher_type,
		.cra_u.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_MIN_KEY_SIZE,
			.setkey = tegra_aes_setkey,
			.encrypt = tegra_aes_cbc_encrypt,
			.decrypt = tegra_aes_cbc_decrypt,
		},
	}, {
		.cra_name = "ofb(aes)",
		.cra_driver_name = "ofb-aes-tegra",
		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_type = &crypto_ablkcipher_type,
		.cra_u.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_MIN_KEY_SIZE,
			.setkey = tegra_aes_setkey,
			.encrypt = tegra_aes_ofb_encrypt,
			.decrypt = tegra_aes_ofb_decrypt,
		},
	}, {
		.cra_name = "ansi_cprng",
		.cra_driver_name = "rng-aes-tegra",
		.cra_flags = CRYPTO_ALG_TYPE_RNG,
		.cra_ctxsize = sizeof(struct tegra_aes_ctx),
		.cra_type = &crypto_rng_type,
		.cra_u.rng = {
			.rng_make_random = tegra_aes_get_random,
			.rng_reset = tegra_aes_rng_reset,
			.seedsize = AES_KEYSIZE_128 + (2 * DEFAULT_RNG_BLK_SZ),
		},
	}
};

static int tegra_aes_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct tegra_aes_dev *dd;
	struct resource *res;
	int err = -ENOMEM, i = 0, j;

	dd = devm_kzalloc(dev, sizeof(struct tegra_aes_dev), GFP_KERNEL);
	if (dd == NULL) {
		dev_err(dev, "unable to alloc data struct.\n");
		return err;
	}

	dd->dev = dev;
	platform_set_drvdata(pdev, dd);

	dd->slots = devm_kzalloc(dev, sizeof(struct tegra_aes_slot) *
				 AES_NR_KEYSLOTS, GFP_KERNEL);
	if (dd->slots == NULL) {
		dev_err(dev, "unable to alloc slot struct.\n");
		goto out;
	}

	spin_lock_init(&dd->lock);
	crypto_init_queue(&dd->queue, TEGRA_AES_QUEUE_LENGTH);

	/* Get the module base address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "invalid resource type: base\n");
		err = -ENODEV;
		goto out;
	}

	if (!devm_request_mem_region(&pdev->dev, res->start,
				     resource_size(res),
				     dev_name(&pdev->dev))) {
		dev_err(&pdev->dev, "Couldn't request MEM resource\n");
		return -ENODEV;
	}

	dd->io_base = devm_ioremap(dev, res->start, resource_size(res));
	if (!dd->io_base) {
		dev_err(dev, "can't ioremap register space\n");
		err = -ENOMEM;
		goto out;
	}

	/* Initialize the vde clock */
	dd->aes_clk = clk_get(dev, "vde");
	if (IS_ERR(dd->aes_clk)) {
		dev_err(dev, "iclock initialization failed.\n");
		err = -ENODEV;
		goto out;
	}

	err = clk_set_rate(dd->aes_clk, ULONG_MAX);
	if (err) {
		dev_err(dd->dev, "iclk set_rate fail(%d)\n", err);
		goto out;
	}

	/*
	 * The following contiguous memory is allocated:
	 * - hardware key table
	 * - key schedule
	 */
	dd->ivkey_base = dma_alloc_coherent(dev, AES_HW_KEY_TABLE_LENGTH_BYTES,
					    &dd->ivkey_phys_base,
					    GFP_KERNEL);
	if (!dd->ivkey_base) {
		dev_err(dev, "can not allocate iv/key buffer\n");
		err = -ENOMEM;
		goto out;
	}

	dd->buf_in = dma_alloc_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
					&dd->dma_buf_in, GFP_KERNEL);
	if (!dd->buf_in) {
		dev_err(dev, "can not allocate dma-in buffer\n");
		err = -ENOMEM;
		goto out;
	}

	dd->buf_out = dma_alloc_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
					 &dd->dma_buf_out, GFP_KERNEL);
	if (!dd->buf_out) {
		dev_err(dev, "can not allocate dma-out buffer\n");
		err = -ENOMEM;
		goto out;
	}

	init_completion(&dd->op_complete);
	aes_wq = alloc_workqueue("tegra_aes_wq", WQ_HIGHPRI | WQ_UNBOUND, 1);
	if (!aes_wq) {
		dev_err(dev, "alloc_workqueue failed\n");
		err = -ENOMEM;
		goto out;
	}

	/* get the irq */
	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		dev_err(dev, "invalid resource type: base\n");
		err = -ENODEV;
		goto out;
	}
	dd->irq = res->start;

	err = devm_request_irq(dev, dd->irq, aes_irq, IRQF_TRIGGER_HIGH |
				IRQF_SHARED, "tegra-aes", dd);
	if (err) {
		dev_err(dev, "request_irq failed\n");
		goto out;
	}

	mutex_init(&aes_lock);
	INIT_LIST_HEAD(&dev_list);

	spin_lock_init(&list_lock);
	spin_lock(&list_lock);
	for (i = 0; i < AES_NR_KEYSLOTS; i++) {
		if (i == SSK_SLOT_NUM)
			continue;
		dd->slots[i].slot_num = i;
		INIT_LIST_HEAD(&dd->slots[i].node);
		list_add_tail(&dd->slots[i].node, &dev_list);
	}
	spin_unlock(&list_lock);

	aes_dev = dd;
	for (i = 0; i < ARRAY_SIZE(algs); i++) {
		algs[i].cra_priority = 300;
		algs[i].cra_ctxsize = sizeof(struct tegra_aes_ctx);
		algs[i].cra_module = THIS_MODULE;
		algs[i].cra_init = tegra_aes_cra_init;
		algs[i].cra_exit = tegra_aes_cra_exit;

		err = crypto_register_alg(&algs[i]);
		if (err)
			goto out;
	}

	dev_info(dev, "registered");
	return 0;

out:
	for (j = 0; j < i; j++)
		crypto_unregister_alg(&algs[j]);
	if (dd->ivkey_base)
		dma_free_coherent(dev, AES_HW_KEY_TABLE_LENGTH_BYTES,
				  dd->ivkey_base, dd->ivkey_phys_base);
	if (dd->buf_in)
		dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
				  dd->buf_in, dd->dma_buf_in);
	if (dd->buf_out)
		dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
				  dd->buf_out, dd->dma_buf_out);
	if (!IS_ERR(dd->aes_clk))
		clk_put(dd->aes_clk);
	if (aes_wq)
		destroy_workqueue(aes_wq);
	spin_lock(&list_lock);
	list_del(&dev_list);
	spin_unlock(&list_lock);

	aes_dev = NULL;

	dev_err(dev, "%s: initialization failed.\n", __func__);
	return err;
}

static int tegra_aes_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct tegra_aes_dev *dd = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < ARRAY_SIZE(algs); i++)
		crypto_unregister_alg(&algs[i]);

	cancel_work_sync(&aes_work);
	destroy_workqueue(aes_wq);
	spin_lock(&list_lock);
	list_del(&dev_list);
	spin_unlock(&list_lock);

	dma_free_coherent(dev, AES_HW_KEY_TABLE_LENGTH_BYTES,
			  dd->ivkey_base, dd->ivkey_phys_base);
	dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
			  dd->buf_in, dd->dma_buf_in);
	dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
			  dd->buf_out, dd->dma_buf_out);
	clk_put(dd->aes_clk);
	aes_dev = NULL;

	return 0;
}

static struct of_device_id tegra_aes_of_match[] = {
	{ .compatible = "nvidia,tegra20-aes", },
	{ .compatible = "nvidia,tegra30-aes", },
	{ },
};

static struct platform_driver tegra_aes_driver = {
	.probe = tegra_aes_probe,
	.remove = tegra_aes_remove,
	.driver = {
		.name = "tegra-aes",
		.owner = THIS_MODULE,
		.of_match_table = tegra_aes_of_match,
	},
};

module_platform_driver(tegra_aes_driver);

MODULE_DESCRIPTION("Tegra AES/OFB/CPRNG hw acceleration support.");
MODULE_AUTHOR("NVIDIA Corporation");
MODULE_LICENSE("GPL v2");