// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for ARTPEC-6 crypto block using the kernel asynchronous crypto api.
 *
 * Copyright (C) 2014-2017 Axis Communications AB
 */
#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/bitfield.h>
#include <linux/crypto.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/fault-inject.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <crypto/aes.h>
#include <crypto/gcm.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/xts.h>
/* Max length of a line in all cache levels for Artpec SoCs. */
#define ARTPEC_CACHE_LINE_MAX	32
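/* Bounce buffers and the driver's pad buffer are sized and aligned to this
 * value so that a DMA target never shares a cache line with CPU-owned data;
 * see the bounce-buffer handling further down.
 */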
#define PDMA_OUT_CFG		0x0000
#define PDMA_OUT_BUF_CFG	0x0004
#define PDMA_OUT_CMD		0x0008
#define PDMA_OUT_DESCRQ_PUSH	0x0010
#define PDMA_OUT_DESCRQ_STAT	0x0014

#define A6_PDMA_IN_CFG		0x0028
#define A6_PDMA_IN_BUF_CFG	0x002c
#define A6_PDMA_IN_CMD		0x0030
#define A6_PDMA_IN_STATQ_PUSH	0x0038
#define A6_PDMA_IN_DESCRQ_PUSH	0x0044
#define A6_PDMA_IN_DESCRQ_STAT	0x0048
#define A6_PDMA_INTR_MASK	0x0068
#define A6_PDMA_ACK_INTR	0x006c
#define A6_PDMA_MASKED_INTR	0x0074

#define A7_PDMA_IN_CFG		0x002c
#define A7_PDMA_IN_BUF_CFG	0x0030
#define A7_PDMA_IN_CMD		0x0034
#define A7_PDMA_IN_STATQ_PUSH	0x003c
#define A7_PDMA_IN_DESCRQ_PUSH	0x0048
#define A7_PDMA_IN_DESCRQ_STAT	0x004C
#define A7_PDMA_INTR_MASK	0x006c
#define A7_PDMA_ACK_INTR	0x0070
#define A7_PDMA_MASKED_INTR	0x0078
#define PDMA_OUT_CFG_EN			BIT(0)

#define PDMA_OUT_BUF_CFG_DATA_BUF_SIZE	GENMASK(4, 0)
#define PDMA_OUT_BUF_CFG_DESCR_BUF_SIZE	GENMASK(9, 5)

#define PDMA_OUT_CMD_START		BIT(0)
#define A6_PDMA_OUT_CMD_STOP		BIT(3)
#define A7_PDMA_OUT_CMD_STOP		BIT(2)

#define PDMA_OUT_DESCRQ_PUSH_LEN	GENMASK(5, 0)
#define PDMA_OUT_DESCRQ_PUSH_ADDR	GENMASK(31, 6)

#define PDMA_OUT_DESCRQ_STAT_LEVEL	GENMASK(3, 0)
#define PDMA_OUT_DESCRQ_STAT_SIZE	GENMASK(7, 4)

#define PDMA_IN_CFG_EN			BIT(0)

#define PDMA_IN_BUF_CFG_DATA_BUF_SIZE	GENMASK(4, 0)
#define PDMA_IN_BUF_CFG_DESCR_BUF_SIZE	GENMASK(9, 5)
#define PDMA_IN_BUF_CFG_STAT_BUF_SIZE	GENMASK(14, 10)

#define PDMA_IN_CMD_START		BIT(0)
#define A6_PDMA_IN_CMD_FLUSH_STAT	BIT(2)
#define A6_PDMA_IN_CMD_STOP		BIT(3)
#define A7_PDMA_IN_CMD_FLUSH_STAT	BIT(1)
#define A7_PDMA_IN_CMD_STOP		BIT(2)

#define PDMA_IN_STATQ_PUSH_LEN		GENMASK(5, 0)
#define PDMA_IN_STATQ_PUSH_ADDR		GENMASK(31, 6)

#define PDMA_IN_DESCRQ_PUSH_LEN		GENMASK(5, 0)
#define PDMA_IN_DESCRQ_PUSH_ADDR	GENMASK(31, 6)

#define PDMA_IN_DESCRQ_STAT_LEVEL	GENMASK(3, 0)
#define PDMA_IN_DESCRQ_STAT_SIZE	GENMASK(7, 4)

#define A6_PDMA_INTR_MASK_IN_DATA	BIT(2)
#define A6_PDMA_INTR_MASK_IN_EOP	BIT(3)
#define A6_PDMA_INTR_MASK_IN_EOP_FLUSH	BIT(4)

#define A7_PDMA_INTR_MASK_IN_DATA	BIT(3)
#define A7_PDMA_INTR_MASK_IN_EOP	BIT(4)
#define A7_PDMA_INTR_MASK_IN_EOP_FLUSH	BIT(5)
#define A6_CRY_MD_OPER		GENMASK(19, 16)

#define A6_CRY_MD_HASH_SEL_CTX	GENMASK(21, 20)
#define A6_CRY_MD_HASH_HMAC_FIN	BIT(23)

#define A6_CRY_MD_CIPHER_LEN	GENMASK(21, 20)
#define A6_CRY_MD_CIPHER_DECR	BIT(22)
#define A6_CRY_MD_CIPHER_TWEAK	BIT(23)
#define A6_CRY_MD_CIPHER_DSEQ	BIT(24)

#define A7_CRY_MD_OPER		GENMASK(11, 8)

#define A7_CRY_MD_HASH_SEL_CTX	GENMASK(13, 12)
#define A7_CRY_MD_HASH_HMAC_FIN	BIT(15)

#define A7_CRY_MD_CIPHER_LEN	GENMASK(13, 12)
#define A7_CRY_MD_CIPHER_DECR	BIT(14)
#define A7_CRY_MD_CIPHER_TWEAK	BIT(15)
#define A7_CRY_MD_CIPHER_DSEQ	BIT(16)
/* DMA metadata constants */
#define regk_crypto_aes_cbc	0x00000002
#define regk_crypto_aes_ctr	0x00000003
#define regk_crypto_aes_ecb	0x00000001
#define regk_crypto_aes_gcm	0x00000004
#define regk_crypto_aes_xts	0x00000005
#define regk_crypto_cache	0x00000002
#define a6_regk_crypto_dlkey	0x0000000a
#define a7_regk_crypto_dlkey	0x0000000e
#define regk_crypto_ext		0x00000001
#define regk_crypto_hmac_sha1	0x00000007
#define regk_crypto_hmac_sha256	0x00000009
#define regk_crypto_init	0x00000000
#define regk_crypto_key_128	0x00000000
#define regk_crypto_key_192	0x00000001
#define regk_crypto_key_256	0x00000002
#define regk_crypto_null	0x00000000
#define regk_crypto_sha1	0x00000006
#define regk_crypto_sha256	0x00000008
/* DMA descriptor structures */
struct pdma_descr_ctrl {
	unsigned char short_descr : 1;
	unsigned char pad1 : 1;
	unsigned char eop : 1;
	unsigned char intr : 1;
	unsigned char short_len : 3;
	unsigned char pad2 : 1;
} __packed;

struct pdma_data_descr {
	unsigned int len : 24;
	unsigned int buf : 32;
} __packed;

struct pdma_short_descr {
	unsigned char data[7];
} __packed;

struct pdma_descr {
	struct pdma_descr_ctrl ctrl;
	union {
		struct pdma_data_descr data;
		struct pdma_short_descr shrt;
	};
} __packed;

struct pdma_stat_descr {
	unsigned char pad1 : 1;
	unsigned char pad2 : 1;
	unsigned char eop : 1;
	unsigned char pad3 : 5;
	unsigned int len : 24;
};
/* Each descriptor array can hold max 64 entries */
#define PDMA_DESCR_COUNT	64

#define MODULE_NAME		"Artpec-6 CA"

/* Hash modes (including HMAC variants) */
#define ARTPEC6_CRYPTO_HASH_SHA1	1
#define ARTPEC6_CRYPTO_HASH_SHA256	2

#define ARTPEC6_CRYPTO_CIPHER_AES_ECB	1
#define ARTPEC6_CRYPTO_CIPHER_AES_CBC	2
#define ARTPEC6_CRYPTO_CIPHER_AES_CTR	3
#define ARTPEC6_CRYPTO_CIPHER_AES_XTS	5
/* The PDMA is a DMA-engine tightly coupled with a ciphering engine.
 * It operates on a descriptor array with up to 64 descriptor entries.
 * The arrays must be 64 byte aligned in memory.
 *
 * The ciphering unit has no registers and is completely controlled by
 * a 4-byte metadata that is inserted at the beginning of each dma packet.
 *
 * A dma packet is a sequence of descriptors terminated by setting the .eop
 * field in the final descriptor of the packet.
 *
 * Multiple packets are used for providing context data, key data and
 * the plain/ciphertext.
 *
 *   PDMA Descriptors (Array)
 *  +------+------+------+~~+-------+------+----
 *  |  0   |  1   |  2   |~~| 11 EOP|  12  |  ....
 *  +--+---+--+---+----+-+~~+-------+----+-+----
 *
 *   __|__  +-------++-------++-------+   +----+
 *  | MD  | |Payload||Payload||Payload|   | MD |
 *  +-----+ +-------++-------++-------+   +----+
 */
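/* Illustrative note: the 4-byte metadata word is assembled with FIELD_PREP()
 * on the *_CRY_MD_* fields defined above, e.g. a key-download packet on
 * ARTPEC-6 starts with FIELD_PREP(A6_CRY_MD_OPER, a6_regk_crypto_dlkey),
 * as done in artpec6_crypto_prepare_crypto() below.
 */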
struct artpec6_crypto_bounce_buffer {
	struct list_head list;
	size_t length;
	struct scatterlist *sg;
	size_t offset;
	/* buf is aligned to ARTPEC_CACHE_LINE_MAX and
	 * holds up to ARTPEC_CACHE_LINE_MAX bytes data.
	 */
	void *buf;
};

struct artpec6_crypto_dma_map {
	dma_addr_t dma_addr;
	size_t size;
	enum dma_data_direction dir;
};

struct artpec6_crypto_dma_descriptors {
	struct pdma_descr out[PDMA_DESCR_COUNT] __aligned(64);
	struct pdma_descr in[PDMA_DESCR_COUNT] __aligned(64);
	u32 stat[PDMA_DESCR_COUNT] __aligned(64);
	struct list_head bounce_buffers;
	/* Enough maps for all out/in buffers, and all three descr. arrays */
	struct artpec6_crypto_dma_map maps[PDMA_DESCR_COUNT * 2 + 2];
	dma_addr_t out_dma_addr;
	dma_addr_t in_dma_addr;
	dma_addr_t stat_dma_addr;
	size_t out_cnt;
	size_t in_cnt;
	size_t map_count;
};
enum artpec6_crypto_variant {
	ARTPEC6_CRYPTO,
	ARTPEC7_CRYPTO,
};

struct artpec6_crypto {
	void __iomem *base;
	spinlock_t queue_lock;
	struct list_head queue; /* waiting for pdma fifo space */
	struct list_head pending; /* submitted to pdma fifo */
	struct tasklet_struct task;
	struct kmem_cache *dma_cache;
	int pending_count;
	struct timer_list timer;
	enum artpec6_crypto_variant variant;
	void *pad_buffer; /* cache-aligned block padding buffer */
};
enum artpec6_crypto_hash_flags {
	HASH_FLAG_INIT_CTX = 2,
	HASH_FLAG_UPDATE = 4,
	HASH_FLAG_FINALIZE = 8,
	HASH_FLAG_HMAC = 16,
	HASH_FLAG_UPDATE_KEY = 32,
};
struct artpec6_crypto_req_common {
	struct list_head list;
	struct list_head complete_in_progress;
	struct artpec6_crypto_dma_descriptors *dma;
	struct crypto_async_request *req;
	void (*complete)(struct crypto_async_request *req);
	gfp_t gfp_flags;
};
struct artpec6_hash_request_context {
	char partial_buffer[SHA256_BLOCK_SIZE];
	char partial_buffer_out[SHA256_BLOCK_SIZE];
	char key_buffer[SHA256_BLOCK_SIZE];
	char pad_buffer[SHA256_BLOCK_SIZE + 32];
	unsigned char digeststate[SHA256_DIGEST_SIZE];
	size_t partial_bytes;
	u64 digcnt;
	u32 key_md;
	u32 hash_md;
	enum artpec6_crypto_hash_flags hash_flags;
	struct artpec6_crypto_req_common common;
};
struct artpec6_hash_export_state {
	char partial_buffer[SHA256_BLOCK_SIZE];
	unsigned char digeststate[SHA256_DIGEST_SIZE];
	size_t partial_bytes;
	u64 digcnt;
	int oper;
	unsigned int hash_flags;
};
struct artpec6_hashalg_context {
	char hmac_key[SHA256_BLOCK_SIZE];
	size_t hmac_key_length;
	struct crypto_shash *child_hash;
};
struct artpec6_crypto_request_context {
	u32 cipher_md;
	bool decrypt;
	struct artpec6_crypto_req_common common;
};

struct artpec6_cryptotfm_context {
	unsigned char aes_key[2*AES_MAX_KEY_SIZE];
	size_t key_length;
	u32 key_md;
	int crypto_type;
	struct crypto_sync_skcipher *fallback;
};

struct artpec6_crypto_aead_hw_ctx {
	__be64 aad_length_bits;
	__be64 text_length_bits;
	__u8 J0[AES_BLOCK_SIZE];
};

struct artpec6_crypto_aead_req_ctx {
	struct artpec6_crypto_aead_hw_ctx hw_ctx;
	u32 cipher_md;
	bool decrypt;
	struct artpec6_crypto_req_common common;
	__u8 decryption_tag[AES_BLOCK_SIZE] ____cacheline_aligned;
};
/* The crypto framework makes it hard to avoid this global. */
static struct device *artpec6_crypto_dev;

#ifdef CONFIG_FAULT_INJECTION
static DECLARE_FAULT_ATTR(artpec6_crypto_fail_status_read);
static DECLARE_FAULT_ATTR(artpec6_crypto_fail_dma_array_full);
#endif

enum {
	ARTPEC6_CRYPTO_PREPARE_HASH_NO_START,
	ARTPEC6_CRYPTO_PREPARE_HASH_START,
};
static int artpec6_crypto_prepare_aead(struct aead_request *areq);
static int artpec6_crypto_prepare_crypto(struct skcipher_request *areq);
static int artpec6_crypto_prepare_hash(struct ahash_request *areq);

static void
artpec6_crypto_complete_crypto(struct crypto_async_request *req);
static void
artpec6_crypto_complete_cbc_encrypt(struct crypto_async_request *req);
static void
artpec6_crypto_complete_cbc_decrypt(struct crypto_async_request *req);
static void
artpec6_crypto_complete_aead(struct crypto_async_request *req);
static void
artpec6_crypto_complete_hash(struct crypto_async_request *req);

static void
artpec6_crypto_common_destroy(struct artpec6_crypto_req_common *common);

static void
artpec6_crypto_start_dma(struct artpec6_crypto_req_common *common);
struct artpec6_crypto_walk {
	struct scatterlist *sg;
	size_t offset;
};

static void artpec6_crypto_walk_init(struct artpec6_crypto_walk *awalk,
				     struct scatterlist *sg)
{
	awalk->sg = sg;
	awalk->offset = 0;
}
static size_t artpec6_crypto_walk_advance(struct artpec6_crypto_walk *awalk,
					  size_t nbytes)
{
	while (nbytes && awalk->sg) {
		size_t piece;

		WARN_ON(awalk->offset > awalk->sg->length);

		piece = min(nbytes, (size_t)awalk->sg->length - awalk->offset);
		nbytes -= piece;
		awalk->offset += piece;
		if (awalk->offset == awalk->sg->length) {
			awalk->sg = sg_next(awalk->sg);
			awalk->offset = 0;
		}
	}

	return nbytes;
}

static size_t
artpec6_crypto_walk_chunklen(const struct artpec6_crypto_walk *awalk)
{
	WARN_ON(awalk->sg->length == awalk->offset);

	return awalk->sg->length - awalk->offset;
}

static dma_addr_t
artpec6_crypto_walk_chunk_phys(const struct artpec6_crypto_walk *awalk)
{
	return sg_phys(awalk->sg) + awalk->offset;
}
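/* Copy data that the hardware delivered into bounce buffers back into the
 * request's scatterlist once the DMA transfer has completed.
 */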
static void
artpec6_crypto_copy_bounce_buffers(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct artpec6_crypto_bounce_buffer *b;
	struct artpec6_crypto_bounce_buffer *next;

	list_for_each_entry_safe(b, next, &dma->bounce_buffers, list) {
		pr_debug("bounce entry %p: %zu bytes @ %zu from %p\n",
			 b, b->length, b->offset, b->buf);
		sg_pcopy_from_buffer(b->sg,
				     1,
				     b->buf,
				     b->length,
				     b->offset);
	}
}
static inline bool artpec6_crypto_busy(void)
{
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	int fifo_count = ac->pending_count;

	return fifo_count > 6;
}
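/* Queue a fully prepared request. If fewer than seven jobs are in flight it
 * is started on the PDMA immediately; otherwise it is placed on the software
 * backlog queue when the caller allows it, or torn down.
 */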
static int artpec6_crypto_submit(struct artpec6_crypto_req_common *req)
{
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	int ret = -EINPROGRESS;

	spin_lock_bh(&ac->queue_lock);

	if (!artpec6_crypto_busy()) {
		list_add_tail(&req->list, &ac->pending);
		artpec6_crypto_start_dma(req);
	} else if (req->req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG) {
		list_add_tail(&req->list, &ac->queue);
		ret = -EBUSY;
	} else {
		artpec6_crypto_common_destroy(req);
		ret = -EBUSY;
	}

	spin_unlock_bh(&ac->queue_lock);

	return ret;
}
479 static void artpec6_crypto_start_dma(struct artpec6_crypto_req_common
*common
)
481 struct artpec6_crypto
*ac
= dev_get_drvdata(artpec6_crypto_dev
);
482 enum artpec6_crypto_variant variant
= ac
->variant
;
483 void __iomem
*base
= ac
->base
;
484 struct artpec6_crypto_dma_descriptors
*dma
= common
->dma
;
485 u32 ind
, statd
, outd
;
487 /* Make descriptor content visible to the DMA before starting it. */
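	/* The queue push registers below are written with writel_relaxed(),
	 * so the descriptor contents must already be visible to the device
	 * at this point, as the note above indicates.
	 */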
490 ind
= FIELD_PREP(PDMA_IN_DESCRQ_PUSH_LEN
, dma
->in_cnt
- 1) |
491 FIELD_PREP(PDMA_IN_DESCRQ_PUSH_ADDR
, dma
->in_dma_addr
>> 6);
493 statd
= FIELD_PREP(PDMA_IN_STATQ_PUSH_LEN
, dma
->in_cnt
- 1) |
494 FIELD_PREP(PDMA_IN_STATQ_PUSH_ADDR
, dma
->stat_dma_addr
>> 6);
496 outd
= FIELD_PREP(PDMA_OUT_DESCRQ_PUSH_LEN
, dma
->out_cnt
- 1) |
497 FIELD_PREP(PDMA_OUT_DESCRQ_PUSH_ADDR
, dma
->out_dma_addr
>> 6);
499 if (variant
== ARTPEC6_CRYPTO
) {
500 writel_relaxed(ind
, base
+ A6_PDMA_IN_DESCRQ_PUSH
);
501 writel_relaxed(statd
, base
+ A6_PDMA_IN_STATQ_PUSH
);
502 writel_relaxed(PDMA_IN_CMD_START
, base
+ A6_PDMA_IN_CMD
);
504 writel_relaxed(ind
, base
+ A7_PDMA_IN_DESCRQ_PUSH
);
505 writel_relaxed(statd
, base
+ A7_PDMA_IN_STATQ_PUSH
);
506 writel_relaxed(PDMA_IN_CMD_START
, base
+ A7_PDMA_IN_CMD
);
509 writel_relaxed(outd
, base
+ PDMA_OUT_DESCRQ_PUSH
);
510 writel_relaxed(PDMA_OUT_CMD_START
, base
+ PDMA_OUT_CMD
);
516 artpec6_crypto_init_dma_operation(struct artpec6_crypto_req_common
*common
)
518 struct artpec6_crypto_dma_descriptors
*dma
= common
->dma
;
523 INIT_LIST_HEAD(&dma
->bounce_buffers
);
526 static bool fault_inject_dma_descr(void)
528 #ifdef CONFIG_FAULT_INJECTION
529 return should_fail(&artpec6_crypto_fail_dma_array_full
, 1);
535 /** artpec6_crypto_setup_out_descr_phys - Setup an out channel with a
538 * @addr: The physical address of the data buffer
539 * @len: The length of the data buffer
540 * @eop: True if this is the last buffer in the packet
542 * @return 0 on success or -ENOSPC if there are no more descriptors available
545 artpec6_crypto_setup_out_descr_phys(struct artpec6_crypto_req_common
*common
,
546 dma_addr_t addr
, size_t len
, bool eop
)
548 struct artpec6_crypto_dma_descriptors
*dma
= common
->dma
;
549 struct pdma_descr
*d
;
551 if (dma
->out_cnt
>= PDMA_DESCR_COUNT
||
552 fault_inject_dma_descr()) {
553 pr_err("No free OUT DMA descriptors available!\n");
557 d
= &dma
->out
[dma
->out_cnt
++];
558 memset(d
, 0, sizeof(*d
));
560 d
->ctrl
.short_descr
= 0;
567 /** artpec6_crypto_setup_out_descr_short - Setup a short out descriptor
569 * @dst: The virtual address of the data
570 * @len: The length of the data, must be between 1 to 7 bytes
571 * @eop: True if this is the last buffer in the packet
573 * @return 0 on success
574 * -ENOSPC if no more descriptors are available
575 * -EINVAL if the data length exceeds 7 bytes
578 artpec6_crypto_setup_out_descr_short(struct artpec6_crypto_req_common
*common
,
579 void *dst
, unsigned int len
, bool eop
)
581 struct artpec6_crypto_dma_descriptors
*dma
= common
->dma
;
582 struct pdma_descr
*d
;
584 if (dma
->out_cnt
>= PDMA_DESCR_COUNT
||
585 fault_inject_dma_descr()) {
586 pr_err("No free OUT DMA descriptors available!\n");
588 } else if (len
> 7 || len
< 1) {
591 d
= &dma
->out
[dma
->out_cnt
++];
592 memset(d
, 0, sizeof(*d
));
594 d
->ctrl
.short_descr
= 1;
595 d
->ctrl
.short_len
= len
;
597 memcpy(d
->shrt
.data
, dst
, len
);
601 static int artpec6_crypto_dma_map_page(struct artpec6_crypto_req_common
*common
,
602 struct page
*page
, size_t offset
,
604 enum dma_data_direction dir
,
605 dma_addr_t
*dma_addr_out
)
607 struct artpec6_crypto_dma_descriptors
*dma
= common
->dma
;
608 struct device
*dev
= artpec6_crypto_dev
;
609 struct artpec6_crypto_dma_map
*map
;
614 if (dma
->map_count
>= ARRAY_SIZE(dma
->maps
))
617 dma_addr
= dma_map_page(dev
, page
, offset
, size
, dir
);
618 if (dma_mapping_error(dev
, dma_addr
))
621 map
= &dma
->maps
[dma
->map_count
++];
623 map
->dma_addr
= dma_addr
;
626 *dma_addr_out
= dma_addr
;
632 artpec6_crypto_dma_map_single(struct artpec6_crypto_req_common
*common
,
633 void *ptr
, size_t size
,
634 enum dma_data_direction dir
,
635 dma_addr_t
*dma_addr_out
)
637 struct page
*page
= virt_to_page(ptr
);
638 size_t offset
= (uintptr_t)ptr
& ~PAGE_MASK
;
640 return artpec6_crypto_dma_map_page(common
, page
, offset
, size
, dir
,
645 artpec6_crypto_dma_map_descs(struct artpec6_crypto_req_common
*common
)
647 struct artpec6_crypto_dma_descriptors
*dma
= common
->dma
;
650 ret
= artpec6_crypto_dma_map_single(common
, dma
->in
,
651 sizeof(dma
->in
[0]) * dma
->in_cnt
,
652 DMA_TO_DEVICE
, &dma
->in_dma_addr
);
656 ret
= artpec6_crypto_dma_map_single(common
, dma
->out
,
657 sizeof(dma
->out
[0]) * dma
->out_cnt
,
658 DMA_TO_DEVICE
, &dma
->out_dma_addr
);
	/* We only read one stat descriptor */
	dma->stat[dma->in_cnt - 1] = 0;

	/*
	 * DMA_BIDIRECTIONAL since we need our zeroing of the stat descriptor
	 * to be visible to the device, and the status the device writes back
	 * to be visible to the CPU.
	 */
	return artpec6_crypto_dma_map_single(common,
				dma->stat,
				sizeof(dma->stat[0]) * dma->in_cnt,
				DMA_BIDIRECTIONAL,
				&dma->stat_dma_addr);
677 artpec6_crypto_dma_unmap_all(struct artpec6_crypto_req_common
*common
)
679 struct artpec6_crypto_dma_descriptors
*dma
= common
->dma
;
680 struct device
*dev
= artpec6_crypto_dev
;
683 for (i
= 0; i
< dma
->map_count
; i
++) {
684 struct artpec6_crypto_dma_map
*map
= &dma
->maps
[i
];
686 dma_unmap_page(dev
, map
->dma_addr
, map
->size
, map
->dir
);
692 /** artpec6_crypto_setup_out_descr - Setup an out descriptor
694 * @dst: The virtual address of the data
695 * @len: The length of the data
696 * @eop: True if this is the last buffer in the packet
697 * @use_short: If this is true and the data length is 7 bytes or less then
698 * a short descriptor will be used
700 * @return 0 on success
701 * Any errors from artpec6_crypto_setup_out_descr_short() or
702 * setup_out_descr_phys()
705 artpec6_crypto_setup_out_descr(struct artpec6_crypto_req_common
*common
,
706 void *dst
, unsigned int len
, bool eop
,
709 if (use_short
&& len
< 7) {
710 return artpec6_crypto_setup_out_descr_short(common
, dst
, len
,
716 ret
= artpec6_crypto_dma_map_single(common
, dst
, len
,
722 return artpec6_crypto_setup_out_descr_phys(common
, dma_addr
,
727 /** artpec6_crypto_setup_in_descr_phys - Setup an in channel with a
730 * @addr: The physical address of the data buffer
731 * @len: The length of the data buffer
732 * @intr: True if an interrupt should be fired after HW processing of this
737 artpec6_crypto_setup_in_descr_phys(struct artpec6_crypto_req_common
*common
,
738 dma_addr_t addr
, unsigned int len
, bool intr
)
740 struct artpec6_crypto_dma_descriptors
*dma
= common
->dma
;
741 struct pdma_descr
*d
;
743 if (dma
->in_cnt
>= PDMA_DESCR_COUNT
||
744 fault_inject_dma_descr()) {
745 pr_err("No free IN DMA descriptors available!\n");
748 d
= &dma
->in
[dma
->in_cnt
++];
749 memset(d
, 0, sizeof(*d
));
757 /** artpec6_crypto_setup_in_descr - Setup an in channel descriptor
 * @buffer: The virtual address of the data buffer
760 * @len: The length of the data buffer
761 * @last: If this is the last data buffer in the request (i.e. an interrupt
764 * Short descriptors are not used for the in channel
767 artpec6_crypto_setup_in_descr(struct artpec6_crypto_req_common
*common
,
768 void *buffer
, unsigned int len
, bool last
)
773 ret
= artpec6_crypto_dma_map_single(common
, buffer
, len
,
774 DMA_FROM_DEVICE
, &dma_addr
);
778 return artpec6_crypto_setup_in_descr_phys(common
, dma_addr
, len
, last
);
781 static struct artpec6_crypto_bounce_buffer
*
782 artpec6_crypto_alloc_bounce(gfp_t flags
)
785 size_t alloc_size
= sizeof(struct artpec6_crypto_bounce_buffer
) +
786 2 * ARTPEC_CACHE_LINE_MAX
;
787 struct artpec6_crypto_bounce_buffer
*bbuf
= kzalloc(alloc_size
, flags
);
793 bbuf
->buf
= PTR_ALIGN(base
, ARTPEC_CACHE_LINE_MAX
);
797 static int setup_bounce_buffer_in(struct artpec6_crypto_req_common
*common
,
798 struct artpec6_crypto_walk
*walk
, size_t size
)
800 struct artpec6_crypto_bounce_buffer
*bbuf
;
803 bbuf
= artpec6_crypto_alloc_bounce(common
->gfp_flags
);
809 bbuf
->offset
= walk
->offset
;
811 ret
= artpec6_crypto_setup_in_descr(common
, bbuf
->buf
, size
, false);
817 pr_debug("BOUNCE %zu offset %zu\n", size
, walk
->offset
);
818 list_add_tail(&bbuf
->list
, &common
->dma
->bounce_buffers
);
823 artpec6_crypto_setup_sg_descrs_in(struct artpec6_crypto_req_common
*common
,
824 struct artpec6_crypto_walk
*walk
,
831 while (walk
->sg
&& count
) {
832 chunk
= min(count
, artpec6_crypto_walk_chunklen(walk
));
833 addr
= artpec6_crypto_walk_chunk_phys(walk
);
		/* When destination buffers are not aligned to the cache line
		 * size we need bounce buffers. The DMA-API requires that the
		 * entire line is owned by the DMA buffer and this holds also
		 * for the case when coherent DMA is used.
		 */
840 if (!IS_ALIGNED(addr
, ARTPEC_CACHE_LINE_MAX
)) {
841 chunk
= min_t(dma_addr_t
, chunk
,
842 ALIGN(addr
, ARTPEC_CACHE_LINE_MAX
) -
845 pr_debug("CHUNK-b %pad:%zu\n", &addr
, chunk
);
846 ret
= setup_bounce_buffer_in(common
, walk
, chunk
);
847 } else if (chunk
< ARTPEC_CACHE_LINE_MAX
) {
848 pr_debug("CHUNK-b %pad:%zu\n", &addr
, chunk
);
849 ret
= setup_bounce_buffer_in(common
, walk
, chunk
);
853 chunk
= chunk
& ~(ARTPEC_CACHE_LINE_MAX
-1);
855 pr_debug("CHUNK %pad:%zu\n", &addr
, chunk
);
857 ret
= artpec6_crypto_dma_map_page(common
,
867 ret
= artpec6_crypto_setup_in_descr_phys(common
,
875 count
= count
- chunk
;
876 artpec6_crypto_walk_advance(walk
, chunk
);
880 pr_err("EOL unexpected %zu bytes left\n", count
);
882 return count
? -EINVAL
: 0;
886 artpec6_crypto_setup_sg_descrs_out(struct artpec6_crypto_req_common
*common
,
887 struct artpec6_crypto_walk
*walk
,
894 while (walk
->sg
&& count
) {
895 chunk
= min(count
, artpec6_crypto_walk_chunklen(walk
));
896 addr
= artpec6_crypto_walk_chunk_phys(walk
);
898 pr_debug("OUT-CHUNK %pad:%zu\n", &addr
, chunk
);
903 chunk
= min_t(size_t, chunk
, (4-(addr
&3)));
905 sg_pcopy_to_buffer(walk
->sg
, 1, buf
, chunk
,
908 ret
= artpec6_crypto_setup_out_descr_short(common
, buf
,
914 ret
= artpec6_crypto_dma_map_page(common
,
924 ret
= artpec6_crypto_setup_out_descr_phys(common
,
932 count
= count
- chunk
;
933 artpec6_crypto_walk_advance(walk
, chunk
);
937 pr_err("EOL unexpected %zu bytes left\n", count
);
939 return count
? -EINVAL
: 0;
943 /** artpec6_crypto_terminate_out_descrs - Set the EOP on the last out descriptor
945 * If the out descriptor list is non-empty, then the eop flag on the
946 * last used out descriptor will be set.
948 * @return 0 on success
949 * -EINVAL if the out descriptor is empty or has overflown
952 artpec6_crypto_terminate_out_descrs(struct artpec6_crypto_req_common
*common
)
954 struct artpec6_crypto_dma_descriptors
*dma
= common
->dma
;
955 struct pdma_descr
*d
;
957 if (!dma
->out_cnt
|| dma
->out_cnt
> PDMA_DESCR_COUNT
) {
		pr_err("%s: OUT descriptor list is %s\n",
			MODULE_NAME, dma->out_cnt ? "full" : "empty");
964 d
= &dma
->out
[dma
->out_cnt
-1];
970 /** artpec6_crypto_terminate_in_descrs - Set the interrupt flag on the last
973 * See artpec6_crypto_terminate_out_descrs() for return values
976 artpec6_crypto_terminate_in_descrs(struct artpec6_crypto_req_common
*common
)
978 struct artpec6_crypto_dma_descriptors
*dma
= common
->dma
;
979 struct pdma_descr
*d
;
981 if (!dma
->in_cnt
|| dma
->in_cnt
> PDMA_DESCR_COUNT
) {
		pr_err("%s: IN descriptor list is %s\n",
			MODULE_NAME, dma->in_cnt ? "full" : "empty");
987 d
= &dma
->in
[dma
->in_cnt
-1];
/** create_hash_pad - Create a Secure Hash conformant pad
 *
 * @dst:      The destination buffer to write the pad. Must be at least 64 bytes
 * @dgstlen:  The number of bytes hashed so far, used to size the pad
 * @bitcount: The number of bits hashed, written into the length field of the pad
 *
 * @return The total number of padding bytes written to @dst
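 *
 * Example (standard SHA-256 padding, 64-byte block): after 10 bytes of
 * message, the pad is one 0x80 byte, 45 zero bytes and the 8-byte big-endian
 * bit count, so 54 bytes are written and the padded stream ends exactly on a
 * block boundary.
 */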
1001 create_hash_pad(int oper
, unsigned char *dst
, u64 dgstlen
, u64 bitcount
)
1003 unsigned int mod
, target
, diff
, pad_bytes
, size_bytes
;
1004 __be64 bits
= __cpu_to_be64(bitcount
);
1007 case regk_crypto_sha1
:
1008 case regk_crypto_sha256
:
1009 case regk_crypto_hmac_sha1
:
1010 case regk_crypto_hmac_sha256
:
1023 diff
= dgstlen
& (mod
- 1);
1024 pad_bytes
= diff
> target
? target
+ mod
- diff
: target
- diff
;
1026 memset(dst
+ 1, 0, pad_bytes
);
1029 if (size_bytes
== 16) {
1030 memset(dst
+ 1 + pad_bytes
, 0, 8);
1031 memcpy(dst
+ 1 + pad_bytes
+ 8, &bits
, 8);
1033 memcpy(dst
+ 1 + pad_bytes
, &bits
, 8);
1036 return pad_bytes
+ size_bytes
+ 1;
1039 static int artpec6_crypto_common_init(struct artpec6_crypto_req_common
*common
,
1040 struct crypto_async_request
*parent
,
1041 void (*complete
)(struct crypto_async_request
*req
),
1042 struct scatterlist
*dstsg
, unsigned int nbytes
)
1045 struct artpec6_crypto
*ac
= dev_get_drvdata(artpec6_crypto_dev
);
1047 flags
= (parent
->flags
& CRYPTO_TFM_REQ_MAY_SLEEP
) ?
1048 GFP_KERNEL
: GFP_ATOMIC
;
1050 common
->gfp_flags
= flags
;
1051 common
->dma
= kmem_cache_alloc(ac
->dma_cache
, flags
);
1055 common
->req
= parent
;
1056 common
->complete
= complete
;
1061 artpec6_crypto_bounce_destroy(struct artpec6_crypto_dma_descriptors
*dma
)
1063 struct artpec6_crypto_bounce_buffer
*b
;
1064 struct artpec6_crypto_bounce_buffer
*next
;
1066 list_for_each_entry_safe(b
, next
, &dma
->bounce_buffers
, list
) {
1072 artpec6_crypto_common_destroy(struct artpec6_crypto_req_common
*common
)
1074 struct artpec6_crypto
*ac
= dev_get_drvdata(artpec6_crypto_dev
);
1076 artpec6_crypto_dma_unmap_all(common
);
1077 artpec6_crypto_bounce_destroy(common
->dma
);
1078 kmem_cache_free(ac
->dma_cache
, common
->dma
);
1084 * Ciphering functions.
1086 static int artpec6_crypto_encrypt(struct skcipher_request
*req
)
1088 struct crypto_skcipher
*cipher
= crypto_skcipher_reqtfm(req
);
1089 struct artpec6_cryptotfm_context
*ctx
= crypto_skcipher_ctx(cipher
);
1090 struct artpec6_crypto_request_context
*req_ctx
= NULL
;
1091 void (*complete
)(struct crypto_async_request
*req
);
1094 req_ctx
= skcipher_request_ctx(req
);
1096 switch (ctx
->crypto_type
) {
1097 case ARTPEC6_CRYPTO_CIPHER_AES_CBC
:
1098 case ARTPEC6_CRYPTO_CIPHER_AES_ECB
:
1099 case ARTPEC6_CRYPTO_CIPHER_AES_XTS
:
1100 req_ctx
->decrypt
= 0;
1106 switch (ctx
->crypto_type
) {
1107 case ARTPEC6_CRYPTO_CIPHER_AES_CBC
:
1108 complete
= artpec6_crypto_complete_cbc_encrypt
;
1111 complete
= artpec6_crypto_complete_crypto
;
1115 ret
= artpec6_crypto_common_init(&req_ctx
->common
,
1118 req
->dst
, req
->cryptlen
);
1122 ret
= artpec6_crypto_prepare_crypto(req
);
1124 artpec6_crypto_common_destroy(&req_ctx
->common
);
1128 return artpec6_crypto_submit(&req_ctx
->common
);
1131 static int artpec6_crypto_decrypt(struct skcipher_request
*req
)
1134 struct crypto_skcipher
*cipher
= crypto_skcipher_reqtfm(req
);
1135 struct artpec6_cryptotfm_context
*ctx
= crypto_skcipher_ctx(cipher
);
1136 struct artpec6_crypto_request_context
*req_ctx
= NULL
;
1137 void (*complete
)(struct crypto_async_request
*req
);
1139 req_ctx
= skcipher_request_ctx(req
);
1141 switch (ctx
->crypto_type
) {
1142 case ARTPEC6_CRYPTO_CIPHER_AES_CBC
:
1143 case ARTPEC6_CRYPTO_CIPHER_AES_ECB
:
1144 case ARTPEC6_CRYPTO_CIPHER_AES_XTS
:
1145 req_ctx
->decrypt
= 1;
1152 switch (ctx
->crypto_type
) {
1153 case ARTPEC6_CRYPTO_CIPHER_AES_CBC
:
1154 complete
= artpec6_crypto_complete_cbc_decrypt
;
1157 complete
= artpec6_crypto_complete_crypto
;
1161 ret
= artpec6_crypto_common_init(&req_ctx
->common
, &req
->base
,
1163 req
->dst
, req
->cryptlen
);
1167 ret
= artpec6_crypto_prepare_crypto(req
);
1169 artpec6_crypto_common_destroy(&req_ctx
->common
);
1173 return artpec6_crypto_submit(&req_ctx
->common
);
1177 artpec6_crypto_ctr_crypt(struct skcipher_request
*req
, bool encrypt
)
1179 struct crypto_skcipher
*cipher
= crypto_skcipher_reqtfm(req
);
1180 struct artpec6_cryptotfm_context
*ctx
= crypto_skcipher_ctx(cipher
);
1181 size_t iv_len
= crypto_skcipher_ivsize(cipher
);
1182 unsigned int counter
= be32_to_cpup((__be32
*)
1183 (req
->iv
+ iv_len
- 4));
1184 unsigned int nblks
= ALIGN(req
->cryptlen
, AES_BLOCK_SIZE
) /
	/*
	 * The hardware uses only the last 32-bits as the counter while the
	 * kernel tests (aes_ctr_enc_tv_template[4] for example) expect that
	 * the whole IV is a counter. So fallback if the counter is going to
	 * overflow.
	 */
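	/* Example: with an IV whose low 32 bits are 0xffffffff, a two-block
	 * request would wrap the 32-bit hardware counter mid-message, so the
	 * request is handed to the software fallback below.
	 */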
1193 if (counter
+ nblks
< counter
) {
1196 pr_debug("counter %x will overflow (nblks %u), falling back\n",
1197 counter
, counter
+ nblks
);
1199 ret
= crypto_sync_skcipher_setkey(ctx
->fallback
, ctx
->aes_key
,
1205 SYNC_SKCIPHER_REQUEST_ON_STACK(subreq
, ctx
->fallback
);
1207 skcipher_request_set_sync_tfm(subreq
, ctx
->fallback
);
1208 skcipher_request_set_callback(subreq
, req
->base
.flags
,
1210 skcipher_request_set_crypt(subreq
, req
->src
, req
->dst
,
1211 req
->cryptlen
, req
->iv
);
1212 ret
= encrypt
? crypto_skcipher_encrypt(subreq
)
1213 : crypto_skcipher_decrypt(subreq
);
1214 skcipher_request_zero(subreq
);
1219 return encrypt
? artpec6_crypto_encrypt(req
)
1220 : artpec6_crypto_decrypt(req
);
1223 static int artpec6_crypto_ctr_encrypt(struct skcipher_request
*req
)
1225 return artpec6_crypto_ctr_crypt(req
, true);
1228 static int artpec6_crypto_ctr_decrypt(struct skcipher_request
*req
)
1230 return artpec6_crypto_ctr_crypt(req
, false);
1236 static int artpec6_crypto_aead_init(struct crypto_aead
*tfm
)
1238 struct artpec6_cryptotfm_context
*tfm_ctx
= crypto_aead_ctx(tfm
);
1240 memset(tfm_ctx
, 0, sizeof(*tfm_ctx
));
1242 crypto_aead_set_reqsize(tfm
,
1243 sizeof(struct artpec6_crypto_aead_req_ctx
));
1248 static int artpec6_crypto_aead_set_key(struct crypto_aead
*tfm
, const u8
*key
,
1251 struct artpec6_cryptotfm_context
*ctx
= crypto_tfm_ctx(&tfm
->base
);
1253 if (len
!= 16 && len
!= 24 && len
!= 32)
1256 ctx
->key_length
= len
;
1258 memcpy(ctx
->aes_key
, key
, len
);
1262 static int artpec6_crypto_aead_encrypt(struct aead_request
*req
)
1265 struct artpec6_crypto_aead_req_ctx
*req_ctx
= aead_request_ctx(req
);
1267 req_ctx
->decrypt
= false;
1268 ret
= artpec6_crypto_common_init(&req_ctx
->common
, &req
->base
,
1269 artpec6_crypto_complete_aead
,
1274 ret
= artpec6_crypto_prepare_aead(req
);
1276 artpec6_crypto_common_destroy(&req_ctx
->common
);
1280 return artpec6_crypto_submit(&req_ctx
->common
);
1283 static int artpec6_crypto_aead_decrypt(struct aead_request
*req
)
1286 struct artpec6_crypto_aead_req_ctx
*req_ctx
= aead_request_ctx(req
);
1288 req_ctx
->decrypt
= true;
1289 if (req
->cryptlen
< AES_BLOCK_SIZE
)
1292 ret
= artpec6_crypto_common_init(&req_ctx
->common
,
1294 artpec6_crypto_complete_aead
,
1299 ret
= artpec6_crypto_prepare_aead(req
);
1301 artpec6_crypto_common_destroy(&req_ctx
->common
);
1305 return artpec6_crypto_submit(&req_ctx
->common
);
1308 static int artpec6_crypto_prepare_hash(struct ahash_request
*areq
)
1310 struct artpec6_hashalg_context
*ctx
= crypto_tfm_ctx(areq
->base
.tfm
);
1311 struct artpec6_hash_request_context
*req_ctx
= ahash_request_ctx(areq
);
1312 size_t digestsize
= crypto_ahash_digestsize(crypto_ahash_reqtfm(areq
));
1313 size_t contextsize
= digestsize
;
1314 size_t blocksize
= crypto_tfm_alg_blocksize(
1315 crypto_ahash_tfm(crypto_ahash_reqtfm(areq
)));
1316 struct artpec6_crypto_req_common
*common
= &req_ctx
->common
;
1317 struct artpec6_crypto
*ac
= dev_get_drvdata(artpec6_crypto_dev
);
1318 enum artpec6_crypto_variant variant
= ac
->variant
;
1320 bool ext_ctx
= false;
1321 bool run_hw
= false;
1324 artpec6_crypto_init_dma_operation(common
);
	/* Upload the HMAC key; it must be the first packet. */
1327 if (req_ctx
->hash_flags
& HASH_FLAG_HMAC
) {
1328 if (variant
== ARTPEC6_CRYPTO
) {
1329 req_ctx
->key_md
= FIELD_PREP(A6_CRY_MD_OPER
,
1330 a6_regk_crypto_dlkey
);
1332 req_ctx
->key_md
= FIELD_PREP(A7_CRY_MD_OPER
,
1333 a7_regk_crypto_dlkey
);
1336 /* Copy and pad up the key */
1337 memcpy(req_ctx
->key_buffer
, ctx
->hmac_key
,
1338 ctx
->hmac_key_length
);
1339 memset(req_ctx
->key_buffer
+ ctx
->hmac_key_length
, 0,
1340 blocksize
- ctx
->hmac_key_length
);
1342 error
= artpec6_crypto_setup_out_descr(common
,
1343 (void *)&req_ctx
->key_md
,
1344 sizeof(req_ctx
->key_md
), false, false);
1348 error
= artpec6_crypto_setup_out_descr(common
,
1349 req_ctx
->key_buffer
, blocksize
,
1355 if (!(req_ctx
->hash_flags
& HASH_FLAG_INIT_CTX
)) {
1356 /* Restore context */
1357 sel_ctx
= regk_crypto_ext
;
1360 sel_ctx
= regk_crypto_init
;
1363 if (variant
== ARTPEC6_CRYPTO
) {
1364 req_ctx
->hash_md
&= ~A6_CRY_MD_HASH_SEL_CTX
;
1365 req_ctx
->hash_md
|= FIELD_PREP(A6_CRY_MD_HASH_SEL_CTX
, sel_ctx
);
1367 /* If this is the final round, set the final flag */
1368 if (req_ctx
->hash_flags
& HASH_FLAG_FINALIZE
)
1369 req_ctx
->hash_md
|= A6_CRY_MD_HASH_HMAC_FIN
;
1371 req_ctx
->hash_md
&= ~A7_CRY_MD_HASH_SEL_CTX
;
1372 req_ctx
->hash_md
|= FIELD_PREP(A7_CRY_MD_HASH_SEL_CTX
, sel_ctx
);
1374 /* If this is the final round, set the final flag */
1375 if (req_ctx
->hash_flags
& HASH_FLAG_FINALIZE
)
1376 req_ctx
->hash_md
|= A7_CRY_MD_HASH_HMAC_FIN
;
1379 /* Setup up metadata descriptors */
1380 error
= artpec6_crypto_setup_out_descr(common
,
1381 (void *)&req_ctx
->hash_md
,
1382 sizeof(req_ctx
->hash_md
), false, false);
1386 error
= artpec6_crypto_setup_in_descr(common
, ac
->pad_buffer
, 4, false);
1391 error
= artpec6_crypto_setup_out_descr(common
,
1392 req_ctx
->digeststate
,
1393 contextsize
, false, false);
1399 if (req_ctx
->hash_flags
& HASH_FLAG_UPDATE
) {
1400 size_t done_bytes
= 0;
1401 size_t total_bytes
= areq
->nbytes
+ req_ctx
->partial_bytes
;
1402 size_t ready_bytes
= round_down(total_bytes
, blocksize
);
1403 struct artpec6_crypto_walk walk
;
1405 run_hw
= ready_bytes
> 0;
1406 if (req_ctx
->partial_bytes
&& ready_bytes
) {
			/* We have a partial buffer and will send at least some
			 * bytes to the HW. Empty this partial buffer before
			 * tackling the SG lists.
			 */
1411 memcpy(req_ctx
->partial_buffer_out
,
1412 req_ctx
->partial_buffer
,
1413 req_ctx
->partial_bytes
);
1415 error
= artpec6_crypto_setup_out_descr(common
,
1416 req_ctx
->partial_buffer_out
,
1417 req_ctx
->partial_bytes
,
1422 /* Reset partial buffer */
1423 done_bytes
+= req_ctx
->partial_bytes
;
1424 req_ctx
->partial_bytes
= 0;
1427 artpec6_crypto_walk_init(&walk
, areq
->src
);
1429 error
= artpec6_crypto_setup_sg_descrs_out(common
, &walk
,
1436 size_t sg_skip
= ready_bytes
- done_bytes
;
1437 size_t sg_rem
= areq
->nbytes
- sg_skip
;
1439 sg_pcopy_to_buffer(areq
->src
, sg_nents(areq
->src
),
1440 req_ctx
->partial_buffer
+
1441 req_ctx
->partial_bytes
,
1444 req_ctx
->partial_bytes
+= sg_rem
;
1447 req_ctx
->digcnt
+= ready_bytes
;
1448 req_ctx
->hash_flags
&= ~(HASH_FLAG_UPDATE
);
1452 if (req_ctx
->hash_flags
& HASH_FLAG_FINALIZE
) {
1453 size_t hash_pad_len
;
1457 if (variant
== ARTPEC6_CRYPTO
)
1458 oper
= FIELD_GET(A6_CRY_MD_OPER
, req_ctx
->hash_md
);
1460 oper
= FIELD_GET(A7_CRY_MD_OPER
, req_ctx
->hash_md
);
1462 /* Write out the partial buffer if present */
1463 if (req_ctx
->partial_bytes
) {
1464 memcpy(req_ctx
->partial_buffer_out
,
1465 req_ctx
->partial_buffer
,
1466 req_ctx
->partial_bytes
);
1467 error
= artpec6_crypto_setup_out_descr(common
,
1468 req_ctx
->partial_buffer_out
,
1469 req_ctx
->partial_bytes
,
1474 req_ctx
->digcnt
+= req_ctx
->partial_bytes
;
1475 req_ctx
->partial_bytes
= 0;
1478 if (req_ctx
->hash_flags
& HASH_FLAG_HMAC
)
1479 digest_bits
= 8 * (req_ctx
->digcnt
+ blocksize
);
1481 digest_bits
= 8 * req_ctx
->digcnt
;
1483 /* Add the hash pad */
1484 hash_pad_len
= create_hash_pad(oper
, req_ctx
->pad_buffer
,
1485 req_ctx
->digcnt
, digest_bits
);
1486 error
= artpec6_crypto_setup_out_descr(common
,
1487 req_ctx
->pad_buffer
,
1488 hash_pad_len
, false,
1490 req_ctx
->digcnt
= 0;
1495 /* Descriptor for the final result */
1496 error
= artpec6_crypto_setup_in_descr(common
, areq
->result
,
1502 } else { /* This is not the final operation for this request */
1504 return ARTPEC6_CRYPTO_PREPARE_HASH_NO_START
;
1506 /* Save the result to the context */
1507 error
= artpec6_crypto_setup_in_descr(common
,
1508 req_ctx
->digeststate
,
1509 contextsize
, false);
1515 req_ctx
->hash_flags
&= ~(HASH_FLAG_INIT_CTX
| HASH_FLAG_UPDATE
|
1516 HASH_FLAG_FINALIZE
);
1518 error
= artpec6_crypto_terminate_in_descrs(common
);
1522 error
= artpec6_crypto_terminate_out_descrs(common
);
1526 error
= artpec6_crypto_dma_map_descs(common
);
1530 return ARTPEC6_CRYPTO_PREPARE_HASH_START
;
1534 static int artpec6_crypto_aes_ecb_init(struct crypto_skcipher
*tfm
)
1536 struct artpec6_cryptotfm_context
*ctx
= crypto_skcipher_ctx(tfm
);
1538 crypto_skcipher_set_reqsize(tfm
,
1539 sizeof(struct artpec6_crypto_request_context
));
1540 ctx
->crypto_type
= ARTPEC6_CRYPTO_CIPHER_AES_ECB
;
1545 static int artpec6_crypto_aes_ctr_init(struct crypto_skcipher
*tfm
)
1547 struct artpec6_cryptotfm_context
*ctx
= crypto_skcipher_ctx(tfm
);
1550 crypto_alloc_sync_skcipher(crypto_tfm_alg_name(&tfm
->base
),
1551 0, CRYPTO_ALG_NEED_FALLBACK
);
1552 if (IS_ERR(ctx
->fallback
))
1553 return PTR_ERR(ctx
->fallback
);
1555 crypto_skcipher_set_reqsize(tfm
,
1556 sizeof(struct artpec6_crypto_request_context
));
1557 ctx
->crypto_type
= ARTPEC6_CRYPTO_CIPHER_AES_CTR
;
1562 static int artpec6_crypto_aes_cbc_init(struct crypto_skcipher
*tfm
)
1564 struct artpec6_cryptotfm_context
*ctx
= crypto_skcipher_ctx(tfm
);
1566 crypto_skcipher_set_reqsize(tfm
,
1567 sizeof(struct artpec6_crypto_request_context
));
1568 ctx
->crypto_type
= ARTPEC6_CRYPTO_CIPHER_AES_CBC
;
1573 static int artpec6_crypto_aes_xts_init(struct crypto_skcipher
*tfm
)
1575 struct artpec6_cryptotfm_context
*ctx
= crypto_skcipher_ctx(tfm
);
1577 crypto_skcipher_set_reqsize(tfm
,
1578 sizeof(struct artpec6_crypto_request_context
));
1579 ctx
->crypto_type
= ARTPEC6_CRYPTO_CIPHER_AES_XTS
;
1584 static void artpec6_crypto_aes_exit(struct crypto_skcipher
*tfm
)
1586 struct artpec6_cryptotfm_context
*ctx
= crypto_skcipher_ctx(tfm
);
1588 memset(ctx
, 0, sizeof(*ctx
));
1591 static void artpec6_crypto_aes_ctr_exit(struct crypto_skcipher
*tfm
)
1593 struct artpec6_cryptotfm_context
*ctx
= crypto_skcipher_ctx(tfm
);
1595 crypto_free_sync_skcipher(ctx
->fallback
);
1596 artpec6_crypto_aes_exit(tfm
);
1600 artpec6_crypto_cipher_set_key(struct crypto_skcipher
*cipher
, const u8
*key
,
1601 unsigned int keylen
)
1603 struct artpec6_cryptotfm_context
*ctx
=
1604 crypto_skcipher_ctx(cipher
);
1615 memcpy(ctx
->aes_key
, key
, keylen
);
1616 ctx
->key_length
= keylen
;
1621 artpec6_crypto_xts_set_key(struct crypto_skcipher
*cipher
, const u8
*key
,
1622 unsigned int keylen
)
1624 struct artpec6_cryptotfm_context
*ctx
=
1625 crypto_skcipher_ctx(cipher
);
1628 ret
= xts_verify_key(cipher
, key
, keylen
);
1641 memcpy(ctx
->aes_key
, key
, keylen
);
1642 ctx
->key_length
= keylen
;
/** artpec6_crypto_prepare_crypto - Prepare an async block cipher crypto request
1648 * @req: The asynch request to process
1650 * @return 0 if the dma job was successfully prepared
1653 * This function sets up the PDMA descriptors for a block cipher request.
1655 * The required padding is added for AES-CTR using a statically defined
1658 * The PDMA descriptor list will be as follows:
1660 * OUT: [KEY_MD][KEY][EOP]<CIPHER_MD>[IV]<data_0>...[data_n][AES-CTR_pad]<eop>
1661 * IN: <CIPHER_MD><data_0>...[data_n]<intr>
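 *
 * (Which elements are present depends on the mode: the IV descriptor is only
 * sent for modes that use an IV, and the trailing pad only for AES-CTR/XTS;
 * see the code below.)
 */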
1664 static int artpec6_crypto_prepare_crypto(struct skcipher_request
*areq
)
1667 struct artpec6_crypto_walk walk
;
1668 struct crypto_skcipher
*cipher
= crypto_skcipher_reqtfm(areq
);
1669 struct artpec6_cryptotfm_context
*ctx
= crypto_skcipher_ctx(cipher
);
1670 struct artpec6_crypto_request_context
*req_ctx
= NULL
;
1671 size_t iv_len
= crypto_skcipher_ivsize(cipher
);
1672 struct artpec6_crypto
*ac
= dev_get_drvdata(artpec6_crypto_dev
);
1673 enum artpec6_crypto_variant variant
= ac
->variant
;
1674 struct artpec6_crypto_req_common
*common
;
1675 bool cipher_decr
= false;
1677 u32 cipher_len
= 0; /* Same as regk_crypto_key_128 for NULL crypto */
1680 req_ctx
= skcipher_request_ctx(areq
);
1681 common
= &req_ctx
->common
;
1683 artpec6_crypto_init_dma_operation(common
);
1685 if (variant
== ARTPEC6_CRYPTO
)
1686 ctx
->key_md
= FIELD_PREP(A6_CRY_MD_OPER
, a6_regk_crypto_dlkey
);
1688 ctx
->key_md
= FIELD_PREP(A7_CRY_MD_OPER
, a7_regk_crypto_dlkey
);
1690 ret
= artpec6_crypto_setup_out_descr(common
, (void *)&ctx
->key_md
,
1691 sizeof(ctx
->key_md
), false, false);
1695 ret
= artpec6_crypto_setup_out_descr(common
, ctx
->aes_key
,
1696 ctx
->key_length
, true, false);
1700 req_ctx
->cipher_md
= 0;
1702 if (ctx
->crypto_type
== ARTPEC6_CRYPTO_CIPHER_AES_XTS
)
1703 cipher_klen
= ctx
->key_length
/2;
1705 cipher_klen
= ctx
->key_length
;
1708 switch (cipher_klen
) {
1710 cipher_len
= regk_crypto_key_128
;
1713 cipher_len
= regk_crypto_key_192
;
1716 cipher_len
= regk_crypto_key_256
;
1719 pr_err("%s: Invalid key length %zu!\n",
1720 MODULE_NAME
, ctx
->key_length
);
1724 switch (ctx
->crypto_type
) {
1725 case ARTPEC6_CRYPTO_CIPHER_AES_ECB
:
1726 oper
= regk_crypto_aes_ecb
;
1727 cipher_decr
= req_ctx
->decrypt
;
1730 case ARTPEC6_CRYPTO_CIPHER_AES_CBC
:
1731 oper
= regk_crypto_aes_cbc
;
1732 cipher_decr
= req_ctx
->decrypt
;
1735 case ARTPEC6_CRYPTO_CIPHER_AES_CTR
:
1736 oper
= regk_crypto_aes_ctr
;
1737 cipher_decr
= false;
1740 case ARTPEC6_CRYPTO_CIPHER_AES_XTS
:
1741 oper
= regk_crypto_aes_xts
;
1742 cipher_decr
= req_ctx
->decrypt
;
1744 if (variant
== ARTPEC6_CRYPTO
)
1745 req_ctx
->cipher_md
|= A6_CRY_MD_CIPHER_DSEQ
;
1747 req_ctx
->cipher_md
|= A7_CRY_MD_CIPHER_DSEQ
;
1751 pr_err("%s: Invalid cipher mode %d!\n",
1752 MODULE_NAME
, ctx
->crypto_type
);
1756 if (variant
== ARTPEC6_CRYPTO
) {
1757 req_ctx
->cipher_md
|= FIELD_PREP(A6_CRY_MD_OPER
, oper
);
1758 req_ctx
->cipher_md
|= FIELD_PREP(A6_CRY_MD_CIPHER_LEN
,
1761 req_ctx
->cipher_md
|= A6_CRY_MD_CIPHER_DECR
;
1763 req_ctx
->cipher_md
|= FIELD_PREP(A7_CRY_MD_OPER
, oper
);
1764 req_ctx
->cipher_md
|= FIELD_PREP(A7_CRY_MD_CIPHER_LEN
,
1767 req_ctx
->cipher_md
|= A7_CRY_MD_CIPHER_DECR
;
1770 ret
= artpec6_crypto_setup_out_descr(common
,
1771 &req_ctx
->cipher_md
,
1772 sizeof(req_ctx
->cipher_md
),
1777 ret
= artpec6_crypto_setup_in_descr(common
, ac
->pad_buffer
, 4, false);
1782 ret
= artpec6_crypto_setup_out_descr(common
, areq
->iv
, iv_len
,
1788 artpec6_crypto_walk_init(&walk
, areq
->src
);
1789 ret
= artpec6_crypto_setup_sg_descrs_out(common
, &walk
, areq
->cryptlen
);
1794 artpec6_crypto_walk_init(&walk
, areq
->dst
);
1795 ret
= artpec6_crypto_setup_sg_descrs_in(common
, &walk
, areq
->cryptlen
);
1799 /* CTR-mode padding required by the HW. */
1800 if (ctx
->crypto_type
== ARTPEC6_CRYPTO_CIPHER_AES_CTR
||
1801 ctx
->crypto_type
== ARTPEC6_CRYPTO_CIPHER_AES_XTS
) {
		size_t pad = ALIGN(areq->cryptlen, AES_BLOCK_SIZE) -
			     areq->cryptlen;
1806 ret
= artpec6_crypto_setup_out_descr(common
,
1812 ret
= artpec6_crypto_setup_in_descr(common
,
1813 ac
->pad_buffer
, pad
,
1820 ret
= artpec6_crypto_terminate_out_descrs(common
);
1824 ret
= artpec6_crypto_terminate_in_descrs(common
);
1828 return artpec6_crypto_dma_map_descs(common
);
1831 static int artpec6_crypto_prepare_aead(struct aead_request
*areq
)
1835 size_t input_length
;
1836 struct artpec6_cryptotfm_context
*ctx
= crypto_tfm_ctx(areq
->base
.tfm
);
1837 struct artpec6_crypto_aead_req_ctx
*req_ctx
= aead_request_ctx(areq
);
1838 struct crypto_aead
*cipher
= crypto_aead_reqtfm(areq
);
1839 struct artpec6_crypto_req_common
*common
= &req_ctx
->common
;
1840 struct artpec6_crypto
*ac
= dev_get_drvdata(artpec6_crypto_dev
);
1841 enum artpec6_crypto_variant variant
= ac
->variant
;
1844 artpec6_crypto_init_dma_operation(common
);
1847 if (variant
== ARTPEC6_CRYPTO
) {
1848 ctx
->key_md
= FIELD_PREP(A6_CRY_MD_OPER
,
1849 a6_regk_crypto_dlkey
);
1851 ctx
->key_md
= FIELD_PREP(A7_CRY_MD_OPER
,
1852 a7_regk_crypto_dlkey
);
1854 ret
= artpec6_crypto_setup_out_descr(common
, (void *)&ctx
->key_md
,
1855 sizeof(ctx
->key_md
), false, false);
1859 ret
= artpec6_crypto_setup_out_descr(common
, ctx
->aes_key
,
1860 ctx
->key_length
, true, false);
1864 req_ctx
->cipher_md
= 0;
1866 switch (ctx
->key_length
) {
1868 md_cipher_len
= regk_crypto_key_128
;
1871 md_cipher_len
= regk_crypto_key_192
;
1874 md_cipher_len
= regk_crypto_key_256
;
1880 if (variant
== ARTPEC6_CRYPTO
) {
1881 req_ctx
->cipher_md
|= FIELD_PREP(A6_CRY_MD_OPER
,
1882 regk_crypto_aes_gcm
);
1883 req_ctx
->cipher_md
|= FIELD_PREP(A6_CRY_MD_CIPHER_LEN
,
1885 if (req_ctx
->decrypt
)
1886 req_ctx
->cipher_md
|= A6_CRY_MD_CIPHER_DECR
;
1888 req_ctx
->cipher_md
|= FIELD_PREP(A7_CRY_MD_OPER
,
1889 regk_crypto_aes_gcm
);
1890 req_ctx
->cipher_md
|= FIELD_PREP(A7_CRY_MD_CIPHER_LEN
,
1892 if (req_ctx
->decrypt
)
1893 req_ctx
->cipher_md
|= A7_CRY_MD_CIPHER_DECR
;
1896 ret
= artpec6_crypto_setup_out_descr(common
,
1897 (void *) &req_ctx
->cipher_md
,
1898 sizeof(req_ctx
->cipher_md
), false,
1903 ret
= artpec6_crypto_setup_in_descr(common
, ac
->pad_buffer
, 4, false);
1907 /* For the decryption, cryptlen includes the tag. */
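	/* For example, with a 16-byte GCM tag a decryption request processes
	 * cryptlen - 16 bytes of actual ciphertext.
	 */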
1908 input_length
= areq
->cryptlen
;
1909 if (req_ctx
->decrypt
)
1910 input_length
-= crypto_aead_authsize(cipher
);
1912 /* Prepare the context buffer */
1913 req_ctx
->hw_ctx
.aad_length_bits
=
1914 __cpu_to_be64(8*areq
->assoclen
);
1916 req_ctx
->hw_ctx
.text_length_bits
=
1917 __cpu_to_be64(8*input_length
);
1919 memcpy(req_ctx
->hw_ctx
.J0
, areq
->iv
, crypto_aead_ivsize(cipher
));
1920 // The HW omits the initial increment of the counter field.
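	// With the 12-byte GCM IV used here this yields J0 = IV || 0x00000001,
	// the pre-counter block defined by the GCM specification.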
1921 memcpy(req_ctx
->hw_ctx
.J0
+ GCM_AES_IV_SIZE
, "\x00\x00\x00\x01", 4);
1923 ret
= artpec6_crypto_setup_out_descr(common
, &req_ctx
->hw_ctx
,
1924 sizeof(struct artpec6_crypto_aead_hw_ctx
), false, false);
1929 struct artpec6_crypto_walk walk
;
1931 artpec6_crypto_walk_init(&walk
, areq
->src
);
1933 /* Associated data */
1934 count
= areq
->assoclen
;
1935 ret
= artpec6_crypto_setup_sg_descrs_out(common
, &walk
, count
);
1939 if (!IS_ALIGNED(areq
->assoclen
, 16)) {
1940 size_t assoc_pad
= 16 - (areq
->assoclen
% 16);
1941 /* The HW mandates zero padding here */
1942 ret
= artpec6_crypto_setup_out_descr(common
,
1950 /* Data to crypto */
1951 count
= input_length
;
1952 ret
= artpec6_crypto_setup_sg_descrs_out(common
, &walk
, count
);
1956 if (!IS_ALIGNED(input_length
, 16)) {
1957 size_t crypto_pad
= 16 - (input_length
% 16);
1958 /* The HW mandates zero padding here */
1959 ret
= artpec6_crypto_setup_out_descr(common
,
1969 /* Data from crypto */
1971 struct artpec6_crypto_walk walk
;
1972 size_t output_len
= areq
->cryptlen
;
1974 if (req_ctx
->decrypt
)
1975 output_len
-= crypto_aead_authsize(cipher
);
1977 artpec6_crypto_walk_init(&walk
, areq
->dst
);
1979 /* skip associated data in the output */
1980 count
= artpec6_crypto_walk_advance(&walk
, areq
->assoclen
);
1985 ret
= artpec6_crypto_setup_sg_descrs_in(common
, &walk
, count
);
		/* Put padding between the ciphertext and the auth tag */
1990 if (!IS_ALIGNED(output_len
, 16)) {
1991 size_t crypto_pad
= 16 - (output_len
% 16);
1993 ret
= artpec6_crypto_setup_in_descr(common
,
2000 /* The authentication tag shall follow immediately after
2001 * the output ciphertext. For decryption it is put in a context
2002 * buffer for later compare against the input tag.
2005 if (req_ctx
->decrypt
) {
2006 ret
= artpec6_crypto_setup_in_descr(common
,
2007 req_ctx
->decryption_tag
, AES_BLOCK_SIZE
, false);
2012 /* For encryption the requested tag size may be smaller
2013 * than the hardware's generated tag.
2015 size_t authsize
= crypto_aead_authsize(cipher
);
2017 ret
= artpec6_crypto_setup_sg_descrs_in(common
, &walk
,
2022 if (authsize
< AES_BLOCK_SIZE
) {
2023 count
= AES_BLOCK_SIZE
- authsize
;
2024 ret
= artpec6_crypto_setup_in_descr(common
,
2034 ret
= artpec6_crypto_terminate_in_descrs(common
);
2038 ret
= artpec6_crypto_terminate_out_descrs(common
);
2042 return artpec6_crypto_dma_map_descs(common
);
2045 static void artpec6_crypto_process_queue(struct artpec6_crypto
*ac
,
2046 struct list_head
*completions
)
2048 struct artpec6_crypto_req_common
*req
;
2050 while (!list_empty(&ac
->queue
) && !artpec6_crypto_busy()) {
2051 req
= list_first_entry(&ac
->queue
,
2052 struct artpec6_crypto_req_common
,
2054 list_move_tail(&req
->list
, &ac
->pending
);
2055 artpec6_crypto_start_dma(req
);
2057 list_add_tail(&req
->complete_in_progress
, completions
);
	/*
	 * In some cases, the hardware can raise an in_eop_flush interrupt
	 * before actually updating the status, so we have a timer which will
	 * recheck the status on timeout. Since the cases are expected to be
	 * very rare, we use a relatively large timeout value. There should be
	 * no noticeable negative effect if we timeout spuriously.
	 */
2067 if (ac
->pending_count
)
2068 mod_timer(&ac
->timer
, jiffies
+ msecs_to_jiffies(100));
2070 del_timer(&ac
->timer
);
2073 static void artpec6_crypto_timeout(struct timer_list
*t
)
2075 struct artpec6_crypto
*ac
= from_timer(ac
, t
, timer
);
2077 dev_info_ratelimited(artpec6_crypto_dev
, "timeout\n");
2079 tasklet_schedule(&ac
->task
);
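/* Bottom half: reap finished jobs from the pending list, copy bounce buffers
 * back, start any queued requests, and run the completion callbacks outside
 * the queue lock.
 */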
2082 static void artpec6_crypto_task(unsigned long data
)
2084 struct artpec6_crypto
*ac
= (struct artpec6_crypto
*)data
;
2085 struct artpec6_crypto_req_common
*req
;
2086 struct artpec6_crypto_req_common
*n
;
2087 struct list_head complete_done
;
2088 struct list_head complete_in_progress
;
2090 INIT_LIST_HEAD(&complete_done
);
2091 INIT_LIST_HEAD(&complete_in_progress
);
2093 if (list_empty(&ac
->pending
)) {
2094 pr_debug("Spurious IRQ\n");
2098 spin_lock(&ac
->queue_lock
);
2100 list_for_each_entry_safe(req
, n
, &ac
->pending
, list
) {
2101 struct artpec6_crypto_dma_descriptors
*dma
= req
->dma
;
2103 dma_addr_t stataddr
;
2105 stataddr
= dma
->stat_dma_addr
+ 4 * (req
->dma
->in_cnt
- 1);
2106 dma_sync_single_for_cpu(artpec6_crypto_dev
,
2111 stat
= req
->dma
->stat
[req
->dma
->in_cnt
-1];
		/* A non-zero final status descriptor indicates
		 * this job has finished.
		 */
2116 pr_debug("Request %p status is %X\n", req
, stat
);
2120 /* Allow testing of timeout handling with fault injection */
2121 #ifdef CONFIG_FAULT_INJECTION
2122 if (should_fail(&artpec6_crypto_fail_status_read
, 1))
2126 pr_debug("Completing request %p\n", req
);
2128 list_move_tail(&req
->list
, &complete_done
);
2130 ac
->pending_count
--;
2133 artpec6_crypto_process_queue(ac
, &complete_in_progress
);
2135 spin_unlock(&ac
->queue_lock
);
2137 /* Perform the completion callbacks without holding the queue lock
2138 * to allow new request submissions from the callbacks.
2140 list_for_each_entry_safe(req
, n
, &complete_done
, list
) {
2141 artpec6_crypto_dma_unmap_all(req
);
2142 artpec6_crypto_copy_bounce_buffers(req
);
2143 artpec6_crypto_common_destroy(req
);
2145 req
->complete(req
->req
);
2148 list_for_each_entry_safe(req
, n
, &complete_in_progress
,
2149 complete_in_progress
) {
2150 crypto_request_complete(req
->req
, -EINPROGRESS
);
2154 static void artpec6_crypto_complete_crypto(struct crypto_async_request
*req
)
2156 crypto_request_complete(req
, 0);
2160 artpec6_crypto_complete_cbc_decrypt(struct crypto_async_request
*req
)
2162 struct skcipher_request
*cipher_req
= container_of(req
,
2163 struct skcipher_request
, base
);
2165 scatterwalk_map_and_copy(cipher_req
->iv
, cipher_req
->src
,
2166 cipher_req
->cryptlen
- AES_BLOCK_SIZE
,
2168 skcipher_request_complete(cipher_req
, 0);
2172 artpec6_crypto_complete_cbc_encrypt(struct crypto_async_request
*req
)
2174 struct skcipher_request
*cipher_req
= container_of(req
,
2175 struct skcipher_request
, base
);
2177 scatterwalk_map_and_copy(cipher_req
->iv
, cipher_req
->dst
,
2178 cipher_req
->cryptlen
- AES_BLOCK_SIZE
,
2180 skcipher_request_complete(cipher_req
, 0);
2183 static void artpec6_crypto_complete_aead(struct crypto_async_request
*req
)
2187 /* Verify GCM hashtag. */
2188 struct aead_request
*areq
= container_of(req
,
2189 struct aead_request
, base
);
2190 struct crypto_aead
*aead
= crypto_aead_reqtfm(areq
);
2191 struct artpec6_crypto_aead_req_ctx
*req_ctx
= aead_request_ctx(areq
);
2193 if (req_ctx
->decrypt
) {
2194 u8 input_tag
[AES_BLOCK_SIZE
];
2195 unsigned int authsize
= crypto_aead_authsize(aead
);
2197 sg_pcopy_to_buffer(areq
->src
,
2198 sg_nents(areq
->src
),
2201 areq
->assoclen
+ areq
->cryptlen
-
2204 if (crypto_memneq(req_ctx
->decryption_tag
,
2207 pr_debug("***EBADMSG:\n");
2208 print_hex_dump_debug("ref:", DUMP_PREFIX_ADDRESS
, 32, 1,
2209 input_tag
, authsize
, true);
2210 print_hex_dump_debug("out:", DUMP_PREFIX_ADDRESS
, 32, 1,
2211 req_ctx
->decryption_tag
,
2218 aead_request_complete(areq
, result
);
2221 static void artpec6_crypto_complete_hash(struct crypto_async_request
*req
)
2223 crypto_request_complete(req
, 0);
2227 /*------------------- Hash functions -----------------------------------------*/
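/* HMAC keys longer than the hash block size are first digested down to the
 * digest size (RFC 2104); shorter keys are kept as-is and zero-padded to a
 * full block when uploaded to the hardware.
 */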
2229 artpec6_crypto_hash_set_key(struct crypto_ahash
*tfm
,
2230 const u8
*key
, unsigned int keylen
)
2232 struct artpec6_hashalg_context
*tfm_ctx
= crypto_tfm_ctx(&tfm
->base
);
2237 pr_err("Invalid length (%d) of HMAC key\n",
2242 memset(tfm_ctx
->hmac_key
, 0, sizeof(tfm_ctx
->hmac_key
));
2244 blocksize
= crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm
));
2246 if (keylen
> blocksize
) {
2247 tfm_ctx
->hmac_key_length
= blocksize
;
2249 ret
= crypto_shash_tfm_digest(tfm_ctx
->child_hash
, key
, keylen
,
2254 memcpy(tfm_ctx
->hmac_key
, key
, keylen
);
2255 tfm_ctx
->hmac_key_length
= keylen
;
2262 artpec6_crypto_init_hash(struct ahash_request
*req
, u8 type
, int hmac
)
2264 struct artpec6_crypto
*ac
= dev_get_drvdata(artpec6_crypto_dev
);
2265 enum artpec6_crypto_variant variant
= ac
->variant
;
2266 struct artpec6_hash_request_context
*req_ctx
= ahash_request_ctx(req
);
2269 memset(req_ctx
, 0, sizeof(*req_ctx
));
2271 req_ctx
->hash_flags
= HASH_FLAG_INIT_CTX
;
2273 req_ctx
->hash_flags
|= (HASH_FLAG_HMAC
| HASH_FLAG_UPDATE_KEY
);
2276 case ARTPEC6_CRYPTO_HASH_SHA1
:
2277 oper
= hmac
? regk_crypto_hmac_sha1
: regk_crypto_sha1
;
2279 case ARTPEC6_CRYPTO_HASH_SHA256
:
2280 oper
= hmac
? regk_crypto_hmac_sha256
: regk_crypto_sha256
;
2283 pr_err("%s: Unsupported hash type 0x%x\n", MODULE_NAME
, type
);
2287 if (variant
== ARTPEC6_CRYPTO
)
2288 req_ctx
->hash_md
= FIELD_PREP(A6_CRY_MD_OPER
, oper
);
2290 req_ctx
->hash_md
= FIELD_PREP(A7_CRY_MD_OPER
, oper
);
2295 static int artpec6_crypto_prepare_submit_hash(struct ahash_request
*req
)
2297 struct artpec6_hash_request_context
*req_ctx
= ahash_request_ctx(req
);
2300 if (!req_ctx
->common
.dma
) {
2301 ret
= artpec6_crypto_common_init(&req_ctx
->common
,
2303 artpec6_crypto_complete_hash
,
2310 ret
= artpec6_crypto_prepare_hash(req
);
2312 case ARTPEC6_CRYPTO_PREPARE_HASH_START
:
2313 ret
= artpec6_crypto_submit(&req_ctx
->common
);
2316 case ARTPEC6_CRYPTO_PREPARE_HASH_NO_START
:
2321 artpec6_crypto_common_destroy(&req_ctx
->common
);
2328 static int artpec6_crypto_hash_final(struct ahash_request
*req
)
2330 struct artpec6_hash_request_context
*req_ctx
= ahash_request_ctx(req
);
2332 req_ctx
->hash_flags
|= HASH_FLAG_FINALIZE
;
2334 return artpec6_crypto_prepare_submit_hash(req
);
2337 static int artpec6_crypto_hash_update(struct ahash_request
*req
)
2339 struct artpec6_hash_request_context
*req_ctx
= ahash_request_ctx(req
);
2341 req_ctx
->hash_flags
|= HASH_FLAG_UPDATE
;
2343 return artpec6_crypto_prepare_submit_hash(req
);
2346 static int artpec6_crypto_sha1_init(struct ahash_request
*req
)
2348 return artpec6_crypto_init_hash(req
, ARTPEC6_CRYPTO_HASH_SHA1
, 0);
2351 static int artpec6_crypto_sha1_digest(struct ahash_request
*req
)
2353 struct artpec6_hash_request_context
*req_ctx
= ahash_request_ctx(req
);
2355 artpec6_crypto_init_hash(req
, ARTPEC6_CRYPTO_HASH_SHA1
, 0);
2357 req_ctx
->hash_flags
|= HASH_FLAG_UPDATE
| HASH_FLAG_FINALIZE
;
2359 return artpec6_crypto_prepare_submit_hash(req
);
2362 static int artpec6_crypto_sha256_init(struct ahash_request
*req
)
2364 return artpec6_crypto_init_hash(req
, ARTPEC6_CRYPTO_HASH_SHA256
, 0);
2367 static int artpec6_crypto_sha256_digest(struct ahash_request
*req
)
2369 struct artpec6_hash_request_context
*req_ctx
= ahash_request_ctx(req
);
2371 artpec6_crypto_init_hash(req
, ARTPEC6_CRYPTO_HASH_SHA256
, 0);
2372 req_ctx
->hash_flags
|= HASH_FLAG_UPDATE
| HASH_FLAG_FINALIZE
;
2374 return artpec6_crypto_prepare_submit_hash(req
);
static int artpec6_crypto_hmac_sha256_init(struct ahash_request *req)
{
	return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 1);
}
static int artpec6_crypto_hmac_sha256_digest(struct ahash_request *req)
{
	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);

	artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 1);
	req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;

	return artpec6_crypto_prepare_submit_hash(req);
}
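/*
 * Common transform setup: reserve the per-request context and, when a base
 * hash name is given (the HMAC case), allocate a synchronous shash that is
 * used to shrink over-long HMAC keys in artpec6_crypto_hash_set_key().
 */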
static int artpec6_crypto_ahash_init_common(struct crypto_tfm *tfm,
				    const char *base_hash_name)
{
	struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(tfm);

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct artpec6_hash_request_context));
	memset(tfm_ctx, 0, sizeof(*tfm_ctx));

	if (base_hash_name) {
		struct crypto_shash *child;

		child = crypto_alloc_shash(base_hash_name, 0,
					   CRYPTO_ALG_NEED_FALLBACK);

		if (IS_ERR(child))
			return PTR_ERR(child);

		tfm_ctx->child_hash = child;
	}

	return 0;
}
static int artpec6_crypto_ahash_init(struct crypto_tfm *tfm)
{
	return artpec6_crypto_ahash_init_common(tfm, NULL);
}
static int artpec6_crypto_ahash_init_hmac_sha256(struct crypto_tfm *tfm)
{
	return artpec6_crypto_ahash_init_common(tfm, "sha256");
}
static void artpec6_crypto_ahash_exit(struct crypto_tfm *tfm)
{
	struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(tfm);

	if (tfm_ctx->child_hash)
		crypto_free_shash(tfm_ctx->child_hash);

	memset(tfm_ctx->hmac_key, 0, sizeof(tfm_ctx->hmac_key));
	tfm_ctx->hmac_key_length = 0;
}
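/*
 * Export/import the partial hash state so that the crypto API can suspend
 * and resume a request. The operation code is stored in variant-neutral
 * form and re-encoded into the hardware metadata word on import.
 */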
static int artpec6_crypto_hash_export(struct ahash_request *req, void *out)
{
	const struct artpec6_hash_request_context *ctx = ahash_request_ctx(req);
	struct artpec6_hash_export_state *state = out;
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	enum artpec6_crypto_variant variant = ac->variant;

	BUILD_BUG_ON(sizeof(state->partial_buffer) !=
		     sizeof(ctx->partial_buffer));
	BUILD_BUG_ON(sizeof(state->digeststate) != sizeof(ctx->digeststate));

	state->digcnt = ctx->digcnt;
	state->partial_bytes = ctx->partial_bytes;
	state->hash_flags = ctx->hash_flags;

	if (variant == ARTPEC6_CRYPTO)
		state->oper = FIELD_GET(A6_CRY_MD_OPER, ctx->hash_md);
	else
		state->oper = FIELD_GET(A7_CRY_MD_OPER, ctx->hash_md);

	memcpy(state->partial_buffer, ctx->partial_buffer,
	       sizeof(state->partial_buffer));
	memcpy(state->digeststate, ctx->digeststate,
	       sizeof(state->digeststate));

	return 0;
}
static int artpec6_crypto_hash_import(struct ahash_request *req, const void *in)
{
	struct artpec6_hash_request_context *ctx = ahash_request_ctx(req);
	const struct artpec6_hash_export_state *state = in;
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	enum artpec6_crypto_variant variant = ac->variant;

	memset(ctx, 0, sizeof(*ctx));

	ctx->digcnt = state->digcnt;
	ctx->partial_bytes = state->partial_bytes;
	ctx->hash_flags = state->hash_flags;

	if (variant == ARTPEC6_CRYPTO)
		ctx->hash_md = FIELD_PREP(A6_CRY_MD_OPER, state->oper);
	else
		ctx->hash_md = FIELD_PREP(A7_CRY_MD_OPER, state->oper);

	memcpy(ctx->partial_buffer, state->partial_buffer,
	       sizeof(state->partial_buffer));
	memcpy(ctx->digeststate, state->digeststate,
	       sizeof(state->digeststate));

	return 0;
}
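/*
 * Partition the PDMA internal memory between data, descriptor and status
 * buffers and enable the IN/OUT channels and interrupt mask for the
 * detected SoC variant.
 */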
static int init_crypto_hw(struct artpec6_crypto *ac)
{
	enum artpec6_crypto_variant variant = ac->variant;
	void __iomem *base = ac->base;
	u32 out_descr_buf_size;
	u32 out_data_buf_size;
	u32 in_data_buf_size;
	u32 in_descr_buf_size;
	u32 in_stat_buf_size;
	u32 in, out;

	/*
	 * The PDMA unit contains 1984 bytes of internal memory for the OUT
	 * channels and 1024 bytes for the IN channel. This is an elastic
	 * memory used to internally store the descriptors and data. The
	 * values are specified in 64 byte increments. Trustzone buffers are
	 * not used at this stage.
	 */
	out_data_buf_size = 16;		/* 1024 bytes for data */
	out_descr_buf_size = 15;	/* 960 bytes for descriptors */
	in_data_buf_size = 8;		/* 512 bytes for data */
	in_descr_buf_size = 4;		/* 256 bytes for descriptors */
	in_stat_buf_size = 4;		/* 256 bytes for stat descrs */

	BUILD_BUG_ON_MSG((out_data_buf_size
				+ out_descr_buf_size) * 64 > 1984,
			 "Invalid OUT configuration");

	BUILD_BUG_ON_MSG((in_data_buf_size
				+ in_descr_buf_size
				+ in_stat_buf_size) * 64 > 1024,
			 "Invalid IN configuration");

	in = FIELD_PREP(PDMA_IN_BUF_CFG_DATA_BUF_SIZE, in_data_buf_size) |
	     FIELD_PREP(PDMA_IN_BUF_CFG_DESCR_BUF_SIZE, in_descr_buf_size) |
	     FIELD_PREP(PDMA_IN_BUF_CFG_STAT_BUF_SIZE, in_stat_buf_size);

	out = FIELD_PREP(PDMA_OUT_BUF_CFG_DATA_BUF_SIZE, out_data_buf_size) |
	      FIELD_PREP(PDMA_OUT_BUF_CFG_DESCR_BUF_SIZE, out_descr_buf_size);

	writel_relaxed(out, base + PDMA_OUT_BUF_CFG);
	writel_relaxed(PDMA_OUT_CFG_EN, base + PDMA_OUT_CFG);

	if (variant == ARTPEC6_CRYPTO) {
		writel_relaxed(in, base + A6_PDMA_IN_BUF_CFG);
		writel_relaxed(PDMA_IN_CFG_EN, base + A6_PDMA_IN_CFG);
		writel_relaxed(A6_PDMA_INTR_MASK_IN_DATA |
			       A6_PDMA_INTR_MASK_IN_EOP_FLUSH,
			       base + A6_PDMA_INTR_MASK);
	} else {
		writel_relaxed(in, base + A7_PDMA_IN_BUF_CFG);
		writel_relaxed(PDMA_IN_CFG_EN, base + A7_PDMA_IN_CFG);
		writel_relaxed(A7_PDMA_INTR_MASK_IN_DATA |
			       A7_PDMA_INTR_MASK_IN_EOP_FLUSH,
			       base + A7_PDMA_INTR_MASK);
	}

	return 0;
}
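/*
 * Stop both PDMA directions and disable the block. Called from the probe
 * error path and on device removal.
 */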
static void artpec6_crypto_disable_hw(struct artpec6_crypto *ac)
{
	enum artpec6_crypto_variant variant = ac->variant;
	void __iomem *base = ac->base;

	if (variant == ARTPEC6_CRYPTO) {
		writel_relaxed(A6_PDMA_IN_CMD_STOP, base + A6_PDMA_IN_CMD);
		writel_relaxed(0, base + A6_PDMA_IN_CFG);
		writel_relaxed(A6_PDMA_OUT_CMD_STOP, base + PDMA_OUT_CMD);
	} else {
		writel_relaxed(A7_PDMA_IN_CMD_STOP, base + A7_PDMA_IN_CMD);
		writel_relaxed(0, base + A7_PDMA_IN_CFG);
		writel_relaxed(A7_PDMA_OUT_CMD_STOP, base + PDMA_OUT_CMD);
	}

	writel_relaxed(0, base + PDMA_OUT_CFG);
}
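/*
 * PDMA interrupt handler: acknowledge the pending interrupts, request a
 * status flush while the final EOP flush has not yet been seen, and hand
 * completion processing over to the tasklet once it has.
 */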
static irqreturn_t artpec6_crypto_irq(int irq, void *dev_id)
{
	struct artpec6_crypto *ac = dev_id;
	enum artpec6_crypto_variant variant = ac->variant;
	void __iomem *base = ac->base;
	u32 mask_in_data, mask_in_eop_flush;
	u32 in_cmd_flush_stat, in_cmd_reg;
	u32 ack_intr_reg;
	u32 ack = 0;
	u32 intr;

	if (variant == ARTPEC6_CRYPTO) {
		intr = readl_relaxed(base + A6_PDMA_MASKED_INTR);
		mask_in_data = A6_PDMA_INTR_MASK_IN_DATA;
		mask_in_eop_flush = A6_PDMA_INTR_MASK_IN_EOP_FLUSH;
		in_cmd_flush_stat = A6_PDMA_IN_CMD_FLUSH_STAT;
		in_cmd_reg = A6_PDMA_IN_CMD;
		ack_intr_reg = A6_PDMA_ACK_INTR;
	} else {
		intr = readl_relaxed(base + A7_PDMA_MASKED_INTR);
		mask_in_data = A7_PDMA_INTR_MASK_IN_DATA;
		mask_in_eop_flush = A7_PDMA_INTR_MASK_IN_EOP_FLUSH;
		in_cmd_flush_stat = A7_PDMA_IN_CMD_FLUSH_STAT;
		in_cmd_reg = A7_PDMA_IN_CMD;
		ack_intr_reg = A7_PDMA_ACK_INTR;
	}

	/* We get two interrupt notifications from each job.
	 * The in_data means all data was sent to memory and then
	 * we request a status flush command to write the per-job
	 * status to its status vector. This ensures that the
	 * tasklet can detect exactly how many submitted jobs
	 * have finished.
	 */
	if (intr & mask_in_data)
		ack |= mask_in_data;

	if (intr & mask_in_eop_flush)
		ack |= mask_in_eop_flush;
	else
		writel_relaxed(in_cmd_flush_stat, base + in_cmd_reg);

	writel_relaxed(ack, base + ack_intr_reg);

	if (intr & mask_in_eop_flush)
		tasklet_schedule(&ac->task);

	return IRQ_HANDLED;
}
/*------------------- Algorithm definitions ----------------------------------*/

/* Hashes */
static struct ahash_alg hash_algos[] = {
	/* SHA-1 */
	{
		.init = artpec6_crypto_sha1_init,
		.update = artpec6_crypto_hash_update,
		.final = artpec6_crypto_hash_final,
		.digest = artpec6_crypto_sha1_digest,
		.import = artpec6_crypto_hash_import,
		.export = artpec6_crypto_hash_export,
		.halg.digestsize = SHA1_DIGEST_SIZE,
		.halg.statesize = sizeof(struct artpec6_hash_export_state),
		.halg.base = {
			.cra_name = "sha1",
			.cra_driver_name = "artpec-sha1",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
			.cra_module = THIS_MODULE,
			.cra_init = artpec6_crypto_ahash_init,
			.cra_exit = artpec6_crypto_ahash_exit,
		}
	},
	/* SHA-256 */
	{
		.init = artpec6_crypto_sha256_init,
		.update = artpec6_crypto_hash_update,
		.final = artpec6_crypto_hash_final,
		.digest = artpec6_crypto_sha256_digest,
		.import = artpec6_crypto_hash_import,
		.export = artpec6_crypto_hash_export,
		.halg.digestsize = SHA256_DIGEST_SIZE,
		.halg.statesize = sizeof(struct artpec6_hash_export_state),
		.halg.base = {
			.cra_name = "sha256",
			.cra_driver_name = "artpec-sha256",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY,
			.cra_blocksize = SHA256_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
			.cra_module = THIS_MODULE,
			.cra_init = artpec6_crypto_ahash_init,
			.cra_exit = artpec6_crypto_ahash_exit,
		}
	},
	/* HMAC SHA-256 */
	{
		.init = artpec6_crypto_hmac_sha256_init,
		.update = artpec6_crypto_hash_update,
		.final = artpec6_crypto_hash_final,
		.digest = artpec6_crypto_hmac_sha256_digest,
		.import = artpec6_crypto_hash_import,
		.export = artpec6_crypto_hash_export,
		.setkey = artpec6_crypto_hash_set_key,
		.halg.digestsize = SHA256_DIGEST_SIZE,
		.halg.statesize = sizeof(struct artpec6_hash_export_state),
		.halg.base = {
			.cra_name = "hmac(sha256)",
			.cra_driver_name = "artpec-hmac-sha256",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY,
			.cra_blocksize = SHA256_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
			.cra_module = THIS_MODULE,
			.cra_init = artpec6_crypto_ahash_init_hmac_sha256,
			.cra_exit = artpec6_crypto_ahash_exit,
		}
	},
};
/* Crypto */
static struct skcipher_alg crypto_algos[] = {
	/* AES - ECB */
	{
		.base = {
			.cra_name = "ecb(aes)",
			.cra_driver_name = "artpec6-ecb-aes",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
			.cra_alignmask = 3,
			.cra_module = THIS_MODULE,
		},
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.setkey = artpec6_crypto_cipher_set_key,
		.encrypt = artpec6_crypto_encrypt,
		.decrypt = artpec6_crypto_decrypt,
		.init = artpec6_crypto_aes_ecb_init,
		.exit = artpec6_crypto_aes_exit,
	},
	/* AES - CTR */
	{
		.base = {
			.cra_name = "ctr(aes)",
			.cra_driver_name = "artpec6-ctr-aes",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY |
				     CRYPTO_ALG_NEED_FALLBACK,
			.cra_blocksize = 1,
			.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
			.cra_alignmask = 3,
			.cra_module = THIS_MODULE,
		},
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.setkey = artpec6_crypto_cipher_set_key,
		.encrypt = artpec6_crypto_ctr_encrypt,
		.decrypt = artpec6_crypto_ctr_decrypt,
		.init = artpec6_crypto_aes_ctr_init,
		.exit = artpec6_crypto_aes_ctr_exit,
	},
	/* AES - CBC */
	{
		.base = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "artpec6-cbc-aes",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
			.cra_alignmask = 3,
			.cra_module = THIS_MODULE,
		},
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.setkey = artpec6_crypto_cipher_set_key,
		.encrypt = artpec6_crypto_encrypt,
		.decrypt = artpec6_crypto_decrypt,
		.init = artpec6_crypto_aes_cbc_init,
		.exit = artpec6_crypto_aes_exit,
	},
	/* AES - XTS */
	{
		.base = {
			.cra_name = "xts(aes)",
			.cra_driver_name = "artpec6-xts-aes",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY,
			.cra_blocksize = 1,
			.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
			.cra_alignmask = 3,
			.cra_module = THIS_MODULE,
		},
		.min_keysize = 2*AES_MIN_KEY_SIZE,
		.max_keysize = 2*AES_MAX_KEY_SIZE,
		.ivsize = 16,
		.setkey = artpec6_crypto_xts_set_key,
		.encrypt = artpec6_crypto_encrypt,
		.decrypt = artpec6_crypto_decrypt,
		.init = artpec6_crypto_aes_xts_init,
		.exit = artpec6_crypto_aes_exit,
	},
};
static struct aead_alg aead_algos[] = {
	{
		.init   = artpec6_crypto_aead_init,
		.setkey = artpec6_crypto_aead_set_key,
		.encrypt = artpec6_crypto_aead_encrypt,
		.decrypt = artpec6_crypto_aead_decrypt,
		.ivsize = GCM_AES_IV_SIZE,
		.maxauthsize = AES_BLOCK_SIZE,

		.base = {
			.cra_name = "gcm(aes)",
			.cra_driver_name = "artpec-gcm-aes",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = 1,
			.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
			.cra_alignmask = 3,
			.cra_module = THIS_MODULE,
		},
	}
};
#ifdef CONFIG_DEBUG_FS

static struct dentry *dbgfs_root;

static void artpec6_crypto_init_debugfs(void)
{
	dbgfs_root = debugfs_create_dir("artpec6_crypto", NULL);

#ifdef CONFIG_FAULT_INJECTION
	fault_create_debugfs_attr("fail_status_read", dbgfs_root,
				  &artpec6_crypto_fail_status_read);

	fault_create_debugfs_attr("fail_dma_array_full", dbgfs_root,
				  &artpec6_crypto_fail_dma_array_full);
#endif
}

static void artpec6_crypto_free_debugfs(void)
{
	debugfs_remove_recursive(dbgfs_root);
	dbgfs_root = NULL;
}

#endif /* CONFIG_DEBUG_FS */
static const struct of_device_id artpec6_crypto_of_match[] = {
	{ .compatible = "axis,artpec6-crypto", .data = (void *)ARTPEC6_CRYPTO },
	{ .compatible = "axis,artpec7-crypto", .data = (void *)ARTPEC7_CRYPTO },
	{}
};
MODULE_DEVICE_TABLE(of, artpec6_crypto_of_match);
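/*
 * Probe: only a single crypto block is supported. Map the registers,
 * allocate the DMA descriptor cache and the cache-line aligned pad/zero
 * buffers, bring up the hardware and its IRQ, then register the hash,
 * skcipher and AEAD algorithms, unwinding in reverse order on failure.
 */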
static int artpec6_crypto_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	enum artpec6_crypto_variant variant;
	struct artpec6_crypto *ac;
	struct device *dev = &pdev->dev;
	void __iomem *base;
	int irq;
	int err;

	if (artpec6_crypto_dev)
		return -ENODEV;

	match = of_match_node(artpec6_crypto_of_match, dev->of_node);
	if (!match)
		return -EINVAL;

	variant = (enum artpec6_crypto_variant)match->data;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return -ENODEV;

	ac = devm_kzalloc(&pdev->dev, sizeof(struct artpec6_crypto),
			  GFP_KERNEL);
	if (!ac)
		return -ENOMEM;

	platform_set_drvdata(pdev, ac);
	ac->variant = variant;

	spin_lock_init(&ac->queue_lock);
	INIT_LIST_HEAD(&ac->queue);
	INIT_LIST_HEAD(&ac->pending);
	timer_setup(&ac->timer, artpec6_crypto_timeout, 0);

	ac->base = base;

	ac->dma_cache = kmem_cache_create("artpec6_crypto_dma",
		sizeof(struct artpec6_crypto_dma_descriptors),
		64,
		0,
		NULL);
	if (!ac->dma_cache)
		return -ENOMEM;

#ifdef CONFIG_DEBUG_FS
	artpec6_crypto_init_debugfs();
#endif

	tasklet_init(&ac->task, artpec6_crypto_task,
		     (unsigned long)ac);

	ac->pad_buffer = devm_kzalloc(&pdev->dev, 2 * ARTPEC_CACHE_LINE_MAX,
				      GFP_KERNEL);
	if (!ac->pad_buffer)
		return -ENOMEM;
	ac->pad_buffer = PTR_ALIGN(ac->pad_buffer, ARTPEC_CACHE_LINE_MAX);

	ac->zero_buffer = devm_kzalloc(&pdev->dev, 2 * ARTPEC_CACHE_LINE_MAX,
				       GFP_KERNEL);
	if (!ac->zero_buffer)
		return -ENOMEM;
	ac->zero_buffer = PTR_ALIGN(ac->zero_buffer, ARTPEC_CACHE_LINE_MAX);

	err = init_crypto_hw(ac);
	if (err)
		goto free_cache;

	err = devm_request_irq(&pdev->dev, irq, artpec6_crypto_irq, 0,
			       "artpec6-crypto", ac);
	if (err)
		goto disable_hw;

	artpec6_crypto_dev = &pdev->dev;

	err = crypto_register_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
	if (err) {
		dev_err(dev, "Failed to register ahashes\n");
		goto disable_hw;
	}

	err = crypto_register_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
	if (err) {
		dev_err(dev, "Failed to register ciphers\n");
		goto unregister_ahashes;
	}

	err = crypto_register_aeads(aead_algos, ARRAY_SIZE(aead_algos));
	if (err) {
		dev_err(dev, "Failed to register aeads\n");
		goto unregister_algs;
	}

	return 0;

unregister_algs:
	crypto_unregister_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
unregister_ahashes:
	crypto_unregister_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
disable_hw:
	artpec6_crypto_disable_hw(ac);
free_cache:
	kmem_cache_destroy(ac->dma_cache);
	return err;
}
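/*
 * Remove: unregister the algorithms first so no new requests can arrive,
 * then quiesce the tasklet and the timeout timer before shutting the
 * hardware down and freeing the descriptor cache.
 */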
static void artpec6_crypto_remove(struct platform_device *pdev)
{
	struct artpec6_crypto *ac = platform_get_drvdata(pdev);
	int irq = platform_get_irq(pdev, 0);

	crypto_unregister_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
	crypto_unregister_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
	crypto_unregister_aeads(aead_algos, ARRAY_SIZE(aead_algos));

	tasklet_disable(&ac->task);
	devm_free_irq(&pdev->dev, irq, ac);
	tasklet_kill(&ac->task);
	del_timer_sync(&ac->timer);

	artpec6_crypto_disable_hw(ac);

	kmem_cache_destroy(ac->dma_cache);
#ifdef CONFIG_DEBUG_FS
	artpec6_crypto_free_debugfs();
#endif
}
static struct platform_driver artpec6_crypto_driver = {
	.probe   = artpec6_crypto_probe,
	.remove  = artpec6_crypto_remove,
	.driver  = {
		.name  = "artpec6-crypto",
		.of_match_table = artpec6_crypto_of_match,
	},
};

module_platform_driver(artpec6_crypto_driver);
MODULE_AUTHOR("Axis Communications AB");
MODULE_DESCRIPTION("ARTPEC-6 Crypto driver");
MODULE_LICENSE("GPL");