// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for ARTPEC-6 crypto block using the kernel asynchronous crypto api.
 *
 * Copyright (C) 2014-2017 Axis Communications AB
 */
#define pr_fmt(fmt)     KBUILD_MODNAME ": " fmt

#include <linux/bitfield.h>
#include <linux/crypto.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/fault-inject.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <crypto/aes.h>
#include <crypto/gcm.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>
#include <crypto/xts.h>
/* Max length of a line in all cache levels for Artpec SoCs. */
#define ARTPEC_CACHE_LINE_MAX	32

#define PDMA_OUT_CFG		0x0000
#define PDMA_OUT_BUF_CFG	0x0004
#define PDMA_OUT_CMD		0x0008
#define PDMA_OUT_DESCRQ_PUSH	0x0010
#define PDMA_OUT_DESCRQ_STAT	0x0014

#define A6_PDMA_IN_CFG		0x0028
#define A6_PDMA_IN_BUF_CFG	0x002c
#define A6_PDMA_IN_CMD		0x0030
#define A6_PDMA_IN_STATQ_PUSH	0x0038
#define A6_PDMA_IN_DESCRQ_PUSH	0x0044
#define A6_PDMA_IN_DESCRQ_STAT	0x0048
#define A6_PDMA_INTR_MASK	0x0068
#define A6_PDMA_ACK_INTR	0x006c
#define A6_PDMA_MASKED_INTR	0x0074

#define A7_PDMA_IN_CFG		0x002c
#define A7_PDMA_IN_BUF_CFG	0x0030
#define A7_PDMA_IN_CMD		0x0034
#define A7_PDMA_IN_STATQ_PUSH	0x003c
#define A7_PDMA_IN_DESCRQ_PUSH	0x0048
#define A7_PDMA_IN_DESCRQ_STAT	0x004C
#define A7_PDMA_INTR_MASK	0x006c
#define A7_PDMA_ACK_INTR	0x0070
#define A7_PDMA_MASKED_INTR	0x0078

#define PDMA_OUT_CFG_EN				BIT(0)

#define PDMA_OUT_BUF_CFG_DATA_BUF_SIZE		GENMASK(4, 0)
#define PDMA_OUT_BUF_CFG_DESCR_BUF_SIZE		GENMASK(9, 5)

#define PDMA_OUT_CMD_START			BIT(0)
#define A6_PDMA_OUT_CMD_STOP			BIT(3)
#define A7_PDMA_OUT_CMD_STOP			BIT(2)

#define PDMA_OUT_DESCRQ_PUSH_LEN		GENMASK(5, 0)
#define PDMA_OUT_DESCRQ_PUSH_ADDR		GENMASK(31, 6)

#define PDMA_OUT_DESCRQ_STAT_LEVEL		GENMASK(3, 0)
#define PDMA_OUT_DESCRQ_STAT_SIZE		GENMASK(7, 4)

#define PDMA_IN_CFG_EN				BIT(0)

#define PDMA_IN_BUF_CFG_DATA_BUF_SIZE		GENMASK(4, 0)
#define PDMA_IN_BUF_CFG_DESCR_BUF_SIZE		GENMASK(9, 5)
#define PDMA_IN_BUF_CFG_STAT_BUF_SIZE		GENMASK(14, 10)

#define PDMA_IN_CMD_START			BIT(0)
#define A6_PDMA_IN_CMD_FLUSH_STAT		BIT(2)
#define A6_PDMA_IN_CMD_STOP			BIT(3)
#define A7_PDMA_IN_CMD_FLUSH_STAT		BIT(1)
#define A7_PDMA_IN_CMD_STOP			BIT(2)

#define PDMA_IN_STATQ_PUSH_LEN			GENMASK(5, 0)
#define PDMA_IN_STATQ_PUSH_ADDR			GENMASK(31, 6)

#define PDMA_IN_DESCRQ_PUSH_LEN			GENMASK(5, 0)
#define PDMA_IN_DESCRQ_PUSH_ADDR		GENMASK(31, 6)

#define PDMA_IN_DESCRQ_STAT_LEVEL		GENMASK(3, 0)
#define PDMA_IN_DESCRQ_STAT_SIZE		GENMASK(7, 4)

#define A6_PDMA_INTR_MASK_IN_DATA		BIT(2)
#define A6_PDMA_INTR_MASK_IN_EOP		BIT(3)
#define A6_PDMA_INTR_MASK_IN_EOP_FLUSH		BIT(4)

#define A7_PDMA_INTR_MASK_IN_DATA		BIT(3)
#define A7_PDMA_INTR_MASK_IN_EOP		BIT(4)
#define A7_PDMA_INTR_MASK_IN_EOP_FLUSH		BIT(5)

#define A6_CRY_MD_OPER		GENMASK(19, 16)

#define A6_CRY_MD_HASH_SEL_CTX	GENMASK(21, 20)
#define A6_CRY_MD_HASH_HMAC_FIN	BIT(23)

#define A6_CRY_MD_CIPHER_LEN	GENMASK(21, 20)
#define A6_CRY_MD_CIPHER_DECR	BIT(22)
#define A6_CRY_MD_CIPHER_TWEAK	BIT(23)
#define A6_CRY_MD_CIPHER_DSEQ	BIT(24)

#define A7_CRY_MD_OPER		GENMASK(11, 8)

#define A7_CRY_MD_HASH_SEL_CTX	GENMASK(13, 12)
#define A7_CRY_MD_HASH_HMAC_FIN	BIT(15)

#define A7_CRY_MD_CIPHER_LEN	GENMASK(13, 12)
#define A7_CRY_MD_CIPHER_DECR	BIT(14)
#define A7_CRY_MD_CIPHER_TWEAK	BIT(15)
#define A7_CRY_MD_CIPHER_DSEQ	BIT(16)

/* DMA metadata constants */
#define regk_crypto_aes_cbc	0x00000002
#define regk_crypto_aes_ctr	0x00000003
#define regk_crypto_aes_ecb	0x00000001
#define regk_crypto_aes_gcm	0x00000004
#define regk_crypto_aes_xts	0x00000005
#define regk_crypto_cache	0x00000002
#define a6_regk_crypto_dlkey	0x0000000a
#define a7_regk_crypto_dlkey	0x0000000e
#define regk_crypto_ext		0x00000001
#define regk_crypto_hmac_sha1	0x00000007
#define regk_crypto_hmac_sha256	0x00000009
#define regk_crypto_init	0x00000000
#define regk_crypto_key_128	0x00000000
#define regk_crypto_key_192	0x00000001
#define regk_crypto_key_256	0x00000002
#define regk_crypto_null	0x00000000
#define regk_crypto_sha1	0x00000006
#define regk_crypto_sha256	0x00000008
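
/*
 * Illustrative sketch (not part of the driver): the 4-byte metadata word that
 * starts every DMA packet is composed from the fields and constants above
 * with FIELD_PREP(). For example, an ARTPEC-6 SHA-256 operation that restores
 * an externally saved context would use
 *
 *	md = FIELD_PREP(A6_CRY_MD_OPER, regk_crypto_sha256) |
 *	     FIELD_PREP(A6_CRY_MD_HASH_SEL_CTX, regk_crypto_ext);
 *
 * i.e. (0x8 << 16) | (0x1 << 20) = 0x00180000.
 */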
/* DMA descriptor structures */
struct pdma_descr_ctrl {
	unsigned char short_descr : 1;
	unsigned char pad1 : 1;
	unsigned char eop : 1;
	unsigned char intr : 1;
	unsigned char short_len : 3;
	unsigned char pad2 : 1;
};

struct pdma_data_descr {
	unsigned int len : 24;
	unsigned int buf : 32;
};

struct pdma_short_descr {
	unsigned char data[7];
};

struct pdma_descr {
	struct pdma_descr_ctrl ctrl;
	union {
		struct pdma_data_descr data;
		struct pdma_short_descr shrt;
	};
};

struct pdma_stat_descr {
	unsigned char pad1 : 1;
	unsigned char pad2 : 1;
	unsigned char eop : 1;
	unsigned char pad3 : 5;
	unsigned int len : 24;
};

/* Each descriptor array can hold max 64 entries */
#define PDMA_DESCR_COUNT	64

#define MODULE_NAME		"Artpec-6 CA"

/* Hash modes (including HMAC variants) */
#define ARTPEC6_CRYPTO_HASH_SHA1	1
#define ARTPEC6_CRYPTO_HASH_SHA256	2

#define ARTPEC6_CRYPTO_CIPHER_AES_ECB	1
#define ARTPEC6_CRYPTO_CIPHER_AES_CBC	2
#define ARTPEC6_CRYPTO_CIPHER_AES_CTR	3
#define ARTPEC6_CRYPTO_CIPHER_AES_XTS	5
/* The PDMA is a DMA-engine tightly coupled with a ciphering engine.
 * It operates on a descriptor array with up to 64 descriptor entries.
 * The arrays must be 64 byte aligned in memory.
 *
 * The ciphering unit has no registers and is completely controlled by
 * a 4-byte metadata that is inserted at the beginning of each dma packet.
 *
 * A dma packet is a sequence of descriptors terminated by setting the .eop
 * field in the final descriptor of the packet.
 *
 * Multiple packets are used for providing context data, key data and
 * the plain/ciphertext.
 *
 *   PDMA Descriptors (Array)
 *  +------+------+------+~~+-------+------+----
 *  |  0   |  1   |  2   |~~| 11 EOP|  12  |  ....
 *  +--+---+--+---+----+-+~~+-------+----+-+----
 *
 *   __|__  +-------++-------++-------+ +----+
 *  | MD  | |Payload||Payload||Payload| | MD |
 *  +-----+ +-------++-------++-------+ +----+
 */
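
/*
 * Example (illustration only, based on the description above): a block-cipher
 * request is sent as two OUT packets, one that downloads the key
 * ([KEY_MD][KEY]<EOP>) and one that carries the cipher metadata, IV and the
 * source data, and it is received as one IN packet that starts with the
 * echoed metadata word followed by the result and ends in a descriptor with
 * the interrupt flag set. The exact layouts used by this driver are
 * documented before artpec6_crypto_prepare_crypto() below.
 */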
struct artpec6_crypto_bounce_buffer {
	struct list_head list;
	size_t length;
	struct scatterlist *sg;
	size_t offset;
	/* buf is aligned to ARTPEC_CACHE_LINE_MAX and
	 * holds up to ARTPEC_CACHE_LINE_MAX bytes data.
	 */
	void *buf;
};

struct artpec6_crypto_dma_map {
	dma_addr_t dma_addr;
	size_t size;
	enum dma_data_direction dir;
};

struct artpec6_crypto_dma_descriptors {
	struct pdma_descr out[PDMA_DESCR_COUNT] __aligned(64);
	struct pdma_descr in[PDMA_DESCR_COUNT] __aligned(64);
	u32 stat[PDMA_DESCR_COUNT] __aligned(64);
	struct list_head bounce_buffers;
	/* Enough maps for all out/in buffers, and all three descr. arrays */
	struct artpec6_crypto_dma_map maps[PDMA_DESCR_COUNT * 2 + 2];
	dma_addr_t out_dma_addr;
	dma_addr_t in_dma_addr;
	dma_addr_t stat_dma_addr;
	size_t out_cnt;
	size_t in_cnt;
	size_t map_count;
};
enum artpec6_crypto_variant {
	ARTPEC6_CRYPTO,
	ARTPEC7_CRYPTO,
};

struct artpec6_crypto {
	void __iomem *base;
	spinlock_t queue_lock;
	struct list_head queue; /* waiting for pdma fifo space */
	struct list_head pending; /* submitted to pdma fifo */
	struct tasklet_struct task;
	struct kmem_cache *dma_cache;
	int pending_count;
	struct timer_list timer;
	enum artpec6_crypto_variant variant;
	void *pad_buffer; /* cache-aligned block padding buffer */
	void *zero_buffer;
};

enum artpec6_crypto_hash_flags {
	HASH_FLAG_INIT_CTX = 2,
	HASH_FLAG_UPDATE = 4,
	HASH_FLAG_FINALIZE = 8,
	HASH_FLAG_HMAC = 16,
	HASH_FLAG_UPDATE_KEY = 32,
};
struct artpec6_crypto_req_common {
	struct list_head list;
	struct list_head complete_in_progress;
	struct artpec6_crypto_dma_descriptors *dma;
	struct crypto_async_request *req;
	void (*complete)(struct crypto_async_request *req);
	gfp_t gfp_flags;
};

struct artpec6_hash_request_context {
	char partial_buffer[SHA256_BLOCK_SIZE];
	char partial_buffer_out[SHA256_BLOCK_SIZE];
	char key_buffer[SHA256_BLOCK_SIZE];
	char pad_buffer[SHA256_BLOCK_SIZE + 32];
	unsigned char digeststate[SHA256_DIGEST_SIZE];
	size_t partial_bytes;
	u64 digcnt;
	u32 key_md;
	u32 hash_md;
	enum artpec6_crypto_hash_flags hash_flags;
	struct artpec6_crypto_req_common common;
};
struct artpec6_hash_export_state {
	char partial_buffer[SHA256_BLOCK_SIZE];
	unsigned char digeststate[SHA256_DIGEST_SIZE];
	size_t partial_bytes;
	u64 digcnt;
	int oper;
	unsigned int hash_flags;
};
struct artpec6_hashalg_context {
	char hmac_key[SHA256_BLOCK_SIZE];
	size_t hmac_key_length;
	struct crypto_shash *child_hash;
};
struct artpec6_crypto_request_context {
	u32 cipher_md;
	bool decrypt;
	struct artpec6_crypto_req_common common;
};

struct artpec6_cryptotfm_context {
	unsigned char aes_key[2*AES_MAX_KEY_SIZE];
	size_t key_length;
	u32 key_md;
	int crypto_type;
	struct crypto_sync_skcipher *fallback;
};
struct artpec6_crypto_aead_hw_ctx {
	__be64	aad_length_bits;
	__be64	text_length_bits;
	__u8	J0[AES_BLOCK_SIZE];
};

struct artpec6_crypto_aead_req_ctx {
	struct artpec6_crypto_aead_hw_ctx hw_ctx;
	u32 cipher_md;
	bool decrypt;
	struct artpec6_crypto_req_common common;
	__u8 decryption_tag[AES_BLOCK_SIZE] ____cacheline_aligned;
};
/* The crypto framework makes it hard to avoid this global. */
static struct device *artpec6_crypto_dev;

#ifdef CONFIG_FAULT_INJECTION
static DECLARE_FAULT_ATTR(artpec6_crypto_fail_status_read);
static DECLARE_FAULT_ATTR(artpec6_crypto_fail_dma_array_full);
#endif

enum {
	ARTPEC6_CRYPTO_PREPARE_HASH_NO_START,
	ARTPEC6_CRYPTO_PREPARE_HASH_START,
};
static int artpec6_crypto_prepare_aead(struct aead_request *areq);
static int artpec6_crypto_prepare_crypto(struct skcipher_request *areq);
static int artpec6_crypto_prepare_hash(struct ahash_request *areq);

static void
artpec6_crypto_complete_crypto(struct crypto_async_request *req);
static void
artpec6_crypto_complete_cbc_encrypt(struct crypto_async_request *req);
static void
artpec6_crypto_complete_cbc_decrypt(struct crypto_async_request *req);
static void
artpec6_crypto_complete_aead(struct crypto_async_request *req);
static void
artpec6_crypto_complete_hash(struct crypto_async_request *req);

static int
artpec6_crypto_common_destroy(struct artpec6_crypto_req_common *common);

static void
artpec6_crypto_start_dma(struct artpec6_crypto_req_common *common);

struct artpec6_crypto_walk {
	struct scatterlist *sg;
	size_t offset;
};
static void artpec6_crypto_walk_init(struct artpec6_crypto_walk *awalk,
				     struct scatterlist *sg)
{
	awalk->sg = sg;
	awalk->offset = 0;
}

static size_t artpec6_crypto_walk_advance(struct artpec6_crypto_walk *awalk,
					  size_t nbytes)
{
	while (nbytes && awalk->sg) {
		size_t piece;

		WARN_ON(awalk->offset > awalk->sg->length);

		piece = min(nbytes, (size_t)awalk->sg->length - awalk->offset);
		nbytes -= piece;
		awalk->offset += piece;
		if (awalk->offset == awalk->sg->length) {
			awalk->sg = sg_next(awalk->sg);
			awalk->offset = 0;
		}
	}

	return nbytes;
}

static size_t
artpec6_crypto_walk_chunklen(const struct artpec6_crypto_walk *awalk)
{
	WARN_ON(awalk->sg->length == awalk->offset);

	return awalk->sg->length - awalk->offset;
}

static dma_addr_t
artpec6_crypto_walk_chunk_phys(const struct artpec6_crypto_walk *awalk)
{
	return sg_phys(awalk->sg) + awalk->offset;
}
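
/*
 * Usage sketch (illustration only): the walk helpers above iterate over a
 * scatterlist in physically contiguous chunks, e.g.:
 *
 *	struct artpec6_crypto_walk w;
 *
 *	artpec6_crypto_walk_init(&w, req->src);
 *	while (w.sg && nbytes) {
 *		size_t chunk = min(nbytes, artpec6_crypto_walk_chunklen(&w));
 *		dma_addr_t addr = artpec6_crypto_walk_chunk_phys(&w);
 *		... map or bounce [addr, addr + chunk) ...
 *		nbytes -= chunk;
 *		artpec6_crypto_walk_advance(&w, chunk);
 *	}
 *
 * This is the pattern used by artpec6_crypto_setup_sg_descrs_in/out() below.
 */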
static void
artpec6_crypto_copy_bounce_buffers(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct artpec6_crypto_bounce_buffer *b;
	struct artpec6_crypto_bounce_buffer *next;

	list_for_each_entry_safe(b, next, &dma->bounce_buffers, list) {
		pr_debug("bounce entry %p: %zu bytes @ %zu from %p\n",
			 b, b->length, b->offset, b->buf);
		sg_pcopy_from_buffer(b->sg, 1, b->buf, b->length, b->offset);
	}
}
static inline bool artpec6_crypto_busy(void)
{
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	int fifo_count = ac->pending_count;

	return fifo_count > 6;
}
static int artpec6_crypto_submit(struct artpec6_crypto_req_common *req)
{
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	int ret = -EINPROGRESS;

	spin_lock_bh(&ac->queue_lock);

	if (!artpec6_crypto_busy()) {
		list_add_tail(&req->list, &ac->pending);
		artpec6_crypto_start_dma(req);
	} else if (req->req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG) {
		list_add_tail(&req->list, &ac->queue);
	} else {
		artpec6_crypto_common_destroy(req);
		ret = -EBUSY;
	}

	spin_unlock_bh(&ac->queue_lock);

	return ret;
}
static void artpec6_crypto_start_dma(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	enum artpec6_crypto_variant variant = ac->variant;
	void __iomem *base = ac->base;
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	u32 ind, statd, outd;

	/* Make descriptor content visible to the DMA before starting it. */
	wmb();

	ind = FIELD_PREP(PDMA_IN_DESCRQ_PUSH_LEN, dma->in_cnt - 1) |
	      FIELD_PREP(PDMA_IN_DESCRQ_PUSH_ADDR, dma->in_dma_addr >> 6);

	statd = FIELD_PREP(PDMA_IN_STATQ_PUSH_LEN, dma->in_cnt - 1) |
		FIELD_PREP(PDMA_IN_STATQ_PUSH_ADDR, dma->stat_dma_addr >> 6);

	outd = FIELD_PREP(PDMA_OUT_DESCRQ_PUSH_LEN, dma->out_cnt - 1) |
	       FIELD_PREP(PDMA_OUT_DESCRQ_PUSH_ADDR, dma->out_dma_addr >> 6);

	if (variant == ARTPEC6_CRYPTO) {
		writel_relaxed(ind, base + A6_PDMA_IN_DESCRQ_PUSH);
		writel_relaxed(statd, base + A6_PDMA_IN_STATQ_PUSH);
		writel_relaxed(PDMA_IN_CMD_START, base + A6_PDMA_IN_CMD);
	} else {
		writel_relaxed(ind, base + A7_PDMA_IN_DESCRQ_PUSH);
		writel_relaxed(statd, base + A7_PDMA_IN_STATQ_PUSH);
		writel_relaxed(PDMA_IN_CMD_START, base + A7_PDMA_IN_CMD);
	}

	writel_relaxed(outd, base + PDMA_OUT_DESCRQ_PUSH);
	writel_relaxed(PDMA_OUT_CMD_START, base + PDMA_OUT_CMD);

	ac->pending_count++;
}
static void
artpec6_crypto_init_dma_operation(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;

	dma->out_cnt = 0;
	dma->in_cnt = 0;
	dma->map_count = 0;
	INIT_LIST_HEAD(&dma->bounce_buffers);
}

static bool fault_inject_dma_descr(void)
{
#ifdef CONFIG_FAULT_INJECTION
	return should_fail(&artpec6_crypto_fail_dma_array_full, 1);
#else
	return false;
#endif
}
/** artpec6_crypto_setup_out_descr_phys - Setup an out channel with a
 *                                        physical address
 * @addr: The physical address of the data buffer
 * @len:  The length of the data buffer
 * @eop:  True if this is the last buffer in the packet
 *
 * @return 0 on success or -ENOSPC if there are no more descriptors available
 */
static int
artpec6_crypto_setup_out_descr_phys(struct artpec6_crypto_req_common *common,
				    dma_addr_t addr, size_t len, bool eop)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct pdma_descr *d;

	if (dma->out_cnt >= PDMA_DESCR_COUNT ||
	    fault_inject_dma_descr()) {
		pr_err("No free OUT DMA descriptors available!\n");
		return -ENOSPC;
	}

	d = &dma->out[dma->out_cnt++];
	memset(d, 0, sizeof(*d));

	d->ctrl.short_descr = 0;
	d->ctrl.eop = eop;
	d->data.len = len;
	d->data.buf = addr;

	return 0;
}
/** artpec6_crypto_setup_out_descr_short - Setup a short out descriptor
 * @dst: The virtual address of the data
 * @len: The length of the data, must be between 1 and 7 bytes
 * @eop: True if this is the last buffer in the packet
 *
 * @return 0 on success
 *	-ENOSPC if no more descriptors are available
 *	-EINVAL if the data length exceeds 7 bytes
 */
static int
artpec6_crypto_setup_out_descr_short(struct artpec6_crypto_req_common *common,
				     void *dst, unsigned int len, bool eop)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct pdma_descr *d;

	if (dma->out_cnt >= PDMA_DESCR_COUNT ||
	    fault_inject_dma_descr()) {
		pr_err("No free OUT DMA descriptors available!\n");
		return -ENOSPC;
	} else if (len > 7 || len < 1) {
		return -EINVAL;
	}

	d = &dma->out[dma->out_cnt++];
	memset(d, 0, sizeof(*d));

	d->ctrl.short_descr = 1;
	d->ctrl.short_len = len;
	d->ctrl.eop = eop;

	memcpy(d->shrt.data, dst, len);

	return 0;
}
static int artpec6_crypto_dma_map_page(struct artpec6_crypto_req_common *common,
				       struct page *page, size_t offset,
				       size_t size,
				       enum dma_data_direction dir,
				       dma_addr_t *dma_addr_out)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct device *dev = artpec6_crypto_dev;
	struct artpec6_crypto_dma_map *map;
	dma_addr_t dma_addr;

	*dma_addr_out = 0;

	if (dma->map_count >= ARRAY_SIZE(dma->maps))
		return -ENOMEM;

	dma_addr = dma_map_page(dev, page, offset, size, dir);
	if (dma_mapping_error(dev, dma_addr))
		return -ENOMEM;

	map = &dma->maps[dma->map_count++];
	map->size = size;
	map->dma_addr = dma_addr;
	map->dir = dir;

	*dma_addr_out = dma_addr;

	return 0;
}
static int
artpec6_crypto_dma_map_single(struct artpec6_crypto_req_common *common,
			      void *ptr, size_t size,
			      enum dma_data_direction dir,
			      dma_addr_t *dma_addr_out)
{
	struct page *page = virt_to_page(ptr);
	size_t offset = (uintptr_t)ptr & ~PAGE_MASK;

	return artpec6_crypto_dma_map_page(common, page, offset, size, dir,
					   dma_addr_out);
}
static int
artpec6_crypto_dma_map_descs(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	int ret;

	ret = artpec6_crypto_dma_map_single(common, dma->in,
				sizeof(dma->in[0]) * dma->in_cnt,
				DMA_TO_DEVICE, &dma->in_dma_addr);
	if (ret)
		return ret;

	ret = artpec6_crypto_dma_map_single(common, dma->out,
				sizeof(dma->out[0]) * dma->out_cnt,
				DMA_TO_DEVICE, &dma->out_dma_addr);
	if (ret)
		return ret;

	/* We only read one stat descriptor */
	dma->stat[dma->in_cnt - 1] = 0;

	/*
	 * DMA_BIDIRECTIONAL since we need our zeroing of the stat descriptor
	 * to reach memory before the hardware writes back the final status.
	 */
	return artpec6_crypto_dma_map_single(common,
				dma->stat,
				sizeof(dma->stat[0]) * dma->in_cnt,
				DMA_BIDIRECTIONAL,
				&dma->stat_dma_addr);
}
static void
artpec6_crypto_dma_unmap_all(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct device *dev = artpec6_crypto_dev;
	int i;

	for (i = 0; i < dma->map_count; i++) {
		struct artpec6_crypto_dma_map *map = &dma->maps[i];

		dma_unmap_page(dev, map->dma_addr, map->size, map->dir);
	}

	dma->map_count = 0;
}
/** artpec6_crypto_setup_out_descr - Setup an out descriptor
 * @dst: The virtual address of the data
 * @len: The length of the data
 * @eop: True if this is the last buffer in the packet
 * @use_short: If this is true and the data length is 7 bytes or less then
 *	a short descriptor will be used
 *
 * @return 0 on success
 *	Any errors from artpec6_crypto_setup_out_descr_short() or
 *	artpec6_crypto_setup_out_descr_phys()
 */
static int
artpec6_crypto_setup_out_descr(struct artpec6_crypto_req_common *common,
			       void *dst, unsigned int len, bool eop,
			       bool use_short)
{
	if (use_short && len < 7) {
		return artpec6_crypto_setup_out_descr_short(common, dst, len,
							    eop);
	} else {
		int ret;
		dma_addr_t dma_addr;

		ret = artpec6_crypto_dma_map_single(common, dst, len,
						    DMA_TO_DEVICE,
						    &dma_addr);
		if (ret)
			return ret;

		return artpec6_crypto_setup_out_descr_phys(common, dma_addr,
							   len, eop);
	}
}
/** artpec6_crypto_setup_in_descr_phys - Setup an in channel with a
 *                                       physical address
 * @addr: The physical address of the data buffer
 * @len:  The length of the data buffer
 * @intr: True if an interrupt should be fired after HW processing of this
 *	  descriptor
 */
static int
artpec6_crypto_setup_in_descr_phys(struct artpec6_crypto_req_common *common,
				   dma_addr_t addr, unsigned int len, bool intr)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct pdma_descr *d;

	if (dma->in_cnt >= PDMA_DESCR_COUNT ||
	    fault_inject_dma_descr()) {
		pr_err("No free IN DMA descriptors available!\n");
		return -ENOSPC;
	}
	d = &dma->in[dma->in_cnt++];
	memset(d, 0, sizeof(*d));

	d->ctrl.intr = intr;
	d->data.len = len;
	d->data.buf = addr;

	return 0;
}
/** artpec6_crypto_setup_in_descr - Setup an in channel descriptor
 * @buffer: The virtual address of the data buffer
 * @len:    The length of the data buffer
 * @last:   If this is the last data buffer in the request (i.e. an interrupt
 *	    is needed)
 *
 * Short descriptors are not used for the in channel
 */
static int
artpec6_crypto_setup_in_descr(struct artpec6_crypto_req_common *common,
			      void *buffer, unsigned int len, bool last)
{
	dma_addr_t dma_addr;
	int ret;

	ret = artpec6_crypto_dma_map_single(common, buffer, len,
					    DMA_FROM_DEVICE, &dma_addr);
	if (ret)
		return ret;

	return artpec6_crypto_setup_in_descr_phys(common, dma_addr, len, last);
}
static struct artpec6_crypto_bounce_buffer *
artpec6_crypto_alloc_bounce(gfp_t flags)
{
	void *base;
	size_t alloc_size = sizeof(struct artpec6_crypto_bounce_buffer) +
			    2 * ARTPEC_CACHE_LINE_MAX;
	struct artpec6_crypto_bounce_buffer *bbuf = kzalloc(alloc_size, flags);

	if (!bbuf)
		return NULL;

	base = bbuf + 1;
	bbuf->buf = PTR_ALIGN(base, ARTPEC_CACHE_LINE_MAX);
	return bbuf;
}
static int setup_bounce_buffer_in(struct artpec6_crypto_req_common *common,
				  struct artpec6_crypto_walk *walk, size_t size)
{
	struct artpec6_crypto_bounce_buffer *bbuf;
	int ret;

	bbuf = artpec6_crypto_alloc_bounce(common->gfp_flags);
	if (!bbuf)
		return -ENOMEM;

	bbuf->length = size;
	bbuf->sg = walk->sg;
	bbuf->offset = walk->offset;

	ret = artpec6_crypto_setup_in_descr(common, bbuf->buf, size, false);
	if (ret) {
		kfree(bbuf);
		return ret;
	}

	pr_debug("BOUNCE %zu offset %zu\n", size, walk->offset);
	list_add_tail(&bbuf->list, &common->dma->bounce_buffers);
	return 0;
}
static int
artpec6_crypto_setup_sg_descrs_in(struct artpec6_crypto_req_common *common,
				  struct artpec6_crypto_walk *walk,
				  size_t count)
{
	size_t chunk;
	int ret;
	dma_addr_t addr;

	while (walk->sg && count) {
		chunk = min(count, artpec6_crypto_walk_chunklen(walk));
		addr = artpec6_crypto_walk_chunk_phys(walk);

		/* When destination buffers are not aligned to the cache line
		 * size we need bounce buffers. The DMA-API requires that the
		 * entire line is owned by the DMA buffer and this holds also
		 * for the case when coherent DMA is used.
		 */
		if (!IS_ALIGNED(addr, ARTPEC_CACHE_LINE_MAX)) {
			chunk = min_t(dma_addr_t, chunk,
				      ALIGN(addr, ARTPEC_CACHE_LINE_MAX) -
				      addr);

			pr_debug("CHUNK-b %pad:%zu\n", &addr, chunk);
			ret = setup_bounce_buffer_in(common, walk, chunk);
		} else if (chunk < ARTPEC_CACHE_LINE_MAX) {
			pr_debug("CHUNK-b %pad:%zu\n", &addr, chunk);
			ret = setup_bounce_buffer_in(common, walk, chunk);
		} else {
			dma_addr_t dma_addr;

			chunk = chunk & ~(ARTPEC_CACHE_LINE_MAX-1);

			pr_debug("CHUNK %pad:%zu\n", &addr, chunk);

			ret = artpec6_crypto_dma_map_page(common,
							  sg_page(walk->sg),
							  walk->offset,
							  chunk,
							  DMA_FROM_DEVICE,
							  &dma_addr);
			if (ret)
				return ret;

			ret = artpec6_crypto_setup_in_descr_phys(common,
								 dma_addr,
								 chunk, false);
		}

		if (ret)
			return ret;

		count = count - chunk;
		artpec6_crypto_walk_advance(walk, chunk);
	}

	if (count)
		pr_err("EOL unexpected %zu bytes left\n", count);

	return count ? -EINVAL : 0;
}
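
/*
 * Worked example (illustration only): with ARTPEC_CACHE_LINE_MAX = 32, a
 * destination chunk of 100 bytes starting 20 bytes into a cache line is
 * split into three IN descriptors:
 *   - 12 bytes through a bounce buffer (up to the next 32-byte boundary),
 *   - 64 bytes mapped directly (88 remaining bytes rounded down to 32),
 *   - 24 bytes through a second bounce buffer (the unaligned tail).
 */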
static int
artpec6_crypto_setup_sg_descrs_out(struct artpec6_crypto_req_common *common,
				   struct artpec6_crypto_walk *walk,
				   size_t count)
{
	size_t chunk;
	int ret;
	dma_addr_t addr;

	while (walk->sg && count) {
		chunk = min(count, artpec6_crypto_walk_chunklen(walk));
		addr = artpec6_crypto_walk_chunk_phys(walk);

		pr_debug("OUT-CHUNK %pad:%zu\n", &addr, chunk);

		if (addr & 3) {
			char buf[3];

			chunk = min_t(size_t, chunk, (4-(addr&3)));

			sg_pcopy_to_buffer(walk->sg, 1, buf, chunk,
					   walk->offset);

			ret = artpec6_crypto_setup_out_descr_short(common, buf,
								   chunk,
								   false);
		} else {
			dma_addr_t dma_addr;

			ret = artpec6_crypto_dma_map_page(common,
							  sg_page(walk->sg),
							  walk->offset,
							  chunk,
							  DMA_TO_DEVICE,
							  &dma_addr);
			if (ret)
				return ret;

			ret = artpec6_crypto_setup_out_descr_phys(common,
								  dma_addr,
								  chunk,
								  false);
		}

		if (ret)
			return ret;

		count = count - chunk;
		artpec6_crypto_walk_advance(walk, chunk);
	}

	if (count)
		pr_err("EOL unexpected %zu bytes left\n", count);

	return count ? -EINVAL : 0;
}
/** artpec6_crypto_terminate_out_descrs - Set the EOP on the last out descriptor
 *
 * If the out descriptor list is non-empty, then the eop flag on the
 * last used out descriptor will be set.
 *
 * @return 0 on success
 *	-EINVAL if the out descriptor list is empty or has overflown
 */
static int
artpec6_crypto_terminate_out_descrs(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct pdma_descr *d;

	if (!dma->out_cnt || dma->out_cnt > PDMA_DESCR_COUNT) {
		pr_err("%s: OUT descriptor list is %s\n",
			MODULE_NAME, dma->out_cnt ? "full" : "empty");
		return -EINVAL;
	}

	d = &dma->out[dma->out_cnt-1];
	d->ctrl.eop = 1;

	return 0;
}
/** artpec6_crypto_terminate_in_descrs - Set the interrupt flag on the last
 *                                       in descriptor
 *
 * See artpec6_crypto_terminate_out_descrs() for return values
 */
static int
artpec6_crypto_terminate_in_descrs(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct pdma_descr *d;

	if (!dma->in_cnt || dma->in_cnt > PDMA_DESCR_COUNT) {
		pr_err("%s: IN descriptor list is %s\n",
			MODULE_NAME, dma->in_cnt ? "full" : "empty");
		return -EINVAL;
	}

	d = &dma->in[dma->in_cnt-1];
	d->ctrl.intr = 1;

	return 0;
}
/** create_hash_pad - Create a Secure Hash conformant pad
 *
 * @dst:      The destination buffer to write the pad. Must be at least 64 bytes
 * @dgstlen:  The total length of the hashed data in bytes
 * @bitcount: The total length of the hashed data in bits
 *
 * @return The total number of padding bytes written to @dst
 */
static size_t
create_hash_pad(int oper, unsigned char *dst, u64 dgstlen, u64 bitcount)
{
	unsigned int mod, target, diff, pad_bytes, size_bytes;
	__be64 bits = __cpu_to_be64(bitcount);

	switch (oper) {
	case regk_crypto_sha1:
	case regk_crypto_sha256:
	case regk_crypto_hmac_sha1:
	case regk_crypto_hmac_sha256:
		target = 448 / 8;
		mod = 512 / 8;
		size_bytes = 8;
		break;
	default:
		target = 896 / 8;
		mod = 1024 / 8;
		size_bytes = 16;
		break;
	}

	target -= 1;
	diff = dgstlen & (mod - 1);
	pad_bytes = diff > target ? target + mod - diff : target - diff;

	memset(dst + 1, 0, pad_bytes);
	dst[0] = 0x80;

	if (size_bytes == 16) {
		memset(dst + 1 + pad_bytes, 0, 8);
		memcpy(dst + 1 + pad_bytes + 8, &bits, 8);
	} else {
		memcpy(dst + 1 + pad_bytes, &bits, 8);
	}

	return pad_bytes + size_bytes + 1;
}
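
/*
 * Worked example (illustration only): hashing 3 bytes of data with SHA-256
 * gives dgstlen = 3 and bitcount = 24. The pad starts with a 0x80 byte,
 * followed by 52 zero bytes and the 64-bit big-endian bit count, so
 * create_hash_pad() returns 61 and the padded message length is
 * 3 + 61 = 64 bytes, exactly one SHA-256 block.
 */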
static int artpec6_crypto_common_init(struct artpec6_crypto_req_common *common,
				      struct crypto_async_request *parent,
				      void (*complete)(struct crypto_async_request *req),
				      struct scatterlist *dstsg, unsigned int nbytes)
{
	gfp_t flags;
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);

	flags = (parent->flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		 GFP_KERNEL : GFP_ATOMIC;

	common->gfp_flags = flags;
	common->dma = kmem_cache_alloc(ac->dma_cache, flags);
	if (!common->dma)
		return -ENOMEM;

	common->req = parent;
	common->complete = complete;
	return 0;
}
static void
artpec6_crypto_bounce_destroy(struct artpec6_crypto_dma_descriptors *dma)
{
	struct artpec6_crypto_bounce_buffer *b;
	struct artpec6_crypto_bounce_buffer *next;

	list_for_each_entry_safe(b, next, &dma->bounce_buffers, list) {
		kfree(b);
	}
}
static int
artpec6_crypto_common_destroy(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);

	artpec6_crypto_dma_unmap_all(common);
	artpec6_crypto_bounce_destroy(common->dma);
	kmem_cache_free(ac->dma_cache, common->dma);
	common->dma = NULL;
	return 0;
}
/*
 * Ciphering functions.
 */
static int artpec6_crypto_encrypt(struct skcipher_request *req)
{
	int ret;
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
	struct artpec6_crypto_request_context *req_ctx = NULL;
	void (*complete)(struct crypto_async_request *req);

	req_ctx = skcipher_request_ctx(req);

	switch (ctx->crypto_type) {
	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
	case ARTPEC6_CRYPTO_CIPHER_AES_ECB:
	case ARTPEC6_CRYPTO_CIPHER_AES_XTS:
		req_ctx->decrypt = 0;
		break;
	default:
		break;
	}

	switch (ctx->crypto_type) {
	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
		complete = artpec6_crypto_complete_cbc_encrypt;
		break;
	default:
		complete = artpec6_crypto_complete_crypto;
		break;
	}

	ret = artpec6_crypto_common_init(&req_ctx->common,
					 &req->base,
					 complete,
					 req->dst, req->cryptlen);
	if (ret)
		return ret;

	ret = artpec6_crypto_prepare_crypto(req);
	if (ret) {
		artpec6_crypto_common_destroy(&req_ctx->common);
		return ret;
	}

	return artpec6_crypto_submit(&req_ctx->common);
}
static int artpec6_crypto_decrypt(struct skcipher_request *req)
{
	int ret;
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
	struct artpec6_crypto_request_context *req_ctx = NULL;
	void (*complete)(struct crypto_async_request *req);

	req_ctx = skcipher_request_ctx(req);

	switch (ctx->crypto_type) {
	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
	case ARTPEC6_CRYPTO_CIPHER_AES_ECB:
	case ARTPEC6_CRYPTO_CIPHER_AES_XTS:
		req_ctx->decrypt = 1;
		break;
	default:
		break;
	}

	switch (ctx->crypto_type) {
	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
		complete = artpec6_crypto_complete_cbc_decrypt;
		break;
	default:
		complete = artpec6_crypto_complete_crypto;
		break;
	}

	ret = artpec6_crypto_common_init(&req_ctx->common, &req->base,
					 complete,
					 req->dst, req->cryptlen);
	if (ret)
		return ret;

	ret = artpec6_crypto_prepare_crypto(req);
	if (ret) {
		artpec6_crypto_common_destroy(&req_ctx->common);
		return ret;
	}

	return artpec6_crypto_submit(&req_ctx->common);
}
static int
artpec6_crypto_ctr_crypt(struct skcipher_request *req, bool encrypt)
{
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
	size_t iv_len = crypto_skcipher_ivsize(cipher);
	unsigned int counter = be32_to_cpup((__be32 *)
					    (req->iv + iv_len - 4));
	unsigned int nblks = ALIGN(req->cryptlen, AES_BLOCK_SIZE) /
			     AES_BLOCK_SIZE;

	/*
	 * The hardware uses only the last 32-bits as the counter while the
	 * kernel tests (aes_ctr_enc_tv_template[4] for example) expect that
	 * the whole IV is a counter. So fallback if the counter is going to
	 * overflow.
	 */
	if (counter + nblks < counter) {
		int ret;

		pr_debug("counter %x will overflow (nblks %u), falling back\n",
			 counter, counter + nblks);

		ret = crypto_sync_skcipher_setkey(ctx->fallback, ctx->aes_key,
						  ctx->key_length);
		if (ret)
			return ret;

		{
			SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);

			skcipher_request_set_sync_tfm(subreq, ctx->fallback);
			skcipher_request_set_callback(subreq, req->base.flags,
						      NULL, NULL);
			skcipher_request_set_crypt(subreq, req->src, req->dst,
						   req->cryptlen, req->iv);
			ret = encrypt ? crypto_skcipher_encrypt(subreq)
				      : crypto_skcipher_decrypt(subreq);
			skcipher_request_zero(subreq);
		}

		return ret;
	}

	return encrypt ? artpec6_crypto_encrypt(req)
		       : artpec6_crypto_decrypt(req);
}
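
/*
 * Example (illustration only): with a 16-byte IV whose last four bytes are
 * ff ff ff fe (counter = 0xfffffffe) and a 64-byte request (nblks = 4),
 * counter + nblks wraps the 32-bit counter, so the request is handed to the
 * software fallback instead of the hardware.
 */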
static int artpec6_crypto_ctr_encrypt(struct skcipher_request *req)
{
	return artpec6_crypto_ctr_crypt(req, true);
}

static int artpec6_crypto_ctr_decrypt(struct skcipher_request *req)
{
	return artpec6_crypto_ctr_crypt(req, false);
}
static int artpec6_crypto_aead_init(struct crypto_aead *tfm)
{
	struct artpec6_cryptotfm_context *tfm_ctx = crypto_aead_ctx(tfm);

	memset(tfm_ctx, 0, sizeof(*tfm_ctx));

	crypto_aead_set_reqsize(tfm,
				sizeof(struct artpec6_crypto_aead_req_ctx));

	return 0;
}

static int artpec6_crypto_aead_set_key(struct crypto_aead *tfm, const u8 *key,
				       unsigned int len)
{
	struct artpec6_cryptotfm_context *ctx = crypto_tfm_ctx(&tfm->base);

	if (len != 16 && len != 24 && len != 32)
		return -EINVAL;

	ctx->key_length = len;

	memcpy(ctx->aes_key, key, len);
	return 0;
}
static int artpec6_crypto_aead_encrypt(struct aead_request *req)
{
	int ret;
	struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(req);

	req_ctx->decrypt = false;
	ret = artpec6_crypto_common_init(&req_ctx->common, &req->base,
					 artpec6_crypto_complete_aead,
					 NULL, 0);
	if (ret)
		return ret;

	ret = artpec6_crypto_prepare_aead(req);
	if (ret) {
		artpec6_crypto_common_destroy(&req_ctx->common);
		return ret;
	}

	return artpec6_crypto_submit(&req_ctx->common);
}
static int artpec6_crypto_aead_decrypt(struct aead_request *req)
{
	int ret;
	struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(req);

	req_ctx->decrypt = true;
	if (req->cryptlen < AES_BLOCK_SIZE)
		return -EINVAL;

	ret = artpec6_crypto_common_init(&req_ctx->common,
					 &req->base,
					 artpec6_crypto_complete_aead,
					 NULL, 0);
	if (ret)
		return ret;

	ret = artpec6_crypto_prepare_aead(req);
	if (ret) {
		artpec6_crypto_common_destroy(&req_ctx->common);
		return ret;
	}

	return artpec6_crypto_submit(&req_ctx->common);
}
static int artpec6_crypto_prepare_hash(struct ahash_request *areq)
{
	struct artpec6_hashalg_context *ctx = crypto_tfm_ctx(areq->base.tfm);
	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(areq);
	size_t digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(areq));
	size_t contextsize = digestsize;
	size_t blocksize = crypto_tfm_alg_blocksize(
		crypto_ahash_tfm(crypto_ahash_reqtfm(areq)));
	struct artpec6_crypto_req_common *common = &req_ctx->common;
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	enum artpec6_crypto_variant variant = ac->variant;
	u32 sel_ctx;
	bool ext_ctx = false;
	bool run_hw = false;
	int error = 0;

	artpec6_crypto_init_dma_operation(common);

	/* Upload HMAC key, must be the first packet */
	if (req_ctx->hash_flags & HASH_FLAG_HMAC) {
		if (variant == ARTPEC6_CRYPTO) {
			req_ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER,
						     a6_regk_crypto_dlkey);
		} else {
			req_ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER,
						     a7_regk_crypto_dlkey);
		}

		/* Copy and pad up the key */
		memcpy(req_ctx->key_buffer, ctx->hmac_key,
		       ctx->hmac_key_length);
		memset(req_ctx->key_buffer + ctx->hmac_key_length, 0,
		       blocksize - ctx->hmac_key_length);

		error = artpec6_crypto_setup_out_descr(common,
					(void *)&req_ctx->key_md,
					sizeof(req_ctx->key_md), false, false);
		if (error)
			return error;

		error = artpec6_crypto_setup_out_descr(common,
					req_ctx->key_buffer, blocksize,
					true, false);
		if (error)
			return error;
	}

	if (!(req_ctx->hash_flags & HASH_FLAG_INIT_CTX)) {
		/* Restore context */
		sel_ctx = regk_crypto_ext;
		ext_ctx = true;
	} else {
		sel_ctx = regk_crypto_init;
	}

	if (variant == ARTPEC6_CRYPTO) {
		req_ctx->hash_md &= ~A6_CRY_MD_HASH_SEL_CTX;
		req_ctx->hash_md |= FIELD_PREP(A6_CRY_MD_HASH_SEL_CTX, sel_ctx);

		/* If this is the final round, set the final flag */
		if (req_ctx->hash_flags & HASH_FLAG_FINALIZE)
			req_ctx->hash_md |= A6_CRY_MD_HASH_HMAC_FIN;
	} else {
		req_ctx->hash_md &= ~A7_CRY_MD_HASH_SEL_CTX;
		req_ctx->hash_md |= FIELD_PREP(A7_CRY_MD_HASH_SEL_CTX, sel_ctx);

		/* If this is the final round, set the final flag */
		if (req_ctx->hash_flags & HASH_FLAG_FINALIZE)
			req_ctx->hash_md |= A7_CRY_MD_HASH_HMAC_FIN;
	}

	/* Set up metadata descriptors */
	error = artpec6_crypto_setup_out_descr(common,
				(void *)&req_ctx->hash_md,
				sizeof(req_ctx->hash_md), false, false);
	if (error)
		return error;

	error = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false);
	if (error)
		return error;

	if (ext_ctx) {
		error = artpec6_crypto_setup_out_descr(common,
					req_ctx->digeststate,
					contextsize, false, false);
		if (error)
			return error;
	}

	if (req_ctx->hash_flags & HASH_FLAG_UPDATE) {
		size_t done_bytes = 0;
		size_t total_bytes = areq->nbytes + req_ctx->partial_bytes;
		size_t ready_bytes = round_down(total_bytes, blocksize);
		struct artpec6_crypto_walk walk;

		run_hw = ready_bytes > 0;
		if (req_ctx->partial_bytes && ready_bytes) {
			/* We have a partial buffer and will send at least some
			 * bytes to the HW. Empty this partial buffer before
			 * tackling the SG lists
			 */
			memcpy(req_ctx->partial_buffer_out,
			       req_ctx->partial_buffer,
			       req_ctx->partial_bytes);

			error = artpec6_crypto_setup_out_descr(common,
						req_ctx->partial_buffer_out,
						req_ctx->partial_bytes,
						false, true);
			if (error)
				return error;

			/* Reset partial buffer */
			done_bytes += req_ctx->partial_bytes;
			req_ctx->partial_bytes = 0;
		}

		artpec6_crypto_walk_init(&walk, areq->src);

		error = artpec6_crypto_setup_sg_descrs_out(common, &walk,
							   ready_bytes -
							   done_bytes);
		if (error)
			return error;

		if (walk.sg) {
			size_t sg_skip = ready_bytes - done_bytes;
			size_t sg_rem = areq->nbytes - sg_skip;

			sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
					   req_ctx->partial_buffer +
					   req_ctx->partial_bytes,
					   sg_rem, sg_skip);

			req_ctx->partial_bytes += sg_rem;
		}

		req_ctx->digcnt += ready_bytes;
		req_ctx->hash_flags &= ~(HASH_FLAG_UPDATE);
	}

	if (req_ctx->hash_flags & HASH_FLAG_FINALIZE) {
		size_t hash_pad_len;
		u64 digest_bits;
		u32 oper;

		if (variant == ARTPEC6_CRYPTO)
			oper = FIELD_GET(A6_CRY_MD_OPER, req_ctx->hash_md);
		else
			oper = FIELD_GET(A7_CRY_MD_OPER, req_ctx->hash_md);

		/* Write out the partial buffer if present */
		if (req_ctx->partial_bytes) {
			memcpy(req_ctx->partial_buffer_out,
			       req_ctx->partial_buffer,
			       req_ctx->partial_bytes);
			error = artpec6_crypto_setup_out_descr(common,
						req_ctx->partial_buffer_out,
						req_ctx->partial_bytes,
						false, true);
			if (error)
				return error;

			req_ctx->digcnt += req_ctx->partial_bytes;
			req_ctx->partial_bytes = 0;
		}

		if (req_ctx->hash_flags & HASH_FLAG_HMAC)
			digest_bits = 8 * (req_ctx->digcnt + blocksize);
		else
			digest_bits = 8 * req_ctx->digcnt;

		/* Add the hash pad */
		hash_pad_len = create_hash_pad(oper, req_ctx->pad_buffer,
					       req_ctx->digcnt, digest_bits);
		error = artpec6_crypto_setup_out_descr(common,
						       req_ctx->pad_buffer,
						       hash_pad_len, false,
						       false);
		req_ctx->digcnt = 0;

		if (error)
			return error;

		/* Descriptor for the final result */
		error = artpec6_crypto_setup_in_descr(common, areq->result,
						      digestsize,
						      true);
		if (error)
			return error;

	} else { /* This is not the final operation for this request */
		if (!run_hw)
			return ARTPEC6_CRYPTO_PREPARE_HASH_NO_START;

		/* Save the result to the context */
		error = artpec6_crypto_setup_in_descr(common,
						      req_ctx->digeststate,
						      contextsize, false);
		if (error)
			return error;
	}

	req_ctx->hash_flags &= ~(HASH_FLAG_INIT_CTX | HASH_FLAG_UPDATE |
				 HASH_FLAG_FINALIZE);

	error = artpec6_crypto_terminate_in_descrs(common);
	if (error)
		return error;

	error = artpec6_crypto_terminate_out_descrs(common);
	if (error)
		return error;

	error = artpec6_crypto_dma_map_descs(common);
	if (error)
		return error;

	return ARTPEC6_CRYPTO_PREPARE_HASH_START;
}
static int artpec6_crypto_aes_ecb_init(struct crypto_skcipher *tfm)
{
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);

	tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
	ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_ECB;

	return 0;
}

static int artpec6_crypto_aes_ctr_init(struct crypto_skcipher *tfm)
{
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);

	ctx->fallback =
		crypto_alloc_sync_skcipher(crypto_tfm_alg_name(&tfm->base),
					   0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback))
		return PTR_ERR(ctx->fallback);

	tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
	ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_CTR;

	return 0;
}

static int artpec6_crypto_aes_cbc_init(struct crypto_skcipher *tfm)
{
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);

	tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
	ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_CBC;

	return 0;
}

static int artpec6_crypto_aes_xts_init(struct crypto_skcipher *tfm)
{
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);

	tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
	ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_XTS;

	return 0;
}

static void artpec6_crypto_aes_exit(struct crypto_skcipher *tfm)
{
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);

	memset(ctx, 0, sizeof(*ctx));
}

static void artpec6_crypto_aes_ctr_exit(struct crypto_skcipher *tfm)
{
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_sync_skcipher(ctx->fallback);
	artpec6_crypto_aes_exit(tfm);
}
static int
artpec6_crypto_cipher_set_key(struct crypto_skcipher *cipher, const u8 *key,
			      unsigned int keylen)
{
	struct artpec6_cryptotfm_context *ctx =
		crypto_skcipher_ctx(cipher);

	memcpy(ctx->aes_key, key, keylen);
	ctx->key_length = keylen;
	return 0;
}

static int
artpec6_crypto_xts_set_key(struct crypto_skcipher *cipher, const u8 *key,
			   unsigned int keylen)
{
	struct artpec6_cryptotfm_context *ctx =
		crypto_skcipher_ctx(cipher);
	int ret;

	ret = xts_check_key(&cipher->base, key, keylen);
	if (ret)
		return ret;

	memcpy(ctx->aes_key, key, keylen);
	ctx->key_length = keylen;
	return 0;
}
/** artpec6_crypto_prepare_crypto - Prepare an async block cipher crypto request
 *
 * @req: The async request to process
 *
 * @return 0 if the dma job was successfully prepared
 *
 * This function sets up the PDMA descriptors for a block cipher request.
 *
 * The required padding is added for AES-CTR using a statically defined
 * buffer.
 *
 * The PDMA descriptor list will be as follows:
 *
 * OUT: [KEY_MD][KEY][EOP]<CIPHER_MD>[IV]<data_0>...[data_n][AES-CTR_pad]<eop>
 *  IN: <CIPHER_MD><data_0>...[data_n]<intr>
 */
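
/*
 * Concrete example (illustration only): a 32-byte AES-128-CBC encryption with
 * cache-line aligned source and destination buffers uses
 *
 *	OUT: [KEY_MD][16-byte key]<EOP>[CIPHER_MD][16-byte IV][32-byte src]<eop>
 *	 IN: [4-byte pad][32-byte dst]<intr>
 *
 * where the 4-byte pad descriptor on the IN side receives the metadata word
 * that the hardware echoes back before the payload.
 */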
static int artpec6_crypto_prepare_crypto(struct skcipher_request *areq)
{
	int ret;
	struct artpec6_crypto_walk walk;
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
	struct artpec6_crypto_request_context *req_ctx = NULL;
	size_t iv_len = crypto_skcipher_ivsize(cipher);
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	enum artpec6_crypto_variant variant = ac->variant;
	struct artpec6_crypto_req_common *common;
	bool cipher_decr = false;
	size_t cipher_klen;
	u32 cipher_len = 0; /* Same as regk_crypto_key_128 for NULL crypto */
	u32 oper;

	req_ctx = skcipher_request_ctx(areq);
	common = &req_ctx->common;

	artpec6_crypto_init_dma_operation(common);

	if (variant == ARTPEC6_CRYPTO)
		ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER, a6_regk_crypto_dlkey);
	else
		ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER, a7_regk_crypto_dlkey);

	ret = artpec6_crypto_setup_out_descr(common, (void *)&ctx->key_md,
					     sizeof(ctx->key_md), false, false);
	if (ret)
		return ret;

	ret = artpec6_crypto_setup_out_descr(common, ctx->aes_key,
					     ctx->key_length, true, false);
	if (ret)
		return ret;

	req_ctx->cipher_md = 0;

	if (ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_XTS)
		cipher_klen = ctx->key_length/2;
	else
		cipher_klen = ctx->key_length;

	switch (cipher_klen) {
	case 16:
		cipher_len = regk_crypto_key_128;
		break;
	case 24:
		cipher_len = regk_crypto_key_192;
		break;
	case 32:
		cipher_len = regk_crypto_key_256;
		break;
	default:
		pr_err("%s: Invalid key length %d!\n",
		       MODULE_NAME, ctx->key_length);
		return -EINVAL;
	}

	switch (ctx->crypto_type) {
	case ARTPEC6_CRYPTO_CIPHER_AES_ECB:
		oper = regk_crypto_aes_ecb;
		cipher_decr = req_ctx->decrypt;
		break;

	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
		oper = regk_crypto_aes_cbc;
		cipher_decr = req_ctx->decrypt;
		break;

	case ARTPEC6_CRYPTO_CIPHER_AES_CTR:
		oper = regk_crypto_aes_ctr;
		cipher_decr = false;
		break;

	case ARTPEC6_CRYPTO_CIPHER_AES_XTS:
		oper = regk_crypto_aes_xts;
		cipher_decr = req_ctx->decrypt;

		if (variant == ARTPEC6_CRYPTO)
			req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DSEQ;
		else
			req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DSEQ;
		break;

	default:
		pr_err("%s: Invalid cipher mode %d!\n",
		       MODULE_NAME, ctx->crypto_type);
		return -EINVAL;
	}

	if (variant == ARTPEC6_CRYPTO) {
		req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_OPER, oper);
		req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_CIPHER_LEN,
						 cipher_len);
		if (cipher_decr)
			req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DECR;
	} else {
		req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_OPER, oper);
		req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_CIPHER_LEN,
						 cipher_len);
		if (cipher_decr)
			req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DECR;
	}

	ret = artpec6_crypto_setup_out_descr(common,
					     &req_ctx->cipher_md,
					     sizeof(req_ctx->cipher_md),
					     false, false);
	if (ret)
		return ret;

	ret = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false);
	if (ret)
		return ret;

	ret = artpec6_crypto_setup_out_descr(common, areq->iv, iv_len,
					     false, false);
	if (ret)
		return ret;

	/* Data out */
	artpec6_crypto_walk_init(&walk, areq->src);
	ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, areq->cryptlen);
	if (ret)
		return ret;

	/* Data in */
	artpec6_crypto_walk_init(&walk, areq->dst);
	ret = artpec6_crypto_setup_sg_descrs_in(common, &walk, areq->cryptlen);
	if (ret)
		return ret;

	/* CTR-mode padding required by the HW. */
	if (ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_CTR ||
	    ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_XTS) {
		size_t pad = ALIGN(areq->cryptlen, AES_BLOCK_SIZE) -
			     areq->cryptlen;

		ret = artpec6_crypto_setup_out_descr(common,
						     ac->pad_buffer, pad,
						     false, false);
		if (ret)
			return ret;

		ret = artpec6_crypto_setup_in_descr(common,
						    ac->pad_buffer, pad,
						    false);
		if (ret)
			return ret;
	}

	ret = artpec6_crypto_terminate_out_descrs(common);
	if (ret)
		return ret;

	ret = artpec6_crypto_terminate_in_descrs(common);
	if (ret)
		return ret;

	return artpec6_crypto_dma_map_descs(common);
}
static int artpec6_crypto_prepare_aead(struct aead_request *areq)
{
	size_t count;
	int ret;
	size_t input_length;
	struct artpec6_cryptotfm_context *ctx = crypto_tfm_ctx(areq->base.tfm);
	struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(areq);
	struct crypto_aead *cipher = crypto_aead_reqtfm(areq);
	struct artpec6_crypto_req_common *common = &req_ctx->common;
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	enum artpec6_crypto_variant variant = ac->variant;
	u32 md_cipher_len;

	artpec6_crypto_init_dma_operation(common);

	/* Key */
	if (variant == ARTPEC6_CRYPTO) {
		ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER,
					 a6_regk_crypto_dlkey);
	} else {
		ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER,
					 a7_regk_crypto_dlkey);
	}
	ret = artpec6_crypto_setup_out_descr(common, (void *)&ctx->key_md,
					     sizeof(ctx->key_md), false, false);
	if (ret)
		return ret;

	ret = artpec6_crypto_setup_out_descr(common, ctx->aes_key,
					     ctx->key_length, true, false);
	if (ret)
		return ret;

	req_ctx->cipher_md = 0;

	switch (ctx->key_length) {
	case 16:
		md_cipher_len = regk_crypto_key_128;
		break;
	case 24:
		md_cipher_len = regk_crypto_key_192;
		break;
	case 32:
		md_cipher_len = regk_crypto_key_256;
		break;
	default:
		return -EINVAL;
	}

	if (variant == ARTPEC6_CRYPTO) {
		req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_OPER,
						 regk_crypto_aes_gcm);
		req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_CIPHER_LEN,
						 md_cipher_len);
		if (req_ctx->decrypt)
			req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DECR;
	} else {
		req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_OPER,
						 regk_crypto_aes_gcm);
		req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_CIPHER_LEN,
						 md_cipher_len);
		if (req_ctx->decrypt)
			req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DECR;
	}

	ret = artpec6_crypto_setup_out_descr(common,
					     (void *) &req_ctx->cipher_md,
					     sizeof(req_ctx->cipher_md), false,
					     false);
	if (ret)
		return ret;

	ret = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false);
	if (ret)
		return ret;

	/* For the decryption, cryptlen includes the tag. */
	input_length = areq->cryptlen;
	if (req_ctx->decrypt)
		input_length -= crypto_aead_authsize(cipher);

	/* Prepare the context buffer */
	req_ctx->hw_ctx.aad_length_bits =
		__cpu_to_be64(8*areq->assoclen);

	req_ctx->hw_ctx.text_length_bits =
		__cpu_to_be64(8*input_length);

	memcpy(req_ctx->hw_ctx.J0, areq->iv, crypto_aead_ivsize(cipher));
	// The HW omits the initial increment of the counter field.
	memcpy(req_ctx->hw_ctx.J0 + GCM_AES_IV_SIZE, "\x00\x00\x00\x01", 4);

	ret = artpec6_crypto_setup_out_descr(common, &req_ctx->hw_ctx,
		sizeof(struct artpec6_crypto_aead_hw_ctx), false, false);
	if (ret)
		return ret;

	{
		struct artpec6_crypto_walk walk;

		artpec6_crypto_walk_init(&walk, areq->src);

		/* Associated data */
		count = areq->assoclen;
		ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, count);
		if (ret)
			return ret;

		if (!IS_ALIGNED(areq->assoclen, 16)) {
			size_t assoc_pad = 16 - (areq->assoclen % 16);
			/* The HW mandates zero padding here */
			ret = artpec6_crypto_setup_out_descr(common,
							     ac->zero_buffer,
							     assoc_pad, false,
							     false);
			if (ret)
				return ret;
		}

		/* Data to crypto */
		count = input_length;
		ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, count);
		if (ret)
			return ret;

		if (!IS_ALIGNED(input_length, 16)) {
			size_t crypto_pad = 16 - (input_length % 16);
			/* The HW mandates zero padding here */
			ret = artpec6_crypto_setup_out_descr(common,
							     ac->zero_buffer,
							     crypto_pad, false,
							     false);
			if (ret)
				return ret;
		}
	}

	/* Data from crypto */
	{
		struct artpec6_crypto_walk walk;
		size_t output_len = areq->cryptlen;

		if (req_ctx->decrypt)
			output_len -= crypto_aead_authsize(cipher);

		artpec6_crypto_walk_init(&walk, areq->dst);

		/* skip associated data in the output */
		count = artpec6_crypto_walk_advance(&walk, areq->assoclen);
		if (count)
			return -EINVAL;

		count = output_len;
		ret = artpec6_crypto_setup_sg_descrs_in(common, &walk, count);
		if (ret)
			return ret;

		/* Put padding between the cryptotext and the auth tag */
		if (!IS_ALIGNED(output_len, 16)) {
			size_t crypto_pad = 16 - (output_len % 16);

			ret = artpec6_crypto_setup_in_descr(common,
							    ac->pad_buffer,
							    crypto_pad, false);
			if (ret)
				return ret;
		}

		/* The authentication tag shall follow immediately after
		 * the output ciphertext. For decryption it is put in a context
		 * buffer for later compare against the input tag.
		 */
		if (req_ctx->decrypt) {
			ret = artpec6_crypto_setup_in_descr(common,
				req_ctx->decryption_tag, AES_BLOCK_SIZE, false);
			if (ret)
				return ret;

		} else {
			/* For encryption the requested tag size may be smaller
			 * than the hardware's generated tag.
			 */
			size_t authsize = crypto_aead_authsize(cipher);

			ret = artpec6_crypto_setup_sg_descrs_in(common, &walk,
								authsize);
			if (ret)
				return ret;

			if (authsize < AES_BLOCK_SIZE) {
				count = AES_BLOCK_SIZE - authsize;
				ret = artpec6_crypto_setup_in_descr(common,
								    ac->pad_buffer,
								    count, false);
				if (ret)
					return ret;
			}
		}
	}

	ret = artpec6_crypto_terminate_in_descrs(common);
	if (ret)
		return ret;

	ret = artpec6_crypto_terminate_out_descrs(common);
	if (ret)
		return ret;

	return artpec6_crypto_dma_map_descs(common);
}
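
/*
 * Note on the GCM context above (illustration only): for the 12-byte GCM IV
 * the hw_ctx.J0 block is laid out as IV[0..11] || 00 00 00 01, which is the
 * standard pre-counter block J0 for 96-bit IVs, and the lengths of the
 * associated data and of the text are passed as 64-bit big-endian bit counts.
 * All associated data and text sent to the engine is zero-padded to a
 * multiple of 16 bytes.
 */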
static void artpec6_crypto_process_queue(struct artpec6_crypto *ac,
					 struct list_head *completions)
{
	struct artpec6_crypto_req_common *req;

	while (!list_empty(&ac->queue) && !artpec6_crypto_busy()) {
		req = list_first_entry(&ac->queue,
				       struct artpec6_crypto_req_common,
				       list);
		list_move_tail(&req->list, &ac->pending);
		artpec6_crypto_start_dma(req);

		list_add_tail(&req->complete_in_progress, completions);
	}

	/*
	 * In some cases, the hardware can raise an in_eop_flush interrupt
	 * before actually updating the status, so we have a timer which will
	 * recheck the status on timeout. Since the cases are expected to be
	 * very rare, we use a relatively large timeout value. There should be
	 * no noticeable negative effect if we timeout spuriously.
	 */
	if (ac->pending_count)
		mod_timer(&ac->timer, jiffies + msecs_to_jiffies(100));
	else
		del_timer(&ac->timer);
}
static void artpec6_crypto_timeout(struct timer_list *t)
{
	struct artpec6_crypto *ac = from_timer(ac, t, timer);

	dev_info_ratelimited(artpec6_crypto_dev, "timeout\n");

	tasklet_schedule(&ac->task);
}
static void artpec6_crypto_task(unsigned long data)
{
	struct artpec6_crypto *ac = (struct artpec6_crypto *)data;
	struct artpec6_crypto_req_common *req;
	struct artpec6_crypto_req_common *n;
	struct list_head complete_done;
	struct list_head complete_in_progress;

	INIT_LIST_HEAD(&complete_done);
	INIT_LIST_HEAD(&complete_in_progress);

	if (list_empty(&ac->pending)) {
		pr_debug("Spurious IRQ\n");
		return;
	}

	spin_lock_bh(&ac->queue_lock);

	list_for_each_entry_safe(req, n, &ac->pending, list) {
		struct artpec6_crypto_dma_descriptors *dma = req->dma;
		u32 stat;
		dma_addr_t stataddr;

		stataddr = dma->stat_dma_addr + 4 * (req->dma->in_cnt - 1);
		dma_sync_single_for_cpu(artpec6_crypto_dev,
					stataddr,
					4,
					DMA_BIDIRECTIONAL);

		stat = req->dma->stat[req->dma->in_cnt-1];

		/* A non-zero final status descriptor indicates
		 * this job has finished.
		 */
		pr_debug("Request %p status is %X\n", req, stat);
		if (!stat)
			break;

		/* Allow testing of timeout handling with fault injection */
#ifdef CONFIG_FAULT_INJECTION
		if (should_fail(&artpec6_crypto_fail_status_read, 1))
			break;
#endif

		pr_debug("Completing request %p\n", req);

		list_move_tail(&req->list, &complete_done);

		ac->pending_count--;
	}

	artpec6_crypto_process_queue(ac, &complete_in_progress);

	spin_unlock_bh(&ac->queue_lock);

	/* Perform the completion callbacks without holding the queue lock
	 * to allow new request submissions from the callbacks.
	 */
	list_for_each_entry_safe(req, n, &complete_done, list) {
		artpec6_crypto_dma_unmap_all(req);
		artpec6_crypto_copy_bounce_buffers(req);
		artpec6_crypto_common_destroy(req);

		req->complete(req->req);
	}

	list_for_each_entry_safe(req, n, &complete_in_progress,
				 complete_in_progress) {
		req->req->complete(req->req, -EINPROGRESS);
	}
}
static void artpec6_crypto_complete_crypto(struct crypto_async_request *req)
{
	req->complete(req, 0);
}

static void
artpec6_crypto_complete_cbc_decrypt(struct crypto_async_request *req)
{
	struct skcipher_request *cipher_req = container_of(req,
		struct skcipher_request, base);

	scatterwalk_map_and_copy(cipher_req->iv, cipher_req->src,
				 cipher_req->cryptlen - AES_BLOCK_SIZE,
				 AES_BLOCK_SIZE, 0);
	req->complete(req, 0);
}

static void
artpec6_crypto_complete_cbc_encrypt(struct crypto_async_request *req)
{
	struct skcipher_request *cipher_req = container_of(req,
		struct skcipher_request, base);

	scatterwalk_map_and_copy(cipher_req->iv, cipher_req->dst,
				 cipher_req->cryptlen - AES_BLOCK_SIZE,
				 AES_BLOCK_SIZE, 0);
	req->complete(req, 0);
}
static void artpec6_crypto_complete_aead(struct crypto_async_request *req)
{
	int result = 0;

	/* Verify GCM hashtag. */
	struct aead_request *areq = container_of(req,
		struct aead_request, base);
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(areq);

	if (req_ctx->decrypt) {
		u8 input_tag[AES_BLOCK_SIZE];
		unsigned int authsize = crypto_aead_authsize(aead);

		sg_pcopy_to_buffer(areq->src,
				   sg_nents(areq->src),
				   input_tag,
				   authsize,
				   areq->assoclen + areq->cryptlen -
				   authsize);

		if (crypto_memneq(req_ctx->decryption_tag,
				  input_tag,
				  authsize)) {
			pr_debug("***EBADMSG:\n");
			print_hex_dump_debug("ref:", DUMP_PREFIX_ADDRESS, 32, 1,
					     input_tag, authsize, true);
			print_hex_dump_debug("out:", DUMP_PREFIX_ADDRESS, 32, 1,
					     req_ctx->decryption_tag,
					     authsize, true);

			result = -EBADMSG;
		}
	}

	req->complete(req, result);
}

static void artpec6_crypto_complete_hash(struct crypto_async_request *req)
{
	req->complete(req, 0);
}
/*------------------- Hash functions -----------------------------------------*/
static int
artpec6_crypto_hash_set_key(struct crypto_ahash *tfm,
			    const u8 *key, unsigned int keylen)
{
	struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(&tfm->base);
	size_t blocksize;
	int ret;

	if (!keylen) {
		pr_err("Invalid length (%d) of HMAC key\n",
			keylen);
		return -EINVAL;
	}

	memset(tfm_ctx->hmac_key, 0, sizeof(tfm_ctx->hmac_key));

	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	if (keylen > blocksize) {
		SHASH_DESC_ON_STACK(hdesc, tfm_ctx->child_hash);

		hdesc->tfm = tfm_ctx->child_hash;

		tfm_ctx->hmac_key_length = blocksize;
		ret = crypto_shash_digest(hdesc, key, keylen,
					  tfm_ctx->hmac_key);
		if (ret)
			return ret;
	} else {
		memcpy(tfm_ctx->hmac_key, key, keylen);
		tfm_ctx->hmac_key_length = keylen;
	}

	return 0;
}
static int
artpec6_crypto_init_hash(struct ahash_request *req, u8 type, int hmac)
{
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	enum artpec6_crypto_variant variant = ac->variant;
	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
	u32 oper;

	memset(req_ctx, 0, sizeof(*req_ctx));

	req_ctx->hash_flags = HASH_FLAG_INIT_CTX;
	if (hmac)
		req_ctx->hash_flags |= (HASH_FLAG_HMAC | HASH_FLAG_UPDATE_KEY);

	switch (type) {
	case ARTPEC6_CRYPTO_HASH_SHA1:
		oper = hmac ? regk_crypto_hmac_sha1 : regk_crypto_sha1;
		break;
	case ARTPEC6_CRYPTO_HASH_SHA256:
		oper = hmac ? regk_crypto_hmac_sha256 : regk_crypto_sha256;
		break;
	default:
		pr_err("%s: Unsupported hash type 0x%x\n", MODULE_NAME, type);
		return -EINVAL;
	}

	if (variant == ARTPEC6_CRYPTO)
		req_ctx->hash_md = FIELD_PREP(A6_CRY_MD_OPER, oper);
	else
		req_ctx->hash_md = FIELD_PREP(A7_CRY_MD_OPER, oper);

	return 0;
}
static int artpec6_crypto_prepare_submit_hash(struct ahash_request *req)
{
	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
	int ret;

	if (!req_ctx->common.dma) {
		ret = artpec6_crypto_common_init(&req_ctx->common,
						 &req->base,
						 artpec6_crypto_complete_hash,
						 NULL, 0);
		if (ret)
			return ret;
	}

	ret = artpec6_crypto_prepare_hash(req);
	switch (ret) {
	case ARTPEC6_CRYPTO_PREPARE_HASH_START:
		ret = artpec6_crypto_submit(&req_ctx->common);
		break;

	case ARTPEC6_CRYPTO_PREPARE_HASH_NO_START:
		ret = 0;
		break;

	default:
		artpec6_crypto_common_destroy(&req_ctx->common);
		break;
	}

	return ret;
}
2327 static int artpec6_crypto_hash_final(struct ahash_request
*req
)
2329 struct artpec6_hash_request_context
*req_ctx
= ahash_request_ctx(req
);
2331 req_ctx
->hash_flags
|= HASH_FLAG_FINALIZE
;
2333 return artpec6_crypto_prepare_submit_hash(req
);
2336 static int artpec6_crypto_hash_update(struct ahash_request
*req
)
2338 struct artpec6_hash_request_context
*req_ctx
= ahash_request_ctx(req
);
2340 req_ctx
->hash_flags
|= HASH_FLAG_UPDATE
;
2342 return artpec6_crypto_prepare_submit_hash(req
);
2345 static int artpec6_crypto_sha1_init(struct ahash_request
*req
)
2347 return artpec6_crypto_init_hash(req
, ARTPEC6_CRYPTO_HASH_SHA1
, 0);
2350 static int artpec6_crypto_sha1_digest(struct ahash_request
*req
)
2352 struct artpec6_hash_request_context
*req_ctx
= ahash_request_ctx(req
);
2354 artpec6_crypto_init_hash(req
, ARTPEC6_CRYPTO_HASH_SHA1
, 0);
2356 req_ctx
->hash_flags
|= HASH_FLAG_UPDATE
| HASH_FLAG_FINALIZE
;
2358 return artpec6_crypto_prepare_submit_hash(req
);
2361 static int artpec6_crypto_sha256_init(struct ahash_request
*req
)
2363 return artpec6_crypto_init_hash(req
, ARTPEC6_CRYPTO_HASH_SHA256
, 0);
2366 static int artpec6_crypto_sha256_digest(struct ahash_request
*req
)
2368 struct artpec6_hash_request_context
*req_ctx
= ahash_request_ctx(req
);
2370 artpec6_crypto_init_hash(req
, ARTPEC6_CRYPTO_HASH_SHA256
, 0);
2371 req_ctx
->hash_flags
|= HASH_FLAG_UPDATE
| HASH_FLAG_FINALIZE
;
2373 return artpec6_crypto_prepare_submit_hash(req
);

static int artpec6_crypto_hmac_sha256_init(struct ahash_request *req)
{
	return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 1);
}

static int artpec6_crypto_hmac_sha256_digest(struct ahash_request *req)
{
	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);

	artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 1);

	req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;

	return artpec6_crypto_prepare_submit_hash(req);
}
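
/*
 * Common tfm init. When a base hash name is given (the HMAC case), a
 * synchronous shash of that algorithm is allocated so that over-long HMAC
 * keys can be digested down to the block size in the setkey path.
 */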
static int artpec6_crypto_ahash_init_common(struct crypto_tfm *tfm,
					    const char *base_hash_name)
{
	struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(tfm);

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct artpec6_hash_request_context));
	memset(tfm_ctx, 0, sizeof(*tfm_ctx));

	if (base_hash_name) {
		struct crypto_shash *child;

		child = crypto_alloc_shash(base_hash_name, 0,
					   CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(child))
			return PTR_ERR(child);

		tfm_ctx->child_hash = child;
	}

	return 0;
}

static int artpec6_crypto_ahash_init(struct crypto_tfm *tfm)
{
	return artpec6_crypto_ahash_init_common(tfm, NULL);
}

static int artpec6_crypto_ahash_init_hmac_sha256(struct crypto_tfm *tfm)
{
	return artpec6_crypto_ahash_init_common(tfm, "sha256");
}

static void artpec6_crypto_ahash_exit(struct crypto_tfm *tfm)
{
	struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(tfm);

	if (tfm_ctx->child_hash)
		crypto_free_shash(tfm_ctx->child_hash);

	memset(tfm_ctx->hmac_key, 0, sizeof(tfm_ctx->hmac_key));
	tfm_ctx->hmac_key_length = 0;
}
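
/* Export the software-visible hash state so a request can be suspended. */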
static int artpec6_crypto_hash_export(struct ahash_request *req, void *out)
{
	const struct artpec6_hash_request_context *ctx = ahash_request_ctx(req);
	struct artpec6_hash_export_state *state = out;
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	enum artpec6_crypto_variant variant = ac->variant;

	BUILD_BUG_ON(sizeof(state->partial_buffer) !=
		     sizeof(ctx->partial_buffer));
	BUILD_BUG_ON(sizeof(state->digeststate) != sizeof(ctx->digeststate));

	state->digcnt = ctx->digcnt;
	state->partial_bytes = ctx->partial_bytes;
	state->hash_flags = ctx->hash_flags;

	if (variant == ARTPEC6_CRYPTO)
		state->oper = FIELD_GET(A6_CRY_MD_OPER, ctx->hash_md);
	else
		state->oper = FIELD_GET(A7_CRY_MD_OPER, ctx->hash_md);

	memcpy(state->partial_buffer, ctx->partial_buffer,
	       sizeof(state->partial_buffer));
	memcpy(state->digeststate, ctx->digeststate,
	       sizeof(state->digeststate));

	return 0;
}
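
/* Restore a previously exported hash state into a fresh request context. */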
static int artpec6_crypto_hash_import(struct ahash_request *req, const void *in)
{
	struct artpec6_hash_request_context *ctx = ahash_request_ctx(req);
	const struct artpec6_hash_export_state *state = in;
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	enum artpec6_crypto_variant variant = ac->variant;

	memset(ctx, 0, sizeof(*ctx));

	ctx->digcnt = state->digcnt;
	ctx->partial_bytes = state->partial_bytes;
	ctx->hash_flags = state->hash_flags;

	if (variant == ARTPEC6_CRYPTO)
		ctx->hash_md = FIELD_PREP(A6_CRY_MD_OPER, state->oper);
	else
		ctx->hash_md = FIELD_PREP(A7_CRY_MD_OPER, state->oper);

	memcpy(ctx->partial_buffer, state->partial_buffer,
	       sizeof(state->partial_buffer));
	memcpy(ctx->digeststate, state->digeststate,
	       sizeof(state->digeststate));

	return 0;
}
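
/*
 * Partition the PDMA internal memory between data, descriptor and status
 * buffers, then enable the OUT and IN channels and unmask the completion
 * interrupts for the detected variant.
 */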
static int init_crypto_hw(struct artpec6_crypto *ac)
{
	enum artpec6_crypto_variant variant = ac->variant;
	void __iomem *base = ac->base;
	u32 out_descr_buf_size;
	u32 out_data_buf_size;
	u32 in_data_buf_size;
	u32 in_descr_buf_size;
	u32 in_stat_buf_size;
	u32 in, out;

	/*
	 * The PDMA unit contains 1984 bytes of internal memory for the OUT
	 * channels and 1024 bytes for the IN channel. This is an elastic
	 * memory used to internally store the descriptors and data. The
	 * values are specified in 64 byte increments. Trustzone buffers are
	 * not used at this stage.
	 */
	out_data_buf_size = 16;		/* 1024 bytes for data */
	out_descr_buf_size = 15;	/* 960 bytes for descriptors */
	in_data_buf_size = 8;		/* 512 bytes for data */
	in_descr_buf_size = 4;		/* 256 bytes for descriptors */
	in_stat_buf_size = 4;		/* 256 bytes for stat descrs */

	BUILD_BUG_ON_MSG((out_data_buf_size
				+ out_descr_buf_size) * 64 > 1984,
			 "Invalid OUT configuration");

	BUILD_BUG_ON_MSG((in_data_buf_size
				+ in_descr_buf_size
				+ in_stat_buf_size) * 64 > 1024,
			 "Invalid IN configuration");

	in = FIELD_PREP(PDMA_IN_BUF_CFG_DATA_BUF_SIZE, in_data_buf_size) |
	     FIELD_PREP(PDMA_IN_BUF_CFG_DESCR_BUF_SIZE, in_descr_buf_size) |
	     FIELD_PREP(PDMA_IN_BUF_CFG_STAT_BUF_SIZE, in_stat_buf_size);

	out = FIELD_PREP(PDMA_OUT_BUF_CFG_DATA_BUF_SIZE, out_data_buf_size) |
	      FIELD_PREP(PDMA_OUT_BUF_CFG_DESCR_BUF_SIZE, out_descr_buf_size);

	writel_relaxed(out, base + PDMA_OUT_BUF_CFG);
	writel_relaxed(PDMA_OUT_CFG_EN, base + PDMA_OUT_CFG);

	if (variant == ARTPEC6_CRYPTO) {
		writel_relaxed(in, base + A6_PDMA_IN_BUF_CFG);
		writel_relaxed(PDMA_IN_CFG_EN, base + A6_PDMA_IN_CFG);
		writel_relaxed(A6_PDMA_INTR_MASK_IN_DATA |
			       A6_PDMA_INTR_MASK_IN_EOP_FLUSH,
			       base + A6_PDMA_INTR_MASK);
	} else {
		writel_relaxed(in, base + A7_PDMA_IN_BUF_CFG);
		writel_relaxed(PDMA_IN_CFG_EN, base + A7_PDMA_IN_CFG);
		writel_relaxed(A7_PDMA_INTR_MASK_IN_DATA |
			       A7_PDMA_INTR_MASK_IN_EOP_FLUSH,
			       base + A7_PDMA_INTR_MASK);
	}

	return 0;
}
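
/* Stop the IN and OUT PDMA channels and disable the block. */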
static void artpec6_crypto_disable_hw(struct artpec6_crypto *ac)
{
	enum artpec6_crypto_variant variant = ac->variant;
	void __iomem *base = ac->base;

	if (variant == ARTPEC6_CRYPTO) {
		writel_relaxed(A6_PDMA_IN_CMD_STOP, base + A6_PDMA_IN_CMD);
		writel_relaxed(0, base + A6_PDMA_IN_CFG);
		writel_relaxed(A6_PDMA_OUT_CMD_STOP, base + PDMA_OUT_CMD);
	} else {
		writel_relaxed(A7_PDMA_IN_CMD_STOP, base + A7_PDMA_IN_CMD);
		writel_relaxed(0, base + A7_PDMA_IN_CFG);
		writel_relaxed(A7_PDMA_OUT_CMD_STOP, base + PDMA_OUT_CMD);
	}

	writel_relaxed(0, base + PDMA_OUT_CFG);
}

static irqreturn_t artpec6_crypto_irq(int irq, void *dev_id)
{
	struct artpec6_crypto *ac = dev_id;
	enum artpec6_crypto_variant variant = ac->variant;
	void __iomem *base = ac->base;
	u32 mask_in_data, mask_in_eop_flush;
	u32 in_cmd_flush_stat, in_cmd_reg;
	u32 ack_intr_reg;
	u32 ack = 0;
	u32 intr;

	if (variant == ARTPEC6_CRYPTO) {
		intr = readl_relaxed(base + A6_PDMA_MASKED_INTR);
		mask_in_data = A6_PDMA_INTR_MASK_IN_DATA;
		mask_in_eop_flush = A6_PDMA_INTR_MASK_IN_EOP_FLUSH;
		in_cmd_flush_stat = A6_PDMA_IN_CMD_FLUSH_STAT;
		in_cmd_reg = A6_PDMA_IN_CMD;
		ack_intr_reg = A6_PDMA_ACK_INTR;
	} else {
		intr = readl_relaxed(base + A7_PDMA_MASKED_INTR);
		mask_in_data = A7_PDMA_INTR_MASK_IN_DATA;
		mask_in_eop_flush = A7_PDMA_INTR_MASK_IN_EOP_FLUSH;
		in_cmd_flush_stat = A7_PDMA_IN_CMD_FLUSH_STAT;
		in_cmd_reg = A7_PDMA_IN_CMD;
		ack_intr_reg = A7_PDMA_ACK_INTR;
	}

	/* We get two interrupt notifications from each job.
	 * The in_data interrupt means all data was sent to memory; we then
	 * request a status flush command to write the per-job status to its
	 * status vector. This ensures that the tasklet can detect exactly
	 * how many of the submitted jobs have finished.
	 */
	if (intr & mask_in_data)
		ack |= mask_in_data;

	if (intr & mask_in_eop_flush)
		ack |= mask_in_eop_flush;
	else
		writel_relaxed(in_cmd_flush_stat, base + in_cmd_reg);

	writel_relaxed(ack, base + ack_intr_reg);

	if (intr & mask_in_eop_flush)
		tasklet_schedule(&ac->task);

	return IRQ_HANDLED;
}
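
/*
 * The transformations below are registered with the kernel crypto API and
 * are normally reached through the generic entry points rather than by
 * driver name. A minimal sketch (illustrative only, not part of this
 * driver) of how a kernel user would end up on e.g. "artpec-sha256",
 * assuming this driver's priority (300) wins the algorithm selection:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   done_cb, cb_ctx);
 *	ahash_request_set_crypt(req, sgl, digest, nbytes);
 *	crypto_ahash_digest(req);	completes asynchronously via done_cb
 *
 * (done_cb, cb_ctx, sgl, digest and nbytes are hypothetical caller-owned
 * names used only for illustration.)
 */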

/*------------------- Algorithm definitions ----------------------------------*/

static struct ahash_alg hash_algos[] = {
	/* SHA-1 */
	{
		.init = artpec6_crypto_sha1_init,
		.update = artpec6_crypto_hash_update,
		.final = artpec6_crypto_hash_final,
		.digest = artpec6_crypto_sha1_digest,
		.import = artpec6_crypto_hash_import,
		.export = artpec6_crypto_hash_export,
		.halg.digestsize = SHA1_DIGEST_SIZE,
		.halg.statesize = sizeof(struct artpec6_hash_export_state),
		.halg.base = {
			.cra_name = "sha1",
			.cra_driver_name = "artpec-sha1",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
			.cra_module = THIS_MODULE,
			.cra_init = artpec6_crypto_ahash_init,
			.cra_exit = artpec6_crypto_ahash_exit,
		}
	},
	/* SHA-256 */
	{
		.init = artpec6_crypto_sha256_init,
		.update = artpec6_crypto_hash_update,
		.final = artpec6_crypto_hash_final,
		.digest = artpec6_crypto_sha256_digest,
		.import = artpec6_crypto_hash_import,
		.export = artpec6_crypto_hash_export,
		.halg.digestsize = SHA256_DIGEST_SIZE,
		.halg.statesize = sizeof(struct artpec6_hash_export_state),
		.halg.base = {
			.cra_name = "sha256",
			.cra_driver_name = "artpec-sha256",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = SHA256_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
			.cra_module = THIS_MODULE,
			.cra_init = artpec6_crypto_ahash_init,
			.cra_exit = artpec6_crypto_ahash_exit,
		}
	},
	/* HMAC SHA-256 */
	{
		.init = artpec6_crypto_hmac_sha256_init,
		.update = artpec6_crypto_hash_update,
		.final = artpec6_crypto_hash_final,
		.digest = artpec6_crypto_hmac_sha256_digest,
		.import = artpec6_crypto_hash_import,
		.export = artpec6_crypto_hash_export,
		.setkey = artpec6_crypto_hash_set_key,
		.halg.digestsize = SHA256_DIGEST_SIZE,
		.halg.statesize = sizeof(struct artpec6_hash_export_state),
		.halg.base = {
			.cra_name = "hmac(sha256)",
			.cra_driver_name = "artpec-hmac-sha256",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = SHA256_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
			.cra_module = THIS_MODULE,
			.cra_init = artpec6_crypto_ahash_init_hmac_sha256,
			.cra_exit = artpec6_crypto_ahash_exit,
		}
	},
};
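
/*
 * Symmetric AES ciphers: ECB, CTR, CBC and XTS. The CTR entry carries
 * CRYPTO_ALG_NEED_FALLBACK and has dedicated init/exit callbacks,
 * presumably because those paths manage a software fallback cipher.
 */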
static struct skcipher_alg crypto_algos[] = {
	/* AES - ECB */
	{
		.base = {
			.cra_name = "ecb(aes)",
			.cra_driver_name = "artpec6-ecb-aes",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
			.cra_module = THIS_MODULE,
		},
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.setkey = artpec6_crypto_cipher_set_key,
		.encrypt = artpec6_crypto_encrypt,
		.decrypt = artpec6_crypto_decrypt,
		.init = artpec6_crypto_aes_ecb_init,
		.exit = artpec6_crypto_aes_exit,
	},
	/* AES - CTR */
	{
		.base = {
			.cra_name = "ctr(aes)",
			.cra_driver_name = "artpec6-ctr-aes",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_NEED_FALLBACK,
			.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
			.cra_module = THIS_MODULE,
		},
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.setkey = artpec6_crypto_cipher_set_key,
		.encrypt = artpec6_crypto_ctr_encrypt,
		.decrypt = artpec6_crypto_ctr_decrypt,
		.init = artpec6_crypto_aes_ctr_init,
		.exit = artpec6_crypto_aes_ctr_exit,
	},
	/* AES - CBC */
	{
		.base = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "artpec6-cbc-aes",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
			.cra_module = THIS_MODULE,
		},
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.setkey = artpec6_crypto_cipher_set_key,
		.encrypt = artpec6_crypto_encrypt,
		.decrypt = artpec6_crypto_decrypt,
		.init = artpec6_crypto_aes_cbc_init,
		.exit = artpec6_crypto_aes_exit,
	},
	/* AES - XTS */
	{
		.base = {
			.cra_name = "xts(aes)",
			.cra_driver_name = "artpec6-xts-aes",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
			.cra_module = THIS_MODULE,
		},
		.min_keysize = 2*AES_MIN_KEY_SIZE,
		.max_keysize = 2*AES_MAX_KEY_SIZE,
		.setkey = artpec6_crypto_xts_set_key,
		.encrypt = artpec6_crypto_encrypt,
		.decrypt = artpec6_crypto_decrypt,
		.init = artpec6_crypto_aes_xts_init,
		.exit = artpec6_crypto_aes_exit,
	},
};
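
/* AEAD transformations: AES-GCM only. */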
static struct aead_alg aead_algos[] = {
	{
		.init = artpec6_crypto_aead_init,
		.setkey = artpec6_crypto_aead_set_key,
		.encrypt = artpec6_crypto_aead_encrypt,
		.decrypt = artpec6_crypto_aead_decrypt,
		.ivsize = GCM_AES_IV_SIZE,
		.maxauthsize = AES_BLOCK_SIZE,

		.base = {
			.cra_name = "gcm(aes)",
			.cra_driver_name = "artpec-gcm-aes",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
			.cra_module = THIS_MODULE,
		},
	}
};

#ifdef CONFIG_DEBUG_FS
static struct dentry *dbgfs_root;

static void artpec6_crypto_init_debugfs(void)
{
	dbgfs_root = debugfs_create_dir("artpec6_crypto", NULL);

#ifdef CONFIG_FAULT_INJECTION
	fault_create_debugfs_attr("fail_status_read", dbgfs_root,
				  &artpec6_crypto_fail_status_read);

	fault_create_debugfs_attr("fail_dma_array_full", dbgfs_root,
				  &artpec6_crypto_fail_dma_array_full);
#endif
}

static void artpec6_crypto_free_debugfs(void)
{
	debugfs_remove_recursive(dbgfs_root);
}
#endif /* CONFIG_DEBUG_FS */

static const struct of_device_id artpec6_crypto_of_match[] = {
	{ .compatible = "axis,artpec6-crypto", .data = (void *)ARTPEC6_CRYPTO },
	{ .compatible = "axis,artpec7-crypto", .data = (void *)ARTPEC7_CRYPTO },
	{}
};
MODULE_DEVICE_TABLE(of, artpec6_crypto_of_match);
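
/*
 * Probe: map the register block, allocate the per-device state and the
 * cache-line aligned pad/zero buffers, bring up the PDMA engine and
 * register the hash, skcipher and AEAD algorithms. Only a single crypto
 * block instance is supported.
 */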
static int artpec6_crypto_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	enum artpec6_crypto_variant variant;
	struct artpec6_crypto *ac;
	struct device *dev = &pdev->dev;
	void __iomem *base;
	int irq;
	int err;

	if (artpec6_crypto_dev)
		return -ENODEV;

	match = of_match_node(artpec6_crypto_of_match, dev->of_node);
	if (!match)
		return -EINVAL;

	variant = (enum artpec6_crypto_variant)match->data;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return -ENODEV;

	ac = devm_kzalloc(&pdev->dev, sizeof(struct artpec6_crypto),
			  GFP_KERNEL);
	if (!ac)
		return -ENOMEM;

	platform_set_drvdata(pdev, ac);
	ac->variant = variant;

	spin_lock_init(&ac->queue_lock);
	INIT_LIST_HEAD(&ac->queue);
	INIT_LIST_HEAD(&ac->pending);
	timer_setup(&ac->timer, artpec6_crypto_timeout, 0);

	ac->base = base;

	ac->dma_cache = kmem_cache_create("artpec6_crypto_dma",
		sizeof(struct artpec6_crypto_dma_descriptors),
		64,
		0,
		NULL);
	if (!ac->dma_cache)
		return -ENOMEM;

#ifdef CONFIG_DEBUG_FS
	artpec6_crypto_init_debugfs();
#endif

	tasklet_init(&ac->task, artpec6_crypto_task,
		     (unsigned long)ac);

	ac->pad_buffer = devm_kzalloc(&pdev->dev, 2 * ARTPEC_CACHE_LINE_MAX,
				      GFP_KERNEL);
	if (!ac->pad_buffer)
		return -ENOMEM;
	ac->pad_buffer = PTR_ALIGN(ac->pad_buffer, ARTPEC_CACHE_LINE_MAX);

	ac->zero_buffer = devm_kzalloc(&pdev->dev, 2 * ARTPEC_CACHE_LINE_MAX,
				       GFP_KERNEL);
	if (!ac->zero_buffer)
		return -ENOMEM;
	ac->zero_buffer = PTR_ALIGN(ac->zero_buffer, ARTPEC_CACHE_LINE_MAX);

	err = init_crypto_hw(ac);
	if (err)
		goto free_cache;

	err = devm_request_irq(&pdev->dev, irq, artpec6_crypto_irq, 0,
			       "artpec6-crypto", ac);
	if (err)
		goto disable_hw;

	artpec6_crypto_dev = &pdev->dev;

	err = crypto_register_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
	if (err) {
		dev_err(dev, "Failed to register ahashes\n");
		goto disable_hw;
	}

	err = crypto_register_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
	if (err) {
		dev_err(dev, "Failed to register ciphers\n");
		goto unregister_ahashes;
	}

	err = crypto_register_aeads(aead_algos, ARRAY_SIZE(aead_algos));
	if (err) {
		dev_err(dev, "Failed to register aeads\n");
		goto unregister_algs;
	}

	return 0;

unregister_algs:
	crypto_unregister_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
unregister_ahashes:
	crypto_unregister_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
disable_hw:
	artpec6_crypto_disable_hw(ac);
free_cache:
	kmem_cache_destroy(ac->dma_cache);

	return err;
}
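
/*
 * Remove: unregister all algorithms, quiesce the tasklet, timer and IRQ,
 * and shut the hardware down before freeing the DMA descriptor cache.
 */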
static int artpec6_crypto_remove(struct platform_device *pdev)
{
	struct artpec6_crypto *ac = platform_get_drvdata(pdev);
	int irq = platform_get_irq(pdev, 0);

	crypto_unregister_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
	crypto_unregister_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
	crypto_unregister_aeads(aead_algos, ARRAY_SIZE(aead_algos));

	tasklet_disable(&ac->task);
	devm_free_irq(&pdev->dev, irq, ac);
	tasklet_kill(&ac->task);
	del_timer_sync(&ac->timer);

	artpec6_crypto_disable_hw(ac);

	kmem_cache_destroy(ac->dma_cache);
#ifdef CONFIG_DEBUG_FS
	artpec6_crypto_free_debugfs();
#endif
	return 0;
}

static struct platform_driver artpec6_crypto_driver = {
	.probe = artpec6_crypto_probe,
	.remove = artpec6_crypto_remove,
	.driver = {
		.name = "artpec6-crypto",
		.of_match_table = artpec6_crypto_of_match,
	},
};

module_platform_driver(artpec6_crypto_driver);

MODULE_AUTHOR("Axis Communications AB");
MODULE_DESCRIPTION("ARTPEC-6 Crypto driver");
MODULE_LICENSE("GPL");