/*
 * Driver for ARTPEC-6 crypto block using the kernel asynchronous crypto api.
 *
 * Copyright (C) 2014-2017 Axis Communications AB
 */
#define pr_fmt(fmt)     KBUILD_MODNAME ": " fmt
#include <linux/bitfield.h>
#include <linux/crypto.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/fault-inject.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <crypto/aes.h>
#include <crypto/gcm.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>
#include <crypto/xts.h>
/* Max length of a line in all cache levels for Artpec SoCs. */
#define ARTPEC_CACHE_LINE_MAX	32

#define PDMA_OUT_CFG		0x0000
#define PDMA_OUT_BUF_CFG	0x0004
#define PDMA_OUT_CMD		0x0008
#define PDMA_OUT_DESCRQ_PUSH	0x0010
#define PDMA_OUT_DESCRQ_STAT	0x0014

#define A6_PDMA_IN_CFG		0x0028
#define A6_PDMA_IN_BUF_CFG	0x002c
#define A6_PDMA_IN_CMD		0x0030
#define A6_PDMA_IN_STATQ_PUSH	0x0038
#define A6_PDMA_IN_DESCRQ_PUSH	0x0044
#define A6_PDMA_IN_DESCRQ_STAT	0x0048
#define A6_PDMA_INTR_MASK	0x0068
#define A6_PDMA_ACK_INTR	0x006c
#define A6_PDMA_MASKED_INTR	0x0074

#define A7_PDMA_IN_CFG		0x002c
#define A7_PDMA_IN_BUF_CFG	0x0030
#define A7_PDMA_IN_CMD		0x0034
#define A7_PDMA_IN_STATQ_PUSH	0x003c
#define A7_PDMA_IN_DESCRQ_PUSH	0x0048
#define A7_PDMA_IN_DESCRQ_STAT	0x004C
#define A7_PDMA_INTR_MASK	0x006c
#define A7_PDMA_ACK_INTR	0x0070
#define A7_PDMA_MASKED_INTR	0x0078

#define PDMA_OUT_CFG_EN				BIT(0)

#define PDMA_OUT_BUF_CFG_DATA_BUF_SIZE		GENMASK(4, 0)
#define PDMA_OUT_BUF_CFG_DESCR_BUF_SIZE		GENMASK(9, 5)

#define PDMA_OUT_CMD_START			BIT(0)
#define A6_PDMA_OUT_CMD_STOP			BIT(3)
#define A7_PDMA_OUT_CMD_STOP			BIT(2)

#define PDMA_OUT_DESCRQ_PUSH_LEN		GENMASK(5, 0)
#define PDMA_OUT_DESCRQ_PUSH_ADDR		GENMASK(31, 6)

#define PDMA_OUT_DESCRQ_STAT_LEVEL		GENMASK(3, 0)
#define PDMA_OUT_DESCRQ_STAT_SIZE		GENMASK(7, 4)

#define PDMA_IN_CFG_EN				BIT(0)

#define PDMA_IN_BUF_CFG_DATA_BUF_SIZE		GENMASK(4, 0)
#define PDMA_IN_BUF_CFG_DESCR_BUF_SIZE		GENMASK(9, 5)
#define PDMA_IN_BUF_CFG_STAT_BUF_SIZE		GENMASK(14, 10)

#define PDMA_IN_CMD_START			BIT(0)
#define A6_PDMA_IN_CMD_FLUSH_STAT		BIT(2)
#define A6_PDMA_IN_CMD_STOP			BIT(3)
#define A7_PDMA_IN_CMD_FLUSH_STAT		BIT(1)
#define A7_PDMA_IN_CMD_STOP			BIT(2)

#define PDMA_IN_STATQ_PUSH_LEN			GENMASK(5, 0)
#define PDMA_IN_STATQ_PUSH_ADDR			GENMASK(31, 6)

#define PDMA_IN_DESCRQ_PUSH_LEN			GENMASK(5, 0)
#define PDMA_IN_DESCRQ_PUSH_ADDR		GENMASK(31, 6)

#define PDMA_IN_DESCRQ_STAT_LEVEL		GENMASK(3, 0)
#define PDMA_IN_DESCRQ_STAT_SIZE		GENMASK(7, 4)

#define A6_PDMA_INTR_MASK_IN_DATA		BIT(2)
#define A6_PDMA_INTR_MASK_IN_EOP		BIT(3)
#define A6_PDMA_INTR_MASK_IN_EOP_FLUSH		BIT(4)

#define A7_PDMA_INTR_MASK_IN_DATA		BIT(3)
#define A7_PDMA_INTR_MASK_IN_EOP		BIT(4)
#define A7_PDMA_INTR_MASK_IN_EOP_FLUSH		BIT(5)

#define A6_CRY_MD_OPER		GENMASK(19, 16)

#define A6_CRY_MD_HASH_SEL_CTX	GENMASK(21, 20)
#define A6_CRY_MD_HASH_HMAC_FIN	BIT(23)

#define A6_CRY_MD_CIPHER_LEN	GENMASK(21, 20)
#define A6_CRY_MD_CIPHER_DECR	BIT(22)
#define A6_CRY_MD_CIPHER_TWEAK	BIT(23)
#define A6_CRY_MD_CIPHER_DSEQ	BIT(24)

#define A7_CRY_MD_OPER		GENMASK(11, 8)

#define A7_CRY_MD_HASH_SEL_CTX	GENMASK(13, 12)
#define A7_CRY_MD_HASH_HMAC_FIN	BIT(15)

#define A7_CRY_MD_CIPHER_LEN	GENMASK(13, 12)
#define A7_CRY_MD_CIPHER_DECR	BIT(14)
#define A7_CRY_MD_CIPHER_TWEAK	BIT(15)
#define A7_CRY_MD_CIPHER_DSEQ	BIT(16)

/* DMA metadata constants */
#define regk_crypto_aes_cbc	0x00000002
#define regk_crypto_aes_ctr	0x00000003
#define regk_crypto_aes_ecb	0x00000001
#define regk_crypto_aes_gcm	0x00000004
#define regk_crypto_aes_xts	0x00000005
#define regk_crypto_cache	0x00000002
#define a6_regk_crypto_dlkey	0x0000000a
#define a7_regk_crypto_dlkey	0x0000000e
#define regk_crypto_ext		0x00000001
#define regk_crypto_hmac_sha1	0x00000007
#define regk_crypto_hmac_sha256	0x00000009
#define regk_crypto_init	0x00000000
#define regk_crypto_key_128	0x00000000
#define regk_crypto_key_192	0x00000001
#define regk_crypto_key_256	0x00000002
#define regk_crypto_null	0x00000000
#define regk_crypto_sha1	0x00000006
#define regk_crypto_sha256	0x00000008
/* DMA descriptor structures */
struct pdma_descr_ctrl {
	unsigned char short_descr : 1;
	unsigned char pad1 : 1;
	unsigned char eop : 1;
	unsigned char intr : 1;
	unsigned char short_len : 3;
	unsigned char pad2 : 1;
} __packed;

struct pdma_data_descr {
	unsigned int len : 24;
	unsigned int buf : 32;
} __packed;

struct pdma_short_descr {
	unsigned char data[7];
} __packed;

struct pdma_descr {
	struct pdma_descr_ctrl ctrl;
	union {
		struct pdma_data_descr	data;
		struct pdma_short_descr	shrt;
	};
} __packed;

struct pdma_stat_descr {
	unsigned char pad1 : 1;
	unsigned char pad2 : 1;
	unsigned char eop : 1;
	unsigned char pad3 : 5;
	unsigned int len : 24;
};
/* Each descriptor array can hold max 64 entries */
#define PDMA_DESCR_COUNT	64

#define MODULE_NAME		"Artpec-6 CA"

/* Hash modes (including HMAC variants) */
#define ARTPEC6_CRYPTO_HASH_SHA1	1
#define ARTPEC6_CRYPTO_HASH_SHA256	2

/* Crypto modes */
#define ARTPEC6_CRYPTO_CIPHER_AES_ECB	1
#define ARTPEC6_CRYPTO_CIPHER_AES_CBC	2
#define ARTPEC6_CRYPTO_CIPHER_AES_CTR	3
#define ARTPEC6_CRYPTO_CIPHER_AES_XTS	5
/* The PDMA is a DMA-engine tightly coupled with a ciphering engine.
 * It operates on a descriptor array with up to 64 descriptor entries.
 * The arrays must be 64 byte aligned in memory.
 *
 * The ciphering unit has no registers and is completely controlled by
 * a 4-byte metadata that is inserted at the beginning of each dma packet.
 *
 * A dma packet is a sequence of descriptors terminated by setting the .eop
 * field in the final descriptor of the packet.
 *
 * Multiple packets are used for providing context data, key data and
 * the plain/ciphertext.
 *
 *   PDMA Descriptors (Array)
 *  +------+------+------+~~+-------+------+----
 *  |  0   |  1   |  2   |~~| 11 EOP|  12  |  ....
 *  +--+---+--+---+----+-+~~+-------+----+-+----
 *     |      |        |         |        |
 *     |      |        |         |        |
 *   __|__  +-------++-------++-------+ +----+
 *  | MD  | |Payload||Payload||Payload| | MD |
 *  +-----+ +-------++-------++-------+ +----+
 */
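
/*
 * Illustrative sketch only (not used by the driver): how a metadata +
 * payload packet maps onto the descriptor layout above.  The payload is
 * assumed to be DMA-mapped already; only the use of .short_descr,
 * .short_len and .eop is shown here.
 */
static void __maybe_unused example_build_md_packet(struct pdma_descr *out,
						   u32 md,
						   dma_addr_t payload,
						   size_t len)
{
	/* Descriptor 0: the 4-byte metadata word as a short descriptor. */
	memset(&out[0], 0, sizeof(out[0]));
	out[0].ctrl.short_descr = 1;
	out[0].ctrl.short_len = sizeof(md);
	memcpy(out[0].shrt.data, &md, sizeof(md));

	/* Descriptor 1: the payload buffer, ending the packet with .eop. */
	memset(&out[1], 0, sizeof(out[1]));
	out[1].data.len = len;
	out[1].data.buf = payload;
	out[1].ctrl.eop = 1;
}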
struct artpec6_crypto_bounce_buffer {
	struct list_head list;
	size_t length;
	struct scatterlist *sg;
	size_t offset;
	/* buf is aligned to ARTPEC_CACHE_LINE_MAX and
	 * holds up to ARTPEC_CACHE_LINE_MAX bytes data.
	 */
	void *buf;
};

struct artpec6_crypto_dma_map {
	dma_addr_t dma_addr;
	size_t size;
	enum dma_data_direction dir;
};

struct artpec6_crypto_dma_descriptors {
	struct pdma_descr out[PDMA_DESCR_COUNT] __aligned(64);
	struct pdma_descr in[PDMA_DESCR_COUNT] __aligned(64);
	u32 stat[PDMA_DESCR_COUNT] __aligned(64);
	struct list_head bounce_buffers;
	/* Enough maps for all out/in buffers, and all three descr. arrays */
	struct artpec6_crypto_dma_map maps[PDMA_DESCR_COUNT * 2 + 2];
	dma_addr_t out_dma_addr;
	dma_addr_t in_dma_addr;
	dma_addr_t stat_dma_addr;
	size_t out_cnt;
	size_t in_cnt;
	size_t map_count;
};
enum artpec6_crypto_variant {
	ARTPEC6_CRYPTO,
	ARTPEC7_CRYPTO,
};

struct artpec6_crypto {
	void __iomem *base;
	spinlock_t queue_lock;
	struct list_head queue; /* waiting for pdma fifo space */
	struct list_head pending; /* submitted to pdma fifo */
	struct tasklet_struct task;
	struct kmem_cache *dma_cache;
	int pending_count;
	struct timer_list timer;
	enum artpec6_crypto_variant variant;
	void *pad_buffer; /* cache-aligned block padding buffer */
};

enum artpec6_crypto_hash_flags {
	HASH_FLAG_INIT_CTX = 2,
	HASH_FLAG_UPDATE = 4,
	HASH_FLAG_FINALIZE = 8,
	HASH_FLAG_HMAC = 16,
	HASH_FLAG_UPDATE_KEY = 32,
};
struct artpec6_crypto_req_common {
	struct list_head list;
	struct list_head complete_in_progress;
	struct artpec6_crypto_dma_descriptors *dma;
	struct crypto_async_request *req;
	void (*complete)(struct crypto_async_request *req);
	gfp_t gfp_flags;
};

struct artpec6_hash_request_context {
	char partial_buffer[SHA256_BLOCK_SIZE];
	char partial_buffer_out[SHA256_BLOCK_SIZE];
	char key_buffer[SHA256_BLOCK_SIZE];
	char pad_buffer[SHA256_BLOCK_SIZE + 32];
	unsigned char digeststate[SHA256_DIGEST_SIZE];
	size_t partial_bytes;
	u64 digcnt;
	u32 key_md;
	u32 hash_md;
	enum artpec6_crypto_hash_flags hash_flags;
	struct artpec6_crypto_req_common common;
};

struct artpec6_hash_export_state {
	char partial_buffer[SHA256_BLOCK_SIZE];
	unsigned char digeststate[SHA256_DIGEST_SIZE];
	size_t partial_bytes;
	u64 digcnt;
	int oper;
	unsigned int hash_flags;
};

struct artpec6_hashalg_context {
	char hmac_key[SHA256_BLOCK_SIZE];
	size_t hmac_key_length;
	struct crypto_shash *child_hash;
};

struct artpec6_crypto_request_context {
	u32 cipher_md;
	bool decrypt;
	struct artpec6_crypto_req_common common;
};

struct artpec6_cryptotfm_context {
	unsigned char aes_key[2*AES_MAX_KEY_SIZE];
	size_t key_length;
	u32 key_md;
	int crypto_type;
	struct crypto_sync_skcipher *fallback;
};

struct artpec6_crypto_aead_hw_ctx {
	__be64	aad_length_bits;
	__be64  text_length_bits;
	__u8	J0[AES_BLOCK_SIZE];
};

struct artpec6_crypto_aead_req_ctx {
	struct artpec6_crypto_aead_hw_ctx hw_ctx;
	u32	cipher_md;
	bool	decrypt;
	struct artpec6_crypto_req_common common;
	__u8 decryption_tag[AES_BLOCK_SIZE] ____cacheline_aligned;
};
/* The crypto framework makes it hard to avoid this global. */
static struct device *artpec6_crypto_dev;

#ifdef CONFIG_FAULT_INJECTION
static DECLARE_FAULT_ATTR(artpec6_crypto_fail_status_read);
static DECLARE_FAULT_ATTR(artpec6_crypto_fail_dma_array_full);
#endif

enum {
	ARTPEC6_CRYPTO_PREPARE_HASH_NO_START,
	ARTPEC6_CRYPTO_PREPARE_HASH_START,
};
static int artpec6_crypto_prepare_aead(struct aead_request *areq);
static int artpec6_crypto_prepare_crypto(struct skcipher_request *areq);
static int artpec6_crypto_prepare_hash(struct ahash_request *areq);

static void
artpec6_crypto_complete_crypto(struct crypto_async_request *req);
static void
artpec6_crypto_complete_cbc_encrypt(struct crypto_async_request *req);
static void
artpec6_crypto_complete_cbc_decrypt(struct crypto_async_request *req);
static void
artpec6_crypto_complete_aead(struct crypto_async_request *req);
static void
artpec6_crypto_complete_hash(struct crypto_async_request *req);

static void
artpec6_crypto_common_destroy(struct artpec6_crypto_req_common *common);

static void
artpec6_crypto_start_dma(struct artpec6_crypto_req_common *common);
struct artpec6_crypto_walk {
	struct scatterlist *sg;
	size_t offset;
};

static void artpec6_crypto_walk_init(struct artpec6_crypto_walk *awalk,
				     struct scatterlist *sg)
{
	awalk->sg = sg;
	awalk->offset = 0;
}

static size_t artpec6_crypto_walk_advance(struct artpec6_crypto_walk *awalk,
					  size_t nbytes)
{
	while (nbytes && awalk->sg) {
		size_t piece;

		WARN_ON(awalk->offset > awalk->sg->length);

		piece = min(nbytes, (size_t)awalk->sg->length - awalk->offset);
		nbytes -= piece;
		awalk->offset += piece;
		if (awalk->offset == awalk->sg->length) {
			awalk->sg = sg_next(awalk->sg);
			awalk->offset = 0;
		}
	}

	return nbytes;
}

static size_t
artpec6_crypto_walk_chunklen(const struct artpec6_crypto_walk *awalk)
{
	WARN_ON(awalk->sg->length == awalk->offset);

	return awalk->sg->length - awalk->offset;
}

static dma_addr_t
artpec6_crypto_walk_chunk_phys(const struct artpec6_crypto_walk *awalk)
{
	return sg_phys(awalk->sg) + awalk->offset;
}
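
/*
 * Usage sketch (illustrative only): walking a scatterlist in physically
 * contiguous chunks with the helpers above.
 */
static void __maybe_unused example_walk_usage(struct scatterlist *sg,
					      size_t nbytes)
{
	struct artpec6_crypto_walk walk;

	artpec6_crypto_walk_init(&walk, sg);

	while (walk.sg && nbytes) {
		size_t chunk = min(nbytes,
				   artpec6_crypto_walk_chunklen(&walk));
		dma_addr_t addr = artpec6_crypto_walk_chunk_phys(&walk);

		pr_debug("chunk at %pad, %zu bytes\n", &addr, chunk);

		nbytes -= chunk;
		artpec6_crypto_walk_advance(&walk, chunk);
	}
}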
427 artpec6_crypto_copy_bounce_buffers(struct artpec6_crypto_req_common
*common
)
429 struct artpec6_crypto_dma_descriptors
*dma
= common
->dma
;
430 struct artpec6_crypto_bounce_buffer
*b
;
431 struct artpec6_crypto_bounce_buffer
*next
;
433 list_for_each_entry_safe(b
, next
, &dma
->bounce_buffers
, list
) {
434 pr_debug("bounce entry %p: %zu bytes @ %zu from %p\n",
435 b
, b
->length
, b
->offset
, b
->buf
);
436 sg_pcopy_from_buffer(b
->sg
,
447 static inline bool artpec6_crypto_busy(void)
449 struct artpec6_crypto
*ac
= dev_get_drvdata(artpec6_crypto_dev
);
450 int fifo_count
= ac
->pending_count
;
452 return fifo_count
> 6;
455 static int artpec6_crypto_submit(struct artpec6_crypto_req_common
*req
)
457 struct artpec6_crypto
*ac
= dev_get_drvdata(artpec6_crypto_dev
);
460 spin_lock_bh(&ac
->queue_lock
);
462 if (!artpec6_crypto_busy()) {
463 list_add_tail(&req
->list
, &ac
->pending
);
464 artpec6_crypto_start_dma(req
);
466 } else if (req
->req
->flags
& CRYPTO_TFM_REQ_MAY_BACKLOG
) {
467 list_add_tail(&req
->list
, &ac
->queue
);
469 artpec6_crypto_common_destroy(req
);
472 spin_unlock_bh(&ac
->queue_lock
);
477 static void artpec6_crypto_start_dma(struct artpec6_crypto_req_common
*common
)
479 struct artpec6_crypto
*ac
= dev_get_drvdata(artpec6_crypto_dev
);
480 enum artpec6_crypto_variant variant
= ac
->variant
;
481 void __iomem
*base
= ac
->base
;
482 struct artpec6_crypto_dma_descriptors
*dma
= common
->dma
;
483 u32 ind
, statd
, outd
;
485 /* Make descriptor content visible to the DMA before starting it. */
488 ind
= FIELD_PREP(PDMA_IN_DESCRQ_PUSH_LEN
, dma
->in_cnt
- 1) |
489 FIELD_PREP(PDMA_IN_DESCRQ_PUSH_ADDR
, dma
->in_dma_addr
>> 6);
491 statd
= FIELD_PREP(PDMA_IN_STATQ_PUSH_LEN
, dma
->in_cnt
- 1) |
492 FIELD_PREP(PDMA_IN_STATQ_PUSH_ADDR
, dma
->stat_dma_addr
>> 6);
494 outd
= FIELD_PREP(PDMA_OUT_DESCRQ_PUSH_LEN
, dma
->out_cnt
- 1) |
495 FIELD_PREP(PDMA_OUT_DESCRQ_PUSH_ADDR
, dma
->out_dma_addr
>> 6);
497 if (variant
== ARTPEC6_CRYPTO
) {
498 writel_relaxed(ind
, base
+ A6_PDMA_IN_DESCRQ_PUSH
);
499 writel_relaxed(statd
, base
+ A6_PDMA_IN_STATQ_PUSH
);
500 writel_relaxed(PDMA_IN_CMD_START
, base
+ A6_PDMA_IN_CMD
);
502 writel_relaxed(ind
, base
+ A7_PDMA_IN_DESCRQ_PUSH
);
503 writel_relaxed(statd
, base
+ A7_PDMA_IN_STATQ_PUSH
);
504 writel_relaxed(PDMA_IN_CMD_START
, base
+ A7_PDMA_IN_CMD
);
507 writel_relaxed(outd
, base
+ PDMA_OUT_DESCRQ_PUSH
);
508 writel_relaxed(PDMA_OUT_CMD_START
, base
+ PDMA_OUT_CMD
);
514 artpec6_crypto_init_dma_operation(struct artpec6_crypto_req_common
*common
)
516 struct artpec6_crypto_dma_descriptors
*dma
= common
->dma
;
521 INIT_LIST_HEAD(&dma
->bounce_buffers
);
524 static bool fault_inject_dma_descr(void)
526 #ifdef CONFIG_FAULT_INJECTION
527 return should_fail(&artpec6_crypto_fail_dma_array_full
, 1);
/** artpec6_crypto_setup_out_descr_phys - Setup an out channel with a
 *                                        physical address
 * @addr: The physical address of the data buffer
 * @len:  The length of the data buffer
 * @eop:  True if this is the last buffer in the packet
 *
 * @return 0 on success or -ENOSPC if there are no more descriptors available
 */
543 artpec6_crypto_setup_out_descr_phys(struct artpec6_crypto_req_common
*common
,
544 dma_addr_t addr
, size_t len
, bool eop
)
546 struct artpec6_crypto_dma_descriptors
*dma
= common
->dma
;
547 struct pdma_descr
*d
;
549 if (dma
->out_cnt
>= PDMA_DESCR_COUNT
||
550 fault_inject_dma_descr()) {
551 pr_err("No free OUT DMA descriptors available!\n");
555 d
= &dma
->out
[dma
->out_cnt
++];
556 memset(d
, 0, sizeof(*d
));
558 d
->ctrl
.short_descr
= 0;
/** artpec6_crypto_setup_out_descr_short - Setup a short out descriptor
 * @dst: The virtual address of the data
 * @len: The length of the data, must be between 1 and 7 bytes
 * @eop: True if this is the last buffer in the packet
 *
 * @return 0 on success
 *	-ENOSPC if no more descriptors are available
 *	-EINVAL if the data length exceeds 7 bytes
 */
576 artpec6_crypto_setup_out_descr_short(struct artpec6_crypto_req_common
*common
,
577 void *dst
, unsigned int len
, bool eop
)
579 struct artpec6_crypto_dma_descriptors
*dma
= common
->dma
;
580 struct pdma_descr
*d
;
582 if (dma
->out_cnt
>= PDMA_DESCR_COUNT
||
583 fault_inject_dma_descr()) {
584 pr_err("No free OUT DMA descriptors available!\n");
586 } else if (len
> 7 || len
< 1) {
589 d
= &dma
->out
[dma
->out_cnt
++];
590 memset(d
, 0, sizeof(*d
));
592 d
->ctrl
.short_descr
= 1;
593 d
->ctrl
.short_len
= len
;
595 memcpy(d
->shrt
.data
, dst
, len
);
599 static int artpec6_crypto_dma_map_page(struct artpec6_crypto_req_common
*common
,
600 struct page
*page
, size_t offset
,
602 enum dma_data_direction dir
,
603 dma_addr_t
*dma_addr_out
)
605 struct artpec6_crypto_dma_descriptors
*dma
= common
->dma
;
606 struct device
*dev
= artpec6_crypto_dev
;
607 struct artpec6_crypto_dma_map
*map
;
612 if (dma
->map_count
>= ARRAY_SIZE(dma
->maps
))
615 dma_addr
= dma_map_page(dev
, page
, offset
, size
, dir
);
616 if (dma_mapping_error(dev
, dma_addr
))
619 map
= &dma
->maps
[dma
->map_count
++];
621 map
->dma_addr
= dma_addr
;
624 *dma_addr_out
= dma_addr
;
630 artpec6_crypto_dma_map_single(struct artpec6_crypto_req_common
*common
,
631 void *ptr
, size_t size
,
632 enum dma_data_direction dir
,
633 dma_addr_t
*dma_addr_out
)
635 struct page
*page
= virt_to_page(ptr
);
636 size_t offset
= (uintptr_t)ptr
& ~PAGE_MASK
;
638 return artpec6_crypto_dma_map_page(common
, page
, offset
, size
, dir
,
643 artpec6_crypto_dma_map_descs(struct artpec6_crypto_req_common
*common
)
645 struct artpec6_crypto_dma_descriptors
*dma
= common
->dma
;
648 ret
= artpec6_crypto_dma_map_single(common
, dma
->in
,
649 sizeof(dma
->in
[0]) * dma
->in_cnt
,
650 DMA_TO_DEVICE
, &dma
->in_dma_addr
);
654 ret
= artpec6_crypto_dma_map_single(common
, dma
->out
,
655 sizeof(dma
->out
[0]) * dma
->out_cnt
,
656 DMA_TO_DEVICE
, &dma
->out_dma_addr
);
660 /* We only read one stat descriptor */
661 dma
->stat
[dma
->in_cnt
- 1] = 0;
664 * DMA_BIDIRECTIONAL since we need our zeroing of the stat descriptor
667 return artpec6_crypto_dma_map_single(common
,
669 sizeof(dma
->stat
[0]) * dma
->in_cnt
,
671 &dma
->stat_dma_addr
);
675 artpec6_crypto_dma_unmap_all(struct artpec6_crypto_req_common
*common
)
677 struct artpec6_crypto_dma_descriptors
*dma
= common
->dma
;
678 struct device
*dev
= artpec6_crypto_dev
;
681 for (i
= 0; i
< dma
->map_count
; i
++) {
682 struct artpec6_crypto_dma_map
*map
= &dma
->maps
[i
];
684 dma_unmap_page(dev
, map
->dma_addr
, map
->size
, map
->dir
);
/** artpec6_crypto_setup_out_descr - Setup an out descriptor
 * @dst: The virtual address of the data
 * @len: The length of the data
 * @eop: True if this is the last buffer in the packet
 * @use_short: If this is true and the data length is 7 bytes or less then
 *	a short descriptor will be used
 *
 * @return 0 on success
 *	Any errors from artpec6_crypto_setup_out_descr_short() or
 *	artpec6_crypto_setup_out_descr_phys()
 */
703 artpec6_crypto_setup_out_descr(struct artpec6_crypto_req_common
*common
,
704 void *dst
, unsigned int len
, bool eop
,
707 if (use_short
&& len
< 7) {
708 return artpec6_crypto_setup_out_descr_short(common
, dst
, len
,
714 ret
= artpec6_crypto_dma_map_single(common
, dst
, len
,
720 return artpec6_crypto_setup_out_descr_phys(common
, dma_addr
,
/** artpec6_crypto_setup_in_descr_phys - Setup an in channel with a
 *                                       physical address
 * @addr: The physical address of the data buffer
 * @len:  The length of the data buffer
 * @intr: True if an interrupt should be fired after HW processing of this
 *	  descriptor
 */
735 artpec6_crypto_setup_in_descr_phys(struct artpec6_crypto_req_common
*common
,
736 dma_addr_t addr
, unsigned int len
, bool intr
)
738 struct artpec6_crypto_dma_descriptors
*dma
= common
->dma
;
739 struct pdma_descr
*d
;
741 if (dma
->in_cnt
>= PDMA_DESCR_COUNT
||
742 fault_inject_dma_descr()) {
743 pr_err("No free IN DMA descriptors available!\n");
746 d
= &dma
->in
[dma
->in_cnt
++];
747 memset(d
, 0, sizeof(*d
));
/** artpec6_crypto_setup_in_descr - Setup an in channel descriptor
 * @buffer: The virtual address of the data buffer
 * @len:    The length of the data buffer
 * @last:   If this is the last data buffer in the request (i.e. an interrupt
 *	    is needed)
 *
 * Short descriptors are not used for the in channel
 */
765 artpec6_crypto_setup_in_descr(struct artpec6_crypto_req_common
*common
,
766 void *buffer
, unsigned int len
, bool last
)
771 ret
= artpec6_crypto_dma_map_single(common
, buffer
, len
,
772 DMA_FROM_DEVICE
, &dma_addr
);
776 return artpec6_crypto_setup_in_descr_phys(common
, dma_addr
, len
, last
);
779 static struct artpec6_crypto_bounce_buffer
*
780 artpec6_crypto_alloc_bounce(gfp_t flags
)
783 size_t alloc_size
= sizeof(struct artpec6_crypto_bounce_buffer
) +
784 2 * ARTPEC_CACHE_LINE_MAX
;
785 struct artpec6_crypto_bounce_buffer
*bbuf
= kzalloc(alloc_size
, flags
);
791 bbuf
->buf
= PTR_ALIGN(base
, ARTPEC_CACHE_LINE_MAX
);
795 static int setup_bounce_buffer_in(struct artpec6_crypto_req_common
*common
,
796 struct artpec6_crypto_walk
*walk
, size_t size
)
798 struct artpec6_crypto_bounce_buffer
*bbuf
;
801 bbuf
= artpec6_crypto_alloc_bounce(common
->gfp_flags
);
807 bbuf
->offset
= walk
->offset
;
809 ret
= artpec6_crypto_setup_in_descr(common
, bbuf
->buf
, size
, false);
815 pr_debug("BOUNCE %zu offset %zu\n", size
, walk
->offset
);
816 list_add_tail(&bbuf
->list
, &common
->dma
->bounce_buffers
);
821 artpec6_crypto_setup_sg_descrs_in(struct artpec6_crypto_req_common
*common
,
822 struct artpec6_crypto_walk
*walk
,
829 while (walk
->sg
&& count
) {
830 chunk
= min(count
, artpec6_crypto_walk_chunklen(walk
));
831 addr
= artpec6_crypto_walk_chunk_phys(walk
);
		/* When destination buffers are not aligned to the cache line
		 * size we need bounce buffers. The DMA-API requires that the
		 * entire line is owned by the DMA buffer and this holds also
		 * for the case when coherent DMA is used.
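		 *
		 * Worked example (illustrative, assuming a 32-byte line):
		 * a destination chunk at physical address 0x1014 of length
		 * 0x30 first gets a 12-byte bounce copy up to the 0x1020
		 * boundary, then one 32-byte chunk is DMA:d directly, and
		 * the remaining 4-byte unaligned tail is bounced as well.
		 */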
838 if (!IS_ALIGNED(addr
, ARTPEC_CACHE_LINE_MAX
)) {
839 chunk
= min_t(dma_addr_t
, chunk
,
840 ALIGN(addr
, ARTPEC_CACHE_LINE_MAX
) -
843 pr_debug("CHUNK-b %pad:%zu\n", &addr
, chunk
);
844 ret
= setup_bounce_buffer_in(common
, walk
, chunk
);
845 } else if (chunk
< ARTPEC_CACHE_LINE_MAX
) {
846 pr_debug("CHUNK-b %pad:%zu\n", &addr
, chunk
);
847 ret
= setup_bounce_buffer_in(common
, walk
, chunk
);
851 chunk
= chunk
& ~(ARTPEC_CACHE_LINE_MAX
-1);
853 pr_debug("CHUNK %pad:%zu\n", &addr
, chunk
);
855 ret
= artpec6_crypto_dma_map_page(common
,
865 ret
= artpec6_crypto_setup_in_descr_phys(common
,
873 count
= count
- chunk
;
874 artpec6_crypto_walk_advance(walk
, chunk
);
878 pr_err("EOL unexpected %zu bytes left\n", count
);
880 return count
? -EINVAL
: 0;
884 artpec6_crypto_setup_sg_descrs_out(struct artpec6_crypto_req_common
*common
,
885 struct artpec6_crypto_walk
*walk
,
892 while (walk
->sg
&& count
) {
893 chunk
= min(count
, artpec6_crypto_walk_chunklen(walk
));
894 addr
= artpec6_crypto_walk_chunk_phys(walk
);
896 pr_debug("OUT-CHUNK %pad:%zu\n", &addr
, chunk
);
901 chunk
= min_t(size_t, chunk
, (4-(addr
&3)));
903 sg_pcopy_to_buffer(walk
->sg
, 1, buf
, chunk
,
906 ret
= artpec6_crypto_setup_out_descr_short(common
, buf
,
912 ret
= artpec6_crypto_dma_map_page(common
,
922 ret
= artpec6_crypto_setup_out_descr_phys(common
,
930 count
= count
- chunk
;
931 artpec6_crypto_walk_advance(walk
, chunk
);
935 pr_err("EOL unexpected %zu bytes left\n", count
);
937 return count
? -EINVAL
: 0;
/** artpec6_crypto_terminate_out_descrs - Set the EOP on the last out descriptor
 *
 * If the out descriptor list is non-empty, then the eop flag on the
 * last used out descriptor will be set.
 *
 * @return 0 on success
 *	-EINVAL if the out descriptor list is empty or has overflowed
 */
950 artpec6_crypto_terminate_out_descrs(struct artpec6_crypto_req_common
*common
)
952 struct artpec6_crypto_dma_descriptors
*dma
= common
->dma
;
953 struct pdma_descr
*d
;
955 if (!dma
->out_cnt
|| dma
->out_cnt
> PDMA_DESCR_COUNT
) {
		pr_err("%s: OUT descriptor list is %s\n",
			MODULE_NAME, dma->out_cnt ? "full" : "empty");
962 d
= &dma
->out
[dma
->out_cnt
-1];
/** artpec6_crypto_terminate_in_descrs - Set the interrupt flag on the last
 *                                       in descriptor
 *
 * See artpec6_crypto_terminate_out_descrs() for return values
 */
974 artpec6_crypto_terminate_in_descrs(struct artpec6_crypto_req_common
*common
)
976 struct artpec6_crypto_dma_descriptors
*dma
= common
->dma
;
977 struct pdma_descr
*d
;
979 if (!dma
->in_cnt
|| dma
->in_cnt
> PDMA_DESCR_COUNT
) {
		pr_err("%s: IN descriptor list is %s\n",
			MODULE_NAME, dma->in_cnt ? "full" : "empty");
985 d
= &dma
->in
[dma
->in_cnt
-1];
/** create_hash_pad - Create a Secure Hash conformant pad
 *
 * @dst:      The destination buffer to write the pad. Must be at least 64 bytes
 * @dgstlen:  The total length of the data hashed so far, in bytes
 * @bitcount: The total length of the data hashed so far, in bits
 *
 * @return The total number of padding bytes written to @dst
 */
999 create_hash_pad(int oper
, unsigned char *dst
, u64 dgstlen
, u64 bitcount
)
1001 unsigned int mod
, target
, diff
, pad_bytes
, size_bytes
;
1002 __be64 bits
= __cpu_to_be64(bitcount
);
1005 case regk_crypto_sha1
:
1006 case regk_crypto_sha256
:
1007 case regk_crypto_hmac_sha1
:
1008 case regk_crypto_hmac_sha256
:
1021 diff
= dgstlen
& (mod
- 1);
1022 pad_bytes
= diff
> target
? target
+ mod
- diff
: target
- diff
;
1024 memset(dst
+ 1, 0, pad_bytes
);
1027 if (size_bytes
== 16) {
1028 memset(dst
+ 1 + pad_bytes
, 0, 8);
1029 memcpy(dst
+ 1 + pad_bytes
+ 8, &bits
, 8);
1031 memcpy(dst
+ 1 + pad_bytes
, &bits
, 8);
1034 return pad_bytes
+ size_bytes
+ 1;
1037 static int artpec6_crypto_common_init(struct artpec6_crypto_req_common
*common
,
1038 struct crypto_async_request
*parent
,
1039 void (*complete
)(struct crypto_async_request
*req
),
1040 struct scatterlist
*dstsg
, unsigned int nbytes
)
1043 struct artpec6_crypto
*ac
= dev_get_drvdata(artpec6_crypto_dev
);
1045 flags
= (parent
->flags
& CRYPTO_TFM_REQ_MAY_SLEEP
) ?
1046 GFP_KERNEL
: GFP_ATOMIC
;
1048 common
->gfp_flags
= flags
;
1049 common
->dma
= kmem_cache_alloc(ac
->dma_cache
, flags
);
1053 common
->req
= parent
;
1054 common
->complete
= complete
;
1059 artpec6_crypto_bounce_destroy(struct artpec6_crypto_dma_descriptors
*dma
)
1061 struct artpec6_crypto_bounce_buffer
*b
;
1062 struct artpec6_crypto_bounce_buffer
*next
;
1064 list_for_each_entry_safe(b
, next
, &dma
->bounce_buffers
, list
) {
1070 artpec6_crypto_common_destroy(struct artpec6_crypto_req_common
*common
)
1072 struct artpec6_crypto
*ac
= dev_get_drvdata(artpec6_crypto_dev
);
1074 artpec6_crypto_dma_unmap_all(common
);
1075 artpec6_crypto_bounce_destroy(common
->dma
);
1076 kmem_cache_free(ac
->dma_cache
, common
->dma
);
1082 * Ciphering functions.
1084 static int artpec6_crypto_encrypt(struct skcipher_request
*req
)
1086 struct crypto_skcipher
*cipher
= crypto_skcipher_reqtfm(req
);
1087 struct artpec6_cryptotfm_context
*ctx
= crypto_skcipher_ctx(cipher
);
1088 struct artpec6_crypto_request_context
*req_ctx
= NULL
;
1089 void (*complete
)(struct crypto_async_request
*req
);
1092 req_ctx
= skcipher_request_ctx(req
);
1094 switch (ctx
->crypto_type
) {
1095 case ARTPEC6_CRYPTO_CIPHER_AES_CBC
:
1096 case ARTPEC6_CRYPTO_CIPHER_AES_ECB
:
1097 case ARTPEC6_CRYPTO_CIPHER_AES_XTS
:
1098 req_ctx
->decrypt
= 0;
1104 switch (ctx
->crypto_type
) {
1105 case ARTPEC6_CRYPTO_CIPHER_AES_CBC
:
1106 complete
= artpec6_crypto_complete_cbc_encrypt
;
1109 complete
= artpec6_crypto_complete_crypto
;
1113 ret
= artpec6_crypto_common_init(&req_ctx
->common
,
1116 req
->dst
, req
->cryptlen
);
1120 ret
= artpec6_crypto_prepare_crypto(req
);
1122 artpec6_crypto_common_destroy(&req_ctx
->common
);
1126 return artpec6_crypto_submit(&req_ctx
->common
);
1129 static int artpec6_crypto_decrypt(struct skcipher_request
*req
)
1132 struct crypto_skcipher
*cipher
= crypto_skcipher_reqtfm(req
);
1133 struct artpec6_cryptotfm_context
*ctx
= crypto_skcipher_ctx(cipher
);
1134 struct artpec6_crypto_request_context
*req_ctx
= NULL
;
1135 void (*complete
)(struct crypto_async_request
*req
);
1137 req_ctx
= skcipher_request_ctx(req
);
1139 switch (ctx
->crypto_type
) {
1140 case ARTPEC6_CRYPTO_CIPHER_AES_CBC
:
1141 case ARTPEC6_CRYPTO_CIPHER_AES_ECB
:
1142 case ARTPEC6_CRYPTO_CIPHER_AES_XTS
:
1143 req_ctx
->decrypt
= 1;
1150 switch (ctx
->crypto_type
) {
1151 case ARTPEC6_CRYPTO_CIPHER_AES_CBC
:
1152 complete
= artpec6_crypto_complete_cbc_decrypt
;
1155 complete
= artpec6_crypto_complete_crypto
;
1159 ret
= artpec6_crypto_common_init(&req_ctx
->common
, &req
->base
,
1161 req
->dst
, req
->cryptlen
);
1165 ret
= artpec6_crypto_prepare_crypto(req
);
1167 artpec6_crypto_common_destroy(&req_ctx
->common
);
1171 return artpec6_crypto_submit(&req_ctx
->common
);
1175 artpec6_crypto_ctr_crypt(struct skcipher_request
*req
, bool encrypt
)
1177 struct crypto_skcipher
*cipher
= crypto_skcipher_reqtfm(req
);
1178 struct artpec6_cryptotfm_context
*ctx
= crypto_skcipher_ctx(cipher
);
1179 size_t iv_len
= crypto_skcipher_ivsize(cipher
);
1180 unsigned int counter
= be32_to_cpup((__be32
*)
1181 (req
->iv
+ iv_len
- 4));
1182 unsigned int nblks
= ALIGN(req
->cryptlen
, AES_BLOCK_SIZE
) /
	/*
	 * The hardware uses only the last 32 bits as the counter while the
	 * kernel tests (aes_ctr_enc_tv_template[4] for example) expect that
	 * the whole IV is a counter. So fall back if the counter is going to
	 * overflow.
	 */
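	/*
	 * Example (illustrative): if the low 32 bits of the IV are
	 * 0xffffffff and the request covers two AES blocks, counter + nblks
	 * wraps around to 1, the check below triggers and the request is
	 * handled by the software fallback instead.
	 */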
1191 if (counter
+ nblks
< counter
) {
1194 pr_debug("counter %x will overflow (nblks %u), falling back\n",
1195 counter
, counter
+ nblks
);
1197 ret
= crypto_sync_skcipher_setkey(ctx
->fallback
, ctx
->aes_key
,
1203 SYNC_SKCIPHER_REQUEST_ON_STACK(subreq
, ctx
->fallback
);
1205 skcipher_request_set_sync_tfm(subreq
, ctx
->fallback
);
1206 skcipher_request_set_callback(subreq
, req
->base
.flags
,
1208 skcipher_request_set_crypt(subreq
, req
->src
, req
->dst
,
1209 req
->cryptlen
, req
->iv
);
1210 ret
= encrypt
? crypto_skcipher_encrypt(subreq
)
1211 : crypto_skcipher_decrypt(subreq
);
1212 skcipher_request_zero(subreq
);
1217 return encrypt
? artpec6_crypto_encrypt(req
)
1218 : artpec6_crypto_decrypt(req
);
1221 static int artpec6_crypto_ctr_encrypt(struct skcipher_request
*req
)
1223 return artpec6_crypto_ctr_crypt(req
, true);
1226 static int artpec6_crypto_ctr_decrypt(struct skcipher_request
*req
)
1228 return artpec6_crypto_ctr_crypt(req
, false);
1234 static int artpec6_crypto_aead_init(struct crypto_aead
*tfm
)
1236 struct artpec6_cryptotfm_context
*tfm_ctx
= crypto_aead_ctx(tfm
);
1238 memset(tfm_ctx
, 0, sizeof(*tfm_ctx
));
1240 crypto_aead_set_reqsize(tfm
,
1241 sizeof(struct artpec6_crypto_aead_req_ctx
));
1246 static int artpec6_crypto_aead_set_key(struct crypto_aead
*tfm
, const u8
*key
,
1249 struct artpec6_cryptotfm_context
*ctx
= crypto_tfm_ctx(&tfm
->base
);
1251 if (len
!= 16 && len
!= 24 && len
!= 32) {
1252 crypto_aead_set_flags(tfm
, CRYPTO_TFM_RES_BAD_KEY_LEN
);
1256 ctx
->key_length
= len
;
1258 memcpy(ctx
->aes_key
, key
, len
);
1262 static int artpec6_crypto_aead_encrypt(struct aead_request
*req
)
1265 struct artpec6_crypto_aead_req_ctx
*req_ctx
= aead_request_ctx(req
);
1267 req_ctx
->decrypt
= false;
1268 ret
= artpec6_crypto_common_init(&req_ctx
->common
, &req
->base
,
1269 artpec6_crypto_complete_aead
,
1274 ret
= artpec6_crypto_prepare_aead(req
);
1276 artpec6_crypto_common_destroy(&req_ctx
->common
);
1280 return artpec6_crypto_submit(&req_ctx
->common
);
1283 static int artpec6_crypto_aead_decrypt(struct aead_request
*req
)
1286 struct artpec6_crypto_aead_req_ctx
*req_ctx
= aead_request_ctx(req
);
1288 req_ctx
->decrypt
= true;
1289 if (req
->cryptlen
< AES_BLOCK_SIZE
)
1292 ret
= artpec6_crypto_common_init(&req_ctx
->common
,
1294 artpec6_crypto_complete_aead
,
1299 ret
= artpec6_crypto_prepare_aead(req
);
1301 artpec6_crypto_common_destroy(&req_ctx
->common
);
1305 return artpec6_crypto_submit(&req_ctx
->common
);
1308 static int artpec6_crypto_prepare_hash(struct ahash_request
*areq
)
1310 struct artpec6_hashalg_context
*ctx
= crypto_tfm_ctx(areq
->base
.tfm
);
1311 struct artpec6_hash_request_context
*req_ctx
= ahash_request_ctx(areq
);
1312 size_t digestsize
= crypto_ahash_digestsize(crypto_ahash_reqtfm(areq
));
1313 size_t contextsize
= digestsize
;
1314 size_t blocksize
= crypto_tfm_alg_blocksize(
1315 crypto_ahash_tfm(crypto_ahash_reqtfm(areq
)));
1316 struct artpec6_crypto_req_common
*common
= &req_ctx
->common
;
1317 struct artpec6_crypto
*ac
= dev_get_drvdata(artpec6_crypto_dev
);
1318 enum artpec6_crypto_variant variant
= ac
->variant
;
1320 bool ext_ctx
= false;
1321 bool run_hw
= false;
1324 artpec6_crypto_init_dma_operation(common
);
	/* Upload HMAC key, must be the first packet */
1327 if (req_ctx
->hash_flags
& HASH_FLAG_HMAC
) {
1328 if (variant
== ARTPEC6_CRYPTO
) {
1329 req_ctx
->key_md
= FIELD_PREP(A6_CRY_MD_OPER
,
1330 a6_regk_crypto_dlkey
);
1332 req_ctx
->key_md
= FIELD_PREP(A7_CRY_MD_OPER
,
1333 a7_regk_crypto_dlkey
);
1336 /* Copy and pad up the key */
1337 memcpy(req_ctx
->key_buffer
, ctx
->hmac_key
,
1338 ctx
->hmac_key_length
);
1339 memset(req_ctx
->key_buffer
+ ctx
->hmac_key_length
, 0,
1340 blocksize
- ctx
->hmac_key_length
);
1342 error
= artpec6_crypto_setup_out_descr(common
,
1343 (void *)&req_ctx
->key_md
,
1344 sizeof(req_ctx
->key_md
), false, false);
1348 error
= artpec6_crypto_setup_out_descr(common
,
1349 req_ctx
->key_buffer
, blocksize
,
1355 if (!(req_ctx
->hash_flags
& HASH_FLAG_INIT_CTX
)) {
1356 /* Restore context */
1357 sel_ctx
= regk_crypto_ext
;
1360 sel_ctx
= regk_crypto_init
;
1363 if (variant
== ARTPEC6_CRYPTO
) {
1364 req_ctx
->hash_md
&= ~A6_CRY_MD_HASH_SEL_CTX
;
1365 req_ctx
->hash_md
|= FIELD_PREP(A6_CRY_MD_HASH_SEL_CTX
, sel_ctx
);
1367 /* If this is the final round, set the final flag */
1368 if (req_ctx
->hash_flags
& HASH_FLAG_FINALIZE
)
1369 req_ctx
->hash_md
|= A6_CRY_MD_HASH_HMAC_FIN
;
1371 req_ctx
->hash_md
&= ~A7_CRY_MD_HASH_SEL_CTX
;
1372 req_ctx
->hash_md
|= FIELD_PREP(A7_CRY_MD_HASH_SEL_CTX
, sel_ctx
);
1374 /* If this is the final round, set the final flag */
1375 if (req_ctx
->hash_flags
& HASH_FLAG_FINALIZE
)
1376 req_ctx
->hash_md
|= A7_CRY_MD_HASH_HMAC_FIN
;
1379 /* Setup up metadata descriptors */
1380 error
= artpec6_crypto_setup_out_descr(common
,
1381 (void *)&req_ctx
->hash_md
,
1382 sizeof(req_ctx
->hash_md
), false, false);
1386 error
= artpec6_crypto_setup_in_descr(common
, ac
->pad_buffer
, 4, false);
1391 error
= artpec6_crypto_setup_out_descr(common
,
1392 req_ctx
->digeststate
,
1393 contextsize
, false, false);
1399 if (req_ctx
->hash_flags
& HASH_FLAG_UPDATE
) {
1400 size_t done_bytes
= 0;
1401 size_t total_bytes
= areq
->nbytes
+ req_ctx
->partial_bytes
;
1402 size_t ready_bytes
= round_down(total_bytes
, blocksize
);
1403 struct artpec6_crypto_walk walk
;
1405 run_hw
= ready_bytes
> 0;
1406 if (req_ctx
->partial_bytes
&& ready_bytes
) {
			/* We have a partial buffer and will send at least
			 * some bytes to the HW. Empty this partial buffer
			 * before tackling the new data.
			 */
1411 memcpy(req_ctx
->partial_buffer_out
,
1412 req_ctx
->partial_buffer
,
1413 req_ctx
->partial_bytes
);
1415 error
= artpec6_crypto_setup_out_descr(common
,
1416 req_ctx
->partial_buffer_out
,
1417 req_ctx
->partial_bytes
,
1422 /* Reset partial buffer */
1423 done_bytes
+= req_ctx
->partial_bytes
;
1424 req_ctx
->partial_bytes
= 0;
1427 artpec6_crypto_walk_init(&walk
, areq
->src
);
1429 error
= artpec6_crypto_setup_sg_descrs_out(common
, &walk
,
1436 size_t sg_skip
= ready_bytes
- done_bytes
;
1437 size_t sg_rem
= areq
->nbytes
- sg_skip
;
1439 sg_pcopy_to_buffer(areq
->src
, sg_nents(areq
->src
),
1440 req_ctx
->partial_buffer
+
1441 req_ctx
->partial_bytes
,
1444 req_ctx
->partial_bytes
+= sg_rem
;
1447 req_ctx
->digcnt
+= ready_bytes
;
1448 req_ctx
->hash_flags
&= ~(HASH_FLAG_UPDATE
);
1452 if (req_ctx
->hash_flags
& HASH_FLAG_FINALIZE
) {
1453 size_t hash_pad_len
;
1457 if (variant
== ARTPEC6_CRYPTO
)
1458 oper
= FIELD_GET(A6_CRY_MD_OPER
, req_ctx
->hash_md
);
1460 oper
= FIELD_GET(A7_CRY_MD_OPER
, req_ctx
->hash_md
);
1462 /* Write out the partial buffer if present */
1463 if (req_ctx
->partial_bytes
) {
1464 memcpy(req_ctx
->partial_buffer_out
,
1465 req_ctx
->partial_buffer
,
1466 req_ctx
->partial_bytes
);
1467 error
= artpec6_crypto_setup_out_descr(common
,
1468 req_ctx
->partial_buffer_out
,
1469 req_ctx
->partial_bytes
,
1474 req_ctx
->digcnt
+= req_ctx
->partial_bytes
;
1475 req_ctx
->partial_bytes
= 0;
1478 if (req_ctx
->hash_flags
& HASH_FLAG_HMAC
)
1479 digest_bits
= 8 * (req_ctx
->digcnt
+ blocksize
);
1481 digest_bits
= 8 * req_ctx
->digcnt
;
1483 /* Add the hash pad */
1484 hash_pad_len
= create_hash_pad(oper
, req_ctx
->pad_buffer
,
1485 req_ctx
->digcnt
, digest_bits
);
1486 error
= artpec6_crypto_setup_out_descr(common
,
1487 req_ctx
->pad_buffer
,
1488 hash_pad_len
, false,
1490 req_ctx
->digcnt
= 0;
1495 /* Descriptor for the final result */
1496 error
= artpec6_crypto_setup_in_descr(common
, areq
->result
,
1502 } else { /* This is not the final operation for this request */
1504 return ARTPEC6_CRYPTO_PREPARE_HASH_NO_START
;
1506 /* Save the result to the context */
1507 error
= artpec6_crypto_setup_in_descr(common
,
1508 req_ctx
->digeststate
,
1509 contextsize
, false);
1515 req_ctx
->hash_flags
&= ~(HASH_FLAG_INIT_CTX
| HASH_FLAG_UPDATE
|
1516 HASH_FLAG_FINALIZE
);
1518 error
= artpec6_crypto_terminate_in_descrs(common
);
1522 error
= artpec6_crypto_terminate_out_descrs(common
);
1526 error
= artpec6_crypto_dma_map_descs(common
);
1530 return ARTPEC6_CRYPTO_PREPARE_HASH_START
;
1534 static int artpec6_crypto_aes_ecb_init(struct crypto_skcipher
*tfm
)
1536 struct artpec6_cryptotfm_context
*ctx
= crypto_skcipher_ctx(tfm
);
1538 tfm
->reqsize
= sizeof(struct artpec6_crypto_request_context
);
1539 ctx
->crypto_type
= ARTPEC6_CRYPTO_CIPHER_AES_ECB
;
1544 static int artpec6_crypto_aes_ctr_init(struct crypto_skcipher
*tfm
)
1546 struct artpec6_cryptotfm_context
*ctx
= crypto_skcipher_ctx(tfm
);
1549 crypto_alloc_sync_skcipher(crypto_tfm_alg_name(&tfm
->base
),
1550 0, CRYPTO_ALG_NEED_FALLBACK
);
1551 if (IS_ERR(ctx
->fallback
))
1552 return PTR_ERR(ctx
->fallback
);
1554 tfm
->reqsize
= sizeof(struct artpec6_crypto_request_context
);
1555 ctx
->crypto_type
= ARTPEC6_CRYPTO_CIPHER_AES_CTR
;
1560 static int artpec6_crypto_aes_cbc_init(struct crypto_skcipher
*tfm
)
1562 struct artpec6_cryptotfm_context
*ctx
= crypto_skcipher_ctx(tfm
);
1564 tfm
->reqsize
= sizeof(struct artpec6_crypto_request_context
);
1565 ctx
->crypto_type
= ARTPEC6_CRYPTO_CIPHER_AES_CBC
;
1570 static int artpec6_crypto_aes_xts_init(struct crypto_skcipher
*tfm
)
1572 struct artpec6_cryptotfm_context
*ctx
= crypto_skcipher_ctx(tfm
);
1574 tfm
->reqsize
= sizeof(struct artpec6_crypto_request_context
);
1575 ctx
->crypto_type
= ARTPEC6_CRYPTO_CIPHER_AES_XTS
;
1580 static void artpec6_crypto_aes_exit(struct crypto_skcipher
*tfm
)
1582 struct artpec6_cryptotfm_context
*ctx
= crypto_skcipher_ctx(tfm
);
1584 memset(ctx
, 0, sizeof(*ctx
));
1587 static void artpec6_crypto_aes_ctr_exit(struct crypto_skcipher
*tfm
)
1589 struct artpec6_cryptotfm_context
*ctx
= crypto_skcipher_ctx(tfm
);
1591 crypto_free_sync_skcipher(ctx
->fallback
);
1592 artpec6_crypto_aes_exit(tfm
);
1596 artpec6_crypto_cipher_set_key(struct crypto_skcipher
*cipher
, const u8
*key
,
1597 unsigned int keylen
)
1599 struct artpec6_cryptotfm_context
*ctx
=
1600 crypto_skcipher_ctx(cipher
);
1608 crypto_skcipher_set_flags(cipher
,
1609 CRYPTO_TFM_RES_BAD_KEY_LEN
);
1613 memcpy(ctx
->aes_key
, key
, keylen
);
1614 ctx
->key_length
= keylen
;
1619 artpec6_crypto_xts_set_key(struct crypto_skcipher
*cipher
, const u8
*key
,
1620 unsigned int keylen
)
1622 struct artpec6_cryptotfm_context
*ctx
=
1623 crypto_skcipher_ctx(cipher
);
1626 ret
= xts_check_key(&cipher
->base
, key
, keylen
);
1636 crypto_skcipher_set_flags(cipher
,
1637 CRYPTO_TFM_RES_BAD_KEY_LEN
);
1641 memcpy(ctx
->aes_key
, key
, keylen
);
1642 ctx
->key_length
= keylen
;
/** artpec6_crypto_prepare_crypto - Prepare an async block cipher crypto request
 *
 * @areq: The asynchronous request to process
 *
 * @return 0 if the dma job was successfully prepared
 *	  <0 on error
 *
 * This function sets up the PDMA descriptors for a block cipher request.
 *
 * The required padding is added for AES-CTR using a statically defined
 * buffer.
 *
 * The PDMA descriptor list will be as follows:
 *
 * OUT: [KEY_MD][KEY][EOP]<CIPHER_MD>[IV]<data_0>...[data_n][AES-CTR_pad]<eop>
 * IN:  <CIPHER_MD><data_0>...[data_n]<intr>
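 *
 * Example (illustrative): a single-block AES-128-CBC encryption ends up
 * with OUT descriptors for the 4-byte key metadata, the 16-byte key
 * (eop), the 4-byte cipher metadata, the 16-byte IV and one 16-byte
 * plaintext descriptor (eop), and IN descriptors for the discarded
 * 4-byte metadata response plus one 16-byte ciphertext descriptor
 * (intr).
 */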
1664 static int artpec6_crypto_prepare_crypto(struct skcipher_request
*areq
)
1667 struct artpec6_crypto_walk walk
;
1668 struct crypto_skcipher
*cipher
= crypto_skcipher_reqtfm(areq
);
1669 struct artpec6_cryptotfm_context
*ctx
= crypto_skcipher_ctx(cipher
);
1670 struct artpec6_crypto_request_context
*req_ctx
= NULL
;
1671 size_t iv_len
= crypto_skcipher_ivsize(cipher
);
1672 struct artpec6_crypto
*ac
= dev_get_drvdata(artpec6_crypto_dev
);
1673 enum artpec6_crypto_variant variant
= ac
->variant
;
1674 struct artpec6_crypto_req_common
*common
;
1675 bool cipher_decr
= false;
1677 u32 cipher_len
= 0; /* Same as regk_crypto_key_128 for NULL crypto */
1680 req_ctx
= skcipher_request_ctx(areq
);
1681 common
= &req_ctx
->common
;
1683 artpec6_crypto_init_dma_operation(common
);
1685 if (variant
== ARTPEC6_CRYPTO
)
1686 ctx
->key_md
= FIELD_PREP(A6_CRY_MD_OPER
, a6_regk_crypto_dlkey
);
1688 ctx
->key_md
= FIELD_PREP(A7_CRY_MD_OPER
, a7_regk_crypto_dlkey
);
1690 ret
= artpec6_crypto_setup_out_descr(common
, (void *)&ctx
->key_md
,
1691 sizeof(ctx
->key_md
), false, false);
1695 ret
= artpec6_crypto_setup_out_descr(common
, ctx
->aes_key
,
1696 ctx
->key_length
, true, false);
1700 req_ctx
->cipher_md
= 0;
1702 if (ctx
->crypto_type
== ARTPEC6_CRYPTO_CIPHER_AES_XTS
)
1703 cipher_klen
= ctx
->key_length
/2;
1705 cipher_klen
= ctx
->key_length
;
1708 switch (cipher_klen
) {
1710 cipher_len
= regk_crypto_key_128
;
1713 cipher_len
= regk_crypto_key_192
;
1716 cipher_len
= regk_crypto_key_256
;
1719 pr_err("%s: Invalid key length %d!\n",
1720 MODULE_NAME
, ctx
->key_length
);
1724 switch (ctx
->crypto_type
) {
1725 case ARTPEC6_CRYPTO_CIPHER_AES_ECB
:
1726 oper
= regk_crypto_aes_ecb
;
1727 cipher_decr
= req_ctx
->decrypt
;
1730 case ARTPEC6_CRYPTO_CIPHER_AES_CBC
:
1731 oper
= regk_crypto_aes_cbc
;
1732 cipher_decr
= req_ctx
->decrypt
;
1735 case ARTPEC6_CRYPTO_CIPHER_AES_CTR
:
1736 oper
= regk_crypto_aes_ctr
;
1737 cipher_decr
= false;
1740 case ARTPEC6_CRYPTO_CIPHER_AES_XTS
:
1741 oper
= regk_crypto_aes_xts
;
1742 cipher_decr
= req_ctx
->decrypt
;
1744 if (variant
== ARTPEC6_CRYPTO
)
1745 req_ctx
->cipher_md
|= A6_CRY_MD_CIPHER_DSEQ
;
1747 req_ctx
->cipher_md
|= A7_CRY_MD_CIPHER_DSEQ
;
1751 pr_err("%s: Invalid cipher mode %d!\n",
1752 MODULE_NAME
, ctx
->crypto_type
);
1756 if (variant
== ARTPEC6_CRYPTO
) {
1757 req_ctx
->cipher_md
|= FIELD_PREP(A6_CRY_MD_OPER
, oper
);
1758 req_ctx
->cipher_md
|= FIELD_PREP(A6_CRY_MD_CIPHER_LEN
,
1761 req_ctx
->cipher_md
|= A6_CRY_MD_CIPHER_DECR
;
1763 req_ctx
->cipher_md
|= FIELD_PREP(A7_CRY_MD_OPER
, oper
);
1764 req_ctx
->cipher_md
|= FIELD_PREP(A7_CRY_MD_CIPHER_LEN
,
1767 req_ctx
->cipher_md
|= A7_CRY_MD_CIPHER_DECR
;
1770 ret
= artpec6_crypto_setup_out_descr(common
,
1771 &req_ctx
->cipher_md
,
1772 sizeof(req_ctx
->cipher_md
),
1777 ret
= artpec6_crypto_setup_in_descr(common
, ac
->pad_buffer
, 4, false);
1782 ret
= artpec6_crypto_setup_out_descr(common
, areq
->iv
, iv_len
,
1788 artpec6_crypto_walk_init(&walk
, areq
->src
);
1789 ret
= artpec6_crypto_setup_sg_descrs_out(common
, &walk
, areq
->cryptlen
);
1794 artpec6_crypto_walk_init(&walk
, areq
->dst
);
1795 ret
= artpec6_crypto_setup_sg_descrs_in(common
, &walk
, areq
->cryptlen
);
1799 /* CTR-mode padding required by the HW. */
1800 if (ctx
->crypto_type
== ARTPEC6_CRYPTO_CIPHER_AES_CTR
||
1801 ctx
->crypto_type
== ARTPEC6_CRYPTO_CIPHER_AES_XTS
) {
1802 size_t pad
= ALIGN(areq
->cryptlen
, AES_BLOCK_SIZE
) -
1806 ret
= artpec6_crypto_setup_out_descr(common
,
1812 ret
= artpec6_crypto_setup_in_descr(common
,
1813 ac
->pad_buffer
, pad
,
1820 ret
= artpec6_crypto_terminate_out_descrs(common
);
1824 ret
= artpec6_crypto_terminate_in_descrs(common
);
1828 return artpec6_crypto_dma_map_descs(common
);
1831 static int artpec6_crypto_prepare_aead(struct aead_request
*areq
)
1835 size_t input_length
;
1836 struct artpec6_cryptotfm_context
*ctx
= crypto_tfm_ctx(areq
->base
.tfm
);
1837 struct artpec6_crypto_aead_req_ctx
*req_ctx
= aead_request_ctx(areq
);
1838 struct crypto_aead
*cipher
= crypto_aead_reqtfm(areq
);
1839 struct artpec6_crypto_req_common
*common
= &req_ctx
->common
;
1840 struct artpec6_crypto
*ac
= dev_get_drvdata(artpec6_crypto_dev
);
1841 enum artpec6_crypto_variant variant
= ac
->variant
;
1844 artpec6_crypto_init_dma_operation(common
);
1847 if (variant
== ARTPEC6_CRYPTO
) {
1848 ctx
->key_md
= FIELD_PREP(A6_CRY_MD_OPER
,
1849 a6_regk_crypto_dlkey
);
1851 ctx
->key_md
= FIELD_PREP(A7_CRY_MD_OPER
,
1852 a7_regk_crypto_dlkey
);
1854 ret
= artpec6_crypto_setup_out_descr(common
, (void *)&ctx
->key_md
,
1855 sizeof(ctx
->key_md
), false, false);
1859 ret
= artpec6_crypto_setup_out_descr(common
, ctx
->aes_key
,
1860 ctx
->key_length
, true, false);
1864 req_ctx
->cipher_md
= 0;
1866 switch (ctx
->key_length
) {
1868 md_cipher_len
= regk_crypto_key_128
;
1871 md_cipher_len
= regk_crypto_key_192
;
1874 md_cipher_len
= regk_crypto_key_256
;
1880 if (variant
== ARTPEC6_CRYPTO
) {
1881 req_ctx
->cipher_md
|= FIELD_PREP(A6_CRY_MD_OPER
,
1882 regk_crypto_aes_gcm
);
1883 req_ctx
->cipher_md
|= FIELD_PREP(A6_CRY_MD_CIPHER_LEN
,
1885 if (req_ctx
->decrypt
)
1886 req_ctx
->cipher_md
|= A6_CRY_MD_CIPHER_DECR
;
1888 req_ctx
->cipher_md
|= FIELD_PREP(A7_CRY_MD_OPER
,
1889 regk_crypto_aes_gcm
);
1890 req_ctx
->cipher_md
|= FIELD_PREP(A7_CRY_MD_CIPHER_LEN
,
1892 if (req_ctx
->decrypt
)
1893 req_ctx
->cipher_md
|= A7_CRY_MD_CIPHER_DECR
;
1896 ret
= artpec6_crypto_setup_out_descr(common
,
1897 (void *) &req_ctx
->cipher_md
,
1898 sizeof(req_ctx
->cipher_md
), false,
1903 ret
= artpec6_crypto_setup_in_descr(common
, ac
->pad_buffer
, 4, false);
1907 /* For the decryption, cryptlen includes the tag. */
1908 input_length
= areq
->cryptlen
;
1909 if (req_ctx
->decrypt
)
1910 input_length
-= crypto_aead_authsize(cipher
);
1912 /* Prepare the context buffer */
1913 req_ctx
->hw_ctx
.aad_length_bits
=
1914 __cpu_to_be64(8*areq
->assoclen
);
1916 req_ctx
->hw_ctx
.text_length_bits
=
1917 __cpu_to_be64(8*input_length
);
1919 memcpy(req_ctx
->hw_ctx
.J0
, areq
->iv
, crypto_aead_ivsize(cipher
));
1920 // The HW omits the initial increment of the counter field.
1921 memcpy(req_ctx
->hw_ctx
.J0
+ GCM_AES_IV_SIZE
, "\x00\x00\x00\x01", 4);
1923 ret
= artpec6_crypto_setup_out_descr(common
, &req_ctx
->hw_ctx
,
1924 sizeof(struct artpec6_crypto_aead_hw_ctx
), false, false);
1929 struct artpec6_crypto_walk walk
;
1931 artpec6_crypto_walk_init(&walk
, areq
->src
);
1933 /* Associated data */
1934 count
= areq
->assoclen
;
1935 ret
= artpec6_crypto_setup_sg_descrs_out(common
, &walk
, count
);
1939 if (!IS_ALIGNED(areq
->assoclen
, 16)) {
1940 size_t assoc_pad
= 16 - (areq
->assoclen
% 16);
1941 /* The HW mandates zero padding here */
1942 ret
= artpec6_crypto_setup_out_descr(common
,
1950 /* Data to crypto */
1951 count
= input_length
;
1952 ret
= artpec6_crypto_setup_sg_descrs_out(common
, &walk
, count
);
1956 if (!IS_ALIGNED(input_length
, 16)) {
1957 size_t crypto_pad
= 16 - (input_length
% 16);
1958 /* The HW mandates zero padding here */
1959 ret
= artpec6_crypto_setup_out_descr(common
,
1969 /* Data from crypto */
1971 struct artpec6_crypto_walk walk
;
1972 size_t output_len
= areq
->cryptlen
;
1974 if (req_ctx
->decrypt
)
1975 output_len
-= crypto_aead_authsize(cipher
);
1977 artpec6_crypto_walk_init(&walk
, areq
->dst
);
1979 /* skip associated data in the output */
1980 count
= artpec6_crypto_walk_advance(&walk
, areq
->assoclen
);
1985 ret
= artpec6_crypto_setup_sg_descrs_in(common
, &walk
, count
);
1989 /* Put padding between the cryptotext and the auth tag */
1990 if (!IS_ALIGNED(output_len
, 16)) {
1991 size_t crypto_pad
= 16 - (output_len
% 16);
1993 ret
= artpec6_crypto_setup_in_descr(common
,
		/* The authentication tag shall follow immediately after
		 * the output ciphertext. For decryption it is put in a context
		 * buffer for later comparison against the input tag.
		 */
2005 if (req_ctx
->decrypt
) {
2006 ret
= artpec6_crypto_setup_in_descr(common
,
2007 req_ctx
->decryption_tag
, AES_BLOCK_SIZE
, false);
		/* For encryption the requested tag size may be smaller
		 * than the hardware's generated tag.
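		 *
		 * Example (illustrative): with a requested authsize of 12
		 * bytes, the first 12 tag bytes go to the destination
		 * scatterlist and the remaining 4 bytes of the 16-byte
		 * hardware tag are read into the shared pad buffer and
		 * discarded.
		 */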
2015 size_t authsize
= crypto_aead_authsize(cipher
);
2017 ret
= artpec6_crypto_setup_sg_descrs_in(common
, &walk
,
2022 if (authsize
< AES_BLOCK_SIZE
) {
2023 count
= AES_BLOCK_SIZE
- authsize
;
2024 ret
= artpec6_crypto_setup_in_descr(common
,
2034 ret
= artpec6_crypto_terminate_in_descrs(common
);
2038 ret
= artpec6_crypto_terminate_out_descrs(common
);
2042 return artpec6_crypto_dma_map_descs(common
);
2045 static void artpec6_crypto_process_queue(struct artpec6_crypto
*ac
,
2046 struct list_head
*completions
)
2048 struct artpec6_crypto_req_common
*req
;
2050 while (!list_empty(&ac
->queue
) && !artpec6_crypto_busy()) {
2051 req
= list_first_entry(&ac
->queue
,
2052 struct artpec6_crypto_req_common
,
2054 list_move_tail(&req
->list
, &ac
->pending
);
2055 artpec6_crypto_start_dma(req
);
2057 list_add_tail(&req
->complete_in_progress
, completions
);
	/*
	 * In some cases, the hardware can raise an in_eop_flush interrupt
	 * before actually updating the status, so we have a timer which will
	 * recheck the status on timeout. Since the cases are expected to be
	 * very rare, we use a relatively large timeout value. There should be
	 * no noticeable negative effect if we time out spuriously.
	 */
2067 if (ac
->pending_count
)
2068 mod_timer(&ac
->timer
, jiffies
+ msecs_to_jiffies(100));
2070 del_timer(&ac
->timer
);
2073 static void artpec6_crypto_timeout(struct timer_list
*t
)
2075 struct artpec6_crypto
*ac
= from_timer(ac
, t
, timer
);
2077 dev_info_ratelimited(artpec6_crypto_dev
, "timeout\n");
2079 tasklet_schedule(&ac
->task
);
2082 static void artpec6_crypto_task(unsigned long data
)
2084 struct artpec6_crypto
*ac
= (struct artpec6_crypto
*)data
;
2085 struct artpec6_crypto_req_common
*req
;
2086 struct artpec6_crypto_req_common
*n
;
2087 struct list_head complete_done
;
2088 struct list_head complete_in_progress
;
2090 INIT_LIST_HEAD(&complete_done
);
2091 INIT_LIST_HEAD(&complete_in_progress
);
2093 if (list_empty(&ac
->pending
)) {
2094 pr_debug("Spurious IRQ\n");
2098 spin_lock_bh(&ac
->queue_lock
);
2100 list_for_each_entry_safe(req
, n
, &ac
->pending
, list
) {
2101 struct artpec6_crypto_dma_descriptors
*dma
= req
->dma
;
2103 dma_addr_t stataddr
;
2105 stataddr
= dma
->stat_dma_addr
+ 4 * (req
->dma
->in_cnt
- 1);
2106 dma_sync_single_for_cpu(artpec6_crypto_dev
,
2111 stat
= req
->dma
->stat
[req
->dma
->in_cnt
-1];
2113 /* A non-zero final status descriptor indicates
2114 * this job has finished.
2116 pr_debug("Request %p status is %X\n", req
, stat
);
2120 /* Allow testing of timeout handling with fault injection */
2121 #ifdef CONFIG_FAULT_INJECTION
2122 if (should_fail(&artpec6_crypto_fail_status_read
, 1))
2126 pr_debug("Completing request %p\n", req
);
2128 list_move_tail(&req
->list
, &complete_done
);
2130 ac
->pending_count
--;
2133 artpec6_crypto_process_queue(ac
, &complete_in_progress
);
2135 spin_unlock_bh(&ac
->queue_lock
);
2137 /* Perform the completion callbacks without holding the queue lock
2138 * to allow new request submissions from the callbacks.
2140 list_for_each_entry_safe(req
, n
, &complete_done
, list
) {
2141 artpec6_crypto_dma_unmap_all(req
);
2142 artpec6_crypto_copy_bounce_buffers(req
);
2143 artpec6_crypto_common_destroy(req
);
2145 req
->complete(req
->req
);
2148 list_for_each_entry_safe(req
, n
, &complete_in_progress
,
2149 complete_in_progress
) {
2150 req
->req
->complete(req
->req
, -EINPROGRESS
);
2154 static void artpec6_crypto_complete_crypto(struct crypto_async_request
*req
)
2156 req
->complete(req
, 0);
2160 artpec6_crypto_complete_cbc_decrypt(struct crypto_async_request
*req
)
2162 struct skcipher_request
*cipher_req
= container_of(req
,
2163 struct skcipher_request
, base
);
2165 scatterwalk_map_and_copy(cipher_req
->iv
, cipher_req
->src
,
2166 cipher_req
->cryptlen
- AES_BLOCK_SIZE
,
2168 req
->complete(req
, 0);
2172 artpec6_crypto_complete_cbc_encrypt(struct crypto_async_request
*req
)
2174 struct skcipher_request
*cipher_req
= container_of(req
,
2175 struct skcipher_request
, base
);
2177 scatterwalk_map_and_copy(cipher_req
->iv
, cipher_req
->dst
,
2178 cipher_req
->cryptlen
- AES_BLOCK_SIZE
,
2180 req
->complete(req
, 0);
2183 static void artpec6_crypto_complete_aead(struct crypto_async_request
*req
)
2187 /* Verify GCM hashtag. */
2188 struct aead_request
*areq
= container_of(req
,
2189 struct aead_request
, base
);
2190 struct crypto_aead
*aead
= crypto_aead_reqtfm(areq
);
2191 struct artpec6_crypto_aead_req_ctx
*req_ctx
= aead_request_ctx(areq
);
2193 if (req_ctx
->decrypt
) {
2194 u8 input_tag
[AES_BLOCK_SIZE
];
2195 unsigned int authsize
= crypto_aead_authsize(aead
);
2197 sg_pcopy_to_buffer(areq
->src
,
2198 sg_nents(areq
->src
),
2201 areq
->assoclen
+ areq
->cryptlen
-
2204 if (crypto_memneq(req_ctx
->decryption_tag
,
2207 pr_debug("***EBADMSG:\n");
2208 print_hex_dump_debug("ref:", DUMP_PREFIX_ADDRESS
, 32, 1,
2209 input_tag
, authsize
, true);
2210 print_hex_dump_debug("out:", DUMP_PREFIX_ADDRESS
, 32, 1,
2211 req_ctx
->decryption_tag
,
2218 req
->complete(req
, result
);
2221 static void artpec6_crypto_complete_hash(struct crypto_async_request
*req
)
2223 req
->complete(req
, 0);
2227 /*------------------- Hash functions -----------------------------------------*/
2229 artpec6_crypto_hash_set_key(struct crypto_ahash
*tfm
,
2230 const u8
*key
, unsigned int keylen
)
2232 struct artpec6_hashalg_context
*tfm_ctx
= crypto_tfm_ctx(&tfm
->base
);
2237 pr_err("Invalid length (%d) of HMAC key\n",
2242 memset(tfm_ctx
->hmac_key
, 0, sizeof(tfm_ctx
->hmac_key
));
2244 blocksize
= crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm
));
2246 if (keylen
> blocksize
) {
2247 SHASH_DESC_ON_STACK(hdesc
, tfm_ctx
->child_hash
);
2249 hdesc
->tfm
= tfm_ctx
->child_hash
;
2250 hdesc
->flags
= crypto_ahash_get_flags(tfm
) &
2251 CRYPTO_TFM_REQ_MAY_SLEEP
;
2253 tfm_ctx
->hmac_key_length
= blocksize
;
2254 ret
= crypto_shash_digest(hdesc
, key
, keylen
,
2260 memcpy(tfm_ctx
->hmac_key
, key
, keylen
);
2261 tfm_ctx
->hmac_key_length
= keylen
;
2268 artpec6_crypto_init_hash(struct ahash_request
*req
, u8 type
, int hmac
)
2270 struct artpec6_crypto
*ac
= dev_get_drvdata(artpec6_crypto_dev
);
2271 enum artpec6_crypto_variant variant
= ac
->variant
;
2272 struct artpec6_hash_request_context
*req_ctx
= ahash_request_ctx(req
);
2275 memset(req_ctx
, 0, sizeof(*req_ctx
));
2277 req_ctx
->hash_flags
= HASH_FLAG_INIT_CTX
;
2279 req_ctx
->hash_flags
|= (HASH_FLAG_HMAC
| HASH_FLAG_UPDATE_KEY
);
2282 case ARTPEC6_CRYPTO_HASH_SHA1
:
2283 oper
= hmac
? regk_crypto_hmac_sha1
: regk_crypto_sha1
;
2285 case ARTPEC6_CRYPTO_HASH_SHA256
:
2286 oper
= hmac
? regk_crypto_hmac_sha256
: regk_crypto_sha256
;
2289 pr_err("%s: Unsupported hash type 0x%x\n", MODULE_NAME
, type
);
2293 if (variant
== ARTPEC6_CRYPTO
)
2294 req_ctx
->hash_md
= FIELD_PREP(A6_CRY_MD_OPER
, oper
);
2296 req_ctx
->hash_md
= FIELD_PREP(A7_CRY_MD_OPER
, oper
);
2301 static int artpec6_crypto_prepare_submit_hash(struct ahash_request
*req
)
2303 struct artpec6_hash_request_context
*req_ctx
= ahash_request_ctx(req
);
2306 if (!req_ctx
->common
.dma
) {
2307 ret
= artpec6_crypto_common_init(&req_ctx
->common
,
2309 artpec6_crypto_complete_hash
,
2316 ret
= artpec6_crypto_prepare_hash(req
);
2318 case ARTPEC6_CRYPTO_PREPARE_HASH_START
:
2319 ret
= artpec6_crypto_submit(&req_ctx
->common
);
2322 case ARTPEC6_CRYPTO_PREPARE_HASH_NO_START
:
2327 artpec6_crypto_common_destroy(&req_ctx
->common
);
static int artpec6_crypto_hash_final(struct ahash_request *req)
{
	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);

	req_ctx->hash_flags |= HASH_FLAG_FINALIZE;

	return artpec6_crypto_prepare_submit_hash(req);
}

static int artpec6_crypto_hash_update(struct ahash_request *req)
{
	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);

	req_ctx->hash_flags |= HASH_FLAG_UPDATE;

	return artpec6_crypto_prepare_submit_hash(req);
}

static int artpec6_crypto_sha1_init(struct ahash_request *req)
{
	return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA1, 0);
}

static int artpec6_crypto_sha1_digest(struct ahash_request *req)
{
	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);

	artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA1, 0);

	req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;

	return artpec6_crypto_prepare_submit_hash(req);
}

static int artpec6_crypto_sha256_init(struct ahash_request *req)
{
	return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 0);
}

static int artpec6_crypto_sha256_digest(struct ahash_request *req)
{
	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);

	artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 0);
	req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;

	return artpec6_crypto_prepare_submit_hash(req);
}

static int artpec6_crypto_hmac_sha256_init(struct ahash_request *req)
{
	return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 1);
}

static int artpec6_crypto_hmac_sha256_digest(struct ahash_request *req)
{
	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);

	artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 1);
	req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;

	return artpec6_crypto_prepare_submit_hash(req);
}
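
/*
 * Note: the one-shot digest() entry points above set both HASH_FLAG_UPDATE
 * and HASH_FLAG_FINALIZE on a freshly initialised request context, so a
 * single submission consumes all of req->src and produces the final digest,
 * equivalent to calling init(), update() and final() separately.
 */
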
static int artpec6_crypto_ahash_init_common(struct crypto_tfm *tfm,
					    const char *base_hash_name)
{
	struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(tfm);

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct artpec6_hash_request_context));
	memset(tfm_ctx, 0, sizeof(*tfm_ctx));

	if (base_hash_name) {
		struct crypto_shash *child;

		child = crypto_alloc_shash(base_hash_name, 0,
					   CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(child))
			return PTR_ERR(child);

		tfm_ctx->child_hash = child;
	}

	return 0;
}

static int artpec6_crypto_ahash_init(struct crypto_tfm *tfm)
{
	return artpec6_crypto_ahash_init_common(tfm, NULL);
}

static int artpec6_crypto_ahash_init_hmac_sha256(struct crypto_tfm *tfm)
{
	return artpec6_crypto_ahash_init_common(tfm, "sha256");
}

static void artpec6_crypto_ahash_exit(struct crypto_tfm *tfm)
{
	struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(tfm);

	if (tfm_ctx->child_hash)
		crypto_free_shash(tfm_ctx->child_hash);

	memset(tfm_ctx->hmac_key, 0, sizeof(tfm_ctx->hmac_key));
	tfm_ctx->hmac_key_length = 0;
}

static int artpec6_crypto_hash_export(struct ahash_request *req, void *out)
{
	const struct artpec6_hash_request_context *ctx = ahash_request_ctx(req);
	struct artpec6_hash_export_state *state = out;
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	enum artpec6_crypto_variant variant = ac->variant;

	BUILD_BUG_ON(sizeof(state->partial_buffer) !=
		     sizeof(ctx->partial_buffer));
	BUILD_BUG_ON(sizeof(state->digeststate) != sizeof(ctx->digeststate));

	state->digcnt = ctx->digcnt;
	state->partial_bytes = ctx->partial_bytes;
	state->hash_flags = ctx->hash_flags;

	if (variant == ARTPEC6_CRYPTO)
		state->oper = FIELD_GET(A6_CRY_MD_OPER, ctx->hash_md);
	else
		state->oper = FIELD_GET(A7_CRY_MD_OPER, ctx->hash_md);

	memcpy(state->partial_buffer, ctx->partial_buffer,
	       sizeof(state->partial_buffer));
	memcpy(state->digeststate, ctx->digeststate,
	       sizeof(state->digeststate));

	return 0;
}

static int artpec6_crypto_hash_import(struct ahash_request *req, const void *in)
{
	struct artpec6_hash_request_context *ctx = ahash_request_ctx(req);
	const struct artpec6_hash_export_state *state = in;
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	enum artpec6_crypto_variant variant = ac->variant;

	memset(ctx, 0, sizeof(*ctx));

	ctx->digcnt = state->digcnt;
	ctx->partial_bytes = state->partial_bytes;
	ctx->hash_flags = state->hash_flags;

	if (variant == ARTPEC6_CRYPTO)
		ctx->hash_md = FIELD_PREP(A6_CRY_MD_OPER, state->oper);
	else
		ctx->hash_md = FIELD_PREP(A7_CRY_MD_OPER, state->oper);

	memcpy(ctx->partial_buffer, state->partial_buffer,
	       sizeof(state->partial_buffer));
	memcpy(ctx->digeststate, state->digeststate,
	       sizeof(state->digeststate));

	return 0;
}
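
/*
 * export()/import() above let the crypto API checkpoint a partially
 * processed hash request and resume it later. Everything needed to resume
 * (byte counter, partial block, intermediate digest state and the operation
 * code) must fit in struct artpec6_hash_export_state, whose size is
 * advertised as halg.statesize in the algorithm definitions below.
 */
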
static int init_crypto_hw(struct artpec6_crypto *ac)
{
	enum artpec6_crypto_variant variant = ac->variant;
	void __iomem *base = ac->base;
	u32 out_descr_buf_size;
	u32 out_data_buf_size;
	u32 in_data_buf_size;
	u32 in_descr_buf_size;
	u32 in_stat_buf_size;
	u32 in;
	u32 out;

	/*
	 * The PDMA unit contains 1984 bytes of internal memory for the OUT
	 * channels and 1024 bytes for the IN channel. This is an elastic
	 * memory used to internally store the descriptors and data. The
	 * values are specified in 64 byte increments. Trustzone buffers are
	 * not used at this stage.
	 */
	out_data_buf_size = 16;		/* 1024 bytes for data */
	out_descr_buf_size = 15;	/* 960 bytes for descriptors */
	in_data_buf_size = 8;		/* 512 bytes for data */
	in_descr_buf_size = 4;		/* 256 bytes for descriptors */
	in_stat_buf_size = 4;		/* 256 bytes for stat descrs */

	BUILD_BUG_ON_MSG((out_data_buf_size
				+ out_descr_buf_size) * 64 > 1984,
			 "Invalid OUT configuration");

	BUILD_BUG_ON_MSG((in_data_buf_size
				+ in_descr_buf_size
				+ in_stat_buf_size) * 64 > 1024,
			 "Invalid IN configuration");

	in = FIELD_PREP(PDMA_IN_BUF_CFG_DATA_BUF_SIZE, in_data_buf_size) |
	     FIELD_PREP(PDMA_IN_BUF_CFG_DESCR_BUF_SIZE, in_descr_buf_size) |
	     FIELD_PREP(PDMA_IN_BUF_CFG_STAT_BUF_SIZE, in_stat_buf_size);

	out = FIELD_PREP(PDMA_OUT_BUF_CFG_DATA_BUF_SIZE, out_data_buf_size) |
	      FIELD_PREP(PDMA_OUT_BUF_CFG_DESCR_BUF_SIZE, out_descr_buf_size);

	writel_relaxed(out, base + PDMA_OUT_BUF_CFG);
	writel_relaxed(PDMA_OUT_CFG_EN, base + PDMA_OUT_CFG);

	if (variant == ARTPEC6_CRYPTO) {
		writel_relaxed(in, base + A6_PDMA_IN_BUF_CFG);
		writel_relaxed(PDMA_IN_CFG_EN, base + A6_PDMA_IN_CFG);
		writel_relaxed(A6_PDMA_INTR_MASK_IN_DATA |
			       A6_PDMA_INTR_MASK_IN_EOP_FLUSH,
			       base + A6_PDMA_INTR_MASK);
	} else {
		writel_relaxed(in, base + A7_PDMA_IN_BUF_CFG);
		writel_relaxed(PDMA_IN_CFG_EN, base + A7_PDMA_IN_CFG);
		writel_relaxed(A7_PDMA_INTR_MASK_IN_DATA |
			       A7_PDMA_INTR_MASK_IN_EOP_FLUSH,
			       base + A7_PDMA_INTR_MASK);
	}

	return 0;
}

static void artpec6_crypto_disable_hw(struct artpec6_crypto *ac)
{
	enum artpec6_crypto_variant variant = ac->variant;
	void __iomem *base = ac->base;

	if (variant == ARTPEC6_CRYPTO) {
		writel_relaxed(A6_PDMA_IN_CMD_STOP, base + A6_PDMA_IN_CMD);
		writel_relaxed(0, base + A6_PDMA_IN_CFG);
		writel_relaxed(A6_PDMA_OUT_CMD_STOP, base + PDMA_OUT_CMD);
	} else {
		writel_relaxed(A7_PDMA_IN_CMD_STOP, base + A7_PDMA_IN_CMD);
		writel_relaxed(0, base + A7_PDMA_IN_CFG);
		writel_relaxed(A7_PDMA_OUT_CMD_STOP, base + PDMA_OUT_CMD);
	}

	writel_relaxed(0, base + PDMA_OUT_CFG);
}

static irqreturn_t artpec6_crypto_irq(int irq, void *dev_id)
{
	struct artpec6_crypto *ac = dev_id;
	enum artpec6_crypto_variant variant = ac->variant;
	void __iomem *base = ac->base;
	u32 mask_in_data, mask_in_eop_flush;
	u32 in_cmd_flush_stat, in_cmd_reg;
	u32 ack_intr_reg;
	u32 ack = 0;
	u32 intr;

	if (variant == ARTPEC6_CRYPTO) {
		intr = readl_relaxed(base + A6_PDMA_MASKED_INTR);
		mask_in_data = A6_PDMA_INTR_MASK_IN_DATA;
		mask_in_eop_flush = A6_PDMA_INTR_MASK_IN_EOP_FLUSH;
		in_cmd_flush_stat = A6_PDMA_IN_CMD_FLUSH_STAT;
		in_cmd_reg = A6_PDMA_IN_CMD;
		ack_intr_reg = A6_PDMA_ACK_INTR;
	} else {
		intr = readl_relaxed(base + A7_PDMA_MASKED_INTR);
		mask_in_data = A7_PDMA_INTR_MASK_IN_DATA;
		mask_in_eop_flush = A7_PDMA_INTR_MASK_IN_EOP_FLUSH;
		in_cmd_flush_stat = A7_PDMA_IN_CMD_FLUSH_STAT;
		in_cmd_reg = A7_PDMA_IN_CMD;
		ack_intr_reg = A7_PDMA_ACK_INTR;
	}

	/* We get two interrupt notifications from each job.
	 * The in_data interrupt means all data was sent to memory, and we
	 * then request a status flush command to write the per-job status
	 * to its status vector. This ensures that the tasklet can detect
	 * exactly how many submitted jobs have finished.
	 */
	if (intr & mask_in_data)
		ack |= mask_in_data;

	if (intr & mask_in_eop_flush)
		ack |= mask_in_eop_flush;
	else
		writel_relaxed(in_cmd_flush_stat, base + in_cmd_reg);

	writel_relaxed(ack, base + ack_intr_reg);

	if (intr & mask_in_eop_flush)
		tasklet_schedule(&ac->task);

	return IRQ_HANDLED;
}

/*------------------- Algorithm definitions ----------------------------------*/
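
/*
 * All algorithms below register with cra_priority 300, so once this driver
 * has probed they are normally preferred over the generic software
 * implementations (which typically register with a lower priority such as
 * 100). CRYPTO_ALG_ASYNC marks them as asynchronous: completion is signalled
 * through the request callback once the tasklet has processed the finished
 * PDMA job.
 */
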
static struct ahash_alg hash_algos[] = {
	/* SHA-1 */
	{
		.init = artpec6_crypto_sha1_init,
		.update = artpec6_crypto_hash_update,
		.final = artpec6_crypto_hash_final,
		.digest = artpec6_crypto_sha1_digest,
		.import = artpec6_crypto_hash_import,
		.export = artpec6_crypto_hash_export,
		.halg.digestsize = SHA1_DIGEST_SIZE,
		.halg.statesize = sizeof(struct artpec6_hash_export_state),
		.halg.base = {
			.cra_name = "sha1",
			.cra_driver_name = "artpec-sha1",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
			.cra_module = THIS_MODULE,
			.cra_init = artpec6_crypto_ahash_init,
			.cra_exit = artpec6_crypto_ahash_exit,
		}
	},
	/* SHA-256 */
	{
		.init = artpec6_crypto_sha256_init,
		.update = artpec6_crypto_hash_update,
		.final = artpec6_crypto_hash_final,
		.digest = artpec6_crypto_sha256_digest,
		.import = artpec6_crypto_hash_import,
		.export = artpec6_crypto_hash_export,
		.halg.digestsize = SHA256_DIGEST_SIZE,
		.halg.statesize = sizeof(struct artpec6_hash_export_state),
		.halg.base = {
			.cra_name = "sha256",
			.cra_driver_name = "artpec-sha256",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = SHA256_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
			.cra_module = THIS_MODULE,
			.cra_init = artpec6_crypto_ahash_init,
			.cra_exit = artpec6_crypto_ahash_exit,
		}
	},
	/* HMAC SHA-256 */
	{
		.init = artpec6_crypto_hmac_sha256_init,
		.update = artpec6_crypto_hash_update,
		.final = artpec6_crypto_hash_final,
		.digest = artpec6_crypto_hmac_sha256_digest,
		.import = artpec6_crypto_hash_import,
		.export = artpec6_crypto_hash_export,
		.setkey = artpec6_crypto_hash_set_key,
		.halg.digestsize = SHA256_DIGEST_SIZE,
		.halg.statesize = sizeof(struct artpec6_hash_export_state),
		.halg.base = {
			.cra_name = "hmac(sha256)",
			.cra_driver_name = "artpec-hmac-sha256",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = SHA256_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
			.cra_module = THIS_MODULE,
			.cra_init = artpec6_crypto_ahash_init_hmac_sha256,
			.cra_exit = artpec6_crypto_ahash_exit,
		}
	},
};

static struct skcipher_alg crypto_algos[] = {
	/* AES - ECB */
	{
		.base = {
			.cra_name = "ecb(aes)",
			.cra_driver_name = "artpec6-ecb-aes",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
			.cra_module = THIS_MODULE,
		},
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.setkey = artpec6_crypto_cipher_set_key,
		.encrypt = artpec6_crypto_encrypt,
		.decrypt = artpec6_crypto_decrypt,
		.init = artpec6_crypto_aes_ecb_init,
		.exit = artpec6_crypto_aes_exit,
	},
	/* AES - CTR */
	{
		.base = {
			.cra_name = "ctr(aes)",
			.cra_driver_name = "artpec6-ctr-aes",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_NEED_FALLBACK,
			.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
			.cra_module = THIS_MODULE,
		},
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.setkey = artpec6_crypto_cipher_set_key,
		.encrypt = artpec6_crypto_ctr_encrypt,
		.decrypt = artpec6_crypto_ctr_decrypt,
		.init = artpec6_crypto_aes_ctr_init,
		.exit = artpec6_crypto_aes_ctr_exit,
	},
	/* AES - CBC */
	{
		.base = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "artpec6-cbc-aes",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
			.cra_module = THIS_MODULE,
		},
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.setkey = artpec6_crypto_cipher_set_key,
		.encrypt = artpec6_crypto_encrypt,
		.decrypt = artpec6_crypto_decrypt,
		.init = artpec6_crypto_aes_cbc_init,
		.exit = artpec6_crypto_aes_exit,
	},
	/* AES - XTS */
	{
		.base = {
			.cra_name = "xts(aes)",
			.cra_driver_name = "artpec6-xts-aes",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
			.cra_module = THIS_MODULE,
		},
		.min_keysize = 2*AES_MIN_KEY_SIZE,
		.max_keysize = 2*AES_MAX_KEY_SIZE,
		.setkey = artpec6_crypto_xts_set_key,
		.encrypt = artpec6_crypto_encrypt,
		.decrypt = artpec6_crypto_decrypt,
		.init = artpec6_crypto_aes_xts_init,
		.exit = artpec6_crypto_aes_exit,
	},
};

static struct aead_alg aead_algos[] = {
	{
		.init = artpec6_crypto_aead_init,
		.setkey = artpec6_crypto_aead_set_key,
		.encrypt = artpec6_crypto_aead_encrypt,
		.decrypt = artpec6_crypto_aead_decrypt,
		.ivsize = GCM_AES_IV_SIZE,
		.maxauthsize = AES_BLOCK_SIZE,

		.base = {
			.cra_name = "gcm(aes)",
			.cra_driver_name = "artpec-gcm-aes",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
			.cra_module = THIS_MODULE,
		},
	}
};

#ifdef CONFIG_DEBUG_FS

static struct dentry *dbgfs_root;

static void artpec6_crypto_init_debugfs(void)
{
	dbgfs_root = debugfs_create_dir("artpec6_crypto", NULL);

#ifdef CONFIG_FAULT_INJECTION
	fault_create_debugfs_attr("fail_status_read", dbgfs_root,
				  &artpec6_crypto_fail_status_read);

	fault_create_debugfs_attr("fail_dma_array_full", dbgfs_root,
				  &artpec6_crypto_fail_dma_array_full);
#endif
}

static void artpec6_crypto_free_debugfs(void)
{
	debugfs_remove_recursive(dbgfs_root);
}

#endif /* CONFIG_DEBUG_FS */

static const struct of_device_id artpec6_crypto_of_match[] = {
	{ .compatible = "axis,artpec6-crypto", .data = (void *)ARTPEC6_CRYPTO },
	{ .compatible = "axis,artpec7-crypto", .data = (void *)ARTPEC7_CRYPTO },
	{}
};
MODULE_DEVICE_TABLE(of, artpec6_crypto_of_match);
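
/*
 * For reference, a hypothetical device tree node that would bind to this
 * driver could look like the following (the register address, size and
 * interrupt specifier are made-up values and depend on the actual SoC):
 *
 *	crypto@f4264000 {
 *		compatible = "axis,artpec6-crypto";
 *		reg = <0xf4264000 0x1000>;
 *		interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
 *	};
 */
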
static int artpec6_crypto_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	enum artpec6_crypto_variant variant;
	struct artpec6_crypto *ac;
	struct device *dev = &pdev->dev;
	void __iomem *base;
	struct resource *res;
	int irq;
	int err;

	if (artpec6_crypto_dev)
		return -ENODEV;

	match = of_match_node(artpec6_crypto_of_match, dev->of_node);
	if (!match)
		return -EINVAL;

	variant = (enum artpec6_crypto_variant)match->data;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return -ENODEV;

	ac = devm_kzalloc(&pdev->dev, sizeof(struct artpec6_crypto),
			  GFP_KERNEL);
	if (!ac)
		return -ENOMEM;

	platform_set_drvdata(pdev, ac);
	ac->variant = variant;

	spin_lock_init(&ac->queue_lock);
	INIT_LIST_HEAD(&ac->queue);
	INIT_LIST_HEAD(&ac->pending);
	timer_setup(&ac->timer, artpec6_crypto_timeout, 0);

	ac->base = base;

	ac->dma_cache = kmem_cache_create("artpec6_crypto_dma",
		sizeof(struct artpec6_crypto_dma_descriptors),
		64, 0, NULL);
	if (!ac->dma_cache)
		return -ENOMEM;

#ifdef CONFIG_DEBUG_FS
	artpec6_crypto_init_debugfs();
#endif

	tasklet_init(&ac->task, artpec6_crypto_task,
		     (unsigned long)ac);

	ac->pad_buffer = devm_kzalloc(&pdev->dev, 2 * ARTPEC_CACHE_LINE_MAX,
				      GFP_KERNEL);
	if (!ac->pad_buffer)
		return -ENOMEM;
	ac->pad_buffer = PTR_ALIGN(ac->pad_buffer, ARTPEC_CACHE_LINE_MAX);

	ac->zero_buffer = devm_kzalloc(&pdev->dev, 2 * ARTPEC_CACHE_LINE_MAX,
				       GFP_KERNEL);
	if (!ac->zero_buffer)
		return -ENOMEM;
	ac->zero_buffer = PTR_ALIGN(ac->zero_buffer, ARTPEC_CACHE_LINE_MAX);
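
	/*
	 * The pad and zero buffers above use a simple over-allocate-and-align
	 * pattern: allocating 2 * ARTPEC_CACHE_LINE_MAX bytes guarantees that
	 * PTR_ALIGN can find a window of at least ARTPEC_CACHE_LINE_MAX bytes
	 * that starts on a cache-line boundary, regardless of where the
	 * allocator placed the original pointer.
	 */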

	err = init_crypto_hw(ac);
	if (err)
		goto free_cache;

	err = devm_request_irq(&pdev->dev, irq, artpec6_crypto_irq, 0,
			       "artpec6-crypto", ac);
	if (err)
		goto disable_hw;

	artpec6_crypto_dev = &pdev->dev;

	err = crypto_register_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
	if (err) {
		dev_err(dev, "Failed to register ahashes\n");
		goto disable_hw;
	}

	err = crypto_register_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
	if (err) {
		dev_err(dev, "Failed to register ciphers\n");
		goto unregister_ahashes;
	}

	err = crypto_register_aeads(aead_algos, ARRAY_SIZE(aead_algos));
	if (err) {
		dev_err(dev, "Failed to register aeads\n");
		goto unregister_algs;
	}

	return 0;

unregister_algs:
	crypto_unregister_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
unregister_ahashes:
	crypto_unregister_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
disable_hw:
	artpec6_crypto_disable_hw(ac);
free_cache:
	kmem_cache_destroy(ac->dma_cache);
	return err;
}

static int artpec6_crypto_remove(struct platform_device *pdev)
{
	struct artpec6_crypto *ac = platform_get_drvdata(pdev);
	int irq = platform_get_irq(pdev, 0);

	crypto_unregister_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
	crypto_unregister_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
	crypto_unregister_aeads(aead_algos, ARRAY_SIZE(aead_algos));

	tasklet_disable(&ac->task);
	devm_free_irq(&pdev->dev, irq, ac);
	tasklet_kill(&ac->task);
	del_timer_sync(&ac->timer);

	artpec6_crypto_disable_hw(ac);

	kmem_cache_destroy(ac->dma_cache);
#ifdef CONFIG_DEBUG_FS
	artpec6_crypto_free_debugfs();
#endif
	return 0;
}

static struct platform_driver artpec6_crypto_driver = {
	.probe = artpec6_crypto_probe,
	.remove = artpec6_crypto_remove,
	.driver = {
		.name = "artpec6-crypto",
		.of_match_table = artpec6_crypto_of_match,
	},
};
module_platform_driver(artpec6_crypto_driver);

MODULE_AUTHOR("Axis Communications AB");
MODULE_DESCRIPTION("ARTPEC-6 Crypto driver");
MODULE_LICENSE("GPL");