// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel Keem Bay OCS AES Crypto Driver.
 *
 * Copyright (C) 2018-2020 Intel Corporation
 */
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/swab.h>

#include <asm/byteorder.h>
#include <asm/errno.h>

#include <crypto/aes.h>
#include <crypto/gcm.h>

#include "ocs-aes.h"
#define AES_COMMAND_OFFSET			0x0000
#define AES_KEY_0_OFFSET			0x0004
#define AES_KEY_1_OFFSET			0x0008
#define AES_KEY_2_OFFSET			0x000C
#define AES_KEY_3_OFFSET			0x0010
#define AES_KEY_4_OFFSET			0x0014
#define AES_KEY_5_OFFSET			0x0018
#define AES_KEY_6_OFFSET			0x001C
#define AES_KEY_7_OFFSET			0x0020
#define AES_IV_0_OFFSET				0x0024
#define AES_IV_1_OFFSET				0x0028
#define AES_IV_2_OFFSET				0x002C
#define AES_IV_3_OFFSET				0x0030
#define AES_ACTIVE_OFFSET			0x0034
#define AES_STATUS_OFFSET			0x0038
#define AES_KEY_SIZE_OFFSET			0x0044
#define AES_IER_OFFSET				0x0048
#define AES_ISR_OFFSET				0x005C
#define AES_MULTIPURPOSE1_0_OFFSET		0x0200
#define AES_MULTIPURPOSE1_1_OFFSET		0x0204
#define AES_MULTIPURPOSE1_2_OFFSET		0x0208
#define AES_MULTIPURPOSE1_3_OFFSET		0x020C
#define AES_MULTIPURPOSE2_0_OFFSET		0x0220
#define AES_MULTIPURPOSE2_1_OFFSET		0x0224
#define AES_MULTIPURPOSE2_2_OFFSET		0x0228
#define AES_MULTIPURPOSE2_3_OFFSET		0x022C
#define AES_BYTE_ORDER_CFG_OFFSET		0x02C0
#define AES_TLEN_OFFSET				0x0300
#define AES_T_MAC_0_OFFSET			0x0304
#define AES_T_MAC_1_OFFSET			0x0308
#define AES_T_MAC_2_OFFSET			0x030C
#define AES_T_MAC_3_OFFSET			0x0310
#define AES_PLEN_OFFSET				0x0314
#define AES_A_DMA_SRC_ADDR_OFFSET		0x0400
#define AES_A_DMA_DST_ADDR_OFFSET		0x0404
#define AES_A_DMA_SRC_SIZE_OFFSET		0x0408
#define AES_A_DMA_DST_SIZE_OFFSET		0x040C
#define AES_A_DMA_DMA_MODE_OFFSET		0x0410
#define AES_A_DMA_NEXT_SRC_DESCR_OFFSET		0x0418
#define AES_A_DMA_NEXT_DST_DESCR_OFFSET		0x041C
#define AES_A_DMA_WHILE_ACTIVE_MODE_OFFSET	0x0420
#define AES_A_DMA_LOG_OFFSET			0x0424
#define AES_A_DMA_STATUS_OFFSET			0x0428
#define AES_A_DMA_PERF_CNTR_OFFSET		0x042C
#define AES_A_DMA_MSI_ISR_OFFSET		0x0480
#define AES_A_DMA_MSI_IER_OFFSET		0x0484
#define AES_A_DMA_MSI_MASK_OFFSET		0x0488
#define AES_A_DMA_INBUFFER_WRITE_FIFO_OFFSET	0x0600
#define AES_A_DMA_OUTBUFFER_READ_FIFO_OFFSET	0x0700
/*
 * AES_A_DMA_DMA_MODE register.
 * Default: 0x00000000.
 * bit[31]	ACTIVE
 *		This bit activates the DMA. When the DMA finishes, it resets
 *		this bit to zero.
 * bit[30:26]	Unused by this driver.
 * bit[25]	SRC_LINK_LIST_EN
 *		Source link list enable bit. When the linked list is terminated
 *		this bit is reset by the DMA.
 * bit[24]	DST_LINK_LIST_EN
 *		Destination link list enable bit. When the linked list is
 *		terminated this bit is reset by the DMA.
 * bit[23:0]	Unused by this driver.
 */
#define AES_A_DMA_DMA_MODE_ACTIVE		BIT(31)
#define AES_A_DMA_DMA_MODE_SRC_LINK_LIST_EN	BIT(25)
#define AES_A_DMA_DMA_MODE_DST_LINK_LIST_EN	BIT(24)
#define AES_ACTIVE_LAST_ADATA			BIT(9)
#define AES_ACTIVE_LAST_CCM_GCM			BIT(8)
#define AES_ACTIVE_TERMINATION			BIT(1)
#define AES_ACTIVE_TRIGGER			BIT(0)
#define AES_DISABLE_INT				0x00000000
#define AES_DMA_CPD_ERR_INT			BIT(8)
#define AES_DMA_OUTBUF_RD_ERR_INT		BIT(7)
#define AES_DMA_OUTBUF_WR_ERR_INT		BIT(6)
#define AES_DMA_INBUF_RD_ERR_INT		BIT(5)
#define AES_DMA_INBUF_WR_ERR_INT		BIT(4)
#define AES_DMA_BAD_COMP_INT			BIT(3)
#define AES_DMA_SAI_INT				BIT(2)
#define AES_DMA_SRC_DONE_INT			BIT(0)
#define AES_COMPLETE_INT			BIT(1)

#define AES_DMA_MSI_MASK_CLEAR			BIT(0)
#define AES_128_BIT_KEY				0x00000000
#define AES_256_BIT_KEY				BIT(0)

#define AES_DEACTIVATE_PERF_CNTR		0x00000000
#define AES_ACTIVATE_PERF_CNTR			BIT(0)

#define AES_MAX_TAG_SIZE_U32			4

#define OCS_LL_DMA_FLAG_TERMINATE		BIT(31)
/*
 * There is an inconsistency in the documentation. This is documented as an
 * 11-bit value, but it is actually 10 bits.
 */
#define AES_DMA_STATUS_INPUT_BUFFER_OCCUPANCY_MASK	0x3FF
/*
 * During CCM decrypt, the OCS block needs to finish processing the ciphertext
 * before the tag is written. For 128-bit mode this required delay is 28 OCS
 * clock cycles. For 256-bit mode it is 36 OCS clock cycles; the worst case
 * is used so the delay is safe for both key sizes.
 */
#define CCM_DECRYPT_DELAY_TAG_CLK_COUNT		36UL
/*
 * During CCM decrypt there must be a delay of at least 42 OCS clock cycles
 * between setting the TRIGGER bit in AES_ACTIVE and setting the LAST_CCM_GCM
 * bit in the same register (as stated in the OCS databook).
 */
#define CCM_DECRYPT_DELAY_LAST_GCX_CLK_COUNT	42UL
/* See RFC3610 section 2.2 */
#define L_PRIME_MIN	(1)
#define L_PRIME_MAX	(7)
/*
 * CCM IV format from RFC 3610 section 2.3
 *
 *   Octet Number   Contents
 *   ------------   ---------
 *   0              Flags
 *   1 ... 15-L     Nonce N
 *   16-L ... 15    Counter i
 *
 * Flags = L' = L - 1
 */
#define L_PRIME_IDX		0
#define COUNTER_START(lprime)	(16 - ((lprime) + 1))
#define COUNTER_LEN(lprime)	((lprime) + 1)
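
/*
 * Worked example: with the maximum L' = 7 (i.e., L = 8, q = 8),
 * COUNTER_START(7) == 8 and COUNTER_LEN(7) == 8, so the counter occupies
 * iv[8]..iv[15] and the nonce occupies iv[1]..iv[7].
 */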
enum aes_counter_mode {
	AES_CTR_M_NO_INC = 0,
	AES_CTR_M_32_INC = 1,
	AES_CTR_M_64_INC = 2,
	AES_CTR_M_128_INC = 3,
};
/**
 * struct ocs_dma_linked_list - OCS DMA linked list entry.
 * @src_addr:	Source address of the data.
 * @src_len:	Length of data to be fetched.
 * @next:	Next dma_list to fetch.
 * @ll_flags:	Flags (Freeze @ terminate) for the DMA engine.
 */
struct ocs_dma_linked_list {
	u32 src_addr;
	u32 src_len;
	u32 next;
	u32 ll_flags;
} __packed;
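
/*
 * Illustrative layout (not from the databook): in a two-entry list, entry 0
 * points at entry 1 via @next, and entry 1 carries OCS_LL_DMA_FLAG_TERMINATE
 * in @ll_flags so the DMA stops after fetching its data. This is exactly how
 * ocs_create_linked_list_from_sg() below chains the entries it allocates.
 */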
/*
 * Set endianness of inputs and outputs
 * AES_BYTE_ORDER_CFG
 * default 0x00000000
 * bit [10] - KEY_HI_LO_SWAP
 * bit [9] - KEY_HI_SWAP_DWORDS_IN_OCTWORD
 * bit [8] - KEY_HI_SWAP_BYTES_IN_DWORD
 * bit [7] - KEY_LO_SWAP_DWORDS_IN_OCTWORD
 * bit [6] - KEY_LO_SWAP_BYTES_IN_DWORD
 * bit [5] - IV_SWAP_DWORDS_IN_OCTWORD
 * bit [4] - IV_SWAP_BYTES_IN_DWORD
 * bit [3] - DOUT_SWAP_DWORDS_IN_OCTWORD
 * bit [2] - DOUT_SWAP_BYTES_IN_DWORD
 * bit [1] - DIN_SWAP_DWORDS_IN_OCTWORD
 * bit [0] - DIN_SWAP_BYTES_IN_DWORD
 */
static inline void aes_a_set_endianness(const struct ocs_aes_dev *aes_dev)
{
	/* 0x7FF sets all eleven swap bits ([10:0]) listed above. */
	iowrite32(0x7FF, aes_dev->base_reg + AES_BYTE_ORDER_CFG_OFFSET);
}
/* Trigger AES process start. */
static inline void aes_a_op_trigger(const struct ocs_aes_dev *aes_dev)
{
	iowrite32(AES_ACTIVE_TRIGGER, aes_dev->base_reg + AES_ACTIVE_OFFSET);
}
/* Indicate last bulk of data. */
static inline void aes_a_op_termination(const struct ocs_aes_dev *aes_dev)
{
	iowrite32(AES_ACTIVE_TERMINATION,
		  aes_dev->base_reg + AES_ACTIVE_OFFSET);
}
/*
 * Set LAST_CCM_GCM in AES_ACTIVE register and clear all other bits.
 *
 * Called when DMA is programmed to fetch the last batch of data.
 * - For AES-CCM it is called for the last batch of Payload data and Ciphertext
 *   data.
 * - For AES-GCM, it is called for the last batch of Plaintext data and
 *   Ciphertext data.
 */
static inline void aes_a_set_last_gcx(const struct ocs_aes_dev *aes_dev)
{
	iowrite32(AES_ACTIVE_LAST_CCM_GCM,
		  aes_dev->base_reg + AES_ACTIVE_OFFSET);
}
/* Wait for LAST_CCM_GCM bit to be unset. */
static inline void aes_a_wait_last_gcx(const struct ocs_aes_dev *aes_dev)
{
	u32 aes_active_reg;

	do {
		aes_active_reg = ioread32(aes_dev->base_reg +
					  AES_ACTIVE_OFFSET);
	} while (aes_active_reg & AES_ACTIVE_LAST_CCM_GCM);
}
/* Wait for the input buffer occupancy (10 bits) to drain to zero. */
static void aes_a_dma_wait_input_buffer_occupancy(const struct ocs_aes_dev *aes_dev)
{
	u32 reg;

	do {
		reg = ioread32(aes_dev->base_reg + AES_A_DMA_STATUS_OFFSET);
	} while (reg & AES_DMA_STATUS_INPUT_BUFFER_OCCUPANCY_MASK);
}
/*
 * Set LAST_CCM_GCM and LAST_ADATA bits in AES_ACTIVE register (and clear all
 * other bits).
 *
 * Called when DMA is programmed to fetch the last batch of Associated Data
 * (CCM case) or Additional Authenticated Data (GCM case).
 */
static inline void aes_a_set_last_gcx_and_adata(const struct ocs_aes_dev *aes_dev)
{
	iowrite32(AES_ACTIVE_LAST_ADATA | AES_ACTIVE_LAST_CCM_GCM,
		  aes_dev->base_reg + AES_ACTIVE_OFFSET);
}
/* Set DMA src and dst transfer size to 0 */
static inline void aes_a_dma_set_xfer_size_zero(const struct ocs_aes_dev *aes_dev)
{
	iowrite32(0, aes_dev->base_reg + AES_A_DMA_SRC_SIZE_OFFSET);
	iowrite32(0, aes_dev->base_reg + AES_A_DMA_DST_SIZE_OFFSET);
}
/* Activate DMA for zero-byte transfer case. */
static inline void aes_a_dma_active(const struct ocs_aes_dev *aes_dev)
{
	iowrite32(AES_A_DMA_DMA_MODE_ACTIVE,
		  aes_dev->base_reg + AES_A_DMA_DMA_MODE_OFFSET);
}
/* Activate DMA and enable src linked list */
static inline void aes_a_dma_active_src_ll_en(const struct ocs_aes_dev *aes_dev)
{
	iowrite32(AES_A_DMA_DMA_MODE_ACTIVE |
		  AES_A_DMA_DMA_MODE_SRC_LINK_LIST_EN,
		  aes_dev->base_reg + AES_A_DMA_DMA_MODE_OFFSET);
}
/* Activate DMA and enable dst linked list */
static inline void aes_a_dma_active_dst_ll_en(const struct ocs_aes_dev *aes_dev)
{
	iowrite32(AES_A_DMA_DMA_MODE_ACTIVE |
		  AES_A_DMA_DMA_MODE_DST_LINK_LIST_EN,
		  aes_dev->base_reg + AES_A_DMA_DMA_MODE_OFFSET);
}
/* Activate DMA and enable src and dst linked lists */
static inline void aes_a_dma_active_src_dst_ll_en(const struct ocs_aes_dev *aes_dev)
{
	iowrite32(AES_A_DMA_DMA_MODE_ACTIVE |
		  AES_A_DMA_DMA_MODE_SRC_LINK_LIST_EN |
		  AES_A_DMA_DMA_MODE_DST_LINK_LIST_EN,
		  aes_dev->base_reg + AES_A_DMA_DMA_MODE_OFFSET);
}
/* Reset PERF_CNTR to 0 and activate it */
static inline void aes_a_dma_reset_and_activate_perf_cntr(const struct ocs_aes_dev *aes_dev)
{
	iowrite32(0x00000000, aes_dev->base_reg + AES_A_DMA_PERF_CNTR_OFFSET);
	iowrite32(AES_ACTIVATE_PERF_CNTR,
		  aes_dev->base_reg + AES_A_DMA_WHILE_ACTIVE_MODE_OFFSET);
}
/* Wait until PERF_CNTR is > delay, then deactivate it */
static inline void aes_a_dma_wait_and_deactivate_perf_cntr(const struct ocs_aes_dev *aes_dev,
							   int delay)
{
	while (ioread32(aes_dev->base_reg + AES_A_DMA_PERF_CNTR_OFFSET) < delay)
		;
	iowrite32(AES_DEACTIVATE_PERF_CNTR,
		  aes_dev->base_reg + AES_A_DMA_WHILE_ACTIVE_MODE_OFFSET);
}
/* Disable AES and DMA IRQ. */
static void aes_irq_disable(struct ocs_aes_dev *aes_dev)
{
	u32 isr_val;

	/* Disable interrupts */
	iowrite32(AES_DISABLE_INT,
		  aes_dev->base_reg + AES_A_DMA_MSI_IER_OFFSET);
	iowrite32(AES_DISABLE_INT, aes_dev->base_reg + AES_IER_OFFSET);

	/* Clear any pending interrupt */
	isr_val = ioread32(aes_dev->base_reg + AES_A_DMA_MSI_ISR_OFFSET);
	if (isr_val)
		iowrite32(isr_val,
			  aes_dev->base_reg + AES_A_DMA_MSI_ISR_OFFSET);

	isr_val = ioread32(aes_dev->base_reg + AES_A_DMA_MSI_MASK_OFFSET);
	if (isr_val)
		iowrite32(isr_val,
			  aes_dev->base_reg + AES_A_DMA_MSI_MASK_OFFSET);

	isr_val = ioread32(aes_dev->base_reg + AES_ISR_OFFSET);
	if (isr_val)
		iowrite32(isr_val, aes_dev->base_reg + AES_ISR_OFFSET);
}
/* Enable AES or DMA IRQ. IRQ is disabled once fired. */
static void aes_irq_enable(struct ocs_aes_dev *aes_dev, u8 irq)
{
	if (irq == AES_COMPLETE_INT) {
		/* Ensure DMA error interrupts are enabled */
		iowrite32(AES_DMA_CPD_ERR_INT |
			  AES_DMA_OUTBUF_RD_ERR_INT |
			  AES_DMA_OUTBUF_WR_ERR_INT |
			  AES_DMA_INBUF_RD_ERR_INT |
			  AES_DMA_INBUF_WR_ERR_INT |
			  AES_DMA_BAD_COMP_INT |
			  AES_DMA_SAI_INT,
			  aes_dev->base_reg + AES_A_DMA_MSI_IER_OFFSET);
		/*
		 * AES_IER
		 * default 0x00000000
		 * bits [31:3] - reserved
		 * bit [2] - EN_SKS_ERR
		 * bit [1] - EN_AES_COMPLETE
		 * bit [0] - reserved
		 */
		iowrite32(AES_COMPLETE_INT, aes_dev->base_reg + AES_IER_OFFSET);
		return;
	}
	if (irq == AES_DMA_SRC_DONE_INT) {
		/* Ensure AES interrupts are disabled */
		iowrite32(AES_DISABLE_INT, aes_dev->base_reg + AES_IER_OFFSET);
		/*
		 * DMA_MSI_IER
		 * default 0x00000000
		 * bits [31:9] - reserved
		 * bit [8] - CPD_ERR_INT_EN
		 * bit [7] - OUTBUF_RD_ERR_INT_EN
		 * bit [6] - OUTBUF_WR_ERR_INT_EN
		 * bit [5] - INBUF_RD_ERR_INT_EN
		 * bit [4] - INBUF_WR_ERR_INT_EN
		 * bit [3] - BAD_COMP_INT_EN
		 * bit [2] - SAI_INT_EN
		 * bit [1] - DST_DONE_INT_EN
		 * bit [0] - SRC_DONE_INT_EN
		 */
		iowrite32(AES_DMA_CPD_ERR_INT |
			  AES_DMA_OUTBUF_RD_ERR_INT |
			  AES_DMA_OUTBUF_WR_ERR_INT |
			  AES_DMA_INBUF_RD_ERR_INT |
			  AES_DMA_INBUF_WR_ERR_INT |
			  AES_DMA_BAD_COMP_INT |
			  AES_DMA_SAI_INT |
			  AES_DMA_SRC_DONE_INT,
			  aes_dev->base_reg + AES_A_DMA_MSI_IER_OFFSET);
	}
}
/* Enable and wait for IRQ (either from OCS AES engine or DMA) */
static int ocs_aes_irq_enable_and_wait(struct ocs_aes_dev *aes_dev, u8 irq)
{
	int rc;

	reinit_completion(&aes_dev->irq_completion);
	aes_irq_enable(aes_dev, irq);
	rc = wait_for_completion_interruptible(&aes_dev->irq_completion);
	if (rc)
		return rc;

	return aes_dev->dma_err_mask ? -EIO : 0;
}
/* Configure DMA to OCS, linked list mode */
static inline void dma_to_ocs_aes_ll(struct ocs_aes_dev *aes_dev,
				     dma_addr_t dma_list)
{
	iowrite32(0, aes_dev->base_reg + AES_A_DMA_SRC_SIZE_OFFSET);
	iowrite32(dma_list,
		  aes_dev->base_reg + AES_A_DMA_NEXT_SRC_DESCR_OFFSET);
}
/* Configure DMA from OCS, linked list mode */
static inline void dma_from_ocs_aes_ll(struct ocs_aes_dev *aes_dev,
				       dma_addr_t dma_list)
{
	iowrite32(0, aes_dev->base_reg + AES_A_DMA_DST_SIZE_OFFSET);
	iowrite32(dma_list,
		  aes_dev->base_reg + AES_A_DMA_NEXT_DST_DESCR_OFFSET);
}
irqreturn_t ocs_aes_irq_handler(int irq, void *dev_id)
{
	struct ocs_aes_dev *aes_dev = dev_id;
	u32 aes_dma_isr;

	/* Read DMA ISR status. */
	aes_dma_isr = ioread32(aes_dev->base_reg + AES_A_DMA_MSI_ISR_OFFSET);

	/* Disable and clear interrupts. */
	aes_irq_disable(aes_dev);

	/* Save DMA error status. */
	aes_dev->dma_err_mask = aes_dma_isr &
				(AES_DMA_CPD_ERR_INT |
				 AES_DMA_OUTBUF_RD_ERR_INT |
				 AES_DMA_OUTBUF_WR_ERR_INT |
				 AES_DMA_INBUF_RD_ERR_INT |
				 AES_DMA_INBUF_WR_ERR_INT |
				 AES_DMA_BAD_COMP_INT |
				 AES_DMA_SAI_INT);

	/* Signal IRQ completion. */
	complete(&aes_dev->irq_completion);

	return IRQ_HANDLED;
}
/**
 * ocs_aes_set_key() - Write key into OCS AES hardware.
 * @aes_dev:	The OCS AES device to write the key to.
 * @key_size:	The size of the key (in bytes).
 * @key:	The key to write.
 * @cipher:	The cipher the key is for.
 *
 * For AES @key_size must be either 16 or 32. For SM4 @key_size must be 16.
 *
 * Return:	0 on success, negative error code otherwise.
 */
int ocs_aes_set_key(struct ocs_aes_dev *aes_dev, u32 key_size, const u8 *key,
		    enum ocs_cipher cipher)
{
	const u32 *key_u32;
	u32 val;
	int i;

	/* OCS AES supports 128-bit and 256-bit keys only. */
	if (cipher == OCS_AES && !(key_size == 32 || key_size == 16)) {
		dev_err(aes_dev->dev,
			"%d-bit keys not supported by AES cipher\n",
			key_size * 8);
		return -EINVAL;
	}
	/* OCS SM4 supports 128-bit keys only. */
	if (cipher == OCS_SM4 && key_size != 16) {
		dev_err(aes_dev->dev,
			"%d-bit keys not supported for SM4 cipher\n",
			key_size * 8);
		return -EINVAL;
	}

	if (!key)
		return -EINVAL;

	key_u32 = (const u32 *)key;

	/* Write key to AES_KEY[0-7] registers */
	for (i = 0; i < (key_size / sizeof(u32)); i++) {
		iowrite32(key_u32[i],
			  aes_dev->base_reg + AES_KEY_0_OFFSET +
			  (i * sizeof(u32)));
	}
	/*
	 * Write key size
	 * bits [31:1] - reserved
	 * bit [0] - AES_KEY_SIZE
	 *           0 - 128 bit key
	 *           1 - 256 bit key
	 */
	val = (key_size == 16) ? AES_128_BIT_KEY : AES_256_BIT_KEY;
	iowrite32(val, aes_dev->base_reg + AES_KEY_SIZE_OFFSET);

	return 0;
}
/* Write AES_COMMAND */
static inline void set_ocs_aes_command(struct ocs_aes_dev *aes_dev,
				       enum ocs_cipher cipher,
				       enum ocs_mode mode,
				       enum ocs_instruction instruction)
{
	u32 val;

	/* AES_COMMAND
	 * default 0x000000CC
	 * bit [14] - CIPHER_SELECT
	 *            0 - AES
	 *            1 - SM4
	 * bits [11:8] - OCS_AES_MODE
	 *               0000 - ECB
	 *               0001 - CBC
	 *               0010 - CTR
	 *               0110 - CCM
	 *               0111 - GCM
	 *               1001 - CTS
	 * bits [7:6] - AES_INSTRUCTION
	 *              00 - ENCRYPT
	 *              01 - DECRYPT
	 *              10 - EXPAND
	 *              11 - BYPASS
	 * bits [3:2] - CTR_M_BITS
	 *              00 - No increment
	 *              01 - Least significant 32 bits are incremented
	 *              10 - Least significant 64 bits are incremented
	 *              11 - Full 128 bits are incremented
	 */
	val = (cipher << 14) | (mode << 8) | (instruction << 6) |
	      (AES_CTR_M_128_INC << 2);
	iowrite32(val, aes_dev->base_reg + AES_COMMAND_OFFSET);
}
static void ocs_aes_init(struct ocs_aes_dev *aes_dev,
			 enum ocs_mode mode,
			 enum ocs_cipher cipher,
			 enum ocs_instruction instruction)
{
	/* Ensure interrupts are disabled and pending interrupts cleared. */
	aes_irq_disable(aes_dev);

	/* Set endianness recommended by data-sheet. */
	aes_a_set_endianness(aes_dev);

	/* Set AES_COMMAND register. */
	set_ocs_aes_command(aes_dev, cipher, mode, instruction);
}
/*
 * Write the byte length of the last AES/SM4 block of Payload data (without
 * zero padding and without the length of the MAC) in register AES_PLEN.
 */
static inline void ocs_aes_write_last_data_blk_len(struct ocs_aes_dev *aes_dev,
						   u32 size)
{
	u32 val;

	if (size == 0) {
		val = 0;
		goto exit;
	}

	val = size % AES_BLOCK_SIZE;
	if (val == 0)
		val = AES_BLOCK_SIZE;

exit:
	iowrite32(val, aes_dev->base_reg + AES_PLEN_OFFSET);
}
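
/*
 * Worked example: size = 20 writes 4 (20 % 16); size = 32 writes 16 (a full
 * final block); size = 0 writes 0.
 */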
/*
 * Validate inputs according to mode.
 * If OK return 0; else return -EINVAL.
 */
static int ocs_aes_validate_inputs(dma_addr_t src_dma_list, u32 src_size,
				   const u8 *iv, u32 iv_size,
				   dma_addr_t aad_dma_list, u32 aad_size,
				   const u8 *tag, u32 tag_size,
				   enum ocs_cipher cipher, enum ocs_mode mode,
				   enum ocs_instruction instruction,
				   dma_addr_t dst_dma_list)
{
	/* Ensure cipher, mode and instruction are valid. */
	if (!(cipher == OCS_AES || cipher == OCS_SM4))
		return -EINVAL;

	if (mode != OCS_MODE_ECB && mode != OCS_MODE_CBC &&
	    mode != OCS_MODE_CTR && mode != OCS_MODE_CCM &&
	    mode != OCS_MODE_GCM && mode != OCS_MODE_CTS)
		return -EINVAL;

	if (instruction != OCS_ENCRYPT && instruction != OCS_DECRYPT &&
	    instruction != OCS_EXPAND && instruction != OCS_BYPASS)
		return -EINVAL;

	/*
	 * When instruction is OCS_BYPASS, OCS simply copies data from source
	 * to destination using DMA.
	 *
	 * AES mode is irrelevant, but both source and destination DMA
	 * linked-list must be defined.
	 */
	if (instruction == OCS_BYPASS) {
		if (src_dma_list == DMA_MAPPING_ERROR ||
		    dst_dma_list == DMA_MAPPING_ERROR)
			return -EINVAL;

		return 0;
	}

	/*
	 * For performance reasons switch based on mode to limit unnecessary
	 * conditionals for each mode.
	 */
	switch (mode) {
	case OCS_MODE_ECB:
		/* Ensure input length is multiple of block size */
		if (src_size % AES_BLOCK_SIZE != 0)
			return -EINVAL;

		/* Ensure source and destination linked lists are created */
		if (src_dma_list == DMA_MAPPING_ERROR ||
		    dst_dma_list == DMA_MAPPING_ERROR)
			return -EINVAL;

		return 0;

	case OCS_MODE_CBC:
		/* Ensure input length is multiple of block size */
		if (src_size % AES_BLOCK_SIZE != 0)
			return -EINVAL;

		/* Ensure source and destination linked lists are created */
		if (src_dma_list == DMA_MAPPING_ERROR ||
		    dst_dma_list == DMA_MAPPING_ERROR)
			return -EINVAL;

		/* Ensure IV is present and block size in length */
		if (!iv || iv_size != AES_BLOCK_SIZE)
			return -EINVAL;

		return 0;

	case OCS_MODE_CTR:
		/* Ensure input length of 1 byte or greater */
		if (src_size == 0)
			return -EINVAL;

		/* Ensure source and destination linked lists are created */
		if (src_dma_list == DMA_MAPPING_ERROR ||
		    dst_dma_list == DMA_MAPPING_ERROR)
			return -EINVAL;

		/* Ensure IV is present and block size in length */
		if (!iv || iv_size != AES_BLOCK_SIZE)
			return -EINVAL;

		return 0;

	case OCS_MODE_CTS:
		/* Ensure input length >= block size */
		if (src_size < AES_BLOCK_SIZE)
			return -EINVAL;

		/* Ensure source and destination linked lists are created */
		if (src_dma_list == DMA_MAPPING_ERROR ||
		    dst_dma_list == DMA_MAPPING_ERROR)
			return -EINVAL;

		/* Ensure IV is present and block size in length */
		if (!iv || iv_size != AES_BLOCK_SIZE)
			return -EINVAL;

		return 0;

	case OCS_MODE_GCM:
		/* Ensure IV is present and GCM_AES_IV_SIZE in length */
		if (!iv || iv_size != GCM_AES_IV_SIZE)
			return -EINVAL;

		/*
		 * If input data present ensure source and destination linked
		 * lists are created.
		 */
		if (src_size && (src_dma_list == DMA_MAPPING_ERROR ||
				 dst_dma_list == DMA_MAPPING_ERROR))
			return -EINVAL;

		/* If aad present ensure aad linked list is created */
		if (aad_size && aad_dma_list == DMA_MAPPING_ERROR)
			return -EINVAL;

		/* Ensure tag destination is set */
		if (!tag)
			return -EINVAL;

		/* Just ensure that tag_size doesn't cause overflows. */
		if (tag_size > (AES_MAX_TAG_SIZE_U32 * sizeof(u32)))
			return -EINVAL;

		return 0;

	case OCS_MODE_CCM:
		/* Ensure IV is present and block size in length */
		if (!iv || iv_size != AES_BLOCK_SIZE)
			return -EINVAL;

		/* 2 <= L <= 8, so 1 <= L' <= 7 */
		if (iv[L_PRIME_IDX] < L_PRIME_MIN ||
		    iv[L_PRIME_IDX] > L_PRIME_MAX)
			return -EINVAL;

		/* If aad present ensure aad linked list is created */
		if (aad_size && aad_dma_list == DMA_MAPPING_ERROR)
			return -EINVAL;

		/* Just ensure that tag_size doesn't cause overflows. */
		if (tag_size > (AES_MAX_TAG_SIZE_U32 * sizeof(u32)))
			return -EINVAL;

		if (instruction == OCS_DECRYPT) {
			/*
			 * If input data present ensure source and destination
			 * linked lists are created.
			 */
			if (src_size && (src_dma_list == DMA_MAPPING_ERROR ||
					 dst_dma_list == DMA_MAPPING_ERROR))
				return -EINVAL;

			/* Ensure input tag is present */
			if (!tag)
				return -EINVAL;

			return 0;
		}

		/* Instruction == OCS_ENCRYPT */

		/*
		 * Destination linked list always required (for tag even if no
		 * input data).
		 */
		if (dst_dma_list == DMA_MAPPING_ERROR)
			return -EINVAL;

		/* If input data present ensure src linked list is created */
		if (src_size && src_dma_list == DMA_MAPPING_ERROR)
			return -EINVAL;

		return 0;

	default:
		return -EINVAL;
	}
}
/**
 * ocs_aes_op() - Perform AES/SM4 operation.
 * @aes_dev:		The OCS AES device to use.
 * @mode:		The mode to use (ECB, CBC, CTR, or CTS).
 * @cipher:		The cipher to use (AES or SM4).
 * @instruction:	The instruction to perform (encrypt or decrypt).
 * @dst_dma_list:	The OCS DMA list mapping output memory.
 * @src_dma_list:	The OCS DMA list mapping input payload data.
 * @src_size:		The amount of data mapped by @src_dma_list.
 * @iv:			The IV vector.
 * @iv_size:		The size (in bytes) of @iv.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int ocs_aes_op(struct ocs_aes_dev *aes_dev,
	       enum ocs_mode mode,
	       enum ocs_cipher cipher,
	       enum ocs_instruction instruction,
	       dma_addr_t dst_dma_list,
	       dma_addr_t src_dma_list,
	       u32 src_size,
	       u8 *iv,
	       u32 iv_size)
{
	u32 *iv32;
	int rc;

	rc = ocs_aes_validate_inputs(src_dma_list, src_size, iv, iv_size, 0, 0,
				     NULL, 0, cipher, mode, instruction,
				     dst_dma_list);
	if (rc)
		return rc;
	/*
	 * ocs_aes_validate_inputs() is a generic check; now ensure mode is not
	 * GCM or CCM.
	 */
	if (mode == OCS_MODE_GCM || mode == OCS_MODE_CCM)
		return -EINVAL;

	/* Cast IV to u32 array. */
	iv32 = (u32 *)iv;

	ocs_aes_init(aes_dev, mode, cipher, instruction);

	if (mode == OCS_MODE_CTS) {
		/* Write the byte length of the last data block to engine. */
		ocs_aes_write_last_data_blk_len(aes_dev, src_size);
	}

	/* ECB is the only mode that doesn't use IV. */
	if (mode != OCS_MODE_ECB) {
		iowrite32(iv32[0], aes_dev->base_reg + AES_IV_0_OFFSET);
		iowrite32(iv32[1], aes_dev->base_reg + AES_IV_1_OFFSET);
		iowrite32(iv32[2], aes_dev->base_reg + AES_IV_2_OFFSET);
		iowrite32(iv32[3], aes_dev->base_reg + AES_IV_3_OFFSET);
	}

	/* Set AES_ACTIVE.TRIGGER to start the operation. */
	aes_a_op_trigger(aes_dev);

	/* Configure and activate input / output DMA. */
	dma_to_ocs_aes_ll(aes_dev, src_dma_list);
	dma_from_ocs_aes_ll(aes_dev, dst_dma_list);
	aes_a_dma_active_src_dst_ll_en(aes_dev);

	if (mode == OCS_MODE_CTS) {
		/*
		 * For CTS mode, instruct engine to activate ciphertext
		 * stealing if last block of data is incomplete.
		 */
		aes_a_set_last_gcx(aes_dev);
	} else {
		/* For all other modes, just write the 'termination' bit. */
		aes_a_op_termination(aes_dev);
	}

	/* Wait for engine to complete processing. */
	rc = ocs_aes_irq_enable_and_wait(aes_dev, AES_COMPLETE_INT);
	if (rc)
		return rc;

	if (mode == OCS_MODE_CTR) {
		/* Read back IV for streaming mode */
		iv32[0] = ioread32(aes_dev->base_reg + AES_IV_0_OFFSET);
		iv32[1] = ioread32(aes_dev->base_reg + AES_IV_1_OFFSET);
		iv32[2] = ioread32(aes_dev->base_reg + AES_IV_2_OFFSET);
		iv32[3] = ioread32(aes_dev->base_reg + AES_IV_3_OFFSET);
	}

	return 0;
}
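
/*
 * Example call (illustrative only; the OCS DMA lists are assumed to have
 * been created beforehand, e.g. via ocs_create_linked_list_from_sg()):
 *
 *	rc = ocs_aes_op(aes_dev, OCS_MODE_CBC, OCS_AES, OCS_ENCRYPT,
 *			dst_dma_list, src_dma_list, src_size, iv,
 *			AES_BLOCK_SIZE);
 */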
/* Compute and write J0 to engine registers. */
static void ocs_aes_gcm_write_j0(const struct ocs_aes_dev *aes_dev,
				 const u8 *iv)
{
	const u32 *j0 = (u32 *)iv;

	/*
	 * IV must be 12 bytes; other sizes are not supported, since the Linux
	 * crypto API only expects/allows a 12-byte IV for GCM.
	 */
	iowrite32(0x00000001, aes_dev->base_reg + AES_IV_0_OFFSET);
	iowrite32(__swab32(j0[2]), aes_dev->base_reg + AES_IV_1_OFFSET);
	iowrite32(__swab32(j0[1]), aes_dev->base_reg + AES_IV_2_OFFSET);
	iowrite32(__swab32(j0[0]), aes_dev->base_reg + AES_IV_3_OFFSET);
}
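
/*
 * Per NIST SP 800-38D, for a 96-bit IV J0 is IV || 0^31 || 1; the function
 * above therefore writes the counter value 1 to AES_IV_0 and the byte-swapped
 * IV words to AES_IV_1..AES_IV_3.
 */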
/* Read GCM tag from engine registers. */
static inline void ocs_aes_gcm_read_tag(struct ocs_aes_dev *aes_dev,
					u8 *tag, u32 tag_size)
{
	u32 tag_u32[AES_MAX_TAG_SIZE_U32];

	/*
	 * The Authentication Tag T is stored in Little Endian order in the
	 * registers with the most significant bytes stored from AES_T_MAC[3]
	 * downward.
	 */
	tag_u32[0] = __swab32(ioread32(aes_dev->base_reg + AES_T_MAC_3_OFFSET));
	tag_u32[1] = __swab32(ioread32(aes_dev->base_reg + AES_T_MAC_2_OFFSET));
	tag_u32[2] = __swab32(ioread32(aes_dev->base_reg + AES_T_MAC_1_OFFSET));
	tag_u32[3] = __swab32(ioread32(aes_dev->base_reg + AES_T_MAC_0_OFFSET));

	memcpy(tag, tag_u32, tag_size);
}
/**
 * ocs_aes_gcm_op() - Perform GCM operation.
 * @aes_dev:		The OCS AES device to use.
 * @cipher:		The Cipher to use (AES or SM4).
 * @instruction:	The instruction to perform (encrypt or decrypt).
 * @dst_dma_list:	The OCS DMA list mapping output memory.
 * @src_dma_list:	The OCS DMA list mapping input payload data.
 * @src_size:		The amount of data mapped by @src_dma_list.
 * @iv:			The input IV vector.
 * @aad_dma_list:	The OCS DMA list mapping input AAD data.
 * @aad_size:		The amount of data mapped by @aad_dma_list.
 * @out_tag:		Where to store computed tag.
 * @tag_size:		The size (in bytes) of @out_tag.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int ocs_aes_gcm_op(struct ocs_aes_dev *aes_dev,
		   enum ocs_cipher cipher,
		   enum ocs_instruction instruction,
		   dma_addr_t dst_dma_list,
		   dma_addr_t src_dma_list,
		   u32 src_size,
		   const u8 *iv,
		   dma_addr_t aad_dma_list,
		   u32 aad_size,
		   u8 *out_tag,
		   u32 tag_size)
{
	u64 bit_len;
	u32 val;
	int rc;

	rc = ocs_aes_validate_inputs(src_dma_list, src_size, iv,
				     GCM_AES_IV_SIZE, aad_dma_list,
				     aad_size, out_tag, tag_size, cipher,
				     OCS_MODE_GCM, instruction,
				     dst_dma_list);
	if (rc)
		return rc;

	ocs_aes_init(aes_dev, OCS_MODE_GCM, cipher, instruction);

	/* Compute and write J0 to OCS HW. */
	ocs_aes_gcm_write_j0(aes_dev, iv);

	/* Write out_tag byte length */
	iowrite32(tag_size, aes_dev->base_reg + AES_TLEN_OFFSET);

	/* Write the byte length of the last plaintext / ciphertext block. */
	ocs_aes_write_last_data_blk_len(aes_dev, src_size);

	/* Write ciphertext bit length */
	bit_len = (u64)src_size * 8;
	val = bit_len & 0xFFFFFFFF;
	iowrite32(val, aes_dev->base_reg + AES_MULTIPURPOSE2_0_OFFSET);
	val = bit_len >> 32;
	iowrite32(val, aes_dev->base_reg + AES_MULTIPURPOSE2_1_OFFSET);

	/* Write aad bit length */
	bit_len = (u64)aad_size * 8;
	val = bit_len & 0xFFFFFFFF;
	iowrite32(val, aes_dev->base_reg + AES_MULTIPURPOSE2_2_OFFSET);
	val = bit_len >> 32;
	iowrite32(val, aes_dev->base_reg + AES_MULTIPURPOSE2_3_OFFSET);

	/* Set AES_ACTIVE.TRIGGER to start the operation. */
	aes_a_op_trigger(aes_dev);

	/* Process AAD. */
	if (aad_size) {
		/* If aad present, configure DMA to feed it to the engine. */
		dma_to_ocs_aes_ll(aes_dev, aad_dma_list);
		aes_a_dma_active_src_ll_en(aes_dev);

		/* Instruct engine to pad last block of aad, if needed. */
		aes_a_set_last_gcx_and_adata(aes_dev);

		/* Wait for DMA transfer to complete. */
		rc = ocs_aes_irq_enable_and_wait(aes_dev, AES_DMA_SRC_DONE_INT);
		if (rc)
			return rc;
	} else {
		aes_a_set_last_gcx_and_adata(aes_dev);
	}

	/* Wait until adata (if present) has been processed. */
	aes_a_wait_last_gcx(aes_dev);
	aes_a_dma_wait_input_buffer_occupancy(aes_dev);

	/* Now process payload. */
	if (src_size) {
		/* Configure and activate DMA for both input and output data. */
		dma_to_ocs_aes_ll(aes_dev, src_dma_list);
		dma_from_ocs_aes_ll(aes_dev, dst_dma_list);
		aes_a_dma_active_src_dst_ll_en(aes_dev);
	} else {
		aes_a_dma_set_xfer_size_zero(aes_dev);
		aes_a_dma_active(aes_dev);
	}

	/* Instruct AES/SM4 engine payload processing is over. */
	aes_a_set_last_gcx(aes_dev);

	/* Wait for OCS AES engine to complete processing. */
	rc = ocs_aes_irq_enable_and_wait(aes_dev, AES_COMPLETE_INT);
	if (rc)
		return rc;

	ocs_aes_gcm_read_tag(aes_dev, out_tag, tag_size);

	return 0;
}
/* Write encrypted tag to AES/SM4 engine. */
static void ocs_aes_ccm_write_encrypted_tag(struct ocs_aes_dev *aes_dev,
					    const u8 *in_tag, u32 tag_size)
{
	int i;

	/* Ensure DMA input buffer is empty */
	aes_a_dma_wait_input_buffer_occupancy(aes_dev);

	/*
	 * During CCM decrypt, the OCS block needs to finish processing the
	 * ciphertext before the tag is written, so a delay is needed after
	 * the DMA has completed writing the ciphertext.
	 */
	aes_a_dma_reset_and_activate_perf_cntr(aes_dev);
	aes_a_dma_wait_and_deactivate_perf_cntr(aes_dev,
						CCM_DECRYPT_DELAY_TAG_CLK_COUNT);

	/* Write encrypted tag to AES/SM4 engine. */
	for (i = 0; i < tag_size; i++) {
		iowrite8(in_tag[i], aes_dev->base_reg +
				    AES_A_DMA_INBUFFER_WRITE_FIFO_OFFSET);
	}
}
/*
 * Write B0 CCM block to OCS AES HW.
 *
 * Note: B0 format is documented in NIST Special Publication 800-38C
 * https://nvlpubs.nist.gov/nistpubs/Legacy/SP/nistspecialpublication800-38c.pdf
 * (see Section A.2.1)
 */
static int ocs_aes_ccm_write_b0(const struct ocs_aes_dev *aes_dev,
				const u8 *iv, u32 adata_size, u32 tag_size,
				u32 cryptlen)
{
	u8 b0[16]; /* CCM B0 block is 16 bytes long. */
	int i, q;

	/* Initialize B0 to 0. */
	memset(b0, 0, sizeof(b0));

	/*
	 * B0[0] is the 'Flags Octet' and has the following structure:
	 *   bit 7: Reserved
	 *   bit 6: Adata flag
	 *   bit 5-3: t value encoded as (t-2)/2
	 *   bit 2-0: q value encoded as q - 1
	 */
	/* If there is AAD data, set the Adata flag. */
	if (adata_size)
		b0[0] |= BIT(6);
	/*
	 * t denotes the octet length of T.
	 * t can only be an element of { 4, 6, 8, 10, 12, 14, 16} and is
	 * encoded as (t - 2) / 2
	 */
	b0[0] |= (((tag_size - 2) / 2) & 0x7) << 3;
	/*
	 * q is the octet length of Q.
	 * q can only be an element of {2, 3, 4, 5, 6, 7, 8} and is encoded as
	 * q - 1 == iv[0] & 0x7;
	 */
	b0[0] |= iv[0] & 0x7;
	/*
	 * Copy the Nonce N from IV to B0; N is located in iv[1]..iv[15 - q]
	 * and must be copied to b0[1]..b0[15-q].
	 * q == (iv[0] & 0x7) + 1
	 */
	q = (iv[0] & 0x7) + 1;
	for (i = 1; i <= 15 - q; i++)
		b0[i] = iv[i];
	/*
	 * The rest of B0 must contain Q, i.e., the message length.
	 * Q is encoded in q octets, in big-endian order, so to write it, we
	 * start from the end of B0 and we move backward.
	 */
	i = sizeof(b0) - 1;
	while (q) {
		b0[i] = cryptlen & 0xff;
		cryptlen >>= 8;
		i--;
		q--;
	}
	/*
	 * If cryptlen is not zero at this point, it means that its original
	 * value was too big.
	 */
	if (cryptlen)
		return -EOVERFLOW;
	/* Now write B0 to OCS AES input buffer. */
	for (i = 0; i < sizeof(b0); i++)
		iowrite8(b0[i], aes_dev->base_reg +
				AES_A_DMA_INBUFFER_WRITE_FIFO_OFFSET);

	return 0;
}
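
/*
 * Flags octet example: with adata present, a 16-byte tag (t = 16) and q = 4
 * (iv[0] = 3), B0[0] = 0x40 | (7 << 3) | 0x03 = 0x7B.
 */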
/*
 * Write adata length to OCS AES HW.
 *
 * Note: adata len encoding is documented in NIST Special Publication 800-38C
 * https://nvlpubs.nist.gov/nistpubs/Legacy/SP/nistspecialpublication800-38c.pdf
 * (see Section A.2.2)
 */
static void ocs_aes_ccm_write_adata_len(const struct ocs_aes_dev *aes_dev,
					u64 adata_len)
{
	u8 enc_a[10]; /* Maximum encoded size: 10 octets. */
	int i, len;

	/*
	 * adata_len ('a') is encoded as follows:
	 * If 0 < a < 2^16 - 2^8     ==> 'a' encoded as [a]16, i.e., two octets
	 *				 (big endian).
	 * If 2^16 - 2^8 <= a < 2^32 ==> 'a' encoded as 0xff || 0xfe || [a]32,
	 *				 i.e., six octets (big endian).
	 * If 2^32 <= a < 2^64       ==> 'a' encoded as 0xff || 0xff || [a]64,
	 *				 i.e., ten octets (big endian).
	 */
	if (adata_len < 65280) {
		len = 2;
		*(__be16 *)enc_a = cpu_to_be16(adata_len);
	} else if (adata_len <= 0xFFFFFFFF) {
		len = 6;
		*(__be16 *)enc_a = cpu_to_be16(0xfffe);
		*(__be32 *)&enc_a[2] = cpu_to_be32(adata_len);
	} else { /* adata_len >= 2^32 */
		len = 10;
		*(__be16 *)enc_a = cpu_to_be16(0xffff);
		*(__be64 *)&enc_a[2] = cpu_to_be64(adata_len);
	}
	for (i = 0; i < len; i++)
		iowrite8(enc_a[i],
			 aes_dev->base_reg +
			 AES_A_DMA_INBUFFER_WRITE_FIFO_OFFSET);
}
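
/*
 * Encoding examples: adata_len = 0x1234 is written as the two octets
 * 0x12 0x34; adata_len = 0x00010000 is written as the six octets
 * 0xff 0xfe 0x00 0x01 0x00 0x00.
 */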
static int ocs_aes_ccm_do_adata(struct ocs_aes_dev *aes_dev,
				dma_addr_t adata_dma_list, u32 adata_size)
{
	int rc;

	if (!adata_size) {
		/* Since there is no aad, the LAST_GCX bit can be set now. */
		aes_a_set_last_gcx_and_adata(aes_dev);
		goto exit;
	}

	/*
	 * Form the encoding of the Associated data length and write it
	 * to the AES/SM4 input buffer.
	 */
	ocs_aes_ccm_write_adata_len(aes_dev, adata_size);

	/* Configure the AES/SM4 DMA to fetch the Associated Data */
	dma_to_ocs_aes_ll(aes_dev, adata_dma_list);

	/* Activate DMA to fetch Associated data. */
	aes_a_dma_active_src_ll_en(aes_dev);

	/* Set LAST_GCX and LAST_ADATA in AES ACTIVE register. */
	aes_a_set_last_gcx_and_adata(aes_dev);

	/* Wait for DMA transfer to complete. */
	rc = ocs_aes_irq_enable_and_wait(aes_dev, AES_DMA_SRC_DONE_INT);
	if (rc)
		return rc;

exit:
	/* Wait until adata (if present) has been processed. */
	aes_a_wait_last_gcx(aes_dev);
	aes_a_dma_wait_input_buffer_occupancy(aes_dev);

	return 0;
}
static int ocs_aes_ccm_encrypt_do_payload(struct ocs_aes_dev *aes_dev,
					  dma_addr_t dst_dma_list,
					  dma_addr_t src_dma_list,
					  u32 src_size)
{
	if (src_size) {
		/*
		 * Configure and activate DMA for both input and output
		 * data.
		 */
		dma_to_ocs_aes_ll(aes_dev, src_dma_list);
		dma_from_ocs_aes_ll(aes_dev, dst_dma_list);
		aes_a_dma_active_src_dst_ll_en(aes_dev);
	} else {
		/* Configure and activate DMA for output data only. */
		dma_from_ocs_aes_ll(aes_dev, dst_dma_list);
		aes_a_dma_active_dst_ll_en(aes_dev);
	}

	/*
	 * Set the LAST GCX bit in AES_ACTIVE Register to instruct
	 * AES/SM4 engine to pad the last block of data.
	 */
	aes_a_set_last_gcx(aes_dev);

	/* We are done, wait for IRQ and return. */
	return ocs_aes_irq_enable_and_wait(aes_dev, AES_COMPLETE_INT);
}
static int ocs_aes_ccm_decrypt_do_payload(struct ocs_aes_dev *aes_dev,
					  dma_addr_t dst_dma_list,
					  dma_addr_t src_dma_list,
					  u32 src_size)
{
	if (!src_size) {
		/* Let engine process 0-length input. */
		aes_a_dma_set_xfer_size_zero(aes_dev);
		aes_a_dma_active(aes_dev);
		aes_a_set_last_gcx(aes_dev);

		return 0;
	}

	/*
	 * Configure and activate DMA for both input and output
	 * data.
	 */
	dma_to_ocs_aes_ll(aes_dev, src_dma_list);
	dma_from_ocs_aes_ll(aes_dev, dst_dma_list);
	aes_a_dma_active_src_dst_ll_en(aes_dev);
	/*
	 * Set the LAST GCX bit in AES_ACTIVE Register; this allows the
	 * AES/SM4 engine to differentiate between encrypted data and
	 * encrypted MAC.
	 */
	aes_a_set_last_gcx(aes_dev);
	/*
	 * Enable DMA DONE interrupt; once DMA transfer is over,
	 * interrupt handler will process the MAC/tag.
	 */
	return ocs_aes_irq_enable_and_wait(aes_dev, AES_DMA_SRC_DONE_INT);
}
/*
 * Compare Tag to Yr.
 *
 * Only used at the end of CCM decrypt. If tag == yr, message authentication
 * has succeeded.
 */
static inline int ccm_compare_tag_to_yr(struct ocs_aes_dev *aes_dev,
					u8 tag_size_bytes)
{
	u32 tag[AES_MAX_TAG_SIZE_U32];
	u32 yr[AES_MAX_TAG_SIZE_U32];
	int i;

	/* Read Tag and Yr from AES registers. */
	for (i = 0; i < AES_MAX_TAG_SIZE_U32; i++) {
		tag[i] = ioread32(aes_dev->base_reg +
				  AES_T_MAC_0_OFFSET + (i * sizeof(u32)));
		yr[i] = ioread32(aes_dev->base_reg +
				 AES_MULTIPURPOSE2_0_OFFSET +
				 (i * sizeof(u32)));
	}

	return memcmp(tag, yr, tag_size_bytes) ? -EBADMSG : 0;
}
/**
 * ocs_aes_ccm_op() - Perform CCM operation.
 * @aes_dev:		The OCS AES device to use.
 * @cipher:		The Cipher to use (AES or SM4).
 * @instruction:	The instruction to perform (encrypt or decrypt).
 * @dst_dma_list:	The OCS DMA list mapping output memory.
 * @src_dma_list:	The OCS DMA list mapping input payload data.
 * @src_size:		The amount of data mapped by @src_dma_list.
 * @iv:			The input IV vector.
 * @adata_dma_list:	The OCS DMA list mapping input A-data.
 * @adata_size:		The amount of data mapped by @adata_dma_list.
 * @in_tag:		Input tag.
 * @tag_size:		The size (in bytes) of @in_tag.
 *
 * Note: for encrypt the tag is appended to the ciphertext (in the memory
 *	 mapped by @dst_dma_list).
 *
 * Return: 0 on success, negative error code otherwise.
 */
int ocs_aes_ccm_op(struct ocs_aes_dev *aes_dev,
		   enum ocs_cipher cipher,
		   enum ocs_instruction instruction,
		   dma_addr_t dst_dma_list,
		   dma_addr_t src_dma_list,
		   u32 src_size,
		   u8 *iv,
		   dma_addr_t adata_dma_list,
		   u32 adata_size,
		   u8 *in_tag,
		   u32 tag_size)
{
	u32 *iv_32;
	u8 lprime;
	int rc;

	rc = ocs_aes_validate_inputs(src_dma_list, src_size, iv,
				     AES_BLOCK_SIZE, adata_dma_list, adata_size,
				     in_tag, tag_size, cipher, OCS_MODE_CCM,
				     instruction, dst_dma_list);
	if (rc)
		return rc;

	ocs_aes_init(aes_dev, OCS_MODE_CCM, cipher, instruction);

	/*
	 * Note: rfc 3610 and NIST 800-38C require counter of zero to encrypt
	 * auth tag, so ensure this is the case.
	 */
	lprime = iv[L_PRIME_IDX];
	memset(&iv[COUNTER_START(lprime)], 0, COUNTER_LEN(lprime));

	/*
	 * Nonce is already converted to ctr0 before being passed into this
	 * function as iv.
	 */
	iv_32 = (u32 *)iv;
	iowrite32(__swab32(iv_32[0]),
		  aes_dev->base_reg + AES_MULTIPURPOSE1_3_OFFSET);
	iowrite32(__swab32(iv_32[1]),
		  aes_dev->base_reg + AES_MULTIPURPOSE1_2_OFFSET);
	iowrite32(__swab32(iv_32[2]),
		  aes_dev->base_reg + AES_MULTIPURPOSE1_1_OFFSET);
	iowrite32(__swab32(iv_32[3]),
		  aes_dev->base_reg + AES_MULTIPURPOSE1_0_OFFSET);

	/* Write MAC/tag length in register AES_TLEN */
	iowrite32(tag_size, aes_dev->base_reg + AES_TLEN_OFFSET);
	/*
	 * Write the byte length of the last AES/SM4 block of Payload data
	 * (without zero padding and without the length of the MAC) in register
	 * AES_PLEN.
	 */
	ocs_aes_write_last_data_blk_len(aes_dev, src_size);

	/* Set AES_ACTIVE.TRIGGER to start the operation. */
	aes_a_op_trigger(aes_dev);

	aes_a_dma_reset_and_activate_perf_cntr(aes_dev);

	/* Form block B0 and write it to the AES/SM4 input buffer. */
	rc = ocs_aes_ccm_write_b0(aes_dev, iv, adata_size, tag_size, src_size);
	if (rc)
		return rc;
	/*
	 * Ensure there has been at least CCM_DECRYPT_DELAY_LAST_GCX_CLK_COUNT
	 * clock cycles since TRIGGER bit was set.
	 */
	aes_a_dma_wait_and_deactivate_perf_cntr(aes_dev,
						CCM_DECRYPT_DELAY_LAST_GCX_CLK_COUNT);

	/* Process Adata. */
	rc = ocs_aes_ccm_do_adata(aes_dev, adata_dma_list, adata_size);
	if (rc)
		return rc;

	/* For Encrypt case we just process the payload and return. */
	if (instruction == OCS_ENCRYPT) {
		return ocs_aes_ccm_encrypt_do_payload(aes_dev, dst_dma_list,
						      src_dma_list, src_size);
	}

	/* For Decrypt we need to process the payload and then the tag. */
	rc = ocs_aes_ccm_decrypt_do_payload(aes_dev, dst_dma_list,
					    src_dma_list, src_size);
	if (rc)
		return rc;

	/* Process MAC/tag directly: feed tag to engine and wait for IRQ. */
	ocs_aes_ccm_write_encrypted_tag(aes_dev, in_tag, tag_size);
	rc = ocs_aes_irq_enable_and_wait(aes_dev, AES_COMPLETE_INT);
	if (rc)
		return rc;

	return ccm_compare_tag_to_yr(aes_dev, tag_size);
}
/**
 * ocs_create_linked_list_from_sg() - Create OCS DMA linked list from SG list.
 * @aes_dev:	  The OCS AES device the list will be created for.
 * @sg:		  The SG list OCS DMA linked list will be created from. When
 *		  passed to this function, @sg must have been already mapped
 *		  with dma_map_sg().
 * @sg_dma_count: The number of DMA-mapped entries in @sg. This must be the
 *		  value returned by dma_map_sg() when @sg was mapped.
 * @dll_desc:	  The OCS DMA dma_list to use to store information about the
 *		  created linked list.
 * @data_size:	  The size of the data (from the SG list) to be mapped into the
 *		  OCS DMA linked list.
 * @data_offset:  The offset (within the SG list) of the data to be mapped.
 *
 * Return:	0 on success, negative error code otherwise.
 */
int ocs_create_linked_list_from_sg(const struct ocs_aes_dev *aes_dev,
				   struct scatterlist *sg,
				   int sg_dma_count,
				   struct ocs_dll_desc *dll_desc,
				   size_t data_size, size_t data_offset)
{
	struct ocs_dma_linked_list *ll = NULL;
	struct scatterlist *sg_tmp;
	unsigned int tmp;
	int dma_nents;
	int i;

	if (!dll_desc || !sg || !aes_dev)
		return -EINVAL;

	/* Default values for when no dll_desc is created. */
	dll_desc->vaddr = NULL;
	dll_desc->dma_addr = DMA_MAPPING_ERROR;
	dll_desc->size = 0;

	if (data_size == 0)
		return 0;

	/* Loop over sg_list until we reach entry at specified offset. */
	while (data_offset >= sg_dma_len(sg)) {
		data_offset -= sg_dma_len(sg);
		sg_dma_count--;
		sg = sg_next(sg);
		/* If we reach the end of the list, offset was invalid. */
		if (!sg || sg_dma_count == 0)
			return -EINVAL;
	}

	/* Compute number of DMA-mapped SG entries to add into OCS DMA list. */
	dma_nents = 0;
	tmp = 0;
	sg_tmp = sg;
	while (tmp < data_offset + data_size) {
		/* If we reach the end of the list, data_size was invalid. */
		if (!sg_tmp)
			return -EINVAL;
		tmp += sg_dma_len(sg_tmp);
		dma_nents++;
		sg_tmp = sg_next(sg_tmp);
	}
	if (dma_nents > sg_dma_count)
		return -EINVAL;

	/* Allocate the DMA list, one entry for each SG entry. */
	dll_desc->size = sizeof(struct ocs_dma_linked_list) * dma_nents;
	dll_desc->vaddr = dma_alloc_coherent(aes_dev->dev, dll_desc->size,
					     &dll_desc->dma_addr, GFP_KERNEL);
	if (!dll_desc->vaddr)
		return -ENOMEM;

	/* Populate DMA linked list entries. */
	ll = dll_desc->vaddr;
	for (i = 0; i < dma_nents; i++, sg = sg_next(sg)) {
		ll[i].src_addr = sg_dma_address(sg) + data_offset;
		ll[i].src_len = (sg_dma_len(sg) - data_offset) < data_size ?
				(sg_dma_len(sg) - data_offset) : data_size;
		data_offset = 0;
		data_size -= ll[i].src_len;
		/* Current element points to the DMA address of the next one. */
		ll[i].next = dll_desc->dma_addr + (sizeof(*ll) * (i + 1));
		ll[i].ll_flags = 0;
	}
	/* Terminate last element. */
	ll[i - 1].next = 0;
	ll[i - 1].ll_flags = OCS_LL_DMA_FLAG_TERMINATE;

	return 0;
}
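
/*
 * Typical usage (illustrative sketch only; error handling is elided and the
 * scatterlist variables are hypothetical). The caller DMA-maps its
 * scatterlist first, builds the OCS linked list from it, and frees the
 * coherent buffer when the operation completes:
 *
 *	struct ocs_dll_desc desc;
 *	int nents, rc;
 *
 *	nents = dma_map_sg(aes_dev->dev, sg, sg_nents(sg), DMA_TO_DEVICE);
 *	rc = ocs_create_linked_list_from_sg(aes_dev, sg, nents, &desc,
 *					    data_size, 0);
 *	...
 *	dma_free_coherent(aes_dev->dev, desc.size, desc.vaddr, desc.dma_addr);
 */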