// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel Keem Bay OCS HCU Crypto Driver.
 *
 * Copyright (C) 2018-2020 Intel Corporation
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/module.h>

#include <crypto/sha2.h>

#include "ocs-hcu.h"

/* Registers. */
#define OCS_HCU_MODE			0x00
#define OCS_HCU_CHAIN			0x04
#define OCS_HCU_OPERATION		0x08
#define OCS_HCU_KEY_0			0x0C
#define OCS_HCU_ISR			0x50
#define OCS_HCU_IER			0x54
#define OCS_HCU_STATUS			0x58
#define OCS_HCU_MSG_LEN_LO		0x60
#define OCS_HCU_MSG_LEN_HI		0x64
#define OCS_HCU_KEY_BYTE_ORDER_CFG	0x80
#define OCS_HCU_DMA_SRC_ADDR		0x400
#define OCS_HCU_DMA_SRC_SIZE		0x408
#define OCS_HCU_DMA_DST_SIZE		0x40C
#define OCS_HCU_DMA_DMA_MODE		0x410
#define OCS_HCU_DMA_NEXT_SRC_DESCR	0x418
#define OCS_HCU_DMA_MSI_ISR		0x480
#define OCS_HCU_DMA_MSI_IER		0x484
#define OCS_HCU_DMA_MSI_MASK		0x488
/* Register bit definitions. */
#define HCU_MODE_ALGO_SHIFT		16
#define HCU_MODE_HMAC_SHIFT		22

#define HCU_STATUS_BUSY			BIT(0)

#define HCU_BYTE_ORDER_SWAP		BIT(0)

#define HCU_IRQ_HASH_DONE		BIT(2)
#define HCU_IRQ_HASH_ERR_MASK		(BIT(3) | BIT(1) | BIT(0))

#define HCU_DMA_IRQ_SRC_DONE		BIT(0)
#define HCU_DMA_IRQ_SAI_ERR		BIT(2)
#define HCU_DMA_IRQ_BAD_COMP_ERR	BIT(3)
#define HCU_DMA_IRQ_INBUF_RD_ERR	BIT(4)
#define HCU_DMA_IRQ_INBUF_WD_ERR	BIT(5)
#define HCU_DMA_IRQ_OUTBUF_WR_ERR	BIT(6)
#define HCU_DMA_IRQ_OUTBUF_RD_ERR	BIT(7)
#define HCU_DMA_IRQ_CRD_ERR		BIT(8)
#define HCU_DMA_IRQ_ERR_MASK		(HCU_DMA_IRQ_SAI_ERR | \
					 HCU_DMA_IRQ_BAD_COMP_ERR | \
					 HCU_DMA_IRQ_INBUF_RD_ERR | \
					 HCU_DMA_IRQ_INBUF_WD_ERR | \
					 HCU_DMA_IRQ_OUTBUF_WR_ERR | \
					 HCU_DMA_IRQ_OUTBUF_RD_ERR | \
					 HCU_DMA_IRQ_CRD_ERR)

#define HCU_DMA_SNOOP_MASK		(0x7 << 28)
#define HCU_DMA_SRC_LL_EN		BIT(25)
#define HCU_DMA_EN			BIT(31)
#define OCS_HCU_ENDIANNESS_VALUE	0x2A

#define HCU_DMA_MSI_UNMASK		BIT(0)
#define HCU_DMA_MSI_DISABLE		0
#define HCU_IRQ_DISABLE			0

#define OCS_HCU_START			BIT(0)
#define OCS_HCU_TERMINATE		BIT(1)

#define OCS_LL_DMA_FLAG_TERMINATE	BIT(31)

#define OCS_HCU_HW_KEY_LEN_U32		(OCS_HCU_HW_KEY_LEN / sizeof(u32))

#define HCU_DATA_WRITE_ENDIANNESS_OFFSET	26

#define OCS_HCU_NUM_CHAINS_SHA256_224_SM3	(SHA256_DIGEST_SIZE / sizeof(u32))
#define OCS_HCU_NUM_CHAINS_SHA384_512		(SHA512_DIGEST_SIZE / sizeof(u32))

/*
 * While polling on a busy HCU, wait maximum 200us between one check and the
 * other.
 */
#define OCS_HCU_WAIT_BUSY_RETRY_DELAY_US	200
/* Wait on a busy HCU for maximum 1 second. */
#define OCS_HCU_WAIT_BUSY_TIMEOUT_US		1000000
/**
 * struct ocs_hcu_dma_entry - An entry in an OCS DMA linked list.
 * @src_addr:  Source address of the data.
 * @src_len:   Length of data to be fetched.
 * @nxt_desc:  Next descriptor to fetch.
 * @ll_flags:  Flags (Freeze @ terminate) for the DMA engine.
 */
struct ocs_hcu_dma_entry {
	u32 src_addr;
	u32 src_len;
	u32 nxt_desc;
	u32 ll_flags;
};
/**
 * struct ocs_hcu_dma_list - OCS-specific DMA linked list.
 * @head:	The head of the list (points to the array backing the list).
 * @tail:	The current tail of the list; NULL if the list is empty.
 * @dma_addr:	The DMA address of @head (i.e., the DMA address of the backing
 *		array).
 * @max_nents:	Maximum number of entries in the list (i.e., number of elements
 *		in the backing array).
 *
 * The OCS DMA list is an array-backed list of OCS DMA descriptors. The array
 * backing the list is allocated with dma_alloc_coherent() and pointed by
 * @head.
 */
struct ocs_hcu_dma_list {
	struct ocs_hcu_dma_entry	*head;
	struct ocs_hcu_dma_entry	*tail;
	dma_addr_t			dma_addr;
	size_t				max_nents;
};
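
/*
 * Illustrative sketch (not part of the driver): the in-memory layout of a
 * two-entry list built over the backing array. The hardware walks the chain
 * through DMA addresses derived from @dma_addr, not through kernel pointers:
 *
 *	head[0].nxt_desc = dma_addr + sizeof(struct ocs_hcu_dma_entry);
 *	head[0].ll_flags &= ~OCS_LL_DMA_FLAG_TERMINATE;
 *	head[1].nxt_desc = 0;
 *	head[1].ll_flags = OCS_LL_DMA_FLAG_TERMINATE;
 *
 * ocs_hcu_dma_list_add_tail() maintains exactly this invariant as entries
 * are appended.
 */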
static inline u32 ocs_hcu_num_chains(enum ocs_hcu_algo algo)
{
	switch (algo) {
	case OCS_HCU_ALGO_SHA224:
	case OCS_HCU_ALGO_SHA256:
	case OCS_HCU_ALGO_SM3:
		return OCS_HCU_NUM_CHAINS_SHA256_224_SM3;
	case OCS_HCU_ALGO_SHA384:
	case OCS_HCU_ALGO_SHA512:
		return OCS_HCU_NUM_CHAINS_SHA384_512;
	default:
		return 0;
	}
}
static inline u32 ocs_hcu_digest_size(enum ocs_hcu_algo algo)
{
	switch (algo) {
	case OCS_HCU_ALGO_SHA224:
		return SHA224_DIGEST_SIZE;
	case OCS_HCU_ALGO_SHA256:
	case OCS_HCU_ALGO_SM3:
		/* SM3 shares the same digest size. */
		return SHA256_DIGEST_SIZE;
	case OCS_HCU_ALGO_SHA384:
		return SHA384_DIGEST_SIZE;
	case OCS_HCU_ALGO_SHA512:
		return SHA512_DIGEST_SIZE;
	default:
		return 0;
	}
}
/**
 * ocs_hcu_wait_busy() - Wait for the HCU OCS hardware to become usable.
 * @hcu_dev:	OCS HCU device to wait for.
 *
 * Return: 0 if device is free, -ETIMEDOUT if device is busy and the internal
 *	   timeout has expired.
 */
static int ocs_hcu_wait_busy(struct ocs_hcu_dev *hcu_dev)
{
	long val;

	return readl_poll_timeout(hcu_dev->io_base + OCS_HCU_STATUS, val,
				  !(val & HCU_STATUS_BUSY),
				  OCS_HCU_WAIT_BUSY_RETRY_DELAY_US,
				  OCS_HCU_WAIT_BUSY_TIMEOUT_US);
}
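
/*
 * For reference, a rough open-coded equivalent of the poll above (sketch
 * only; readl_poll_timeout() additionally handles sleep granularity and a
 * final re-read after the timeout elapses):
 *
 *	unsigned long timeout = jiffies +
 *		usecs_to_jiffies(OCS_HCU_WAIT_BUSY_TIMEOUT_US);
 *
 *	while (readl(hcu_dev->io_base + OCS_HCU_STATUS) & HCU_STATUS_BUSY) {
 *		if (time_after(jiffies, timeout))
 *			return -ETIMEDOUT;
 *		usleep_range(OCS_HCU_WAIT_BUSY_RETRY_DELAY_US / 2,
 *			     OCS_HCU_WAIT_BUSY_RETRY_DELAY_US);
 *	}
 *	return 0;
 */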
static void ocs_hcu_done_irq_en(struct ocs_hcu_dev *hcu_dev)
{
	/* Clear any pending interrupts. */
	writel(0xFFFFFFFF, hcu_dev->io_base + OCS_HCU_ISR);
	hcu_dev->irq_err = false;
	/* Enable error and HCU done interrupts. */
	writel(HCU_IRQ_HASH_DONE | HCU_IRQ_HASH_ERR_MASK,
	       hcu_dev->io_base + OCS_HCU_IER);
}
static void ocs_hcu_dma_irq_en(struct ocs_hcu_dev *hcu_dev)
{
	/* Clear any pending interrupts. */
	writel(0xFFFFFFFF, hcu_dev->io_base + OCS_HCU_DMA_MSI_ISR);
	hcu_dev->irq_err = false;
	/* Only operating on DMA source completion and error interrupts. */
	writel(HCU_DMA_IRQ_ERR_MASK | HCU_DMA_IRQ_SRC_DONE,
	       hcu_dev->io_base + OCS_HCU_DMA_MSI_IER);
	/* Unmask. */
	writel(HCU_DMA_MSI_UNMASK, hcu_dev->io_base + OCS_HCU_DMA_MSI_MASK);
}
static void ocs_hcu_irq_dis(struct ocs_hcu_dev *hcu_dev)
{
	writel(HCU_IRQ_DISABLE, hcu_dev->io_base + OCS_HCU_IER);
	writel(HCU_DMA_MSI_DISABLE, hcu_dev->io_base + OCS_HCU_DMA_MSI_IER);
}
static int ocs_hcu_wait_and_disable_irq(struct ocs_hcu_dev *hcu_dev)
{
	int rc;

	rc = wait_for_completion_interruptible(&hcu_dev->irq_done);
	if (rc)
		goto exit;

	if (hcu_dev->irq_err) {
		/* Unset flag and return error. */
		hcu_dev->irq_err = false;
		rc = -EIO;
		goto exit;
	}

exit:
	ocs_hcu_irq_dis(hcu_dev);

	return rc;
}
/**
 * ocs_hcu_get_intermediate_data() - Get intermediate data.
 * @hcu_dev:	The target HCU device.
 * @data:	Where to store the intermediate data.
 * @algo:	The algorithm being used.
 *
 * This function is used to save the current hashing process state in order to
 * continue it in the future.
 *
 * Note: once all data has been processed, the intermediate data actually
 * contains the hashing result. So this function is also used to retrieve the
 * final result of a hashing process.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int ocs_hcu_get_intermediate_data(struct ocs_hcu_dev *hcu_dev,
					 struct ocs_hcu_idata *data,
					 enum ocs_hcu_algo algo)
{
	const int n = ocs_hcu_num_chains(algo);
	u32 *chain;
	int rc;
	int i;

	/* Data not requested. */
	if (!data)
		return -EINVAL;

	chain = (u32 *)data->digest;

	/* Ensure that the OCS is no longer busy before reading the chains. */
	rc = ocs_hcu_wait_busy(hcu_dev);
	if (rc)
		return rc;

	/*
	 * This loop is safe because data->digest is an array of
	 * SHA512_DIGEST_SIZE bytes and the maximum value returned by
	 * ocs_hcu_num_chains() is OCS_HCU_NUM_CHAINS_SHA384_512, which is
	 * equal to SHA512_DIGEST_SIZE / sizeof(u32).
	 */
	for (i = 0; i < n; i++)
		chain[i] = readl(hcu_dev->io_base + OCS_HCU_CHAIN);

	data->msg_len_lo = readl(hcu_dev->io_base + OCS_HCU_MSG_LEN_LO);
	data->msg_len_hi = readl(hcu_dev->io_base + OCS_HCU_MSG_LEN_HI);

	return 0;
}
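
/*
 * Illustrative save/restore round trip (hypothetical caller code): the state
 * saved here is fed back to the hardware by ocs_hcu_set_intermediate_data()
 * below, which is what allows one hash to span multiple requests:
 *
 *	ocs_hcu_hash_update(hcu_dev, &ctx, list_a);  // state saved in ctx.idata
 *	// ...the HCU may serve other requests here...
 *	ocs_hcu_hash_update(hcu_dev, &ctx, list_b);  // ctx.idata restored first
 */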
/**
 * ocs_hcu_set_intermediate_data() - Set intermediate data.
 * @hcu_dev:	The target HCU device.
 * @data:	The intermediate data to be set.
 * @algo:	The algorithm being used.
 *
 * This function is used to continue a previous hashing process.
 */
static void ocs_hcu_set_intermediate_data(struct ocs_hcu_dev *hcu_dev,
					  const struct ocs_hcu_idata *data,
					  enum ocs_hcu_algo algo)
{
	const int n = ocs_hcu_num_chains(algo);
	u32 *chain = (u32 *)data->digest;
	int i;

	/*
	 * This loop is safe because data->digest is an array of
	 * SHA512_DIGEST_SIZE bytes and the maximum value returned by
	 * ocs_hcu_num_chains() is OCS_HCU_NUM_CHAINS_SHA384_512, which is
	 * equal to SHA512_DIGEST_SIZE / sizeof(u32).
	 */
	for (i = 0; i < n; i++)
		writel(chain[i], hcu_dev->io_base + OCS_HCU_CHAIN);

	writel(data->msg_len_lo, hcu_dev->io_base + OCS_HCU_MSG_LEN_LO);
	writel(data->msg_len_hi, hcu_dev->io_base + OCS_HCU_MSG_LEN_HI);
}
static int ocs_hcu_get_digest(struct ocs_hcu_dev *hcu_dev,
			      enum ocs_hcu_algo algo, u8 *dgst, size_t dgst_len)
{
	u32 *chain;
	int rc;
	int i;

	if (!dgst)
		return -EINVAL;

	/* Length of the output buffer must match the algo digest size. */
	if (dgst_len != ocs_hcu_digest_size(algo))
		return -EINVAL;

	/* Ensure that the OCS is no longer busy before reading the chains. */
	rc = ocs_hcu_wait_busy(hcu_dev);
	if (rc)
		return rc;

	chain = (u32 *)dgst;
	for (i = 0; i < dgst_len / sizeof(u32); i++)
		chain[i] = readl(hcu_dev->io_base + OCS_HCU_CHAIN);

	return 0;
}
/**
 * ocs_hcu_hw_cfg() - Configure the HCU hardware.
 * @hcu_dev:	The HCU device to configure.
 * @algo:	The algorithm to be used by the HCU device.
 * @use_hmac:	Whether or not HW HMAC should be used.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int ocs_hcu_hw_cfg(struct ocs_hcu_dev *hcu_dev, enum ocs_hcu_algo algo,
			  bool use_hmac)
{
	u32 cfg;
	int rc;

	if (algo != OCS_HCU_ALGO_SHA256 && algo != OCS_HCU_ALGO_SHA224 &&
	    algo != OCS_HCU_ALGO_SHA384 && algo != OCS_HCU_ALGO_SHA512 &&
	    algo != OCS_HCU_ALGO_SM3)
		return -EINVAL;

	rc = ocs_hcu_wait_busy(hcu_dev);
	if (rc)
		return rc;

	/* Ensure interrupts are disabled. */
	ocs_hcu_irq_dis(hcu_dev);

	/* Configure endianness, hashing algorithm and HW HMAC (if needed). */
	cfg = OCS_HCU_ENDIANNESS_VALUE << HCU_DATA_WRITE_ENDIANNESS_OFFSET;
	cfg |= algo << HCU_MODE_ALGO_SHIFT;
	if (use_hmac)
		cfg |= BIT(HCU_MODE_HMAC_SHIFT);

	writel(cfg, hcu_dev->io_base + OCS_HCU_MODE);

	return 0;
}
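
/*
 * Worked example (the numeric values of enum ocs_hcu_algo live in ocs-hcu.h
 * and are not visible here, so the algo term below is symbolic): for SHA-256
 * with HW HMAC enabled, the mode word is composed as:
 *
 *	cfg = (OCS_HCU_ENDIANNESS_VALUE << 26)	// 0x2A, data write endianness
 *	    | (OCS_HCU_ALGO_SHA256 << 16)	// algorithm select
 *	    | BIT(22);				// HW HMAC enable
 */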
/**
 * ocs_hcu_clear_key() - Clear key stored in OCS HMAC KEY registers.
 * @hcu_dev:	The OCS HCU device whose key registers should be cleared.
 */
static void ocs_hcu_clear_key(struct ocs_hcu_dev *hcu_dev)
{
	int reg_off;

	/* Clear OCS_HCU_KEY_[0..15]. */
	for (reg_off = 0; reg_off < OCS_HCU_HW_KEY_LEN; reg_off += sizeof(u32))
		writel(0, hcu_dev->io_base + OCS_HCU_KEY_0 + reg_off);
}
/**
 * ocs_hcu_write_key() - Write key to OCS HMAC KEY registers.
 * @hcu_dev:	The OCS HCU device the key should be written to.
 * @key:	The key to be written.
 * @len:	The size of the key to write. It must not exceed
 *		OCS_HCU_HW_KEY_LEN; shorter keys are zero-padded.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int ocs_hcu_write_key(struct ocs_hcu_dev *hcu_dev, const u8 *key, size_t len)
{
	u32 key_u32[OCS_HCU_HW_KEY_LEN_U32];
	int i;

	if (len > OCS_HCU_HW_KEY_LEN)
		return -EINVAL;

	/* Copy key into temporary u32 array. */
	memcpy(key_u32, key, len);

	/*
	 * Hardware requires all the bytes of the HW Key vector to be
	 * written. So pad with zero until we reach OCS_HCU_HW_KEY_LEN.
	 */
	memzero_explicit((u8 *)key_u32 + len, OCS_HCU_HW_KEY_LEN - len);

	/*
	 * OCS hardware expects the MSB of the key to be written at the highest
	 * address of the HCU Key vector; in other words, the key must be
	 * written in reverse order.
	 *
	 * Therefore, we first enable byte swapping for the HCU key vector,
	 * so that the bytes of 32-bit words written to OCS_HCU_KEY_[0..15]
	 * will be swapped:
	 * 3 <---> 0, 2 <---> 1.
	 */
	writel(HCU_BYTE_ORDER_SWAP,
	       hcu_dev->io_base + OCS_HCU_KEY_BYTE_ORDER_CFG);
	/*
	 * And then we write the 32-bit words composing the key starting from
	 * the end of the key.
	 */
	for (i = 0; i < OCS_HCU_HW_KEY_LEN_U32; i++)
		writel(key_u32[OCS_HCU_HW_KEY_LEN_U32 - 1 - i],
		       hcu_dev->io_base + OCS_HCU_KEY_0 + (sizeof(u32) * i));

	memzero_explicit(key_u32, OCS_HCU_HW_KEY_LEN);

	return 0;
}
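
/*
 * Worked example, assuming OCS_HCU_HW_KEY_LEN is 64 (i.e., the sixteen
 * registers OCS_HCU_KEY_[0..15] cleared by ocs_hcu_clear_key()): the first
 * loop iteration writes key_u32[15] (key bytes 60..63) to OCS_HCU_KEY_0;
 * with byte swapping enabled, key byte 63 lands in the lowest byte lane.
 * The net effect is that the whole key vector is byte-reversed, leaving key
 * byte 0 (the MSB) at the highest address, as the hardware expects:
 *
 *	writel(key_u32[15], io_base + OCS_HCU_KEY_0);		// bytes 63..60
 *	...
 *	writel(key_u32[0], io_base + OCS_HCU_KEY_0 + 60);	// bytes 3..0
 */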
/**
 * ocs_hcu_ll_dma_start() - Start OCS HCU hashing via DMA.
 * @hcu_dev:	The OCS HCU device to use.
 * @dma_list:	The OCS DMA list mapping the data to hash.
 * @finalize:	Whether or not this is the last hashing operation and therefore
 *		the final hash should be computed even if data is not
 *		block-aligned.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int ocs_hcu_ll_dma_start(struct ocs_hcu_dev *hcu_dev,
				const struct ocs_hcu_dma_list *dma_list,
				bool finalize)
{
	u32 cfg = HCU_DMA_SNOOP_MASK | HCU_DMA_SRC_LL_EN | HCU_DMA_EN;
	int rc;

	if (!dma_list)
		return -EINVAL;

	/*
	 * For final requests we use the HCU_DONE IRQ to be notified when all
	 * input data has been processed by the HCU; however, we cannot do so
	 * for non-final requests, because we don't get a HCU_DONE IRQ when we
	 * don't terminate the operation.
	 *
	 * Therefore, for non-final requests, we use the DMA IRQ, which
	 * triggers when DMA has finished feeding all the input data to the
	 * HCU, but the HCU may still be processing it. This is fine, since we
	 * will wait for the HCU processing to be completed when we try to read
	 * intermediate results, in ocs_hcu_get_intermediate_data().
	 */
	if (finalize)
		ocs_hcu_done_irq_en(hcu_dev);
	else
		ocs_hcu_dma_irq_en(hcu_dev);

	reinit_completion(&hcu_dev->irq_done);
	writel(dma_list->dma_addr, hcu_dev->io_base + OCS_HCU_DMA_NEXT_SRC_DESCR);
	writel(0, hcu_dev->io_base + OCS_HCU_DMA_SRC_SIZE);
	writel(0, hcu_dev->io_base + OCS_HCU_DMA_DST_SIZE);

	writel(OCS_HCU_START, hcu_dev->io_base + OCS_HCU_OPERATION);

	writel(cfg, hcu_dev->io_base + OCS_HCU_DMA_DMA_MODE);

	if (finalize)
		writel(OCS_HCU_TERMINATE, hcu_dev->io_base + OCS_HCU_OPERATION);

	rc = ocs_hcu_wait_and_disable_irq(hcu_dev);
	if (rc)
		return rc;

	return 0;
}
struct ocs_hcu_dma_list *ocs_hcu_dma_list_alloc(struct ocs_hcu_dev *hcu_dev,
						int max_nents)
{
	struct ocs_hcu_dma_list *dma_list;

	dma_list = kmalloc(sizeof(*dma_list), GFP_KERNEL);
	if (!dma_list)
		return NULL;

	/* Total size of the DMA list to allocate. */
	dma_list->head = dma_alloc_coherent(hcu_dev->dev,
					    sizeof(*dma_list->head) * max_nents,
					    &dma_list->dma_addr, GFP_KERNEL);
	if (!dma_list->head) {
		kfree(dma_list);
		return NULL;
	}
	dma_list->max_nents = max_nents;
	dma_list->tail = NULL;

	return dma_list;
}
void ocs_hcu_dma_list_free(struct ocs_hcu_dev *hcu_dev,
			   struct ocs_hcu_dma_list *dma_list)
{
	if (!dma_list)
		return;

	dma_free_coherent(hcu_dev->dev,
			  sizeof(*dma_list->head) * dma_list->max_nents,
			  dma_list->head, dma_list->dma_addr);

	kfree(dma_list);
}
/* Add a new DMA entry at the end of the OCS DMA list. */
int ocs_hcu_dma_list_add_tail(struct ocs_hcu_dev *hcu_dev,
			      struct ocs_hcu_dma_list *dma_list,
			      dma_addr_t addr, u32 len)
{
	struct device *dev = hcu_dev->dev;
	struct ocs_hcu_dma_entry *old_tail;
	struct ocs_hcu_dma_entry *new_tail;

	if (!dma_list)
		return -EINVAL;

	if (!len)
		return 0;

	if (addr & ~OCS_HCU_DMA_BIT_MASK) {
		dev_err(dev,
			"Unexpected error: Invalid DMA address for OCS HCU\n");
		return -EINVAL;
	}

	old_tail = dma_list->tail;
	new_tail = old_tail ? old_tail + 1 : dma_list->head;

	/* Check if list is full. */
	if (new_tail - dma_list->head >= dma_list->max_nents)
		return -ENOMEM;

	/*
	 * If there was an old tail (i.e., this is not the first element we are
	 * adding), un-terminate the old tail and make it point to the new one.
	 */
	if (old_tail) {
		old_tail->ll_flags &= ~OCS_LL_DMA_FLAG_TERMINATE;
		/*
		 * The old tail 'nxt_desc' must point to the DMA address of the
		 * new tail.
		 */
		old_tail->nxt_desc = dma_list->dma_addr +
				     sizeof(*dma_list->tail) * (new_tail -
								dma_list->head);
	}

	new_tail->src_addr = (u32)addr;
	new_tail->src_len = (u32)len;
	new_tail->ll_flags = OCS_LL_DMA_FLAG_TERMINATE;
	new_tail->nxt_desc = 0;

	/* Update list tail with new tail. */
	dma_list->tail = new_tail;

	return 0;
}
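
/*
 * Typical usage sketch (hypothetical caller, error handling omitted): build
 * the list once per request, hash through it, then release it:
 *
 *	dma_list = ocs_hcu_dma_list_alloc(hcu_dev, nents);
 *	// for each DMA-mapped segment of the input:
 *	ocs_hcu_dma_list_add_tail(hcu_dev, dma_list, sg_dma_address(sg),
 *				  sg_dma_len(sg));
 *	// ...hash via ocs_hcu_hash_update()/ocs_hcu_hash_finup()...
 *	ocs_hcu_dma_list_free(hcu_dev, dma_list);
 */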
/**
 * ocs_hcu_hash_init() - Initialize hash operation context.
 * @ctx:	The context to initialize.
 * @algo:	The hashing algorithm to use.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int ocs_hcu_hash_init(struct ocs_hcu_hash_ctx *ctx, enum ocs_hcu_algo algo)
{
	if (!ctx)
		return -EINVAL;

	ctx->algo = algo;
	ctx->idata.msg_len_lo = 0;
	ctx->idata.msg_len_hi = 0;
	/* No need to set idata.digest to 0. */

	return 0;
}
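
/*
 * Sketch of a complete multi-step hash built on ocs_hcu_hash_update() and
 * ocs_hcu_hash_finup() defined below (hypothetical caller, error handling
 * omitted):
 *
 *	struct ocs_hcu_hash_ctx ctx;
 *
 *	ocs_hcu_hash_init(&ctx, OCS_HCU_ALGO_SHA256);
 *	ocs_hcu_hash_update(hcu_dev, &ctx, list_a);	// intermediate chunk(s)
 *	ocs_hcu_hash_finup(hcu_dev, &ctx, list_b, dgst, SHA256_DIGEST_SIZE);
 */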
/**
 * ocs_hcu_hash_update() - Perform a hashing iteration.
 * @hcu_dev:	The OCS HCU device to use.
 * @ctx:	The OCS HCU hashing context.
 * @dma_list:	The OCS DMA list mapping the input data to process.
 *
 * Return: 0 on success; negative error code otherwise.
 */
int ocs_hcu_hash_update(struct ocs_hcu_dev *hcu_dev,
			struct ocs_hcu_hash_ctx *ctx,
			const struct ocs_hcu_dma_list *dma_list)
{
	int rc;

	if (!hcu_dev || !ctx)
		return -EINVAL;

	/* Configure the hardware for the current request. */
	rc = ocs_hcu_hw_cfg(hcu_dev, ctx->algo, false);
	if (rc)
		return rc;

	/* If we already processed some data, idata needs to be set. */
	if (ctx->idata.msg_len_lo || ctx->idata.msg_len_hi)
		ocs_hcu_set_intermediate_data(hcu_dev, &ctx->idata, ctx->algo);

	/* Start linked-list DMA hashing. */
	rc = ocs_hcu_ll_dma_start(hcu_dev, dma_list, false);
	if (rc)
		return rc;

	/* Update idata and return. */
	return ocs_hcu_get_intermediate_data(hcu_dev, &ctx->idata, ctx->algo);
}
/**
 * ocs_hcu_hash_finup() - Update and finalize hash computation.
 * @hcu_dev:	The OCS HCU device to use.
 * @ctx:	The OCS HCU hashing context.
 * @dma_list:	The OCS DMA list mapping the input data to process.
 * @dgst:	The buffer where to save the computed digest.
 * @dgst_len:	The length of @dgst.
 *
 * Return: 0 on success; negative error code otherwise.
 */
int ocs_hcu_hash_finup(struct ocs_hcu_dev *hcu_dev,
		       const struct ocs_hcu_hash_ctx *ctx,
		       const struct ocs_hcu_dma_list *dma_list,
		       u8 *dgst, size_t dgst_len)
{
	int rc;

	if (!hcu_dev || !ctx)
		return -EINVAL;

	/* Configure the hardware for the current request. */
	rc = ocs_hcu_hw_cfg(hcu_dev, ctx->algo, false);
	if (rc)
		return rc;

	/* If we already processed some data, idata needs to be set. */
	if (ctx->idata.msg_len_lo || ctx->idata.msg_len_hi)
		ocs_hcu_set_intermediate_data(hcu_dev, &ctx->idata, ctx->algo);

	/* Start linked-list DMA hashing. */
	rc = ocs_hcu_ll_dma_start(hcu_dev, dma_list, true);
	if (rc)
		return rc;

	/* Get digest and return. */
	return ocs_hcu_get_digest(hcu_dev, ctx->algo, dgst, dgst_len);
}
/**
 * ocs_hcu_hash_final() - Finalize hash computation.
 * @hcu_dev:	The OCS HCU device to use.
 * @ctx:	The OCS HCU hashing context.
 * @dgst:	The buffer where to save the computed digest.
 * @dgst_len:	The length of @dgst.
 *
 * Return: 0 on success; negative error code otherwise.
 */
int ocs_hcu_hash_final(struct ocs_hcu_dev *hcu_dev,
		       const struct ocs_hcu_hash_ctx *ctx, u8 *dgst,
		       size_t dgst_len)
{
	int rc;

	if (!hcu_dev || !ctx)
		return -EINVAL;

	/* Configure the hardware for the current request. */
	rc = ocs_hcu_hw_cfg(hcu_dev, ctx->algo, false);
	if (rc)
		return rc;

	/* If we already processed some data, idata needs to be set. */
	if (ctx->idata.msg_len_lo || ctx->idata.msg_len_hi)
		ocs_hcu_set_intermediate_data(hcu_dev, &ctx->idata, ctx->algo);

	/*
	 * Enable HCU interrupts, so that HCU_DONE will be triggered once the
	 * final hash is computed.
	 */
	ocs_hcu_done_irq_en(hcu_dev);
	reinit_completion(&hcu_dev->irq_done);
	writel(OCS_HCU_TERMINATE, hcu_dev->io_base + OCS_HCU_OPERATION);

	rc = ocs_hcu_wait_and_disable_irq(hcu_dev);
	if (rc)
		return rc;

	/* Get digest and return. */
	return ocs_hcu_get_digest(hcu_dev, ctx->algo, dgst, dgst_len);
}
/**
 * ocs_hcu_digest() - Compute hash digest.
 * @hcu_dev:	The OCS HCU device to use.
 * @algo:	The hash algorithm to use.
 * @data:	The input data to process.
 * @data_len:	The length of @data.
 * @dgst:	The buffer where to save the computed digest.
 * @dgst_len:	The length of @dgst.
 *
 * Return: 0 on success; negative error code otherwise.
 */
int ocs_hcu_digest(struct ocs_hcu_dev *hcu_dev, enum ocs_hcu_algo algo,
		   void *data, size_t data_len, u8 *dgst, size_t dgst_len)
{
	struct device *dev = hcu_dev->dev;
	dma_addr_t dma_handle;
	u32 reg;
	int rc;

	/* Configure the hardware for the current request. */
	rc = ocs_hcu_hw_cfg(hcu_dev, algo, false);
	if (rc)
		return rc;

	dma_handle = dma_map_single(dev, data, data_len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_handle))
		return -EIO;

	reg = HCU_DMA_SNOOP_MASK | HCU_DMA_EN;

	ocs_hcu_done_irq_en(hcu_dev);

	reinit_completion(&hcu_dev->irq_done);

	writel(dma_handle, hcu_dev->io_base + OCS_HCU_DMA_SRC_ADDR);
	writel(data_len, hcu_dev->io_base + OCS_HCU_DMA_SRC_SIZE);
	writel(OCS_HCU_START, hcu_dev->io_base + OCS_HCU_OPERATION);
	writel(reg, hcu_dev->io_base + OCS_HCU_DMA_DMA_MODE);

	writel(OCS_HCU_TERMINATE, hcu_dev->io_base + OCS_HCU_OPERATION);

	rc = ocs_hcu_wait_and_disable_irq(hcu_dev);
	if (rc)
		return rc;

	dma_unmap_single(dev, dma_handle, data_len, DMA_TO_DEVICE);

	return ocs_hcu_get_digest(hcu_dev, algo, dgst, dgst_len);
}
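
/*
 * One-shot usage sketch (hypothetical caller): @data must be DMA-able
 * memory (e.g., kmalloc'd), since it is mapped with dma_map_single() above:
 *
 *	u8 out[SHA256_DIGEST_SIZE];
 *	int rc;
 *
 *	rc = ocs_hcu_digest(hcu_dev, OCS_HCU_ALGO_SHA256, buf, buf_len,
 *			    out, sizeof(out));
 */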
/**
 * ocs_hcu_hmac() - Compute HMAC.
 * @hcu_dev:	The OCS HCU device to use.
 * @algo:	The hash algorithm to use with HMAC.
 * @key:	The key to use.
 * @key_len:	The length of @key.
 * @dma_list:	The OCS DMA list mapping the input data to process.
 * @dgst:	The buffer where to save the computed HMAC.
 * @dgst_len:	The length of @dgst.
 *
 * Return: 0 on success; negative error code otherwise.
 */
int ocs_hcu_hmac(struct ocs_hcu_dev *hcu_dev, enum ocs_hcu_algo algo,
		 const u8 *key, size_t key_len,
		 const struct ocs_hcu_dma_list *dma_list,
		 u8 *dgst, size_t dgst_len)
{
	int rc;

	/* Ensure 'key' is not NULL. */
	if (!key || key_len == 0)
		return -EINVAL;

	/* Configure the hardware for the current request. */
	rc = ocs_hcu_hw_cfg(hcu_dev, algo, true);
	if (rc)
		return rc;

	rc = ocs_hcu_write_key(hcu_dev, key, key_len);
	if (rc)
		return rc;

	rc = ocs_hcu_ll_dma_start(hcu_dev, dma_list, true);

	/* Clear HW key before processing return code. */
	ocs_hcu_clear_key(hcu_dev);

	if (rc)
		return rc;

	return ocs_hcu_get_digest(hcu_dev, algo, dgst, dgst_len);
}
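
/*
 * Design note with a usage sketch (hypothetical caller): the key lives in
 * the OCS_HCU_KEY registers only for the duration of the request and is
 * cleared before the DMA return code is examined, so an early exit cannot
 * leave key material in the hardware:
 *
 *	rc = ocs_hcu_hmac(hcu_dev, OCS_HCU_ALGO_SHA256, key, key_len,
 *			  dma_list, out, SHA256_DIGEST_SIZE);
 */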
irqreturn_t ocs_hcu_irq_handler(int irq, void *dev_id)
{
	struct ocs_hcu_dev *hcu_dev = dev_id;
	u32 hcu_irq;
	u32 dma_irq;

	/* Read and clear the HCU interrupt. */
	hcu_irq = readl(hcu_dev->io_base + OCS_HCU_ISR);
	writel(hcu_irq, hcu_dev->io_base + OCS_HCU_ISR);

	/* Read and clear the HCU DMA interrupt. */
	dma_irq = readl(hcu_dev->io_base + OCS_HCU_DMA_MSI_ISR);
	writel(dma_irq, hcu_dev->io_base + OCS_HCU_DMA_MSI_ISR);

	/* Check for errors. */
	if (hcu_irq & HCU_IRQ_HASH_ERR_MASK || dma_irq & HCU_DMA_IRQ_ERR_MASK) {
		hcu_dev->irq_err = true;
		goto complete;
	}

	/* Check for DONE IRQs. */
	if (hcu_irq & HCU_IRQ_HASH_DONE || dma_irq & HCU_DMA_IRQ_SRC_DONE)
		goto complete;

	return IRQ_NONE;

complete:
	complete(&hcu_dev->irq_done);

	return IRQ_HANDLED;
}

MODULE_DESCRIPTION("Intel Keem Bay OCS HCU Crypto Driver");
MODULE_LICENSE("GPL");