// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Marvell
 *
 * Antoine Tenart <antoine.tenart@free-electrons.com>
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>

#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>

#include "safexcel.h"

static u32 max_rings = EIP197_MAX_RINGS;
module_param(max_rings, uint, 0644);
MODULE_PARM_DESC(max_rings, "Maximum number of rings to use.");

static void eip197_trc_cache_setupvirt(struct safexcel_crypto_priv *priv)
{
	int i;

	/*
	 * Map all interfaces/rings to register index 0
	 * so they can share contexts. Without this, the EIP197 will
	 * assume each interface/ring to be in its own memory domain
	 * i.e. have its own subset of UNIQUE memory addresses.
	 * Which would cause records with the SAME memory address to
	 * use DIFFERENT cache buffers, causing both poor cache utilization
	 * AND serious coherence/invalidation issues.
	 */
	for (i = 0; i < 4; i++)
		writel(0, priv->base + EIP197_FLUE_IFC_LUT(i));

	/*
	 * Initialize other virtualization regs for cache
	 * These may not be in their reset state ...
	 */
	for (i = 0; i < priv->config.rings; i++) {
		writel(0, priv->base + EIP197_FLUE_CACHEBASE_LO(i));
		writel(0, priv->base + EIP197_FLUE_CACHEBASE_HI(i));
		writel(EIP197_FLUE_CONFIG_MAGIC,
		       priv->base + EIP197_FLUE_CONFIG(i));
	}
	writel(0, priv->base + EIP197_FLUE_OFFSETS);
	writel(0, priv->base + EIP197_FLUE_ARC4_OFFSET);
}

static void eip197_trc_cache_banksel(struct safexcel_crypto_priv *priv,
				     u32 addrmid, int *actbank)
{
	u32 val;
	int curbank;

	curbank = addrmid >> 16;
	if (curbank != *actbank) {
		val = readl(priv->base + EIP197_CS_RAM_CTRL);
		val = (val & ~EIP197_CS_BANKSEL_MASK) |
		      (curbank << EIP197_CS_BANKSEL_OFS);
		writel(val, priv->base + EIP197_CS_RAM_CTRL);
		*actbank = curbank;
	}
}
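
/*
 * Note on the probing scheme below: eip197_trc_cache_probe() runs a binary
 * search over the address space to find out how much cache RAM is really
 * attached. Each step writes a unique marker to the middle of the current
 * range, overwrites the power-of-two aliases of that address with the
 * inverted marker, and reads the middle back: if the marker survived, RAM
 * extends at least that far and the search continues in the upper half,
 * otherwise in the lower half. As an illustration (not taken from any
 * particular device): with maxbanks = 2 the search starts on the range
 * [0, 1 << 18) and, stepping down to a 32 byte stride, converges on the
 * actual data RAM size in a handful of iterations.
 */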

static u32 eip197_trc_cache_probe(struct safexcel_crypto_priv *priv,
				  int maxbanks, u32 probemask, u32 stride)
{
	u32 val, addrhi, addrlo, addrmid, addralias, delta, marker;
	int actbank;

	/*
	 * And probe the actual size of the physically attached cache data RAM
	 * Using a binary subdivision algorithm down to 32 byte cache lines.
	 */
	addrhi = 1 << (16 + maxbanks);
	addrlo = 0;
	actbank = min(maxbanks - 1, 0);
	while ((addrhi - addrlo) > stride) {
		/* write marker to lowest address in top half */
		addrmid = (addrhi + addrlo) >> 1;
		marker = (addrmid ^ 0xabadbabe) & probemask; /* Unique */
		eip197_trc_cache_banksel(priv, addrmid, &actbank);
		writel(marker,
		       priv->base + EIP197_CLASSIFICATION_RAMS +
		       (addrmid & 0xffff));

		/* write invalid markers to possible aliases */
		delta = 1 << __fls(addrmid);
		while (delta >= stride) {
			addralias = addrmid - delta;
			eip197_trc_cache_banksel(priv, addralias, &actbank);
			writel(~marker,
			       priv->base + EIP197_CLASSIFICATION_RAMS +
			       (addralias & 0xffff));
			delta >>= 1;
		}

		/* read back marker from top half */
		eip197_trc_cache_banksel(priv, addrmid, &actbank);
		val = readl(priv->base + EIP197_CLASSIFICATION_RAMS +
			    (addrmid & 0xffff));

		if ((val & probemask) == marker)
			/* read back correct, continue with top half */
			addrlo = addrmid;
		else
			/* not read back correct, continue with bottom half */
			addrhi = addrmid;
	}
	return addrhi;
}

static void eip197_trc_cache_clear(struct safexcel_crypto_priv *priv,
				   int cs_rc_max, int cs_ht_wc)
{
	int i;
	u32 htable_offset, val, offset;

	/* Clear all records in administration RAM */
	for (i = 0; i < cs_rc_max; i++) {
		offset = EIP197_CLASSIFICATION_RAMS + i * EIP197_CS_RC_SIZE;

		writel(EIP197_CS_RC_NEXT(EIP197_RC_NULL) |
		       EIP197_CS_RC_PREV(EIP197_RC_NULL),
		       priv->base + offset);

		val = EIP197_CS_RC_NEXT(i + 1) | EIP197_CS_RC_PREV(i - 1);
		if (i == 0)
			val |= EIP197_CS_RC_PREV(EIP197_RC_NULL);
		else if (i == cs_rc_max - 1)
			val |= EIP197_CS_RC_NEXT(EIP197_RC_NULL);
		writel(val, priv->base + offset + 4);
		/* must also initialize the address key due to ECC! */
		writel(0, priv->base + offset + 8);
		writel(0, priv->base + offset + 12);
	}

	/* Clear the hash table entries */
	htable_offset = cs_rc_max * EIP197_CS_RC_SIZE;
	for (i = 0; i < cs_ht_wc; i++)
		writel(GENMASK(29, 0),
		       priv->base + EIP197_CLASSIFICATION_RAMS +
		       htable_offset + i * sizeof(u32));
}

static int eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
{
	u32 val, dsize, asize;
	int cs_rc_max, cs_ht_wc, cs_trc_rec_wc, cs_trc_lg_rec_wc;
	int cs_rc_abs_max, cs_ht_sz;
	int maxbanks;

	/* Setup (dummy) virtualization for cache */
	eip197_trc_cache_setupvirt(priv);

	/*
	 * Enable the record cache memory access and
	 * probe the bank select width
	 */
	val = readl(priv->base + EIP197_CS_RAM_CTRL);
	val &= ~EIP197_TRC_ENABLE_MASK;
	val |= EIP197_TRC_ENABLE_0 | EIP197_CS_BANKSEL_MASK;
	writel(val, priv->base + EIP197_CS_RAM_CTRL);
	val = readl(priv->base + EIP197_CS_RAM_CTRL);
	maxbanks = ((val & EIP197_CS_BANKSEL_MASK) >> EIP197_CS_BANKSEL_OFS) + 1;

	/* Clear all ECC errors */
	writel(0, priv->base + EIP197_TRC_ECCCTRL);

	/*
	 * Make sure the cache memory is accessible by taking record cache into
	 * reset. Need data memory access here, not admin access.
	 */
	val = readl(priv->base + EIP197_TRC_PARAMS);
	val |= EIP197_TRC_PARAMS_SW_RESET | EIP197_TRC_PARAMS_DATA_ACCESS;
	writel(val, priv->base + EIP197_TRC_PARAMS);

	/* Probed data RAM size in bytes */
	dsize = eip197_trc_cache_probe(priv, maxbanks, 0xffffffff, 32);

	/*
	 * Now probe the administration RAM size pretty much the same way
	 * Except that only the lower 30 bits are writable and we don't need
	 * bank selects
	 */
	val = readl(priv->base + EIP197_TRC_PARAMS);
	/* admin access now */
	val &= ~(EIP197_TRC_PARAMS_DATA_ACCESS | EIP197_CS_BANKSEL_MASK);
	writel(val, priv->base + EIP197_TRC_PARAMS);

	/* Probed admin RAM size in admin words */
	asize = eip197_trc_cache_probe(priv, 0, 0x3fffffff, 16) >> 4;

	/* Clear any ECC errors detected while probing! */
	writel(0, priv->base + EIP197_TRC_ECCCTRL);

	/* Sanity check probing results */
	if (dsize < EIP197_MIN_DSIZE || asize < EIP197_MIN_ASIZE) {
		dev_err(priv->dev, "Record cache probing failed (%d,%d).",
			dsize, asize);
		return -ENODEV;
	}

	/*
	 * Determine optimal configuration from RAM sizes
	 * Note that we assume that the physical RAM configuration is sane
	 * Therefore, we don't do any parameter error checking here ...
	 */

	/* For now, just use a single record format covering everything */
	cs_trc_rec_wc = EIP197_CS_TRC_REC_WC;
	cs_trc_lg_rec_wc = EIP197_CS_TRC_REC_WC;

	/*
	 * Step #1: How many records will physically fit?
	 * Hard upper limit is 1023!
	 */
	cs_rc_abs_max = min_t(uint, ((dsize >> 2) / cs_trc_lg_rec_wc), 1023);
	/* Step #2: Need at least 2 words in the admin RAM per record */
	cs_rc_max = min_t(uint, cs_rc_abs_max, (asize >> 1));
	/* Step #3: Determine log2 of hash table size */
	cs_ht_sz = __fls(asize - cs_rc_max) - 2;
	/* Step #4: determine current size of hash table in dwords */
	cs_ht_wc = 16 << cs_ht_sz; /* dwords, not admin words */
	/* Step #5: add back excess words and see if we can fit more records */
	cs_rc_max = min_t(uint, cs_rc_abs_max, asize - (cs_ht_wc >> 2));
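
	/*
	 * Worked example of the sizing above (illustrative numbers only, not
	 * read from real hardware, assuming a 64 word record size): with
	 * dsize = 32768 bytes and asize = 2048 admin words, step #1 gives
	 * cs_rc_abs_max = min(8192 / 64, 1023) = 128 records, step #2 keeps
	 * cs_rc_max = min(128, 1024) = 128, step #3 gives cs_ht_sz =
	 * __fls(2048 - 128) - 2 = 8, step #4 a hash table of 16 << 8 = 4096
	 * dwords, and step #5 re-checks the record count against the admin
	 * words left over once the hash table has been accounted for.
	 */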

	/* Clear the cache RAMs */
	eip197_trc_cache_clear(priv, cs_rc_max, cs_ht_wc);

	/* Disable the record cache memory access */
	val = readl(priv->base + EIP197_CS_RAM_CTRL);
	val &= ~EIP197_TRC_ENABLE_MASK;
	writel(val, priv->base + EIP197_CS_RAM_CTRL);

	/* Write head and tail pointers of the record free chain */
	val = EIP197_TRC_FREECHAIN_HEAD_PTR(0) |
	      EIP197_TRC_FREECHAIN_TAIL_PTR(cs_rc_max - 1);
	writel(val, priv->base + EIP197_TRC_FREECHAIN);

	/* Configure the record cache #1 */
	val = EIP197_TRC_PARAMS2_RC_SZ_SMALL(cs_trc_rec_wc) |
	      EIP197_TRC_PARAMS2_HTABLE_PTR(cs_rc_max);
	writel(val, priv->base + EIP197_TRC_PARAMS2);

	/* Configure the record cache #2 */
	val = EIP197_TRC_PARAMS_RC_SZ_LARGE(cs_trc_lg_rec_wc) |
	      EIP197_TRC_PARAMS_BLK_TIMER_SPEED(1) |
	      EIP197_TRC_PARAMS_HTABLE_SZ(cs_ht_sz);
	writel(val, priv->base + EIP197_TRC_PARAMS);

	dev_info(priv->dev, "TRC init: %dd,%da (%dr,%dh)\n",
		 dsize, asize, cs_rc_max, cs_ht_wc + cs_ht_wc);
	return 0;
}

static void eip197_init_firmware(struct safexcel_crypto_priv *priv)
{
	int pe, i;
	u32 val;

	for (pe = 0; pe < priv->config.pes; pe++) {
		/* Configure the token FIFO's */
		writel(3, EIP197_PE(priv) + EIP197_PE_ICE_PUTF_CTRL(pe));
		writel(0, EIP197_PE(priv) + EIP197_PE_ICE_PPTF_CTRL(pe));

		/* Clear the ICE scratchpad memory */
		val = readl(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));
		val |= EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER |
		       EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN |
		       EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS |
		       EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS;
		writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));

		/* clear the scratchpad RAM using 32 bit writes only */
		for (i = 0; i < EIP197_NUM_OF_SCRATCH_BLOCKS; i++)
			writel(0, EIP197_PE(priv) +
				  EIP197_PE_ICE_SCRATCH_RAM(pe) + (i << 2));

		/* Reset the IFPP engine to make its program mem accessible */
		writel(EIP197_PE_ICE_x_CTRL_SW_RESET |
		       EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR |
		       EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR,
		       EIP197_PE(priv) + EIP197_PE_ICE_FPP_CTRL(pe));

		/* Reset the IPUE engine to make its program mem accessible */
		writel(EIP197_PE_ICE_x_CTRL_SW_RESET |
		       EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR |
		       EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR,
		       EIP197_PE(priv) + EIP197_PE_ICE_PUE_CTRL(pe));

		/* Enable access to all IFPP program memories */
		writel(EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN,
		       EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));

		/* bypass the OCE, if present */
		if (priv->flags & EIP197_OCE)
			writel(EIP197_DEBUG_OCE_BYPASS, EIP197_PE(priv) +
			       EIP197_PE_DEBUG(pe));
	}
}

static int eip197_write_firmware(struct safexcel_crypto_priv *priv,
				 const struct firmware *fw)
{
	const __be32 *data = (const __be32 *)fw->data;
	int i;

	/* Write the firmware */
	for (i = 0; i < fw->size / sizeof(u32); i++)
		writel(be32_to_cpu(data[i]),
		       priv->base + EIP197_CLASSIFICATION_RAMS +
		       i * sizeof(__be32));

	/* Exclude final 2 NOPs from size */
	return i - EIP197_FW_TERMINAL_NOPS;
}

/*
 * If FW is actual production firmware, then poll for its initialization
 * to complete and check if it is good for the HW, otherwise just return OK.
 */
static bool poll_fw_ready(struct safexcel_crypto_priv *priv, int fpp)
{
	int pe, pollcnt;
	u32 base, pollofs;

	if (fpp)
		pollofs = EIP197_FW_FPP_READY;
	else
		pollofs = EIP197_FW_PUE_READY;

	for (pe = 0; pe < priv->config.pes; pe++) {
		base = EIP197_PE_ICE_SCRATCH_RAM(pe);
		pollcnt = EIP197_FW_START_POLLCNT;
		while (pollcnt &&
		       (readl_relaxed(EIP197_PE(priv) + base +
				      pollofs) != 1)) {
			pollcnt--;
		}
		if (!pollcnt) {
			dev_err(priv->dev, "FW(%d) for PE %d failed to start\n",
				fpp, pe);
			return false;
		}
	}
	return true;
}
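
/*
 * Note: when only the "mini firmware" (BCLA mode) could be loaded, the
 * microengines are started without a start offset or debug reset and no
 * initialization handshake is expected, which is why the start routine
 * below simply reports success in that case instead of polling the ICE
 * scratch RAM for the ready markers.
 */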

static bool eip197_start_firmware(struct safexcel_crypto_priv *priv,
				  int ipuesz, int ifppsz, int minifw)
{
	int pe;
	u32 val;

	for (pe = 0; pe < priv->config.pes; pe++) {
		/* Disable access to all program memory */
		writel(0, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));

		/* Start IFPP microengines */
		if (minifw)
			val = 0;
		else
			val = EIP197_PE_ICE_UENG_START_OFFSET((ifppsz - 1) &
					EIP197_PE_ICE_UENG_INIT_ALIGN_MASK) |
			      EIP197_PE_ICE_UENG_DEBUG_RESET;
		writel(val, EIP197_PE(priv) + EIP197_PE_ICE_FPP_CTRL(pe));

		/* Start IPUE microengines */
		if (minifw)
			val = 0;
		else
			val = EIP197_PE_ICE_UENG_START_OFFSET((ipuesz - 1) &
					EIP197_PE_ICE_UENG_INIT_ALIGN_MASK) |
			      EIP197_PE_ICE_UENG_DEBUG_RESET;
		writel(val, EIP197_PE(priv) + EIP197_PE_ICE_PUE_CTRL(pe));
	}

	/* For miniFW startup, there is no initialization, so always succeed */
	if (minifw)
		return true;

	/* Wait until all the firmwares have properly started up */
	if (!poll_fw_ready(priv, 1))
		return false;
	if (!poll_fw_ready(priv, 0))
		return false;

	return true;
}

static int eip197_load_firmwares(struct safexcel_crypto_priv *priv)
{
	const char *fw_name[] = {"ifpp.bin", "ipue.bin"};
	const struct firmware *fw[FW_NB];
	char fw_path[37], *dir = NULL;
	int i, j, ret = 0, pe;
	int ipuesz, ifppsz, minifw = 0;

	if (priv->version == EIP197D_MRVL)
		dir = "eip197d";
	else if (priv->version == EIP197B_MRVL ||
		 priv->version == EIP197_DEVBRD)
		dir = "eip197b";
	else
		return -ENODEV;

retry_fw:
	for (i = 0; i < FW_NB; i++) {
		snprintf(fw_path, 37, "inside-secure/%s/%s", dir, fw_name[i]);
		ret = firmware_request_nowarn(&fw[i], fw_path, priv->dev);
		if (ret) {
			if (minifw || priv->version != EIP197B_MRVL)
				goto release_fw;

			/* Fallback to the old firmware location for the
			 * EIP197b.
			 */
			ret = firmware_request_nowarn(&fw[i], fw_name[i],
						      priv->dev);
			if (ret)
				goto release_fw;
		}
	}

	eip197_init_firmware(priv);

	ifppsz = eip197_write_firmware(priv, fw[FW_IFPP]);

	/* Enable access to IPUE program memories */
	for (pe = 0; pe < priv->config.pes; pe++)
		writel(EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN,
		       EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));

	ipuesz = eip197_write_firmware(priv, fw[FW_IPUE]);

	if (eip197_start_firmware(priv, ipuesz, ifppsz, minifw)) {
		dev_dbg(priv->dev, "Firmware loaded successfully\n");
		return 0;
	}

	ret = -ENODEV;

release_fw:
	for (j = 0; j < i; j++)
		release_firmware(fw[j]);

	if (!minifw) {
		/* Retry with minifw path */
		dev_dbg(priv->dev, "Firmware set not (fully) present or init failed, falling back to BCLA mode\n");
		dir = "eip197_minifw";
		minifw = 1;
		goto retry_fw;
	}

	dev_dbg(priv->dev, "Firmware load failed.\n");

	return ret;
}

static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv)
{
	u32 cd_size_rnd, val;
	int i, cd_fetch_cnt;

	cd_size_rnd = (priv->config.cd_size +
		       (BIT(priv->hwconfig.hwdataw) - 1)) >>
		      priv->hwconfig.hwdataw;
	/* determine number of CD's we can fetch into the CD FIFO as 1 block */
	if (priv->flags & SAFEXCEL_HW_EIP197) {
		/* EIP197: try to fetch enough in 1 go to keep all pipes busy */
		cd_fetch_cnt = (1 << priv->hwconfig.hwcfsize) / cd_size_rnd;
		cd_fetch_cnt = min_t(uint, cd_fetch_cnt,
				     (priv->config.pes * EIP197_FETCH_DEPTH));
	} else {
		/* for the EIP97, just fetch all that fits minus 1 */
		cd_fetch_cnt = ((1 << priv->hwconfig.hwcfsize) /
				cd_size_rnd) - 1;
	}
	/*
	 * Since we're using command desc's way larger than formally specified,
	 * we need to check whether we can fit even 1 for low-end EIP196's!
	 */
	if (!cd_fetch_cnt) {
		dev_err(priv->dev, "Unable to fit even 1 command desc!\n");
		return -ENODEV;
	}

	for (i = 0; i < priv->config.rings; i++) {
		/* ring base address */
		writel(lower_32_bits(priv->ring[i].cdr.base_dma),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(upper_32_bits(priv->ring[i].cdr.base_dma),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

		writel(EIP197_xDR_DESC_MODE_64BIT | EIP197_CDR_DESC_MODE_ADCP |
		       (priv->config.cd_offset << 14) | priv->config.cd_size,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
		writel(((cd_fetch_cnt *
			 (cd_size_rnd << priv->hwconfig.hwdataw)) << 16) |
		       (cd_fetch_cnt * (priv->config.cd_offset / sizeof(u32))),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Configure DMA tx control */
		val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
		val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
		writel(val, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);

		/* clear any pending interrupt */
		writel(GENMASK(5, 0),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
	}

	return 0;
}

static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv)
{
	u32 rd_size_rnd, val;
	int i, rd_fetch_cnt;

	/* determine number of RD's we can fetch into the FIFO as one block */
	rd_size_rnd = (EIP197_RD64_FETCH_SIZE +
		       (BIT(priv->hwconfig.hwdataw) - 1)) >>
		      priv->hwconfig.hwdataw;
	if (priv->flags & SAFEXCEL_HW_EIP197) {
		/* EIP197: try to fetch enough in 1 go to keep all pipes busy */
		rd_fetch_cnt = (1 << priv->hwconfig.hwrfsize) / rd_size_rnd;
		rd_fetch_cnt = min_t(uint, rd_fetch_cnt,
				     (priv->config.pes * EIP197_FETCH_DEPTH));
	} else {
		/* for the EIP97, just fetch all that fits minus 1 */
		rd_fetch_cnt = ((1 << priv->hwconfig.hwrfsize) /
				rd_size_rnd) - 1;
	}

	for (i = 0; i < priv->config.rings; i++) {
		/* ring base address */
		writel(lower_32_bits(priv->ring[i].rdr.base_dma),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(upper_32_bits(priv->ring[i].rdr.base_dma),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

		writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.rd_offset << 14) |
		       priv->config.rd_size,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
		writel(((rd_fetch_cnt *
			 (rd_size_rnd << priv->hwconfig.hwdataw)) << 16) |
		       (rd_fetch_cnt * (priv->config.rd_offset / sizeof(u32))),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Configure DMA tx control */
		val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
		val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
		val |= EIP197_HIA_xDR_WR_RES_BUF | EIP197_HIA_xDR_WR_CTRL_BUF;
		writel(val,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);

		/* clear any pending interrupt */
		writel(GENMASK(7, 0),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);

		/* enable ring interrupt */
		val = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
		val |= EIP197_RDR_IRQ(i);
		writel(val, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
	}

	return 0;
}
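
/*
 * Overview of the init sequence implemented below (as a reading aid, the
 * authoritative order is the code itself): master/bus control and global
 * interrupts first, then per processing engine the data fetch (DFE) and
 * data store (DSE) engines, buffer thresholds and EIP96 token/function
 * control, then the command and result descriptor rings, the transform
 * record cache (full or simple, depending on the device) and the ICE
 * firmware, before the CDR/RDR rings are finally configured for use.
 */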

static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
{
	u32 val;
	int i, ret, pe, opbuflo, opbufhi;

	dev_dbg(priv->dev, "HW init: using %d pipe(s) and %d ring(s)\n",
		priv->config.pes, priv->config.rings);

	/*
	 * For EIP197's only set maximum number of TX commands to 2^5 = 32
	 * Skip for the EIP97 as it does not have this field.
	 */
	if (priv->flags & SAFEXCEL_HW_EIP197) {
		val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
		val |= EIP197_MST_CTRL_TX_MAX_CMD(5);
		writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
	}

	/* Configure wr/rd cache values */
	writel(EIP197_MST_CTRL_RD_CACHE(RD_CACHE_4BITS) |
	       EIP197_MST_CTRL_WD_CACHE(WR_CACHE_4BITS),
	       EIP197_HIA_GEN_CFG(priv) + EIP197_MST_CTRL);

	/* Interrupts reset */

	/* Disable all global interrupts */
	writel(0, EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ENABLE_CTRL);

	/* Clear any pending interrupt */
	writel(GENMASK(31, 0), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);

	/* Processing Engine configuration */
	for (pe = 0; pe < priv->config.pes; pe++) {
		/* Data Fetch Engine configuration */

		/* Reset all DFE threads */
		writel(EIP197_DxE_THR_CTRL_RESET_PE,
		       EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));

		if (priv->flags & EIP197_PE_ARB)
			/* Reset HIA input interface arbiter (if present) */
			writel(EIP197_HIA_RA_PE_CTRL_RESET,
			       EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));

		/* DMA transfer size to use */
		val = EIP197_HIA_DFE_CFG_DIS_DEBUG;
		val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(6) |
		       EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(9);
		val |= EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(6) |
		       EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(7);
		val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS);
		val |= EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(RD_CACHE_3BITS);
		writel(val, EIP197_HIA_DFE(priv) + EIP197_HIA_DFE_CFG(pe));

		/* Take the DFE threads out of their reset state */
		writel(0, EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));

		/* Configure the processing engine thresholds */
		writel(EIP197_PE_IN_xBUF_THRES_MIN(6) |
		       EIP197_PE_IN_xBUF_THRES_MAX(9),
		       EIP197_PE(priv) + EIP197_PE_IN_DBUF_THRES(pe));
		writel(EIP197_PE_IN_xBUF_THRES_MIN(6) |
		       EIP197_PE_IN_xBUF_THRES_MAX(7),
		       EIP197_PE(priv) + EIP197_PE_IN_TBUF_THRES(pe));

		if (priv->flags & SAFEXCEL_HW_EIP197)
			/* enable HIA input interface arbiter and rings */
			writel(EIP197_HIA_RA_PE_CTRL_EN |
			       GENMASK(priv->config.rings - 1, 0),
			       EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));

		/* Data Store Engine configuration */

		/* Reset all DSE threads */
		writel(EIP197_DxE_THR_CTRL_RESET_PE,
		       EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));

		/* Wait for all DSE threads to complete */
		while ((readl(EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_STAT(pe)) &
			GENMASK(15, 12)) != GENMASK(15, 12))
			;

		/* DMA transfer size to use */
		if (priv->hwconfig.hwnumpes > 4) {
			opbuflo = 9;
			opbufhi = 10;
		} else {
			opbuflo = 7;
			opbufhi = 8;
		}
		val = EIP197_HIA_DSE_CFG_DIS_DEBUG;
		val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(opbuflo) |
		       EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(opbufhi);
		val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS);
		val |= EIP197_HIA_DSE_CFG_ALWAYS_BUFFERABLE;
		/* FIXME: instability issues can occur for EIP97 but disabling
		 * it impacts performance.
		 */
		if (priv->flags & SAFEXCEL_HW_EIP197)
			val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR;
		writel(val, EIP197_HIA_DSE(priv) + EIP197_HIA_DSE_CFG(pe));

		/* Take the DSE threads out of their reset state */
		writel(0, EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));

		/* Configure the processing engine thresholds */
		writel(EIP197_PE_OUT_DBUF_THRES_MIN(opbuflo) |
		       EIP197_PE_OUT_DBUF_THRES_MAX(opbufhi),
		       EIP197_PE(priv) + EIP197_PE_OUT_DBUF_THRES(pe));

		/* Processing Engine configuration */

		/* Token & context configuration */
		val = EIP197_PE_EIP96_TOKEN_CTRL_CTX_UPDATES |
		      EIP197_PE_EIP96_TOKEN_CTRL_NO_TOKEN_WAIT |
		      EIP197_PE_EIP96_TOKEN_CTRL_ENABLE_TIMEOUT;
		writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_TOKEN_CTRL(pe));

		/* H/W capabilities selection: just enable everything */
		writel(EIP197_FUNCTION_ALL,
		       EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN(pe));
		writel(EIP197_FUNCTION_ALL,
		       EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION2_EN(pe));
	}

	/* Command Descriptor Rings prepare */
	for (i = 0; i < priv->config.rings; i++) {
		/* Clear interrupts for this ring */
		writel(GENMASK(31, 0),
		       EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CLR(i));

		/* Disable external triggering */
		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Clear the pending prepared counter */
		writel(EIP197_xDR_PREP_CLR_COUNT,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);

		/* Clear the pending processed counter */
		writel(EIP197_xDR_PROC_CLR_COUNT,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);

		writel(0,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
		writel(0,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);

		writel((EIP197_DEFAULT_RING_SIZE * priv->config.cd_offset),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
	}

	/* Result Descriptor Ring prepare */
	for (i = 0; i < priv->config.rings; i++) {
		/* Disable external triggering */
		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Clear the pending prepared counter */
		writel(EIP197_xDR_PREP_CLR_COUNT,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);

		/* Clear the pending processed counter */
		writel(EIP197_xDR_PROC_CLR_COUNT,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);

		writel(0,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
		writel(0,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);

		/* Ring size */
		writel((EIP197_DEFAULT_RING_SIZE * priv->config.rd_offset),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
	}

	for (pe = 0; pe < priv->config.pes; pe++) {
		/* Enable command descriptor rings */
		writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
		       EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));

		/* Enable result descriptor rings */
		writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
		       EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
	}

	/* Clear any HIA interrupt */
	writel(GENMASK(30, 20), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);

	if (priv->flags & EIP197_SIMPLE_TRC) {
		writel(EIP197_STRC_CONFIG_INIT |
		       EIP197_STRC_CONFIG_LARGE_REC(EIP197_CS_TRC_REC_WC) |
		       EIP197_STRC_CONFIG_SMALL_REC(EIP197_CS_TRC_REC_WC),
		       priv->base + EIP197_STRC_CONFIG);
		writel(EIP197_PE_EIP96_TOKEN_CTRL2_CTX_DONE,
		       EIP197_PE(priv) + EIP197_PE_EIP96_TOKEN_CTRL2(0));
	} else if (priv->flags & SAFEXCEL_HW_EIP197) {
		ret = eip197_trc_cache_init(priv);
		if (ret)
			return ret;
	}

	if (priv->flags & EIP197_ICE) {
		ret = eip197_load_firmwares(priv);
		if (ret)
			return ret;
	}

	return safexcel_hw_setup_cdesc_rings(priv) ?:
	       safexcel_hw_setup_rdesc_rings(priv) ?:
	       0;
}

/* Called with ring's lock taken */
static void safexcel_try_push_requests(struct safexcel_crypto_priv *priv,
				       int ring)
{
	int coal = min_t(int, priv->ring[ring].requests, EIP197_MAX_BATCH_SZ);

	if (!coal)
		return;

	/* Configure when we want an interrupt */
	writel(EIP197_HIA_RDR_THRESH_PKT_MODE |
	       EIP197_HIA_RDR_THRESH_PROC_PKT(coal),
	       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_THRESH);
}
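
/*
 * Dequeue path note: requests are pulled from the per-ring crypto queue and
 * handed to the context's ->send() callback, which reports the number of
 * command and result descriptors it queued. A request that could not be
 * fully resourced is parked in ring->req / ring->backlog and retried on the
 * next dequeue call; requests whose input was entirely cached (no
 * descriptors issued) are simply skipped. Only at the end are the prepared
 * descriptor counts pushed to the CDR/RDR registers, so the engine sees one
 * batched update per dequeue run.
 */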

void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
{
	struct crypto_async_request *req, *backlog;
	struct safexcel_context *ctx;
	int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;

	/* If a request wasn't properly dequeued because of a lack of resources,
	 * proceed with it first.
	 */
	req = priv->ring[ring].req;
	backlog = priv->ring[ring].backlog;
	if (req)
		goto handle_req;

	while (true) {
		spin_lock_bh(&priv->ring[ring].queue_lock);
		backlog = crypto_get_backlog(&priv->ring[ring].queue);
		req = crypto_dequeue_request(&priv->ring[ring].queue);
		spin_unlock_bh(&priv->ring[ring].queue_lock);

		if (!req) {
			priv->ring[ring].req = NULL;
			priv->ring[ring].backlog = NULL;
			goto finalize;
		}

handle_req:
		ctx = crypto_tfm_ctx(req->tfm);
		ret = ctx->send(req, ring, &commands, &results);
		if (ret)
			goto request_failed;

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		/* In case the send() helper did not issue any command to push
		 * to the engine because the input data was cached, continue to
		 * dequeue other requests as this is valid and not an error.
		 */
		if (!commands && !results)
			continue;

		cdesc += commands;
		rdesc += results;
		nreq++;
	}

request_failed:
	/* Not enough resources to handle all the requests. Bail out and save
	 * the request and the backlog for the next dequeue call (per-ring).
	 */
	priv->ring[ring].req = req;
	priv->ring[ring].backlog = backlog;

finalize:
	if (!nreq)
		return;

	spin_lock_bh(&priv->ring[ring].lock);

	priv->ring[ring].requests += nreq;

	if (!priv->ring[ring].busy) {
		safexcel_try_push_requests(priv, ring);
		priv->ring[ring].busy = true;
	}

	spin_unlock_bh(&priv->ring[ring].lock);

	/* let the RDR know we have pending descriptors */
	writel((rdesc * priv->config.rd_offset),
	       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);

	/* let the CDR know we have pending descriptors */
	writel((cdesc * priv->config.cd_offset),
	       EIP197_HIA_CDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
}
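
/*
 * Result error classification used below (bit numbers refer to the EIP96
 * result token error_code field as interpreted by this driver): bits 1, 2,
 * 5, 6 and 14 (mask 0x4066) are treated as fatal engine errors, bits 0, 3,
 * 4 and 7 indicate a problem with the input (block size, length, overflow)
 * and take priority over bit 9, which reports an authentication failure;
 * anything else is reported as a generic input error.
 */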

inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
				       void *rdp)
{
	struct safexcel_result_desc *rdesc = rdp;
	struct result_data_desc *result_data = rdp + priv->config.res_offset;

	if (likely((!rdesc->last_seg) || /* Rest only valid if last seg! */
		   ((!rdesc->descriptor_overflow) &&
		    (!rdesc->buffer_overflow) &&
		    (!result_data->error_code))))
		return 0;

	if (rdesc->descriptor_overflow)
		dev_err(priv->dev, "Descriptor overflow detected");

	if (rdesc->buffer_overflow)
		dev_err(priv->dev, "Buffer overflow detected");

	if (result_data->error_code & 0x4066) {
		/* Fatal error (bits 1,2,5,6 & 14) */
		dev_err(priv->dev,
			"result descriptor error (%x)",
			result_data->error_code);
		return -EIO;
	} else if (result_data->error_code &
		   (BIT(7) | BIT(4) | BIT(3) | BIT(0))) {
		/*
		 * Give priority over authentication fails:
		 * Blocksize, length & overflow errors,
		 * something wrong with the input!
		 */
		return -EINVAL;
	} else if (result_data->error_code & BIT(9)) {
		/* Authentication failed */
		return -EBADMSG;
	}

	/* All other non-fatal errors */
	return -EINVAL;
}

inline void safexcel_rdr_req_set(struct safexcel_crypto_priv *priv,
				 int ring,
				 struct safexcel_result_desc *rdesc,
				 struct crypto_async_request *req)
{
	int i = safexcel_ring_rdr_rdesc_index(priv, ring, rdesc);

	priv->ring[ring].rdr_req[i] = req;
}

inline struct crypto_async_request *
safexcel_rdr_req_get(struct safexcel_crypto_priv *priv, int ring)
{
	int i = safexcel_ring_first_rdr_index(priv, ring);

	return priv->ring[ring].rdr_req[i];
}

void safexcel_complete(struct safexcel_crypto_priv *priv, int ring)
{
	struct safexcel_command_desc *cdesc;

	/* Acknowledge the command descriptors */
	do {
		cdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].cdr);
		if (IS_ERR(cdesc)) {
			dev_err(priv->dev,
				"Could not retrieve the command descriptor\n");
			return;
		}
	} while (!cdesc->last_seg);
}

void safexcel_inv_complete(struct crypto_async_request *req, int error)
{
	struct safexcel_inv_result *result = req->data;

	if (error == -EINPROGRESS)
		return;

	result->error = error;
	complete(&result->completion);
}

int safexcel_invalidate_cache(struct crypto_async_request *async,
			      struct safexcel_crypto_priv *priv,
			      dma_addr_t ctxr_dma, int ring)
{
	struct safexcel_command_desc *cdesc;
	struct safexcel_result_desc *rdesc;
	struct safexcel_token *dmmy;
	int ret = 0;

	/* Prepare command descriptor */
	cdesc = safexcel_add_cdesc(priv, ring, true, true, 0, 0, 0, ctxr_dma,
				   &dmmy);
	if (IS_ERR(cdesc))
		return PTR_ERR(cdesc);

	cdesc->control_data.type = EIP197_TYPE_EXTENDED;
	cdesc->control_data.options = 0;
	cdesc->control_data.context_lo &= ~EIP197_CONTEXT_SIZE_MASK;
	cdesc->control_data.control0 = CONTEXT_CONTROL_INV_TR;

	/* Prepare result descriptor */
	rdesc = safexcel_add_rdesc(priv, ring, true, true, 0, 0);

	if (IS_ERR(rdesc)) {
		ret = PTR_ERR(rdesc);
		goto cdesc_rollback;
	}

	safexcel_rdr_req_set(priv, ring, rdesc, async);

	return ret;

cdesc_rollback:
	safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);

	return ret;
}
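
/*
 * Result processing note: the RDR PROC_COUNT register reports how many
 * packets the engine has completed since it was last acknowledged. The
 * handler below processes that many requests, acknowledges both the packet
 * and descriptor counts in one register write, and loops again if the
 * hardware counter was saturated, so no completions are lost under load.
 */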

static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv *priv,
						     int ring)
{
	struct crypto_async_request *req;
	struct safexcel_context *ctx;
	int ret, i, nreq, ndesc, tot_descs, handled = 0;
	bool should_complete;

handle_results:
	tot_descs = 0;

	nreq = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
	nreq >>= EIP197_xDR_PROC_xD_PKT_OFFSET;
	nreq &= EIP197_xDR_PROC_xD_PKT_MASK;
	if (!nreq)
		goto requests_left;

	for (i = 0; i < nreq; i++) {
		req = safexcel_rdr_req_get(priv, ring);

		ctx = crypto_tfm_ctx(req->tfm);
		ndesc = ctx->handle_result(priv, ring, req,
					   &should_complete, &ret);
		if (ndesc < 0) {
			dev_err(priv->dev, "failed to handle result (%d)\n",
				ndesc);
			goto acknowledge;
		}

		if (should_complete) {
			local_bh_disable();
			req->complete(req, ret);
			local_bh_enable();
		}

		tot_descs += ndesc;
		handled++;
	}

acknowledge:
	if (i)
		writel(EIP197_xDR_PROC_xD_PKT(i) |
		       (tot_descs * priv->config.rd_offset),
		       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);

	/* If the number of requests overflowed the counter, try to proceed more
	 * requests.
	 */
	if (nreq == EIP197_xDR_PROC_xD_PKT_MASK)
		goto handle_results;

requests_left:
	spin_lock_bh(&priv->ring[ring].lock);

	priv->ring[ring].requests -= handled;
	safexcel_try_push_requests(priv, ring);

	if (!priv->ring[ring].requests)
		priv->ring[ring].busy = false;

	spin_unlock_bh(&priv->ring[ring].lock);
}

static void safexcel_dequeue_work(struct work_struct *work)
{
	struct safexcel_work_data *data =
			container_of(work, struct safexcel_work_data, work);

	safexcel_dequeue(data->priv, data->ring);
}

struct safexcel_ring_irq_data {
	struct safexcel_crypto_priv *priv;
	int ring;
};
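
/*
 * Ring interrupt handling is split in two: the hard IRQ handler below only
 * inspects and acknowledges the RDR status and, when the processed-packet
 * threshold fired, wakes the threaded handler, which walks the result
 * descriptors and then kicks the per-ring workqueue to dequeue more work.
 */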

static irqreturn_t safexcel_irq_ring(int irq, void *data)
{
	struct safexcel_ring_irq_data *irq_data = data;
	struct safexcel_crypto_priv *priv = irq_data->priv;
	int ring = irq_data->ring, rc = IRQ_NONE;
	u32 status, stat;

	status = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLED_STAT(ring));
	if (!status)
		return rc;

	/* RDR interrupts */
	if (status & EIP197_RDR_IRQ(ring)) {
		stat = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);

		if (unlikely(stat & EIP197_xDR_ERR)) {
			/*
			 * Fatal error, the RDR is unusable and must be
			 * reinitialized. This should not happen under
			 * normal circumstances.
			 */
			dev_err(priv->dev, "RDR: fatal error.\n");
		} else if (likely(stat & EIP197_xDR_THRESH)) {
			rc = IRQ_WAKE_THREAD;
		}

		/* ACK the interrupts */
		writel(stat & 0xff,
		       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);
	}

	/* ACK the interrupts */
	writel(status, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ACK(ring));

	return rc;
}

static irqreturn_t safexcel_irq_ring_thread(int irq, void *data)
{
	struct safexcel_ring_irq_data *irq_data = data;
	struct safexcel_crypto_priv *priv = irq_data->priv;
	int ring = irq_data->ring;

	safexcel_handle_result_descriptor(priv, ring);

	queue_work(priv->ring[ring].workqueue,
		   &priv->ring[ring].work_data.work);

	return IRQ_HANDLED;
}

static int safexcel_request_ring_irq(void *pdev, int irqid,
				     int is_pci_dev,
				     int ring_id,
				     irq_handler_t handler,
				     irq_handler_t threaded_handler,
				     struct safexcel_ring_irq_data *ring_irq_priv)
{
	int ret, irq, cpu;
	struct device *dev;

	if (IS_ENABLED(CONFIG_PCI) && is_pci_dev) {
		struct pci_dev *pci_pdev = pdev;

		dev = &pci_pdev->dev;
		irq = pci_irq_vector(pci_pdev, irqid);
		if (irq < 0) {
			dev_err(dev, "unable to get device MSI IRQ %d (err %d)\n",
				irqid, irq);
			return irq;
		}
	} else if (IS_ENABLED(CONFIG_OF)) {
		struct platform_device *plf_pdev = pdev;
		char irq_name[6] = {0}; /* "ringX\0" */

		snprintf(irq_name, 6, "ring%d", irqid);
		dev = &plf_pdev->dev;
		irq = platform_get_irq_byname(plf_pdev, irq_name);

		if (irq < 0) {
			dev_err(dev, "unable to get IRQ '%s' (err %d)\n",
				irq_name, irq);
			return irq;
		}
	} else {
		return -ENXIO;
	}

	ret = devm_request_threaded_irq(dev, irq, handler,
					threaded_handler, IRQF_ONESHOT,
					dev_name(dev), ring_irq_priv);
	if (ret) {
		dev_err(dev, "unable to request IRQ %d\n", irq);
		return ret;
	}

	/* Set affinity */
	cpu = cpumask_local_spread(ring_id, NUMA_NO_NODE);
	irq_set_affinity_hint(irq, get_cpu_mask(cpu));

	return irq;
}

static struct safexcel_alg_template *safexcel_algs[] = {
	&safexcel_alg_ecb_des,
	&safexcel_alg_cbc_des,
	&safexcel_alg_ecb_des3_ede,
	&safexcel_alg_cbc_des3_ede,
	&safexcel_alg_ecb_aes,
	&safexcel_alg_cbc_aes,
	&safexcel_alg_cfb_aes,
	&safexcel_alg_ofb_aes,
	&safexcel_alg_ctr_aes,
	&safexcel_alg_md5,
	&safexcel_alg_sha1,
	&safexcel_alg_sha224,
	&safexcel_alg_sha256,
	&safexcel_alg_sha384,
	&safexcel_alg_sha512,
	&safexcel_alg_hmac_md5,
	&safexcel_alg_hmac_sha1,
	&safexcel_alg_hmac_sha224,
	&safexcel_alg_hmac_sha256,
	&safexcel_alg_hmac_sha384,
	&safexcel_alg_hmac_sha512,
	&safexcel_alg_authenc_hmac_sha1_cbc_aes,
	&safexcel_alg_authenc_hmac_sha224_cbc_aes,
	&safexcel_alg_authenc_hmac_sha256_cbc_aes,
	&safexcel_alg_authenc_hmac_sha384_cbc_aes,
	&safexcel_alg_authenc_hmac_sha512_cbc_aes,
	&safexcel_alg_authenc_hmac_sha1_cbc_des3_ede,
	&safexcel_alg_authenc_hmac_sha1_ctr_aes,
	&safexcel_alg_authenc_hmac_sha224_ctr_aes,
	&safexcel_alg_authenc_hmac_sha256_ctr_aes,
	&safexcel_alg_authenc_hmac_sha384_ctr_aes,
	&safexcel_alg_authenc_hmac_sha512_ctr_aes,
	&safexcel_alg_xts_aes,
	&safexcel_alg_gcm,
	&safexcel_alg_ccm,
	&safexcel_alg_crc32,
	&safexcel_alg_cbcmac,
	&safexcel_alg_xcbcmac,
	&safexcel_alg_cmac,
	&safexcel_alg_chacha20,
	&safexcel_alg_chachapoly,
	&safexcel_alg_chachapoly_esp,
	&safexcel_alg_sm3,
	&safexcel_alg_hmac_sm3,
	&safexcel_alg_ecb_sm4,
	&safexcel_alg_cbc_sm4,
	&safexcel_alg_ofb_sm4,
	&safexcel_alg_cfb_sm4,
	&safexcel_alg_ctr_sm4,
	&safexcel_alg_authenc_hmac_sha1_cbc_sm4,
	&safexcel_alg_authenc_hmac_sm3_cbc_sm4,
	&safexcel_alg_authenc_hmac_sha1_ctr_sm4,
	&safexcel_alg_authenc_hmac_sm3_ctr_sm4,
	&safexcel_alg_sha3_224,
	&safexcel_alg_sha3_256,
	&safexcel_alg_sha3_384,
	&safexcel_alg_sha3_512,
	&safexcel_alg_hmac_sha3_224,
	&safexcel_alg_hmac_sha3_256,
	&safexcel_alg_hmac_sha3_384,
	&safexcel_alg_hmac_sha3_512,
	&safexcel_alg_authenc_hmac_sha1_cbc_des,
	&safexcel_alg_authenc_hmac_sha256_cbc_des3_ede,
	&safexcel_alg_authenc_hmac_sha224_cbc_des3_ede,
	&safexcel_alg_authenc_hmac_sha512_cbc_des3_ede,
	&safexcel_alg_authenc_hmac_sha384_cbc_des3_ede,
	&safexcel_alg_authenc_hmac_sha256_cbc_des,
	&safexcel_alg_authenc_hmac_sha224_cbc_des,
	&safexcel_alg_authenc_hmac_sha512_cbc_des,
	&safexcel_alg_authenc_hmac_sha384_cbc_des,
	&safexcel_alg_rfc4106_gcm,
	&safexcel_alg_rfc4543_gcm,
	&safexcel_alg_rfc4309_ccm,
};

static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv)
{
	int i, j, ret = 0;

	for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
		safexcel_algs[i]->priv = priv;

		/* Do we have all required base algorithms available? */
		if ((safexcel_algs[i]->algo_mask & priv->hwconfig.algo_flags) !=
		    safexcel_algs[i]->algo_mask)
			/* No, so don't register this ciphersuite */
			continue;

		if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
			ret = crypto_register_skcipher(&safexcel_algs[i]->alg.skcipher);
		else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
			ret = crypto_register_aead(&safexcel_algs[i]->alg.aead);
		else
			ret = crypto_register_ahash(&safexcel_algs[i]->alg.ahash);

		if (ret)
			goto fail;
	}

	return 0;

fail:
	for (j = 0; j < i; j++) {
		/* Do we have all required base algorithms available? */
		if ((safexcel_algs[j]->algo_mask & priv->hwconfig.algo_flags) !=
		    safexcel_algs[j]->algo_mask)
			/* No, so don't unregister this ciphersuite */
			continue;

		if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
			crypto_unregister_skcipher(&safexcel_algs[j]->alg.skcipher);
		else if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_AEAD)
			crypto_unregister_aead(&safexcel_algs[j]->alg.aead);
		else
			crypto_unregister_ahash(&safexcel_algs[j]->alg.ahash);
	}

	return ret;
}

static void safexcel_unregister_algorithms(struct safexcel_crypto_priv *priv)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
		/* Do we have all required base algorithms available? */
		if ((safexcel_algs[i]->algo_mask & priv->hwconfig.algo_flags) !=
		    safexcel_algs[i]->algo_mask)
			/* No, so don't unregister this ciphersuite */
			continue;

		if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
			crypto_unregister_skcipher(&safexcel_algs[i]->alg.skcipher);
		else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
			crypto_unregister_aead(&safexcel_algs[i]->alg.aead);
		else
			crypto_unregister_ahash(&safexcel_algs[i]->alg.ahash);
	}
}
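
/*
 * Offset computation note for safexcel_configure() below: descriptor
 * offsets are first computed in 32 bit words, rounded up to a full HIA bus
 * word (mask = bus width in words minus one), and only then converted to
 * bytes. As an illustration with assumed values (not read from real
 * hardware): with hwdataw = 2 the bus is 4 words wide, so a 10 word command
 * descriptor gets a cd_offset of 12 words = 48 bytes, and the result
 * descriptor laid out as res_offset + EIP197_RD64_RESULT_SIZE is rounded up
 * in exactly the same way.
 */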

static void safexcel_configure(struct safexcel_crypto_priv *priv)
{
	u32 mask = BIT(priv->hwconfig.hwdataw) - 1;

	priv->config.pes = priv->hwconfig.hwnumpes;
	priv->config.rings = min_t(u32, priv->hwconfig.hwnumrings, max_rings);
	/* Cannot currently support more rings than we have ring AICs! */
	priv->config.rings = min_t(u32, priv->config.rings,
				   priv->hwconfig.hwnumraic);

	priv->config.cd_size = EIP197_CD64_FETCH_SIZE;
	priv->config.cd_offset = (priv->config.cd_size + mask) & ~mask;
	priv->config.cdsh_offset = (EIP197_MAX_TOKENS + mask) & ~mask;

	/* res token is behind the descr, but ofs must be rounded to buswdth */
	priv->config.res_offset = (EIP197_RD64_FETCH_SIZE + mask) & ~mask;
	/* now the size of the descr is this 1st part plus the result struct */
	priv->config.rd_size = priv->config.res_offset +
			       EIP197_RD64_RESULT_SIZE;
	priv->config.rd_offset = (priv->config.rd_size + mask) & ~mask;

	/* convert dwords to bytes */
	priv->config.cd_offset *= sizeof(u32);
	priv->config.cdsh_offset *= sizeof(u32);
	priv->config.rd_offset *= sizeof(u32);
	priv->config.res_offset *= sizeof(u32);
}

static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv)
{
	struct safexcel_register_offsets *offsets = &priv->offsets;

	if (priv->flags & SAFEXCEL_HW_EIP197) {
		offsets->hia_aic	= EIP197_HIA_AIC_BASE;
		offsets->hia_aic_g	= EIP197_HIA_AIC_G_BASE;
		offsets->hia_aic_r	= EIP197_HIA_AIC_R_BASE;
		offsets->hia_aic_xdr	= EIP197_HIA_AIC_xDR_BASE;
		offsets->hia_dfe	= EIP197_HIA_DFE_BASE;
		offsets->hia_dfe_thr	= EIP197_HIA_DFE_THR_BASE;
		offsets->hia_dse	= EIP197_HIA_DSE_BASE;
		offsets->hia_dse_thr	= EIP197_HIA_DSE_THR_BASE;
		offsets->hia_gen_cfg	= EIP197_HIA_GEN_CFG_BASE;
		offsets->pe		= EIP197_PE_BASE;
		offsets->global		= EIP197_GLOBAL_BASE;
	} else {
		offsets->hia_aic	= EIP97_HIA_AIC_BASE;
		offsets->hia_aic_g	= EIP97_HIA_AIC_G_BASE;
		offsets->hia_aic_r	= EIP97_HIA_AIC_R_BASE;
		offsets->hia_aic_xdr	= EIP97_HIA_AIC_xDR_BASE;
		offsets->hia_dfe	= EIP97_HIA_DFE_BASE;
		offsets->hia_dfe_thr	= EIP97_HIA_DFE_THR_BASE;
		offsets->hia_dse	= EIP97_HIA_DSE_BASE;
		offsets->hia_dse_thr	= EIP97_HIA_DSE_THR_BASE;
		offsets->hia_gen_cfg	= EIP97_HIA_GEN_CFG_BASE;
		offsets->pe		= EIP97_PE_BASE;
		offsets->global		= EIP97_GLOBAL_BASE;
	}
}

/*
 * Generic part of probe routine, shared by platform and PCI driver
 *
 * Assumes IO resources have been mapped, private data mem has been allocated,
 * clocks have been enabled, device pointer has been assigned etc.
 */
static int safexcel_probe_generic(void *pdev,
				  struct safexcel_crypto_priv *priv,
				  int is_pci_dev)
{
	struct device *dev = priv->dev;
	u32 peid, version, mask, val, hiaopt, hwopt, peopt;
	int i, ret, hwctg;

	priv->context_pool = dmam_pool_create("safexcel-context", dev,
					      sizeof(struct safexcel_context_record),
					      1, 0);
	if (!priv->context_pool)
		return -ENOMEM;

	/*
	 * First try the EIP97 HIA version regs
	 * For the EIP197, this is guaranteed to NOT return any of the test
	 * values
	 */
	version = readl(priv->base + EIP97_HIA_AIC_BASE + EIP197_HIA_VERSION);

	mask = 0;  /* do not swap */
	if (EIP197_REG_LO16(version) == EIP197_HIA_VERSION_LE) {
		priv->hwconfig.hiaver = EIP197_VERSION_MASK(version);
	} else if (EIP197_REG_HI16(version) == EIP197_HIA_VERSION_BE) {
		/* read back byte-swapped, so complement byte swap bits */
		mask = EIP197_MST_CTRL_BYTE_SWAP_BITS;
		priv->hwconfig.hiaver = EIP197_VERSION_SWAP(version);
	} else {
		/* So it wasn't an EIP97 ... maybe it's an EIP197? */
		version = readl(priv->base + EIP197_HIA_AIC_BASE +
				EIP197_HIA_VERSION);
		if (EIP197_REG_LO16(version) == EIP197_HIA_VERSION_LE) {
			priv->hwconfig.hiaver = EIP197_VERSION_MASK(version);
			priv->flags |= SAFEXCEL_HW_EIP197;
		} else if (EIP197_REG_HI16(version) ==
			   EIP197_HIA_VERSION_BE) {
			/* read back byte-swapped, so complement swap bits */
			mask = EIP197_MST_CTRL_BYTE_SWAP_BITS;
			priv->hwconfig.hiaver = EIP197_VERSION_SWAP(version);
			priv->flags |= SAFEXCEL_HW_EIP197;
		} else {
			return -ENODEV;
		}
	}

	/* Now initialize the reg offsets based on the probing info so far */
	safexcel_init_register_offsets(priv);

	/*
	 * If the version was read byte-swapped, we need to flip the device
	 * swapping. Keep in mind here, though, that what we write will also be
	 * byte-swapped ...
	 */
	if (mask) {
		val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
		val = val ^ (mask >> 24); /* toggle byte swap bits */
		writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
	}

	/*
	 * We're not done probing yet! We may fall through to here if no HIA
	 * was found at all. So, with the endianness presumably correct now and
	 * the offsets setup, *really* probe for the EIP97/EIP197.
	 */
	version = readl(EIP197_GLOBAL(priv) + EIP197_VERSION);
	if (((priv->flags & SAFEXCEL_HW_EIP197) &&
	     (EIP197_REG_LO16(version) != EIP197_VERSION_LE) &&
	     (EIP197_REG_LO16(version) != EIP196_VERSION_LE)) ||
	    ((!(priv->flags & SAFEXCEL_HW_EIP197) &&
	      (EIP197_REG_LO16(version) != EIP97_VERSION_LE)))) {
		/*
		 * We did not find the device that matched our initial probing
		 * (or our initial probing failed) Report appropriate error.
		 */
		dev_err(priv->dev, "Probing for EIP97/EIP19x failed - no such device (read %08x)\n",
			version);
		return -ENODEV;
	}

	priv->hwconfig.hwver = EIP197_VERSION_MASK(version);
	hwctg = version >> 28;
	peid = version & 255;

	/* Detect EIP206 processing pipe */
	version = readl(EIP197_PE(priv) + EIP197_PE_VERSION(0));
	if (EIP197_REG_LO16(version) != EIP206_VERSION_LE) {
		dev_err(priv->dev, "EIP%d: EIP206 not detected\n", peid);
		return -ENODEV;
	}
	priv->hwconfig.ppver = EIP197_VERSION_MASK(version);

	/* Detect EIP96 packet engine and version */
	version = readl(EIP197_PE(priv) + EIP197_PE_EIP96_VERSION(0));
	if (EIP197_REG_LO16(version) != EIP96_VERSION_LE) {
		dev_err(dev, "EIP%d: EIP96 not detected.\n", peid);
		return -ENODEV;
	}
	priv->hwconfig.pever = EIP197_VERSION_MASK(version);

	hwopt = readl(EIP197_GLOBAL(priv) + EIP197_OPTIONS);
	hiaopt = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_OPTIONS);

	priv->hwconfig.icever = 0;
	priv->hwconfig.ocever = 0;
	priv->hwconfig.psever = 0;
	if (priv->flags & SAFEXCEL_HW_EIP197) {
		/* EIP197 */
		peopt = readl(EIP197_PE(priv) + EIP197_PE_OPTIONS(0));

		priv->hwconfig.hwdataw  = (hiaopt >> EIP197_HWDATAW_OFFSET) &
					  EIP197_HWDATAW_MASK;
		priv->hwconfig.hwcfsize = ((hiaopt >> EIP197_CFSIZE_OFFSET) &
					   EIP197_CFSIZE_MASK) +
					  EIP197_CFSIZE_ADJUST;
		priv->hwconfig.hwrfsize = ((hiaopt >> EIP197_RFSIZE_OFFSET) &
					   EIP197_RFSIZE_MASK) +
					  EIP197_RFSIZE_ADJUST;
		priv->hwconfig.hwnumpes = (hiaopt >> EIP197_N_PES_OFFSET) &
					  EIP197_N_PES_MASK;
		priv->hwconfig.hwnumrings = (hiaopt >> EIP197_N_RINGS_OFFSET) &
					    EIP197_N_RINGS_MASK;
		if (hiaopt & EIP197_HIA_OPT_HAS_PE_ARB)
			priv->flags |= EIP197_PE_ARB;
		if (EIP206_OPT_ICE_TYPE(peopt) == 1) {
			priv->flags |= EIP197_ICE;
			/* Detect ICE EIP207 class. engine and version */
			version = readl(EIP197_PE(priv) +
					EIP197_PE_ICE_VERSION(0));
			if (EIP197_REG_LO16(version) != EIP207_VERSION_LE) {
				dev_err(dev, "EIP%d: ICE EIP207 not detected.\n",
					peid);
				return -ENODEV;
			}
			priv->hwconfig.icever = EIP197_VERSION_MASK(version);
		}
		if (EIP206_OPT_OCE_TYPE(peopt) == 1) {
			priv->flags |= EIP197_OCE;
			/* Detect EIP96PP packet stream editor and version */
			version = readl(EIP197_PE(priv) + EIP197_PE_PSE_VERSION(0));
			if (EIP197_REG_LO16(version) != EIP96_VERSION_LE) {
				dev_err(dev, "EIP%d: EIP96PP not detected.\n", peid);
				return -ENODEV;
			}
			priv->hwconfig.psever = EIP197_VERSION_MASK(version);
			/* Detect OCE EIP207 class. engine and version */
			version = readl(EIP197_PE(priv) +
					EIP197_PE_ICE_VERSION(0));
			if (EIP197_REG_LO16(version) != EIP207_VERSION_LE) {
				dev_err(dev, "EIP%d: OCE EIP207 not detected.\n",
					peid);
				return -ENODEV;
			}
			priv->hwconfig.ocever = EIP197_VERSION_MASK(version);
		}
		/* If not a full TRC, then assume simple TRC */
		if (!(hwopt & EIP197_OPT_HAS_TRC))
			priv->flags |= EIP197_SIMPLE_TRC;
		/* EIP197 always has SOME form of TRC */
		priv->flags |= EIP197_TRC_CACHE;
	} else {
		/* EIP97 */
		priv->hwconfig.hwdataw  = (hiaopt >> EIP197_HWDATAW_OFFSET) &
					  EIP97_HWDATAW_MASK;
		priv->hwconfig.hwcfsize = (hiaopt >> EIP97_CFSIZE_OFFSET) &
					  EIP97_CFSIZE_MASK;
		priv->hwconfig.hwrfsize = (hiaopt >> EIP97_RFSIZE_OFFSET) &
					  EIP97_RFSIZE_MASK;
		priv->hwconfig.hwnumpes = 1; /* by definition */
		priv->hwconfig.hwnumrings = (hiaopt >> EIP197_N_RINGS_OFFSET) &
					    EIP197_N_RINGS_MASK;
	}

	/* Scan for ring AIC's */
	for (i = 0; i < EIP197_MAX_RING_AIC; i++) {
		version = readl(EIP197_HIA_AIC_R(priv) +
				EIP197_HIA_AIC_R_VERSION(i));
		if (EIP197_REG_LO16(version) != EIP201_VERSION_LE)
			break;
	}
	priv->hwconfig.hwnumraic = i;
	/* Low-end EIP196 may not have any ring AIC's ... */
	if (!priv->hwconfig.hwnumraic) {
		dev_err(priv->dev, "No ring interrupt controller present!\n");
		return -ENODEV;
	}

	/* Get supported algorithms from EIP96 transform engine */
	priv->hwconfig.algo_flags = readl(EIP197_PE(priv) +
					  EIP197_PE_EIP96_OPTIONS(0));

	/* Print single info line describing what we just detected */
	dev_info(priv->dev, "EIP%d:%x(%d,%d,%d,%d)-HIA:%x(%d,%d,%d),PE:%x/%x(alg:%08x)/%x/%x/%x\n",
		 peid, priv->hwconfig.hwver, hwctg, priv->hwconfig.hwnumpes,
		 priv->hwconfig.hwnumrings, priv->hwconfig.hwnumraic,
		 priv->hwconfig.hiaver, priv->hwconfig.hwdataw,
		 priv->hwconfig.hwcfsize, priv->hwconfig.hwrfsize,
		 priv->hwconfig.ppver, priv->hwconfig.pever,
		 priv->hwconfig.algo_flags, priv->hwconfig.icever,
		 priv->hwconfig.ocever, priv->hwconfig.psever);

	safexcel_configure(priv);

	if (IS_ENABLED(CONFIG_PCI) && priv->version == EIP197_DEVBRD) {
		/*
		 * Request MSI vectors for global + 1 per ring -
		 * or just 1 for older dev images
		 */
		struct pci_dev *pci_pdev = pdev;

		ret = pci_alloc_irq_vectors(pci_pdev,
					    priv->config.rings + 1,
					    priv->config.rings + 1,
					    PCI_IRQ_MSI | PCI_IRQ_MSIX);
		if (ret < 0) {
			dev_err(dev, "Failed to allocate PCI MSI interrupts\n");
			return ret;
		}
	}

	/* Register the ring IRQ handlers and configure the rings */
	priv->ring = devm_kcalloc(dev, priv->config.rings,
				  sizeof(*priv->ring),
				  GFP_KERNEL);
	if (!priv->ring)
		return -ENOMEM;

	for (i = 0; i < priv->config.rings; i++) {
		char wq_name[9] = {0};
		int irq;
		struct safexcel_ring_irq_data *ring_irq;

		ret = safexcel_init_ring_descriptors(priv,
						     &priv->ring[i].cdr,
						     &priv->ring[i].rdr);
		if (ret) {
			dev_err(dev, "Failed to initialize rings\n");
			return ret;
		}

		priv->ring[i].rdr_req = devm_kcalloc(dev,
			EIP197_DEFAULT_RING_SIZE,
			sizeof(*priv->ring[i].rdr_req),
			GFP_KERNEL);
		if (!priv->ring[i].rdr_req)
			return -ENOMEM;

		ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL);
		if (!ring_irq)
			return -ENOMEM;

		ring_irq->priv = priv;
		ring_irq->ring = i;

		irq = safexcel_request_ring_irq(pdev,
						EIP197_IRQ_NUMBER(i, is_pci_dev),
						is_pci_dev,
						i,
						safexcel_irq_ring,
						safexcel_irq_ring_thread,
						ring_irq);
		if (irq < 0) {
			dev_err(dev, "Failed to get IRQ ID for ring %d\n", i);
			return irq;
		}

		priv->ring[i].irq = irq;
		priv->ring[i].work_data.priv = priv;
		priv->ring[i].work_data.ring = i;
		INIT_WORK(&priv->ring[i].work_data.work,
			  safexcel_dequeue_work);

		snprintf(wq_name, 9, "wq_ring%d", i);
		priv->ring[i].workqueue =
			create_singlethread_workqueue(wq_name);
		if (!priv->ring[i].workqueue)
			return -ENOMEM;

		priv->ring[i].requests = 0;
		priv->ring[i].busy = false;

		crypto_init_queue(&priv->ring[i].queue,
				  EIP197_DEFAULT_RING_SIZE);

		spin_lock_init(&priv->ring[i].lock);
		spin_lock_init(&priv->ring[i].queue_lock);
	}

	atomic_set(&priv->ring_used, 0);

	ret = safexcel_hw_init(priv);
	if (ret) {
		dev_err(dev, "HW init failed (%d)\n", ret);
		return ret;
	}

	ret = safexcel_register_algorithms(priv);
	if (ret) {
		dev_err(dev, "Failed to register algorithms (%d)\n", ret);
		return ret;
	}

	return 0;
}

static void safexcel_hw_reset_rings(struct safexcel_crypto_priv *priv)
{
	int i;

	for (i = 0; i < priv->config.rings; i++) {
		/* clear any pending interrupt */
		writel(GENMASK(5, 0), EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
		writel(GENMASK(7, 0), EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);

		/* Reset the CDR base address */
		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

		/* Reset the RDR base address */
		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
	}
}

/* for Device Tree platform driver */

static int safexcel_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct safexcel_crypto_priv *priv;
	int ret;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = dev;
	priv->version = (enum safexcel_eip_version)of_device_get_match_data(dev);

	platform_set_drvdata(pdev, priv);

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base)) {
		dev_err(dev, "failed to get resource\n");
		return PTR_ERR(priv->base);
	}

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	ret = PTR_ERR_OR_ZERO(priv->clk);
	/* The clock isn't mandatory */
	if (ret != -ENOENT) {
		if (ret)
			return ret;

		ret = clk_prepare_enable(priv->clk);
		if (ret) {
			dev_err(dev, "unable to enable clk (%d)\n", ret);
			return ret;
		}
	}

	priv->reg_clk = devm_clk_get(&pdev->dev, "reg");
	ret = PTR_ERR_OR_ZERO(priv->reg_clk);
	/* The clock isn't mandatory */
	if (ret != -ENOENT) {
		if (ret)
			goto err_core_clk;

		ret = clk_prepare_enable(priv->reg_clk);
		if (ret) {
			dev_err(dev, "unable to enable reg clk (%d)\n", ret);
			goto err_core_clk;
		}
	}

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret)
		goto err_reg_clk;

	/* Generic EIP97/EIP197 device probing */
	ret = safexcel_probe_generic(pdev, priv, 0);
	if (ret)
		goto err_reg_clk;

	return 0;

err_reg_clk:
	clk_disable_unprepare(priv->reg_clk);
err_core_clk:
	clk_disable_unprepare(priv->clk);
	return ret;
}

static int safexcel_remove(struct platform_device *pdev)
{
	struct safexcel_crypto_priv *priv = platform_get_drvdata(pdev);
	int i;

	safexcel_unregister_algorithms(priv);
	safexcel_hw_reset_rings(priv);

	clk_disable_unprepare(priv->reg_clk);
	clk_disable_unprepare(priv->clk);

	for (i = 0; i < priv->config.rings; i++) {
		irq_set_affinity_hint(priv->ring[i].irq, NULL);
		destroy_workqueue(priv->ring[i].workqueue);
	}

	return 0;
}

static const struct of_device_id safexcel_of_match_table[] = {
	{
		.compatible = "inside-secure,safexcel-eip97ies",
		.data = (void *)EIP97IES_MRVL,
	},
	{
		.compatible = "inside-secure,safexcel-eip197b",
		.data = (void *)EIP197B_MRVL,
	},
	{
		.compatible = "inside-secure,safexcel-eip197d",
		.data = (void *)EIP197D_MRVL,
	},
	/* For backward compatibility and intended for generic use */
	{
		.compatible = "inside-secure,safexcel-eip97",
		.data = (void *)EIP97IES_MRVL,
	},
	{
		.compatible = "inside-secure,safexcel-eip197",
		.data = (void *)EIP197B_MRVL,
	},
	{},
};

static struct platform_driver crypto_safexcel = {
	.probe		= safexcel_probe,
	.remove		= safexcel_remove,
	.driver		= {
		.name	= "crypto-safexcel",
		.of_match_table = safexcel_of_match_table,
	},
};

/* PCIE devices - i.e. Inside Secure development boards */

static int safexcel_pci_probe(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct safexcel_crypto_priv *priv;
	void __iomem *pciebase;
	int rc;
	u32 val;

	dev_dbg(dev, "Probing PCIE device: vendor %04x, device %04x, subv %04x, subdev %04x, ctxt %lx\n",
		ent->vendor, ent->device, ent->subvendor,
		ent->subdevice, ent->driver_data);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = dev;
	priv->version = (enum safexcel_eip_version)ent->driver_data;

	pci_set_drvdata(pdev, priv);

	/* enable the device */
	rc = pcim_enable_device(pdev);
	if (rc) {
		dev_err(dev, "Failed to enable PCI device\n");
		return rc;
	}

	/* take ownership of PCI BAR0 */
	rc = pcim_iomap_regions(pdev, 1, "crypto_safexcel");
	if (rc) {
		dev_err(dev, "Failed to map IO region for BAR0\n");
		return rc;
	}
	priv->base = pcim_iomap_table(pdev)[0];

	if (priv->version == EIP197_DEVBRD) {
		dev_dbg(dev, "Device identified as FPGA based development board - applying HW reset\n");

		rc = pcim_iomap_regions(pdev, 4, "crypto_safexcel");
		if (rc) {
			dev_err(dev, "Failed to map IO region for BAR4\n");
			return rc;
		}

		pciebase = pcim_iomap_table(pdev)[2];
		val = readl(pciebase + EIP197_XLX_IRQ_BLOCK_ID_ADDR);
		if ((val >> 16) == EIP197_XLX_IRQ_BLOCK_ID_VALUE) {
			dev_dbg(dev, "Detected Xilinx PCIE IRQ block version %d, multiple MSI support enabled\n",
				(val & 0xff));

			/* Setup MSI identity map mapping */
			writel(EIP197_XLX_USER_VECT_LUT0_IDENT,
			       pciebase + EIP197_XLX_USER_VECT_LUT0_ADDR);
			writel(EIP197_XLX_USER_VECT_LUT1_IDENT,
			       pciebase + EIP197_XLX_USER_VECT_LUT1_ADDR);
			writel(EIP197_XLX_USER_VECT_LUT2_IDENT,
			       pciebase + EIP197_XLX_USER_VECT_LUT2_ADDR);
			writel(EIP197_XLX_USER_VECT_LUT3_IDENT,
			       pciebase + EIP197_XLX_USER_VECT_LUT3_ADDR);

			/* Enable all device interrupts */
			writel(GENMASK(31, 0),
			       pciebase + EIP197_XLX_USER_INT_ENB_MSK);
		} else {
			dev_err(dev, "Unrecognised IRQ block identifier %x\n",
				val);
			return -ENODEV;
		}

		/* HW reset FPGA dev board */
		/* assert reset */
		writel(1, priv->base + EIP197_XLX_GPIO_BASE);
		wmb(); /* maintain strict ordering for accesses here */
		/* deassert reset */
		writel(0, priv->base + EIP197_XLX_GPIO_BASE);
		wmb(); /* maintain strict ordering for accesses here */
	}

	/* enable bus mastering */
	pci_set_master(pdev);

	/* Generic EIP97/EIP197 device probing */
	rc = safexcel_probe_generic(pdev, priv, 1);
	return rc;
}

static void safexcel_pci_remove(struct pci_dev *pdev)
{
	struct safexcel_crypto_priv *priv = pci_get_drvdata(pdev);
	int i;

	safexcel_unregister_algorithms(priv);

	for (i = 0; i < priv->config.rings; i++)
		destroy_workqueue(priv->ring[i].workqueue);

	safexcel_hw_reset_rings(priv);
}

static const struct pci_device_id safexcel_pci_ids[] = {
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_XILINX, 0x9038,
			       0x16ae, 0xc522),
		.driver_data = EIP197_DEVBRD,
	},
	{},
};

MODULE_DEVICE_TABLE(pci, safexcel_pci_ids);

static struct pci_driver safexcel_pci_driver = {
	.name		= "crypto-safexcel",
	.id_table	= safexcel_pci_ids,
	.probe		= safexcel_pci_probe,
	.remove		= safexcel_pci_remove,
};
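
/*
 * Module init/exit note: the PCI driver is registered unconditionally and
 * the platform driver only when OF is enabled; if platform registration
 * fails after PCI registration succeeded, the PCI driver is unregistered
 * again, so module load either fully succeeds or leaves nothing behind.
 */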

static int __init safexcel_init(void)
{
	int ret;

	/* Register PCI driver */
	ret = pci_register_driver(&safexcel_pci_driver);

	/* Register platform driver */
	if (IS_ENABLED(CONFIG_OF) && !ret) {
		ret = platform_driver_register(&crypto_safexcel);
		if (ret)
			pci_unregister_driver(&safexcel_pci_driver);
	}

	return ret;
}

static void __exit safexcel_exit(void)
{
	/* Unregister platform driver */
	if (IS_ENABLED(CONFIG_OF))
		platform_driver_unregister(&crypto_safexcel);

	/* Unregister PCI driver if successfully registered before */
	pci_unregister_driver(&safexcel_pci_driver);
}

module_init(safexcel_init);
module_exit(safexcel_exit);

MODULE_AUTHOR("Antoine Tenart <antoine.tenart@free-electrons.com>");
MODULE_AUTHOR("Ofer Heifetz <oferh@marvell.com>");
MODULE_AUTHOR("Igal Liberman <igall@marvell.com>");
MODULE_DESCRIPTION("Support for SafeXcel cryptographic engines: EIP97 & EIP197");
MODULE_LICENSE("GPL v2");