// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Marvell
 *
 * Antoine Tenart <antoine.tenart@free-electrons.com>
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>

#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>

#include "safexcel.h"
static u32 max_rings = EIP197_MAX_RINGS;
module_param(max_rings, uint, 0644);
MODULE_PARM_DESC(max_rings, "Maximum number of rings to use.");
static void eip197_trc_cache_setupvirt(struct safexcel_crypto_priv *priv)
{
	u32 i;

	/*
	 * Map all interfaces/rings to register index 0
	 * so they can share contexts. Without this, the EIP197 will
	 * assume each interface/ring to be in its own memory domain
	 * i.e. have its own subset of UNIQUE memory addresses.
	 * Which would cause records with the SAME memory address to
	 * use DIFFERENT cache buffers, causing both poor cache utilization
	 * AND serious coherence/invalidation issues.
	 */
	for (i = 0; i < 4; i++)
		writel(0, priv->base + EIP197_FLUE_IFC_LUT(i));

	/*
	 * Initialize other virtualization regs for cache
	 * These may not be in their reset state ...
	 */
	for (i = 0; i < priv->config.rings; i++) {
		writel(0, priv->base + EIP197_FLUE_CACHEBASE_LO(i));
		writel(0, priv->base + EIP197_FLUE_CACHEBASE_HI(i));
		writel(EIP197_FLUE_CONFIG_MAGIC,
		       priv->base + EIP197_FLUE_CONFIG(i));
	}
	writel(0, priv->base + EIP197_FLUE_OFFSETS);
	writel(0, priv->base + EIP197_FLUE_ARC4_OFFSET);
}
static void eip197_trc_cache_banksel(struct safexcel_crypto_priv *priv,
				     u32 addrmid, int *actbank)
{
	u32 val;
	int curbank;

	curbank = addrmid >> 16;
	if (curbank != *actbank) {
		val = readl(priv->base + EIP197_CS_RAM_CTRL);
		val = (val & ~EIP197_CS_BANKSEL_MASK) |
		      (curbank << EIP197_CS_BANKSEL_OFS);
		writel(val, priv->base + EIP197_CS_RAM_CTRL);
		*actbank = curbank;
	}
}
static u32 eip197_trc_cache_probe(struct safexcel_crypto_priv *priv,
				  int maxbanks, u32 probemask, u32 stride)
{
	u32 val, addrhi, addrlo, addrmid, addralias, delta, marker;
	int actbank;

	/*
	 * Probe the actual size of the physically attached cache data RAM
	 * using a binary subdivision algorithm down to 32 byte cache lines.
	 */
	addrhi = 1 << (16 + maxbanks);
	addrlo = 0;
	actbank = min(maxbanks - 1, 0);
	while ((addrhi - addrlo) > stride) {
		/* write marker to lowest address in top half */
		addrmid = (addrhi + addrlo) >> 1;
		marker = (addrmid ^ 0xabadbabe) & probemask; /* Unique */
		eip197_trc_cache_banksel(priv, addrmid, &actbank);
		writel(marker,
		       priv->base + EIP197_CLASSIFICATION_RAMS +
		       (addrmid & 0xffff));

		/* write invalid markers to possible aliases */
		delta = 1 << __fls(addrmid);
		while (delta >= stride) {
			addralias = addrmid - delta;
			eip197_trc_cache_banksel(priv, addralias, &actbank);
			writel(~marker,
			       priv->base + EIP197_CLASSIFICATION_RAMS +
			       (addralias & 0xffff));
			delta >>= 1;
		}

		/* read back marker from top half */
		eip197_trc_cache_banksel(priv, addrmid, &actbank);
		val = readl(priv->base + EIP197_CLASSIFICATION_RAMS +
			    (addrmid & 0xffff));

		if ((val & probemask) == marker)
			/* read back correct, continue with top half */
			addrlo = addrmid;
		else
			/* not read back correct, continue with bottom half */
			addrhi = addrmid;
	}
	return addrhi;
}
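
/*
 * Probe illustration (example numbers only): with maxbanks = 2 the search
 * window starts as [0, 1 << 18). Each pass writes a marker at the midpoint,
 * overwrites the power-of-two aliases below it with the inverted marker and
 * reads the midpoint back; an intact marker moves addrlo up, a clobbered one
 * moves addrhi down, until the window has shrunk to a single stride.
 */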
static void eip197_trc_cache_clear(struct safexcel_crypto_priv *priv,
				   int cs_rc_max, int cs_ht_wc)
{
	int i;
	u32 htable_offset, val, offset;

	/* Clear all records in administration RAM */
	for (i = 0; i < cs_rc_max; i++) {
		offset = EIP197_CLASSIFICATION_RAMS + i * EIP197_CS_RC_SIZE;

		writel(EIP197_CS_RC_NEXT(EIP197_RC_NULL) |
		       EIP197_CS_RC_PREV(EIP197_RC_NULL),
		       priv->base + offset);

		val = EIP197_CS_RC_NEXT(i + 1) | EIP197_CS_RC_PREV(i - 1);
		if (i == 0)
			val |= EIP197_CS_RC_PREV(EIP197_RC_NULL);
		else if (i == cs_rc_max - 1)
			val |= EIP197_CS_RC_NEXT(EIP197_RC_NULL);
		writel(val, priv->base + offset + 4);
		/* must also initialize the address key due to ECC! */
		writel(0, priv->base + offset + 8);
		writel(0, priv->base + offset + 12);
	}

	/* Clear the hash table entries */
	htable_offset = cs_rc_max * EIP197_CS_RC_SIZE;
	for (i = 0; i < cs_ht_wc; i++)
		writel(GENMASK(29, 0),
		       priv->base + EIP197_CLASSIFICATION_RAMS +
		       htable_offset + i * sizeof(u32));
}
static int eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
{
	u32 val, dsize, asize;
	int cs_rc_max, cs_ht_wc, cs_trc_rec_wc, cs_trc_lg_rec_wc;
	int cs_rc_abs_max, cs_ht_sz;
	int maxbanks;

	/* Setup (dummy) virtualization for cache */
	eip197_trc_cache_setupvirt(priv);

	/*
	 * Enable the record cache memory access and
	 * probe the bank select width
	 */
	val = readl(priv->base + EIP197_CS_RAM_CTRL);
	val &= ~EIP197_TRC_ENABLE_MASK;
	val |= EIP197_TRC_ENABLE_0 | EIP197_CS_BANKSEL_MASK;
	writel(val, priv->base + EIP197_CS_RAM_CTRL);
	val = readl(priv->base + EIP197_CS_RAM_CTRL);
	maxbanks = ((val & EIP197_CS_BANKSEL_MASK) >> EIP197_CS_BANKSEL_OFS) + 1;

	/* Clear all ECC errors */
	writel(0, priv->base + EIP197_TRC_ECCCTRL);

	/*
	 * Make sure the cache memory is accessible by taking record cache into
	 * reset. Need data memory access here, not admin access.
	 */
	val = readl(priv->base + EIP197_TRC_PARAMS);
	val |= EIP197_TRC_PARAMS_SW_RESET | EIP197_TRC_PARAMS_DATA_ACCESS;
	writel(val, priv->base + EIP197_TRC_PARAMS);

	/* Probed data RAM size in bytes */
	dsize = eip197_trc_cache_probe(priv, maxbanks, 0xffffffff, 32);

	/*
	 * Now probe the administration RAM size pretty much the same way
	 * Except that only the lower 30 bits are writable and we don't need
	 * bank selects
	 */
	val = readl(priv->base + EIP197_TRC_PARAMS);
	/* admin access now */
	val &= ~(EIP197_TRC_PARAMS_DATA_ACCESS | EIP197_CS_BANKSEL_MASK);
	writel(val, priv->base + EIP197_TRC_PARAMS);

	/* Probed admin RAM size in admin words */
	asize = eip197_trc_cache_probe(priv, 0, 0x3fffffff, 16) >> 4;

	/* Clear any ECC errors detected while probing! */
	writel(0, priv->base + EIP197_TRC_ECCCTRL);

	/* Sanity check probing results */
	if (dsize < EIP197_MIN_DSIZE || asize < EIP197_MIN_ASIZE) {
		dev_err(priv->dev, "Record cache probing failed (%d,%d).",
			dsize, asize);
		return -ENODEV;
	}

	/*
	 * Determine optimal configuration from RAM sizes
	 * Note that we assume that the physical RAM configuration is sane
	 * Therefore, we don't do any parameter error checking here ...
	 */

	/* For now, just use a single record format covering everything */
	cs_trc_rec_wc = EIP197_CS_TRC_REC_WC;
	cs_trc_lg_rec_wc = EIP197_CS_TRC_REC_WC;

	/*
	 * Step #1: How many records will physically fit?
	 * Hard upper limit is 1023!
	 */
	cs_rc_abs_max = min_t(uint, ((dsize >> 2) / cs_trc_lg_rec_wc), 1023);
	/* Step #2: Need at least 2 words in the admin RAM per record */
	cs_rc_max = min_t(uint, cs_rc_abs_max, (asize >> 1));
	/* Step #3: Determine log2 of hash table size */
	cs_ht_sz = __fls(asize - cs_rc_max) - 2;
	/* Step #4: determine current size of hash table in dwords */
	cs_ht_wc = 16 << cs_ht_sz; /* dwords, not admin words */
	/* Step #5: add back excess words and see if we can fit more records */
	cs_rc_max = min_t(uint, cs_rc_abs_max, asize - (cs_ht_wc >> 2));
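
	/*
	 * Worked example of the sizing steps (illustrative numbers only):
	 * with dsize = 32768 bytes, asize = 2048 admin words and 64-word
	 * records, step #1 gives min(8192 / 64, 1023) = 128 records, step #2
	 * keeps 128 (asize / 2 = 1024 is not the limit), step #3 gives
	 * __fls(1920) - 2 = 8, step #4 a 16 << 8 = 4096 dword hash table and
	 * step #5 still allows 128 records since 2048 - 4096 / 4 = 1024.
	 */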
	/* Clear the cache RAMs */
	eip197_trc_cache_clear(priv, cs_rc_max, cs_ht_wc);

	/* Disable the record cache memory access */
	val = readl(priv->base + EIP197_CS_RAM_CTRL);
	val &= ~EIP197_TRC_ENABLE_MASK;
	writel(val, priv->base + EIP197_CS_RAM_CTRL);

	/* Write head and tail pointers of the record free chain */
	val = EIP197_TRC_FREECHAIN_HEAD_PTR(0) |
	      EIP197_TRC_FREECHAIN_TAIL_PTR(cs_rc_max - 1);
	writel(val, priv->base + EIP197_TRC_FREECHAIN);

	/* Configure the record cache #1 */
	val = EIP197_TRC_PARAMS2_RC_SZ_SMALL(cs_trc_rec_wc) |
	      EIP197_TRC_PARAMS2_HTABLE_PTR(cs_rc_max);
	writel(val, priv->base + EIP197_TRC_PARAMS2);

	/* Configure the record cache #2 */
	val = EIP197_TRC_PARAMS_RC_SZ_LARGE(cs_trc_lg_rec_wc) |
	      EIP197_TRC_PARAMS_BLK_TIMER_SPEED(1) |
	      EIP197_TRC_PARAMS_HTABLE_SZ(cs_ht_sz);
	writel(val, priv->base + EIP197_TRC_PARAMS);

	dev_info(priv->dev, "TRC init: %dd,%da (%dr,%dh)\n",
		 dsize, asize, cs_rc_max, cs_ht_wc + cs_ht_wc);
	return 0;
}
static void eip197_init_firmware(struct safexcel_crypto_priv *priv)
{
	int pe, i;
	u32 val;

	for (pe = 0; pe < priv->config.pes; pe++) {
		/* Configure the token FIFO's */
		writel(3, EIP197_PE(priv) + EIP197_PE_ICE_PUTF_CTRL(pe));
		writel(0, EIP197_PE(priv) + EIP197_PE_ICE_PPTF_CTRL(pe));

		/* Clear the ICE scratchpad memory */
		val = readl(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));
		val |= EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER |
		       EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN |
		       EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS |
		       EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS;
		writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));

		/* clear the scratchpad RAM using 32 bit writes only */
		for (i = 0; i < EIP197_NUM_OF_SCRATCH_BLOCKS; i++)
			writel(0, EIP197_PE(priv) +
				  EIP197_PE_ICE_SCRATCH_RAM(pe) + (i << 2));

		/* Reset the IFPP engine to make its program mem accessible */
		writel(EIP197_PE_ICE_x_CTRL_SW_RESET |
		       EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR |
		       EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR,
		       EIP197_PE(priv) + EIP197_PE_ICE_FPP_CTRL(pe));

		/* Reset the IPUE engine to make its program mem accessible */
		writel(EIP197_PE_ICE_x_CTRL_SW_RESET |
		       EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR |
		       EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR,
		       EIP197_PE(priv) + EIP197_PE_ICE_PUE_CTRL(pe));

		/* Enable access to all IFPP program memories */
		writel(EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN,
		       EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));
	}
}
static int eip197_write_firmware(struct safexcel_crypto_priv *priv,
				 const struct firmware *fw)
{
	const __be32 *data = (const __be32 *)fw->data;
	int i;

	/* Write the firmware */
	for (i = 0; i < fw->size / sizeof(u32); i++)
		writel(be32_to_cpu(data[i]),
		       priv->base + EIP197_CLASSIFICATION_RAMS +
		       i * sizeof(__be32));

	/* Exclude final 2 NOPs from size */
	return i - EIP197_FW_TERMINAL_NOPS;
}
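
/*
 * Note on eip197_write_firmware(): the images are stored big-endian, hence
 * the be32_to_cpu() conversion before each writel(), and the last
 * EIP197_FW_TERMINAL_NOPS words are trailing NOPs that are excluded from the
 * size reported back to the caller.
 */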
/*
 * If FW is actual production firmware, then poll for its initialization
 * to complete and check if it is good for the HW, otherwise just return OK.
 */
static bool poll_fw_ready(struct safexcel_crypto_priv *priv, int fpp)
{
	int pe, pollcnt;
	u32 base, pollofs;

	if (fpp)
		pollofs = EIP197_FW_FPP_READY;
	else
		pollofs = EIP197_FW_PUE_READY;

	for (pe = 0; pe < priv->config.pes; pe++) {
		base = EIP197_PE_ICE_SCRATCH_RAM(pe);
		pollcnt = EIP197_FW_START_POLLCNT;
		while (pollcnt &&
		       (readl_relaxed(EIP197_PE(priv) + base +
			      pollofs) != 1)) {
			pollcnt--;
		}
		if (!pollcnt) {
			dev_err(priv->dev, "FW(%d) for PE %d failed to start\n",
				fpp, pe);
			return false;
		}
	}
	return true;
}
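
/*
 * The scratch RAM word polled above (EIP197_FW_FPP_READY or
 * EIP197_FW_PUE_READY) is expected to be set by production firmware once its
 * own initialization completes; the poll gives up after
 * EIP197_FW_START_POLLCNT reads and reports the engine as failed to start.
 */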
static bool eip197_start_firmware(struct safexcel_crypto_priv *priv,
				  int ipuesz, int ifppsz, int minifw)
{
	int pe;
	u32 val;

	for (pe = 0; pe < priv->config.pes; pe++) {
		/* Disable access to all program memory */
		writel(0, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));

		/* Start IFPP microengines */
		if (minifw)
			val = 0;
		else
			val = EIP197_PE_ICE_UENG_START_OFFSET((ifppsz - 1) &
					EIP197_PE_ICE_UENG_INIT_ALIGN_MASK) |
			      EIP197_PE_ICE_UENG_DEBUG_RESET;
		writel(val, EIP197_PE(priv) + EIP197_PE_ICE_FPP_CTRL(pe));

		/* Start IPUE microengines */
		if (minifw)
			val = 0;
		else
			val = EIP197_PE_ICE_UENG_START_OFFSET((ipuesz - 1) &
					EIP197_PE_ICE_UENG_INIT_ALIGN_MASK) |
			      EIP197_PE_ICE_UENG_DEBUG_RESET;
		writel(val, EIP197_PE(priv) + EIP197_PE_ICE_PUE_CTRL(pe));
	}

	/* For miniFW startup, there is no initialization, so always succeed */
	if (minifw)
		return true;

	/* Wait until all the firmwares have properly started up */
	if (!poll_fw_ready(priv, 1))
		return false;
	if (!poll_fw_ready(priv, 0))
		return false;

	return true;
}
static int eip197_load_firmwares(struct safexcel_crypto_priv *priv)
{
	const char *fw_name[] = {"ifpp.bin", "ipue.bin"};
	const struct firmware *fw[FW_NB];
	char fw_path[37], *dir = NULL;
	int i, j, ret = 0, pe;
	int ipuesz, ifppsz, minifw = 0;

	if (priv->version == EIP197D_MRVL)
		dir = "eip197d";
	else if (priv->version == EIP197B_MRVL ||
		 priv->version == EIP197_DEVBRD)
		dir = "eip197b";
	else
		return -ENODEV;

retry_fw:
	for (i = 0; i < FW_NB; i++) {
		snprintf(fw_path, 37, "inside-secure/%s/%s", dir, fw_name[i]);
		ret = firmware_request_nowarn(&fw[i], fw_path, priv->dev);
		if (ret) {
			if (minifw || priv->version != EIP197B_MRVL)
				goto release_fw;

			/* Fallback to the old firmware location for the
			 * EIP197b.
			 */
			ret = firmware_request_nowarn(&fw[i], fw_name[i],
						      priv->dev);
			if (ret)
				goto release_fw;
		}
	}

	eip197_init_firmware(priv);

	ifppsz = eip197_write_firmware(priv, fw[FW_IFPP]);

	/* Enable access to IPUE program memories */
	for (pe = 0; pe < priv->config.pes; pe++)
		writel(EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN,
		       EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));

	ipuesz = eip197_write_firmware(priv, fw[FW_IPUE]);

	if (eip197_start_firmware(priv, ipuesz, ifppsz, minifw)) {
		dev_dbg(priv->dev, "Firmware loaded successfully\n");
		return 0;
	}

	ret = -ENODEV;

release_fw:
	for (j = 0; j < i; j++)
		release_firmware(fw[j]);

	if (!minifw) {
		/* Retry with minifw path */
		dev_dbg(priv->dev, "Firmware set not (fully) present or init failed, falling back to BCLA mode\n");
		dir = "eip197_minifw";
		minifw = 1;
		goto retry_fw;
	}

	dev_dbg(priv->dev, "Firmware load failed.\n");

	return ret;
}
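
/*
 * Firmware lookup order implemented above: the versioned production path
 * (inside-secure/<dir>/<image>), then, for EIP197B parts only, the legacy
 * flat path, and finally the "eip197_minifw" set, which starts the engine in
 * the reduced BCLA mode when no full firmware set is available.
 */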
static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv)
{
	u32 cd_size_rnd, val;
	int i, cd_fetch_cnt;

	cd_size_rnd = (priv->config.cd_size +
		       (BIT(priv->hwconfig.hwdataw) - 1)) >>
		      priv->hwconfig.hwdataw;
	/* determine number of CD's we can fetch into the CD FIFO as 1 block */
	if (priv->flags & SAFEXCEL_HW_EIP197) {
		/* EIP197: try to fetch enough in 1 go to keep all pipes busy */
		cd_fetch_cnt = (1 << priv->hwconfig.hwcfsize) / cd_size_rnd;
		cd_fetch_cnt = min_t(uint, cd_fetch_cnt,
				     (priv->config.pes * EIP197_FETCH_DEPTH));
	} else {
		/* for the EIP97, just fetch all that fits minus 1 */
		cd_fetch_cnt = ((1 << priv->hwconfig.hwcfsize) /
				cd_size_rnd) - 1;
	}
	/*
	 * Since we're using command desc's way larger than formally specified,
	 * we need to check whether we can fit even 1 for low-end EIP196's!
	 */
	if (!cd_fetch_cnt) {
		dev_err(priv->dev, "Unable to fit even 1 command desc!\n");
		return -ENODEV;
	}

	for (i = 0; i < priv->config.rings; i++) {
		/* ring base address */
		writel(lower_32_bits(priv->ring[i].cdr.base_dma),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(upper_32_bits(priv->ring[i].cdr.base_dma),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

		writel(EIP197_xDR_DESC_MODE_64BIT | EIP197_CDR_DESC_MODE_ADCP |
		       (priv->config.cd_offset << 14) | priv->config.cd_size,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
		writel(((cd_fetch_cnt *
			 (cd_size_rnd << priv->hwconfig.hwdataw)) << 16) |
		       (cd_fetch_cnt * (priv->config.cd_offset / sizeof(u32))),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Configure DMA tx control */
		val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
		val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
		writel(val, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);

		/* clear any pending interrupt */
		writel(GENMASK(5, 0),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
	}

	return 0;
}
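
/*
 * In this helper and the RDR variant below, *_size_rnd is the descriptor
 * size rounded up to a whole number of bus words: adding BIT(hwdataw) - 1
 * and shifting right by hwdataw is ceil(size / 2^hwdataw). The fetch count
 * is then how many such rounded descriptors fit in the command/result FIFO
 * (1 << hwcfsize, respectively 1 << hwrfsize, words).
 */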
static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv)
{
	u32 rd_size_rnd, val;
	int i, rd_fetch_cnt;

	/* determine number of RD's we can fetch into the FIFO as one block */
	rd_size_rnd = (EIP197_RD64_FETCH_SIZE +
		       (BIT(priv->hwconfig.hwdataw) - 1)) >>
		      priv->hwconfig.hwdataw;
	if (priv->flags & SAFEXCEL_HW_EIP197) {
		/* EIP197: try to fetch enough in 1 go to keep all pipes busy */
		rd_fetch_cnt = (1 << priv->hwconfig.hwrfsize) / rd_size_rnd;
		rd_fetch_cnt = min_t(uint, rd_fetch_cnt,
				     (priv->config.pes * EIP197_FETCH_DEPTH));
	} else {
		/* for the EIP97, just fetch all that fits minus 1 */
		rd_fetch_cnt = ((1 << priv->hwconfig.hwrfsize) /
				rd_size_rnd) - 1;
	}

	for (i = 0; i < priv->config.rings; i++) {
		/* ring base address */
		writel(lower_32_bits(priv->ring[i].rdr.base_dma),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(upper_32_bits(priv->ring[i].rdr.base_dma),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

		writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.rd_offset << 14) |
		       priv->config.rd_size,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);

		writel(((rd_fetch_cnt *
			 (rd_size_rnd << priv->hwconfig.hwdataw)) << 16) |
		       (rd_fetch_cnt * (priv->config.rd_offset / sizeof(u32))),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Configure DMA tx control */
		val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
		val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
		val |= EIP197_HIA_xDR_WR_RES_BUF | EIP197_HIA_xDR_WR_CTRL_BUF;
		writel(val,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);

		/* clear any pending interrupt */
		writel(GENMASK(7, 0),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);

		/* enable ring interrupt */
		val = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
		val |= EIP197_RDR_IRQ(i);
		writel(val, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
	}

	return 0;
}
static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
{
	u32 val;
	int i, ret, pe, opbuflo, opbufhi;

	dev_dbg(priv->dev, "HW init: using %d pipe(s) and %d ring(s)\n",
		priv->config.pes, priv->config.rings);

	/*
	 * For EIP197's only set maximum number of TX commands to 2^5 = 32
	 * Skip for the EIP97 as it does not have this field.
	 */
	if (priv->flags & SAFEXCEL_HW_EIP197) {
		val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
		val |= EIP197_MST_CTRL_TX_MAX_CMD(5);
		writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
	}

	/* Configure wr/rd cache values */
	writel(EIP197_MST_CTRL_RD_CACHE(RD_CACHE_4BITS) |
	       EIP197_MST_CTRL_WD_CACHE(WR_CACHE_4BITS),
	       EIP197_HIA_GEN_CFG(priv) + EIP197_MST_CTRL);

	/* Interrupts reset */

	/* Disable all global interrupts */
	writel(0, EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ENABLE_CTRL);

	/* Clear any pending interrupt */
	writel(GENMASK(31, 0), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);

	/* Processing Engine configuration */
	for (pe = 0; pe < priv->config.pes; pe++) {
		/* Data Fetch Engine configuration */

		/* Reset all DFE threads */
		writel(EIP197_DxE_THR_CTRL_RESET_PE,
		       EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));

		if (priv->flags & EIP197_PE_ARB)
			/* Reset HIA input interface arbiter (if present) */
			writel(EIP197_HIA_RA_PE_CTRL_RESET,
			       EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));

		/* DMA transfer size to use */
		val = EIP197_HIA_DFE_CFG_DIS_DEBUG;
		val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(6) |
		       EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(9);
		val |= EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(6) |
		       EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(7);
		val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS);
		val |= EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(RD_CACHE_3BITS);
		writel(val, EIP197_HIA_DFE(priv) + EIP197_HIA_DFE_CFG(pe));

		/* Leave the DFE threads reset state */
		writel(0, EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));

		/* Configure the processing engine thresholds */
		writel(EIP197_PE_IN_xBUF_THRES_MIN(6) |
		       EIP197_PE_IN_xBUF_THRES_MAX(9),
		       EIP197_PE(priv) + EIP197_PE_IN_DBUF_THRES(pe));
		writel(EIP197_PE_IN_xBUF_THRES_MIN(6) |
		       EIP197_PE_IN_xBUF_THRES_MAX(7),
		       EIP197_PE(priv) + EIP197_PE_IN_TBUF_THRES(pe));

		if (priv->flags & SAFEXCEL_HW_EIP197)
			/* enable HIA input interface arbiter and rings */
			writel(EIP197_HIA_RA_PE_CTRL_EN |
			       GENMASK(priv->config.rings - 1, 0),
			       EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));

		/* Data Store Engine configuration */

		/* Reset all DSE threads */
		writel(EIP197_DxE_THR_CTRL_RESET_PE,
		       EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));

		/* Wait for all DSE threads to complete */
		while ((readl(EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_STAT(pe)) &
			GENMASK(15, 12)) != GENMASK(15, 12))
			;

		/* DMA transfer size to use */
		if (priv->hwconfig.hwnumpes > 4) {
			opbuflo = 9;
			opbufhi = 10;
		} else {
			opbuflo = 7;
			opbufhi = 8;
		}
		val = EIP197_HIA_DSE_CFG_DIS_DEBUG;
		val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(opbuflo) |
		       EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(opbufhi);
		val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS);
		val |= EIP197_HIA_DSE_CFG_ALWAYS_BUFFERABLE;
		/* FIXME: instability issues can occur for EIP97 but disabling
		 * it impacts performance.
		 */
		if (priv->flags & SAFEXCEL_HW_EIP197)
			val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR;
		writel(val, EIP197_HIA_DSE(priv) + EIP197_HIA_DSE_CFG(pe));

		/* Leave the DSE threads reset state */
		writel(0, EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));

		/* Configure the processing engine thresholds */
		writel(EIP197_PE_OUT_DBUF_THRES_MIN(opbuflo) |
		       EIP197_PE_OUT_DBUF_THRES_MAX(opbufhi),
		       EIP197_PE(priv) + EIP197_PE_OUT_DBUF_THRES(pe));

		/* Processing Engine configuration */

		/* Token & context configuration */
		val = EIP197_PE_EIP96_TOKEN_CTRL_CTX_UPDATES |
		      EIP197_PE_EIP96_TOKEN_CTRL_NO_TOKEN_WAIT |
		      EIP197_PE_EIP96_TOKEN_CTRL_ENABLE_TIMEOUT;
		writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_TOKEN_CTRL(pe));

		/* H/W capabilities selection: just enable everything */
		writel(EIP197_FUNCTION_ALL,
		       EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN(pe));
		writel(EIP197_FUNCTION_ALL,
		       EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION2_EN(pe));
	}

	/* Command Descriptor Rings prepare */
	for (i = 0; i < priv->config.rings; i++) {
		/* Clear interrupts for this ring */
		writel(GENMASK(31, 0),
		       EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CLR(i));

		/* Disable external triggering */
		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Clear the pending prepared counter */
		writel(EIP197_xDR_PREP_CLR_COUNT,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);

		/* Clear the pending processed counter */
		writel(EIP197_xDR_PROC_CLR_COUNT,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);

		writel(0,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
		writel(0,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);

		writel((EIP197_DEFAULT_RING_SIZE * priv->config.cd_offset),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
	}

	/* Result Descriptor Ring prepare */
	for (i = 0; i < priv->config.rings; i++) {
		/* Disable external triggering */
		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Clear the pending prepared counter */
		writel(EIP197_xDR_PREP_CLR_COUNT,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);

		/* Clear the pending processed counter */
		writel(EIP197_xDR_PROC_CLR_COUNT,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);

		writel(0,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
		writel(0,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);

		/* Ring size */
		writel((EIP197_DEFAULT_RING_SIZE * priv->config.rd_offset),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
	}

	for (pe = 0; pe < priv->config.pes; pe++) {
		/* Enable command descriptor rings */
		writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
		       EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));

		/* Enable result descriptor rings */
		writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
		       EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
	}

	/* Clear any HIA interrupt */
	writel(GENMASK(30, 20), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);

	if (priv->flags & EIP197_SIMPLE_TRC) {
		writel(EIP197_STRC_CONFIG_INIT |
		       EIP197_STRC_CONFIG_LARGE_REC(EIP197_CS_TRC_REC_WC) |
		       EIP197_STRC_CONFIG_SMALL_REC(EIP197_CS_TRC_REC_WC),
		       priv->base + EIP197_STRC_CONFIG);
		writel(EIP197_PE_EIP96_TOKEN_CTRL2_CTX_DONE,
		       EIP197_PE(priv) + EIP197_PE_EIP96_TOKEN_CTRL2(0));
	} else if (priv->flags & SAFEXCEL_HW_EIP197) {
		ret = eip197_trc_cache_init(priv);
		if (ret)
			return ret;
	}

	if (priv->flags & EIP197_ICE) {
		ret = eip197_load_firmwares(priv);
		if (ret)
			return ret;
	}

	return safexcel_hw_setup_cdesc_rings(priv) ?:
	       safexcel_hw_setup_rdesc_rings(priv) ?:
	       0;
}
/* Called with ring's lock taken */
static void safexcel_try_push_requests(struct safexcel_crypto_priv *priv,
				       int ring)
{
	int coal = min_t(int, priv->ring[ring].requests, EIP197_MAX_BATCH_SZ);

	if (!coal)
		return;

	/* Configure when we want an interrupt */
	writel(EIP197_HIA_RDR_THRESH_PKT_MODE |
	       EIP197_HIA_RDR_THRESH_PROC_PKT(coal),
	       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_THRESH);
}
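
/*
 * The threshold programmed above coalesces completion interrupts: in packet
 * mode the RDR only raises its interrupt once "coal" packets (at most
 * EIP197_MAX_BATCH_SZ) have been processed, rather than signalling every
 * single request.
 */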
void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
{
	struct crypto_async_request *req, *backlog;
	struct safexcel_context *ctx;
	int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;

	/* If a request wasn't properly dequeued because of a lack of resources,
	 * process it first.
	 */
	req = priv->ring[ring].req;
	backlog = priv->ring[ring].backlog;
	if (req)
		goto handle_req;

	while (true) {
		spin_lock_bh(&priv->ring[ring].queue_lock);
		backlog = crypto_get_backlog(&priv->ring[ring].queue);
		req = crypto_dequeue_request(&priv->ring[ring].queue);
		spin_unlock_bh(&priv->ring[ring].queue_lock);

		if (!req) {
			priv->ring[ring].req = NULL;
			priv->ring[ring].backlog = NULL;
			goto finalize;
		}

handle_req:
		ctx = crypto_tfm_ctx(req->tfm);
		ret = ctx->send(req, ring, &commands, &results);
		if (ret)
			goto request_failed;

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		/* In case the send() helper did not issue any command to push
		 * to the engine because the input data was cached, continue to
		 * dequeue other requests as this is valid and not an error.
		 */
		if (!commands && !results)
			continue;

		cdesc += commands;
		rdesc += results;
		nreq++;
	}

request_failed:
	/* Not enough resources to handle all the requests. Bail out and save
	 * the request and the backlog for the next dequeue call (per-ring).
	 */
	priv->ring[ring].req = req;
	priv->ring[ring].backlog = backlog;

finalize:
	if (!nreq)
		return;

	spin_lock_bh(&priv->ring[ring].lock);

	priv->ring[ring].requests += nreq;

	if (!priv->ring[ring].busy) {
		safexcel_try_push_requests(priv, ring);
		priv->ring[ring].busy = true;
	}

	spin_unlock_bh(&priv->ring[ring].lock);

	/* let the RDR know we have pending descriptors */
	writel((rdesc * priv->config.rd_offset),
	       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);

	/* let the CDR know we have pending descriptors */
	writel((cdesc * priv->config.cd_offset),
	       EIP197_HIA_CDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
}
inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
				       void *rdp)
{
	struct safexcel_result_desc *rdesc = rdp;
	struct result_data_desc *result_data = rdp + priv->config.res_offset;

	if (likely((!rdesc->last_seg) || /* Rest only valid if last seg! */
		   ((!rdesc->descriptor_overflow) &&
		    (!rdesc->buffer_overflow) &&
		    (!result_data->error_code))))
		return 0;

	if (rdesc->descriptor_overflow)
		dev_err(priv->dev, "Descriptor overflow detected");

	if (rdesc->buffer_overflow)
		dev_err(priv->dev, "Buffer overflow detected");

	if (result_data->error_code & 0x4066) {
		/* Fatal error (bits 1,2,5,6 & 14) */
		dev_err(priv->dev,
			"result descriptor error (%x)",
			result_data->error_code);
		return -EIO;
	} else if (result_data->error_code &
		   (BIT(7) | BIT(4) | BIT(3) | BIT(0))) {
		/*
		 * Give priority over authentication fails:
		 * Blocksize, length & overflow errors,
		 * something wrong with the input!
		 */
		return -EINVAL;
	} else if (result_data->error_code & BIT(9)) {
		/* Authentication failed */
		return -EBADMSG;
	}

	/* All other non-fatal errors */
	return -EINVAL;
}
inline void safexcel_rdr_req_set(struct safexcel_crypto_priv *priv,
				 int ring,
				 struct safexcel_result_desc *rdesc,
				 struct crypto_async_request *req)
{
	int i = safexcel_ring_rdr_rdesc_index(priv, ring, rdesc);

	priv->ring[ring].rdr_req[i] = req;
}

inline struct crypto_async_request *
safexcel_rdr_req_get(struct safexcel_crypto_priv *priv, int ring)
{
	int i = safexcel_ring_first_rdr_index(priv, ring);

	return priv->ring[ring].rdr_req[i];
}
void safexcel_complete(struct safexcel_crypto_priv *priv, int ring)
{
	struct safexcel_command_desc *cdesc;

	/* Acknowledge the command descriptors */
	do {
		cdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].cdr);
		if (IS_ERR(cdesc)) {
			dev_err(priv->dev,
				"Could not retrieve the command descriptor\n");
			return;
		}
	} while (!cdesc->last_seg);
}
void safexcel_inv_complete(struct crypto_async_request *req, int error)
{
	struct safexcel_inv_result *result = req->data;

	if (error == -EINPROGRESS)
		return;

	result->error = error;
	complete(&result->completion);
}
int safexcel_invalidate_cache(struct crypto_async_request *async,
			      struct safexcel_crypto_priv *priv,
			      dma_addr_t ctxr_dma, int ring)
{
	struct safexcel_command_desc *cdesc;
	struct safexcel_result_desc *rdesc;
	struct safexcel_token *dmmy;
	int ret = 0;

	/* Prepare command descriptor */
	cdesc = safexcel_add_cdesc(priv, ring, true, true, 0, 0, 0, ctxr_dma,
				   &dmmy);
	if (IS_ERR(cdesc))
		return PTR_ERR(cdesc);

	cdesc->control_data.type = EIP197_TYPE_EXTENDED;
	cdesc->control_data.options = 0;
	cdesc->control_data.context_lo &= ~EIP197_CONTEXT_SIZE_MASK;
	cdesc->control_data.control0 = CONTEXT_CONTROL_INV_TR;

	/* Prepare result descriptor */
	rdesc = safexcel_add_rdesc(priv, ring, true, true, 0, 0);

	if (IS_ERR(rdesc)) {
		ret = PTR_ERR(rdesc);
		goto cdesc_rollback;
	}

	safexcel_rdr_req_set(priv, ring, rdesc, async);

	return ret;

cdesc_rollback:
	safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);

	return ret;
}
static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv *priv,
						      int ring)
{
	struct crypto_async_request *req;
	struct safexcel_context *ctx;
	int ret, i, nreq, ndesc, tot_descs, handled = 0;
	bool should_complete;

handle_results:
	tot_descs = 0;

	nreq = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
	nreq >>= EIP197_xDR_PROC_xD_PKT_OFFSET;
	nreq &= EIP197_xDR_PROC_xD_PKT_MASK;
	if (!nreq)
		goto requests_left;

	for (i = 0; i < nreq; i++) {
		req = safexcel_rdr_req_get(priv, ring);

		ctx = crypto_tfm_ctx(req->tfm);
		ndesc = ctx->handle_result(priv, ring, req,
					   &should_complete, &ret);
		if (ndesc < 0) {
			dev_err(priv->dev, "failed to handle result (%d)\n",
				ndesc);
			goto acknowledge;
		}

		if (should_complete) {
			local_bh_disable();
			req->complete(req, ret);
			local_bh_enable();
		}

		tot_descs += ndesc;
		handled++;
	}

acknowledge:
	if (i)
		writel(EIP197_xDR_PROC_xD_PKT(i) |
		       (tot_descs * priv->config.rd_offset),
		       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);

	/* If the number of requests overflowed the counter, try to proceed more
	 * requests.
	 */
	if (nreq == EIP197_xDR_PROC_xD_PKT_MASK)
		goto handle_results;

requests_left:
	spin_lock_bh(&priv->ring[ring].lock);

	priv->ring[ring].requests -= handled;
	safexcel_try_push_requests(priv, ring);

	if (!priv->ring[ring].requests)
		priv->ring[ring].busy = false;

	spin_unlock_bh(&priv->ring[ring].lock);
}
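
/*
 * Note: the processed-packet field of PROC_COUNT saturates at
 * EIP197_xDR_PROC_xD_PKT_MASK, which is why the handler above jumps back to
 * handle_results when it reads that value; more requests may have completed
 * than a single pass could account for.
 */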
static void safexcel_dequeue_work(struct work_struct *work)
{
	struct safexcel_work_data *data =
			container_of(work, struct safexcel_work_data, work);

	safexcel_dequeue(data->priv, data->ring);
}
struct safexcel_ring_irq_data {
	struct safexcel_crypto_priv *priv;
	int ring;
};
static irqreturn_t safexcel_irq_ring(int irq, void *data)
{
	struct safexcel_ring_irq_data *irq_data = data;
	struct safexcel_crypto_priv *priv = irq_data->priv;
	int ring = irq_data->ring, rc = IRQ_NONE;
	u32 status, stat;

	status = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLED_STAT(ring));
	if (!status)
		return rc;

	/* RDR interrupts */
	if (status & EIP197_RDR_IRQ(ring)) {
		stat = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);

		if (unlikely(stat & EIP197_xDR_ERR)) {
			/*
			 * Fatal error, the RDR is unusable and must be
			 * reinitialized. This should not happen under
			 * normal circumstances.
			 */
			dev_err(priv->dev, "RDR: fatal error.\n");
		} else if (likely(stat & EIP197_xDR_THRESH)) {
			rc = IRQ_WAKE_THREAD;
		}

		/* ACK the interrupts */
		writel(stat & 0xff,
		       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);
	}

	/* ACK the interrupts */
	writel(status, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ACK(ring));

	return rc;
}
static irqreturn_t safexcel_irq_ring_thread(int irq, void *data)
{
	struct safexcel_ring_irq_data *irq_data = data;
	struct safexcel_crypto_priv *priv = irq_data->priv;
	int ring = irq_data->ring;

	safexcel_handle_result_descriptor(priv, ring);

	queue_work(priv->ring[ring].workqueue,
		   &priv->ring[ring].work_data.work);

	return IRQ_HANDLED;
}
static int safexcel_request_ring_irq(void *pdev, int irqid,
				     int is_pci_dev,
				     irq_handler_t handler,
				     irq_handler_t threaded_handler,
				     struct safexcel_ring_irq_data *ring_irq_priv)
{
	int ret, irq;
	struct device *dev;

	if (IS_ENABLED(CONFIG_PCI) && is_pci_dev) {
		struct pci_dev *pci_pdev = pdev;

		dev = &pci_pdev->dev;
		irq = pci_irq_vector(pci_pdev, irqid);
		if (irq < 0) {
			dev_err(dev, "unable to get device MSI IRQ %d (err %d)\n",
				irqid, irq);
			return irq;
		}
	} else if (IS_ENABLED(CONFIG_OF)) {
		struct platform_device *plf_pdev = pdev;
		char irq_name[6] = {0}; /* "ringX\0" */

		snprintf(irq_name, 6, "ring%d", irqid);
		dev = &plf_pdev->dev;
		irq = platform_get_irq_byname(plf_pdev, irq_name);

		if (irq < 0) {
			dev_err(dev, "unable to get IRQ '%s' (err %d)\n",
				irq_name, irq);
			return irq;
		}
	} else {
		return -ENXIO;
	}

	ret = devm_request_threaded_irq(dev, irq, handler,
					threaded_handler, IRQF_ONESHOT,
					dev_name(dev), ring_irq_priv);
	if (ret) {
		dev_err(dev, "unable to request IRQ %d\n", irq);
		return ret;
	}

	return irq;
}
static struct safexcel_alg_template *safexcel_algs[] = {
	&safexcel_alg_ecb_des,
	&safexcel_alg_cbc_des,
	&safexcel_alg_ecb_des3_ede,
	&safexcel_alg_cbc_des3_ede,
	&safexcel_alg_ecb_aes,
	&safexcel_alg_cbc_aes,
	&safexcel_alg_cfb_aes,
	&safexcel_alg_ofb_aes,
	&safexcel_alg_ctr_aes,
	&safexcel_alg_md5,
	&safexcel_alg_sha1,
	&safexcel_alg_sha224,
	&safexcel_alg_sha256,
	&safexcel_alg_sha384,
	&safexcel_alg_sha512,
	&safexcel_alg_hmac_md5,
	&safexcel_alg_hmac_sha1,
	&safexcel_alg_hmac_sha224,
	&safexcel_alg_hmac_sha256,
	&safexcel_alg_hmac_sha384,
	&safexcel_alg_hmac_sha512,
	&safexcel_alg_authenc_hmac_sha1_cbc_aes,
	&safexcel_alg_authenc_hmac_sha224_cbc_aes,
	&safexcel_alg_authenc_hmac_sha256_cbc_aes,
	&safexcel_alg_authenc_hmac_sha384_cbc_aes,
	&safexcel_alg_authenc_hmac_sha512_cbc_aes,
	&safexcel_alg_authenc_hmac_sha1_cbc_des3_ede,
	&safexcel_alg_authenc_hmac_sha1_ctr_aes,
	&safexcel_alg_authenc_hmac_sha224_ctr_aes,
	&safexcel_alg_authenc_hmac_sha256_ctr_aes,
	&safexcel_alg_authenc_hmac_sha384_ctr_aes,
	&safexcel_alg_authenc_hmac_sha512_ctr_aes,
	&safexcel_alg_xts_aes,
	&safexcel_alg_gcm,
	&safexcel_alg_ccm,
	&safexcel_alg_crc32,
	&safexcel_alg_cbcmac,
	&safexcel_alg_xcbcmac,
	&safexcel_alg_cmac,
	&safexcel_alg_chacha20,
	&safexcel_alg_chachapoly,
	&safexcel_alg_chachapoly_esp,
	&safexcel_alg_sm3,
	&safexcel_alg_hmac_sm3,
	&safexcel_alg_ecb_sm4,
	&safexcel_alg_cbc_sm4,
	&safexcel_alg_ofb_sm4,
	&safexcel_alg_cfb_sm4,
	&safexcel_alg_ctr_sm4,
	&safexcel_alg_authenc_hmac_sha1_cbc_sm4,
	&safexcel_alg_authenc_hmac_sm3_cbc_sm4,
	&safexcel_alg_authenc_hmac_sha1_ctr_sm4,
	&safexcel_alg_authenc_hmac_sm3_ctr_sm4,
	&safexcel_alg_sha3_224,
	&safexcel_alg_sha3_256,
	&safexcel_alg_sha3_384,
	&safexcel_alg_sha3_512,
	&safexcel_alg_hmac_sha3_224,
	&safexcel_alg_hmac_sha3_256,
	&safexcel_alg_hmac_sha3_384,
	&safexcel_alg_hmac_sha3_512,
	&safexcel_alg_authenc_hmac_sha1_cbc_des,
	&safexcel_alg_authenc_hmac_sha256_cbc_des3_ede,
	&safexcel_alg_authenc_hmac_sha224_cbc_des3_ede,
	&safexcel_alg_authenc_hmac_sha512_cbc_des3_ede,
	&safexcel_alg_authenc_hmac_sha384_cbc_des3_ede,
	&safexcel_alg_authenc_hmac_sha256_cbc_des,
	&safexcel_alg_authenc_hmac_sha224_cbc_des,
	&safexcel_alg_authenc_hmac_sha512_cbc_des,
	&safexcel_alg_authenc_hmac_sha384_cbc_des,
	&safexcel_alg_rfc4106_gcm,
	&safexcel_alg_rfc4543_gcm,
	&safexcel_alg_rfc4309_ccm,
};
static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv)
{
	int i, j, ret = 0;

	for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
		safexcel_algs[i]->priv = priv;

		/* Do we have all required base algorithms available? */
		if ((safexcel_algs[i]->algo_mask & priv->hwconfig.algo_flags) !=
		    safexcel_algs[i]->algo_mask)
			/* No, so don't register this ciphersuite */
			continue;

		if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
			ret = crypto_register_skcipher(&safexcel_algs[i]->alg.skcipher);
		else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
			ret = crypto_register_aead(&safexcel_algs[i]->alg.aead);
		else
			ret = crypto_register_ahash(&safexcel_algs[i]->alg.ahash);

		if (ret)
			goto fail;
	}

	return 0;

fail:
	for (j = 0; j < i; j++) {
		/* Do we have all required base algorithms available? */
		if ((safexcel_algs[j]->algo_mask & priv->hwconfig.algo_flags) !=
		    safexcel_algs[j]->algo_mask)
			/* No, so don't unregister this ciphersuite */
			continue;

		if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
			crypto_unregister_skcipher(&safexcel_algs[j]->alg.skcipher);
		else if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_AEAD)
			crypto_unregister_aead(&safexcel_algs[j]->alg.aead);
		else
			crypto_unregister_ahash(&safexcel_algs[j]->alg.ahash);
	}

	return ret;
}
static void safexcel_unregister_algorithms(struct safexcel_crypto_priv *priv)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
		/* Do we have all required base algorithms available? */
		if ((safexcel_algs[i]->algo_mask & priv->hwconfig.algo_flags) !=
		    safexcel_algs[i]->algo_mask)
			/* No, so don't unregister this ciphersuite */
			continue;

		if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
			crypto_unregister_skcipher(&safexcel_algs[i]->alg.skcipher);
		else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
			crypto_unregister_aead(&safexcel_algs[i]->alg.aead);
		else
			crypto_unregister_ahash(&safexcel_algs[i]->alg.ahash);
	}
}
static void safexcel_configure(struct safexcel_crypto_priv *priv)
{
	u32 mask = BIT(priv->hwconfig.hwdataw) - 1;

	priv->config.pes = priv->hwconfig.hwnumpes;
	priv->config.rings = min_t(u32, priv->hwconfig.hwnumrings, max_rings);
	/* Cannot currently support more rings than we have ring AICs! */
	priv->config.rings = min_t(u32, priv->config.rings,
				   priv->hwconfig.hwnumraic);

	priv->config.cd_size = EIP197_CD64_FETCH_SIZE;
	priv->config.cd_offset = (priv->config.cd_size + mask) & ~mask;
	priv->config.cdsh_offset = (EIP197_MAX_TOKENS + mask) & ~mask;

	/* res token is behind the descr, but ofs must be rounded to buswdth */
	priv->config.res_offset = (EIP197_RD64_FETCH_SIZE + mask) & ~mask;
	/* now the size of the descr is this 1st part plus the result struct */
	priv->config.rd_size = priv->config.res_offset +
			       EIP197_RD64_RESULT_SIZE;
	priv->config.rd_offset = (priv->config.rd_size + mask) & ~mask;

	/* convert dwords to bytes */
	priv->config.cd_offset *= sizeof(u32);
	priv->config.cdsh_offset *= sizeof(u32);
	priv->config.rd_offset *= sizeof(u32);
	priv->config.res_offset *= sizeof(u32);
}
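
/*
 * Rounding example for the offsets above (illustrative values): with
 * hwdataw = 2 the bus moves 4 dwords at a time, so mask = 3 and a 10-dword
 * command descriptor would be laid out at a (10 + 3) & ~3 = 12 dword pitch,
 * i.e. 48 bytes after the final dword-to-byte conversion.
 */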
static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv)
{
	struct safexcel_register_offsets *offsets = &priv->offsets;

	if (priv->flags & SAFEXCEL_HW_EIP197) {
		offsets->hia_aic	= EIP197_HIA_AIC_BASE;
		offsets->hia_aic_g	= EIP197_HIA_AIC_G_BASE;
		offsets->hia_aic_r	= EIP197_HIA_AIC_R_BASE;
		offsets->hia_aic_xdr	= EIP197_HIA_AIC_xDR_BASE;
		offsets->hia_dfe	= EIP197_HIA_DFE_BASE;
		offsets->hia_dfe_thr	= EIP197_HIA_DFE_THR_BASE;
		offsets->hia_dse	= EIP197_HIA_DSE_BASE;
		offsets->hia_dse_thr	= EIP197_HIA_DSE_THR_BASE;
		offsets->hia_gen_cfg	= EIP197_HIA_GEN_CFG_BASE;
		offsets->pe		= EIP197_PE_BASE;
		offsets->global		= EIP197_GLOBAL_BASE;
	} else {
		offsets->hia_aic	= EIP97_HIA_AIC_BASE;
		offsets->hia_aic_g	= EIP97_HIA_AIC_G_BASE;
		offsets->hia_aic_r	= EIP97_HIA_AIC_R_BASE;
		offsets->hia_aic_xdr	= EIP97_HIA_AIC_xDR_BASE;
		offsets->hia_dfe	= EIP97_HIA_DFE_BASE;
		offsets->hia_dfe_thr	= EIP97_HIA_DFE_THR_BASE;
		offsets->hia_dse	= EIP97_HIA_DSE_BASE;
		offsets->hia_dse_thr	= EIP97_HIA_DSE_THR_BASE;
		offsets->hia_gen_cfg	= EIP97_HIA_GEN_CFG_BASE;
		offsets->pe		= EIP97_PE_BASE;
		offsets->global		= EIP97_GLOBAL_BASE;
	}
}
/*
 * Generic part of probe routine, shared by platform and PCI driver
 *
 * Assumes IO resources have been mapped, private data mem has been allocated,
 * clocks have been enabled, device pointer has been assigned etc.
 */
static int safexcel_probe_generic(void *pdev,
				  struct safexcel_crypto_priv *priv,
				  int is_pci_dev)
{
	struct device *dev = priv->dev;
	u32 peid, version, mask, val, hiaopt, hwopt, peopt;
	int i, ret, hwctg;

	priv->context_pool = dmam_pool_create("safexcel-context", dev,
					      sizeof(struct safexcel_context_record),
					      1, 0);
	if (!priv->context_pool)
		return -ENOMEM;

	/*
	 * First try the EIP97 HIA version regs
	 * For the EIP197, this is guaranteed to NOT return any of the test
	 * values
	 */
	version = readl(priv->base + EIP97_HIA_AIC_BASE + EIP197_HIA_VERSION);

	mask = 0; /* do not swap */
	if (EIP197_REG_LO16(version) == EIP197_HIA_VERSION_LE) {
		priv->hwconfig.hiaver = EIP197_VERSION_MASK(version);
	} else if (EIP197_REG_HI16(version) == EIP197_HIA_VERSION_BE) {
		/* read back byte-swapped, so complement byte swap bits */
		mask = EIP197_MST_CTRL_BYTE_SWAP_BITS;
		priv->hwconfig.hiaver = EIP197_VERSION_SWAP(version);
	} else {
		/* So it wasn't an EIP97 ... maybe it's an EIP197? */
		version = readl(priv->base + EIP197_HIA_AIC_BASE +
				EIP197_HIA_VERSION);
		if (EIP197_REG_LO16(version) == EIP197_HIA_VERSION_LE) {
			priv->hwconfig.hiaver = EIP197_VERSION_MASK(version);
			priv->flags |= SAFEXCEL_HW_EIP197;
		} else if (EIP197_REG_HI16(version) ==
			   EIP197_HIA_VERSION_BE) {
			/* read back byte-swapped, so complement swap bits */
			mask = EIP197_MST_CTRL_BYTE_SWAP_BITS;
			priv->hwconfig.hiaver = EIP197_VERSION_SWAP(version);
			priv->flags |= SAFEXCEL_HW_EIP197;
		} else {
			return -ENODEV;
		}
	}

	/* Now initialize the reg offsets based on the probing info so far */
	safexcel_init_register_offsets(priv);

	/*
	 * If the version was read byte-swapped, we need to flip the device
	 * swapping. Keep in mind here, though, that what we write will also be
	 * byte-swapped ...
	 */
	if (mask) {
		val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
		val = val ^ (mask >> 24); /* toggle byte swap bits */
		writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
	}

	/*
	 * We're not done probing yet! We may fall through to here if no HIA
	 * was found at all. So, with the endianness presumably correct now and
	 * the offsets setup, *really* probe for the EIP97/EIP197.
	 */
	version = readl(EIP197_GLOBAL(priv) + EIP197_VERSION);
	if (((priv->flags & SAFEXCEL_HW_EIP197) &&
	     (EIP197_REG_LO16(version) != EIP197_VERSION_LE) &&
	     (EIP197_REG_LO16(version) != EIP196_VERSION_LE)) ||
	    ((!(priv->flags & SAFEXCEL_HW_EIP197) &&
	     (EIP197_REG_LO16(version) != EIP97_VERSION_LE)))) {
		/*
		 * We did not find the device that matched our initial probing
		 * (or our initial probing failed). Report the appropriate error.
		 */
		dev_err(priv->dev, "Probing for EIP97/EIP19x failed - no such device (read %08x)\n",
			version);
		return -ENODEV;
	}

	priv->hwconfig.hwver = EIP197_VERSION_MASK(version);
	hwctg = version >> 28;
	peid = version & 255;

	/* Detect EIP206 processing pipe */
	version = readl(EIP197_PE(priv) + EIP197_PE_VERSION(0));
	if (EIP197_REG_LO16(version) != EIP206_VERSION_LE) {
		dev_err(priv->dev, "EIP%d: EIP206 not detected\n", peid);
		return -ENODEV;
	}
	priv->hwconfig.ppver = EIP197_VERSION_MASK(version);

	/* Detect EIP96 packet engine and version */
	version = readl(EIP197_PE(priv) + EIP197_PE_EIP96_VERSION(0));
	if (EIP197_REG_LO16(version) != EIP96_VERSION_LE) {
		dev_err(dev, "EIP%d: EIP96 not detected.\n", peid);
		return -ENODEV;
	}
	priv->hwconfig.pever = EIP197_VERSION_MASK(version);

	hwopt = readl(EIP197_GLOBAL(priv) + EIP197_OPTIONS);
	hiaopt = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_OPTIONS);

	if (priv->flags & SAFEXCEL_HW_EIP197) {
		/* EIP197 */
		peopt = readl(EIP197_PE(priv) + EIP197_PE_OPTIONS(0));

		priv->hwconfig.hwdataw = (hiaopt >> EIP197_HWDATAW_OFFSET) &
					 EIP197_HWDATAW_MASK;
		priv->hwconfig.hwcfsize = ((hiaopt >> EIP197_CFSIZE_OFFSET) &
					   EIP197_CFSIZE_MASK) +
					  EIP197_CFSIZE_ADJUST;
		priv->hwconfig.hwrfsize = ((hiaopt >> EIP197_RFSIZE_OFFSET) &
					   EIP197_RFSIZE_MASK) +
					  EIP197_RFSIZE_ADJUST;
		priv->hwconfig.hwnumpes = (hiaopt >> EIP197_N_PES_OFFSET) &
					  EIP197_N_PES_MASK;
		priv->hwconfig.hwnumrings = (hiaopt >> EIP197_N_RINGS_OFFSET) &
					    EIP197_N_RINGS_MASK;
		if (hiaopt & EIP197_HIA_OPT_HAS_PE_ARB)
			priv->flags |= EIP197_PE_ARB;
		if (EIP206_OPT_ICE_TYPE(peopt) == 1)
			priv->flags |= EIP197_ICE;
		/* If not a full TRC, then assume simple TRC */
		if (!(hwopt & EIP197_OPT_HAS_TRC))
			priv->flags |= EIP197_SIMPLE_TRC;
		/* EIP197 always has SOME form of TRC */
		priv->flags |= EIP197_TRC_CACHE;
	} else {
		/* EIP97 */
		priv->hwconfig.hwdataw = (hiaopt >> EIP197_HWDATAW_OFFSET) &
					 EIP97_HWDATAW_MASK;
		priv->hwconfig.hwcfsize = (hiaopt >> EIP97_CFSIZE_OFFSET) &
					  EIP97_CFSIZE_MASK;
		priv->hwconfig.hwrfsize = (hiaopt >> EIP97_RFSIZE_OFFSET) &
					  EIP97_RFSIZE_MASK;
		priv->hwconfig.hwnumpes = 1; /* by definition */
		priv->hwconfig.hwnumrings = (hiaopt >> EIP197_N_RINGS_OFFSET) &
					    EIP197_N_RINGS_MASK;
	}

	/* Scan for ring AIC's */
	for (i = 0; i < EIP197_MAX_RING_AIC; i++) {
		version = readl(EIP197_HIA_AIC_R(priv) +
				EIP197_HIA_AIC_R_VERSION(i));
		if (EIP197_REG_LO16(version) != EIP201_VERSION_LE)
			break;
	}
	priv->hwconfig.hwnumraic = i;
	/* Low-end EIP196 may not have any ring AIC's ... */
	if (!priv->hwconfig.hwnumraic) {
		dev_err(priv->dev, "No ring interrupt controller present!\n");
		return -ENODEV;
	}

	/* Get supported algorithms from EIP96 transform engine */
	priv->hwconfig.algo_flags = readl(EIP197_PE(priv) +
				    EIP197_PE_EIP96_OPTIONS(0));

	/* Print single info line describing what we just detected */
	dev_info(priv->dev, "EIP%d:%x(%d,%d,%d,%d)-HIA:%x(%d,%d,%d),PE:%x/%x,alg:%08x\n",
		 peid, priv->hwconfig.hwver, hwctg, priv->hwconfig.hwnumpes,
		 priv->hwconfig.hwnumrings, priv->hwconfig.hwnumraic,
		 priv->hwconfig.hiaver, priv->hwconfig.hwdataw,
		 priv->hwconfig.hwcfsize, priv->hwconfig.hwrfsize,
		 priv->hwconfig.ppver, priv->hwconfig.pever,
		 priv->hwconfig.algo_flags);

	safexcel_configure(priv);

	if (IS_ENABLED(CONFIG_PCI) && priv->version == EIP197_DEVBRD) {
		/*
		 * Request MSI vectors for global + 1 per ring -
		 * or just 1 for older dev images
		 */
		struct pci_dev *pci_pdev = pdev;

		ret = pci_alloc_irq_vectors(pci_pdev,
					    priv->config.rings + 1,
					    priv->config.rings + 1,
					    PCI_IRQ_MSI | PCI_IRQ_MSIX);
		if (ret < 0) {
			dev_err(dev, "Failed to allocate PCI MSI interrupts\n");
			return ret;
		}
	}

	/* Register the ring IRQ handlers and configure the rings */
	priv->ring = devm_kcalloc(dev, priv->config.rings,
				  sizeof(*priv->ring),
				  GFP_KERNEL);
	if (!priv->ring)
		return -ENOMEM;

	for (i = 0; i < priv->config.rings; i++) {
		char wq_name[9] = {0};
		int irq;
		struct safexcel_ring_irq_data *ring_irq;

		ret = safexcel_init_ring_descriptors(priv,
						     &priv->ring[i].cdr,
						     &priv->ring[i].rdr);
		if (ret) {
			dev_err(dev, "Failed to initialize rings\n");
			return ret;
		}

		priv->ring[i].rdr_req = devm_kcalloc(dev,
			EIP197_DEFAULT_RING_SIZE,
			sizeof(priv->ring[i].rdr_req),
			GFP_KERNEL);
		if (!priv->ring[i].rdr_req)
			return -ENOMEM;

		ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL);
		if (!ring_irq)
			return -ENOMEM;

		ring_irq->priv = priv;
		ring_irq->ring = i;

		irq = safexcel_request_ring_irq(pdev,
						EIP197_IRQ_NUMBER(i, is_pci_dev),
						is_pci_dev,
						safexcel_irq_ring,
						safexcel_irq_ring_thread,
						ring_irq);
		if (irq < 0) {
			dev_err(dev, "Failed to get IRQ ID for ring %d\n", i);
			return irq;
		}

		priv->ring[i].work_data.priv = priv;
		priv->ring[i].work_data.ring = i;
		INIT_WORK(&priv->ring[i].work_data.work,
			  safexcel_dequeue_work);

		snprintf(wq_name, 9, "wq_ring%d", i);
		priv->ring[i].workqueue =
			create_singlethread_workqueue(wq_name);
		if (!priv->ring[i].workqueue)
			return -ENOMEM;

		priv->ring[i].requests = 0;
		priv->ring[i].busy = false;

		crypto_init_queue(&priv->ring[i].queue,
				  EIP197_DEFAULT_RING_SIZE);

		spin_lock_init(&priv->ring[i].lock);
		spin_lock_init(&priv->ring[i].queue_lock);
	}

	atomic_set(&priv->ring_used, 0);

	ret = safexcel_hw_init(priv);
	if (ret) {
		dev_err(dev, "HW init failed (%d)\n", ret);
		return ret;
	}

	ret = safexcel_register_algorithms(priv);
	if (ret) {
		dev_err(dev, "Failed to register algorithms (%d)\n", ret);
		return ret;
	}

	return 0;
}
static void safexcel_hw_reset_rings(struct safexcel_crypto_priv *priv)
{
	int i;

	for (i = 0; i < priv->config.rings; i++) {
		/* clear any pending interrupt */
		writel(GENMASK(5, 0), EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
		writel(GENMASK(7, 0), EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);

		/* Reset the CDR base address */
		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

		/* Reset the RDR base address */
		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
	}
}
/* for Device Tree platform driver */

static int safexcel_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct safexcel_crypto_priv *priv;
	int ret;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = dev;
	priv->version = (enum safexcel_eip_version)of_device_get_match_data(dev);

	platform_set_drvdata(pdev, priv);

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base)) {
		dev_err(dev, "failed to get resource\n");
		return PTR_ERR(priv->base);
	}

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	ret = PTR_ERR_OR_ZERO(priv->clk);
	/* The clock isn't mandatory */
	if (ret != -ENOENT) {
		if (ret)
			return ret;

		ret = clk_prepare_enable(priv->clk);
		if (ret) {
			dev_err(dev, "unable to enable clk (%d)\n", ret);
			return ret;
		}
	}

	priv->reg_clk = devm_clk_get(&pdev->dev, "reg");
	ret = PTR_ERR_OR_ZERO(priv->reg_clk);
	/* The clock isn't mandatory */
	if (ret != -ENOENT) {
		if (ret)
			goto err_core_clk;

		ret = clk_prepare_enable(priv->reg_clk);
		if (ret) {
			dev_err(dev, "unable to enable reg clk (%d)\n", ret);
			goto err_core_clk;
		}
	}

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret)
		goto err_reg_clk;

	/* Generic EIP97/EIP197 device probing */
	ret = safexcel_probe_generic(pdev, priv, 0);
	if (ret)
		goto err_reg_clk;

	return 0;

err_reg_clk:
	clk_disable_unprepare(priv->reg_clk);
err_core_clk:
	clk_disable_unprepare(priv->clk);
	return ret;
}
static int safexcel_remove(struct platform_device *pdev)
{
	struct safexcel_crypto_priv *priv = platform_get_drvdata(pdev);
	int i;

	safexcel_unregister_algorithms(priv);
	safexcel_hw_reset_rings(priv);

	clk_disable_unprepare(priv->reg_clk);
	clk_disable_unprepare(priv->clk);

	for (i = 0; i < priv->config.rings; i++)
		destroy_workqueue(priv->ring[i].workqueue);

	return 0;
}
static const struct of_device_id safexcel_of_match_table[] = {
	{
		.compatible = "inside-secure,safexcel-eip97ies",
		.data = (void *)EIP97IES_MRVL,
	},
	{
		.compatible = "inside-secure,safexcel-eip197b",
		.data = (void *)EIP197B_MRVL,
	},
	{
		.compatible = "inside-secure,safexcel-eip197d",
		.data = (void *)EIP197D_MRVL,
	},
	/* For backward compatibility and intended for generic use */
	{
		.compatible = "inside-secure,safexcel-eip97",
		.data = (void *)EIP97IES_MRVL,
	},
	{
		.compatible = "inside-secure,safexcel-eip197",
		.data = (void *)EIP197B_MRVL,
	},
	{},
};
static struct platform_driver crypto_safexcel = {
	.probe		= safexcel_probe,
	.remove		= safexcel_remove,
	.driver		= {
		.name	= "crypto-safexcel",
		.of_match_table = safexcel_of_match_table,
	},
};
/* PCIE devices - i.e. Inside Secure development boards */

static int safexcel_pci_probe(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct safexcel_crypto_priv *priv;
	void __iomem *pciebase;
	int rc;
	u32 val;

	dev_dbg(dev, "Probing PCIE device: vendor %04x, device %04x, subv %04x, subdev %04x, ctxt %lx\n",
		ent->vendor, ent->device, ent->subvendor,
		ent->subdevice, ent->driver_data);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = dev;
	priv->version = (enum safexcel_eip_version)ent->driver_data;

	pci_set_drvdata(pdev, priv);

	/* enable the device */
	rc = pcim_enable_device(pdev);
	if (rc) {
		dev_err(dev, "Failed to enable PCI device\n");
		return rc;
	}

	/* take ownership of PCI BAR0 */
	rc = pcim_iomap_regions(pdev, 1, "crypto_safexcel");
	if (rc) {
		dev_err(dev, "Failed to map IO region for BAR0\n");
		return rc;
	}
	priv->base = pcim_iomap_table(pdev)[0];

	if (priv->version == EIP197_DEVBRD) {
		dev_dbg(dev, "Device identified as FPGA based development board - applying HW reset\n");

		rc = pcim_iomap_regions(pdev, 4, "crypto_safexcel");
		if (rc) {
			dev_err(dev, "Failed to map IO region for BAR4\n");
			return rc;
		}

		pciebase = pcim_iomap_table(pdev)[2];
		val = readl(pciebase + EIP197_XLX_IRQ_BLOCK_ID_ADDR);
		if ((val >> 16) == EIP197_XLX_IRQ_BLOCK_ID_VALUE) {
			dev_dbg(dev, "Detected Xilinx PCIE IRQ block version %d, multiple MSI support enabled\n",
				(val & 0xff));

			/* Setup MSI identity map mapping */
			writel(EIP197_XLX_USER_VECT_LUT0_IDENT,
			       pciebase + EIP197_XLX_USER_VECT_LUT0_ADDR);
			writel(EIP197_XLX_USER_VECT_LUT1_IDENT,
			       pciebase + EIP197_XLX_USER_VECT_LUT1_ADDR);
			writel(EIP197_XLX_USER_VECT_LUT2_IDENT,
			       pciebase + EIP197_XLX_USER_VECT_LUT2_ADDR);
			writel(EIP197_XLX_USER_VECT_LUT3_IDENT,
			       pciebase + EIP197_XLX_USER_VECT_LUT3_ADDR);

			/* Enable all device interrupts */
			writel(GENMASK(31, 0),
			       pciebase + EIP197_XLX_USER_INT_ENB_MSK);
		} else {
			dev_err(dev, "Unrecognised IRQ block identifier %x\n",
				val);
			return -ENODEV;
		}

		/* HW reset FPGA dev board */
		/* assert reset */
		writel(1, priv->base + EIP197_XLX_GPIO_BASE);
		wmb(); /* maintain strict ordering for accesses here */
		/* deassert reset */
		writel(0, priv->base + EIP197_XLX_GPIO_BASE);
		wmb(); /* maintain strict ordering for accesses here */
	}

	/* enable bus mastering */
	pci_set_master(pdev);

	/* Generic EIP97/EIP197 device probing */
	rc = safexcel_probe_generic(pdev, priv, 1);
	return rc;
}
static void safexcel_pci_remove(struct pci_dev *pdev)
{
	struct safexcel_crypto_priv *priv = pci_get_drvdata(pdev);
	int i;

	safexcel_unregister_algorithms(priv);

	for (i = 0; i < priv->config.rings; i++)
		destroy_workqueue(priv->ring[i].workqueue);

	safexcel_hw_reset_rings(priv);
}
static const struct pci_device_id safexcel_pci_ids[] = {
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_XILINX, 0x9038,
			       0x16ae, 0xc522),
		.driver_data = EIP197_DEVBRD,
	},
	{},
};

MODULE_DEVICE_TABLE(pci, safexcel_pci_ids);
static struct pci_driver safexcel_pci_driver = {
	.name		= "crypto-safexcel",
	.id_table	= safexcel_pci_ids,
	.probe		= safexcel_pci_probe,
	.remove		= safexcel_pci_remove,
};
static int __init safexcel_init(void)
{
	int ret;

	/* Register PCI driver */
	ret = pci_register_driver(&safexcel_pci_driver);

	/* Register platform driver */
	if (IS_ENABLED(CONFIG_OF) && !ret) {
		ret = platform_driver_register(&crypto_safexcel);
		if (ret)
			pci_unregister_driver(&safexcel_pci_driver);
	}

	return ret;
}
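
/*
 * Registration order above: the PCI driver is registered first and the
 * platform driver only when that succeeded, so a platform registration
 * failure can simply unwind the PCI registration again.
 */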
static void __exit safexcel_exit(void)
{
	/* Unregister platform driver */
	if (IS_ENABLED(CONFIG_OF))
		platform_driver_unregister(&crypto_safexcel);

	/* Unregister PCI driver if successfully registered before */
	pci_unregister_driver(&safexcel_pci_driver);
}

module_init(safexcel_init);
module_exit(safexcel_exit);
MODULE_AUTHOR("Antoine Tenart <antoine.tenart@free-electrons.com>");
MODULE_AUTHOR("Ofer Heifetz <oferh@marvell.com>");
MODULE_AUTHOR("Igal Liberman <igall@marvell.com>");
MODULE_DESCRIPTION("Support for SafeXcel cryptographic engines: EIP97 & EIP197");
MODULE_LICENSE("GPL v2");