// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018-2019 HiSilicon Limited. */
#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/topology.h>
#include <linux/uacce.h>
#include "hpre.h"
#define HPRE_CTRL_CNT_CLR_CE_BIT	BIT(0)
#define HPRE_CTRL_CNT_CLR_CE	0x301000
#define HPRE_FSM_MAX_CNT	0x301008
#define HPRE_VFG_AXQOS	0x30100c
#define HPRE_VFG_AXCACHE	0x301010
#define HPRE_RDCHN_INI_CFG	0x301014
#define HPRE_AWUSR_FP_CFG	0x301018
#define HPRE_BD_ENDIAN	0x301020
#define HPRE_ECC_BYPASS	0x301024
#define HPRE_RAS_WIDTH_CFG	0x301028
#define HPRE_POISON_BYPASS	0x30102c
#define HPRE_BD_ARUSR_CFG	0x301030
#define HPRE_BD_AWUSR_CFG	0x301034
#define HPRE_TYPES_ENB	0x301038
#define HPRE_RSA_ENB	BIT(0)
#define HPRE_ECC_ENB	BIT(1)
#define HPRE_DATA_RUSER_CFG	0x30103c
#define HPRE_DATA_WUSER_CFG	0x301040
#define HPRE_INT_MASK	0x301400
#define HPRE_INT_STATUS	0x301800
#define HPRE_HAC_INT_MSK	0x301400
#define HPRE_HAC_RAS_CE_ENB	0x301410
#define HPRE_HAC_RAS_NFE_ENB	0x301414
#define HPRE_HAC_RAS_FE_ENB	0x301418
#define HPRE_HAC_INT_SET	0x301500
#define HPRE_RNG_TIMEOUT_NUM	0x301A34
#define HPRE_CORE_INT_ENABLE	0
#define HPRE_RDCHN_INI_ST	0x301a00
#define HPRE_CLSTR_BASE	0x302000
#define HPRE_CORE_EN_OFFSET	0x04
#define HPRE_CORE_INI_CFG_OFFSET	0x20
#define HPRE_CORE_INI_STATUS_OFFSET	0x80
#define HPRE_CORE_HTBT_WARN_OFFSET	0x8c
#define HPRE_CORE_IS_SCHD_OFFSET	0x90

#define HPRE_RAS_CE_ENB	0x301410
#define HPRE_RAS_NFE_ENB	0x301414
#define HPRE_RAS_FE_ENB	0x301418
#define HPRE_OOO_SHUTDOWN_SEL	0x301a3c
#define HPRE_HAC_RAS_FE_ENABLE	0

#define HPRE_CORE_ENB		(HPRE_CLSTR_BASE + HPRE_CORE_EN_OFFSET)
#define HPRE_CORE_INI_CFG	(HPRE_CLSTR_BASE + HPRE_CORE_INI_CFG_OFFSET)
#define HPRE_CORE_INI_STATUS	(HPRE_CLSTR_BASE + HPRE_CORE_INI_STATUS_OFFSET)
#define HPRE_HAC_ECC1_CNT	0x301a04
#define HPRE_HAC_ECC2_CNT	0x301a08
#define HPRE_HAC_SOURCE_INT	0x301600
#define HPRE_CLSTR_ADDR_INTRVL	0x1000
#define HPRE_CLUSTER_INQURY	0x100
#define HPRE_CLSTR_ADDR_INQRY_RSLT	0x104
#define HPRE_PASID_EN_BIT	9
#define HPRE_REG_RD_INTVRL_US	10
#define HPRE_REG_RD_TMOUT_US	1000
#define HPRE_DBGFS_VAL_MAX_LEN	20
#define PCI_DEVICE_ID_HUAWEI_HPRE_PF	0xa258
#define HPRE_QM_USR_CFG_MASK	GENMASK(31, 1)
#define HPRE_QM_AXI_CFG_MASK	GENMASK(15, 0)
#define HPRE_QM_VFG_AX_MASK	GENMASK(7, 0)
#define HPRE_BD_USR_MASK	GENMASK(1, 0)
#define HPRE_PREFETCH_CFG	0x301130
#define HPRE_SVA_PREFTCH_DFX	0x30115C
#define HPRE_PREFETCH_ENABLE	(~(BIT(0) | BIT(30)))
#define HPRE_PREFETCH_DISABLE	BIT(30)
#define HPRE_SVA_DISABLE_READY	(BIT(4) | BIT(8))

#define HPRE_CLKGATE_CTL	0x301a10
#define HPRE_PEH_CFG_AUTO_GATE	0x301a2c
#define HPRE_CLUSTER_DYN_CTL	0x302010
#define HPRE_CORE_SHB_CFG	0x302088
#define HPRE_CLKGATE_CTL_EN	BIT(0)
#define HPRE_PEH_CFG_AUTO_GATE_EN	BIT(0)
#define HPRE_CLUSTER_DYN_CTL_EN	BIT(0)
#define HPRE_CORE_GATE_EN	(BIT(30) | BIT(31))

#define HPRE_AM_OOO_SHUTDOWN_ENB	0x301044
#define HPRE_AM_OOO_SHUTDOWN_ENABLE	BIT(0)
#define HPRE_WR_MSI_PORT	BIT(2)

#define HPRE_CORE_ECC_2BIT_ERR	BIT(1)
#define HPRE_OOO_ECC_2BIT_ERR	BIT(5)

#define HPRE_QM_BME_FLR	BIT(7)
#define HPRE_QM_PM_FLR	BIT(11)
#define HPRE_QM_SRIOV_FLR	BIT(12)

#define HPRE_SHAPER_TYPE_RATE	640
#define HPRE_VIA_MSI_DSM	1
#define HPRE_SQE_MASK_OFFSET	8
#define HPRE_SQE_MASK_LEN	44
#define HPRE_CTX_Q_NUM_DEF	1

#define HPRE_DFX_BASE	0x301000
#define HPRE_DFX_COMMON1	0x301400
#define HPRE_DFX_COMMON2	0x301A00
#define HPRE_DFX_CORE	0x302000
#define HPRE_DFX_BASE_LEN	0x55
#define HPRE_DFX_COMMON1_LEN	0x41
#define HPRE_DFX_COMMON2_LEN	0xE
#define HPRE_DFX_CORE_LEN	0x43
static const char hpre_name[] = "hisi_hpre";
static struct dentry *hpre_debugfs_root;
static const struct pci_device_id hpre_dev_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_HPRE_PF) },
	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_HPRE_VF) },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(pci, hpre_dev_ids);
struct hpre_hw_error {
	u32 int_msk;
	const char *msg;
};
static const struct qm_dev_alg hpre_dev_algs[] = {
static struct hisi_qm_list hpre_devices = {
	.register_to_crypto	= hpre_algs_register,
	.unregister_from_crypto	= hpre_algs_unregister,
};
static const char * const hpre_debug_file_name[] = {
	[HPRE_CLEAR_ENABLE] = "rdclr_en",
	[HPRE_CLUSTER_CTRL] = "cluster_ctrl",
};
enum hpre_cap_type {
	HPRE_QM_NFE_MASK_CAP,
	HPRE_QM_RESET_MASK_CAP,
	HPRE_QM_OOO_SHUTDOWN_MASK_CAP,
	HPRE_QM_CE_MASK_CAP,
	HPRE_NFE_MASK_CAP,
	HPRE_RESET_MASK_CAP,
	HPRE_OOO_SHUTDOWN_MASK_CAP,
	HPRE_CE_MASK_CAP,
	HPRE_CLUSTER_NUM_CAP,
	HPRE_CORE_TYPE_NUM_CAP,
	HPRE_CORE_NUM_CAP,
	HPRE_CLUSTER_CORE_NUM_CAP,
	HPRE_CORE_ENABLE_BITMAP_CAP,
	HPRE_DRV_ALG_BITMAP_CAP,
	HPRE_DEV_ALG_BITMAP_CAP,
	HPRE_CORE1_ALG_BITMAP_CAP,
	HPRE_CORE2_ALG_BITMAP_CAP,
	HPRE_CORE3_ALG_BITMAP_CAP,
	HPRE_CORE4_ALG_BITMAP_CAP,
	HPRE_CORE5_ALG_BITMAP_CAP,
	HPRE_CORE6_ALG_BITMAP_CAP,
	HPRE_CORE7_ALG_BITMAP_CAP,
	HPRE_CORE8_ALG_BITMAP_CAP,
	HPRE_CORE9_ALG_BITMAP_CAP,
	HPRE_CORE10_ALG_BITMAP_CAP
};
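/*
 * Capability table: each row lists the capability ID, the offset of its
 * capability register, the bit shift and mask within that register, and
 * the per-generation fallback values used when the register cannot be
 * read (field layout per struct hisi_qm_cap_info in the shared QM code).
 */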
static const struct hisi_qm_cap_info hpre_basic_info[] = {
	{HPRE_QM_NFE_MASK_CAP, 0x3124, 0, GENMASK(31, 0), 0x0, 0x1C37, 0x7C37},
	{HPRE_QM_RESET_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0xC37, 0x6C37},
	{HPRE_QM_OOO_SHUTDOWN_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0x4, 0x6C37},
	{HPRE_QM_CE_MASK_CAP, 0x312C, 0, GENMASK(31, 0), 0x0, 0x8, 0x8},
	{HPRE_NFE_MASK_CAP, 0x3130, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0x1FFFC3E},
	{HPRE_RESET_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0xBFFC3E},
	{HPRE_OOO_SHUTDOWN_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x22, 0xBFFC3E},
	{HPRE_CE_MASK_CAP, 0x3138, 0, GENMASK(31, 0), 0x0, 0x1, 0x1},
	{HPRE_CLUSTER_NUM_CAP, 0x313c, 20, GENMASK(3, 0), 0x0, 0x4, 0x1},
	{HPRE_CORE_TYPE_NUM_CAP, 0x313c, 16, GENMASK(3, 0), 0x0, 0x2, 0x2},
	{HPRE_CORE_NUM_CAP, 0x313c, 8, GENMASK(7, 0), 0x0, 0x8, 0xA},
	{HPRE_CLUSTER_CORE_NUM_CAP, 0x313c, 0, GENMASK(7, 0), 0x0, 0x2, 0xA},
	{HPRE_CORE_ENABLE_BITMAP_CAP, 0x3140, 0, GENMASK(31, 0), 0x0, 0xF, 0x3FF},
	{HPRE_DRV_ALG_BITMAP_CAP, 0x3144, 0, GENMASK(31, 0), 0x0, 0x03, 0x27},
	{HPRE_DEV_ALG_BITMAP_CAP, 0x3148, 0, GENMASK(31, 0), 0x0, 0x03, 0x7F},
	{HPRE_CORE1_ALG_BITMAP_CAP, 0x314c, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
	{HPRE_CORE2_ALG_BITMAP_CAP, 0x3150, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
	{HPRE_CORE3_ALG_BITMAP_CAP, 0x3154, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
	{HPRE_CORE4_ALG_BITMAP_CAP, 0x3158, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
	{HPRE_CORE5_ALG_BITMAP_CAP, 0x315c, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
	{HPRE_CORE6_ALG_BITMAP_CAP, 0x3160, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
	{HPRE_CORE7_ALG_BITMAP_CAP, 0x3164, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
	{HPRE_CORE8_ALG_BITMAP_CAP, 0x3168, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
	{HPRE_CORE9_ALG_BITMAP_CAP, 0x316c, 0, GENMASK(31, 0), 0x0, 0x10, 0x10},
	{HPRE_CORE10_ALG_BITMAP_CAP, 0x3170, 0, GENMASK(31, 0), 0x0, 0x10, 0x10}
};
enum hpre_pre_store_cap_idx {
	HPRE_CLUSTER_NUM_CAP_IDX = 0x0,
	HPRE_CORE_ENABLE_BITMAP_CAP_IDX,
	HPRE_DRV_ALG_BITMAP_CAP_IDX,
	HPRE_DEV_ALG_BITMAP_CAP_IDX,
};
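/*
 * Capabilities read once at probe time by hpre_pre_store_cap_reg() and
 * cached in qm->cap_tables.dev_cap_table, indexed by
 * enum hpre_pre_store_cap_idx above.
 */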
static const u32 hpre_pre_store_caps[] = {
	HPRE_CLUSTER_NUM_CAP,
	HPRE_CORE_ENABLE_BITMAP_CAP,
	HPRE_DRV_ALG_BITMAP_CAP,
	HPRE_DEV_ALG_BITMAP_CAP,
};
static const struct hpre_hw_error hpre_hw_errors[] = {
	{
		.int_msk = BIT(0),
		.msg = "core_ecc_1bit_err_int_set"
	}, {
		.int_msk = BIT(1),
		.msg = "core_ecc_2bit_err_int_set"
	}, {
		.int_msk = BIT(2),
		.msg = "dat_wb_poison_int_set"
	}, {
		.int_msk = BIT(3),
		.msg = "dat_rd_poison_int_set"
	}, {
		.int_msk = BIT(4),
		.msg = "bd_rd_poison_int_set"
	}, {
		.int_msk = BIT(5),
		.msg = "ooo_ecc_2bit_err_int_set"
	}, {
		.int_msk = BIT(6),
		.msg = "cluster1_shb_timeout_int_set"
	}, {
		.int_msk = BIT(7),
		.msg = "cluster2_shb_timeout_int_set"
	}, {
		.int_msk = BIT(8),
		.msg = "cluster3_shb_timeout_int_set"
	}, {
		.int_msk = BIT(9),
		.msg = "cluster4_shb_timeout_int_set"
	}, {
		.int_msk = GENMASK(15, 10),
		.msg = "ooo_rdrsp_err_int_set"
	}, {
		.int_msk = GENMASK(21, 16),
		.msg = "ooo_wrrsp_err_int_set"
	}, {
		.int_msk = BIT(22),
		.msg = "pt_rng_timeout_int_set"
	}, {
		.int_msk = BIT(23),
		.msg = "sva_fsm_timeout_int_set"
	}, {
		/* sentinel */
	}
};
static const u64 hpre_cluster_offsets[] = {
	[HPRE_CLUSTER0] =
		HPRE_CLSTR_BASE + HPRE_CLUSTER0 * HPRE_CLSTR_ADDR_INTRVL,
	[HPRE_CLUSTER1] =
		HPRE_CLSTR_BASE + HPRE_CLUSTER1 * HPRE_CLSTR_ADDR_INTRVL,
	[HPRE_CLUSTER2] =
		HPRE_CLSTR_BASE + HPRE_CLUSTER2 * HPRE_CLSTR_ADDR_INTRVL,
	[HPRE_CLUSTER3] =
		HPRE_CLSTR_BASE + HPRE_CLUSTER3 * HPRE_CLSTR_ADDR_INTRVL,
};
static const struct debugfs_reg32 hpre_cluster_dfx_regs[] = {
	{"CORES_EN_STATUS ", HPRE_CORE_EN_OFFSET},
	{"CORES_INI_CFG ", HPRE_CORE_INI_CFG_OFFSET},
	{"CORES_INI_STATUS ", HPRE_CORE_INI_STATUS_OFFSET},
	{"CORES_HTBT_WARN ", HPRE_CORE_HTBT_WARN_OFFSET},
	{"CORES_IS_SCHD ", HPRE_CORE_IS_SCHD_OFFSET},
};
static const struct debugfs_reg32 hpre_com_dfx_regs[] = {
	{"READ_CLR_EN ", HPRE_CTRL_CNT_CLR_CE},
	{"AXQOS ", HPRE_VFG_AXQOS},
	{"AWUSR_CFG ", HPRE_AWUSR_FP_CFG},
	{"BD_ENDIAN ", HPRE_BD_ENDIAN},
	{"ECC_CHECK_CTRL ", HPRE_ECC_BYPASS},
	{"RAS_INT_WIDTH ", HPRE_RAS_WIDTH_CFG},
	{"POISON_BYPASS ", HPRE_POISON_BYPASS},
	{"BD_ARUSER ", HPRE_BD_ARUSR_CFG},
	{"BD_AWUSER ", HPRE_BD_AWUSR_CFG},
	{"DATA_ARUSER ", HPRE_DATA_RUSER_CFG},
	{"DATA_AWUSER ", HPRE_DATA_WUSER_CFG},
	{"INT_STATUS ", HPRE_INT_STATUS},
	{"INT_MASK ", HPRE_HAC_INT_MSK},
	{"RAS_CE_ENB ", HPRE_HAC_RAS_CE_ENB},
	{"RAS_NFE_ENB ", HPRE_HAC_RAS_NFE_ENB},
	{"RAS_FE_ENB ", HPRE_HAC_RAS_FE_ENB},
	{"INT_SET ", HPRE_HAC_INT_SET},
	{"RNG_TIMEOUT_NUM ", HPRE_RNG_TIMEOUT_NUM},
};
static const char *hpre_dfx_files[HPRE_DFX_FILE_NUM] = {
/* define the HPRE's dfx regs region and region length */
static struct dfx_diff_registers hpre_diff_regs[] = {
	{
		.reg_offset = HPRE_DFX_BASE,
		.reg_len = HPRE_DFX_BASE_LEN,
	}, {
		.reg_offset = HPRE_DFX_COMMON1,
		.reg_len = HPRE_DFX_COMMON1_LEN,
	}, {
		.reg_offset = HPRE_DFX_COMMON2,
		.reg_len = HPRE_DFX_COMMON2_LEN,
	}, {
		.reg_offset = HPRE_DFX_CORE,
		.reg_len = HPRE_DFX_CORE_LEN,
	},
};
static const struct hisi_qm_err_ini hpre_err_ini;
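/*
 * Non-static helper used by the HPRE crypto glue code (declared in
 * hpre.h): reports whether the algorithm bit(s) in @alg are present in
 * the driver-algorithm bitmap cached from the capability registers.
 */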
bool hpre_check_alg_support(struct hisi_qm *qm, u32 alg)
{
	u32 cap_val;

	cap_val = qm->cap_tables.dev_cap_table[HPRE_DRV_ALG_BITMAP_CAP_IDX].cap_val;
	if (alg & cap_val)
		return true;

	return false;
}
static int hpre_diff_regs_show(struct seq_file *s, void *unused)
{
	struct hisi_qm *qm = s->private;

	hisi_qm_acc_diff_regs_dump(qm, s, qm->debug.acc_diff_regs,
				   ARRAY_SIZE(hpre_diff_regs));

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(hpre_diff_regs);
static int hpre_com_regs_show(struct seq_file *s, void *unused)
{
	hisi_qm_regs_dump(s, s->private);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(hpre_com_regs);
static int hpre_cluster_regs_show(struct seq_file *s, void *unused)
{
	hisi_qm_regs_dump(s, s->private);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(hpre_cluster_regs);
static const struct kernel_param_ops hpre_uacce_mode_ops = {
	.set = uacce_mode_set,
	.get = param_get_int,
};
/*
 * uacce_mode = 0 means hpre only registers to the crypto subsystem.
 * uacce_mode = 1 means hpre registers to both crypto and uacce.
 */
static u32 uacce_mode = UACCE_MODE_NOUACCE;
module_param_cb(uacce_mode, &hpre_uacce_mode_ops, &uacce_mode, 0444);
MODULE_PARM_DESC(uacce_mode, UACCE_MODE_DESC);
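/*
 * Example (module name assumed from hpre_name above):
 *   modprobe hisi_hpre uacce_mode=1
 * registers the device with both the kernel crypto API and UACCE.
 */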
static bool pf_q_num_flag;
static int pf_q_num_set(const char *val, const struct kernel_param *kp)
{
	pf_q_num_flag = true;

	return q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_HPRE_PF);
}
static const struct kernel_param_ops hpre_pf_q_num_ops = {
	.set = pf_q_num_set,
	.get = param_get_int,
};

static u32 pf_q_num = HPRE_PF_DEF_Q_NUM;
module_param_cb(pf_q_num, &hpre_pf_q_num_ops, &pf_q_num, 0444);
MODULE_PARM_DESC(pf_q_num, "Number of queues in PF of CS(2-1024)");
static const struct kernel_param_ops vfs_num_ops = {
	.set = vfs_num_set,
	.get = param_get_int,
};

static u32 vfs_num;
module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)");
struct hisi_qp *hpre_create_qp(u8 type)
{
	int node = cpu_to_node(raw_smp_processor_id());
	struct hisi_qp *qp = NULL;
	int ret;

	if (type != HPRE_V2_ALG_TYPE && type != HPRE_V3_ECC_ALG_TYPE)
		return NULL;

	/*
	 * type: 0 - RSA/DH. algorithm supported in V2,
	 *       1 - ECC algorithm in V3.
	 */
	ret = hisi_qm_alloc_qps_node(&hpre_devices, 1, type, node, &qp);
	if (!ret)
		return qp;

	return NULL;
}
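/*
 * Kunpeng 920 only: HW V3 and later return early here. The PASID enable
 * bit in the data ARUSER/AWUSER configuration is set or cleared depending
 * on whether SVA is in use (see the "Config data buffer pasid" call site
 * in hpre_set_user_domain_and_cache()).
 */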
static void hpre_config_pasid(struct hisi_qm *qm)
{
	u32 val1, val2;

	if (qm->ver >= QM_HW_V3)
		return;

	val1 = readl_relaxed(qm->io_base + HPRE_DATA_RUSER_CFG);
	val2 = readl_relaxed(qm->io_base + HPRE_DATA_WUSER_CFG);
	if (qm->use_sva) {
		val1 |= BIT(HPRE_PASID_EN_BIT);
		val2 |= BIT(HPRE_PASID_EN_BIT);
	} else {
		val1 &= ~BIT(HPRE_PASID_EN_BIT);
		val2 &= ~BIT(HPRE_PASID_EN_BIT);
	}
	writel_relaxed(val1, qm->io_base + HPRE_DATA_RUSER_CFG);
	writel_relaxed(val2, qm->io_base + HPRE_DATA_WUSER_CFG);
}
static int hpre_cfg_by_dsm(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	union acpi_object *obj;
	guid_t guid;

	if (guid_parse("b06b81ab-0134-4a45-9b0c-483447b95fa7", &guid)) {
		dev_err(dev, "Hpre GUID failed\n");
		return -EINVAL;
	}

	/* Switch over to MSI handling due to non-standard PCI implementation */
	obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &guid,
				0, HPRE_VIA_MSI_DSM, NULL);
	if (!obj) {
		dev_err(dev, "ACPI handle failed!\n");
		return -EIO;
	}

	ACPI_FREE(obj);

	return 0;
}
static int hpre_set_cluster(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	unsigned long offset;
	u32 cluster_core_mask;
	u8 clusters_num;
	u32 val = 0;
	int ret, i;

	cluster_core_mask = qm->cap_tables.dev_cap_table[HPRE_CORE_ENABLE_BITMAP_CAP_IDX].cap_val;
	clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val;
	for (i = 0; i < clusters_num; i++) {
		offset = i * HPRE_CLSTR_ADDR_INTRVL;

		/* initiate the cluster's cores */
		writel(cluster_core_mask,
		       qm->io_base + offset + HPRE_CORE_ENB);
		writel(0x1, qm->io_base + offset + HPRE_CORE_INI_CFG);
		ret = readl_relaxed_poll_timeout(qm->io_base + offset +
						 HPRE_CORE_INI_STATUS, val,
						 ((val & cluster_core_mask) ==
						 cluster_core_mask),
						 HPRE_REG_RD_INTVRL_US,
						 HPRE_REG_RD_TMOUT_US);
		if (ret) {
			dev_err(dev,
				"cluster %d int st status timeout!\n", i);
			return -ETIMEDOUT;
		}
	}

	return 0;
}
/*
 * For Kunpeng 920, disable FLR triggered by hardware (BME/PM/SRIOV).
 * Otherwise the device may stay in D3 state when hpre is bound and
 * unbound quickly, because the hardware-triggered FLR kicks in.
 */
static void disable_flr_of_bme(struct hisi_qm *qm)
{
	u32 val;

	val = readl(qm->io_base + QM_PEH_AXUSER_CFG);
	val &= ~(HPRE_QM_BME_FLR | HPRE_QM_SRIOV_FLR);
	val |= HPRE_QM_PM_FLR;
	writel(val, qm->io_base + QM_PEH_AXUSER_CFG);
	writel(PEH_AXUSER_CFG_ENABLE, qm->io_base + QM_PEH_AXUSER_CFG_ENABLE);
}
static void hpre_open_sva_prefetch(struct hisi_qm *qm)
{
	u32 val;
	int ret;

	if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
		return;

	/* Enable prefetch */
	val = readl_relaxed(qm->io_base + HPRE_PREFETCH_CFG);
	val &= HPRE_PREFETCH_ENABLE;
	writel(val, qm->io_base + HPRE_PREFETCH_CFG);

	ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_PREFETCH_CFG,
					 val, !(val & HPRE_PREFETCH_DISABLE),
					 HPRE_REG_RD_INTVRL_US,
					 HPRE_REG_RD_TMOUT_US);
	if (ret)
		pci_err(qm->pdev, "failed to open sva prefetch\n");
}
static void hpre_close_sva_prefetch(struct hisi_qm *qm)
{
	u32 val;
	int ret;

	if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
		return;

	val = readl_relaxed(qm->io_base + HPRE_PREFETCH_CFG);
	val |= HPRE_PREFETCH_DISABLE;
	writel(val, qm->io_base + HPRE_PREFETCH_CFG);

	ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_SVA_PREFTCH_DFX,
					 val, !(val & HPRE_SVA_DISABLE_READY),
					 HPRE_REG_RD_INTVRL_US,
					 HPRE_REG_RD_TMOUT_US);
	if (ret)
		pci_err(qm->pdev, "failed to close sva prefetch\n");
}
static void hpre_enable_clock_gate(struct hisi_qm *qm)
{
	u32 val;

	if (qm->ver < QM_HW_V3)
		return;

	val = readl(qm->io_base + HPRE_CLKGATE_CTL);
	val |= HPRE_CLKGATE_CTL_EN;
	writel(val, qm->io_base + HPRE_CLKGATE_CTL);

	val = readl(qm->io_base + HPRE_PEH_CFG_AUTO_GATE);
	val |= HPRE_PEH_CFG_AUTO_GATE_EN;
	writel(val, qm->io_base + HPRE_PEH_CFG_AUTO_GATE);

	val = readl(qm->io_base + HPRE_CLUSTER_DYN_CTL);
	val |= HPRE_CLUSTER_DYN_CTL_EN;
	writel(val, qm->io_base + HPRE_CLUSTER_DYN_CTL);

	val = readl_relaxed(qm->io_base + HPRE_CORE_SHB_CFG);
	val |= HPRE_CORE_GATE_EN;
	writel(val, qm->io_base + HPRE_CORE_SHB_CFG);
}
static void hpre_disable_clock_gate(struct hisi_qm *qm)
{
	u32 val;

	if (qm->ver < QM_HW_V3)
		return;

	val = readl(qm->io_base + HPRE_CLKGATE_CTL);
	val &= ~HPRE_CLKGATE_CTL_EN;
	writel(val, qm->io_base + HPRE_CLKGATE_CTL);

	val = readl(qm->io_base + HPRE_PEH_CFG_AUTO_GATE);
	val &= ~HPRE_PEH_CFG_AUTO_GATE_EN;
	writel(val, qm->io_base + HPRE_PEH_CFG_AUTO_GATE);

	val = readl(qm->io_base + HPRE_CLUSTER_DYN_CTL);
	val &= ~HPRE_CLUSTER_DYN_CTL_EN;
	writel(val, qm->io_base + HPRE_CLUSTER_DYN_CTL);

	val = readl_relaxed(qm->io_base + HPRE_CORE_SHB_CFG);
	val &= ~HPRE_CORE_GATE_EN;
	writel(val, qm->io_base + HPRE_CORE_SHB_CFG);
}
static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	u32 val;
	int ret;

	/* disable dynamic clock gate before sram init */
	hpre_disable_clock_gate(qm);

	writel(HPRE_QM_USR_CFG_MASK, qm->io_base + QM_ARUSER_M_CFG_ENABLE);
	writel(HPRE_QM_USR_CFG_MASK, qm->io_base + QM_AWUSER_M_CFG_ENABLE);
	writel_relaxed(HPRE_QM_AXI_CFG_MASK, qm->io_base + QM_AXI_M_CFG);

	if (qm->ver >= QM_HW_V3)
		writel(HPRE_RSA_ENB | HPRE_ECC_ENB,
		       qm->io_base + HPRE_TYPES_ENB);
	else
		writel(HPRE_RSA_ENB, qm->io_base + HPRE_TYPES_ENB);

	writel(HPRE_QM_VFG_AX_MASK, qm->io_base + HPRE_VFG_AXCACHE);
	writel(0x0, qm->io_base + HPRE_BD_ENDIAN);
	writel(0x0, qm->io_base + HPRE_POISON_BYPASS);
	writel(0x0, qm->io_base + HPRE_ECC_BYPASS);

	writel(HPRE_BD_USR_MASK, qm->io_base + HPRE_BD_ARUSR_CFG);
	writel(HPRE_BD_USR_MASK, qm->io_base + HPRE_BD_AWUSR_CFG);
	writel(0x1, qm->io_base + HPRE_RDCHN_INI_CFG);
	ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_RDCHN_INI_ST, val,
					 val & BIT(0),
					 HPRE_REG_RD_INTVRL_US,
					 HPRE_REG_RD_TMOUT_US);
	if (ret) {
		dev_err(dev, "read rd channel timeout fail!\n");
		return -ETIMEDOUT;
	}

	ret = hpre_set_cluster(qm);
	if (ret)
		return -ETIMEDOUT;

	/* This setting is only needed by Kunpeng 920. */
	if (qm->ver == QM_HW_V2) {
		ret = hpre_cfg_by_dsm(qm);
		if (ret)
			return ret;

		disable_flr_of_bme(qm);
	}

	/* Config data buffer pasid needed by Kunpeng 920 */
	hpre_config_pasid(qm);

	hpre_enable_clock_gate(qm);

	return ret;
}
static void hpre_cnt_regs_clear(struct hisi_qm *qm)
{
	unsigned long offset;
	u8 clusters_num;
	int i;

	/* clear clusterX/cluster_ctrl */
	clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val;
	for (i = 0; i < clusters_num; i++) {
		offset = HPRE_CLSTR_BASE + i * HPRE_CLSTR_ADDR_INTRVL;
		writel(0x0, qm->io_base + offset + HPRE_CLUSTER_INQURY);
	}

	/* clear rdclr_en */
	writel(0x0, qm->io_base + HPRE_CTRL_CNT_CLR_CE);

	hisi_qm_debug_regs_clear(qm);
}
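/*
 * Enable or disable the HPRE AXI master out-of-order shutdown that is
 * triggered when an uncorrected (NFE) error occurs. The per-error-type
 * select register (HPRE_OOO_SHUTDOWN_SEL) is only written on HW V3 and
 * later, where it exists.
 */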
static void hpre_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
{
	u32 val1, val2;

	val1 = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
	if (enable) {
		val1 |= HPRE_AM_OOO_SHUTDOWN_ENABLE;
		val2 = hisi_qm_get_hw_info(qm, hpre_basic_info,
					   HPRE_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
	} else {
		val1 &= ~HPRE_AM_OOO_SHUTDOWN_ENABLE;
		val2 = 0x0;
	}

	if (qm->ver > QM_HW_V2)
		writel(val2, qm->io_base + HPRE_OOO_SHUTDOWN_SEL);

	writel(val1, qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
}
static void hpre_hw_error_disable(struct hisi_qm *qm)
{
	u32 ce, nfe;

	ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP, qm->cap_ver);
	nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);

	/* disable hpre hw error interrupts */
	writel(ce | nfe | HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_INT_MASK);
	/* disable HPRE block master OOO when nfe occurs on Kunpeng930 */
	hpre_master_ooo_ctrl(qm, false);
}
static void hpre_hw_error_enable(struct hisi_qm *qm)
{
	u32 ce, nfe, err_en;

	ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP, qm->cap_ver);
	nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);

	/* clear HPRE hw error source if any is pending */
	writel(ce | nfe | HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_HAC_SOURCE_INT);

	/* configure error type */
	writel(ce, qm->io_base + HPRE_RAS_CE_ENB);
	writel(nfe, qm->io_base + HPRE_RAS_NFE_ENB);
	writel(HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_RAS_FE_ENB);

	/* enable HPRE block master OOO when nfe occurs on Kunpeng930 */
	hpre_master_ooo_ctrl(qm, true);

	/* enable hpre hw error interrupts */
	err_en = ce | nfe | HPRE_HAC_RAS_FE_ENABLE;
	writel(~err_en, qm->io_base + HPRE_INT_MASK);
}
static inline struct hisi_qm *hpre_file_to_qm(struct hpre_debugfs_file *file)
{
	struct hpre *hpre = container_of(file->debug, struct hpre, debug);

	return &hpre->qm;
}
static u32 hpre_clear_enable_read(struct hpre_debugfs_file *file)
{
	struct hisi_qm *qm = hpre_file_to_qm(file);

	return readl(qm->io_base + HPRE_CTRL_CNT_CLR_CE) &
	       HPRE_CTRL_CNT_CLR_CE_BIT;
}
static int hpre_clear_enable_write(struct hpre_debugfs_file *file, u32 val)
{
	struct hisi_qm *qm = hpre_file_to_qm(file);
	u32 tmp;

	if (val != 1 && val != 0)
		return -EINVAL;

	tmp = (readl(qm->io_base + HPRE_CTRL_CNT_CLR_CE) &
	       ~HPRE_CTRL_CNT_CLR_CE_BIT) | val;
	writel(tmp, qm->io_base + HPRE_CTRL_CNT_CLR_CE);

	return 0;
}
static u32 hpre_cluster_inqry_read(struct hpre_debugfs_file *file)
{
	struct hisi_qm *qm = hpre_file_to_qm(file);
	int cluster_index = file->index - HPRE_CLUSTER_CTRL;
	unsigned long offset = HPRE_CLSTR_BASE +
			       cluster_index * HPRE_CLSTR_ADDR_INTRVL;

	return readl(qm->io_base + offset + HPRE_CLSTR_ADDR_INQRY_RSLT);
}
static void hpre_cluster_inqry_write(struct hpre_debugfs_file *file, u32 val)
{
	struct hisi_qm *qm = hpre_file_to_qm(file);
	int cluster_index = file->index - HPRE_CLUSTER_CTRL;
	unsigned long offset = HPRE_CLSTR_BASE + cluster_index *
			       HPRE_CLSTR_ADDR_INTRVL;

	writel(val, qm->io_base + offset + HPRE_CLUSTER_INQURY);
}
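/*
 * debugfs read/write handlers for the files listed in
 * hpre_debug_file_name[]: both take the per-file spinlock and a
 * DFX/runtime-PM reference (hisi_qm_get_dfx_access) before touching
 * device registers, and release them on every exit path.
 */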
static ssize_t hpre_ctrl_debug_read(struct file *filp, char __user *buf,
				    size_t count, loff_t *pos)
{
	struct hpre_debugfs_file *file = filp->private_data;
	struct hisi_qm *qm = hpre_file_to_qm(file);
	char tbuf[HPRE_DBGFS_VAL_MAX_LEN];
	u32 val;
	int ret;

	ret = hisi_qm_get_dfx_access(qm);
	if (ret)
		return ret;

	spin_lock_irq(&file->lock);
	switch (file->type) {
	case HPRE_CLEAR_ENABLE:
		val = hpre_clear_enable_read(file);
		break;
	case HPRE_CLUSTER_CTRL:
		val = hpre_cluster_inqry_read(file);
		break;
	default:
		goto err_input;
	}
	spin_unlock_irq(&file->lock);

	hisi_qm_put_dfx_access(qm);
	ret = snprintf(tbuf, HPRE_DBGFS_VAL_MAX_LEN, "%u\n", val);
	return simple_read_from_buffer(buf, count, pos, tbuf, ret);

err_input:
	spin_unlock_irq(&file->lock);
	hisi_qm_put_dfx_access(qm);
	return -EINVAL;
}
static ssize_t hpre_ctrl_debug_write(struct file *filp, const char __user *buf,
				     size_t count, loff_t *pos)
{
	struct hpre_debugfs_file *file = filp->private_data;
	struct hisi_qm *qm = hpre_file_to_qm(file);
	char tbuf[HPRE_DBGFS_VAL_MAX_LEN];
	unsigned long val;
	int len, ret;

	if (*pos != 0)
		return 0;

	if (count >= HPRE_DBGFS_VAL_MAX_LEN)
		return -ENOSPC;

	len = simple_write_to_buffer(tbuf, HPRE_DBGFS_VAL_MAX_LEN - 1,
				     pos, buf, count);
	if (len < 0)
		return len;

	tbuf[len] = '\0';
	if (kstrtoul(tbuf, 0, &val))
		return -EFAULT;

	ret = hisi_qm_get_dfx_access(qm);
	if (ret)
		return ret;

	spin_lock_irq(&file->lock);
	switch (file->type) {
	case HPRE_CLEAR_ENABLE:
		ret = hpre_clear_enable_write(file, val);
		if (ret)
			goto err_input;
		break;
	case HPRE_CLUSTER_CTRL:
		hpre_cluster_inqry_write(file, val);
		break;
	default:
		ret = -EINVAL;
		goto err_input;
	}

	ret = count;

err_input:
	spin_unlock_irq(&file->lock);
	hisi_qm_put_dfx_access(qm);
	return ret;
}
static const struct file_operations hpre_ctrl_debug_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = hpre_ctrl_debug_read,
	.write = hpre_ctrl_debug_write,
};
static int hpre_debugfs_atomic64_get(void *data, u64 *val)
{
	struct hpre_dfx *dfx_item = data;

	*val = atomic64_read(&dfx_item->value);

	return 0;
}
static int hpre_debugfs_atomic64_set(void *data, u64 val)
{
	struct hpre_dfx *dfx_item = data;
	struct hpre_dfx *hpre_dfx = NULL;

	if (dfx_item->type == HPRE_OVERTIME_THRHLD) {
		hpre_dfx = dfx_item - HPRE_OVERTIME_THRHLD;
		atomic64_set(&hpre_dfx[HPRE_OVER_THRHLD_CNT].value, 0);
	} else if (val) {
		return -EINVAL;
	}

	atomic64_set(&dfx_item->value, val);

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(hpre_atomic64_ops, hpre_debugfs_atomic64_get,
			 hpre_debugfs_atomic64_set, "%llu\n");
static int hpre_create_debugfs_file(struct hisi_qm *qm, struct dentry *dir,
				    enum hpre_ctrl_dbgfs_file type, int indx)
{
	struct hpre *hpre = container_of(qm, struct hpre, qm);
	struct hpre_debug *dbg = &hpre->debug;
	struct dentry *file_dir;

	if (dir)
		file_dir = dir;
	else
		file_dir = qm->debug.debug_root;

	if (type >= HPRE_DEBUG_FILE_NUM)
		return -EINVAL;

	spin_lock_init(&dbg->files[indx].lock);
	dbg->files[indx].debug = dbg;
	dbg->files[indx].type = type;
	dbg->files[indx].index = indx;
	debugfs_create_file(hpre_debug_file_name[type], 0600, file_dir,
			    dbg->files + indx, &hpre_ctrl_debug_fops);

	return 0;
}
static int hpre_pf_comm_regs_debugfs_init(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	struct debugfs_regset32 *regset;

	regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
	if (!regset)
		return -ENOMEM;

	regset->regs = hpre_com_dfx_regs;
	regset->nregs = ARRAY_SIZE(hpre_com_dfx_regs);
	regset->base = qm->io_base;

	debugfs_create_file("regs", 0444, qm->debug.debug_root,
			    regset, &hpre_com_regs_fops);

	return 0;
}
static int hpre_cluster_debugfs_init(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	char buf[HPRE_DBGFS_VAL_MAX_LEN];
	struct debugfs_regset32 *regset;
	struct dentry *tmp_d;
	u8 clusters_num;
	int i, ret;

	clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val;
	for (i = 0; i < clusters_num; i++) {
		ret = snprintf(buf, HPRE_DBGFS_VAL_MAX_LEN, "cluster%d", i);
		if (ret >= HPRE_DBGFS_VAL_MAX_LEN)
			return -EINVAL;
		tmp_d = debugfs_create_dir(buf, qm->debug.debug_root);

		regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
		if (!regset)
			return -ENOMEM;

		regset->regs = hpre_cluster_dfx_regs;
		regset->nregs = ARRAY_SIZE(hpre_cluster_dfx_regs);
		regset->base = qm->io_base + hpre_cluster_offsets[i];

		debugfs_create_file("regs", 0444, tmp_d, regset,
				    &hpre_cluster_regs_fops);
		ret = hpre_create_debugfs_file(qm, tmp_d, HPRE_CLUSTER_CTRL,
					       i + HPRE_CLUSTER_CTRL);
		if (ret)
			return ret;
	}

	return 0;
}
static int hpre_ctrl_debug_init(struct hisi_qm *qm)
{
	int ret;

	ret = hpre_create_debugfs_file(qm, NULL, HPRE_CLEAR_ENABLE,
				       HPRE_CLEAR_ENABLE);
	if (ret)
		return ret;

	ret = hpre_pf_comm_regs_debugfs_init(qm);
	if (ret)
		return ret;

	return hpre_cluster_debugfs_init(qm);
}
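/*
 * Create the "hpre_dfx" directory with one file per software counter in
 * hpre_dfx_files[]; on the PF, additionally expose "diff_regs" when the
 * differential register snapshot was successfully allocated.
 */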
static void hpre_dfx_debug_init(struct hisi_qm *qm)
{
	struct dfx_diff_registers *hpre_regs = qm->debug.acc_diff_regs;
	struct hpre *hpre = container_of(qm, struct hpre, qm);
	struct hpre_dfx *dfx = hpre->debug.dfx;
	struct dentry *parent;
	int i;

	parent = debugfs_create_dir("hpre_dfx", qm->debug.debug_root);
	for (i = 0; i < HPRE_DFX_FILE_NUM; i++) {
		dfx[i].type = i;
		debugfs_create_file(hpre_dfx_files[i], 0644, parent, &dfx[i],
				    &hpre_atomic64_ops);
	}

	if (qm->fun_type == QM_HW_PF && hpre_regs)
		debugfs_create_file("diff_regs", 0444, parent,
				    qm, &hpre_diff_regs_fops);
}
static int hpre_debugfs_init(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	int ret;

	ret = hisi_qm_regs_debugfs_init(qm, hpre_diff_regs, ARRAY_SIZE(hpre_diff_regs));
	if (ret) {
		dev_warn(dev, "Failed to init HPRE diff regs!\n");
		return ret;
	}

	qm->debug.debug_root = debugfs_create_dir(dev_name(dev),
						  hpre_debugfs_root);
	qm->debug.sqe_mask_offset = HPRE_SQE_MASK_OFFSET;
	qm->debug.sqe_mask_len = HPRE_SQE_MASK_LEN;

	hisi_qm_debug_init(qm);

	if (qm->pdev->device == PCI_DEVICE_ID_HUAWEI_HPRE_PF) {
		ret = hpre_ctrl_debug_init(qm);
		if (ret)
			goto debugfs_remove;
	}

	hpre_dfx_debug_init(qm);

	return 0;

debugfs_remove:
	debugfs_remove_recursive(qm->debug.debug_root);
	hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hpre_diff_regs));
	return ret;
}
static void hpre_debugfs_exit(struct hisi_qm *qm)
{
	debugfs_remove_recursive(qm->debug.debug_root);

	hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hpre_diff_regs));
}
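/*
 * Read the capabilities listed in hpre_pre_store_caps[] once and cache
 * them in qm->cap_tables.dev_cap_table, so later paths use the cached
 * values instead of re-reading hardware registers.
 */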
static int hpre_pre_store_cap_reg(struct hisi_qm *qm)
{
	struct hisi_qm_cap_record *hpre_cap;
	struct device *dev = &qm->pdev->dev;
	size_t i, size;

	size = ARRAY_SIZE(hpre_pre_store_caps);
	hpre_cap = devm_kzalloc(dev, sizeof(*hpre_cap) * size, GFP_KERNEL);
	if (!hpre_cap)
		return -ENOMEM;

	for (i = 0; i < size; i++) {
		hpre_cap[i].type = hpre_pre_store_caps[i];
		hpre_cap[i].cap_val = hisi_qm_get_hw_info(qm, hpre_basic_info,
				      hpre_pre_store_caps[i], qm->cap_ver);
	}

	if (hpre_cap[HPRE_CLUSTER_NUM_CAP_IDX].cap_val > HPRE_CLUSTERS_NUM_MAX) {
		dev_err(dev, "Device cluster num %u is out of range for driver supports %d!\n",
			hpre_cap[HPRE_CLUSTER_NUM_CAP_IDX].cap_val, HPRE_CLUSTERS_NUM_MAX);
		return -EINVAL;
	}

	qm->cap_tables.dev_cap_table = hpre_cap;

	return 0;
}
static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
	u64 alg_msk;
	int ret;

	if (pdev->revision == QM_HW_V1) {
		pci_warn(pdev, "HPRE version 1 is not supported!\n");
		return -EINVAL;
	}

	qm->mode = uacce_mode;
	qm->pdev = pdev;
	qm->ver = pdev->revision;
	qm->sqe_size = HPRE_SQE_SIZE;
	qm->dev_name = hpre_name;

	qm->fun_type = (pdev->device == PCI_DEVICE_ID_HUAWEI_HPRE_PF) ?
			QM_HW_PF : QM_HW_VF;
	if (qm->fun_type == QM_HW_PF) {
		qm->qp_base = HPRE_PF_DEF_Q_BASE;
		qm->qp_num = pf_q_num;
		qm->debug.curr_qm_qp_num = pf_q_num;
		qm->qm_list = &hpre_devices;
		qm->err_ini = &hpre_err_ini;
		if (pf_q_num_flag)
			set_bit(QM_MODULE_PARAM, &qm->misc_ctl);
	}

	ret = hisi_qm_init(qm);
	if (ret) {
		pci_err(pdev, "Failed to init hpre qm configures!\n");
		return ret;
	}

	/* Fetch and save the value of capability registers */
	ret = hpre_pre_store_cap_reg(qm);
	if (ret) {
		pci_err(pdev, "Failed to pre-store capability registers!\n");
		hisi_qm_uninit(qm);
		return ret;
	}

	alg_msk = qm->cap_tables.dev_cap_table[HPRE_DEV_ALG_BITMAP_CAP_IDX].cap_val;
	ret = hisi_qm_set_algs(qm, alg_msk, hpre_dev_algs, ARRAY_SIZE(hpre_dev_algs));
	if (ret) {
		pci_err(pdev, "Failed to set hpre algs!\n");
		hisi_qm_uninit(qm);
	}

	return ret;
}
static int hpre_show_last_regs_init(struct hisi_qm *qm)
{
	int cluster_dfx_regs_num = ARRAY_SIZE(hpre_cluster_dfx_regs);
	int com_dfx_regs_num = ARRAY_SIZE(hpre_com_dfx_regs);
	struct qm_debug *debug = &qm->debug;
	void __iomem *io_base;
	u8 clusters_num;
	int i, j, idx;

	clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val;
	debug->last_words = kcalloc(cluster_dfx_regs_num * clusters_num +
				    com_dfx_regs_num, sizeof(unsigned int), GFP_KERNEL);
	if (!debug->last_words)
		return -ENOMEM;

	for (i = 0; i < com_dfx_regs_num; i++)
		debug->last_words[i] = readl_relaxed(qm->io_base +
						     hpre_com_dfx_regs[i].offset);

	for (i = 0; i < clusters_num; i++) {
		io_base = qm->io_base + hpre_cluster_offsets[i];
		for (j = 0; j < cluster_dfx_regs_num; j++) {
			idx = com_dfx_regs_num + i * cluster_dfx_regs_num + j;
			debug->last_words[idx] = readl_relaxed(
				io_base + hpre_cluster_dfx_regs[j].offset);
		}
	}

	return 0;
}
static void hpre_show_last_regs_uninit(struct hisi_qm *qm)
{
	struct qm_debug *debug = &qm->debug;

	if (qm->fun_type == QM_HW_VF || !debug->last_words)
		return;

	kfree(debug->last_words);
	debug->last_words = NULL;
}
static void hpre_show_last_dfx_regs(struct hisi_qm *qm)
{
	int cluster_dfx_regs_num = ARRAY_SIZE(hpre_cluster_dfx_regs);
	int com_dfx_regs_num = ARRAY_SIZE(hpre_com_dfx_regs);
	struct qm_debug *debug = &qm->debug;
	struct pci_dev *pdev = qm->pdev;
	void __iomem *io_base;
	u8 clusters_num;
	int i, j, idx;
	u32 val;

	if (qm->fun_type == QM_HW_VF || !debug->last_words)
		return;

	/* dumps last word of the debugging registers during controller reset */
	for (i = 0; i < com_dfx_regs_num; i++) {
		val = readl_relaxed(qm->io_base + hpre_com_dfx_regs[i].offset);
		if (debug->last_words[i] != val)
			pci_info(pdev, "Common_core:%s \t= 0x%08x => 0x%08x\n",
				 hpre_com_dfx_regs[i].name, debug->last_words[i], val);
	}

	clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val;
	for (i = 0; i < clusters_num; i++) {
		io_base = qm->io_base + hpre_cluster_offsets[i];
		for (j = 0; j < cluster_dfx_regs_num; j++) {
			val = readl_relaxed(io_base +
					    hpre_cluster_dfx_regs[j].offset);
			idx = com_dfx_regs_num + i * cluster_dfx_regs_num + j;
			if (debug->last_words[idx] != val)
				pci_info(pdev, "cluster-%d:%s \t= 0x%08x => 0x%08x\n",
					 i, hpre_cluster_dfx_regs[j].name,
					 debug->last_words[idx], val);
		}
	}
}
static void hpre_log_hw_error(struct hisi_qm *qm, u32 err_sts)
{
	const struct hpre_hw_error *err = hpre_hw_errors;
	struct device *dev = &qm->pdev->dev;

	while (err->msg) {
		if (err->int_msk & err_sts)
			dev_warn(dev, "%s [error status=0x%x] found\n",
				 err->msg, err->int_msk);
		err++;
	}
}
static u32 hpre_get_hw_err_status(struct hisi_qm *qm)
{
	return readl(qm->io_base + HPRE_INT_STATUS);
}
static void hpre_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
{
	u32 nfe;

	writel(err_sts, qm->io_base + HPRE_HAC_SOURCE_INT);
	nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);
	writel(nfe, qm->io_base + HPRE_RAS_NFE_ENB);
}
static void hpre_open_axi_master_ooo(struct hisi_qm *qm)
{
	u32 value;

	value = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
	writel(value & ~HPRE_AM_OOO_SHUTDOWN_ENABLE,
	       qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
	writel(value | HPRE_AM_OOO_SHUTDOWN_ENABLE,
	       qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
}
static void hpre_err_info_init(struct hisi_qm *qm)
{
	struct hisi_qm_err_info *err_info = &qm->err_info;

	err_info->fe = HPRE_HAC_RAS_FE_ENABLE;
	err_info->ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_QM_CE_MASK_CAP, qm->cap_ver);
	err_info->nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_QM_NFE_MASK_CAP, qm->cap_ver);
	err_info->ecc_2bits_mask = HPRE_CORE_ECC_2BIT_ERR | HPRE_OOO_ECC_2BIT_ERR;
	err_info->dev_shutdown_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
							  HPRE_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
	err_info->qm_shutdown_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
							 HPRE_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
	err_info->qm_reset_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
						      HPRE_QM_RESET_MASK_CAP, qm->cap_ver);
	err_info->dev_reset_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
						       HPRE_RESET_MASK_CAP, qm->cap_ver);
	err_info->msi_wr_port = HPRE_WR_MSI_PORT;
	err_info->acpi_rst = "HRST";
}
static const struct hisi_qm_err_ini hpre_err_ini = {
	.hw_init		= hpre_set_user_domain_and_cache,
	.hw_err_enable		= hpre_hw_error_enable,
	.hw_err_disable		= hpre_hw_error_disable,
	.get_dev_hw_err_status	= hpre_get_hw_err_status,
	.clear_dev_hw_err_status = hpre_clear_hw_err_status,
	.log_dev_hw_err		= hpre_log_hw_error,
	.open_axi_master_ooo	= hpre_open_axi_master_ooo,
	.open_sva_prefetch	= hpre_open_sva_prefetch,
	.close_sva_prefetch	= hpre_close_sva_prefetch,
	.show_last_dfx_regs	= hpre_show_last_dfx_regs,
	.err_info_init		= hpre_err_info_init,
};
static int hpre_pf_probe_init(struct hpre *hpre)
{
	struct hisi_qm *qm = &hpre->qm;
	int ret;

	ret = hpre_set_user_domain_and_cache(qm);
	if (ret)
		return ret;

	hpre_open_sva_prefetch(qm);

	hisi_qm_dev_err_init(qm);
	ret = hpre_show_last_regs_init(qm);
	if (ret)
		pci_err(qm->pdev, "Failed to init last word regs!\n");

	return ret;
}
static int hpre_probe_init(struct hpre *hpre)
{
	u32 type_rate = HPRE_SHAPER_TYPE_RATE;
	struct hisi_qm *qm = &hpre->qm;
	int ret;

	if (qm->fun_type == QM_HW_PF) {
		ret = hpre_pf_probe_init(hpre);
		if (ret)
			return ret;
		/* Enable shaper type 0 */
		if (qm->ver >= QM_HW_V3) {
			type_rate |= QM_SHAPER_ENABLE;
			qm->type_rate = type_rate;
		}
	}

	return 0;
}
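/*
 * Teardown shared by the probe error path and hpre_remove(): PF-only
 * cleanup of counters, the last-word snapshot, SVA prefetch and device
 * error reporting; VFs have nothing to undo here.
 */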
static void hpre_probe_uninit(struct hisi_qm *qm)
{
	if (qm->fun_type == QM_HW_VF)
		return;

	hpre_cnt_regs_clear(qm);
	qm->debug.curr_qm_qp_num = 0;
	hpre_show_last_regs_uninit(qm);
	hpre_close_sva_prefetch(qm);
	hisi_qm_dev_err_uninit(qm);
}
static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct hisi_qm *qm;
	struct hpre *hpre;
	int ret;

	hpre = devm_kzalloc(&pdev->dev, sizeof(*hpre), GFP_KERNEL);
	if (!hpre)
		return -ENOMEM;

	qm = &hpre->qm;
	ret = hpre_qm_init(qm, pdev);
	if (ret) {
		pci_err(pdev, "Failed to init HPRE QM (%d)!\n", ret);
		return ret;
	}

	ret = hpre_probe_init(hpre);
	if (ret) {
		pci_err(pdev, "Failed to probe (%d)!\n", ret);
		goto err_with_qm_init;
	}

	ret = hisi_qm_start(qm);
	if (ret)
		goto err_with_probe_init;

	ret = hpre_debugfs_init(qm);
	if (ret)
		dev_warn(&pdev->dev, "init debugfs fail!\n");

	hisi_qm_add_list(qm, &hpre_devices);
	ret = hisi_qm_alg_register(qm, &hpre_devices, HPRE_CTX_Q_NUM_DEF);
	if (ret < 0) {
		pci_err(pdev, "fail to register algs to crypto!\n");
		goto err_qm_del_list;
	}

	if (qm->uacce) {
		ret = uacce_register(qm->uacce);
		if (ret) {
			pci_err(pdev, "failed to register uacce (%d)!\n", ret);
			goto err_with_alg_register;
		}
	}

	if (qm->fun_type == QM_HW_PF && vfs_num) {
		ret = hisi_qm_sriov_enable(pdev, vfs_num);
		if (ret < 0)
			goto err_with_alg_register;
	}

	hisi_qm_pm_init(qm);

	return 0;

err_with_alg_register:
	hisi_qm_alg_unregister(qm, &hpre_devices, HPRE_CTX_Q_NUM_DEF);

err_qm_del_list:
	hisi_qm_del_list(qm, &hpre_devices);
	hpre_debugfs_exit(qm);
	hisi_qm_stop(qm, QM_NORMAL);

err_with_probe_init:
	hpre_probe_uninit(qm);

err_with_qm_init:
	hisi_qm_uninit(qm);

	return ret;
}
static void hpre_remove(struct pci_dev *pdev)
{
	struct hisi_qm *qm = pci_get_drvdata(pdev);

	hisi_qm_pm_uninit(qm);
	hisi_qm_wait_task_finish(qm, &hpre_devices);
	hisi_qm_alg_unregister(qm, &hpre_devices, HPRE_CTX_Q_NUM_DEF);
	hisi_qm_del_list(qm, &hpre_devices);
	if (qm->fun_type == QM_HW_PF && qm->vfs_num)
		hisi_qm_sriov_disable(pdev, true);

	hpre_debugfs_exit(qm);
	hisi_qm_stop(qm, QM_NORMAL);

	hpre_probe_uninit(qm);
	hisi_qm_uninit(qm);
}
static const struct dev_pm_ops hpre_pm_ops = {
	SET_RUNTIME_PM_OPS(hisi_qm_suspend, hisi_qm_resume, NULL)
};
static const struct pci_error_handlers hpre_err_handler = {
	.error_detected		= hisi_qm_dev_err_detected,
	.slot_reset		= hisi_qm_dev_slot_reset,
	.reset_prepare		= hisi_qm_reset_prepare,
	.reset_done		= hisi_qm_reset_done,
};
static struct pci_driver hpre_pci_driver = {
	.name			= hpre_name,
	.id_table		= hpre_dev_ids,
	.probe			= hpre_probe,
	.remove			= hpre_remove,
	.sriov_configure	= IS_ENABLED(CONFIG_PCI_IOV) ?
				  hisi_qm_sriov_configure : NULL,
	.err_handler		= &hpre_err_handler,
	.shutdown		= hisi_qm_dev_shutdown,
	.driver.pm		= &hpre_pm_ops,
};
struct pci_driver *hisi_hpre_get_pf_driver(void)
{
	return &hpre_pci_driver;
}
EXPORT_SYMBOL_GPL(hisi_hpre_get_pf_driver);
static void hpre_register_debugfs(void)
{
	if (!debugfs_initialized())
		return;

	hpre_debugfs_root = debugfs_create_dir(hpre_name, NULL);
}
static void hpre_unregister_debugfs(void)
{
	debugfs_remove_recursive(hpre_debugfs_root);
}
static int __init hpre_init(void)
{
	int ret;

	hisi_qm_init_list(&hpre_devices);
	hpre_register_debugfs();

	ret = pci_register_driver(&hpre_pci_driver);
	if (ret) {
		hpre_unregister_debugfs();
		pr_err("hpre: can't register hisi hpre driver.\n");
	}

	return ret;
}
static void __exit hpre_exit(void)
{
	pci_unregister_driver(&hpre_pci_driver);
	hpre_unregister_debugfs();
}
module_init(hpre_init);
module_exit(hpre_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Zaibo Xu <xuzaibo@huawei.com>");
MODULE_AUTHOR("Meng Yu <yumeng18@huawei.com>");
MODULE_DESCRIPTION("Driver for HiSilicon HPRE accelerator");