// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018-2019 HiSilicon Limited. */
#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/topology.h>
#include <linux/uacce.h>
#include "hpre.h"
#define CAP_FILE_PERMISSION		0444
#define HPRE_CTRL_CNT_CLR_CE_BIT	BIT(0)
#define HPRE_CTRL_CNT_CLR_CE		0x301000
#define HPRE_FSM_MAX_CNT		0x301008
#define HPRE_VFG_AXQOS			0x30100c
#define HPRE_VFG_AXCACHE		0x301010
#define HPRE_RDCHN_INI_CFG		0x301014
#define HPRE_AWUSR_FP_CFG		0x301018
#define HPRE_BD_ENDIAN			0x301020
#define HPRE_ECC_BYPASS			0x301024
#define HPRE_RAS_WIDTH_CFG		0x301028
#define HPRE_POISON_BYPASS		0x30102c
#define HPRE_BD_ARUSR_CFG		0x301030
#define HPRE_BD_AWUSR_CFG		0x301034
#define HPRE_TYPES_ENB			0x301038
#define HPRE_RSA_ENB			BIT(0)
#define HPRE_ECC_ENB			BIT(1)
#define HPRE_DATA_RUSER_CFG		0x30103c
#define HPRE_DATA_WUSER_CFG		0x301040
#define HPRE_INT_MASK			0x301400
#define HPRE_INT_STATUS			0x301800
#define HPRE_HAC_INT_MSK		0x301400
#define HPRE_HAC_RAS_CE_ENB		0x301410
#define HPRE_HAC_RAS_NFE_ENB		0x301414
#define HPRE_HAC_RAS_FE_ENB		0x301418
#define HPRE_HAC_INT_SET		0x301500
#define HPRE_RNG_TIMEOUT_NUM		0x301A34
#define HPRE_CORE_INT_ENABLE		0
#define HPRE_RDCHN_INI_ST		0x301a00
#define HPRE_CLSTR_BASE			0x302000
#define HPRE_CORE_EN_OFFSET		0x04
#define HPRE_CORE_INI_CFG_OFFSET	0x20
#define HPRE_CORE_INI_STATUS_OFFSET	0x80
#define HPRE_CORE_HTBT_WARN_OFFSET	0x8c
#define HPRE_CORE_IS_SCHD_OFFSET	0x90

#define HPRE_RAS_CE_ENB			0x301410
#define HPRE_RAS_NFE_ENB		0x301414
#define HPRE_RAS_FE_ENB			0x301418
#define HPRE_OOO_SHUTDOWN_SEL		0x301a3c
#define HPRE_HAC_RAS_FE_ENABLE		0

#define HPRE_CORE_ENB		(HPRE_CLSTR_BASE + HPRE_CORE_EN_OFFSET)
#define HPRE_CORE_INI_CFG	(HPRE_CLSTR_BASE + HPRE_CORE_INI_CFG_OFFSET)
#define HPRE_CORE_INI_STATUS	(HPRE_CLSTR_BASE + HPRE_CORE_INI_STATUS_OFFSET)
#define HPRE_HAC_ECC1_CNT		0x301a04
#define HPRE_HAC_ECC2_CNT		0x301a08
#define HPRE_HAC_SOURCE_INT		0x301600
#define HPRE_CLSTR_ADDR_INTRVL		0x1000
#define HPRE_CLUSTER_INQURY		0x100
#define HPRE_CLSTR_ADDR_INQRY_RSLT	0x104
#define HPRE_PASID_EN_BIT		9
#define HPRE_REG_RD_INTVRL_US		10
#define HPRE_REG_RD_TMOUT_US		1000
#define HPRE_DBGFS_VAL_MAX_LEN		20
#define PCI_DEVICE_ID_HUAWEI_HPRE_PF	0xa258
#define HPRE_QM_USR_CFG_MASK		GENMASK(31, 1)
#define HPRE_QM_AXI_CFG_MASK		GENMASK(15, 0)
#define HPRE_QM_VFG_AX_MASK		GENMASK(7, 0)
#define HPRE_BD_USR_MASK		GENMASK(1, 0)
#define HPRE_PREFETCH_CFG		0x301130
#define HPRE_SVA_PREFTCH_DFX		0x30115C
#define HPRE_PREFETCH_ENABLE		(~(BIT(0) | BIT(30)))
#define HPRE_PREFETCH_DISABLE		BIT(30)
#define HPRE_SVA_DISABLE_READY		(BIT(4) | BIT(8))

#define HPRE_CLKGATE_CTL		0x301a10
#define HPRE_PEH_CFG_AUTO_GATE		0x301a2c
#define HPRE_CLUSTER_DYN_CTL		0x302010
#define HPRE_CORE_SHB_CFG		0x302088
#define HPRE_CLKGATE_CTL_EN		BIT(0)
#define HPRE_PEH_CFG_AUTO_GATE_EN	BIT(0)
#define HPRE_CLUSTER_DYN_CTL_EN		BIT(0)
#define HPRE_CORE_GATE_EN		(BIT(30) | BIT(31))

#define HPRE_AM_OOO_SHUTDOWN_ENB	0x301044
#define HPRE_AM_OOO_SHUTDOWN_ENABLE	BIT(0)
#define HPRE_WR_MSI_PORT		BIT(2)

#define HPRE_CORE_ECC_2BIT_ERR		BIT(1)
#define HPRE_OOO_ECC_2BIT_ERR		BIT(5)

#define HPRE_QM_BME_FLR			BIT(7)
#define HPRE_QM_PM_FLR			BIT(11)
#define HPRE_QM_SRIOV_FLR		BIT(12)

#define HPRE_SHAPER_TYPE_RATE		640
#define HPRE_VIA_MSI_DSM		1
#define HPRE_SQE_MASK_OFFSET		8
#define HPRE_SQE_MASK_LEN		44
#define HPRE_CTX_Q_NUM_DEF		1

#define HPRE_DFX_BASE			0x301000
#define HPRE_DFX_COMMON1		0x301400
#define HPRE_DFX_COMMON2		0x301A00
#define HPRE_DFX_CORE			0x302000
#define HPRE_DFX_BASE_LEN		0x55
#define HPRE_DFX_COMMON1_LEN		0x41
#define HPRE_DFX_COMMON2_LEN		0xE
#define HPRE_DFX_CORE_LEN		0x43
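/*
 * Module-wide state follows: the driver name, the debugfs root directory and
 * the PCI IDs served by this driver (the PF device 0xa258 and the matching
 * VF device).
 */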
static const char hpre_name[] = "hisi_hpre";
static struct dentry *hpre_debugfs_root;
static const struct pci_device_id hpre_dev_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_HPRE_PF) },
	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_HPRE_VF) },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(pci, hpre_dev_ids);
struct hpre_hw_error {
	u32 int_msk;
	const char *msg;
};

static const struct qm_dev_alg hpre_dev_algs[] = {
	/* ... */
};

static struct hisi_qm_list hpre_devices = {
	.register_to_crypto	= hpre_algs_register,
	.unregister_from_crypto	= hpre_algs_unregister,
};
static const char * const hpre_debug_file_name[] = {
	[HPRE_CLEAR_ENABLE] = "rdclr_en",
	[HPRE_CLUSTER_CTRL] = "cluster_ctrl",
};
enum hpre_cap_type {
	HPRE_QM_NFE_MASK_CAP,
	HPRE_QM_RESET_MASK_CAP,
	HPRE_QM_OOO_SHUTDOWN_MASK_CAP,
	HPRE_QM_CE_MASK_CAP,
	HPRE_NFE_MASK_CAP,
	HPRE_RESET_MASK_CAP,
	HPRE_OOO_SHUTDOWN_MASK_CAP,
	HPRE_CE_MASK_CAP,
	HPRE_CLUSTER_NUM_CAP,
	HPRE_CORE_TYPE_NUM_CAP,
	HPRE_CORE_NUM_CAP,
	HPRE_CLUSTER_CORE_NUM_CAP,
	HPRE_CORE_ENABLE_BITMAP_CAP,
	HPRE_DRV_ALG_BITMAP_CAP,
	HPRE_DEV_ALG_BITMAP_CAP,
	HPRE_CORE1_ALG_BITMAP_CAP,
	HPRE_CORE2_ALG_BITMAP_CAP,
	HPRE_CORE3_ALG_BITMAP_CAP,
	HPRE_CORE4_ALG_BITMAP_CAP,
	HPRE_CORE5_ALG_BITMAP_CAP,
	HPRE_CORE6_ALG_BITMAP_CAP,
	HPRE_CORE7_ALG_BITMAP_CAP,
	HPRE_CORE8_ALG_BITMAP_CAP,
	HPRE_CORE9_ALG_BITMAP_CAP,
	HPRE_CORE10_ALG_BITMAP_CAP
};
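/*
 * Each hpre_basic_info[] entry appears to follow the hisi_qm_cap_info layout:
 * { capability id, register offset, bit shift, mask, then the values used for
 * hardware without capability registers, V2 and V3 respectively }.
 */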
static const struct hisi_qm_cap_info hpre_basic_info[] = {
	{HPRE_QM_NFE_MASK_CAP, 0x3124, 0, GENMASK(31, 0), 0x0, 0x1C37, 0x7C37},
	{HPRE_QM_RESET_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0xC37, 0x6C37},
	{HPRE_QM_OOO_SHUTDOWN_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0x4, 0x6C37},
	{HPRE_QM_CE_MASK_CAP, 0x312C, 0, GENMASK(31, 0), 0x0, 0x8, 0x8},
	{HPRE_NFE_MASK_CAP, 0x3130, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0x1FFFC3E},
	{HPRE_RESET_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0xBFFC3E},
	{HPRE_OOO_SHUTDOWN_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x22, 0xBFFC3E},
	{HPRE_CE_MASK_CAP, 0x3138, 0, GENMASK(31, 0), 0x0, 0x1, 0x1},
	{HPRE_CLUSTER_NUM_CAP, 0x313c, 20, GENMASK(3, 0), 0x0, 0x4, 0x1},
	{HPRE_CORE_TYPE_NUM_CAP, 0x313c, 16, GENMASK(3, 0), 0x0, 0x2, 0x2},
	{HPRE_CORE_NUM_CAP, 0x313c, 8, GENMASK(7, 0), 0x0, 0x8, 0xA},
	{HPRE_CLUSTER_CORE_NUM_CAP, 0x313c, 0, GENMASK(7, 0), 0x0, 0x2, 0xA},
	{HPRE_CORE_ENABLE_BITMAP_CAP, 0x3140, 0, GENMASK(31, 0), 0x0, 0xF, 0x3FF},
	{HPRE_DRV_ALG_BITMAP_CAP, 0x3144, 0, GENMASK(31, 0), 0x0, 0x03, 0x27},
	{HPRE_DEV_ALG_BITMAP_CAP, 0x3148, 0, GENMASK(31, 0), 0x0, 0x03, 0x7F},
	{HPRE_CORE1_ALG_BITMAP_CAP, 0x314c, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
	{HPRE_CORE2_ALG_BITMAP_CAP, 0x3150, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
	{HPRE_CORE3_ALG_BITMAP_CAP, 0x3154, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
	{HPRE_CORE4_ALG_BITMAP_CAP, 0x3158, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
	{HPRE_CORE5_ALG_BITMAP_CAP, 0x315c, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
	{HPRE_CORE6_ALG_BITMAP_CAP, 0x3160, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
	{HPRE_CORE7_ALG_BITMAP_CAP, 0x3164, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
	{HPRE_CORE8_ALG_BITMAP_CAP, 0x3168, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
	{HPRE_CORE9_ALG_BITMAP_CAP, 0x316c, 0, GENMASK(31, 0), 0x0, 0x10, 0x10},
	{HPRE_CORE10_ALG_BITMAP_CAP, 0x3170, 0, GENMASK(31, 0), 0x0, 0x10, 0x10}
};
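/*
 * hpre_cap_query_info[] is the query table used by hpre_pre_store_cap_reg():
 * each entry carries a printable name (dumped through the "cap_regs" debugfs
 * file) plus per-version defaults, and the resolved values are cached in
 * qm->cap_tables.dev_cap_table.
 */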
static const struct hisi_qm_cap_query_info hpre_cap_query_info[] = {
	{QM_RAS_NFE_TYPE, "QM_RAS_NFE_TYPE ", 0x3124, 0x0, 0x1C37, 0x7C37},
	{QM_RAS_NFE_RESET, "QM_RAS_NFE_RESET ", 0x3128, 0x0, 0xC77, 0x6C77},
	{QM_RAS_CE_TYPE, "QM_RAS_CE_TYPE ", 0x312C, 0x0, 0x8, 0x8},
	{HPRE_RAS_NFE_TYPE, "HPRE_RAS_NFE_TYPE ", 0x3130, 0x0, 0x3FFFFE, 0x1FFFC3E},
	{HPRE_RAS_NFE_RESET, "HPRE_RAS_NFE_RESET ", 0x3134, 0x0, 0x3FFFFE, 0xBFFC3E},
	{HPRE_RAS_CE_TYPE, "HPRE_RAS_CE_TYPE ", 0x3138, 0x0, 0x1, 0x1},
	{HPRE_CORE_INFO, "HPRE_CORE_INFO ", 0x313c, 0x0, 0x420802, 0x120A0A},
	{HPRE_CORE_EN, "HPRE_CORE_EN ", 0x3140, 0x0, 0xF, 0x3FF},
	{HPRE_DRV_ALG_BITMAP, "HPRE_DRV_ALG_BITMAP ", 0x3144, 0x0, 0x03, 0x27},
	{HPRE_ALG_BITMAP, "HPRE_ALG_BITMAP ", 0x3148, 0x0, 0x03, 0x7F},
	{HPRE_CORE1_BITMAP_CAP, "HPRE_CORE1_BITMAP_CAP ", 0x314c, 0x0, 0x7F, 0x7F},
	{HPRE_CORE2_BITMAP_CAP, "HPRE_CORE2_BITMAP_CAP ", 0x3150, 0x0, 0x7F, 0x7F},
	{HPRE_CORE3_BITMAP_CAP, "HPRE_CORE3_BITMAP_CAP ", 0x3154, 0x0, 0x7F, 0x7F},
	{HPRE_CORE4_BITMAP_CAP, "HPRE_CORE4_BITMAP_CAP ", 0x3158, 0x0, 0x7F, 0x7F},
	{HPRE_CORE5_BITMAP_CAP, "HPRE_CORE5_BITMAP_CAP ", 0x315c, 0x0, 0x7F, 0x7F},
	{HPRE_CORE6_BITMAP_CAP, "HPRE_CORE6_BITMAP_CAP ", 0x3160, 0x0, 0x7F, 0x7F},
	{HPRE_CORE7_BITMAP_CAP, "HPRE_CORE7_BITMAP_CAP ", 0x3164, 0x0, 0x7F, 0x7F},
	{HPRE_CORE8_BITMAP_CAP, "HPRE_CORE8_BITMAP_CAP ", 0x3168, 0x0, 0x7F, 0x7F},
	{HPRE_CORE9_BITMAP_CAP, "HPRE_CORE9_BITMAP_CAP ", 0x316c, 0x0, 0x10, 0x10},
	{HPRE_CORE10_BITMAP_CAP, "HPRE_CORE10_BITMAP_CAP ", 0x3170, 0x0, 0x10, 0x10},
};
static const struct hpre_hw_error hpre_hw_errors[] = {
	{
		.int_msk = BIT(0),
		.msg = "core_ecc_1bit_err_int_set"
	}, {
		.int_msk = BIT(1),
		.msg = "core_ecc_2bit_err_int_set"
	}, {
		.int_msk = BIT(2),
		.msg = "dat_wb_poison_int_set"
	}, {
		.int_msk = BIT(3),
		.msg = "dat_rd_poison_int_set"
	}, {
		.int_msk = BIT(4),
		.msg = "bd_rd_poison_int_set"
	}, {
		.int_msk = BIT(5),
		.msg = "ooo_ecc_2bit_err_int_set"
	}, {
		.int_msk = BIT(6),
		.msg = "cluster1_shb_timeout_int_set"
	}, {
		.int_msk = BIT(7),
		.msg = "cluster2_shb_timeout_int_set"
	}, {
		.int_msk = BIT(8),
		.msg = "cluster3_shb_timeout_int_set"
	}, {
		.int_msk = BIT(9),
		.msg = "cluster4_shb_timeout_int_set"
	}, {
		.int_msk = GENMASK(15, 10),
		.msg = "ooo_rdrsp_err_int_set"
	}, {
		.int_msk = GENMASK(21, 16),
		.msg = "ooo_wrrsp_err_int_set"
	}, {
		.int_msk = BIT(22),
		.msg = "pt_rng_timeout_int_set"
	}, {
		.int_msk = BIT(23),
		.msg = "sva_fsm_timeout_int_set"
	}, {
		/* sentinel */
	}
};
static const u64 hpre_cluster_offsets[] = {
	[HPRE_CLUSTER0] =
		HPRE_CLSTR_BASE + HPRE_CLUSTER0 * HPRE_CLSTR_ADDR_INTRVL,
	[HPRE_CLUSTER1] =
		HPRE_CLSTR_BASE + HPRE_CLUSTER1 * HPRE_CLSTR_ADDR_INTRVL,
	[HPRE_CLUSTER2] =
		HPRE_CLSTR_BASE + HPRE_CLUSTER2 * HPRE_CLSTR_ADDR_INTRVL,
	[HPRE_CLUSTER3] =
		HPRE_CLSTR_BASE + HPRE_CLUSTER3 * HPRE_CLSTR_ADDR_INTRVL,
};
static const struct debugfs_reg32 hpre_cluster_dfx_regs[] = {
	{"CORES_EN_STATUS ", HPRE_CORE_EN_OFFSET},
	{"CORES_INI_CFG ", HPRE_CORE_INI_CFG_OFFSET},
	{"CORES_INI_STATUS ", HPRE_CORE_INI_STATUS_OFFSET},
	{"CORES_HTBT_WARN ", HPRE_CORE_HTBT_WARN_OFFSET},
	{"CORES_IS_SCHD ", HPRE_CORE_IS_SCHD_OFFSET},
};
static const struct debugfs_reg32 hpre_com_dfx_regs[] = {
	{"READ_CLR_EN ", HPRE_CTRL_CNT_CLR_CE},
	{"AXQOS ", HPRE_VFG_AXQOS},
	{"AWUSR_CFG ", HPRE_AWUSR_FP_CFG},
	{"BD_ENDIAN ", HPRE_BD_ENDIAN},
	{"ECC_CHECK_CTRL ", HPRE_ECC_BYPASS},
	{"RAS_INT_WIDTH ", HPRE_RAS_WIDTH_CFG},
	{"POISON_BYPASS ", HPRE_POISON_BYPASS},
	{"BD_ARUSER ", HPRE_BD_ARUSR_CFG},
	{"BD_AWUSER ", HPRE_BD_AWUSR_CFG},
	{"DATA_ARUSER ", HPRE_DATA_RUSER_CFG},
	{"DATA_AWUSER ", HPRE_DATA_WUSER_CFG},
	{"INT_STATUS ", HPRE_INT_STATUS},
	{"INT_MASK ", HPRE_HAC_INT_MSK},
	{"RAS_CE_ENB ", HPRE_HAC_RAS_CE_ENB},
	{"RAS_NFE_ENB ", HPRE_HAC_RAS_NFE_ENB},
	{"RAS_FE_ENB ", HPRE_HAC_RAS_FE_ENB},
	{"INT_SET ", HPRE_HAC_INT_SET},
	{"RNG_TIMEOUT_NUM ", HPRE_RNG_TIMEOUT_NUM},
};
static const char *hpre_dfx_files[HPRE_DFX_FILE_NUM] = {
	/* ... */
};
/* define the HPRE's dfx regs region and region length */
static struct dfx_diff_registers hpre_diff_regs[] = {
	{
		.reg_offset = HPRE_DFX_BASE,
		.reg_len = HPRE_DFX_BASE_LEN,
	}, {
		.reg_offset = HPRE_DFX_COMMON1,
		.reg_len = HPRE_DFX_COMMON1_LEN,
	}, {
		.reg_offset = HPRE_DFX_COMMON2,
		.reg_len = HPRE_DFX_COMMON2_LEN,
	}, {
		.reg_offset = HPRE_DFX_CORE,
		.reg_len = HPRE_DFX_CORE_LEN,
	},
};
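/*
 * hpre_err_ini is only forward-declared here because hpre_qm_init() takes its
 * address before the structure is defined near the end of the file.
 * hpre_check_alg_support() is non-static so the hpre algorithm code can test
 * an algorithm bit against the driver algorithm bitmap capability.
 */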
static const struct hisi_qm_err_ini hpre_err_ini;

bool hpre_check_alg_support(struct hisi_qm *qm, u32 alg)
{
	u32 cap_val;

	cap_val = qm->cap_tables.dev_cap_table[HPRE_DRV_ALG_BITMAP].cap_val;
	if (alg & cap_val)
		return true;

	return false;
}
static int hpre_diff_regs_show(struct seq_file *s, void *unused)
{
	struct hisi_qm *qm = s->private;

	hisi_qm_acc_diff_regs_dump(qm, s, qm->debug.acc_diff_regs,
				   ARRAY_SIZE(hpre_diff_regs));

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(hpre_diff_regs);
static int hpre_com_regs_show(struct seq_file *s, void *unused)
{
	hisi_qm_regs_dump(s, s->private);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(hpre_com_regs);
static int hpre_cluster_regs_show(struct seq_file *s, void *unused)
{
	hisi_qm_regs_dump(s, s->private);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(hpre_cluster_regs);
static const struct kernel_param_ops hpre_uacce_mode_ops = {
	.set = uacce_mode_set,
	.get = param_get_int,
};

/*
 * uacce_mode = 0 means hpre only registers to crypto,
 * uacce_mode = 1 means hpre registers to both crypto and uacce.
 */
static u32 uacce_mode = UACCE_MODE_NOUACCE;
module_param_cb(uacce_mode, &hpre_uacce_mode_ops, &uacce_mode, 0444);
MODULE_PARM_DESC(uacce_mode, UACCE_MODE_DESC);
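/*
 * pf_q_num_flag records that pf_q_num was set explicitly on the command line;
 * hpre_qm_init() uses it to set QM_MODULE_PARAM in qm->misc_ctl.
 */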
static bool pf_q_num_flag;
static int pf_q_num_set(const char *val, const struct kernel_param *kp)
{
	pf_q_num_flag = true;

	return hisi_qm_q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_HPRE_PF);
}

static const struct kernel_param_ops hpre_pf_q_num_ops = {
	.set = pf_q_num_set,
	.get = param_get_int,
};

static u32 pf_q_num = HPRE_PF_DEF_Q_NUM;
module_param_cb(pf_q_num, &hpre_pf_q_num_ops, &pf_q_num, 0444);
MODULE_PARM_DESC(pf_q_num, "Number of queues in PF of CS(2-1024)");
static const struct kernel_param_ops vfs_num_ops = {
	.set = vfs_num_set,
	.get = param_get_int,
};

static u32 vfs_num;
module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)");
struct hisi_qp *hpre_create_qp(u8 type)
{
	int node = cpu_to_node(raw_smp_processor_id());
	struct hisi_qp *qp = NULL;
	int ret;

	if (type != HPRE_V2_ALG_TYPE && type != HPRE_V3_ECC_ALG_TYPE)
		return NULL;

	/*
	 * type: 0 - RSA/DH. algorithm supported in V2,
	 *       1 - ECC algorithm in V3.
	 */
	ret = hisi_qm_alloc_qps_node(&hpre_devices, 1, type, node, &qp);
	if (!ret)
		return qp;

	return NULL;
}
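/*
 * PASID enable bits in the data-buffer user config registers only need to be
 * programmed on Kunpeng 920 (HW V2); hpre_config_pasid() returns early on
 * HW V3 and later.
 */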
static void hpre_config_pasid(struct hisi_qm *qm)
{
	u32 val1, val2;

	if (qm->ver >= QM_HW_V3)
		return;

	val1 = readl_relaxed(qm->io_base + HPRE_DATA_RUSER_CFG);
	val2 = readl_relaxed(qm->io_base + HPRE_DATA_WUSER_CFG);
	if (qm->use_sva) {
		val1 |= BIT(HPRE_PASID_EN_BIT);
		val2 |= BIT(HPRE_PASID_EN_BIT);
	} else {
		val1 &= ~BIT(HPRE_PASID_EN_BIT);
		val2 &= ~BIT(HPRE_PASID_EN_BIT);
	}
	writel_relaxed(val1, qm->io_base + HPRE_DATA_RUSER_CFG);
	writel_relaxed(val2, qm->io_base + HPRE_DATA_WUSER_CFG);
}
static int hpre_cfg_by_dsm(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	union acpi_object *obj;
	guid_t guid;

	if (guid_parse("b06b81ab-0134-4a45-9b0c-483447b95fa7", &guid)) {
		dev_err(dev, "Hpre GUID failed\n");
		return -EINVAL;
	}

	/* Switch over to MSI handling due to non-standard PCI implementation */
	obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &guid,
				0, HPRE_VIA_MSI_DSM, NULL);
	if (!obj) {
		dev_err(dev, "ACPI handle failed!\n");
		return -EIO;
	}

	ACPI_FREE(obj);

	return 0;
}
static int hpre_set_cluster(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	u32 cluster_core_mask;
	unsigned long offset;
	u32 hpre_core_info;
	u8 clusters_num;
	u32 val = 0;
	int ret, i;

	cluster_core_mask = qm->cap_tables.dev_cap_table[HPRE_CORE_EN].cap_val;
	hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val;
	clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) &
			hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask;
	for (i = 0; i < clusters_num; i++) {
		offset = i * HPRE_CLSTR_ADDR_INTRVL;

		/* clusters initiating */
		writel(cluster_core_mask,
		       qm->io_base + offset + HPRE_CORE_ENB);
		writel(0x1, qm->io_base + offset + HPRE_CORE_INI_CFG);
		ret = readl_relaxed_poll_timeout(qm->io_base + offset +
					HPRE_CORE_INI_STATUS, val,
					((val & cluster_core_mask) ==
					cluster_core_mask),
					HPRE_REG_RD_INTVRL_US,
					HPRE_REG_RD_TMOUT_US);
		if (ret) {
			dev_err(dev,
				"cluster %d int st status timeout!\n", i);
			return -ETIMEDOUT;
		}
	}

	return 0;
}
/*
 * For Kunpeng 920, FLR triggered by hardware (BME/PM/SRIOV) should be
 * disabled. Otherwise the device may stay in D3 state when hpre is bound
 * and unbound quickly, because the hardware-triggered FLR is performed.
 */
static void disable_flr_of_bme(struct hisi_qm *qm)
{
	u32 val;

	val = readl(qm->io_base + QM_PEH_AXUSER_CFG);
	val &= ~(HPRE_QM_BME_FLR | HPRE_QM_SRIOV_FLR);
	val |= HPRE_QM_PM_FLR;
	writel(val, qm->io_base + QM_PEH_AXUSER_CFG);
	writel(PEH_AXUSER_CFG_ENABLE, qm->io_base + QM_PEH_AXUSER_CFG_ENABLE);
}
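/*
 * SVA prefetch open/close is only done when QM_SUPPORT_SVA_PREFETCH is set in
 * qm->caps; both paths poll a register until the prefetch engine reflects the
 * requested state and only warn on timeout.
 */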
static void hpre_open_sva_prefetch(struct hisi_qm *qm)
{
	u32 val;
	int ret;

	if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
		return;

	/* Enable prefetch */
	val = readl_relaxed(qm->io_base + HPRE_PREFETCH_CFG);
	val &= HPRE_PREFETCH_ENABLE;
	writel(val, qm->io_base + HPRE_PREFETCH_CFG);

	ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_PREFETCH_CFG,
					 val, !(val & HPRE_PREFETCH_DISABLE),
					 HPRE_REG_RD_INTVRL_US,
					 HPRE_REG_RD_TMOUT_US);
	if (ret)
		pci_err(qm->pdev, "failed to open sva prefetch\n");
}
static void hpre_close_sva_prefetch(struct hisi_qm *qm)
{
	u32 val;
	int ret;

	if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
		return;

	val = readl_relaxed(qm->io_base + HPRE_PREFETCH_CFG);
	val |= HPRE_PREFETCH_DISABLE;
	writel(val, qm->io_base + HPRE_PREFETCH_CFG);

	ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_SVA_PREFTCH_DFX,
					 val, !(val & HPRE_SVA_DISABLE_READY),
					 HPRE_REG_RD_INTVRL_US,
					 HPRE_REG_RD_TMOUT_US);
	if (ret)
		pci_err(qm->pdev, "failed to close sva prefetch\n");
}
static void hpre_enable_clock_gate(struct hisi_qm *qm)
{
	unsigned long offset;
	u32 hpre_core_info;
	u8 clusters_num, i;
	u32 val;

	if (qm->ver < QM_HW_V3)
		return;

	val = readl(qm->io_base + HPRE_CLKGATE_CTL);
	val |= HPRE_CLKGATE_CTL_EN;
	writel(val, qm->io_base + HPRE_CLKGATE_CTL);

	val = readl(qm->io_base + HPRE_PEH_CFG_AUTO_GATE);
	val |= HPRE_PEH_CFG_AUTO_GATE_EN;
	writel(val, qm->io_base + HPRE_PEH_CFG_AUTO_GATE);

	hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val;
	clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) &
			hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask;
	for (i = 0; i < clusters_num; i++) {
		offset = (unsigned long)i * HPRE_CLSTR_ADDR_INTRVL;
		val = readl(qm->io_base + offset + HPRE_CLUSTER_DYN_CTL);
		val |= HPRE_CLUSTER_DYN_CTL_EN;
		writel(val, qm->io_base + offset + HPRE_CLUSTER_DYN_CTL);

		val = readl(qm->io_base + offset + HPRE_CORE_SHB_CFG);
		val |= HPRE_CORE_GATE_EN;
		writel(val, qm->io_base + offset + HPRE_CORE_SHB_CFG);
	}
}
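/*
 * hpre_disable_clock_gate() mirrors hpre_enable_clock_gate() and is called
 * from hpre_set_user_domain_and_cache() before SRAM init, so dynamic clock
 * gating is off while the device is being configured.
 */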
static void hpre_disable_clock_gate(struct hisi_qm *qm)
{
	unsigned long offset;
	u32 hpre_core_info;
	u8 clusters_num, i;
	u32 val;

	if (qm->ver < QM_HW_V3)
		return;

	val = readl(qm->io_base + HPRE_CLKGATE_CTL);
	val &= ~HPRE_CLKGATE_CTL_EN;
	writel(val, qm->io_base + HPRE_CLKGATE_CTL);

	val = readl(qm->io_base + HPRE_PEH_CFG_AUTO_GATE);
	val &= ~HPRE_PEH_CFG_AUTO_GATE_EN;
	writel(val, qm->io_base + HPRE_PEH_CFG_AUTO_GATE);

	hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val;
	clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) &
			hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask;
	for (i = 0; i < clusters_num; i++) {
		offset = (unsigned long)i * HPRE_CLSTR_ADDR_INTRVL;
		val = readl(qm->io_base + offset + HPRE_CLUSTER_DYN_CTL);
		val &= ~HPRE_CLUSTER_DYN_CTL_EN;
		writel(val, qm->io_base + offset + HPRE_CLUSTER_DYN_CTL);

		val = readl(qm->io_base + offset + HPRE_CORE_SHB_CFG);
		val &= ~HPRE_CORE_GATE_EN;
		writel(val, qm->io_base + offset + HPRE_CORE_SHB_CFG);
	}
}
static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	u32 val;
	int ret;

	/* disable dynamic clock gate before sram init */
	hpre_disable_clock_gate(qm);

	writel(HPRE_QM_USR_CFG_MASK, qm->io_base + QM_ARUSER_M_CFG_ENABLE);
	writel(HPRE_QM_USR_CFG_MASK, qm->io_base + QM_AWUSER_M_CFG_ENABLE);
	writel_relaxed(HPRE_QM_AXI_CFG_MASK, qm->io_base + QM_AXI_M_CFG);

	if (qm->ver >= QM_HW_V3)
		writel(HPRE_RSA_ENB | HPRE_ECC_ENB,
		       qm->io_base + HPRE_TYPES_ENB);
	else
		writel(HPRE_RSA_ENB, qm->io_base + HPRE_TYPES_ENB);

	writel(HPRE_QM_VFG_AX_MASK, qm->io_base + HPRE_VFG_AXCACHE);
	writel(0x0, qm->io_base + HPRE_BD_ENDIAN);
	writel(0x0, qm->io_base + HPRE_POISON_BYPASS);
	writel(0x0, qm->io_base + HPRE_ECC_BYPASS);

	writel(HPRE_BD_USR_MASK, qm->io_base + HPRE_BD_ARUSR_CFG);
	writel(HPRE_BD_USR_MASK, qm->io_base + HPRE_BD_AWUSR_CFG);
	writel(0x1, qm->io_base + HPRE_RDCHN_INI_CFG);
	ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_RDCHN_INI_ST, val,
					 val & BIT(0),
					 HPRE_REG_RD_INTVRL_US,
					 HPRE_REG_RD_TMOUT_US);
	if (ret) {
		dev_err(dev, "read rd channel timeout fail!\n");
		return -ETIMEDOUT;
	}

	ret = hpre_set_cluster(qm);
	if (ret)
		return -ETIMEDOUT;

	/* This setting is only needed by Kunpeng 920. */
	if (qm->ver == QM_HW_V2) {
		ret = hpre_cfg_by_dsm(qm);
		if (ret)
			return ret;

		disable_flr_of_bme(qm);
	}

	/* Config data buffer pasid needed by Kunpeng 920 */
	hpre_config_pasid(qm);

	hpre_enable_clock_gate(qm);

	return ret;
}
static void hpre_cnt_regs_clear(struct hisi_qm *qm)
{
	unsigned long offset;
	u32 hpre_core_info;
	u8 clusters_num;
	int i;

	/* clear clusterX/cluster_ctrl */
	hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val;
	clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) &
			hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask;
	for (i = 0; i < clusters_num; i++) {
		offset = HPRE_CLSTR_BASE + i * HPRE_CLSTR_ADDR_INTRVL;
		writel(0x0, qm->io_base + offset + HPRE_CLUSTER_INQURY);
	}

	/* clear rdclr_en */
	writel(0x0, qm->io_base + HPRE_CTRL_CNT_CLR_CE);

	hisi_qm_debug_regs_clear(qm);
}
static void hpre_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
{
	u32 val1, val2;

	val1 = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
	if (enable) {
		val1 |= HPRE_AM_OOO_SHUTDOWN_ENABLE;
		val2 = hisi_qm_get_hw_info(qm, hpre_basic_info,
					   HPRE_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
	} else {
		val1 &= ~HPRE_AM_OOO_SHUTDOWN_ENABLE;
		val2 = 0x0;
	}

	if (qm->ver > QM_HW_V2)
		writel(val2, qm->io_base + HPRE_OOO_SHUTDOWN_SEL);

	writel(val1, qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
}
static void hpre_hw_error_disable(struct hisi_qm *qm)
{
	u32 ce, nfe;

	ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP, qm->cap_ver);
	nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);

	/* disable hpre hw error interrupts */
	writel(ce | nfe | HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_INT_MASK);
	/* disable HPRE block master OOO when nfe occurs on Kunpeng930 */
	hpre_master_ooo_ctrl(qm, false);
}
static void hpre_hw_error_enable(struct hisi_qm *qm)
{
	u32 ce, nfe, err_en;

	ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP, qm->cap_ver);
	nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);

	/* clear HPRE hw error source if any */
	writel(ce | nfe | HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_HAC_SOURCE_INT);

	/* configure error type */
	writel(ce, qm->io_base + HPRE_RAS_CE_ENB);
	writel(nfe, qm->io_base + HPRE_RAS_NFE_ENB);
	writel(HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_RAS_FE_ENB);

	/* enable HPRE block master OOO when nfe occurs on Kunpeng930 */
	hpre_master_ooo_ctrl(qm, true);

	/* enable hpre hw error interrupts */
	err_en = ce | nfe | HPRE_HAC_RAS_FE_ENABLE;
	writel(~err_en, qm->io_base + HPRE_INT_MASK);
}
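/*
 * Debugfs control-file helpers: the read/write handlers below back the
 * "rdclr_en" and per-cluster "cluster_ctrl" files listed in
 * hpre_debug_file_name[].
 */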
static inline struct hisi_qm *hpre_file_to_qm(struct hpre_debugfs_file *file)
{
	struct hpre *hpre = container_of(file->debug, struct hpre, debug);

	return &hpre->qm;
}
static u32 hpre_clear_enable_read(struct hpre_debugfs_file *file)
{
	struct hisi_qm *qm = hpre_file_to_qm(file);

	return readl(qm->io_base + HPRE_CTRL_CNT_CLR_CE) &
	       HPRE_CTRL_CNT_CLR_CE_BIT;
}
static int hpre_clear_enable_write(struct hpre_debugfs_file *file, u32 val)
{
	struct hisi_qm *qm = hpre_file_to_qm(file);
	u32 tmp;

	if (val != 1 && val != 0)
		return -EINVAL;

	tmp = (readl(qm->io_base + HPRE_CTRL_CNT_CLR_CE) &
	       ~HPRE_CTRL_CNT_CLR_CE_BIT) | val;
	writel(tmp, qm->io_base + HPRE_CTRL_CNT_CLR_CE);

	return 0;
}
static u32 hpre_cluster_inqry_read(struct hpre_debugfs_file *file)
{
	struct hisi_qm *qm = hpre_file_to_qm(file);
	int cluster_index = file->index - HPRE_CLUSTER_CTRL;
	unsigned long offset = HPRE_CLSTR_BASE +
			       cluster_index * HPRE_CLSTR_ADDR_INTRVL;

	return readl(qm->io_base + offset + HPRE_CLSTR_ADDR_INQRY_RSLT);
}
static void hpre_cluster_inqry_write(struct hpre_debugfs_file *file, u32 val)
{
	struct hisi_qm *qm = hpre_file_to_qm(file);
	int cluster_index = file->index - HPRE_CLUSTER_CTRL;
	unsigned long offset = HPRE_CLSTR_BASE + cluster_index *
			       HPRE_CLSTR_ADDR_INTRVL;

	writel(val, qm->io_base + offset + HPRE_CLUSTER_INQURY);
}
static ssize_t hpre_ctrl_debug_read(struct file *filp, char __user *buf,
				    size_t count, loff_t *pos)
{
	struct hpre_debugfs_file *file = filp->private_data;
	struct hisi_qm *qm = hpre_file_to_qm(file);
	char tbuf[HPRE_DBGFS_VAL_MAX_LEN];
	u32 val;
	int ret;

	ret = hisi_qm_get_dfx_access(qm);
	if (ret)
		return ret;

	spin_lock_irq(&file->lock);
	switch (file->type) {
	case HPRE_CLEAR_ENABLE:
		val = hpre_clear_enable_read(file);
		break;
	case HPRE_CLUSTER_CTRL:
		val = hpre_cluster_inqry_read(file);
		break;
	default:
		goto err_input;
	}
	spin_unlock_irq(&file->lock);

	hisi_qm_put_dfx_access(qm);
	ret = snprintf(tbuf, HPRE_DBGFS_VAL_MAX_LEN, "%u\n", val);
	return simple_read_from_buffer(buf, count, pos, tbuf, ret);

err_input:
	spin_unlock_irq(&file->lock);
	hisi_qm_put_dfx_access(qm);
	return -EINVAL;
}
static ssize_t hpre_ctrl_debug_write(struct file *filp, const char __user *buf,
				     size_t count, loff_t *pos)
{
	struct hpre_debugfs_file *file = filp->private_data;
	struct hisi_qm *qm = hpre_file_to_qm(file);
	char tbuf[HPRE_DBGFS_VAL_MAX_LEN];
	unsigned long val;
	int len, ret;

	if (count >= HPRE_DBGFS_VAL_MAX_LEN)
		return -ENOSPC;

	len = simple_write_to_buffer(tbuf, HPRE_DBGFS_VAL_MAX_LEN - 1,
				     pos, buf, count);
	if (len < 0)
		return len;

	tbuf[len] = '\0';
	if (kstrtoul(tbuf, 0, &val))
		return -EFAULT;

	ret = hisi_qm_get_dfx_access(qm);
	if (ret)
		return ret;

	spin_lock_irq(&file->lock);
	switch (file->type) {
	case HPRE_CLEAR_ENABLE:
		ret = hpre_clear_enable_write(file, val);
		if (ret)
			goto err_input;
		break;
	case HPRE_CLUSTER_CTRL:
		hpre_cluster_inqry_write(file, val);
		break;
	default:
		ret = -EINVAL;
		goto err_input;
	}
	ret = count;

err_input:
	spin_unlock_irq(&file->lock);
	hisi_qm_put_dfx_access(qm);
	return ret;
}
static const struct file_operations hpre_ctrl_debug_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = hpre_ctrl_debug_read,
	.write = hpre_ctrl_debug_write,
};
static int hpre_debugfs_atomic64_get(void *data, u64 *val)
{
	struct hpre_dfx *dfx_item = data;

	*val = atomic64_read(&dfx_item->value);

	return 0;
}
static int hpre_debugfs_atomic64_set(void *data, u64 val)
{
	struct hpre_dfx *dfx_item = data;
	struct hpre_dfx *hpre_dfx = NULL;

	if (dfx_item->type == HPRE_OVERTIME_THRHLD) {
		hpre_dfx = dfx_item - HPRE_OVERTIME_THRHLD;
		atomic64_set(&hpre_dfx[HPRE_OVER_THRHLD_CNT].value, 0);
	} else if (val) {
		return -EINVAL;
	}

	atomic64_set(&dfx_item->value, val);

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(hpre_atomic64_ops, hpre_debugfs_atomic64_get,
			 hpre_debugfs_atomic64_set, "%llu\n");
static int hpre_create_debugfs_file(struct hisi_qm *qm, struct dentry *dir,
				    enum hpre_ctrl_dbgfs_file type, int indx)
{
	struct hpre *hpre = container_of(qm, struct hpre, qm);
	struct hpre_debug *dbg = &hpre->debug;
	struct dentry *file_dir;

	if (dir)
		file_dir = dir;
	else
		file_dir = qm->debug.debug_root;

	if (type >= HPRE_DEBUG_FILE_NUM)
		return -EINVAL;

	spin_lock_init(&dbg->files[indx].lock);
	dbg->files[indx].debug = dbg;
	dbg->files[indx].type = type;
	dbg->files[indx].index = indx;
	debugfs_create_file(hpre_debug_file_name[type], 0600, file_dir,
			    dbg->files + indx, &hpre_ctrl_debug_fops);

	return 0;
}
static int hpre_pf_comm_regs_debugfs_init(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	struct debugfs_regset32 *regset;

	regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
	if (!regset)
		return -ENOMEM;

	regset->regs = hpre_com_dfx_regs;
	regset->nregs = ARRAY_SIZE(hpre_com_dfx_regs);
	regset->base = qm->io_base;
	regset->dev = dev;

	debugfs_create_file("regs", 0444, qm->debug.debug_root,
			    regset, &hpre_com_regs_fops);

	return 0;
}
static int hpre_cluster_debugfs_init(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	char buf[HPRE_DBGFS_VAL_MAX_LEN];
	struct debugfs_regset32 *regset;
	struct dentry *tmp_d;
	u32 hpre_core_info;
	u8 clusters_num;
	int i, ret;

	hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val;
	clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) &
			hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask;
	for (i = 0; i < clusters_num; i++) {
		ret = snprintf(buf, HPRE_DBGFS_VAL_MAX_LEN, "cluster%d", i);
		if (ret >= HPRE_DBGFS_VAL_MAX_LEN)
			return -EINVAL;
		tmp_d = debugfs_create_dir(buf, qm->debug.debug_root);

		regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
		if (!regset)
			return -ENOMEM;

		regset->regs = hpre_cluster_dfx_regs;
		regset->nregs = ARRAY_SIZE(hpre_cluster_dfx_regs);
		regset->base = qm->io_base + hpre_cluster_offsets[i];
		regset->dev = dev;

		debugfs_create_file("regs", 0444, tmp_d, regset,
				    &hpre_cluster_regs_fops);
		ret = hpre_create_debugfs_file(qm, tmp_d, HPRE_CLUSTER_CTRL,
					       i + HPRE_CLUSTER_CTRL);
		if (ret)
			return ret;
	}

	return 0;
}
static int hpre_ctrl_debug_init(struct hisi_qm *qm)
{
	int ret;

	ret = hpre_create_debugfs_file(qm, NULL, HPRE_CLEAR_ENABLE,
				       HPRE_CLEAR_ENABLE);
	if (ret)
		return ret;

	ret = hpre_pf_comm_regs_debugfs_init(qm);
	if (ret)
		return ret;

	return hpre_cluster_debugfs_init(qm);
}
static int hpre_cap_regs_show(struct seq_file *s, void *unused)
{
	struct hisi_qm *qm = s->private;
	u32 i, size;

	size = qm->cap_tables.qm_cap_size;
	for (i = 0; i < size; i++)
		seq_printf(s, "%s= 0x%08x\n", qm->cap_tables.qm_cap_table[i].name,
			   qm->cap_tables.qm_cap_table[i].cap_val);

	size = qm->cap_tables.dev_cap_size;
	for (i = 0; i < size; i++)
		seq_printf(s, "%s= 0x%08x\n", qm->cap_tables.dev_cap_table[i].name,
			   qm->cap_tables.dev_cap_table[i].cap_val);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(hpre_cap_regs);
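/*
 * hpre_dfx_debug_init() creates the atomic64 counter files named in
 * hpre_dfx_files[] under "hpre_dfx", plus the "diff_regs" file (PF only) and
 * the "cap_regs" dump.
 */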
static void hpre_dfx_debug_init(struct hisi_qm *qm)
{
	struct dfx_diff_registers *hpre_regs = qm->debug.acc_diff_regs;
	struct hpre *hpre = container_of(qm, struct hpre, qm);
	struct hpre_dfx *dfx = hpre->debug.dfx;
	struct dentry *parent;
	int i;

	parent = debugfs_create_dir("hpre_dfx", qm->debug.debug_root);
	for (i = 0; i < HPRE_DFX_FILE_NUM; i++) {
		dfx[i].type = i;
		debugfs_create_file(hpre_dfx_files[i], 0644, parent, &dfx[i],
				    &hpre_atomic64_ops);
	}

	if (qm->fun_type == QM_HW_PF && hpre_regs)
		debugfs_create_file("diff_regs", 0444, parent,
				    qm, &hpre_diff_regs_fops);

	debugfs_create_file("cap_regs", CAP_FILE_PERMISSION,
			    qm->debug.debug_root, qm, &hpre_cap_regs_fops);
}
static int hpre_debugfs_init(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	int ret;

	ret = hisi_qm_regs_debugfs_init(qm, hpre_diff_regs, ARRAY_SIZE(hpre_diff_regs));
	if (ret) {
		dev_warn(dev, "Failed to init HPRE diff regs!\n");
		return ret;
	}

	qm->debug.debug_root = debugfs_create_dir(dev_name(dev),
						  hpre_debugfs_root);
	qm->debug.sqe_mask_offset = HPRE_SQE_MASK_OFFSET;
	qm->debug.sqe_mask_len = HPRE_SQE_MASK_LEN;

	hisi_qm_debug_init(qm);

	if (qm->pdev->device == PCI_DEVICE_ID_HUAWEI_HPRE_PF) {
		ret = hpre_ctrl_debug_init(qm);
		if (ret)
			goto debugfs_remove;
	}

	hpre_dfx_debug_init(qm);

	return 0;

debugfs_remove:
	debugfs_remove_recursive(qm->debug.debug_root);
	hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hpre_diff_regs));
	return ret;
}
static void hpre_debugfs_exit(struct hisi_qm *qm)
{
	debugfs_remove_recursive(qm->debug.debug_root);

	hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hpre_diff_regs));
}
static int hpre_pre_store_cap_reg(struct hisi_qm *qm)
{
	struct hisi_qm_cap_record *hpre_cap;
	struct device *dev = &qm->pdev->dev;
	u32 hpre_core_info;
	u8 clusters_num;
	size_t i, size;

	size = ARRAY_SIZE(hpre_cap_query_info);
	hpre_cap = devm_kzalloc(dev, sizeof(*hpre_cap) * size, GFP_KERNEL);
	if (!hpre_cap)
		return -ENOMEM;

	for (i = 0; i < size; i++) {
		hpre_cap[i].type = hpre_cap_query_info[i].type;
		hpre_cap[i].name = hpre_cap_query_info[i].name;
		hpre_cap[i].cap_val = hisi_qm_get_cap_value(qm, hpre_cap_query_info,
							    i, qm->cap_ver);
	}

	hpre_core_info = hpre_cap[HPRE_CORE_INFO].cap_val;
	clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) &
			hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask;
	if (clusters_num > HPRE_CLUSTERS_NUM_MAX) {
		dev_err(dev, "Device cluster num %u is out of range for driver supports %d!\n",
			clusters_num, HPRE_CLUSTERS_NUM_MAX);
		return -EINVAL;
	}

	qm->cap_tables.dev_cap_table = hpre_cap;
	qm->cap_tables.dev_cap_size = size;

	return 0;
}
static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
	u64 alg_msk;
	int ret;

	if (pdev->revision == QM_HW_V1) {
		pci_warn(pdev, "HPRE version 1 is not supported!\n");
		return -EINVAL;
	}

	qm->mode = uacce_mode;
	qm->pdev = pdev;
	qm->ver = pdev->revision;
	qm->sqe_size = HPRE_SQE_SIZE;
	qm->dev_name = hpre_name;

	qm->fun_type = (pdev->device == PCI_DEVICE_ID_HUAWEI_HPRE_PF) ?
			QM_HW_PF : QM_HW_VF;
	if (qm->fun_type == QM_HW_PF) {
		qm->qp_base = HPRE_PF_DEF_Q_BASE;
		qm->qp_num = pf_q_num;
		qm->debug.curr_qm_qp_num = pf_q_num;
		qm->qm_list = &hpre_devices;
		qm->err_ini = &hpre_err_ini;
		if (pf_q_num_flag)
			set_bit(QM_MODULE_PARAM, &qm->misc_ctl);
	}

	ret = hisi_qm_init(qm);
	if (ret) {
		pci_err(pdev, "Failed to init hpre qm configures!\n");
		return ret;
	}

	/* Fetch and save the value of capability registers */
	ret = hpre_pre_store_cap_reg(qm);
	if (ret) {
		pci_err(pdev, "Failed to pre-store capability registers!\n");
		hisi_qm_uninit(qm);
		return ret;
	}

	alg_msk = qm->cap_tables.dev_cap_table[HPRE_ALG_BITMAP].cap_val;
	ret = hisi_qm_set_algs(qm, alg_msk, hpre_dev_algs, ARRAY_SIZE(hpre_dev_algs));
	if (ret) {
		pci_err(pdev, "Failed to set hpre algs!\n");
		hisi_qm_uninit(qm);
	}

	return ret;
}
static int hpre_show_last_regs_init(struct hisi_qm *qm)
{
	int cluster_dfx_regs_num = ARRAY_SIZE(hpre_cluster_dfx_regs);
	int com_dfx_regs_num = ARRAY_SIZE(hpre_com_dfx_regs);
	struct qm_debug *debug = &qm->debug;
	void __iomem *io_base;
	u32 hpre_core_info;
	u8 clusters_num;
	int i, j, idx;

	hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val;
	clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) &
			hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask;
	debug->last_words = kcalloc(cluster_dfx_regs_num * clusters_num +
			com_dfx_regs_num, sizeof(unsigned int), GFP_KERNEL);
	if (!debug->last_words)
		return -ENOMEM;

	for (i = 0; i < com_dfx_regs_num; i++)
		debug->last_words[i] = readl_relaxed(qm->io_base +
						hpre_com_dfx_regs[i].offset);

	for (i = 0; i < clusters_num; i++) {
		io_base = qm->io_base + hpre_cluster_offsets[i];
		for (j = 0; j < cluster_dfx_regs_num; j++) {
			idx = com_dfx_regs_num + i * cluster_dfx_regs_num + j;
			debug->last_words[idx] = readl_relaxed(
				io_base + hpre_cluster_dfx_regs[j].offset);
		}
	}

	return 0;
}
static void hpre_show_last_regs_uninit(struct hisi_qm *qm)
{
	struct qm_debug *debug = &qm->debug;

	if (qm->fun_type == QM_HW_VF || !debug->last_words)
		return;

	kfree(debug->last_words);
	debug->last_words = NULL;
}
static void hpre_show_last_dfx_regs(struct hisi_qm *qm)
{
	int cluster_dfx_regs_num = ARRAY_SIZE(hpre_cluster_dfx_regs);
	int com_dfx_regs_num = ARRAY_SIZE(hpre_com_dfx_regs);
	struct qm_debug *debug = &qm->debug;
	struct pci_dev *pdev = qm->pdev;
	void __iomem *io_base;
	u32 hpre_core_info;
	u8 clusters_num;
	int i, j, idx;
	u32 val;

	if (qm->fun_type == QM_HW_VF || !debug->last_words)
		return;

	/* dump the last words of the debugging registers during controller reset */
	for (i = 0; i < com_dfx_regs_num; i++) {
		val = readl_relaxed(qm->io_base + hpre_com_dfx_regs[i].offset);
		if (debug->last_words[i] != val)
			pci_info(pdev, "Common_core:%s \t= 0x%08x => 0x%08x\n",
				 hpre_com_dfx_regs[i].name, debug->last_words[i], val);
	}

	hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val;
	clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) &
			hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask;
	for (i = 0; i < clusters_num; i++) {
		io_base = qm->io_base + hpre_cluster_offsets[i];
		for (j = 0; j < cluster_dfx_regs_num; j++) {
			val = readl_relaxed(io_base +
					    hpre_cluster_dfx_regs[j].offset);
			idx = com_dfx_regs_num + i * cluster_dfx_regs_num + j;
			if (debug->last_words[idx] != val)
				pci_info(pdev, "cluster-%d:%s \t= 0x%08x => 0x%08x\n",
					 i, hpre_cluster_dfx_regs[j].name,
					 debug->last_words[idx], val);
		}
	}
}
static void hpre_log_hw_error(struct hisi_qm *qm, u32 err_sts)
{
	const struct hpre_hw_error *err = hpre_hw_errors;
	struct device *dev = &qm->pdev->dev;

	while (err->msg) {
		if (err->int_msk & err_sts)
			dev_warn(dev, "%s [error status=0x%x] found\n",
				 err->msg, err->int_msk);
		err++;
	}
}
static u32 hpre_get_hw_err_status(struct hisi_qm *qm)
{
	return readl(qm->io_base + HPRE_INT_STATUS);
}
static void hpre_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
{
	writel(err_sts, qm->io_base + HPRE_HAC_SOURCE_INT);
}
static void hpre_disable_error_report(struct hisi_qm *qm, u32 err_type)
{
	u32 nfe_mask;

	nfe_mask = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);
	writel(nfe_mask & (~err_type), qm->io_base + HPRE_RAS_NFE_ENB);
}
static void hpre_open_axi_master_ooo(struct hisi_qm *qm)
{
	u32 value;

	value = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
	writel(value & ~HPRE_AM_OOO_SHUTDOWN_ENABLE,
	       qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
	writel(value | HPRE_AM_OOO_SHUTDOWN_ENABLE,
	       qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
}
static enum acc_err_result hpre_get_err_result(struct hisi_qm *qm)
{
	u32 err_status;

	err_status = hpre_get_hw_err_status(qm);
	if (err_status) {
		if (err_status & qm->err_info.ecc_2bits_mask)
			qm->err_status.is_dev_ecc_mbit = true;
		hpre_log_hw_error(qm, err_status);

		if (err_status & qm->err_info.dev_reset_mask) {
			/* Disable the same error reporting until device is recovered. */
			hpre_disable_error_report(qm, err_status);
			return ACC_ERR_NEED_RESET;
		}
		hpre_clear_hw_err_status(qm, err_status);
	}

	return ACC_ERR_RECOVERED;
}
static void hpre_err_info_init(struct hisi_qm *qm)
{
	struct hisi_qm_err_info *err_info = &qm->err_info;

	err_info->fe = HPRE_HAC_RAS_FE_ENABLE;
	err_info->ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_QM_CE_MASK_CAP, qm->cap_ver);
	err_info->nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_QM_NFE_MASK_CAP, qm->cap_ver);
	err_info->ecc_2bits_mask = HPRE_CORE_ECC_2BIT_ERR | HPRE_OOO_ECC_2BIT_ERR;
	err_info->dev_shutdown_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
							  HPRE_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
	err_info->qm_shutdown_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
							 HPRE_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
	err_info->qm_reset_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
						      HPRE_QM_RESET_MASK_CAP, qm->cap_ver);
	err_info->dev_reset_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
						       HPRE_RESET_MASK_CAP, qm->cap_ver);
	err_info->msi_wr_port = HPRE_WR_MSI_PORT;
	err_info->acpi_rst = "HRST";
}
static const struct hisi_qm_err_ini hpre_err_ini = {
	.hw_init		= hpre_set_user_domain_and_cache,
	.hw_err_enable		= hpre_hw_error_enable,
	.hw_err_disable		= hpre_hw_error_disable,
	.get_dev_hw_err_status	= hpre_get_hw_err_status,
	.clear_dev_hw_err_status = hpre_clear_hw_err_status,
	.open_axi_master_ooo	= hpre_open_axi_master_ooo,
	.open_sva_prefetch	= hpre_open_sva_prefetch,
	.close_sva_prefetch	= hpre_close_sva_prefetch,
	.show_last_dfx_regs	= hpre_show_last_dfx_regs,
	.err_info_init		= hpre_err_info_init,
	.get_err_result		= hpre_get_err_result,
};
static int hpre_pf_probe_init(struct hpre *hpre)
{
	struct hisi_qm *qm = &hpre->qm;
	int ret;

	ret = hpre_set_user_domain_and_cache(qm);
	if (ret)
		return ret;

	hpre_open_sva_prefetch(qm);

	hisi_qm_dev_err_init(qm);
	ret = hpre_show_last_regs_init(qm);
	if (ret)
		pci_err(qm->pdev, "Failed to init last word regs!\n");

	return ret;
}
static int hpre_probe_init(struct hpre *hpre)
{
	u32 type_rate = HPRE_SHAPER_TYPE_RATE;
	struct hisi_qm *qm = &hpre->qm;
	int ret;

	if (qm->fun_type == QM_HW_PF) {
		ret = hpre_pf_probe_init(hpre);
		if (ret)
			return ret;
		/* Enable shaper type 0 */
		if (qm->ver >= QM_HW_V3) {
			type_rate |= QM_SHAPER_ENABLE;
			qm->type_rate = type_rate;
		}
	}

	return 0;
}
static void hpre_probe_uninit(struct hisi_qm *qm)
{
	if (qm->fun_type == QM_HW_VF)
		return;

	hpre_cnt_regs_clear(qm);
	qm->debug.curr_qm_qp_num = 0;
	hpre_show_last_regs_uninit(qm);
	hpre_close_sva_prefetch(qm);
	hisi_qm_dev_err_uninit(qm);
}
static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct hisi_qm *qm;
	struct hpre *hpre;
	int ret;

	hpre = devm_kzalloc(&pdev->dev, sizeof(*hpre), GFP_KERNEL);
	if (!hpre)
		return -ENOMEM;

	qm = &hpre->qm;
	ret = hpre_qm_init(qm, pdev);
	if (ret) {
		pci_err(pdev, "Failed to init HPRE QM (%d)!\n", ret);
		return ret;
	}

	ret = hpre_probe_init(hpre);
	if (ret) {
		pci_err(pdev, "Failed to probe (%d)!\n", ret);
		goto err_with_qm_init;
	}

	ret = hisi_qm_start(qm);
	if (ret)
		goto err_with_probe_init;

	ret = hpre_debugfs_init(qm);
	if (ret)
		dev_warn(&pdev->dev, "init debugfs fail!\n");

	hisi_qm_add_list(qm, &hpre_devices);
	ret = hisi_qm_alg_register(qm, &hpre_devices, HPRE_CTX_Q_NUM_DEF);
	if (ret < 0) {
		pci_err(pdev, "fail to register algs to crypto!\n");
		goto err_qm_del_list;
	}

	if (qm->uacce) {
		ret = uacce_register(qm->uacce);
		if (ret) {
			pci_err(pdev, "failed to register uacce (%d)!\n", ret);
			goto err_with_alg_register;
		}
	}

	if (qm->fun_type == QM_HW_PF && vfs_num) {
		ret = hisi_qm_sriov_enable(pdev, vfs_num);
		if (ret < 0)
			goto err_with_alg_register;
	}

	hisi_qm_pm_init(qm);

	return 0;

err_with_alg_register:
	hisi_qm_alg_unregister(qm, &hpre_devices, HPRE_CTX_Q_NUM_DEF);

err_qm_del_list:
	hisi_qm_del_list(qm, &hpre_devices);
	hpre_debugfs_exit(qm);
	hisi_qm_stop(qm, QM_NORMAL);

err_with_probe_init:
	hpre_probe_uninit(qm);

err_with_qm_init:
	hisi_qm_uninit(qm);

	return ret;
}
static void hpre_remove(struct pci_dev *pdev)
{
	struct hisi_qm *qm = pci_get_drvdata(pdev);

	hisi_qm_pm_uninit(qm);
	hisi_qm_wait_task_finish(qm, &hpre_devices);
	hisi_qm_alg_unregister(qm, &hpre_devices, HPRE_CTX_Q_NUM_DEF);
	hisi_qm_del_list(qm, &hpre_devices);
	if (qm->fun_type == QM_HW_PF && qm->vfs_num)
		hisi_qm_sriov_disable(pdev, true);

	hpre_debugfs_exit(qm);
	hisi_qm_stop(qm, QM_NORMAL);

	hpre_probe_uninit(qm);
	hisi_qm_uninit(qm);
}
static const struct dev_pm_ops hpre_pm_ops = {
	SET_RUNTIME_PM_OPS(hisi_qm_suspend, hisi_qm_resume, NULL)
};
static const struct pci_error_handlers hpre_err_handler = {
	.error_detected		= hisi_qm_dev_err_detected,
	.slot_reset		= hisi_qm_dev_slot_reset,
	.reset_prepare		= hisi_qm_reset_prepare,
	.reset_done		= hisi_qm_reset_done,
};
static struct pci_driver hpre_pci_driver = {
	.name			= hpre_name,
	.id_table		= hpre_dev_ids,
	.probe			= hpre_probe,
	.remove			= hpre_remove,
	.sriov_configure	= IS_ENABLED(CONFIG_PCI_IOV) ?
				  hisi_qm_sriov_configure : NULL,
	.err_handler		= &hpre_err_handler,
	.shutdown		= hisi_qm_dev_shutdown,
	.driver.pm		= &hpre_pm_ops,
};
struct pci_driver *hisi_hpre_get_pf_driver(void)
{
	return &hpre_pci_driver;
}
EXPORT_SYMBOL_GPL(hisi_hpre_get_pf_driver);
static void hpre_register_debugfs(void)
{
	if (!debugfs_initialized())
		return;

	hpre_debugfs_root = debugfs_create_dir(hpre_name, NULL);
}

static void hpre_unregister_debugfs(void)
{
	debugfs_remove_recursive(hpre_debugfs_root);
}
static int __init hpre_init(void)
{
	int ret;

	hisi_qm_init_list(&hpre_devices);
	hpre_register_debugfs();

	ret = pci_register_driver(&hpre_pci_driver);
	if (ret) {
		hpre_unregister_debugfs();
		pr_err("hpre: can't register hisi hpre driver.\n");
	}

	return ret;
}
static void __exit hpre_exit(void)
{
	pci_unregister_driver(&hpre_pci_driver);
	hpre_unregister_debugfs();
}

module_init(hpre_init);
module_exit(hpre_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Zaibo Xu <xuzaibo@huawei.com>");
MODULE_AUTHOR("Meng Yu <yumeng18@huawei.com>");
MODULE_DESCRIPTION("Driver for HiSilicon HPRE accelerator");