// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018-2019 HiSilicon Limited. */
#include <linux/acpi.h>
#include <linux/aer.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/topology.h>
#include "hpre.h"
#define HPRE_QUEUE_NUM_V2		1024
#define HPRE_QM_ABNML_INT_MASK		0x100004
#define HPRE_CTRL_CNT_CLR_CE_BIT	BIT(0)
#define HPRE_COMM_CNT_CLR_CE		0x0
#define HPRE_CTRL_CNT_CLR_CE		0x301000
#define HPRE_FSM_MAX_CNT		0x301008
#define HPRE_VFG_AXQOS			0x30100c
#define HPRE_VFG_AXCACHE		0x301010
#define HPRE_RDCHN_INI_CFG		0x301014
#define HPRE_AWUSR_FP_CFG		0x301018
#define HPRE_BD_ENDIAN			0x301020
#define HPRE_ECC_BYPASS			0x301024
#define HPRE_RAS_WIDTH_CFG		0x301028
#define HPRE_POISON_BYPASS		0x30102c
#define HPRE_BD_ARUSR_CFG		0x301030
#define HPRE_BD_AWUSR_CFG		0x301034
#define HPRE_TYPES_ENB			0x301038
#define HPRE_DATA_RUSER_CFG		0x30103c
#define HPRE_DATA_WUSER_CFG		0x301040
#define HPRE_INT_MASK			0x301400
#define HPRE_INT_STATUS			0x301800
#define HPRE_CORE_INT_ENABLE		0
#define HPRE_CORE_INT_DISABLE		0x003fffff
#define HPRE_RAS_ECC_1BIT_TH		0x30140c
#define HPRE_RDCHN_INI_ST		0x301a00
#define HPRE_CLSTR_BASE			0x302000
#define HPRE_CORE_EN_OFFSET		0x04
#define HPRE_CORE_INI_CFG_OFFSET	0x20
#define HPRE_CORE_INI_STATUS_OFFSET	0x80
#define HPRE_CORE_HTBT_WARN_OFFSET	0x8c
#define HPRE_CORE_IS_SCHD_OFFSET	0x90

#define HPRE_RAS_CE_ENB			0x301410
#define HPRE_HAC_RAS_CE_ENABLE		0x1
#define HPRE_RAS_NFE_ENB		0x301414
#define HPRE_HAC_RAS_NFE_ENABLE		0x3ffffe
#define HPRE_RAS_FE_ENB			0x301418
#define HPRE_HAC_RAS_FE_ENABLE		0

#define HPRE_CORE_ENB		(HPRE_CLSTR_BASE + HPRE_CORE_EN_OFFSET)
#define HPRE_CORE_INI_CFG	(HPRE_CLSTR_BASE + HPRE_CORE_INI_CFG_OFFSET)
#define HPRE_CORE_INI_STATUS	(HPRE_CLSTR_BASE + HPRE_CORE_INI_STATUS_OFFSET)
#define HPRE_HAC_ECC1_CNT		0x301a04
#define HPRE_HAC_ECC2_CNT		0x301a08
#define HPRE_HAC_INT_STATUS		0x301800
#define HPRE_HAC_SOURCE_INT		0x301600
#define HPRE_CLSTR_ADDR_INTRVL		0x1000
#define HPRE_CLUSTER_INQURY		0x100
#define HPRE_CLSTR_ADDR_INQRY_RSLT	0x104
#define HPRE_TIMEOUT_ABNML_BIT		6
#define HPRE_PASID_EN_BIT		9
#define HPRE_REG_RD_INTVRL_US		10
#define HPRE_REG_RD_TMOUT_US		1000
#define HPRE_DBGFS_VAL_MAX_LEN		20
#define HPRE_PCI_DEVICE_ID		0xa258
#define HPRE_PCI_VF_DEVICE_ID		0xa259
#define HPRE_ADDR(qm, offset)		((qm)->io_base + (offset))
#define HPRE_QM_USR_CFG_MASK		0xfffffffe
#define HPRE_QM_AXI_CFG_MASK		0xffff
#define HPRE_QM_VFG_AX_MASK		0xff
#define HPRE_BD_USR_MASK		0x3
#define HPRE_CLUSTER_CORE_MASK		0xf

#define HPRE_AM_OOO_SHUTDOWN_ENB	0x301044
#define HPRE_AM_OOO_SHUTDOWN_ENABLE	BIT(0)
#define HPRE_WR_MSI_PORT		BIT(2)

#define HPRE_CORE_ECC_2BIT_ERR		BIT(1)
#define HPRE_OOO_ECC_2BIT_ERR		BIT(5)

#define HPRE_QM_BME_FLR			BIT(7)
#define HPRE_QM_PM_FLR			BIT(11)
#define HPRE_QM_SRIOV_FLR		BIT(12)

#define HPRE_VIA_MSI_DSM		1
#define HPRE_SQE_MASK_OFFSET		8
#define HPRE_SQE_MASK_LEN		24

static const char hpre_name[] = "hisi_hpre";
static struct dentry *hpre_debugfs_root;
static const struct pci_device_id hpre_dev_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HPRE_PCI_DEVICE_ID) },
	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HPRE_PCI_VF_DEVICE_ID) },
	{ /* sentinel */ }
};

MODULE_DEVICE_TABLE(pci, hpre_dev_ids);
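
/* Maps one HAC interrupt status bit (or bit range) to a printable name. */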
struct hpre_hw_error {
	u32 int_msk;
	const char *msg;
};

static struct hisi_qm_list hpre_devices = {
	.register_to_crypto	= hpre_algs_register,
	.unregister_from_crypto	= hpre_algs_unregister,
};

static const char * const hpre_debug_file_name[] = {
	[HPRE_CURRENT_QM]   = "current_qm",
	[HPRE_CLEAR_ENABLE] = "rdclr_en",
	[HPRE_CLUSTER_CTRL] = "cluster_ctrl",
};

static const struct hpre_hw_error hpre_hw_errors[] = {
	{ .int_msk = BIT(0), .msg = "core_ecc_1bit_err_int_set" },
	{ .int_msk = BIT(1), .msg = "core_ecc_2bit_err_int_set" },
	{ .int_msk = BIT(2), .msg = "dat_wb_poison_int_set" },
	{ .int_msk = BIT(3), .msg = "dat_rd_poison_int_set" },
	{ .int_msk = BIT(4), .msg = "bd_rd_poison_int_set" },
	{ .int_msk = BIT(5), .msg = "ooo_ecc_2bit_err_int_set" },
	{ .int_msk = BIT(6), .msg = "cluster1_shb_timeout_int_set" },
	{ .int_msk = BIT(7), .msg = "cluster2_shb_timeout_int_set" },
	{ .int_msk = BIT(8), .msg = "cluster3_shb_timeout_int_set" },
	{ .int_msk = BIT(9), .msg = "cluster4_shb_timeout_int_set" },
	{ .int_msk = GENMASK(15, 10), .msg = "ooo_rdrsp_err_int_set" },
	{ .int_msk = GENMASK(21, 16), .msg = "ooo_wrrsp_err_int_set" },
	{ /* sentinel */ }
};

static const u64 hpre_cluster_offsets[] = {
	[HPRE_CLUSTER0] =
		HPRE_CLSTR_BASE + HPRE_CLUSTER0 * HPRE_CLSTR_ADDR_INTRVL,
	[HPRE_CLUSTER1] =
		HPRE_CLSTR_BASE + HPRE_CLUSTER1 * HPRE_CLSTR_ADDR_INTRVL,
	[HPRE_CLUSTER2] =
		HPRE_CLSTR_BASE + HPRE_CLUSTER2 * HPRE_CLSTR_ADDR_INTRVL,
	[HPRE_CLUSTER3] =
		HPRE_CLSTR_BASE + HPRE_CLUSTER3 * HPRE_CLSTR_ADDR_INTRVL,
};

static const struct debugfs_reg32 hpre_cluster_dfx_regs[] = {
	{"CORES_EN_STATUS ", HPRE_CORE_EN_OFFSET},
	{"CORES_INI_CFG ", HPRE_CORE_INI_CFG_OFFSET},
	{"CORES_INI_STATUS ", HPRE_CORE_INI_STATUS_OFFSET},
	{"CORES_HTBT_WARN ", HPRE_CORE_HTBT_WARN_OFFSET},
	{"CORES_IS_SCHD ", HPRE_CORE_IS_SCHD_OFFSET},
};

static const struct debugfs_reg32 hpre_com_dfx_regs[] = {
	{"READ_CLR_EN ", HPRE_CTRL_CNT_CLR_CE},
	{"AXQOS ", HPRE_VFG_AXQOS},
	{"AWUSR_CFG ", HPRE_AWUSR_FP_CFG},
	{"QM_ARUSR_MCFG1 ", QM_ARUSER_M_CFG_1},
	{"QM_AWUSR_MCFG1 ", QM_AWUSER_M_CFG_1},
	{"BD_ENDIAN ", HPRE_BD_ENDIAN},
	{"ECC_CHECK_CTRL ", HPRE_ECC_BYPASS},
	{"RAS_INT_WIDTH ", HPRE_RAS_WIDTH_CFG},
	{"POISON_BYPASS ", HPRE_POISON_BYPASS},
	{"BD_ARUSER ", HPRE_BD_ARUSR_CFG},
	{"BD_AWUSER ", HPRE_BD_AWUSR_CFG},
	{"DATA_ARUSER ", HPRE_DATA_RUSER_CFG},
	{"DATA_AWUSER ", HPRE_DATA_WUSER_CFG},
	{"INT_STATUS ", HPRE_INT_STATUS},
};

static const char *hpre_dfx_files[HPRE_DFX_FILE_NUM] = {
	"send_cnt",
	"recv_cnt",
	"send_fail_cnt",
	"send_busy_cnt",
	"over_thrhld_cnt",
	"overtime_thrhld",
	"invalid_req_cnt",
};

static int pf_q_num_set(const char *val, const struct kernel_param *kp)
{
	return q_num_set(val, kp, HPRE_PCI_DEVICE_ID);
}

static const struct kernel_param_ops hpre_pf_q_num_ops = {
	.set = pf_q_num_set,
	.get = param_get_int,
};

static u32 pf_q_num = HPRE_PF_DEF_Q_NUM;
module_param_cb(pf_q_num, &hpre_pf_q_num_ops, &pf_q_num, 0444);
MODULE_PARM_DESC(pf_q_num, "Number of queues in PF of CS(2-1024)");

static const struct kernel_param_ops vfs_num_ops = {
	.set = vfs_num_set,
	.get = param_get_int,
};

static u32 vfs_num;
module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)");
struct hisi_qp *hpre_create_qp(void)
{
	int node = cpu_to_node(smp_processor_id());
	struct hisi_qp *qp = NULL;
	int ret;

	ret = hisi_qm_alloc_qps_node(&hpre_devices, 1, 0, node, &qp);
	if (!ret)
		return qp;

	return NULL;
}
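
/*
 * Ask platform firmware (via the HPRE _DSM) to reconfigure the device;
 * see the comment at the acpi_evaluate_dsm() call below.
 */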
static int hpre_cfg_by_dsm(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	union acpi_object *obj;
	guid_t guid;

	if (guid_parse("b06b81ab-0134-4a45-9b0c-483447b95fa7", &guid)) {
		dev_err(dev, "Hpre GUID failed\n");
		return -EINVAL;
	}

	/* Switch over to MSI handling due to non-standard PCI implementation */
	obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &guid,
				0, HPRE_VIA_MSI_DSM, NULL);
	if (!obj) {
		dev_err(dev, "ACPI handle failed!\n");
		return -EIO;
	}

	ACPI_FREE(obj);

	return 0;
}

/*
 * For Hi1620, we should disable FLR triggered by hardware (BME/PM/SRIOV).
 * Otherwise the device may get stuck in D3 state when hpre is bound and
 * unbound quickly, because the hardware-triggered FLR kicks in.
 */
static void disable_flr_of_bme(struct hisi_qm *qm)
{
	u32 val;

	val = readl(HPRE_ADDR(qm, QM_PEH_AXUSER_CFG));
	val &= ~(HPRE_QM_BME_FLR | HPRE_QM_SRIOV_FLR);
	val |= HPRE_QM_PM_FLR;
	writel(val, HPRE_ADDR(qm, QM_PEH_AXUSER_CFG));
	writel(PEH_AXUSER_CFG_ENABLE, HPRE_ADDR(qm, QM_PEH_AXUSER_CFG_ENABLE));
}
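
/*
 * One-time engine setup: configure the user/AXI domains, endianness and
 * RAS registers, bring up the read channel and all clusters, then apply
 * the ACPI MSI and FLR quirks.
 */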
static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	unsigned long offset;
	int ret, i;
	u32 val;

	writel(HPRE_QM_USR_CFG_MASK, HPRE_ADDR(qm, QM_ARUSER_M_CFG_ENABLE));
	writel(HPRE_QM_USR_CFG_MASK, HPRE_ADDR(qm, QM_AWUSER_M_CFG_ENABLE));
	writel_relaxed(HPRE_QM_AXI_CFG_MASK, HPRE_ADDR(qm, QM_AXI_M_CFG));

	/* HPRE needs more time, so mask this timeout abnormal interrupt */
	val = readl_relaxed(HPRE_ADDR(qm, HPRE_QM_ABNML_INT_MASK));
	val |= BIT(HPRE_TIMEOUT_ABNML_BIT);
	writel_relaxed(val, HPRE_ADDR(qm, HPRE_QM_ABNML_INT_MASK));

	writel(0x1, HPRE_ADDR(qm, HPRE_TYPES_ENB));
	writel(HPRE_QM_VFG_AX_MASK, HPRE_ADDR(qm, HPRE_VFG_AXCACHE));
	writel(0x0, HPRE_ADDR(qm, HPRE_BD_ENDIAN));
	writel(0x0, HPRE_ADDR(qm, HPRE_INT_MASK));
	writel(0x0, HPRE_ADDR(qm, HPRE_RAS_ECC_1BIT_TH));
	writel(0x0, HPRE_ADDR(qm, HPRE_POISON_BYPASS));
	writel(0x0, HPRE_ADDR(qm, HPRE_COMM_CNT_CLR_CE));
	writel(0x0, HPRE_ADDR(qm, HPRE_ECC_BYPASS));

	writel(HPRE_BD_USR_MASK, HPRE_ADDR(qm, HPRE_BD_ARUSR_CFG));
	writel(HPRE_BD_USR_MASK, HPRE_ADDR(qm, HPRE_BD_AWUSR_CFG));
	writel(0x1, HPRE_ADDR(qm, HPRE_RDCHN_INI_CFG));
	ret = readl_relaxed_poll_timeout(HPRE_ADDR(qm, HPRE_RDCHN_INI_ST), val,
					 val & BIT(0),
					 HPRE_REG_RD_INTVRL_US,
					 HPRE_REG_RD_TMOUT_US);
	if (ret) {
		dev_err(dev, "read rd channel timeout fail!\n");
		return -ETIMEDOUT;
	}

	for (i = 0; i < HPRE_CLUSTERS_NUM; i++) {
		offset = i * HPRE_CLSTR_ADDR_INTRVL;

		/* initiate the clusters */
		writel(HPRE_CLUSTER_CORE_MASK,
		       HPRE_ADDR(qm, offset + HPRE_CORE_ENB));
		writel(0x1, HPRE_ADDR(qm, offset + HPRE_CORE_INI_CFG));
		ret = readl_relaxed_poll_timeout(HPRE_ADDR(qm, offset +
					HPRE_CORE_INI_STATUS), val,
					((val & HPRE_CLUSTER_CORE_MASK) ==
					HPRE_CLUSTER_CORE_MASK),
					HPRE_REG_RD_INTVRL_US,
					HPRE_REG_RD_TMOUT_US);
		if (ret) {
			dev_err(dev,
				"cluster %d int st status timeout!\n", i);
			return -ETIMEDOUT;
		}
	}

	ret = hpre_cfg_by_dsm(qm);
	if (ret)
		dev_err(dev, "acpi_evaluate_dsm err.\n");

	disable_flr_of_bme(qm);

	return ret;
}
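
/* Clear the debugfs-visible counter registers so a re-bound PF starts clean. */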
static void hpre_cnt_regs_clear(struct hisi_qm *qm)
{
	unsigned long offset;
	int i;

	/* clear current_qm */
	writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF);
	writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);

	/* clear clusterX/cluster_ctrl */
	for (i = 0; i < HPRE_CLUSTERS_NUM; i++) {
		offset = HPRE_CLSTR_BASE + i * HPRE_CLSTR_ADDR_INTRVL;
		writel(0x0, qm->io_base + offset + HPRE_CLUSTER_INQURY);
	}

	/* clear rdclr_en */
	writel(0x0, qm->io_base + HPRE_CTRL_CNT_CLR_CE);

	hisi_qm_debug_regs_clear(qm);
}

static void hpre_hw_error_disable(struct hisi_qm *qm)
{
	u32 val;

	/* disable hpre hw error interrupts */
	writel(HPRE_CORE_INT_DISABLE, qm->io_base + HPRE_INT_MASK);

	/* disable HPRE block master OOO when a multi-bit error occurs */
	val = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
	val &= ~HPRE_AM_OOO_SHUTDOWN_ENABLE;
	writel(val, qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
}

static void hpre_hw_error_enable(struct hisi_qm *qm)
{
	u32 val;

	/* clear any pending HPRE hw error source */
	writel(HPRE_CORE_INT_DISABLE, qm->io_base + HPRE_HAC_SOURCE_INT);

	/* enable hpre hw error interrupts */
	writel(HPRE_CORE_INT_ENABLE, qm->io_base + HPRE_INT_MASK);
	writel(HPRE_HAC_RAS_CE_ENABLE, qm->io_base + HPRE_RAS_CE_ENB);
	writel(HPRE_HAC_RAS_NFE_ENABLE, qm->io_base + HPRE_RAS_NFE_ENB);
	writel(HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_RAS_FE_ENB);

	/* enable HPRE block master OOO when a multi-bit error occurs */
	val = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
	val |= HPRE_AM_OOO_SHUTDOWN_ENABLE;
	writel(val, qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
}
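
/*
 * debugfs control files: each file below maps onto one register (current
 * qm selector, read-clear enable, cluster inquiry) behind a common
 * read/write dispatcher that is serialized by a per-file spinlock.
 */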
static inline struct hisi_qm *hpre_file_to_qm(struct hpre_debugfs_file *file)
{
	struct hpre *hpre = container_of(file->debug, struct hpre, debug);

	return &hpre->qm;
}

static u32 hpre_current_qm_read(struct hpre_debugfs_file *file)
{
	struct hisi_qm *qm = hpre_file_to_qm(file);

	return readl(qm->io_base + QM_DFX_MB_CNT_VF);
}

static int hpre_current_qm_write(struct hpre_debugfs_file *file, u32 val)
{
	struct hisi_qm *qm = hpre_file_to_qm(file);
	u32 num_vfs = qm->vfs_num;
	u32 vfq_num, tmp;

	if (val > num_vfs)
		return -EINVAL;

	/* Calculate curr_qm_qp_num and store it according to the PF or VF Dev ID */
	if (val == 0) {
		qm->debug.curr_qm_qp_num = qm->qp_num;
	} else {
		vfq_num = (qm->ctrl_qp_num - qm->qp_num) / num_vfs;
		if (val == num_vfs) {
			qm->debug.curr_qm_qp_num =
			qm->ctrl_qp_num - qm->qp_num - (num_vfs - 1) * vfq_num;
		} else {
			qm->debug.curr_qm_qp_num = vfq_num;
		}
	}

	writel(val, qm->io_base + QM_DFX_MB_CNT_VF);
	writel(val, qm->io_base + QM_DFX_DB_CNT_VF);

	tmp = val |
	      (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK);
	writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);

	tmp = val |
	      (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK);
	writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);

	return 0;
}

static u32 hpre_clear_enable_read(struct hpre_debugfs_file *file)
{
	struct hisi_qm *qm = hpre_file_to_qm(file);

	return readl(qm->io_base + HPRE_CTRL_CNT_CLR_CE) &
	       HPRE_CTRL_CNT_CLR_CE_BIT;
}

static int hpre_clear_enable_write(struct hpre_debugfs_file *file, u32 val)
{
	struct hisi_qm *qm = hpre_file_to_qm(file);
	u32 tmp;

	if (val != 1 && val != 0)
		return -EINVAL;

	tmp = (readl(qm->io_base + HPRE_CTRL_CNT_CLR_CE) &
	       ~HPRE_CTRL_CNT_CLR_CE_BIT) | val;
	writel(tmp, qm->io_base + HPRE_CTRL_CNT_CLR_CE);

	return 0;
}

static u32 hpre_cluster_inqry_read(struct hpre_debugfs_file *file)
{
	struct hisi_qm *qm = hpre_file_to_qm(file);
	int cluster_index = file->index - HPRE_CLUSTER_CTRL;
	unsigned long offset = HPRE_CLSTR_BASE +
			       cluster_index * HPRE_CLSTR_ADDR_INTRVL;

	return readl(qm->io_base + offset + HPRE_CLSTR_ADDR_INQRY_RSLT);
}

static int hpre_cluster_inqry_write(struct hpre_debugfs_file *file, u32 val)
{
	struct hisi_qm *qm = hpre_file_to_qm(file);
	int cluster_index = file->index - HPRE_CLUSTER_CTRL;
	unsigned long offset = HPRE_CLSTR_BASE + cluster_index *
			       HPRE_CLSTR_ADDR_INTRVL;

	writel(val, qm->io_base + offset + HPRE_CLUSTER_INQURY);

	return 0;
}
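
/* Common read path for the control files above: dispatch on file->type. */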
static ssize_t hpre_ctrl_debug_read(struct file *filp, char __user *buf,
				    size_t count, loff_t *pos)
{
	struct hpre_debugfs_file *file = filp->private_data;
	char tbuf[HPRE_DBGFS_VAL_MAX_LEN];
	u32 val;
	int ret;

	spin_lock_irq(&file->lock);
	switch (file->type) {
	case HPRE_CURRENT_QM:
		val = hpre_current_qm_read(file);
		break;
	case HPRE_CLEAR_ENABLE:
		val = hpre_clear_enable_read(file);
		break;
	case HPRE_CLUSTER_CTRL:
		val = hpre_cluster_inqry_read(file);
		break;
	default:
		spin_unlock_irq(&file->lock);
		return -EINVAL;
	}
	spin_unlock_irq(&file->lock);
	ret = snprintf(tbuf, HPRE_DBGFS_VAL_MAX_LEN, "%u\n", val);
	return simple_read_from_buffer(buf, count, pos, tbuf, ret);
}

static ssize_t hpre_ctrl_debug_write(struct file *filp, const char __user *buf,
				     size_t count, loff_t *pos)
{
	struct hpre_debugfs_file *file = filp->private_data;
	char tbuf[HPRE_DBGFS_VAL_MAX_LEN];
	unsigned long val;
	int len, ret;

	if (*pos != 0)
		return 0;

	if (count >= HPRE_DBGFS_VAL_MAX_LEN)
		return -ENOSPC;

	len = simple_write_to_buffer(tbuf, HPRE_DBGFS_VAL_MAX_LEN - 1,
				     pos, buf, count);
	if (len < 0)
		return len;

	tbuf[len] = '\0';
	if (kstrtoul(tbuf, 0, &val))
		return -EFAULT;

	spin_lock_irq(&file->lock);
	switch (file->type) {
	case HPRE_CURRENT_QM:
		ret = hpre_current_qm_write(file, val);
		if (ret)
			goto err_input;
		break;
	case HPRE_CLEAR_ENABLE:
		ret = hpre_clear_enable_write(file, val);
		if (ret)
			goto err_input;
		break;
	case HPRE_CLUSTER_CTRL:
		ret = hpre_cluster_inqry_write(file, val);
		if (ret)
			goto err_input;
		break;
	default:
		ret = -EINVAL;
		goto err_input;
	}
	spin_unlock_irq(&file->lock);

	return count;

err_input:
	spin_unlock_irq(&file->lock);
	return ret;
}

static const struct file_operations hpre_ctrl_debug_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = hpre_ctrl_debug_read,
	.write = hpre_ctrl_debug_write,
};
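
/*
 * DFX files expose atomic64 counters; writing the overtime threshold also
 * clears the over-threshold counter so the two values stay consistent.
 */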
static int hpre_debugfs_atomic64_get(void *data, u64 *val)
{
	struct hpre_dfx *dfx_item = data;

	*val = atomic64_read(&dfx_item->value);

	return 0;
}

static int hpre_debugfs_atomic64_set(void *data, u64 val)
{
	struct hpre_dfx *dfx_item = data;
	struct hpre_dfx *hpre_dfx = NULL;

	if (dfx_item->type == HPRE_OVERTIME_THRHLD) {
		hpre_dfx = dfx_item - HPRE_OVERTIME_THRHLD;
		atomic64_set(&hpre_dfx[HPRE_OVER_THRHLD_CNT].value, 0);
	} else if (val) {
		return -EINVAL;
	}

	atomic64_set(&dfx_item->value, val);

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(hpre_atomic64_ops, hpre_debugfs_atomic64_get,
			 hpre_debugfs_atomic64_set, "%llu\n");

static int hpre_create_debugfs_file(struct hisi_qm *qm, struct dentry *dir,
				    enum hpre_ctrl_dbgfs_file type, int indx)
{
	struct hpre *hpre = container_of(qm, struct hpre, qm);
	struct hpre_debug *dbg = &hpre->debug;
	struct dentry *file_dir;

	if (dir)
		file_dir = dir;
	else
		file_dir = qm->debug.debug_root;

	if (type >= HPRE_DEBUG_FILE_NUM)
		return -EINVAL;

	spin_lock_init(&dbg->files[indx].lock);
	dbg->files[indx].debug = dbg;
	dbg->files[indx].type = type;
	dbg->files[indx].index = indx;
	debugfs_create_file(hpre_debug_file_name[type], 0600, file_dir,
			    dbg->files + indx, &hpre_ctrl_debug_fops);

	return 0;
}

static int hpre_pf_comm_regs_debugfs_init(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	struct debugfs_regset32 *regset;

	regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
	if (!regset)
		return -ENOMEM;

	regset->regs = hpre_com_dfx_regs;
	regset->nregs = ARRAY_SIZE(hpre_com_dfx_regs);
	regset->base = qm->io_base;

	debugfs_create_regset32("regs", 0444, qm->debug.debug_root, regset);
	return 0;
}

static int hpre_cluster_debugfs_init(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	char buf[HPRE_DBGFS_VAL_MAX_LEN];
	struct debugfs_regset32 *regset;
	struct dentry *tmp_d;
	int i, ret;

	for (i = 0; i < HPRE_CLUSTERS_NUM; i++) {
		ret = snprintf(buf, HPRE_DBGFS_VAL_MAX_LEN, "cluster%d", i);
		if (ret < 0)
			return -EINVAL;
		tmp_d = debugfs_create_dir(buf, qm->debug.debug_root);

		regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
		if (!regset)
			return -ENOMEM;

		regset->regs = hpre_cluster_dfx_regs;
		regset->nregs = ARRAY_SIZE(hpre_cluster_dfx_regs);
		regset->base = qm->io_base + hpre_cluster_offsets[i];

		debugfs_create_regset32("regs", 0444, tmp_d, regset);
		ret = hpre_create_debugfs_file(qm, tmp_d, HPRE_CLUSTER_CTRL,
					       i + HPRE_CLUSTER_CTRL);
		if (ret)
			return ret;
	}

	return 0;
}

static int hpre_ctrl_debug_init(struct hisi_qm *qm)
{
	int ret;

	ret = hpre_create_debugfs_file(qm, NULL, HPRE_CURRENT_QM,
				       HPRE_CURRENT_QM);
	if (ret)
		return ret;

	ret = hpre_create_debugfs_file(qm, NULL, HPRE_CLEAR_ENABLE,
				       HPRE_CLEAR_ENABLE);
	if (ret)
		return ret;

	ret = hpre_pf_comm_regs_debugfs_init(qm);
	if (ret)
		return ret;

	return hpre_cluster_debugfs_init(qm);
}

static void hpre_dfx_debug_init(struct hisi_qm *qm)
{
	struct hpre *hpre = container_of(qm, struct hpre, qm);
	struct hpre_dfx *dfx = hpre->debug.dfx;
	struct dentry *parent;
	int i;

	parent = debugfs_create_dir("hpre_dfx", qm->debug.debug_root);
	for (i = 0; i < HPRE_DFX_FILE_NUM; i++) {
		dfx[i].type = i;
		debugfs_create_file(hpre_dfx_files[i], 0644, parent, &dfx[i],
				    &hpre_atomic64_ops);
	}
}
*qm
)
700 struct device
*dev
= &qm
->pdev
->dev
;
703 qm
->debug
.debug_root
= debugfs_create_dir(dev_name(dev
),
706 qm
->debug
.sqe_mask_offset
= HPRE_SQE_MASK_OFFSET
;
707 qm
->debug
.sqe_mask_len
= HPRE_SQE_MASK_LEN
;
708 hisi_qm_debug_init(qm
);
710 if (qm
->pdev
->device
== HPRE_PCI_DEVICE_ID
) {
711 ret
= hpre_ctrl_debug_init(qm
);
713 goto failed_to_create
;
716 hpre_dfx_debug_init(qm
);
721 debugfs_remove_recursive(qm
->debug
.debug_root
);

static void hpre_debugfs_exit(struct hisi_qm *qm)
{
	debugfs_remove_recursive(qm->debug.debug_root);
}
static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
	if (pdev->revision == QM_HW_V1) {
		pci_warn(pdev, "HPRE version 1 is not supported!\n");
		return -EINVAL;
	}

	qm->pdev = pdev;
	qm->ver = pdev->revision;
	qm->sqe_size = HPRE_SQE_SIZE;
	qm->dev_name = hpre_name;

	qm->fun_type = (pdev->device == HPRE_PCI_DEVICE_ID) ?
			QM_HW_PF : QM_HW_VF;
	if (qm->fun_type == QM_HW_PF) {
		qm->qp_base = HPRE_PF_DEF_Q_BASE;
		qm->qp_num = pf_q_num;
		qm->debug.curr_qm_qp_num = pf_q_num;
		qm->qm_list = &hpre_devices;
	}

	return hisi_qm_init(qm);
}

static void hpre_log_hw_error(struct hisi_qm *qm, u32 err_sts)
{
	const struct hpre_hw_error *err = hpre_hw_errors;
	struct device *dev = &qm->pdev->dev;

	while (err->msg) {
		if (err->int_msk & err_sts)
			dev_warn(dev, "%s [error status=0x%x] found\n",
				 err->msg, err->int_msk);
		err++;
	}
}

static u32 hpre_get_hw_err_status(struct hisi_qm *qm)
{
	return readl(qm->io_base + HPRE_HAC_INT_STATUS);
}

static void hpre_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
{
	writel(err_sts, qm->io_base + HPRE_HAC_SOURCE_INT);
}

static void hpre_open_axi_master_ooo(struct hisi_qm *qm)
{
	u32 value;

	value = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
	writel(value & ~HPRE_AM_OOO_SHUTDOWN_ENABLE,
	       HPRE_ADDR(qm, HPRE_AM_OOO_SHUTDOWN_ENB));
	writel(value | HPRE_AM_OOO_SHUTDOWN_ENABLE,
	       HPRE_ADDR(qm, HPRE_AM_OOO_SHUTDOWN_ENB));
}
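
/* Error-handling callbacks invoked by the common HiSilicon QM layer. */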
static const struct hisi_qm_err_ini hpre_err_ini = {
	.hw_init		= hpre_set_user_domain_and_cache,
	.hw_err_enable		= hpre_hw_error_enable,
	.hw_err_disable		= hpre_hw_error_disable,
	.get_dev_hw_err_status	= hpre_get_hw_err_status,
	.clear_dev_hw_err_status = hpre_clear_hw_err_status,
	.log_dev_hw_err		= hpre_log_hw_error,
	.open_axi_master_ooo	= hpre_open_axi_master_ooo,
	.err_info		= {
		.ce			= QM_BASE_CE,
		.nfe			= QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT,
		.fe			= 0,
		.ecc_2bits_mask		= HPRE_CORE_ECC_2BIT_ERR |
					  HPRE_OOO_ECC_2BIT_ERR,
		.msi_wr_port		= HPRE_WR_MSI_PORT,
		.acpi_rst		= "HRST",
	}
};

static int hpre_pf_probe_init(struct hpre *hpre)
{
	struct hisi_qm *qm = &hpre->qm;
	int ret;

	qm->ctrl_qp_num = HPRE_QUEUE_NUM_V2;

	ret = hpre_set_user_domain_and_cache(qm);
	if (ret)
		return ret;

	qm->err_ini = &hpre_err_ini;
	hisi_qm_dev_err_init(qm);

	return 0;
}

static int hpre_probe_init(struct hpre *hpre)
{
	struct hisi_qm *qm = &hpre->qm;
	int ret;

	if (qm->fun_type == QM_HW_PF) {
		ret = hpre_pf_probe_init(hpre);
		if (ret)
			return ret;
	}

	return 0;
}
*pdev
, const struct pci_device_id
*id
)
844 hpre
= devm_kzalloc(&pdev
->dev
, sizeof(*hpre
), GFP_KERNEL
);
849 ret
= hpre_qm_init(qm
, pdev
);
851 pci_err(pdev
, "Failed to init HPRE QM (%d)!\n", ret
);
855 ret
= hpre_probe_init(hpre
);
857 pci_err(pdev
, "Failed to probe (%d)!\n", ret
);
858 goto err_with_qm_init
;
861 ret
= hisi_qm_start(qm
);
863 goto err_with_err_init
;
865 ret
= hpre_debugfs_init(qm
);
867 dev_warn(&pdev
->dev
, "init debugfs fail!\n");
869 ret
= hisi_qm_alg_register(qm
, &hpre_devices
);
871 pci_err(pdev
, "fail to register algs to crypto!\n");
872 goto err_with_qm_start
;
875 if (qm
->fun_type
== QM_HW_PF
&& vfs_num
) {
876 ret
= hisi_qm_sriov_enable(pdev
, vfs_num
);
878 goto err_with_alg_register
;
883 err_with_alg_register
:
884 hisi_qm_alg_unregister(qm
, &hpre_devices
);
887 hpre_debugfs_exit(qm
);
888 hisi_qm_stop(qm
, QM_NORMAL
);
891 hisi_qm_dev_err_uninit(qm
);
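
/* Teardown mirrors probe: algs and SR-IOV first, then debugfs and the QM. */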
static void hpre_remove(struct pci_dev *pdev)
{
	struct hisi_qm *qm = pci_get_drvdata(pdev);
	int ret;

	hisi_qm_wait_task_finish(qm, &hpre_devices);
	hisi_qm_alg_unregister(qm, &hpre_devices);
	if (qm->fun_type == QM_HW_PF && qm->vfs_num) {
		ret = hisi_qm_sriov_disable(pdev, qm->is_frozen);
		if (ret) {
			pci_err(pdev, "Disable SRIOV fail!\n");
			return;
		}
	}
	if (qm->fun_type == QM_HW_PF) {
		hpre_cnt_regs_clear(qm);
		qm->debug.curr_qm_qp_num = 0;
	}

	hpre_debugfs_exit(qm);
	hisi_qm_stop(qm, QM_NORMAL);
	hisi_qm_dev_err_uninit(qm);
	hisi_qm_uninit(qm);
}

static const struct pci_error_handlers hpre_err_handler = {
	.error_detected		= hisi_qm_dev_err_detected,
	.slot_reset		= hisi_qm_dev_slot_reset,
	.reset_prepare		= hisi_qm_reset_prepare,
	.reset_done		= hisi_qm_reset_done,
};

static struct pci_driver hpre_pci_driver = {
	.name			= hpre_name,
	.id_table		= hpre_dev_ids,
	.probe			= hpre_probe,
	.remove			= hpre_remove,
	.sriov_configure	= IS_ENABLED(CONFIG_PCI_IOV) ?
				  hisi_qm_sriov_configure : NULL,
	.err_handler		= &hpre_err_handler,
	.shutdown		= hisi_qm_dev_shutdown,
};

static void hpre_register_debugfs(void)
{
	if (!debugfs_initialized())
		return;

	hpre_debugfs_root = debugfs_create_dir(hpre_name, NULL);
}

static void hpre_unregister_debugfs(void)
{
	debugfs_remove_recursive(hpre_debugfs_root);
}

static int __init hpre_init(void)
{
	int ret;

	hisi_qm_init_list(&hpre_devices);
	hpre_register_debugfs();

	ret = pci_register_driver(&hpre_pci_driver);
	if (ret) {
		hpre_unregister_debugfs();
		pr_err("hpre: can't register hisi hpre driver.\n");
	}

	return ret;
}

static void __exit hpre_exit(void)
{
	pci_unregister_driver(&hpre_pci_driver);
	hpre_unregister_debugfs();
}

module_init(hpre_init);
module_exit(hpre_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Zaibo Xu <xuzaibo@huawei.com>");
MODULE_DESCRIPTION("Driver for HiSilicon HPRE accelerator");