1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2019 HiSilicon Limited. */
3 #include <linux/acpi.h>
5 #include <linux/bitops.h>
6 #include <linux/debugfs.h>
7 #include <linux/init.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/pci.h>
12 #include <linux/seq_file.h>
13 #include <linux/topology.h>
14 #include <linux/uacce.h>
/* PCI IDs of the ZIP physical and virtual functions */
#define PCI_DEVICE_ID_ZIP_PF		0xa250
#define PCI_DEVICE_ID_ZIP_VF		0xa251
#define HZIP_VF_NUM			63
#define HZIP_QUEUE_NUM_V1		4096
#define HZIP_QUEUE_NUM_V2		1024

/* Clock gating / core enable bits */
#define HZIP_CLOCK_GATE_CTRL		0x301004
#define COMP0_ENABLE			BIT(0)
#define COMP1_ENABLE			BIT(1)
#define DECOMP0_ENABLE			BIT(2)
#define DECOMP1_ENABLE			BIT(3)
#define DECOMP2_ENABLE			BIT(4)
#define DECOMP3_ENABLE			BIT(5)
#define DECOMP4_ENABLE			BIT(6)
#define DECOMP5_ENABLE			BIT(7)
#define ALL_COMP_DECOMP_EN		(COMP0_ENABLE | COMP1_ENABLE | \
					 DECOMP0_ENABLE | DECOMP1_ENABLE | \
					 DECOMP2_ENABLE | DECOMP3_ENABLE | \
					 DECOMP4_ENABLE | DECOMP5_ENABLE)
#define DECOMP_CHECK_ENABLE		BIT(16)
#define HZIP_FSM_MAX_CNT		0x301008

/* AXI cache configuration registers */
#define HZIP_PORT_ARCA_CHE_0		0x301040
#define HZIP_PORT_ARCA_CHE_1		0x301044
#define HZIP_PORT_AWCA_CHE_0		0x301060
#define HZIP_PORT_AWCA_CHE_1		0x301064
#define CACHE_ALL_EN			0xffffffff

/* User-domain (AXUSER high 32 bits) registers */
#define HZIP_BD_RUSER_32_63		0x301110
#define HZIP_SGL_RUSER_32_63		0x30111c
#define HZIP_DATA_RUSER_32_63		0x301128
#define HZIP_DATA_WUSER_32_63		0x301134
#define HZIP_BD_WUSER_32_63		0x301140

#define HZIP_QM_IDEL_STATUS		0x3040e4

/* Per-core debug register windows */
#define HZIP_CORE_DEBUG_COMP_0		0x302000
#define HZIP_CORE_DEBUG_COMP_1		0x303000
#define HZIP_CORE_DEBUG_DECOMP_0	0x304000
#define HZIP_CORE_DEBUG_DECOMP_1	0x305000
#define HZIP_CORE_DEBUG_DECOMP_2	0x306000
#define HZIP_CORE_DEBUG_DECOMP_3	0x307000
#define HZIP_CORE_DEBUG_DECOMP_4	0x308000
#define HZIP_CORE_DEBUG_DECOMP_5	0x309000

/* Hardware error reporting registers */
#define HZIP_CORE_INT_SOURCE		0x3010A0
#define HZIP_CORE_INT_MASK_REG		0x3010A4
#define HZIP_CORE_INT_STATUS		0x3010AC
#define HZIP_CORE_INT_STATUS_M_ECC	BIT(1)
#define HZIP_CORE_SRAM_ECC_ERR_INFO	0x301148
#define HZIP_CORE_INT_RAS_CE_ENB	0x301160
#define HZIP_CORE_INT_RAS_NFE_ENB	0x301164
#define HZIP_CORE_INT_RAS_FE_ENB	0x301168
#define HZIP_CORE_INT_RAS_NFE_ENABLE	0x7FE
#define HZIP_SRAM_ECC_ERR_NUM_SHIFT	16
#define HZIP_SRAM_ECC_ERR_ADDR_SHIFT	24
#define HZIP_CORE_INT_MASK_ALL		GENMASK(10, 0)
#define HZIP_COMP_CORE_NUM		2
#define HZIP_DECOMP_CORE_NUM		6
/* NOTE(review): continuation line was missing in the source; reconstructed */
#define HZIP_CORE_NUM			(HZIP_COMP_CORE_NUM + \
					 HZIP_DECOMP_CORE_NUM)
#define HZIP_SQE_SIZE			128
#define HZIP_SQ_SIZE			(HZIP_SQE_SIZE * QM_Q_DEPTH)
#define HZIP_PF_DEF_Q_NUM		64
#define HZIP_PF_DEF_Q_BASE		0

#define HZIP_SOFT_CTRL_CNT_CLR_CE	0x301000
#define SOFT_CTRL_CNT_CLR_CE_BIT	BIT(0)

/* buffer size for the debugfs control files (value + newline + NUL) */
#define HZIP_BUF_SIZE			22
89 static const char hisi_zip_name
[] = "hisi_zip";
90 static struct dentry
*hzip_debugfs_root
;
91 static struct hisi_qm_list zip_devices
;
93 struct hisi_zip_hw_error
{
98 static const struct hisi_zip_hw_error zip_hw_error
[] = {
99 { .int_msk
= BIT(0), .msg
= "zip_ecc_1bitt_err" },
100 { .int_msk
= BIT(1), .msg
= "zip_ecc_2bit_err" },
101 { .int_msk
= BIT(2), .msg
= "zip_axi_rresp_err" },
102 { .int_msk
= BIT(3), .msg
= "zip_axi_bresp_err" },
103 { .int_msk
= BIT(4), .msg
= "zip_src_addr_parse_err" },
104 { .int_msk
= BIT(5), .msg
= "zip_dst_addr_parse_err" },
105 { .int_msk
= BIT(6), .msg
= "zip_pre_in_addr_err" },
106 { .int_msk
= BIT(7), .msg
= "zip_pre_in_data_err" },
107 { .int_msk
= BIT(8), .msg
= "zip_com_inf_err" },
108 { .int_msk
= BIT(9), .msg
= "zip_enc_inf_err" },
109 { .int_msk
= BIT(10), .msg
= "zip_pre_out_err" },
/* Indices of the per-device debugfs control files */
enum ctrl_debug_file_index {
	HZIP_CURRENT_QM,
	HZIP_CLEAR_ENABLE,
	HZIP_DEBUG_FILE_NUM,
};

static const char * const ctrl_debug_file_name[] = {
	[HZIP_CURRENT_QM] = "current_qm",
	[HZIP_CLEAR_ENABLE] = "clear_enable",
};
124 struct ctrl_debug_file
{
125 enum ctrl_debug_file_index index
;
127 struct hisi_zip_ctrl
*ctrl
;
131 * One ZIP controller has one PF and multiple VFs, some global configurations
132 * which PF has need this structure.
134 * Just relevant for PF.
136 struct hisi_zip_ctrl
{
138 struct hisi_zip
*hisi_zip
;
139 struct dentry
*debug_root
;
140 struct ctrl_debug_file files
[HZIP_DEBUG_FILE_NUM
];
154 static const u64 core_offsets
[] = {
155 [HZIP_COMP_CORE0
] = 0x302000,
156 [HZIP_COMP_CORE1
] = 0x303000,
157 [HZIP_DECOMP_CORE0
] = 0x304000,
158 [HZIP_DECOMP_CORE1
] = 0x305000,
159 [HZIP_DECOMP_CORE2
] = 0x306000,
160 [HZIP_DECOMP_CORE3
] = 0x307000,
161 [HZIP_DECOMP_CORE4
] = 0x308000,
162 [HZIP_DECOMP_CORE5
] = 0x309000,
165 static struct debugfs_reg32 hzip_dfx_regs
[] = {
166 {"HZIP_GET_BD_NUM ", 0x00ull
},
167 {"HZIP_GET_RIGHT_BD ", 0x04ull
},
168 {"HZIP_GET_ERROR_BD ", 0x08ull
},
169 {"HZIP_DONE_BD_NUM ", 0x0cull
},
170 {"HZIP_WORK_CYCLE ", 0x10ull
},
171 {"HZIP_IDLE_CYCLE ", 0x18ull
},
172 {"HZIP_MAX_DELAY ", 0x20ull
},
173 {"HZIP_MIN_DELAY ", 0x24ull
},
174 {"HZIP_AVG_DELAY ", 0x28ull
},
175 {"HZIP_MEM_VISIBLE_DATA ", 0x30ull
},
176 {"HZIP_MEM_VISIBLE_ADDR ", 0x34ull
},
177 {"HZIP_COMSUMED_BYTE ", 0x38ull
},
178 {"HZIP_PRODUCED_BYTE ", 0x40ull
},
179 {"HZIP_COMP_INF ", 0x70ull
},
180 {"HZIP_PRE_OUT ", 0x78ull
},
181 {"HZIP_BD_RD ", 0x7cull
},
182 {"HZIP_BD_WR ", 0x80ull
},
183 {"HZIP_GET_BD_AXI_ERR_NUM ", 0x84ull
},
184 {"HZIP_GET_BD_PARSE_ERR_NUM ", 0x88ull
},
185 {"HZIP_ADD_BD_AXI_ERR_NUM ", 0x8cull
},
186 {"HZIP_DECOMP_STF_RELOAD_CURR_ST ", 0x94ull
},
187 {"HZIP_DECOMP_LZ77_CURR_ST ", 0x9cull
},
190 static int pf_q_num_set(const char *val
, const struct kernel_param
*kp
)
192 struct pci_dev
*pdev
= pci_get_device(PCI_VENDOR_ID_HUAWEI
,
193 PCI_DEVICE_ID_ZIP_PF
, NULL
);
202 q_num
= min_t(u32
, HZIP_QUEUE_NUM_V1
, HZIP_QUEUE_NUM_V2
);
203 pr_info("No device found currently, suppose queue number is %d\n",
206 rev_id
= pdev
->revision
;
209 q_num
= HZIP_QUEUE_NUM_V1
;
212 q_num
= HZIP_QUEUE_NUM_V2
;
219 ret
= kstrtou32(val
, 10, &n
);
220 if (ret
!= 0 || n
> q_num
|| n
== 0)
223 return param_set_int(val
, kp
);
226 static const struct kernel_param_ops pf_q_num_ops
= {
228 .get
= param_get_int
,
231 static u32 pf_q_num
= HZIP_PF_DEF_Q_NUM
;
232 module_param_cb(pf_q_num
, &pf_q_num_ops
, &pf_q_num
, 0444);
233 MODULE_PARM_DESC(pf_q_num
, "Number of queues in PF(v1 1-4096, v2 1-1024)");
236 module_param(vfs_num
, uint
, 0444);
237 MODULE_PARM_DESC(vfs_num
, "Number of VFs to enable(1-63)");
239 static const struct pci_device_id hisi_zip_dev_ids
[] = {
240 { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI
, PCI_DEVICE_ID_ZIP_PF
) },
241 { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI
, PCI_DEVICE_ID_ZIP_VF
) },
244 MODULE_DEVICE_TABLE(pci
, hisi_zip_dev_ids
);
246 int zip_create_qps(struct hisi_qp
**qps
, int qp_num
)
248 int node
= cpu_to_node(smp_processor_id());
250 return hisi_qm_alloc_qps_node(&zip_devices
, qp_num
, 0, node
, qps
);
253 static void hisi_zip_set_user_domain_and_cache(struct hisi_zip
*hisi_zip
)
255 void __iomem
*base
= hisi_zip
->qm
.io_base
;
258 writel(AXUSER_BASE
, base
+ QM_ARUSER_M_CFG_1
);
259 writel(ARUSER_M_CFG_ENABLE
, base
+ QM_ARUSER_M_CFG_ENABLE
);
260 writel(AXUSER_BASE
, base
+ QM_AWUSER_M_CFG_1
);
261 writel(AWUSER_M_CFG_ENABLE
, base
+ QM_AWUSER_M_CFG_ENABLE
);
262 writel(WUSER_M_CFG_ENABLE
, base
+ QM_WUSER_M_CFG_ENABLE
);
265 writel(AXI_M_CFG
, base
+ QM_AXI_M_CFG
);
266 writel(AXI_M_CFG_ENABLE
, base
+ QM_AXI_M_CFG_ENABLE
);
267 /* disable FLR triggered by BME(bus master enable) */
268 writel(PEH_AXUSER_CFG
, base
+ QM_PEH_AXUSER_CFG
);
269 writel(PEH_AXUSER_CFG_ENABLE
, base
+ QM_PEH_AXUSER_CFG_ENABLE
);
272 writel(CACHE_ALL_EN
, base
+ HZIP_PORT_ARCA_CHE_0
);
273 writel(CACHE_ALL_EN
, base
+ HZIP_PORT_ARCA_CHE_1
);
274 writel(CACHE_ALL_EN
, base
+ HZIP_PORT_AWCA_CHE_0
);
275 writel(CACHE_ALL_EN
, base
+ HZIP_PORT_AWCA_CHE_1
);
277 /* user domain configurations */
278 writel(AXUSER_BASE
, base
+ HZIP_BD_RUSER_32_63
);
279 writel(AXUSER_BASE
, base
+ HZIP_SGL_RUSER_32_63
);
280 writel(AXUSER_BASE
, base
+ HZIP_BD_WUSER_32_63
);
282 if (hisi_zip
->qm
.use_sva
) {
283 writel(AXUSER_BASE
| AXUSER_SSV
, base
+ HZIP_DATA_RUSER_32_63
);
284 writel(AXUSER_BASE
| AXUSER_SSV
, base
+ HZIP_DATA_WUSER_32_63
);
286 writel(AXUSER_BASE
, base
+ HZIP_DATA_RUSER_32_63
);
287 writel(AXUSER_BASE
, base
+ HZIP_DATA_WUSER_32_63
);
290 /* let's open all compression/decompression cores */
291 writel(DECOMP_CHECK_ENABLE
| ALL_COMP_DECOMP_EN
,
292 base
+ HZIP_CLOCK_GATE_CTRL
);
294 /* enable sqc writeback */
295 writel(SQC_CACHE_ENABLE
| CQC_CACHE_ENABLE
| SQC_CACHE_WB_ENABLE
|
296 CQC_CACHE_WB_ENABLE
| FIELD_PREP(SQC_CACHE_WB_THRD
, 1) |
297 FIELD_PREP(CQC_CACHE_WB_THRD
, 1), base
+ QM_CACHE_CTL
);
300 static void hisi_zip_hw_error_enable(struct hisi_qm
*qm
)
302 if (qm
->ver
== QM_HW_V1
) {
303 writel(HZIP_CORE_INT_MASK_ALL
,
304 qm
->io_base
+ HZIP_CORE_INT_MASK_REG
);
305 dev_info(&qm
->pdev
->dev
, "Does not support hw error handle\n");
309 /* clear ZIP hw error source if having */
310 writel(HZIP_CORE_INT_MASK_ALL
, qm
->io_base
+ HZIP_CORE_INT_SOURCE
);
312 /* configure error type */
313 writel(0x1, qm
->io_base
+ HZIP_CORE_INT_RAS_CE_ENB
);
314 writel(0x0, qm
->io_base
+ HZIP_CORE_INT_RAS_FE_ENB
);
315 writel(HZIP_CORE_INT_RAS_NFE_ENABLE
,
316 qm
->io_base
+ HZIP_CORE_INT_RAS_NFE_ENB
);
318 /* enable ZIP hw error interrupts */
319 writel(0, qm
->io_base
+ HZIP_CORE_INT_MASK_REG
);
322 static void hisi_zip_hw_error_disable(struct hisi_qm
*qm
)
324 /* disable ZIP hw error interrupts */
325 writel(HZIP_CORE_INT_MASK_ALL
, qm
->io_base
+ HZIP_CORE_INT_MASK_REG
);
328 static inline struct hisi_qm
*file_to_qm(struct ctrl_debug_file
*file
)
330 struct hisi_zip
*hisi_zip
= file
->ctrl
->hisi_zip
;
332 return &hisi_zip
->qm
;
335 static u32
current_qm_read(struct ctrl_debug_file
*file
)
337 struct hisi_qm
*qm
= file_to_qm(file
);
339 return readl(qm
->io_base
+ QM_DFX_MB_CNT_VF
);
342 static int current_qm_write(struct ctrl_debug_file
*file
, u32 val
)
344 struct hisi_qm
*qm
= file_to_qm(file
);
345 struct hisi_zip_ctrl
*ctrl
= file
->ctrl
;
349 if (val
> ctrl
->num_vfs
)
352 /* Calculate curr_qm_qp_num and store */
354 qm
->debug
.curr_qm_qp_num
= qm
->qp_num
;
356 vfq_num
= (qm
->ctrl_qp_num
- qm
->qp_num
) / ctrl
->num_vfs
;
357 if (val
== ctrl
->num_vfs
)
358 qm
->debug
.curr_qm_qp_num
= qm
->ctrl_qp_num
-
359 qm
->qp_num
- (ctrl
->num_vfs
- 1) * vfq_num
;
361 qm
->debug
.curr_qm_qp_num
= vfq_num
;
364 writel(val
, qm
->io_base
+ QM_DFX_MB_CNT_VF
);
365 writel(val
, qm
->io_base
+ QM_DFX_DB_CNT_VF
);
368 (readl(qm
->io_base
+ QM_DFX_SQE_CNT_VF_SQN
) & CURRENT_Q_MASK
);
369 writel(tmp
, qm
->io_base
+ QM_DFX_SQE_CNT_VF_SQN
);
372 (readl(qm
->io_base
+ QM_DFX_CQE_CNT_VF_CQN
) & CURRENT_Q_MASK
);
373 writel(tmp
, qm
->io_base
+ QM_DFX_CQE_CNT_VF_CQN
);
378 static u32
clear_enable_read(struct ctrl_debug_file
*file
)
380 struct hisi_qm
*qm
= file_to_qm(file
);
382 return readl(qm
->io_base
+ HZIP_SOFT_CTRL_CNT_CLR_CE
) &
383 SOFT_CTRL_CNT_CLR_CE_BIT
;
386 static int clear_enable_write(struct ctrl_debug_file
*file
, u32 val
)
388 struct hisi_qm
*qm
= file_to_qm(file
);
391 if (val
!= 1 && val
!= 0)
394 tmp
= (readl(qm
->io_base
+ HZIP_SOFT_CTRL_CNT_CLR_CE
) &
395 ~SOFT_CTRL_CNT_CLR_CE_BIT
) | val
;
396 writel(tmp
, qm
->io_base
+ HZIP_SOFT_CTRL_CNT_CLR_CE
);
401 static ssize_t
ctrl_debug_read(struct file
*filp
, char __user
*buf
,
402 size_t count
, loff_t
*pos
)
404 struct ctrl_debug_file
*file
= filp
->private_data
;
405 char tbuf
[HZIP_BUF_SIZE
];
409 spin_lock_irq(&file
->lock
);
410 switch (file
->index
) {
411 case HZIP_CURRENT_QM
:
412 val
= current_qm_read(file
);
414 case HZIP_CLEAR_ENABLE
:
415 val
= clear_enable_read(file
);
418 spin_unlock_irq(&file
->lock
);
421 spin_unlock_irq(&file
->lock
);
422 ret
= sprintf(tbuf
, "%u\n", val
);
423 return simple_read_from_buffer(buf
, count
, pos
, tbuf
, ret
);
426 static ssize_t
ctrl_debug_write(struct file
*filp
, const char __user
*buf
,
427 size_t count
, loff_t
*pos
)
429 struct ctrl_debug_file
*file
= filp
->private_data
;
430 char tbuf
[HZIP_BUF_SIZE
];
437 if (count
>= HZIP_BUF_SIZE
)
440 len
= simple_write_to_buffer(tbuf
, HZIP_BUF_SIZE
- 1, pos
, buf
, count
);
445 if (kstrtoul(tbuf
, 0, &val
))
448 spin_lock_irq(&file
->lock
);
449 switch (file
->index
) {
450 case HZIP_CURRENT_QM
:
451 ret
= current_qm_write(file
, val
);
455 case HZIP_CLEAR_ENABLE
:
456 ret
= clear_enable_write(file
, val
);
464 spin_unlock_irq(&file
->lock
);
469 spin_unlock_irq(&file
->lock
);
473 static const struct file_operations ctrl_debug_fops
= {
474 .owner
= THIS_MODULE
,
476 .read
= ctrl_debug_read
,
477 .write
= ctrl_debug_write
,
480 static int hisi_zip_core_debug_init(struct hisi_zip_ctrl
*ctrl
)
482 struct hisi_zip
*hisi_zip
= ctrl
->hisi_zip
;
483 struct hisi_qm
*qm
= &hisi_zip
->qm
;
484 struct device
*dev
= &qm
->pdev
->dev
;
485 struct debugfs_regset32
*regset
;
486 struct dentry
*tmp_d
;
487 char buf
[HZIP_BUF_SIZE
];
490 for (i
= 0; i
< HZIP_CORE_NUM
; i
++) {
491 if (i
< HZIP_COMP_CORE_NUM
)
492 sprintf(buf
, "comp_core%d", i
);
494 sprintf(buf
, "decomp_core%d", i
- HZIP_COMP_CORE_NUM
);
496 regset
= devm_kzalloc(dev
, sizeof(*regset
), GFP_KERNEL
);
500 regset
->regs
= hzip_dfx_regs
;
501 regset
->nregs
= ARRAY_SIZE(hzip_dfx_regs
);
502 regset
->base
= qm
->io_base
+ core_offsets
[i
];
504 tmp_d
= debugfs_create_dir(buf
, ctrl
->debug_root
);
505 debugfs_create_regset32("regs", 0444, tmp_d
, regset
);
511 static int hisi_zip_ctrl_debug_init(struct hisi_zip_ctrl
*ctrl
)
515 for (i
= HZIP_CURRENT_QM
; i
< HZIP_DEBUG_FILE_NUM
; i
++) {
516 spin_lock_init(&ctrl
->files
[i
].lock
);
517 ctrl
->files
[i
].ctrl
= ctrl
;
518 ctrl
->files
[i
].index
= i
;
520 debugfs_create_file(ctrl_debug_file_name
[i
], 0600,
521 ctrl
->debug_root
, ctrl
->files
+ i
,
525 return hisi_zip_core_debug_init(ctrl
);
528 static int hisi_zip_debugfs_init(struct hisi_zip
*hisi_zip
)
530 struct hisi_qm
*qm
= &hisi_zip
->qm
;
531 struct device
*dev
= &qm
->pdev
->dev
;
532 struct dentry
*dev_d
;
535 dev_d
= debugfs_create_dir(dev_name(dev
), hzip_debugfs_root
);
537 qm
->debug
.debug_root
= dev_d
;
538 ret
= hisi_qm_debug_init(qm
);
540 goto failed_to_create
;
542 if (qm
->fun_type
== QM_HW_PF
) {
543 hisi_zip
->ctrl
->debug_root
= dev_d
;
544 ret
= hisi_zip_ctrl_debug_init(hisi_zip
->ctrl
);
546 goto failed_to_create
;
552 debugfs_remove_recursive(hzip_debugfs_root
);
556 static void hisi_zip_debug_regs_clear(struct hisi_zip
*hisi_zip
)
558 struct hisi_qm
*qm
= &hisi_zip
->qm
;
560 writel(0x0, qm
->io_base
+ QM_DFX_MB_CNT_VF
);
561 writel(0x0, qm
->io_base
+ QM_DFX_DB_CNT_VF
);
562 writel(0x0, qm
->io_base
+ HZIP_SOFT_CTRL_CNT_CLR_CE
);
564 hisi_qm_debug_regs_clear(qm
);
567 static void hisi_zip_debugfs_exit(struct hisi_zip
*hisi_zip
)
569 struct hisi_qm
*qm
= &hisi_zip
->qm
;
571 debugfs_remove_recursive(qm
->debug
.debug_root
);
573 if (qm
->fun_type
== QM_HW_PF
)
574 hisi_zip_debug_regs_clear(hisi_zip
);
577 static void hisi_zip_log_hw_error(struct hisi_qm
*qm
, u32 err_sts
)
579 const struct hisi_zip_hw_error
*err
= zip_hw_error
;
580 struct device
*dev
= &qm
->pdev
->dev
;
584 if (err
->int_msk
& err_sts
) {
585 dev_err(dev
, "%s [error status=0x%x] found\n",
586 err
->msg
, err
->int_msk
);
588 if (err
->int_msk
& HZIP_CORE_INT_STATUS_M_ECC
) {
589 err_val
= readl(qm
->io_base
+
590 HZIP_CORE_SRAM_ECC_ERR_INFO
);
591 dev_err(dev
, "hisi-zip multi ecc sram num=0x%x\n",
593 HZIP_SRAM_ECC_ERR_NUM_SHIFT
) & 0xFF));
594 dev_err(dev
, "hisi-zip multi ecc sram addr=0x%x\n",
596 HZIP_SRAM_ECC_ERR_ADDR_SHIFT
));
602 writel(err_sts
, qm
->io_base
+ HZIP_CORE_INT_SOURCE
);
605 static u32
hisi_zip_get_hw_err_status(struct hisi_qm
*qm
)
607 return readl(qm
->io_base
+ HZIP_CORE_INT_STATUS
);
610 static const struct hisi_qm_err_ini hisi_zip_err_ini
= {
611 .hw_err_enable
= hisi_zip_hw_error_enable
,
612 .hw_err_disable
= hisi_zip_hw_error_disable
,
613 .get_dev_hw_err_status
= hisi_zip_get_hw_err_status
,
614 .log_dev_hw_err
= hisi_zip_log_hw_error
,
618 QM_ACC_WB_NOT_READY_TIMEOUT
,
620 .msi
= QM_DB_RANDOM_INVALID
,
624 static int hisi_zip_pf_probe_init(struct hisi_zip
*hisi_zip
)
626 struct hisi_qm
*qm
= &hisi_zip
->qm
;
627 struct hisi_zip_ctrl
*ctrl
;
629 ctrl
= devm_kzalloc(&qm
->pdev
->dev
, sizeof(*ctrl
), GFP_KERNEL
);
633 hisi_zip
->ctrl
= ctrl
;
634 ctrl
->hisi_zip
= hisi_zip
;
638 qm
->ctrl_qp_num
= HZIP_QUEUE_NUM_V1
;
642 qm
->ctrl_qp_num
= HZIP_QUEUE_NUM_V2
;
649 qm
->err_ini
= &hisi_zip_err_ini
;
651 hisi_zip_set_user_domain_and_cache(hisi_zip
);
652 hisi_qm_dev_err_init(qm
);
653 hisi_zip_debug_regs_clear(hisi_zip
);
658 /* Currently we only support equal assignment */
659 static int hisi_zip_vf_q_assign(struct hisi_zip
*hisi_zip
, int num_vfs
)
661 struct hisi_qm
*qm
= &hisi_zip
->qm
;
662 u32 qp_num
= qm
->qp_num
;
664 u32 q_num
, remain_q_num
, i
;
670 remain_q_num
= qm
->ctrl_qp_num
- qp_num
;
671 if (remain_q_num
< num_vfs
)
674 q_num
= remain_q_num
/ num_vfs
;
675 for (i
= 1; i
<= num_vfs
; i
++) {
677 q_num
+= remain_q_num
% num_vfs
;
678 ret
= hisi_qm_set_vft(qm
, i
, q_base
, q_num
);
687 static int hisi_zip_clear_vft_config(struct hisi_zip
*hisi_zip
)
689 struct hisi_zip_ctrl
*ctrl
= hisi_zip
->ctrl
;
690 struct hisi_qm
*qm
= &hisi_zip
->qm
;
691 u32 i
, num_vfs
= ctrl
->num_vfs
;
694 for (i
= 1; i
<= num_vfs
; i
++) {
695 ret
= hisi_qm_set_vft(qm
, i
, 0, 0);
705 static int hisi_zip_sriov_enable(struct pci_dev
*pdev
, int max_vfs
)
707 struct hisi_zip
*hisi_zip
= pci_get_drvdata(pdev
);
708 int pre_existing_vfs
, num_vfs
, ret
;
710 pre_existing_vfs
= pci_num_vf(pdev
);
712 if (pre_existing_vfs
) {
714 "Can't enable VF. Please disable pre-enabled VFs!\n");
718 num_vfs
= min_t(int, max_vfs
, HZIP_VF_NUM
);
720 ret
= hisi_zip_vf_q_assign(hisi_zip
, num_vfs
);
722 dev_err(&pdev
->dev
, "Can't assign queues for VF!\n");
726 hisi_zip
->ctrl
->num_vfs
= num_vfs
;
728 ret
= pci_enable_sriov(pdev
, num_vfs
);
730 dev_err(&pdev
->dev
, "Can't enable VF!\n");
731 hisi_zip_clear_vft_config(hisi_zip
);
738 static int hisi_zip_sriov_disable(struct pci_dev
*pdev
)
740 struct hisi_zip
*hisi_zip
= pci_get_drvdata(pdev
);
742 if (pci_vfs_assigned(pdev
)) {
744 "Can't disable VFs while VFs are assigned!\n");
748 /* remove in hisi_zip_pci_driver will be called to free VF resources */
749 pci_disable_sriov(pdev
);
751 return hisi_zip_clear_vft_config(hisi_zip
);
754 static int hisi_zip_probe(struct pci_dev
*pdev
, const struct pci_device_id
*id
)
756 struct hisi_zip
*hisi_zip
;
757 enum qm_hw_ver rev_id
;
761 rev_id
= hisi_qm_get_hw_version(pdev
);
762 if (rev_id
== QM_HW_UNKNOWN
)
765 hisi_zip
= devm_kzalloc(&pdev
->dev
, sizeof(*hisi_zip
), GFP_KERNEL
);
768 pci_set_drvdata(pdev
, hisi_zip
);
771 qm
->use_dma_api
= true;
775 qm
->algs
= "zlib\ngzip";
776 qm
->sqe_size
= HZIP_SQE_SIZE
;
777 qm
->dev_name
= hisi_zip_name
;
778 qm
->fun_type
= (pdev
->device
== PCI_DEVICE_ID_ZIP_PF
) ? QM_HW_PF
:
780 ret
= hisi_qm_init(qm
);
782 dev_err(&pdev
->dev
, "Failed to init qm!\n");
786 if (qm
->fun_type
== QM_HW_PF
) {
787 ret
= hisi_zip_pf_probe_init(hisi_zip
);
791 qm
->qp_base
= HZIP_PF_DEF_Q_BASE
;
792 qm
->qp_num
= pf_q_num
;
793 } else if (qm
->fun_type
== QM_HW_VF
) {
795 * have no way to get qm configure in VM in v1 hardware,
796 * so currently force PF to uses HZIP_PF_DEF_Q_NUM, and force
797 * to trigger only one VF in v1 hardware.
799 * v2 hardware has no such problem.
801 if (qm
->ver
== QM_HW_V1
) {
802 qm
->qp_base
= HZIP_PF_DEF_Q_NUM
;
803 qm
->qp_num
= HZIP_QUEUE_NUM_V1
- HZIP_PF_DEF_Q_NUM
;
804 } else if (qm
->ver
== QM_HW_V2
)
805 /* v2 starts to support get vft by mailbox */
806 hisi_qm_get_vft(qm
, &qm
->qp_base
, &qm
->qp_num
);
809 ret
= hisi_qm_start(qm
);
813 ret
= hisi_zip_debugfs_init(hisi_zip
);
815 dev_err(&pdev
->dev
, "Failed to init debugfs (%d)!\n", ret
);
817 hisi_qm_add_to_list(qm
, &zip_devices
);
820 ret
= uacce_register(qm
->uacce
);
825 if (qm
->fun_type
== QM_HW_PF
&& vfs_num
> 0) {
826 ret
= hisi_zip_sriov_enable(pdev
, vfs_num
);
828 goto err_remove_from_list
;
833 err_remove_from_list
:
834 hisi_qm_del_from_list(qm
, &zip_devices
);
835 hisi_zip_debugfs_exit(hisi_zip
);
/* PCI sriov_configure callback: 0 disables VFs, >0 enables them. */
static int hisi_zip_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	if (num_vfs == 0)
		return hisi_zip_sriov_disable(pdev);
	else
		return hisi_zip_sriov_enable(pdev, num_vfs);
}
850 static void hisi_zip_remove(struct pci_dev
*pdev
)
852 struct hisi_zip
*hisi_zip
= pci_get_drvdata(pdev
);
853 struct hisi_qm
*qm
= &hisi_zip
->qm
;
855 if (qm
->fun_type
== QM_HW_PF
&& hisi_zip
->ctrl
->num_vfs
!= 0)
856 hisi_zip_sriov_disable(pdev
);
858 hisi_zip_debugfs_exit(hisi_zip
);
861 hisi_qm_dev_err_uninit(qm
);
863 hisi_qm_del_from_list(qm
, &zip_devices
);
866 static const struct pci_error_handlers hisi_zip_err_handler
= {
867 .error_detected
= hisi_qm_dev_err_detected
,
870 static struct pci_driver hisi_zip_pci_driver
= {
872 .id_table
= hisi_zip_dev_ids
,
873 .probe
= hisi_zip_probe
,
874 .remove
= hisi_zip_remove
,
875 .sriov_configure
= IS_ENABLED(CONFIG_PCI_IOV
) ?
876 hisi_zip_sriov_configure
: NULL
,
877 .err_handler
= &hisi_zip_err_handler
,
880 static void hisi_zip_register_debugfs(void)
882 if (!debugfs_initialized())
885 hzip_debugfs_root
= debugfs_create_dir("hisi_zip", NULL
);
888 static void hisi_zip_unregister_debugfs(void)
890 debugfs_remove_recursive(hzip_debugfs_root
);
893 static int __init
hisi_zip_init(void)
897 hisi_qm_init_list(&zip_devices
);
898 hisi_zip_register_debugfs();
900 ret
= pci_register_driver(&hisi_zip_pci_driver
);
902 pr_err("Failed to register pci driver.\n");
906 ret
= hisi_zip_register_to_crypto();
908 pr_err("Failed to register driver to crypto.\n");
915 pci_unregister_driver(&hisi_zip_pci_driver
);
917 hisi_zip_unregister_debugfs();
922 static void __exit
hisi_zip_exit(void)
924 hisi_zip_unregister_from_crypto();
925 pci_unregister_driver(&hisi_zip_pci_driver
);
926 hisi_zip_unregister_debugfs();
929 module_init(hisi_zip_init
);
930 module_exit(hisi_zip_exit
);
932 MODULE_LICENSE("GPL v2");
933 MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>");
934 MODULE_DESCRIPTION("Driver for HiSilicon ZIP accelerator");