// SPDX-License-Identifier: GPL-2.0-only
/* Huawei HiNIC PCI Express Linux driver
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 */
7 #include <linux/kernel.h>
8 #include <linux/types.h>
9 #include <linux/errno.h>
10 #include <linux/pci.h>
11 #include <linux/device.h>
12 #include <linux/workqueue.h>
13 #include <linux/interrupt.h>
14 #include <linux/slab.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/log2.h>
17 #include <asm/byteorder.h>
18 #include <asm/barrier.h>
20 #include "hinic_hw_dev.h"
21 #include "hinic_hw_csr.h"
22 #include "hinic_hw_if.h"
23 #include "hinic_hw_eqs.h"
#define HINIC_EQS_WQ_NAME                       "hinic_eqs"

/* Number of pages needed to hold q_len elements of elem_size bytes each,
 * rounded up to a whole number of pages.
 */
#define GET_EQ_NUM_PAGES(eq, pg_size)           \
		(ALIGN((eq)->q_len * (eq)->elem_size, pg_size) / (pg_size))

#define GET_EQ_NUM_ELEMS_IN_PG(eq, pg_size)     ((pg_size) / (eq)->elem_size)

/* CSR addresses differ between async (AEQ) and completion (CEQ) queues */
#define EQ_CONS_IDX_REG_ADDR(eq)        (((eq)->type == HINIC_AEQ) ? \
			HINIC_CSR_AEQ_CONS_IDX_ADDR((eq)->q_id) : \
			HINIC_CSR_CEQ_CONS_IDX_ADDR((eq)->q_id))

#define EQ_PROD_IDX_REG_ADDR(eq)        (((eq)->type == HINIC_AEQ) ? \
			HINIC_CSR_AEQ_PROD_IDX_ADDR((eq)->q_id) : \
			HINIC_CSR_CEQ_PROD_IDX_ADDR((eq)->q_id))

#define EQ_HI_PHYS_ADDR_REG(eq, pg_num) (((eq)->type == HINIC_AEQ) ? \
			HINIC_CSR_AEQ_HI_PHYS_ADDR_REG((eq)->q_id, pg_num) : \
			HINIC_CSR_CEQ_HI_PHYS_ADDR_REG((eq)->q_id, pg_num))

#define EQ_LO_PHYS_ADDR_REG(eq, pg_num) (((eq)->type == HINIC_AEQ) ? \
			HINIC_CSR_AEQ_LO_PHYS_ADDR_REG((eq)->q_id, pg_num) : \
			HINIC_CSR_CEQ_LO_PHYS_ADDR_REG((eq)->q_id, pg_num))

/* Element address = page base + offset within page (num_elem_in_pg is a
 * power of 2, checked at init, so the mask is valid).
 */
#define GET_EQ_ELEMENT(eq, idx)         \
		((eq)->virt_addr[(idx) / (eq)->num_elem_in_pg] + \
		 (((idx) & ((eq)->num_elem_in_pg - 1)) * (eq)->elem_size))

#define GET_AEQ_ELEM(eq, idx)           ((struct hinic_aeq_elem *) \
					 GET_EQ_ELEMENT(eq, idx))

#define GET_CEQ_ELEM(eq, idx)           ((u32 *) \
					 GET_EQ_ELEMENT(eq, idx))

#define GET_CURR_AEQ_ELEM(eq)           GET_AEQ_ELEM(eq, (eq)->cons_idx)

#define GET_CURR_CEQ_ELEM(eq)           GET_CEQ_ELEM(eq, (eq)->cons_idx)

/* HW expresses the page size as log2 of the number of 4K pages */
#define PAGE_IN_4K(page_size)           ((page_size) >> 12)
#define EQ_SET_HW_PAGE_SIZE_VAL(eq)     (ilog2(PAGE_IN_4K((eq)->page_size)))

/* HW expresses the element size as log2 of the number of 32B units */
#define ELEMENT_SIZE_IN_32B(eq)         (((eq)->elem_size) >> 5)
#define EQ_SET_HW_ELEM_SIZE_VAL(eq)     (ilog2(ELEMENT_SIZE_IN_32B(eq)))

#define EQ_MAX_PAGES                    8
/* A CEQ element is a single 32-bit word: bits [25:23] carry the event type,
 * bits [25:0] (minus the type bits' overlap per HW spec) carry the data.
 */
#define CEQE_TYPE_SHIFT                 23
#define CEQE_TYPE_MASK                  0x7
#define CEQE_TYPE(ceqe)                 (((ceqe) >> CEQE_TYPE_SHIFT) & \
					 CEQE_TYPE_MASK)

#define CEQE_DATA_MASK                  0x3FFFFFF
#define CEQE_DATA(ceqe)                 ((ceqe) & CEQE_DATA_MASK)
/* Recover the containing aeqs/ceqs structure from a queue pointer: the
 * queues are laid out in an array, so subtracting q_id lands on element 0.
 */
#define aeq_to_aeqs(eq)                 \
		container_of((eq) - (eq)->q_id, struct hinic_aeqs, aeq[0])

#define ceq_to_ceqs(eq)                 \
		container_of((eq) - (eq)->q_id, struct hinic_ceqs, ceq[0])

#define work_to_aeq_work(work)          \
		container_of(work, struct hinic_eq_work, work)

#define DMA_ATTR_AEQ_DEFAULT            0
#define DMA_ATTR_CEQ_DEFAULT            0

#define THRESH_CEQ_DEFAULT              0

enum eq_int_mode {
	EQ_INT_MODE_ARMED,
	EQ_INT_MODE_ALWAYS,
};

enum eq_arm_state {
	EQ_NOT_ARMED,
	EQ_ARMED,
};
105 * hinic_aeq_register_hw_cb - register AEQ callback for specific event
106 * @aeqs: pointer to Async eqs of the chip
107 * @event: aeq event to register callback for it
108 * @handle: private data will be used by the callback
109 * @hwe_handler: callback function
111 void hinic_aeq_register_hw_cb(struct hinic_aeqs
*aeqs
,
112 enum hinic_aeq_type event
, void *handle
,
113 void (*hwe_handler
)(void *handle
, void *data
,
116 struct hinic_hw_event_cb
*hwe_cb
= &aeqs
->hwe_cb
[event
];
118 hwe_cb
->hwe_handler
= hwe_handler
;
119 hwe_cb
->handle
= handle
;
120 hwe_cb
->hwe_state
= HINIC_EQE_ENABLED
;
124 * hinic_aeq_unregister_hw_cb - unregister the AEQ callback for specific event
125 * @aeqs: pointer to Async eqs of the chip
126 * @event: aeq event to unregister callback for it
128 void hinic_aeq_unregister_hw_cb(struct hinic_aeqs
*aeqs
,
129 enum hinic_aeq_type event
)
131 struct hinic_hw_event_cb
*hwe_cb
= &aeqs
->hwe_cb
[event
];
133 hwe_cb
->hwe_state
&= ~HINIC_EQE_ENABLED
;
135 while (hwe_cb
->hwe_state
& HINIC_EQE_RUNNING
)
138 hwe_cb
->hwe_handler
= NULL
;
142 * hinic_ceq_register_cb - register CEQ callback for specific event
143 * @ceqs: pointer to Completion eqs part of the chip
144 * @event: ceq event to register callback for it
145 * @handle: private data will be used by the callback
146 * @handler: callback function
148 void hinic_ceq_register_cb(struct hinic_ceqs
*ceqs
,
149 enum hinic_ceq_type event
, void *handle
,
150 void (*handler
)(void *handle
, u32 ceqe_data
))
152 struct hinic_ceq_cb
*ceq_cb
= &ceqs
->ceq_cb
[event
];
154 ceq_cb
->handler
= handler
;
155 ceq_cb
->handle
= handle
;
156 ceq_cb
->ceqe_state
= HINIC_EQE_ENABLED
;
160 * hinic_ceq_unregister_cb - unregister the CEQ callback for specific event
161 * @ceqs: pointer to Completion eqs part of the chip
162 * @event: ceq event to unregister callback for it
164 void hinic_ceq_unregister_cb(struct hinic_ceqs
*ceqs
,
165 enum hinic_ceq_type event
)
167 struct hinic_ceq_cb
*ceq_cb
= &ceqs
->ceq_cb
[event
];
169 ceq_cb
->ceqe_state
&= ~HINIC_EQE_ENABLED
;
171 while (ceq_cb
->ceqe_state
& HINIC_EQE_RUNNING
)
174 ceq_cb
->handler
= NULL
;
177 static u8
eq_cons_idx_checksum_set(u32 val
)
182 for (idx
= 0; idx
< 32; idx
+= 4)
183 checksum
^= ((val
>> idx
) & 0xF);
185 return (checksum
& 0xF);
189 * eq_update_ci - update the HW cons idx of event queue
190 * @eq: the event queue to update the cons idx for
191 * @arm_state: the arm bit value of eq's interrupt
193 static void eq_update_ci(struct hinic_eq
*eq
, u32 arm_state
)
195 u32 val
, addr
= EQ_CONS_IDX_REG_ADDR(eq
);
197 /* Read Modify Write */
198 val
= hinic_hwif_read_reg(eq
->hwif
, addr
);
200 val
= HINIC_EQ_CI_CLEAR(val
, IDX
) &
201 HINIC_EQ_CI_CLEAR(val
, WRAPPED
) &
202 HINIC_EQ_CI_CLEAR(val
, INT_ARMED
) &
203 HINIC_EQ_CI_CLEAR(val
, XOR_CHKSUM
);
205 val
|= HINIC_EQ_CI_SET(eq
->cons_idx
, IDX
) |
206 HINIC_EQ_CI_SET(eq
->wrapped
, WRAPPED
) |
207 HINIC_EQ_CI_SET(arm_state
, INT_ARMED
);
209 val
|= HINIC_EQ_CI_SET(eq_cons_idx_checksum_set(val
), XOR_CHKSUM
);
211 hinic_hwif_write_reg(eq
->hwif
, addr
, val
);
215 * aeq_irq_handler - handler for the AEQ event
216 * @eq: the Async Event Queue that received the event
218 static void aeq_irq_handler(struct hinic_eq
*eq
)
220 struct hinic_aeqs
*aeqs
= aeq_to_aeqs(eq
);
221 struct hinic_hwif
*hwif
= aeqs
->hwif
;
222 struct pci_dev
*pdev
= hwif
->pdev
;
223 struct hinic_aeq_elem
*aeqe_curr
;
224 struct hinic_hw_event_cb
*hwe_cb
;
225 enum hinic_aeq_type event
;
226 unsigned long eqe_state
;
230 for (i
= 0; i
< eq
->q_len
; i
++) {
231 aeqe_curr
= GET_CURR_AEQ_ELEM(eq
);
233 /* Data in HW is in Big endian Format */
234 aeqe_desc
= be32_to_cpu(aeqe_curr
->desc
);
236 /* HW toggles the wrapped bit, when it adds eq element */
237 if (HINIC_EQ_ELEM_DESC_GET(aeqe_desc
, WRAPPED
) == eq
->wrapped
)
242 event
= HINIC_EQ_ELEM_DESC_GET(aeqe_desc
, TYPE
);
243 if (event
>= HINIC_MAX_AEQ_EVENTS
) {
244 dev_err(&pdev
->dev
, "Unknown AEQ Event %d\n", event
);
248 if (!HINIC_EQ_ELEM_DESC_GET(aeqe_desc
, SRC
)) {
249 hwe_cb
= &aeqs
->hwe_cb
[event
];
251 size
= HINIC_EQ_ELEM_DESC_GET(aeqe_desc
, SIZE
);
253 eqe_state
= cmpxchg(&hwe_cb
->hwe_state
,
257 if ((eqe_state
== HINIC_EQE_ENABLED
) &&
258 (hwe_cb
->hwe_handler
))
259 hwe_cb
->hwe_handler(hwe_cb
->handle
,
260 aeqe_curr
->data
, size
);
262 dev_err(&pdev
->dev
, "Unhandled AEQ Event %d\n",
265 hwe_cb
->hwe_state
&= ~HINIC_EQE_RUNNING
;
270 if (eq
->cons_idx
== eq
->q_len
) {
272 eq
->wrapped
= !eq
->wrapped
;
278 * ceq_event_handler - handler for the ceq events
279 * @ceqs: ceqs part of the chip
280 * @ceqe: ceq element that describes the event
282 static void ceq_event_handler(struct hinic_ceqs
*ceqs
, u32 ceqe
)
284 struct hinic_hwif
*hwif
= ceqs
->hwif
;
285 struct pci_dev
*pdev
= hwif
->pdev
;
286 struct hinic_ceq_cb
*ceq_cb
;
287 enum hinic_ceq_type event
;
288 unsigned long eqe_state
;
290 event
= CEQE_TYPE(ceqe
);
291 if (event
>= HINIC_MAX_CEQ_EVENTS
) {
292 dev_err(&pdev
->dev
, "Unknown CEQ event, event = %d\n", event
);
296 ceq_cb
= &ceqs
->ceq_cb
[event
];
298 eqe_state
= cmpxchg(&ceq_cb
->ceqe_state
,
300 HINIC_EQE_ENABLED
| HINIC_EQE_RUNNING
);
302 if ((eqe_state
== HINIC_EQE_ENABLED
) && (ceq_cb
->handler
))
303 ceq_cb
->handler(ceq_cb
->handle
, CEQE_DATA(ceqe
));
305 dev_err(&pdev
->dev
, "Unhandled CEQ Event %d\n", event
);
307 ceq_cb
->ceqe_state
&= ~HINIC_EQE_RUNNING
;
311 * ceq_irq_handler - handler for the CEQ event
312 * @eq: the Completion Event Queue that received the event
314 static void ceq_irq_handler(struct hinic_eq
*eq
)
316 struct hinic_ceqs
*ceqs
= ceq_to_ceqs(eq
);
320 for (i
= 0; i
< eq
->q_len
; i
++) {
321 ceqe
= *(GET_CURR_CEQ_ELEM(eq
));
323 /* Data in HW is in Big endian Format */
324 ceqe
= be32_to_cpu(ceqe
);
326 /* HW toggles the wrapped bit, when it adds eq element event */
327 if (HINIC_EQ_ELEM_DESC_GET(ceqe
, WRAPPED
) == eq
->wrapped
)
330 ceq_event_handler(ceqs
, ceqe
);
334 if (eq
->cons_idx
== eq
->q_len
) {
336 eq
->wrapped
= !eq
->wrapped
;
342 * eq_irq_handler - handler for the EQ event
343 * @data: the Event Queue that received the event
345 static void eq_irq_handler(void *data
)
347 struct hinic_eq
*eq
= data
;
349 if (eq
->type
== HINIC_AEQ
)
351 else if (eq
->type
== HINIC_CEQ
)
354 eq_update_ci(eq
, EQ_ARMED
);
358 * eq_irq_work - the work of the EQ that received the event
359 * @work: the work struct that is associated with the EQ
361 static void eq_irq_work(struct work_struct
*work
)
363 struct hinic_eq_work
*aeq_work
= work_to_aeq_work(work
);
364 struct hinic_eq
*aeq
;
366 aeq
= aeq_work
->data
;
/**
 * ceq_tasklet - the tasklet of the EQ that received the event
 * @t: the tasklet struct pointer
 **/
static void ceq_tasklet(struct tasklet_struct *t)
{
	struct hinic_eq *ceq = from_tasklet(ceq, t, ceq_tasklet);

	eq_irq_handler(ceq);
}
382 * aeq_interrupt - aeq interrupt handler
384 * @data: the Async Event Queue that collected the event
386 static irqreturn_t
aeq_interrupt(int irq
, void *data
)
388 struct hinic_eq_work
*aeq_work
;
389 struct hinic_eq
*aeq
= data
;
390 struct hinic_aeqs
*aeqs
;
392 /* clear resend timer cnt register */
393 hinic_msix_attr_cnt_clear(aeq
->hwif
, aeq
->msix_entry
.entry
);
395 aeq_work
= &aeq
->aeq_work
;
396 aeq_work
->data
= aeq
;
398 aeqs
= aeq_to_aeqs(aeq
);
399 queue_work(aeqs
->workq
, &aeq_work
->work
);
405 * ceq_interrupt - ceq interrupt handler
407 * @data: the Completion Event Queue that collected the event
409 static irqreturn_t
ceq_interrupt(int irq
, void *data
)
411 struct hinic_eq
*ceq
= data
;
413 /* clear resend timer cnt register */
414 hinic_msix_attr_cnt_clear(ceq
->hwif
, ceq
->msix_entry
.entry
);
416 tasklet_schedule(&ceq
->ceq_tasklet
);
421 static u32
get_ctrl0_val(struct hinic_eq
*eq
, u32 addr
)
423 struct msix_entry
*msix_entry
= &eq
->msix_entry
;
424 enum hinic_eq_type type
= eq
->type
;
427 if (type
== HINIC_AEQ
) {
429 addr
= HINIC_CSR_AEQ_CTRL_0_ADDR(eq
->q_id
);
431 val
= hinic_hwif_read_reg(eq
->hwif
, addr
);
433 val
= HINIC_AEQ_CTRL_0_CLEAR(val
, INT_IDX
) &
434 HINIC_AEQ_CTRL_0_CLEAR(val
, DMA_ATTR
) &
435 HINIC_AEQ_CTRL_0_CLEAR(val
, PCI_INTF_IDX
) &
436 HINIC_AEQ_CTRL_0_CLEAR(val
, INT_MODE
);
438 ctrl0
= HINIC_AEQ_CTRL_0_SET(msix_entry
->entry
, INT_IDX
) |
439 HINIC_AEQ_CTRL_0_SET(DMA_ATTR_AEQ_DEFAULT
, DMA_ATTR
) |
440 HINIC_AEQ_CTRL_0_SET(HINIC_HWIF_PCI_INTF(eq
->hwif
),
442 HINIC_AEQ_CTRL_0_SET(EQ_INT_MODE_ARMED
, INT_MODE
);
447 addr
= HINIC_CSR_CEQ_CTRL_0_ADDR(eq
->q_id
);
449 val
= hinic_hwif_read_reg(eq
->hwif
, addr
);
451 val
= HINIC_CEQ_CTRL_0_CLEAR(val
, INTR_IDX
) &
452 HINIC_CEQ_CTRL_0_CLEAR(val
, DMA_ATTR
) &
453 HINIC_CEQ_CTRL_0_CLEAR(val
, KICK_THRESH
) &
454 HINIC_CEQ_CTRL_0_CLEAR(val
, PCI_INTF_IDX
) &
455 HINIC_CEQ_CTRL_0_CLEAR(val
, INTR_MODE
);
457 ctrl0
= HINIC_CEQ_CTRL_0_SET(msix_entry
->entry
, INTR_IDX
) |
458 HINIC_CEQ_CTRL_0_SET(DMA_ATTR_CEQ_DEFAULT
, DMA_ATTR
) |
459 HINIC_CEQ_CTRL_0_SET(THRESH_CEQ_DEFAULT
, KICK_THRESH
) |
460 HINIC_CEQ_CTRL_0_SET(HINIC_HWIF_PCI_INTF(eq
->hwif
),
462 HINIC_CEQ_CTRL_0_SET(EQ_INT_MODE_ARMED
, INTR_MODE
);
469 static void set_ctrl0(struct hinic_eq
*eq
)
473 if (eq
->type
== HINIC_AEQ
)
474 addr
= HINIC_CSR_AEQ_CTRL_0_ADDR(eq
->q_id
);
476 addr
= HINIC_CSR_CEQ_CTRL_0_ADDR(eq
->q_id
);
478 val
= get_ctrl0_val(eq
, addr
);
480 hinic_hwif_write_reg(eq
->hwif
, addr
, val
);
483 static u32
get_ctrl1_val(struct hinic_eq
*eq
, u32 addr
)
485 u32 page_size_val
, elem_size
, val
, ctrl1
;
486 enum hinic_eq_type type
= eq
->type
;
488 if (type
== HINIC_AEQ
) {
490 addr
= HINIC_CSR_AEQ_CTRL_1_ADDR(eq
->q_id
);
492 page_size_val
= EQ_SET_HW_PAGE_SIZE_VAL(eq
);
493 elem_size
= EQ_SET_HW_ELEM_SIZE_VAL(eq
);
495 val
= hinic_hwif_read_reg(eq
->hwif
, addr
);
497 val
= HINIC_AEQ_CTRL_1_CLEAR(val
, LEN
) &
498 HINIC_AEQ_CTRL_1_CLEAR(val
, ELEM_SIZE
) &
499 HINIC_AEQ_CTRL_1_CLEAR(val
, PAGE_SIZE
);
501 ctrl1
= HINIC_AEQ_CTRL_1_SET(eq
->q_len
, LEN
) |
502 HINIC_AEQ_CTRL_1_SET(elem_size
, ELEM_SIZE
) |
503 HINIC_AEQ_CTRL_1_SET(page_size_val
, PAGE_SIZE
);
508 addr
= HINIC_CSR_CEQ_CTRL_1_ADDR(eq
->q_id
);
510 page_size_val
= EQ_SET_HW_PAGE_SIZE_VAL(eq
);
512 val
= hinic_hwif_read_reg(eq
->hwif
, addr
);
514 val
= HINIC_CEQ_CTRL_1_CLEAR(val
, LEN
) &
515 HINIC_CEQ_CTRL_1_CLEAR(val
, PAGE_SIZE
);
517 ctrl1
= HINIC_CEQ_CTRL_1_SET(eq
->q_len
, LEN
) |
518 HINIC_CEQ_CTRL_1_SET(page_size_val
, PAGE_SIZE
);
525 static void set_ctrl1(struct hinic_eq
*eq
)
529 if (eq
->type
== HINIC_AEQ
)
530 addr
= HINIC_CSR_AEQ_CTRL_1_ADDR(eq
->q_id
);
532 addr
= HINIC_CSR_CEQ_CTRL_1_ADDR(eq
->q_id
);
534 val
= get_ctrl1_val(eq
, addr
);
536 hinic_hwif_write_reg(eq
->hwif
, addr
, val
);
539 static int set_ceq_ctrl_reg(struct hinic_eq
*eq
)
541 struct hinic_ceq_ctrl_reg ceq_ctrl
= {0};
542 struct hinic_hwdev
*hwdev
= eq
->hwdev
;
543 u16 out_size
= sizeof(ceq_ctrl
);
544 u16 in_size
= sizeof(ceq_ctrl
);
545 struct hinic_pfhwdev
*pfhwdev
;
549 pfhwdev
= container_of(hwdev
, struct hinic_pfhwdev
, hwdev
);
551 addr
= HINIC_CSR_CEQ_CTRL_0_ADDR(eq
->q_id
);
552 ceq_ctrl
.ctrl0
= get_ctrl0_val(eq
, addr
);
553 addr
= HINIC_CSR_CEQ_CTRL_1_ADDR(eq
->q_id
);
554 ceq_ctrl
.ctrl1
= get_ctrl1_val(eq
, addr
);
556 ceq_ctrl
.func_id
= HINIC_HWIF_FUNC_IDX(hwdev
->hwif
);
557 ceq_ctrl
.q_id
= eq
->q_id
;
559 err
= hinic_msg_to_mgmt(&pfhwdev
->pf_to_mgmt
, HINIC_MOD_COMM
,
560 HINIC_COMM_CMD_CEQ_CTRL_REG_WR_BY_UP
,
562 &ceq_ctrl
, &out_size
, HINIC_MGMT_MSG_SYNC
);
563 if (err
|| !out_size
|| ceq_ctrl
.status
) {
564 dev_err(&hwdev
->hwif
->pdev
->dev
,
565 "Failed to set ceq %d ctrl reg, err: %d status: 0x%x, out_size: 0x%x\n",
566 eq
->q_id
, err
, ceq_ctrl
.status
, out_size
);
574 * set_eq_ctrls - setting eq's ctrl registers
575 * @eq: the Event Queue for setting
577 static int set_eq_ctrls(struct hinic_eq
*eq
)
579 if (HINIC_IS_VF(eq
->hwif
) && eq
->type
== HINIC_CEQ
)
580 return set_ceq_ctrl_reg(eq
);
588 * aeq_elements_init - initialize all the elements in the aeq
589 * @eq: the Async Event Queue
590 * @init_val: value to initialize the elements with it
592 static void aeq_elements_init(struct hinic_eq
*eq
, u32 init_val
)
594 struct hinic_aeq_elem
*aeqe
;
597 for (i
= 0; i
< eq
->q_len
; i
++) {
598 aeqe
= GET_AEQ_ELEM(eq
, i
);
599 aeqe
->desc
= cpu_to_be32(init_val
);
602 wmb(); /* Write the initilzation values */
606 * ceq_elements_init - Initialize all the elements in the ceq
607 * @eq: the event queue
608 * @init_val: value to init with it the elements
610 static void ceq_elements_init(struct hinic_eq
*eq
, u32 init_val
)
615 for (i
= 0; i
< eq
->q_len
; i
++) {
616 ceqe
= GET_CEQ_ELEM(eq
, i
);
617 *(ceqe
) = cpu_to_be32(init_val
);
620 wmb(); /* Write the initilzation values */
624 * alloc_eq_pages - allocate the pages for the queue
625 * @eq: the event queue
627 * Return 0 - Success, Negative - Failure
629 static int alloc_eq_pages(struct hinic_eq
*eq
)
631 struct hinic_hwif
*hwif
= eq
->hwif
;
632 struct pci_dev
*pdev
= hwif
->pdev
;
633 u32 init_val
, addr
, val
;
637 addr_size
= eq
->num_pages
* sizeof(*eq
->dma_addr
);
638 eq
->dma_addr
= devm_kzalloc(&pdev
->dev
, addr_size
, GFP_KERNEL
);
642 addr_size
= eq
->num_pages
* sizeof(*eq
->virt_addr
);
643 eq
->virt_addr
= devm_kzalloc(&pdev
->dev
, addr_size
, GFP_KERNEL
);
644 if (!eq
->virt_addr
) {
646 goto err_virt_addr_alloc
;
649 for (pg
= 0; pg
< eq
->num_pages
; pg
++) {
650 eq
->virt_addr
[pg
] = dma_alloc_coherent(&pdev
->dev
,
654 if (!eq
->virt_addr
[pg
]) {
659 addr
= EQ_HI_PHYS_ADDR_REG(eq
, pg
);
660 val
= upper_32_bits(eq
->dma_addr
[pg
]);
662 hinic_hwif_write_reg(hwif
, addr
, val
);
664 addr
= EQ_LO_PHYS_ADDR_REG(eq
, pg
);
665 val
= lower_32_bits(eq
->dma_addr
[pg
]);
667 hinic_hwif_write_reg(hwif
, addr
, val
);
670 init_val
= HINIC_EQ_ELEM_DESC_SET(eq
->wrapped
, WRAPPED
);
672 if (eq
->type
== HINIC_AEQ
)
673 aeq_elements_init(eq
, init_val
);
674 else if (eq
->type
== HINIC_CEQ
)
675 ceq_elements_init(eq
, init_val
);
681 dma_free_coherent(&pdev
->dev
, eq
->page_size
,
685 devm_kfree(&pdev
->dev
, eq
->virt_addr
);
688 devm_kfree(&pdev
->dev
, eq
->dma_addr
);
693 * free_eq_pages - free the pages of the queue
694 * @eq: the Event Queue
696 static void free_eq_pages(struct hinic_eq
*eq
)
698 struct hinic_hwif
*hwif
= eq
->hwif
;
699 struct pci_dev
*pdev
= hwif
->pdev
;
702 for (pg
= 0; pg
< eq
->num_pages
; pg
++)
703 dma_free_coherent(&pdev
->dev
, eq
->page_size
,
707 devm_kfree(&pdev
->dev
, eq
->virt_addr
);
708 devm_kfree(&pdev
->dev
, eq
->dma_addr
);
712 * init_eq - initialize Event Queue
713 * @eq: the event queue
714 * @hwif: the HW interface of a PCI function device
715 * @type: the type of the event queue, aeq or ceq
716 * @q_id: Queue id number
717 * @q_len: the number of EQ elements
718 * @page_size: the page size of the pages in the event queue
719 * @entry: msix entry associated with the event queue
721 * Return 0 - Success, Negative - Failure
723 static int init_eq(struct hinic_eq
*eq
, struct hinic_hwif
*hwif
,
724 enum hinic_eq_type type
, int q_id
, u32 q_len
, u32 page_size
,
725 struct msix_entry entry
)
727 struct pci_dev
*pdev
= hwif
->pdev
;
734 eq
->page_size
= page_size
;
736 /* Clear PI and CI, also clear the ARM bit */
737 hinic_hwif_write_reg(eq
->hwif
, EQ_CONS_IDX_REG_ADDR(eq
), 0);
738 hinic_hwif_write_reg(eq
->hwif
, EQ_PROD_IDX_REG_ADDR(eq
), 0);
743 if (type
== HINIC_AEQ
) {
744 eq
->elem_size
= HINIC_AEQE_SIZE
;
745 } else if (type
== HINIC_CEQ
) {
746 eq
->elem_size
= HINIC_CEQE_SIZE
;
748 dev_err(&pdev
->dev
, "Invalid EQ type\n");
752 eq
->num_pages
= GET_EQ_NUM_PAGES(eq
, page_size
);
753 eq
->num_elem_in_pg
= GET_EQ_NUM_ELEMS_IN_PG(eq
, page_size
);
755 eq
->msix_entry
= entry
;
757 if (eq
->num_elem_in_pg
& (eq
->num_elem_in_pg
- 1)) {
758 dev_err(&pdev
->dev
, "num elements in eq page != power of 2\n");
762 if (eq
->num_pages
> EQ_MAX_PAGES
) {
763 dev_err(&pdev
->dev
, "too many pages for eq\n");
767 err
= set_eq_ctrls(eq
);
769 dev_err(&pdev
->dev
, "Failed to set eq ctrls\n");
773 eq_update_ci(eq
, EQ_ARMED
);
775 err
= alloc_eq_pages(eq
);
777 dev_err(&pdev
->dev
, "Failed to allocate pages for eq\n");
781 if (type
== HINIC_AEQ
) {
782 struct hinic_eq_work
*aeq_work
= &eq
->aeq_work
;
784 INIT_WORK(&aeq_work
->work
, eq_irq_work
);
785 } else if (type
== HINIC_CEQ
) {
786 tasklet_setup(&eq
->ceq_tasklet
, ceq_tasklet
);
789 /* set the attributes of the msix entry */
790 hinic_msix_attr_set(eq
->hwif
, eq
->msix_entry
.entry
,
791 HINIC_EQ_MSIX_PENDING_LIMIT_DEFAULT
,
792 HINIC_EQ_MSIX_COALESC_TIMER_DEFAULT
,
793 HINIC_EQ_MSIX_LLI_TIMER_DEFAULT
,
794 HINIC_EQ_MSIX_LLI_CREDIT_LIMIT_DEFAULT
,
795 HINIC_EQ_MSIX_RESEND_TIMER_DEFAULT
);
797 if (type
== HINIC_AEQ
) {
798 snprintf(eq
->irq_name
, sizeof(eq
->irq_name
), "hinic_aeq%d@pci:%s", eq
->q_id
,
800 err
= request_irq(entry
.vector
, aeq_interrupt
, 0, eq
->irq_name
, eq
);
801 } else if (type
== HINIC_CEQ
) {
802 snprintf(eq
->irq_name
, sizeof(eq
->irq_name
), "hinic_ceq%d@pci:%s", eq
->q_id
,
804 err
= request_irq(entry
.vector
, ceq_interrupt
, 0, eq
->irq_name
, eq
);
808 dev_err(&pdev
->dev
, "Failed to request irq for the EQ\n");
820 * remove_eq - remove Event Queue
821 * @eq: the event queue
823 static void remove_eq(struct hinic_eq
*eq
)
825 hinic_set_msix_state(eq
->hwif
, eq
->msix_entry
.entry
,
827 free_irq(eq
->msix_entry
.vector
, eq
);
829 if (eq
->type
== HINIC_AEQ
) {
830 struct hinic_eq_work
*aeq_work
= &eq
->aeq_work
;
832 cancel_work_sync(&aeq_work
->work
);
833 /* clear aeq_len to avoid hw access host memory */
834 hinic_hwif_write_reg(eq
->hwif
,
835 HINIC_CSR_AEQ_CTRL_1_ADDR(eq
->q_id
), 0);
836 } else if (eq
->type
== HINIC_CEQ
) {
837 tasklet_kill(&eq
->ceq_tasklet
);
838 /* clear ceq_len to avoid hw access host memory */
839 hinic_hwif_write_reg(eq
->hwif
,
840 HINIC_CSR_CEQ_CTRL_1_ADDR(eq
->q_id
), 0);
843 /* update cons_idx to avoid invalid interrupt */
844 eq
->cons_idx
= hinic_hwif_read_reg(eq
->hwif
, EQ_PROD_IDX_REG_ADDR(eq
));
845 eq_update_ci(eq
, EQ_NOT_ARMED
);
851 * hinic_aeqs_init - initialize all the aeqs
852 * @aeqs: pointer to Async eqs of the chip
853 * @hwif: the HW interface of a PCI function device
854 * @num_aeqs: number of AEQs
855 * @q_len: number of EQ elements
856 * @page_size: the page size of the pages in the event queue
857 * @msix_entries: msix entries associated with the event queues
859 * Return 0 - Success, negative - Failure
861 int hinic_aeqs_init(struct hinic_aeqs
*aeqs
, struct hinic_hwif
*hwif
,
862 int num_aeqs
, u32 q_len
, u32 page_size
,
863 struct msix_entry
*msix_entries
)
865 struct pci_dev
*pdev
= hwif
->pdev
;
868 aeqs
->workq
= create_singlethread_workqueue(HINIC_EQS_WQ_NAME
);
873 aeqs
->num_aeqs
= num_aeqs
;
875 for (q_id
= 0; q_id
< num_aeqs
; q_id
++) {
876 err
= init_eq(&aeqs
->aeq
[q_id
], hwif
, HINIC_AEQ
, q_id
, q_len
,
877 page_size
, msix_entries
[q_id
]);
879 dev_err(&pdev
->dev
, "Failed to init aeq %d\n", q_id
);
887 for (i
= 0; i
< q_id
; i
++)
888 remove_eq(&aeqs
->aeq
[i
]);
890 destroy_workqueue(aeqs
->workq
);
895 * hinic_aeqs_free - free all the aeqs
896 * @aeqs: pointer to Async eqs of the chip
898 void hinic_aeqs_free(struct hinic_aeqs
*aeqs
)
902 for (q_id
= 0; q_id
< aeqs
->num_aeqs
; q_id
++)
903 remove_eq(&aeqs
->aeq
[q_id
]);
905 destroy_workqueue(aeqs
->workq
);
909 * hinic_ceqs_init - init all the ceqs
910 * @ceqs: ceqs part of the chip
911 * @hwif: the hardware interface of a pci function device
912 * @num_ceqs: number of CEQs
913 * @q_len: number of EQ elements
914 * @page_size: the page size of the event queue
915 * @msix_entries: msix entries associated with the event queues
917 * Return 0 - Success, Negative - Failure
919 int hinic_ceqs_init(struct hinic_ceqs
*ceqs
, struct hinic_hwif
*hwif
,
920 int num_ceqs
, u32 q_len
, u32 page_size
,
921 struct msix_entry
*msix_entries
)
923 struct pci_dev
*pdev
= hwif
->pdev
;
927 ceqs
->num_ceqs
= num_ceqs
;
929 for (q_id
= 0; q_id
< num_ceqs
; q_id
++) {
930 ceqs
->ceq
[q_id
].hwdev
= ceqs
->hwdev
;
931 err
= init_eq(&ceqs
->ceq
[q_id
], hwif
, HINIC_CEQ
, q_id
, q_len
,
932 page_size
, msix_entries
[q_id
]);
934 dev_err(&pdev
->dev
, "Failed to init ceq %d\n", q_id
);
942 for (i
= 0; i
< q_id
; i
++)
943 remove_eq(&ceqs
->ceq
[i
]);
949 * hinic_ceqs_free - free all the ceqs
950 * @ceqs: ceqs part of the chip
952 void hinic_ceqs_free(struct hinic_ceqs
*ceqs
)
956 for (q_id
= 0; q_id
< ceqs
->num_ceqs
; q_id
++)
957 remove_eq(&ceqs
->ceq
[q_id
]);
960 void hinic_dump_ceq_info(struct hinic_hwdev
*hwdev
)
962 struct hinic_eq
*eq
= NULL
;
966 for (q_id
= 0; q_id
< hwdev
->func_to_io
.ceqs
.num_ceqs
; q_id
++) {
967 eq
= &hwdev
->func_to_io
.ceqs
.ceq
[q_id
];
968 addr
= EQ_CONS_IDX_REG_ADDR(eq
);
969 ci
= hinic_hwif_read_reg(hwdev
->hwif
, addr
);
970 addr
= EQ_PROD_IDX_REG_ADDR(eq
);
971 pi
= hinic_hwif_read_reg(hwdev
->hwif
, addr
);
972 dev_err(&hwdev
->hwif
->pdev
->dev
, "Ceq id: %d, ci: 0x%08x, sw_ci: 0x%08x, pi: 0x%x, tasklet_state: 0x%lx, wrap: %d, ceqe: 0x%x\n",
973 q_id
, ci
, eq
->cons_idx
, pi
,
974 eq
->ceq_tasklet
.state
,
975 eq
->wrapped
, be32_to_cpu(*(__be32
*)(GET_CURR_CEQ_ELEM(eq
))));
979 void hinic_dump_aeq_info(struct hinic_hwdev
*hwdev
)
981 struct hinic_aeq_elem
*aeqe_pos
= NULL
;
982 struct hinic_eq
*eq
= NULL
;
986 for (q_id
= 0; q_id
< hwdev
->aeqs
.num_aeqs
; q_id
++) {
987 eq
= &hwdev
->aeqs
.aeq
[q_id
];
988 addr
= EQ_CONS_IDX_REG_ADDR(eq
);
989 ci
= hinic_hwif_read_reg(hwdev
->hwif
, addr
);
990 addr
= EQ_PROD_IDX_REG_ADDR(eq
);
991 pi
= hinic_hwif_read_reg(hwdev
->hwif
, addr
);
992 aeqe_pos
= GET_CURR_AEQ_ELEM(eq
);
993 dev_err(&hwdev
->hwif
->pdev
->dev
, "Aeq id: %d, ci: 0x%08x, pi: 0x%x, work_state: 0x%x, wrap: %d, desc: 0x%x\n",
994 q_id
, ci
, pi
, work_busy(&eq
->aeq_work
.work
),
995 eq
->wrapped
, be32_to_cpu(aeqe_pos
->desc
));