// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause

/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
/*          Kai Shen <kaishen@linux.alibaba.com> */
/* Copyright (c) 2020-2022, Alibaba Group. */

#include "erdma_verbs.h"

#define MAX_POLL_CHUNK_SIZE 16
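
/*
 * Ring the EQ doorbell with the current consumer index and the ARM bit
 * set, so the device raises another event once new EQEs arrive. The
 * doorbell value is mirrored to the host-memory doorbell record first.
 */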
void notify_eq(struct erdma_eq *eq)
{
	u64 db_data = FIELD_PREP(ERDMA_EQDB_CI_MASK, eq->ci) |
		      FIELD_PREP(ERDMA_EQDB_ARM_MASK, 1);

	*eq->dbrec = db_data;
	writeq(db_data, eq->db);

	atomic64_inc(&eq->notify_num);
}
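
/*
 * Return the next EQE if its owner bit matches the current pass over
 * the ring, or NULL if hardware has not written it yet. The expected
 * owner value flips each time the consumer index wraps around.
 */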
void *get_next_valid_eqe(struct erdma_eq *eq)
{
	u64 *eqe = get_queue_entry(eq->qbuf, eq->ci, eq->depth, EQE_SHIFT);
	u32 owner = FIELD_GET(ERDMA_CEQE_HDR_O_MASK, READ_ONCE(*eqe));

	return owner ^ !!(eq->ci & eq->depth) ? eqe : NULL;
}
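
/*
 * Poll the async event queue and dispatch events to the ibverbs event
 * handlers: CQ errors are reported against the affected CQ, all other
 * event types as a fatal error on the affected QP. At most
 * MAX_POLL_CHUNK_SIZE entries are consumed before the EQ is re-armed.
 */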
void erdma_aeq_event_handler(struct erdma_dev *dev)
{
	struct erdma_aeqe *aeqe;
	u32 cqn, qpn;
	struct erdma_qp *qp;
	struct erdma_cq *cq;
	struct ib_event event;
	u32 poll_cnt = 0;

	memset(&event, 0, sizeof(event));

	while (poll_cnt < MAX_POLL_CHUNK_SIZE) {
		aeqe = get_next_valid_eqe(&dev->aeq);
		if (!aeqe)
			break;

		dma_rmb();

		dev->aeq.ci++;
		atomic64_inc(&dev->aeq.event_num);
		poll_cnt++;

		if (FIELD_GET(ERDMA_AEQE_HDR_TYPE_MASK,
			      le32_to_cpu(aeqe->hdr)) == ERDMA_AE_TYPE_CQ_ERR) {
			cqn = le32_to_cpu(aeqe->event_data0);
			cq = find_cq_by_cqn(dev, cqn);
			if (!cq)
				continue;

			event.device = cq->ibcq.device;
			event.element.cq = &cq->ibcq;
			event.event = IB_EVENT_CQ_ERR;
			if (cq->ibcq.event_handler)
				cq->ibcq.event_handler(&event,
						       cq->ibcq.cq_context);
		} else {
			qpn = le32_to_cpu(aeqe->event_data0);
			qp = find_qp_by_qpn(dev, qpn);
			if (!qp)
				continue;

			event.device = qp->ibqp.device;
			event.element.qp = &qp->ibqp;
			event.event = IB_EVENT_QP_FATAL;
			if (qp->ibqp.event_handler)
				qp->ibqp.event_handler(&event,
						       qp->ibqp.qp_context);
		}
	}

	notify_eq(&dev->aeq);
}
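
/*
 * Allocate the DMA-coherent queue buffer and the doorbell record of an
 * event queue and reset its counters. Programming the addresses into
 * the device is left to the caller.
 */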
int erdma_eq_common_init(struct erdma_dev *dev, struct erdma_eq *eq, u32 depth)
{
	u32 buf_size = depth << EQE_SHIFT;

	eq->qbuf = dma_alloc_coherent(&dev->pdev->dev, buf_size,
				      &eq->qbuf_dma_addr, GFP_KERNEL);
	if (!eq->qbuf)
		return -ENOMEM;

	eq->dbrec = dma_pool_zalloc(dev->db_pool, GFP_KERNEL, &eq->dbrec_dma);
	if (!eq->dbrec)
		goto err_free_qbuf;

	spin_lock_init(&eq->lock);
	atomic64_set(&eq->event_num, 0);
	atomic64_set(&eq->notify_num, 0);
	eq->ci = 0;
	eq->depth = depth;

	return 0;

err_free_qbuf:
	dma_free_coherent(&dev->pdev->dev, buf_size, eq->qbuf,
			  eq->qbuf_dma_addr);

	return -ENOMEM;
}
void erdma_eq_destroy(struct erdma_dev *dev, struct erdma_eq *eq)
{
	dma_pool_free(dev->db_pool, eq->dbrec, eq->dbrec_dma);
	dma_free_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT, eq->qbuf,
			  eq->qbuf_dma_addr);
}
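
/* Allocate the AEQ and program its address, depth and doorbell record. */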
int erdma_aeq_init(struct erdma_dev *dev)
{
	struct erdma_eq *eq = &dev->aeq;
	int ret;

	ret = erdma_eq_common_init(dev, &dev->aeq, ERDMA_DEFAULT_EQ_DEPTH);
	if (ret)
		return ret;

	eq->db = dev->func_bar + ERDMA_REGS_AEQ_DB_REG;

	erdma_reg_write32(dev, ERDMA_REGS_AEQ_ADDR_H_REG,
			  upper_32_bits(eq->qbuf_dma_addr));
	erdma_reg_write32(dev, ERDMA_REGS_AEQ_ADDR_L_REG,
			  lower_32_bits(eq->qbuf_dma_addr));
	erdma_reg_write32(dev, ERDMA_REGS_AEQ_DEPTH_REG, eq->depth);
	erdma_reg_write64(dev, ERDMA_AEQ_DB_HOST_ADDR_REG, eq->dbrec_dma);

	return 0;
}
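
/*
 * Poll a completion EQ and invoke the completion handler of every CQ
 * referenced by a valid CEQE, then re-arm the EQ.
 */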
void erdma_ceq_completion_handler(struct erdma_eq_cb *ceq_cb)
{
	struct erdma_dev *dev = ceq_cb->dev;
	u32 poll_cnt = 0;
	u64 *ceqe;
	int cqn;
	struct erdma_cq *cq;

	if (!ceq_cb->ready)
		return;

	while (poll_cnt < MAX_POLL_CHUNK_SIZE) {
		ceqe = get_next_valid_eqe(&ceq_cb->eq);
		if (!ceqe)
			break;

		dma_rmb();
		ceq_cb->eq.ci++;
		poll_cnt++;
		cqn = FIELD_GET(ERDMA_CEQE_HDR_CQN_MASK, READ_ONCE(*ceqe));

		cq = find_cq_by_cqn(dev, cqn);
		if (!cq)
			continue;

		if (rdma_is_kernel_res(&cq->ibcq.res))
			cq->kern_cq.cmdsn++;

		if (cq->ibcq.comp_handler)
			cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
	}

	notify_eq(&ceq_cb->eq);
}
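
/* The hard IRQ handler only schedules the tasklet that polls the CEQ. */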
static irqreturn_t erdma_intr_ceq_handler(int irq, void *data)
{
	struct erdma_eq_cb *ceq_cb = data;

	tasklet_schedule(&ceq_cb->tasklet);

	return IRQ_HANDLED;
}
static void erdma_intr_ceq_task(unsigned long data)
{
	erdma_ceq_completion_handler((struct erdma_eq_cb *)data);
}
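
/*
 * Request the MSI-X vector of a CEQ and give it a NUMA-local affinity
 * hint; CEQ n uses vector n + 1, as vector 0 serves the cmdq/AEQ path.
 */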
static int erdma_set_ceq_irq(struct erdma_dev *dev, u16 ceqn)
{
	struct erdma_eq_cb *eqc = &dev->ceqs[ceqn];
	int err;

	snprintf(eqc->irq.name, ERDMA_IRQNAME_SIZE, "erdma-ceq%u@pci:%s", ceqn,
		 pci_name(dev->pdev));
	eqc->irq.msix_vector = pci_irq_vector(dev->pdev, ceqn + 1);

	tasklet_init(&dev->ceqs[ceqn].tasklet, erdma_intr_ceq_task,
		     (unsigned long)&dev->ceqs[ceqn]);

	cpumask_set_cpu(cpumask_local_spread(ceqn + 1, dev->attrs.numa_node),
			&eqc->irq.affinity_hint_mask);

	err = request_irq(eqc->irq.msix_vector, erdma_intr_ceq_handler, 0,
			  eqc->irq.name, eqc);
	if (err) {
		dev_err(&dev->pdev->dev, "failed to request_irq(%d)\n", err);
		return err;
	}

	irq_set_affinity_hint(eqc->irq.msix_vector,
			      &eqc->irq.affinity_hint_mask);

	return 0;
}
static void erdma_free_ceq_irq(struct erdma_dev *dev, u16 ceqn)
{
	struct erdma_eq_cb *eqc = &dev->ceqs[ceqn];

	irq_set_affinity_hint(eqc->irq.msix_vector, NULL);
	free_irq(eqc->irq.msix_vector, eqc);
}
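
/*
 * Tell the device about a newly allocated CEQ via a CREATE_EQ command:
 * log2 depth, queue buffer address and doorbell record address.
 */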
static int create_eq_cmd(struct erdma_dev *dev, u32 eqn, struct erdma_eq *eq)
{
	struct erdma_cmdq_create_eq_req req;

	erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_COMMON,
				CMDQ_OPCODE_CREATE_EQ);
	req.eqn = eqn;
	req.depth = ilog2(eq->depth);
	req.qbuf_addr = eq->qbuf_dma_addr;
	req.qtype = ERDMA_EQ_TYPE_CEQ;
	/* Vector index is the same as EQN. */
	req.vector_idx = eqn;
	req.db_dma_addr_l = lower_32_bits(eq->dbrec_dma);
	req.db_dma_addr_h = upper_32_bits(eq->dbrec_dma);

	return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
}
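
/*
 * Allocate one CEQ, point it at its doorbell page and register it with
 * the device.
 */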
static int erdma_ceq_init_one(struct erdma_dev *dev, u16 ceqn)
{
	struct erdma_eq *eq = &dev->ceqs[ceqn].eq;
	int ret;

	ret = erdma_eq_common_init(dev, eq, ERDMA_DEFAULT_EQ_DEPTH);
	if (ret)
		return ret;

	eq->db = dev->func_bar + ERDMA_REGS_CEQ_DB_BASE_REG +
		 (ceqn + 1) * ERDMA_DB_SIZE;
	dev->ceqs[ceqn].dev = dev;
	dev->ceqs[ceqn].ready = true;

	/* CEQ indexed from 1, 0 rsvd for CMDQ-EQ. */
	ret = create_eq_cmd(dev, ceqn + 1, eq);
	if (ret) {
		erdma_eq_destroy(dev, eq);
		dev->ceqs[ceqn].ready = false;
	}

	return ret;
}
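
/*
 * Destroy a CEQ in the device, then free its host memory. If the
 * DESTROY_EQ command fails, the buffers are kept rather than freed,
 * since the device may still write into them.
 */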
static void erdma_ceq_uninit_one(struct erdma_dev *dev, u16 ceqn)
{
	struct erdma_eq *eq = &dev->ceqs[ceqn].eq;
	struct erdma_cmdq_destroy_eq_req req;
	int err;

	dev->ceqs[ceqn].ready = 0;

	erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_COMMON,
				CMDQ_OPCODE_DESTROY_EQ);
	/* CEQ indexed from 1, 0 rsvd for CMDQ-EQ. */
	req.eqn = ceqn + 1;
	req.qtype = ERDMA_EQ_TYPE_CEQ;
	req.vector_idx = ceqn + 1;

	err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
	if (err)
		return;

	erdma_eq_destroy(dev, eq);
}
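
/*
 * Bring up one CEQ per completion vector (irq_num - 1; the remaining
 * vector serves the AEQ and cmdq), unwinding on failure.
 */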
int erdma_ceqs_init(struct erdma_dev *dev)
{
	u32 i, j;
	int err;

	for (i = 0; i < dev->attrs.irq_num - 1; i++) {
		err = erdma_ceq_init_one(dev, i);
		if (err)
			goto out_err;

		err = erdma_set_ceq_irq(dev, i);
		if (err) {
			erdma_ceq_uninit_one(dev, i);
			goto out_err;
		}
	}

	return 0;

out_err:
	for (j = 0; j < i; j++) {
		erdma_free_ceq_irq(dev, j);
		erdma_ceq_uninit_one(dev, j);
	}

	return err;
}
void erdma_ceqs_uninit(struct erdma_dev *dev)
{
	u32 i;

	for (i = 0; i < dev->attrs.irq_num - 1; i++) {
		erdma_free_ceq_irq(dev, i);
		erdma_ceq_uninit_one(dev, i);
	}
}