// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause

/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
/*          Kai Shen <kaishen@linux.alibaba.com> */
/* Copyright (c) 2020-2022, Alibaba Group. */

#include "erdma_verbs.h"

#define MAX_POLL_CHUNK_SIZE 16

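/*
 * Ring the EQ doorbell with the current consumer index and the ARM bit
 * set. The value is mirrored into the doorbell record (eq->dbrec) in
 * host memory first, presumably so the device can fetch the latest CI
 * by DMA instead of relying solely on the MMIO write.
 */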
void notify_eq(struct erdma_eq *eq)
{
	u64 db_data = FIELD_PREP(ERDMA_EQDB_CI_MASK, eq->ci) |
		      FIELD_PREP(ERDMA_EQDB_ARM_MASK, 1);

	*eq->dbrec = db_data;
	writeq(db_data, eq->db);

	atomic64_inc(&eq->notify_num);
}

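/*
 * EQEs carry an owner (phase) bit that the hardware flips on each pass
 * over the ring. Since the depth is a power of two, (ci & depth) is the
 * parity of the number of completed wraps; an entry is valid only while
 * its owner bit disagrees with that parity, so stale entries from the
 * previous pass are never returned.
 */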
void *get_next_valid_eqe(struct erdma_eq *eq)
{
	u64 *eqe = get_queue_entry(eq->qbuf, eq->ci, eq->depth, EQE_SHIFT);
	u32 owner = FIELD_GET(ERDMA_CEQE_HDR_O_MASK, READ_ONCE(*eqe));

	return owner ^ !!(eq->ci & eq->depth) ? eqe : NULL;
}

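/*
 * Poll the async EQ for at most MAX_POLL_CHUNK_SIZE entries per
 * invocation. CQ-error AEQEs are forwarded to the CQ's event handler as
 * IB_EVENT_CQ_ERR; all other async events are treated as fatal for the
 * referenced QP. The dma_rmb() orders the payload reads against the
 * owner-bit check in get_next_valid_eqe().
 */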
void erdma_aeq_event_handler(struct erdma_dev *dev)
{
	struct erdma_aeqe *aeqe;
	u32 cqn, qpn;
	struct erdma_qp *qp;
	struct erdma_cq *cq;
	struct ib_event event;
	u32 poll_cnt = 0;

	memset(&event, 0, sizeof(event));

	while (poll_cnt < MAX_POLL_CHUNK_SIZE) {
		aeqe = get_next_valid_eqe(&dev->aeq);
		if (!aeqe)
			break;

		dma_rmb();

		dev->aeq.ci++;
		atomic64_inc(&dev->aeq.event_num);
		poll_cnt++;

		if (FIELD_GET(ERDMA_AEQE_HDR_TYPE_MASK,
			      le32_to_cpu(aeqe->hdr)) == ERDMA_AE_TYPE_CQ_ERR) {
			cqn = le32_to_cpu(aeqe->event_data0);
			cq = find_cq_by_cqn(dev, cqn);
			if (!cq)
				continue;

			event.device = cq->ibcq.device;
			event.element.cq = &cq->ibcq;
			event.event = IB_EVENT_CQ_ERR;
			if (cq->ibcq.event_handler)
				cq->ibcq.event_handler(&event,
						       cq->ibcq.cq_context);
		} else {
			qpn = le32_to_cpu(aeqe->event_data0);
			qp = find_qp_by_qpn(dev, qpn);
			if (!qp)
				continue;

			event.device = qp->ibqp.device;
			event.element.qp = &qp->ibqp;
			event.event = IB_EVENT_QP_FATAL;
			if (qp->ibqp.event_handler)
				qp->ibqp.event_handler(&event,
						       qp->ibqp.qp_context);
		}
	}

	notify_eq(&dev->aeq);
}

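/*
 * Common EQ setup: a DMA-coherent ring of depth entries plus a doorbell
 * record allocated from the device's doorbell-record pool.
 */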
int erdma_eq_common_init(struct erdma_dev *dev, struct erdma_eq *eq, u32 depth)
{
	u32 buf_size = depth << EQE_SHIFT;

	eq->qbuf = dma_alloc_coherent(&dev->pdev->dev, buf_size,
				      &eq->qbuf_dma_addr, GFP_KERNEL);
	if (!eq->qbuf)
		return -ENOMEM;

	eq->dbrec = dma_pool_zalloc(dev->db_pool, GFP_KERNEL, &eq->dbrec_dma);
	if (!eq->dbrec)
		goto err_free_qbuf;

	spin_lock_init(&eq->lock);
	atomic64_set(&eq->event_num, 0);
	atomic64_set(&eq->notify_num, 0);
	eq->ci = 0;
	eq->depth = depth;

	return 0;

err_free_qbuf:
	dma_free_coherent(&dev->pdev->dev, buf_size, eq->qbuf,
			  eq->qbuf_dma_addr);

	return -ENOMEM;
}

void erdma_eq_destroy(struct erdma_dev *dev, struct erdma_eq *eq)
{
	dma_pool_free(dev->db_pool, eq->dbrec, eq->dbrec_dma);
	dma_free_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT, eq->qbuf,
			  eq->qbuf_dma_addr);
}

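/*
 * The AEQ is configured directly through BAR registers (base address,
 * depth, doorbell-record address), unlike the CEQs, which are created
 * via a CREATE_EQ command on the command queue.
 */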
int erdma_aeq_init(struct erdma_dev *dev)
{
	struct erdma_eq *eq = &dev->aeq;
	int ret;

	ret = erdma_eq_common_init(dev, &dev->aeq, ERDMA_DEFAULT_EQ_DEPTH);
	if (ret)
		return ret;

	eq->db = dev->func_bar + ERDMA_REGS_AEQ_DB_REG;

	erdma_reg_write32(dev, ERDMA_REGS_AEQ_ADDR_H_REG,
			  upper_32_bits(eq->qbuf_dma_addr));
	erdma_reg_write32(dev, ERDMA_REGS_AEQ_ADDR_L_REG,
			  lower_32_bits(eq->qbuf_dma_addr));
	erdma_reg_write32(dev, ERDMA_REGS_AEQ_DEPTH_REG, eq->depth);
	erdma_reg_write64(dev, ERDMA_AEQ_DB_HOST_ADDR_REG, eq->dbrec_dma);

	return 0;
}

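/*
 * Bottom half for a completion EQ: pop up to MAX_POLL_CHUNK_SIZE CEQEs,
 * look up the CQ named in each entry, and invoke its completion
 * handler. For kernel CQs the cmdsn counter is bumped as well; it
 * appears to sequence subsequent CQ doorbell writes.
 */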
void erdma_ceq_completion_handler(struct erdma_eq_cb *ceq_cb)
{
	struct erdma_dev *dev = ceq_cb->dev;
	struct erdma_cq *cq;
	u32 poll_cnt = 0;
	u64 *ceqe;
	int cqn;

	if (!ceq_cb->ready)
		return;

	while (poll_cnt < MAX_POLL_CHUNK_SIZE) {
		ceqe = get_next_valid_eqe(&ceq_cb->eq);
		if (!ceqe)
			break;

		dma_rmb();
		ceq_cb->eq.ci++;
		poll_cnt++;
		cqn = FIELD_GET(ERDMA_CEQE_HDR_CQN_MASK, READ_ONCE(*ceqe));

		cq = find_cq_by_cqn(dev, cqn);
		if (!cq)
			continue;

		if (rdma_is_kernel_res(&cq->ibcq.res))
			cq->kern_cq.cmdsn++;

		if (cq->ibcq.comp_handler)
			cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
	}

	notify_eq(&ceq_cb->eq);
}

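/*
 * Hard IRQ handler: all real work is deferred to the per-CEQ tasklet,
 * so the interrupt context does nothing but schedule it.
 */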
static irqreturn_t erdma_intr_ceq_handler(int irq, void *data)
{
	struct erdma_eq_cb *ceq_cb = data;

	tasklet_schedule(&ceq_cb->tasklet);

	return IRQ_HANDLED;
}

static void erdma_intr_ceq_task(unsigned long data)
{
	erdma_ceq_completion_handler((struct erdma_eq_cb *)data);
}

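/*
 * Hook CEQ ceqn up to MSI-X vector ceqn + 1 (vector 0 belongs to the
 * CMDQ EQ, per the "CEQ indexed from 1" convention below) and spread
 * the affinity hints across CPUs local to the device's NUMA node.
 */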
static int erdma_set_ceq_irq(struct erdma_dev *dev, u16 ceqn)
{
	struct erdma_eq_cb *eqc = &dev->ceqs[ceqn];
	int err;

	snprintf(eqc->irq.name, ERDMA_IRQNAME_SIZE, "erdma-ceq%u@pci:%s", ceqn,
		 pci_name(dev->pdev));
	eqc->irq.msix_vector = pci_irq_vector(dev->pdev, ceqn + 1);

	tasklet_init(&dev->ceqs[ceqn].tasklet, erdma_intr_ceq_task,
		     (unsigned long)&dev->ceqs[ceqn]);

	cpumask_set_cpu(cpumask_local_spread(ceqn + 1, dev->attrs.numa_node),
			&eqc->irq.affinity_hint_mask);

	err = request_irq(eqc->irq.msix_vector, erdma_intr_ceq_handler, 0,
			  eqc->irq.name, eqc);
	if (err) {
		dev_err(&dev->pdev->dev, "failed to request_irq(%d)\n", err);
		return err;
	}

	irq_set_affinity_hint(eqc->irq.msix_vector,
			      &eqc->irq.affinity_hint_mask);

	return 0;
}

static void erdma_free_ceq_irq(struct erdma_dev *dev, u16 ceqn)
{
	struct erdma_eq_cb *eqc = &dev->ceqs[ceqn];

	irq_set_affinity_hint(eqc->irq.msix_vector, NULL);
	free_irq(eqc->irq.msix_vector, eqc);
}

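/*
 * Issue a synchronous CREATE_EQ command for a CEQ. The depth is passed
 * as log2, and the doorbell-record DMA address is split into 32-bit
 * halves as the command layout requires.
 */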
static int create_eq_cmd(struct erdma_dev *dev, u32 eqn, struct erdma_eq *eq)
{
	struct erdma_cmdq_create_eq_req req;

	erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_COMMON,
				CMDQ_OPCODE_CREATE_EQ);
	req.eqn = eqn;
	req.depth = ilog2(eq->depth);
	req.qbuf_addr = eq->qbuf_dma_addr;
	req.qtype = ERDMA_EQ_TYPE_CEQ;
	/* Vector index is the same as EQN. */
	req.vector_idx = eqn;
	req.db_dma_addr_l = lower_32_bits(eq->dbrec_dma);
	req.db_dma_addr_h = upper_32_bits(eq->dbrec_dma);

	return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
}

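/*
 * Allocate and register one CEQ. Hardware EQN ceqn + 1 is used because
 * EQ 0 is the CMDQ EQ, which is also why the doorbell sits (ceqn + 1)
 * ERDMA_DB_SIZE slots past the CEQ doorbell base.
 */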
static int erdma_ceq_init_one(struct erdma_dev *dev, u16 ceqn)
{
	struct erdma_eq *eq = &dev->ceqs[ceqn].eq;
	int ret;

	ret = erdma_eq_common_init(dev, eq, ERDMA_DEFAULT_EQ_DEPTH);
	if (ret)
		return ret;

	eq->db = dev->func_bar + ERDMA_REGS_CEQ_DB_BASE_REG +
		 (ceqn + 1) * ERDMA_DB_SIZE;
	dev->ceqs[ceqn].dev = dev;
	dev->ceqs[ceqn].ready = true;

	/* CEQ indexed from 1, 0 rsvd for CMDQ-EQ. */
	ret = create_eq_cmd(dev, ceqn + 1, eq);
	if (ret) {
		erdma_eq_destroy(dev, eq);
		dev->ceqs[ceqn].ready = false;
	}

	return ret;
}

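/*
 * Tear down one CEQ via a DESTROY_EQ command. The queue memory is freed
 * only if the command succeeds; on failure the buffers are left
 * allocated, presumably to avoid handing memory back while the device
 * may still DMA into it.
 */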
static void erdma_ceq_uninit_one(struct erdma_dev *dev, u16 ceqn)
{
	struct erdma_eq *eq = &dev->ceqs[ceqn].eq;
	struct erdma_cmdq_destroy_eq_req req;
	int err;

	dev->ceqs[ceqn].ready = 0;

	erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_COMMON,
				CMDQ_OPCODE_DESTROY_EQ);
	/* CEQ indexed from 1, 0 rsvd for CMDQ-EQ. */
	req.eqn = ceqn + 1;
	req.qtype = ERDMA_EQ_TYPE_CEQ;
	req.vector_idx = ceqn + 1;

	err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
	if (err)
		return;

	erdma_eq_destroy(dev, eq);
}

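/*
 * Create one CEQ per remaining MSI-X vector (attrs.irq_num - 1, since
 * vector 0 serves the CMDQ EQ). On failure, every CEQ initialized so
 * far is unwound in order.
 */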
int erdma_ceqs_init(struct erdma_dev *dev)
{
	u32 i, j;
	int err;

	for (i = 0; i < dev->attrs.irq_num - 1; i++) {
		err = erdma_ceq_init_one(dev, i);
		if (err)
			goto out_err;

		err = erdma_set_ceq_irq(dev, i);
		if (err) {
			erdma_ceq_uninit_one(dev, i);
			goto out_err;
		}
	}

	return 0;

out_err:
	for (j = 0; j < i; j++) {
		erdma_free_ceq_irq(dev, j);
		erdma_ceq_uninit_one(dev, j);
	}

	return err;
}

void erdma_ceqs_uninit(struct erdma_dev *dev)
{
	u32 i;

	for (i = 0; i < dev->attrs.irq_num - 1; i++) {
		erdma_free_ceq_irq(dev, i);
		erdma_ceq_uninit_one(dev, i);
	}
}