drivers/infiniband/hw/bnxt_re/qplib_fp.c
/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: Fast Path Operators
 */
#define dev_fmt(fmt) "QPLIB: " fmt

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/prefetch.h>
#include <linux/if_ether.h>

#include "roce_hsi.h"

#include "qplib_res.h"
#include "qplib_rcfw.h"
#include "qplib_sp.h"
#include "qplib_fp.h"

static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);
static void bnxt_qplib_cancel_phantom_processing(struct bnxt_qplib_qp *qp)
{
	qp->sq.condition = false;
	qp->sq.send_phantom = false;
	qp->sq.single = false;
}

/* Flush list */
static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_cq *scq, *rcq;

	scq = qp->scq;
	rcq = qp->rcq;

	if (!qp->sq.flushed) {
		dev_dbg(&scq->hwq.pdev->dev,
			"FP: Adding to SQ Flush list = %p\n", qp);
		bnxt_qplib_cancel_phantom_processing(qp);
		list_add_tail(&qp->sq_flush, &scq->sqf_head);
		qp->sq.flushed = true;
	}
	if (!qp->srq) {
		if (!qp->rq.flushed) {
			dev_dbg(&rcq->hwq.pdev->dev,
				"FP: Adding to RQ Flush list = %p\n", qp);
			list_add_tail(&qp->rq_flush, &rcq->rqf_head);
			qp->rq.flushed = true;
		}
	}
}
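
/*
 * Both CQ flush locks must be held while moving a QP on or off the flush
 * lists. The send CQ lock is always taken first; when the QP uses the
 * same CQ for send and receive, the second lock is only "taken" for
 * sparse's benefit via __acquire()/__release() so the annotations on
 * these helpers stay balanced.
 */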
static void bnxt_qplib_acquire_cq_flush_locks(struct bnxt_qplib_qp *qp,
					      unsigned long *flags)
	__acquires(&qp->scq->flush_lock) __acquires(&qp->rcq->flush_lock)
{
	spin_lock_irqsave(&qp->scq->flush_lock, *flags);
	if (qp->scq == qp->rcq)
		__acquire(&qp->rcq->flush_lock);
	else
		spin_lock(&qp->rcq->flush_lock);
}

static void bnxt_qplib_release_cq_flush_locks(struct bnxt_qplib_qp *qp,
					      unsigned long *flags)
	__releases(&qp->scq->flush_lock) __releases(&qp->rcq->flush_lock)
{
	if (qp->scq == qp->rcq)
		__release(&qp->rcq->flush_lock);
	else
		spin_unlock(&qp->rcq->flush_lock);
	spin_unlock_irqrestore(&qp->scq->flush_lock, *flags);
}

void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
{
	unsigned long flags;

	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
	__bnxt_qplib_add_flush_qp(qp);
	bnxt_qplib_release_cq_flush_locks(qp, &flags);
}

static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
{
	if (qp->sq.flushed) {
		qp->sq.flushed = false;
		list_del(&qp->sq_flush);
	}
	if (!qp->srq) {
		if (qp->rq.flushed) {
			qp->rq.flushed = false;
			list_del(&qp->rq_flush);
		}
	}
}

void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp)
{
	unsigned long flags;

	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
	__clean_cq(qp->scq, (u64)(unsigned long)qp);
	qp->sq.hwq.prod = 0;
	qp->sq.hwq.cons = 0;
	__clean_cq(qp->rcq, (u64)(unsigned long)qp);
	qp->rq.hwq.prod = 0;
	qp->rq.hwq.cons = 0;

	__bnxt_qplib_del_flush_qp(qp);
	bnxt_qplib_release_cq_flush_locks(qp, &flags);
}

static void bnxt_qpn_cqn_sched_task(struct work_struct *work)
{
	struct bnxt_qplib_nq_work *nq_work =
			container_of(work, struct bnxt_qplib_nq_work, work);

	struct bnxt_qplib_cq *cq = nq_work->cq;
	struct bnxt_qplib_nq *nq = nq_work->nq;

	if (cq && nq) {
		spin_lock_bh(&cq->compl_lock);
		if (atomic_read(&cq->arm_state) && nq->cqn_handler) {
			dev_dbg(&nq->pdev->dev,
				"%s:Trigger cq = %p event nq = %p\n",
				__func__, cq, nq);
			nq->cqn_handler(nq, cq);
		}
		spin_unlock_bh(&cq->compl_lock);
	}
	kfree(nq_work);
}

static void bnxt_qplib_free_qp_hdr_buf(struct bnxt_qplib_res *res,
				       struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	struct bnxt_qplib_q *sq = &qp->sq;

	if (qp->rq_hdr_buf)
		dma_free_coherent(&res->pdev->dev,
				  rq->hwq.max_elements * qp->rq_hdr_buf_size,
				  qp->rq_hdr_buf, qp->rq_hdr_buf_map);
	if (qp->sq_hdr_buf)
		dma_free_coherent(&res->pdev->dev,
				  sq->hwq.max_elements * qp->sq_hdr_buf_size,
				  qp->sq_hdr_buf, qp->sq_hdr_buf_map);
	qp->rq_hdr_buf = NULL;
	qp->sq_hdr_buf = NULL;
	qp->rq_hdr_buf_map = 0;
	qp->sq_hdr_buf_map = 0;
	qp->sq_hdr_buf_size = 0;
	qp->rq_hdr_buf_size = 0;
}

static int bnxt_qplib_alloc_qp_hdr_buf(struct bnxt_qplib_res *res,
				       struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	struct bnxt_qplib_q *sq = &qp->sq;
	int rc = 0;

	if (qp->sq_hdr_buf_size && sq->hwq.max_elements) {
		qp->sq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
					sq->hwq.max_elements *
					qp->sq_hdr_buf_size,
					&qp->sq_hdr_buf_map, GFP_KERNEL);
		if (!qp->sq_hdr_buf) {
			rc = -ENOMEM;
			dev_err(&res->pdev->dev,
				"Failed to create sq_hdr_buf\n");
			goto fail;
		}
	}

	if (qp->rq_hdr_buf_size && rq->hwq.max_elements) {
		qp->rq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
						    rq->hwq.max_elements *
						    qp->rq_hdr_buf_size,
						    &qp->rq_hdr_buf_map,
						    GFP_KERNEL);
		if (!qp->rq_hdr_buf) {
			rc = -ENOMEM;
			dev_err(&res->pdev->dev,
				"Failed to create rq_hdr_buf\n");
			goto fail;
		}
	}
	return 0;

fail:
	bnxt_qplib_free_qp_hdr_buf(res, qp);
	return rc;
}
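
/*
 * Scan the NQ for notification entries that still point at @cq and
 * neutralize them: once the CQ is being torn down, the CQ handle in any
 * outstanding NQE is zeroed so later NQ processing cannot dereference a
 * freed CQ. Each matching entry is counted in cq->cnq_events, which
 * __wait_for_all_nqes() below uses to decide when the NQ has drained.
 */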
static void clean_nq(struct bnxt_qplib_nq *nq, struct bnxt_qplib_cq *cq)
{
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	struct nq_base *nqe, **nq_ptr;
	int budget = nq->budget;
	u32 sw_cons, raw_cons;
	uintptr_t q_handle;
	u16 type;

	spin_lock_bh(&hwq->lock);
	/* Service the NQ until empty */
	raw_cons = hwq->cons;
	while (budget--) {
		sw_cons = HWQ_CMP(raw_cons, hwq);
		nq_ptr = (struct nq_base **)hwq->pbl_ptr;
		nqe = &nq_ptr[NQE_PG(sw_cons)][NQE_IDX(sw_cons)];
		if (!NQE_CMP_VALID(nqe, raw_cons, hwq->max_elements))
			break;

		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();

		type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
		switch (type) {
		case NQ_BASE_TYPE_CQ_NOTIFICATION:
		{
			struct nq_cn *nqcne = (struct nq_cn *)nqe;

			q_handle = le32_to_cpu(nqcne->cq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
						     << 32;
			if ((unsigned long)cq == q_handle) {
				nqcne->cq_handle_low = 0;
				nqcne->cq_handle_high = 0;
				cq->cnq_events++;
			}
			break;
		}
		default:
			break;
		}
		raw_cons++;
	}
	spin_unlock_bh(&hwq->lock);
}

/* Wait for receiving all NQEs for this CQ and clean the NQEs associated with
 * this CQ.
 */
static void __wait_for_all_nqes(struct bnxt_qplib_cq *cq, u16 cnq_events)
{
	u32 retry_cnt = 100;

	while (retry_cnt--) {
		if (cnq_events == cq->cnq_events)
			return;
		usleep_range(50, 100);
		clean_nq(cq->nq, cq);
	}
}
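
/*
 * NQ servicing runs in tasklet context. Each pass consumes at most
 * nq->budget entries; for every entry the valid bit is checked first and
 * dma_rmb() orders that check against reading the rest of the NQE. CQ
 * notifications are dispatched through nq->cqn_handler() with the CQ's
 * completion lock held, SRQ events through nq->srqn_handler(). The NQ
 * doorbell is only rung again if the consumer index actually advanced.
 */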
static void bnxt_qplib_service_nq(unsigned long data)
{
	struct bnxt_qplib_nq *nq = (struct bnxt_qplib_nq *)data;
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	struct nq_base *nqe, **nq_ptr;
	struct bnxt_qplib_cq *cq;
	int num_cqne_processed = 0;
	int num_srqne_processed = 0;
	int budget = nq->budget;
	u32 sw_cons, raw_cons;
	uintptr_t q_handle;
	u16 type;

	spin_lock_bh(&hwq->lock);
	/* Service the NQ until empty */
	raw_cons = hwq->cons;
	while (budget--) {
		sw_cons = HWQ_CMP(raw_cons, hwq);
		nq_ptr = (struct nq_base **)hwq->pbl_ptr;
		nqe = &nq_ptr[NQE_PG(sw_cons)][NQE_IDX(sw_cons)];
		if (!NQE_CMP_VALID(nqe, raw_cons, hwq->max_elements))
			break;

		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();

		type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
		switch (type) {
		case NQ_BASE_TYPE_CQ_NOTIFICATION:
		{
			struct nq_cn *nqcne = (struct nq_cn *)nqe;

			q_handle = le32_to_cpu(nqcne->cq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
						     << 32;
			cq = (struct bnxt_qplib_cq *)(unsigned long)q_handle;
			if (!cq)
				break;
			bnxt_qplib_armen_db(&cq->dbinfo,
					    DBC_DBC_TYPE_CQ_ARMENA);
			spin_lock_bh(&cq->compl_lock);
			atomic_set(&cq->arm_state, 0);
			if (!nq->cqn_handler(nq, (cq)))
				num_cqne_processed++;
			else
				dev_warn(&nq->pdev->dev,
					 "cqn - type 0x%x not handled\n", type);
			cq->cnq_events++;
			spin_unlock_bh(&cq->compl_lock);
			break;
		}
		case NQ_BASE_TYPE_SRQ_EVENT:
		{
			struct bnxt_qplib_srq *srq;
			struct nq_srq_event *nqsrqe =
						(struct nq_srq_event *)nqe;

			q_handle = le32_to_cpu(nqsrqe->srq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqsrqe->srq_handle_high)
				     << 32;
			srq = (struct bnxt_qplib_srq *)q_handle;
			bnxt_qplib_armen_db(&srq->dbinfo,
					    DBC_DBC_TYPE_SRQ_ARMENA);
			if (!nq->srqn_handler(nq,
					      (struct bnxt_qplib_srq *)q_handle,
					      nqsrqe->event))
				num_srqne_processed++;
			else
				dev_warn(&nq->pdev->dev,
					 "SRQ event 0x%x not handled\n",
					 nqsrqe->event);
			break;
		}
		case NQ_BASE_TYPE_DBQ_EVENT:
			break;
		default:
			dev_warn(&nq->pdev->dev,
				 "nqe with type = 0x%x not handled\n", type);
			break;
		}
		raw_cons++;
	}
	if (hwq->cons != raw_cons) {
		hwq->cons = raw_cons;
		bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, true);
	}
	spin_unlock_bh(&hwq->lock);
}
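
/*
 * Hard-IRQ half of NQ handling: only prefetch the next entry to warm the
 * cache and kick the tasklet; all real work happens in
 * bnxt_qplib_service_nq() above.
 */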
static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
{
	struct bnxt_qplib_nq *nq = dev_instance;
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	struct nq_base **nq_ptr;
	u32 sw_cons;

	/* Prefetch the NQ element */
	sw_cons = HWQ_CMP(hwq->cons, hwq);
	nq_ptr = (struct nq_base **)nq->hwq.pbl_ptr;
	prefetch(&nq_ptr[NQE_PG(sw_cons)][NQE_IDX(sw_cons)]);

	/* Fan out to CPU affinitized kthreads? */
	tasklet_schedule(&nq->nq_tasklet);

	return IRQ_HANDLED;
}

void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill)
{
	tasklet_disable(&nq->nq_tasklet);
	/* Mask h/w interrupt */
	bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, false);
	/* Sync with last running IRQ handler */
	synchronize_irq(nq->msix_vec);
	if (kill)
		tasklet_kill(&nq->nq_tasklet);
	if (nq->requested) {
		irq_set_affinity_hint(nq->msix_vec, NULL);
		free_irq(nq->msix_vec, nq);
		nq->requested = false;
	}
}
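
/*
 * Teardown order matters here: destroy the CQN workqueue first (which
 * flushes pending work) so nothing can reschedule notifications, then
 * stop the IRQ and kill the tasklet, and only then unmap the doorbell
 * region.
 */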
void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
{
	if (nq->cqn_wq) {
		destroy_workqueue(nq->cqn_wq);
		nq->cqn_wq = NULL;
	}

	/* Make sure the HW is stopped! */
	bnxt_qplib_nq_stop_irq(nq, true);

	if (nq->nq_db.reg.bar_reg) {
		iounmap(nq->nq_db.reg.bar_reg);
		nq->nq_db.reg.bar_reg = NULL;
	}

	nq->cqn_handler = NULL;
	nq->srqn_handler = NULL;
	nq->msix_vec = 0;
}

int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
			    int msix_vector, bool need_init)
{
	int rc;

	if (nq->requested)
		return -EFAULT;

	nq->msix_vec = msix_vector;
	if (need_init)
		tasklet_init(&nq->nq_tasklet, bnxt_qplib_service_nq,
			     (unsigned long)nq);
	else
		tasklet_enable(&nq->nq_tasklet);

	snprintf(nq->name, sizeof(nq->name), "bnxt_qplib_nq-%d", nq_indx);
	rc = request_irq(nq->msix_vec, bnxt_qplib_nq_irq, 0, nq->name, nq);
	if (rc)
		return rc;

	cpumask_clear(&nq->mask);
	cpumask_set_cpu(nq_indx, &nq->mask);
	rc = irq_set_affinity_hint(nq->msix_vec, &nq->mask);
	if (rc) {
		dev_warn(&nq->pdev->dev,
			 "set affinity failed; vector: %d nq_idx: %d\n",
			 nq->msix_vec, nq_indx);
	}
	nq->requested = true;
	bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, true);

	return rc;
}

static int bnxt_qplib_map_nq_db(struct bnxt_qplib_nq *nq, u32 reg_offt)
{
	resource_size_t reg_base;
	struct bnxt_qplib_nq_db *nq_db;
	struct pci_dev *pdev;
	int rc = 0;

	pdev = nq->pdev;
	nq_db = &nq->nq_db;

	nq_db->reg.bar_id = NQ_CONS_PCI_BAR_REGION;
	nq_db->reg.bar_base = pci_resource_start(pdev, nq_db->reg.bar_id);
	if (!nq_db->reg.bar_base) {
		dev_err(&pdev->dev, "QPLIB: NQ BAR region %d resc start is 0!",
			nq_db->reg.bar_id);
		rc = -ENOMEM;
		goto fail;
	}

	reg_base = nq_db->reg.bar_base + reg_offt;
	/* Unconditionally map 8 bytes to support 57500 series */
	nq_db->reg.len = 8;
	nq_db->reg.bar_reg = ioremap(reg_base, nq_db->reg.len);
	if (!nq_db->reg.bar_reg) {
		dev_err(&pdev->dev, "QPLIB: NQ BAR region %d mapping failed",
			nq_db->reg.bar_id);
		rc = -ENOMEM;
		goto fail;
	}

	nq_db->dbinfo.db = nq_db->reg.bar_reg;
	nq_db->dbinfo.hwq = &nq->hwq;
	nq_db->dbinfo.xid = nq->ring_id;
fail:
	return rc;
}

int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
			 int nq_idx, int msix_vector, int bar_reg_offset,
			 cqn_handler_t cqn_handler,
			 srqn_handler_t srqn_handler)
{
	int rc = -1;

	nq->pdev = pdev;
	nq->cqn_handler = cqn_handler;
	nq->srqn_handler = srqn_handler;

	/* Have a task to schedule CQ notifiers in post send case */
	nq->cqn_wq = create_singlethread_workqueue("bnxt_qplib_nq");
	if (!nq->cqn_wq)
		return -ENOMEM;

	rc = bnxt_qplib_map_nq_db(nq, bar_reg_offset);
	if (rc)
		goto fail;

	rc = bnxt_qplib_nq_start_irq(nq, nq_idx, msix_vector, true);
	if (rc) {
		dev_err(&nq->pdev->dev,
			"Failed to request irq for nq-idx %d\n", nq_idx);
		goto fail;
	}

	return 0;
fail:
	bnxt_qplib_disable_nq(nq);
	return rc;
}

void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq)
{
	if (nq->hwq.max_elements) {
		bnxt_qplib_free_hwq(nq->res, &nq->hwq);
		nq->hwq.max_elements = 0;
	}
}

int bnxt_qplib_alloc_nq(struct bnxt_qplib_res *res, struct bnxt_qplib_nq *nq)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};

	nq->pdev = res->pdev;
	nq->res = res;
	if (!nq->hwq.max_elements ||
	    nq->hwq.max_elements > BNXT_QPLIB_NQE_MAX_CNT)
		nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;

	sginfo.pgsize = PAGE_SIZE;
	sginfo.pgshft = PAGE_SHIFT;
	hwq_attr.res = res;
	hwq_attr.sginfo = &sginfo;
	hwq_attr.depth = nq->hwq.max_elements;
	hwq_attr.stride = sizeof(struct nq_base);
	hwq_attr.type = bnxt_qplib_get_hwq_type(nq->res);
	if (bnxt_qplib_alloc_init_hwq(&nq->hwq, &hwq_attr)) {
		dev_err(&nq->pdev->dev, "FP NQ allocation failed");
		return -ENOMEM;
	}
	nq->budget = 8;
	return 0;
}

/* SRQ */
void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
			    struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_destroy_srq req;
	struct creq_destroy_srq_resp resp;
	u16 cmd_flags = 0;
	int rc;

	RCFW_CMD_PREP(req, DESTROY_SRQ, cmd_flags);

	/* Configure the request */
	req.srq_cid = cpu_to_le32(srq->id);

	rc = bnxt_qplib_rcfw_send_message(rcfw, (struct cmdq_base *)&req,
					  (struct creq_base *)&resp, NULL, 0);
	kfree(srq->swq);
	if (rc)
		return;
	bnxt_qplib_free_hwq(res, &srq->hwq);
}

int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct creq_create_srq_resp resp;
	struct cmdq_create_srq req;
	struct bnxt_qplib_pbl *pbl;
	u16 cmd_flags = 0;
	int rc, idx;

	hwq_attr.res = res;
	hwq_attr.sginfo = &srq->sg_info;
	hwq_attr.depth = srq->max_wqe;
	hwq_attr.stride = BNXT_QPLIB_MAX_RQE_ENTRY_SIZE;
	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&srq->hwq, &hwq_attr);
	if (rc)
		goto exit;

	srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
			   GFP_KERNEL);
	if (!srq->swq) {
		rc = -ENOMEM;
		goto fail;
	}

	RCFW_CMD_PREP(req, CREATE_SRQ, cmd_flags);

	/* Configure the request */
	req.dpi = cpu_to_le32(srq->dpi->dpi);
	req.srq_handle = cpu_to_le64((uintptr_t)srq);

	req.srq_size = cpu_to_le16((u16)srq->hwq.max_elements);
	pbl = &srq->hwq.pbl[PBL_LVL_0];
	req.pg_size_lvl = cpu_to_le16((((u16)srq->hwq.level &
				      CMDQ_CREATE_SRQ_LVL_MASK) <<
				      CMDQ_CREATE_SRQ_LVL_SFT) |
				      (pbl->pg_size == ROCE_PG_SIZE_4K ?
				       CMDQ_CREATE_SRQ_PG_SIZE_PG_4K :
				       pbl->pg_size == ROCE_PG_SIZE_8K ?
				       CMDQ_CREATE_SRQ_PG_SIZE_PG_8K :
				       pbl->pg_size == ROCE_PG_SIZE_64K ?
				       CMDQ_CREATE_SRQ_PG_SIZE_PG_64K :
				       pbl->pg_size == ROCE_PG_SIZE_2M ?
				       CMDQ_CREATE_SRQ_PG_SIZE_PG_2M :
				       pbl->pg_size == ROCE_PG_SIZE_8M ?
				       CMDQ_CREATE_SRQ_PG_SIZE_PG_8M :
				       pbl->pg_size == ROCE_PG_SIZE_1G ?
				       CMDQ_CREATE_SRQ_PG_SIZE_PG_1G :
				       CMDQ_CREATE_SRQ_PG_SIZE_PG_4K));
	req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	req.pd_id = cpu_to_le32(srq->pd->id);
	req.eventq_id = cpu_to_le16(srq->eventq_hw_ring_id);

	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc)
		goto fail;

	spin_lock_init(&srq->lock);
	srq->start_idx = 0;
	srq->last_idx = srq->hwq.max_elements - 1;
	for (idx = 0; idx < srq->hwq.max_elements; idx++)
		srq->swq[idx].next_idx = idx + 1;
	srq->swq[srq->last_idx].next_idx = -1;

	srq->id = le32_to_cpu(resp.xid);
	srq->dbinfo.hwq = &srq->hwq;
	srq->dbinfo.xid = srq->id;
	srq->dbinfo.db = srq->dpi->dbr;
	srq->dbinfo.priv_db = res->dpi_tbl.dbr_bar_reg_iomem;
	if (srq->threshold)
		bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA);
	srq->arm_req = false;

	return 0;
fail:
	bnxt_qplib_free_hwq(res, &srq->hwq);
	kfree(srq->swq);
exit:
	return rc;
}
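
/*
 * Ring occupancy on the circular SRQ is prod - cons, adjusted for wrap:
 * once the producer index has wrapped past the consumer, the distance is
 * max_elements - cons + prod. If more than srq->threshold entries are
 * still posted, the ARM doorbell is rung immediately; otherwise arming
 * is deferred (srq->arm_req) until bnxt_qplib_post_srq_recv() refills
 * the queue past the threshold.
 */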
int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
	u32 sw_prod, sw_cons, count = 0;

	sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
	sw_cons = HWQ_CMP(srq_hwq->cons, srq_hwq);

	count = sw_prod > sw_cons ? sw_prod - sw_cons :
				    srq_hwq->max_elements - sw_cons + sw_prod;
	if (count > srq->threshold) {
		srq->arm_req = false;
		bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
	} else {
		/* Deferred arming */
		srq->arm_req = true;
	}

	return 0;
}

int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_query_srq req;
	struct creq_query_srq_resp resp;
	struct bnxt_qplib_rcfw_sbuf *sbuf;
	struct creq_query_srq_resp_sb *sb;
	u16 cmd_flags = 0;
	int rc = 0;

	RCFW_CMD_PREP(req, QUERY_SRQ, cmd_flags);
	req.srq_cid = cpu_to_le32(srq->id);

	/* Configure the request */
	sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
	if (!sbuf)
		return -ENOMEM;
	sb = sbuf->sb;
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
					  (void *)sbuf, 0);
	srq->threshold = le16_to_cpu(sb->srq_limit);
	bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);

	return rc;
}

int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
			     struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
	struct rq_wqe *srqe, **srqe_ptr;
	struct sq_sge *hw_sge;
	u32 sw_prod, sw_cons, count = 0;
	int i, rc = 0, next;

	spin_lock(&srq_hwq->lock);
	if (srq->start_idx == srq->last_idx) {
		dev_err(&srq_hwq->pdev->dev,
			"FP: SRQ (0x%x) is full!\n", srq->id);
		rc = -EINVAL;
		spin_unlock(&srq_hwq->lock);
		goto done;
	}
	next = srq->start_idx;
	srq->start_idx = srq->swq[next].next_idx;
	spin_unlock(&srq_hwq->lock);

	sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
	srqe_ptr = (struct rq_wqe **)srq_hwq->pbl_ptr;
	srqe = &srqe_ptr[RQE_PG(sw_prod)][RQE_IDX(sw_prod)];
	memset(srqe, 0, BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
	/* Calculate wqe_size16 and data_len */
	for (i = 0, hw_sge = (struct sq_sge *)srqe->data;
	     i < wqe->num_sge; i++, hw_sge++) {
		hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
		hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
		hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
	}
	srqe->wqe_type = wqe->type;
	srqe->flags = wqe->flags;
	srqe->wqe_size = wqe->num_sge +
			((offsetof(typeof(*srqe), data) + 15) >> 4);
	srqe->wr_id[0] = cpu_to_le32((u32)next);
	srq->swq[next].wr_id = wqe->wr_id;

	srq_hwq->prod++;

	spin_lock(&srq_hwq->lock);
	sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
	/* retaining srq_hwq->cons for this logic
	 * actually the lock is only required to
	 * read srq_hwq->cons.
	 */
	sw_cons = HWQ_CMP(srq_hwq->cons, srq_hwq);
	count = sw_prod > sw_cons ? sw_prod - sw_cons :
				    srq_hwq->max_elements - sw_cons + sw_prod;
	spin_unlock(&srq_hwq->lock);
	/* Ring DB */
	bnxt_qplib_ring_prod_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ);
	if (srq->arm_req == true && count > srq->threshold) {
		srq->arm_req = false;
		bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
	}
done:
	return rc;
}
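
/*
 * A note on WQE sizing used throughout the posting paths: wqe_size (and
 * wqe_size16 in the SQ path) is expressed in 16-byte units. The header
 * portion is rounded up via (offsetof(..., data) + 15) >> 4, and each
 * SGE occupies one 16-byte slot, which is why an empty SGE list still
 * reserves one slot for the hardware.
 */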
/* QP */
int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_q *rq = &qp->rq;
	struct creq_create_qp1_resp resp;
	struct cmdq_create_qp1 req;
	struct bnxt_qplib_pbl *pbl;
	u16 cmd_flags = 0;
	u32 qp_flags = 0;
	int rc;

	RCFW_CMD_PREP(req, CREATE_QP1, cmd_flags);

	/* General */
	req.type = qp->type;
	req.dpi = cpu_to_le32(qp->dpi->dpi);
	req.qp_handle = cpu_to_le64(qp->qp_handle);

	/* SQ */
	hwq_attr.res = res;
	hwq_attr.sginfo = &sq->sg_info;
	hwq_attr.depth = sq->max_wqe;
	hwq_attr.stride = BNXT_QPLIB_MAX_SQE_ENTRY_SIZE;
	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
	if (rc)
		goto exit;

	sq->swq = kcalloc(sq->hwq.max_elements, sizeof(*sq->swq), GFP_KERNEL);
	if (!sq->swq) {
		rc = -ENOMEM;
		goto fail_sq;
	}
	pbl = &sq->hwq.pbl[PBL_LVL_0];
	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	req.sq_pg_size_sq_lvl =
		((sq->hwq.level & CMDQ_CREATE_QP1_SQ_LVL_MASK)
				<< CMDQ_CREATE_QP1_SQ_LVL_SFT) |
		(pbl->pg_size == ROCE_PG_SIZE_4K ?
				CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_4K :
		 pbl->pg_size == ROCE_PG_SIZE_8K ?
				CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_8K :
		 pbl->pg_size == ROCE_PG_SIZE_64K ?
				CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_64K :
		 pbl->pg_size == ROCE_PG_SIZE_2M ?
				CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_2M :
		 pbl->pg_size == ROCE_PG_SIZE_8M ?
				CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_8M :
		 pbl->pg_size == ROCE_PG_SIZE_1G ?
				CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_1G :
		 CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_4K);

	if (qp->scq)
		req.scq_cid = cpu_to_le32(qp->scq->id);

	qp_flags |= CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE;

	/* RQ */
	if (rq->max_wqe) {
		hwq_attr.res = res;
		hwq_attr.sginfo = &rq->sg_info;
		hwq_attr.stride = BNXT_QPLIB_MAX_RQE_ENTRY_SIZE;
		hwq_attr.depth = qp->rq.max_wqe;
		hwq_attr.type = HWQ_TYPE_QUEUE;
		rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
		if (rc)
			goto fail_sq;

		rq->swq = kcalloc(rq->hwq.max_elements, sizeof(*rq->swq),
				  GFP_KERNEL);
		if (!rq->swq) {
			rc = -ENOMEM;
			goto fail_rq;
		}
		pbl = &rq->hwq.pbl[PBL_LVL_0];
		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
		req.rq_pg_size_rq_lvl =
			((rq->hwq.level & CMDQ_CREATE_QP1_RQ_LVL_MASK) <<
					CMDQ_CREATE_QP1_RQ_LVL_SFT) |
				(pbl->pg_size == ROCE_PG_SIZE_4K ?
					CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_4K :
				 pbl->pg_size == ROCE_PG_SIZE_8K ?
					CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_8K :
				 pbl->pg_size == ROCE_PG_SIZE_64K ?
					CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_64K :
				 pbl->pg_size == ROCE_PG_SIZE_2M ?
					CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_2M :
				 pbl->pg_size == ROCE_PG_SIZE_8M ?
					CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_8M :
				 pbl->pg_size == ROCE_PG_SIZE_1G ?
					CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_1G :
				 CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_4K);
		if (qp->rcq)
			req.rcq_cid = cpu_to_le32(qp->rcq->id);
	}

	/* Header buffer - allow hdr_buf pass in */
	rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
	if (rc) {
		rc = -ENOMEM;
		goto fail;
	}
	req.qp_flags = cpu_to_le32(qp_flags);
	req.sq_size = cpu_to_le32(sq->hwq.max_elements);
	req.rq_size = cpu_to_le32(rq->hwq.max_elements);

	req.sq_fwo_sq_sge =
		cpu_to_le16((sq->max_sge & CMDQ_CREATE_QP1_SQ_SGE_MASK) <<
			    CMDQ_CREATE_QP1_SQ_SGE_SFT);
	req.rq_fwo_rq_sge =
		cpu_to_le16((rq->max_sge & CMDQ_CREATE_QP1_RQ_SGE_MASK) <<
			    CMDQ_CREATE_QP1_RQ_SGE_SFT);

	req.pd_id = cpu_to_le32(qp->pd->id);

	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc)
		goto fail;

	qp->id = le32_to_cpu(resp.xid);
	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
	qp->cctx = res->cctx;
	sq->dbinfo.hwq = &sq->hwq;
	sq->dbinfo.xid = qp->id;
	sq->dbinfo.db = qp->dpi->dbr;
	if (rq->max_wqe) {
		rq->dbinfo.hwq = &rq->hwq;
		rq->dbinfo.xid = qp->id;
		rq->dbinfo.db = qp->dpi->dbr;
	}
	rcfw->qp_tbl[qp->id].qp_id = qp->id;
	rcfw->qp_tbl[qp->id].qp_handle = (void *)qp;

	return 0;

fail:
	bnxt_qplib_free_qp_hdr_buf(res, qp);
fail_rq:
	bnxt_qplib_free_hwq(res, &rq->hwq);
	kfree(rq->swq);
fail_sq:
	bnxt_qplib_free_hwq(res, &sq->hwq);
	kfree(sq->swq);
exit:
	return rc;
}

int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	unsigned long int psn_search, poff = 0;
	struct bnxt_qplib_sg_info sginfo = {};
	struct sq_psn_search **psn_search_ptr;
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_q *rq = &qp->rq;
	int i, rc, req_size, psn_sz = 0;
	struct sq_send **hw_sq_send_ptr;
	struct creq_create_qp_resp resp;
	struct bnxt_qplib_hwq *xrrq;
	u16 cmd_flags = 0, max_ssge;
	struct cmdq_create_qp req;
	struct bnxt_qplib_pbl *pbl;
	u32 qp_flags = 0;
	u16 max_rsge;

	RCFW_CMD_PREP(req, CREATE_QP, cmd_flags);

	/* General */
	req.type = qp->type;
	req.dpi = cpu_to_le32(qp->dpi->dpi);
	req.qp_handle = cpu_to_le64(qp->qp_handle);

	/* SQ */
	if (qp->type == CMDQ_CREATE_QP_TYPE_RC) {
		psn_sz = bnxt_qplib_is_chip_gen_p5(res->cctx) ?
			 sizeof(struct sq_psn_search_ext) :
			 sizeof(struct sq_psn_search);
	}

	hwq_attr.res = res;
	hwq_attr.sginfo = &sq->sg_info;
	hwq_attr.stride = BNXT_QPLIB_MAX_SQE_ENTRY_SIZE;
	hwq_attr.depth = sq->max_wqe;
	hwq_attr.aux_stride = psn_sz;
	hwq_attr.aux_depth = hwq_attr.depth;
	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
	if (rc)
		goto exit;

	sq->swq = kcalloc(sq->hwq.max_elements, sizeof(*sq->swq), GFP_KERNEL);
	if (!sq->swq) {
		rc = -ENOMEM;
		goto fail_sq;
	}
	hw_sq_send_ptr = (struct sq_send **)sq->hwq.pbl_ptr;
	if (psn_sz) {
		psn_search_ptr = (struct sq_psn_search **)
				  &hw_sq_send_ptr[get_sqe_pg
					(sq->hwq.max_elements)];
		psn_search = (unsigned long int)
			      &hw_sq_send_ptr[get_sqe_pg(sq->hwq.max_elements)]
			      [get_sqe_idx(sq->hwq.max_elements)];
		if (psn_search & ~PAGE_MASK) {
			/* If the psn_search does not start on a page boundary,
			 * then calculate the offset
			 */
			poff = (psn_search & ~PAGE_MASK) /
				BNXT_QPLIB_MAX_PSNE_ENTRY_SIZE;
		}
		for (i = 0; i < sq->hwq.max_elements; i++) {
			sq->swq[i].psn_search =
				&psn_search_ptr[get_psne_pg(i + poff)]
					       [get_psne_idx(i + poff)];
			/* psns_ext will be used only for P5 chips. */
			sq->swq[i].psn_ext =
				(struct sq_psn_search_ext *)
				&psn_search_ptr[get_psne_pg(i + poff)]
					       [get_psne_idx(i + poff)];
		}
	}
	pbl = &sq->hwq.pbl[PBL_LVL_0];
	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	req.sq_pg_size_sq_lvl =
		((sq->hwq.level & CMDQ_CREATE_QP_SQ_LVL_MASK)
				 << CMDQ_CREATE_QP_SQ_LVL_SFT) |
		(pbl->pg_size == ROCE_PG_SIZE_4K ?
				CMDQ_CREATE_QP_SQ_PG_SIZE_PG_4K :
		 pbl->pg_size == ROCE_PG_SIZE_8K ?
				CMDQ_CREATE_QP_SQ_PG_SIZE_PG_8K :
		 pbl->pg_size == ROCE_PG_SIZE_64K ?
				CMDQ_CREATE_QP_SQ_PG_SIZE_PG_64K :
		 pbl->pg_size == ROCE_PG_SIZE_2M ?
				CMDQ_CREATE_QP_SQ_PG_SIZE_PG_2M :
		 pbl->pg_size == ROCE_PG_SIZE_8M ?
				CMDQ_CREATE_QP_SQ_PG_SIZE_PG_8M :
		 pbl->pg_size == ROCE_PG_SIZE_1G ?
				CMDQ_CREATE_QP_SQ_PG_SIZE_PG_1G :
		 CMDQ_CREATE_QP_SQ_PG_SIZE_PG_4K);

	if (qp->scq)
		req.scq_cid = cpu_to_le32(qp->scq->id);

	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_RESERVED_LKEY_ENABLE;
	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FR_PMR_ENABLED;
	if (qp->sig_type)
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION;

	/* RQ */
	if (rq->max_wqe) {
		hwq_attr.res = res;
		hwq_attr.sginfo = &rq->sg_info;
		hwq_attr.stride = BNXT_QPLIB_MAX_RQE_ENTRY_SIZE;
		hwq_attr.depth = rq->max_wqe;
		hwq_attr.aux_stride = 0;
		hwq_attr.aux_depth = 0;
		hwq_attr.type = HWQ_TYPE_QUEUE;
		rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
		if (rc)
			goto fail_sq;

		rq->swq = kcalloc(rq->hwq.max_elements, sizeof(*rq->swq),
				  GFP_KERNEL);
		if (!rq->swq) {
			rc = -ENOMEM;
			goto fail_rq;
		}
		pbl = &rq->hwq.pbl[PBL_LVL_0];
		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
		req.rq_pg_size_rq_lvl =
			((rq->hwq.level & CMDQ_CREATE_QP_RQ_LVL_MASK) <<
			 CMDQ_CREATE_QP_RQ_LVL_SFT) |
				(pbl->pg_size == ROCE_PG_SIZE_4K ?
					CMDQ_CREATE_QP_RQ_PG_SIZE_PG_4K :
				 pbl->pg_size == ROCE_PG_SIZE_8K ?
					CMDQ_CREATE_QP_RQ_PG_SIZE_PG_8K :
				 pbl->pg_size == ROCE_PG_SIZE_64K ?
					CMDQ_CREATE_QP_RQ_PG_SIZE_PG_64K :
				 pbl->pg_size == ROCE_PG_SIZE_2M ?
					CMDQ_CREATE_QP_RQ_PG_SIZE_PG_2M :
				 pbl->pg_size == ROCE_PG_SIZE_8M ?
					CMDQ_CREATE_QP_RQ_PG_SIZE_PG_8M :
				 pbl->pg_size == ROCE_PG_SIZE_1G ?
					CMDQ_CREATE_QP_RQ_PG_SIZE_PG_1G :
				 CMDQ_CREATE_QP_RQ_PG_SIZE_PG_4K);
	} else {
		/* SRQ */
		if (qp->srq) {
			qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_SRQ_USED;
			req.srq_cid = cpu_to_le32(qp->srq->id);
		}
	}

	if (qp->rcq)
		req.rcq_cid = cpu_to_le32(qp->rcq->id);
	req.qp_flags = cpu_to_le32(qp_flags);
	req.sq_size = cpu_to_le32(sq->hwq.max_elements);
	req.rq_size = cpu_to_le32(rq->hwq.max_elements);
	qp->sq_hdr_buf = NULL;
	qp->rq_hdr_buf = NULL;

	rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
	if (rc)
		goto fail_rq;

	/* CTRL-22434: Irrespective of the requested SGE count on the SQ,
	 * always create the QP with the max send SGEs possible if the
	 * requested inline size is greater than 0.
	 */
	max_ssge = qp->max_inline_data ? 6 : sq->max_sge;
	req.sq_fwo_sq_sge = cpu_to_le16(
				((max_ssge & CMDQ_CREATE_QP_SQ_SGE_MASK)
				 << CMDQ_CREATE_QP_SQ_SGE_SFT) | 0);
	max_rsge = bnxt_qplib_is_chip_gen_p5(res->cctx) ? 6 : rq->max_sge;
	req.rq_fwo_rq_sge = cpu_to_le16(
				((max_rsge & CMDQ_CREATE_QP_RQ_SGE_MASK)
				 << CMDQ_CREATE_QP_RQ_SGE_SFT) | 0);
	/* ORRQ and IRRQ */
	if (psn_sz) {
		xrrq = &qp->orrq;
		xrrq->max_elements =
			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
		req_size = xrrq->max_elements *
			   BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE + PAGE_SIZE - 1;
		req_size &= ~(PAGE_SIZE - 1);
		sginfo.pgsize = req_size;
		sginfo.pgshft = PAGE_SHIFT;

		hwq_attr.res = res;
		hwq_attr.sginfo = &sginfo;
		hwq_attr.depth = xrrq->max_elements;
		hwq_attr.stride = BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE;
		hwq_attr.aux_stride = 0;
		hwq_attr.aux_depth = 0;
		hwq_attr.type = HWQ_TYPE_CTX;
		rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
		if (rc)
			goto fail_buf_free;
		pbl = &xrrq->pbl[PBL_LVL_0];
		req.orrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);

		xrrq = &qp->irrq;
		xrrq->max_elements = IRD_LIMIT_TO_IRRQ_SLOTS(
						qp->max_dest_rd_atomic);
		req_size = xrrq->max_elements *
			   BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE + PAGE_SIZE - 1;
		req_size &= ~(PAGE_SIZE - 1);
		sginfo.pgsize = req_size;
		hwq_attr.depth = xrrq->max_elements;
		hwq_attr.stride = BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE;
		rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
		if (rc)
			goto fail_orrq;

		pbl = &xrrq->pbl[PBL_LVL_0];
		req.irrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);
	}
	req.pd_id = cpu_to_le32(qp->pd->id);

	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc)
		goto fail;

	qp->id = le32_to_cpu(resp.xid);
	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
	INIT_LIST_HEAD(&qp->sq_flush);
	INIT_LIST_HEAD(&qp->rq_flush);
	qp->cctx = res->cctx;
	sq->dbinfo.hwq = &sq->hwq;
	sq->dbinfo.xid = qp->id;
	sq->dbinfo.db = qp->dpi->dbr;
	if (rq->max_wqe) {
		rq->dbinfo.hwq = &rq->hwq;
		rq->dbinfo.xid = qp->id;
		rq->dbinfo.db = qp->dpi->dbr;
	}
	rcfw->qp_tbl[qp->id].qp_id = qp->id;
	rcfw->qp_tbl[qp->id].qp_handle = (void *)qp;

	return 0;

fail:
	if (qp->irrq.max_elements)
		bnxt_qplib_free_hwq(res, &qp->irrq);
fail_orrq:
	if (qp->orrq.max_elements)
		bnxt_qplib_free_hwq(res, &qp->orrq);
fail_buf_free:
	bnxt_qplib_free_qp_hdr_buf(res, qp);
fail_rq:
	bnxt_qplib_free_hwq(res, &rq->hwq);
	kfree(rq->swq);
fail_sq:
	bnxt_qplib_free_hwq(res, &sq->hwq);
	kfree(sq->swq);
exit:
	return rc;
}

static void __modify_flags_from_init_state(struct bnxt_qplib_qp *qp)
{
	switch (qp->state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
		/* INIT->RTR, configure the path_mtu to the default
		 * 2048 if not being requested
		 */
		if (!(qp->modify_flags &
		    CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)) {
			qp->modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
			qp->path_mtu =
				CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
		}
		qp->modify_flags &=
			~CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
		/* Bono FW requires the max_dest_rd_atomic to be >= 1 */
		if (qp->max_dest_rd_atomic < 1)
			qp->max_dest_rd_atomic = 1;
		qp->modify_flags &= ~CMDQ_MODIFY_QP_MODIFY_MASK_SRC_MAC;
		/* Bono FW 20.6.5 requires SGID_INDEX configuration */
		if (!(qp->modify_flags &
		    CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)) {
			qp->modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX;
			qp->ah.sgid_index = 0;
		}
		break;
	default:
		break;
	}
}

static void __modify_flags_from_rtr_state(struct bnxt_qplib_qp *qp)
{
	switch (qp->state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
		/* Bono FW requires the max_rd_atomic to be >= 1 */
		if (qp->max_rd_atomic < 1)
			qp->max_rd_atomic = 1;
		/* Bono FW does not allow PKEY_INDEX,
		 * DGID, FLOW_LABEL, SGID_INDEX, HOP_LIMIT,
		 * TRAFFIC_CLASS, DEST_MAC, PATH_MTU, RQ_PSN,
		 * MIN_RNR_TIMER, MAX_DEST_RD_ATOMIC, DEST_QP_ID
		 * modification
		 */
		qp->modify_flags &=
			~(CMDQ_MODIFY_QP_MODIFY_MASK_PKEY |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
			  CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
			  CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
			  CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
			  CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
			  CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU |
			  CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN |
			  CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER |
			  CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID);
		break;
	default:
		break;
	}
}

static void __filter_modify_flags(struct bnxt_qplib_qp *qp)
{
	switch (qp->cur_qp_state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RESET:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_INIT:
		__modify_flags_from_init_state(qp);
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
		__modify_flags_from_rtr_state(qp);
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_SQD:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_SQE:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_ERR:
		break;
	default:
		break;
	}
}

int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_modify_qp req;
	struct creq_modify_qp_resp resp;
	u16 cmd_flags = 0, pkey;
	u32 temp32[4];
	u32 bmask;
	int rc;

	RCFW_CMD_PREP(req, MODIFY_QP, cmd_flags);

	/* Filter out the qp_attr_mask based on the state->new transition */
	__filter_modify_flags(qp);
	bmask = qp->modify_flags;
	req.modify_mask = cpu_to_le32(qp->modify_flags);
	req.qp_cid = cpu_to_le32(qp->id);
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_STATE) {
		req.network_type_en_sqd_async_notify_new_state =
				(qp->state & CMDQ_MODIFY_QP_NEW_STATE_MASK) |
				(qp->en_sqd_async_notify ?
					CMDQ_MODIFY_QP_EN_SQD_ASYNC_NOTIFY : 0);
	}
	req.network_type_en_sqd_async_notify_new_state |= qp->nw_type;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS)
		req.access = qp->access;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY) {
		if (!bnxt_qplib_get_pkey(res, &res->pkey_tbl,
					 qp->pkey_index, &pkey))
			req.pkey = cpu_to_le16(pkey);
	}
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_QKEY)
		req.qkey = cpu_to_le32(qp->qkey);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DGID) {
		memcpy(temp32, qp->ah.dgid.data, sizeof(struct bnxt_qplib_gid));
		req.dgid[0] = cpu_to_le32(temp32[0]);
		req.dgid[1] = cpu_to_le32(temp32[1]);
		req.dgid[2] = cpu_to_le32(temp32[2]);
		req.dgid[3] = cpu_to_le32(temp32[3]);
	}
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL)
		req.flow_label = cpu_to_le32(qp->ah.flow_label);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)
		req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id
					     [qp->ah.sgid_index]);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT)
		req.hop_limit = qp->ah.hop_limit;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS)
		req.traffic_class = qp->ah.traffic_class;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC)
		memcpy(req.dest_mac, qp->ah.dmac, 6);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)
		req.path_mtu = qp->path_mtu;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT)
		req.timeout = qp->timeout;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT)
		req.retry_cnt = qp->retry_cnt;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY)
		req.rnr_retry = qp->rnr_retry;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER)
		req.min_rnr_timer = qp->min_rnr_timer;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN)
		req.rq_psn = cpu_to_le32(qp->rq.psn);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN)
		req.sq_psn = cpu_to_le32(qp->sq.psn);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC)
		req.max_rd_atomic =
			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC)
		req.max_dest_rd_atomic =
			IRD_LIMIT_TO_IRRQ_SLOTS(qp->max_dest_rd_atomic);

	req.sq_size = cpu_to_le32(qp->sq.hwq.max_elements);
	req.rq_size = cpu_to_le32(qp->rq.hwq.max_elements);
	req.sq_sge = cpu_to_le16(qp->sq.max_sge);
	req.rq_sge = cpu_to_le16(qp->rq.max_sge);
	req.max_inline_data = cpu_to_le32(qp->max_inline_data);
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID)
		req.dest_qp_id = cpu_to_le32(qp->dest_qpn);

	req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id);

	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc)
		return rc;
	qp->cur_qp_state = qp->state;
	return 0;
}

int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_query_qp req;
	struct creq_query_qp_resp resp;
	struct bnxt_qplib_rcfw_sbuf *sbuf;
	struct creq_query_qp_resp_sb *sb;
	u16 cmd_flags = 0;
	u32 temp32[4];
	int i, rc = 0;

	RCFW_CMD_PREP(req, QUERY_QP, cmd_flags);

	sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
	if (!sbuf)
		return -ENOMEM;
	sb = sbuf->sb;

	req.qp_cid = cpu_to_le32(qp->id);
	req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
					  (void *)sbuf, 0);
	if (rc)
		goto bail;
	/* Extract the context from the side buffer */
	qp->state = sb->en_sqd_async_notify_state &
			CREQ_QUERY_QP_RESP_SB_STATE_MASK;
	qp->en_sqd_async_notify = sb->en_sqd_async_notify_state &
				  CREQ_QUERY_QP_RESP_SB_EN_SQD_ASYNC_NOTIFY ?
				  true : false;
	qp->access = sb->access;
	qp->pkey_index = le16_to_cpu(sb->pkey);
	qp->qkey = le32_to_cpu(sb->qkey);

	temp32[0] = le32_to_cpu(sb->dgid[0]);
	temp32[1] = le32_to_cpu(sb->dgid[1]);
	temp32[2] = le32_to_cpu(sb->dgid[2]);
	temp32[3] = le32_to_cpu(sb->dgid[3]);
	memcpy(qp->ah.dgid.data, temp32, sizeof(qp->ah.dgid.data));

	qp->ah.flow_label = le32_to_cpu(sb->flow_label);

	qp->ah.sgid_index = 0;
	for (i = 0; i < res->sgid_tbl.max; i++) {
		if (res->sgid_tbl.hw_id[i] == le16_to_cpu(sb->sgid_index)) {
			qp->ah.sgid_index = i;
			break;
		}
	}
	if (i == res->sgid_tbl.max)
		dev_warn(&res->pdev->dev, "SGID not found??\n");

	qp->ah.hop_limit = sb->hop_limit;
	qp->ah.traffic_class = sb->traffic_class;
	memcpy(qp->ah.dmac, sb->dest_mac, 6);
	qp->ah.vlan_id = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
				CREQ_QUERY_QP_RESP_SB_VLAN_ID_MASK) >>
				CREQ_QUERY_QP_RESP_SB_VLAN_ID_SFT;
	qp->path_mtu = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
				    CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) >>
				    CREQ_QUERY_QP_RESP_SB_PATH_MTU_SFT;
	qp->timeout = sb->timeout;
	qp->retry_cnt = sb->retry_cnt;
	qp->rnr_retry = sb->rnr_retry;
	qp->min_rnr_timer = sb->min_rnr_timer;
	qp->rq.psn = le32_to_cpu(sb->rq_psn);
	qp->max_rd_atomic = ORRQ_SLOTS_TO_ORD_LIMIT(sb->max_rd_atomic);
	qp->sq.psn = le32_to_cpu(sb->sq_psn);
	qp->max_dest_rd_atomic =
			IRRQ_SLOTS_TO_IRD_LIMIT(sb->max_dest_rd_atomic);
	qp->sq.max_wqe = qp->sq.hwq.max_elements;
	qp->rq.max_wqe = qp->rq.hwq.max_elements;
	qp->sq.max_sge = le16_to_cpu(sb->sq_sge);
	qp->rq.max_sge = le16_to_cpu(sb->rq_sge);
	qp->max_inline_data = le32_to_cpu(sb->max_inline_data);
	qp->dest_qpn = le32_to_cpu(sb->dest_qp_id);
	memcpy(qp->smac, sb->src_mac, 6);
	qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id);
bail:
	bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
	return rc;
}
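
/*
 * Walk every valid CQE in the CQ ring and clear the qp_handle of entries
 * that belong to the QP being destroyed, so the poll path can detect and
 * skip stale completions for a QP that no longer exists.
 */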
static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
{
	struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
	struct cq_base *hw_cqe, **hw_cqe_ptr;
	int i;

	for (i = 0; i < cq_hwq->max_elements; i++) {
		hw_cqe_ptr = (struct cq_base **)cq_hwq->pbl_ptr;
		hw_cqe = &hw_cqe_ptr[CQE_PG(i)][CQE_IDX(i)];
		if (!CQE_CMP_VALID(hw_cqe, i, cq_hwq->max_elements))
			continue;
		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();
		switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
		case CQ_BASE_CQE_TYPE_REQ:
		case CQ_BASE_CQE_TYPE_TERMINAL:
		{
			struct cq_req *cqe = (struct cq_req *)hw_cqe;

			if (qp == le64_to_cpu(cqe->qp_handle))
				cqe->qp_handle = 0;
			break;
		}
		case CQ_BASE_CQE_TYPE_RES_RC:
		case CQ_BASE_CQE_TYPE_RES_UD:
		case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
		{
			struct cq_res_rc *cqe = (struct cq_res_rc *)hw_cqe;

			if (qp == le64_to_cpu(cqe->qp_handle))
				cqe->qp_handle = 0;
			break;
		}
		default:
			break;
		}
	}
}

int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_destroy_qp req;
	struct creq_destroy_qp_resp resp;
	u16 cmd_flags = 0;
	int rc;

	rcfw->qp_tbl[qp->id].qp_id = BNXT_QPLIB_QP_ID_INVALID;
	rcfw->qp_tbl[qp->id].qp_handle = NULL;

	RCFW_CMD_PREP(req, DESTROY_QP, cmd_flags);

	req.qp_cid = cpu_to_le32(qp->id);
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc) {
		rcfw->qp_tbl[qp->id].qp_id = qp->id;
		rcfw->qp_tbl[qp->id].qp_handle = qp;
		return rc;
	}

	return 0;
}

void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res,
			    struct bnxt_qplib_qp *qp)
{
	bnxt_qplib_free_qp_hdr_buf(res, qp);
	bnxt_qplib_free_hwq(res, &qp->sq.hwq);
	kfree(qp->sq.swq);

	bnxt_qplib_free_hwq(res, &qp->rq.hwq);
	kfree(qp->rq.swq);

	if (qp->irrq.max_elements)
		bnxt_qplib_free_hwq(res, &qp->irrq);
	if (qp->orrq.max_elements)
		bnxt_qplib_free_hwq(res, &qp->orrq);
}

void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge)
{
	struct bnxt_qplib_q *sq = &qp->sq;
	u32 sw_prod;

	memset(sge, 0, sizeof(*sge));

	if (qp->sq_hdr_buf) {
		sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
		sge->addr = (dma_addr_t)(qp->sq_hdr_buf_map +
					 sw_prod * qp->sq_hdr_buf_size);
		sge->lkey = 0xFFFFFFFF;
		sge->size = qp->sq_hdr_buf_size;
		return qp->sq_hdr_buf + sw_prod * sge->size;
	}
	return NULL;
}

u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;

	return HWQ_CMP(rq->hwq.prod, &rq->hwq);
}

dma_addr_t bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp, u32 index)
{
	return (qp->rq_hdr_buf_map + index * qp->rq_hdr_buf_size);
}

void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	u32 sw_prod;

	memset(sge, 0, sizeof(*sge));

	if (qp->rq_hdr_buf) {
		sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
		sge->addr = (dma_addr_t)(qp->rq_hdr_buf_map +
					 sw_prod * qp->rq_hdr_buf_size);
		sge->lkey = 0xFFFFFFFF;
		sge->size = qp->rq_hdr_buf_size;
		return qp->rq_hdr_buf + sw_prod * sge->size;
	}
	return NULL;
}

void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *sq = &qp->sq;

	bnxt_qplib_ring_prod_db(&sq->dbinfo, DBC_DBC_TYPE_SQ);
}
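
/*
 * Post one send WQE. The PSN bookkeeping below assumes one packet per
 * MTU-sized chunk of payload: sq->psn advances by
 * DIV_ROUND_UP(data_len, qp->mtu) (minimum one) for packet-bearing
 * opcodes, and the start/next PSNs are mirrored into the PSN-search area
 * so the hardware can locate a WQE by PSN during retransmission.
 */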
int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
			 struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_swq *swq;
	struct sq_send *hw_sq_send_hdr, **hw_sq_send_ptr;
	struct sq_sge *hw_sge;
	struct bnxt_qplib_nq_work *nq_work = NULL;
	bool sch_handler = false;
	u32 sw_prod;
	u8 wqe_size16;
	int i, rc = 0, data_len = 0, pkt_num = 0;
	__le32 temp32;

	if (qp->state != CMDQ_MODIFY_QP_NEW_STATE_RTS) {
		if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
			sch_handler = true;
			dev_dbg(&sq->hwq.pdev->dev,
				"%s: Error QP. Scheduling for poll_cq\n",
				__func__);
			goto queue_err;
		}
	}

	if (bnxt_qplib_queue_full(sq)) {
		dev_err(&sq->hwq.pdev->dev,
			"prod = %#x cons = %#x qdepth = %#x delta = %#x\n",
			sq->hwq.prod, sq->hwq.cons, sq->hwq.max_elements,
			sq->q_full_delta);
		rc = -ENOMEM;
		goto done;
	}
	sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
	swq = &sq->swq[sw_prod];
	swq->wr_id = wqe->wr_id;
	swq->type = wqe->type;
	swq->flags = wqe->flags;
	if (qp->sig_type)
		swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;
	swq->start_psn = sq->psn & BTH_PSN_MASK;

	hw_sq_send_ptr = (struct sq_send **)sq->hwq.pbl_ptr;
	hw_sq_send_hdr = &hw_sq_send_ptr[get_sqe_pg(sw_prod)]
					[get_sqe_idx(sw_prod)];

	memset(hw_sq_send_hdr, 0, BNXT_QPLIB_MAX_SQE_ENTRY_SIZE);

	if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE) {
		/* Copy the inline data */
		if (wqe->inline_len > BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) {
			dev_warn(&sq->hwq.pdev->dev,
				 "Inline data length > 96 detected\n");
			data_len = BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH;
		} else {
			data_len = wqe->inline_len;
		}
		memcpy(hw_sq_send_hdr->data, wqe->inline_data, data_len);
		wqe_size16 = (data_len + 15) >> 4;
	} else {
		for (i = 0, hw_sge = (struct sq_sge *)hw_sq_send_hdr->data;
		     i < wqe->num_sge; i++, hw_sge++) {
			hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
			hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
			hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
			data_len += wqe->sg_list[i].size;
		}
		/* Each SGE entry = 1 WQE size16 */
		wqe_size16 = wqe->num_sge;
		/* HW requires wqe size has room for at least one SGE even if
		 * none was supplied by ULP
		 */
		if (!wqe->num_sge)
			wqe_size16++;
	}

	/* Specifics */
	switch (wqe->type) {
	case BNXT_QPLIB_SWQE_TYPE_SEND:
		if (qp->type == CMDQ_CREATE_QP1_TYPE_GSI) {
			/* Assemble info for Raw Ethertype QPs */
			struct sq_send_raweth_qp1 *sqe =
				(struct sq_send_raweth_qp1 *)hw_sq_send_hdr;

			sqe->wqe_type = wqe->type;
			sqe->flags = wqe->flags;
			sqe->wqe_size = wqe_size16 +
				((offsetof(typeof(*sqe), data) + 15) >> 4);
			sqe->cfa_action = cpu_to_le16(wqe->rawqp1.cfa_action);
			sqe->lflags = cpu_to_le16(wqe->rawqp1.lflags);
			sqe->length = cpu_to_le32(data_len);
			sqe->cfa_meta = cpu_to_le32((wqe->rawqp1.cfa_meta &
				SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_MASK) <<
				SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_SFT);

			break;
		}
		/* fall thru */
	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
	{
		struct sq_send *sqe = (struct sq_send *)hw_sq_send_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->wqe_size = wqe_size16 +
				((offsetof(typeof(*sqe), data) + 15) >> 4);
		sqe->inv_key_or_imm_data = cpu_to_le32(
						wqe->send.inv_key);
		if (qp->type == CMDQ_CREATE_QP_TYPE_UD ||
		    qp->type == CMDQ_CREATE_QP_TYPE_GSI) {
			sqe->q_key = cpu_to_le32(wqe->send.q_key);
			sqe->dst_qp = cpu_to_le32(
					wqe->send.dst_qp & SQ_SEND_DST_QP_MASK);
			sqe->length = cpu_to_le32(data_len);
			sqe->avid = cpu_to_le32(wqe->send.avid &
						SQ_SEND_AVID_MASK);
			sq->psn = (sq->psn + 1) & BTH_PSN_MASK;
		} else {
			sqe->length = cpu_to_le32(data_len);
			sqe->dst_qp = 0;
			sqe->avid = 0;
			if (qp->mtu)
				pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
			if (!pkt_num)
				pkt_num = 1;
			sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
		}
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
	case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
	{
		struct sq_rdma *sqe = (struct sq_rdma *)hw_sq_send_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->wqe_size = wqe_size16 +
				((offsetof(typeof(*sqe), data) + 15) >> 4);
		sqe->imm_data = cpu_to_le32(wqe->rdma.inv_key);
		sqe->length = cpu_to_le32((u32)data_len);
		sqe->remote_va = cpu_to_le64(wqe->rdma.remote_va);
		sqe->remote_key = cpu_to_le32(wqe->rdma.r_key);
		if (qp->mtu)
			pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
		if (!pkt_num)
			pkt_num = 1;
		sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
	{
		struct sq_atomic *sqe = (struct sq_atomic *)hw_sq_send_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->remote_key = cpu_to_le32(wqe->atomic.r_key);
		sqe->remote_va = cpu_to_le64(wqe->atomic.remote_va);
		sqe->swap_data = cpu_to_le64(wqe->atomic.swap_data);
		sqe->cmp_data = cpu_to_le64(wqe->atomic.cmp_data);
		if (qp->mtu)
			pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
		if (!pkt_num)
			pkt_num = 1;
		sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
	{
		struct sq_localinvalidate *sqe =
				(struct sq_localinvalidate *)hw_sq_send_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->inv_l_key = cpu_to_le32(wqe->local_inv.inv_l_key);

		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR:
	{
		struct sq_fr_pmr *sqe = (struct sq_fr_pmr *)hw_sq_send_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->access_cntl = wqe->frmr.access_cntl |
				   SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
		sqe->zero_based_page_size_log =
			(wqe->frmr.pg_sz_log & SQ_FR_PMR_PAGE_SIZE_LOG_MASK) <<
			SQ_FR_PMR_PAGE_SIZE_LOG_SFT |
			(wqe->frmr.zero_based ? SQ_FR_PMR_ZERO_BASED : 0);
		sqe->l_key = cpu_to_le32(wqe->frmr.l_key);
		temp32 = cpu_to_le32(wqe->frmr.length);
		memcpy(sqe->length, &temp32, sizeof(wqe->frmr.length));
		sqe->numlevels_pbl_page_size_log =
			((wqe->frmr.pbl_pg_sz_log <<
					SQ_FR_PMR_PBL_PAGE_SIZE_LOG_SFT) &
					SQ_FR_PMR_PBL_PAGE_SIZE_LOG_MASK) |
			((wqe->frmr.levels << SQ_FR_PMR_NUMLEVELS_SFT) &
					SQ_FR_PMR_NUMLEVELS_MASK);

		for (i = 0; i < wqe->frmr.page_list_len; i++)
			wqe->frmr.pbl_ptr[i] = cpu_to_le64(
						wqe->frmr.page_list[i] |
						PTU_PTE_VALID);
		sqe->pblptr = cpu_to_le64(wqe->frmr.pbl_dma_ptr);
		sqe->va = cpu_to_le64(wqe->frmr.va);

		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_BIND_MW:
	{
		struct sq_bind *sqe = (struct sq_bind *)hw_sq_send_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->access_cntl = wqe->bind.access_cntl;
		sqe->mw_type_zero_based = wqe->bind.mw_type |
			(wqe->bind.zero_based ? SQ_BIND_ZERO_BASED : 0);
		sqe->parent_l_key = cpu_to_le32(wqe->bind.parent_l_key);
		sqe->l_key = cpu_to_le32(wqe->bind.r_key);
		sqe->va = cpu_to_le64(wqe->bind.va);
		temp32 = cpu_to_le32(wqe->bind.length);
		memcpy(&sqe->length, &temp32, sizeof(wqe->bind.length));
		break;
	}
	default:
		/* Bad wqe, return error */
		rc = -EINVAL;
		goto done;
	}
	swq->next_psn = sq->psn & BTH_PSN_MASK;
	if (swq->psn_search) {
		u32 opcd_spsn;
		u32 flg_npsn;

		opcd_spsn = ((swq->start_psn << SQ_PSN_SEARCH_START_PSN_SFT) &
			      SQ_PSN_SEARCH_START_PSN_MASK);
		opcd_spsn |= ((wqe->type << SQ_PSN_SEARCH_OPCODE_SFT) &
			       SQ_PSN_SEARCH_OPCODE_MASK);
		flg_npsn = ((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) &
			     SQ_PSN_SEARCH_NEXT_PSN_MASK);
		if (bnxt_qplib_is_chip_gen_p5(qp->cctx)) {
			swq->psn_ext->opcode_start_psn =
						cpu_to_le32(opcd_spsn);
			swq->psn_ext->flags_next_psn =
						cpu_to_le32(flg_npsn);
		} else {
			swq->psn_search->opcode_start_psn =
						cpu_to_le32(opcd_spsn);
			swq->psn_search->flags_next_psn =
						cpu_to_le32(flg_npsn);
		}
	}
queue_err:
	if (sch_handler) {
		/* Store the ULP info in the software structures */
		sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
		swq = &sq->swq[sw_prod];
		swq->wr_id = wqe->wr_id;
		swq->type = wqe->type;
		swq->flags = wqe->flags;
		if (qp->sig_type)
			swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;
		swq->start_psn = sq->psn & BTH_PSN_MASK;
	}
	sq->hwq.prod++;
	qp->wqe_cnt++;

done:
	if (sch_handler) {
		nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
		if (nq_work) {
			nq_work->cq = qp->scq;
			nq_work->nq = qp->scq->nq;
			INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
			queue_work(qp->scq->nq->cqn_wq, &nq_work->work);
		} else {
			dev_err(&sq->hwq.pdev->dev,
				"FP: Failed to allocate SQ nq_work!\n");
			rc = -ENOMEM;
		}
	}
	return rc;
}

void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;

	bnxt_qplib_ring_prod_db(&rq->dbinfo, DBC_DBC_TYPE_RQ);
}
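
/*
 * Post one receive WQE. As on the SQ side, a QP in the error state does
 * not touch the hardware queue: the request is parked in the software
 * queue and a work item nudges the CQ notification handler so the ULP
 * sees a flush completion via poll_cq.
 */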
1918 int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
1919 struct bnxt_qplib_swqe *wqe)
1921 struct bnxt_qplib_q *rq = &qp->rq;
1922 struct rq_wqe *rqe, **rqe_ptr;
1923 struct sq_sge *hw_sge;
1924 struct bnxt_qplib_nq_work *nq_work = NULL;
1925 bool sch_handler = false;
1926 u32 sw_prod;
1927 int i, rc = 0;
1929 if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1930 sch_handler = true;
1931 dev_dbg(&rq->hwq.pdev->dev,
1932 "%s: Error QP. Scheduling for poll_cq\n", __func__);
1933 goto queue_err;
1935 if (bnxt_qplib_queue_full(rq)) {
1936 dev_err(&rq->hwq.pdev->dev,
1937 "FP: QP (0x%x) RQ is full!\n", qp->id);
1938 rc = -EINVAL;
1939 goto done;
1941 sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
1942 rq->swq[sw_prod].wr_id = wqe->wr_id;
1944 rqe_ptr = (struct rq_wqe **)rq->hwq.pbl_ptr;
1945 rqe = &rqe_ptr[RQE_PG(sw_prod)][RQE_IDX(sw_prod)];
1947 memset(rqe, 0, BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
1949 /* Fill the hw_sge entries and calculate the wqe size */
1950 for (i = 0, hw_sge = (struct sq_sge *)rqe->data;
1951 i < wqe->num_sge; i++, hw_sge++) {
1952 hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
1953 hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
1954 hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
1956 rqe->wqe_type = wqe->type;
1957 rqe->flags = wqe->flags;
1958 rqe->wqe_size = wqe->num_sge +
1959 ((offsetof(typeof(*rqe), data) + 15) >> 4);
1960 /* HW requires the wqe size to have room for at least one SGE even if
1961 * none was supplied by the ULP
1963 if (!wqe->num_sge)
1964 rqe->wqe_size++;
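/*
 * Worked example of the size math above: wqe_size is in 16-byte
 * slots. If the rq_wqe header up to 'data' were, say, 32 bytes, it
 * would contribute (32 + 15) >> 4 = 2 slots, so a request carrying
 * 3 SGEs (one slot each) would get wqe_size = 3 + 2 = 5.
 */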
1966 /* Supply the rqe->wr_id index to the wr_id_tbl for now */
1967 rqe->wr_id[0] = cpu_to_le32(sw_prod);
1969 queue_err:
1970 if (sch_handler) {
1971 /* Store the ULP info in the software structures */
1972 sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
1973 rq->swq[sw_prod].wr_id = wqe->wr_id;
1976 rq->hwq.prod++;
1977 if (sch_handler) {
1978 nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
1979 if (nq_work) {
1980 nq_work->cq = qp->rcq;
1981 nq_work->nq = qp->rcq->nq;
1982 INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
1983 queue_work(qp->rcq->nq->cqn_wq, &nq_work->work);
1984 } else {
1985 dev_err(&rq->hwq.pdev->dev,
1986 "FP: Failed to allocate RQ nq_work!\n");
1987 rc = -ENOMEM;
1990 done:
1991 return rc;
1994 /* CQ */
1995 int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
1997 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1998 struct bnxt_qplib_hwq_attr hwq_attr = {};
1999 struct creq_create_cq_resp resp;
2000 struct cmdq_create_cq req;
2001 struct bnxt_qplib_pbl *pbl;
2002 u16 cmd_flags = 0;
2003 int rc;
2005 hwq_attr.res = res;
2006 hwq_attr.depth = cq->max_wqe;
2007 hwq_attr.stride = sizeof(struct cq_base);
2008 hwq_attr.type = HWQ_TYPE_QUEUE;
2009 hwq_attr.sginfo = &cq->sg_info;
2010 rc = bnxt_qplib_alloc_init_hwq(&cq->hwq, &hwq_attr);
2011 if (rc)
2012 goto exit;
2014 RCFW_CMD_PREP(req, CREATE_CQ, cmd_flags);
2016 if (!cq->dpi) {
2017 dev_err(&rcfw->pdev->dev,
2018 "FP: CREATE_CQ failed due to NULL DPI\n");
2019 rc = -EINVAL;
2020 goto fail;
2021 req.dpi = cpu_to_le32(cq->dpi->dpi);
2022 req.cq_handle = cpu_to_le64(cq->cq_handle);
2024 req.cq_size = cpu_to_le32(cq->hwq.max_elements);
2025 pbl = &cq->hwq.pbl[PBL_LVL_0];
2026 req.pg_size_lvl = cpu_to_le32(
2027 ((cq->hwq.level & CMDQ_CREATE_CQ_LVL_MASK) <<
2028 CMDQ_CREATE_CQ_LVL_SFT) |
2029 (pbl->pg_size == ROCE_PG_SIZE_4K ? CMDQ_CREATE_CQ_PG_SIZE_PG_4K :
2030 pbl->pg_size == ROCE_PG_SIZE_8K ? CMDQ_CREATE_CQ_PG_SIZE_PG_8K :
2031 pbl->pg_size == ROCE_PG_SIZE_64K ? CMDQ_CREATE_CQ_PG_SIZE_PG_64K :
2032 pbl->pg_size == ROCE_PG_SIZE_2M ? CMDQ_CREATE_CQ_PG_SIZE_PG_2M :
2033 pbl->pg_size == ROCE_PG_SIZE_8M ? CMDQ_CREATE_CQ_PG_SIZE_PG_8M :
2034 pbl->pg_size == ROCE_PG_SIZE_1G ? CMDQ_CREATE_CQ_PG_SIZE_PG_1G :
2035 CMDQ_CREATE_CQ_PG_SIZE_PG_4K));
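/*
 * pg_size_lvl thus packs two things: the PBL indirection level of the
 * CQ's hardware queue and the page size backing the level-0 PBL; any
 * page size the chain above does not recognize falls back to 4K.
 */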
2037 req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
2039 req.cq_fco_cnq_id = cpu_to_le32(
2040 (cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) <<
2041 CMDQ_CREATE_CQ_CNQ_ID_SFT);
2043 rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
2044 (void *)&resp, NULL, 0);
2045 if (rc)
2046 goto fail;
2048 cq->id = le32_to_cpu(resp.xid);
2049 cq->period = BNXT_QPLIB_QUEUE_START_PERIOD;
2050 init_waitqueue_head(&cq->waitq);
2051 INIT_LIST_HEAD(&cq->sqf_head);
2052 INIT_LIST_HEAD(&cq->rqf_head);
2053 spin_lock_init(&cq->compl_lock);
2054 spin_lock_init(&cq->flush_lock);
2056 cq->dbinfo.hwq = &cq->hwq;
2057 cq->dbinfo.xid = cq->id;
2058 cq->dbinfo.db = cq->dpi->dbr;
2059 cq->dbinfo.priv_db = res->dpi_tbl.dbr_bar_reg_iomem;
2061 bnxt_qplib_armen_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMENA);
2063 return 0;
2065 fail:
2066 bnxt_qplib_free_hwq(res, &cq->hwq);
2067 exit:
2068 return rc;
2071 int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
2073 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
2074 struct cmdq_destroy_cq req;
2075 struct creq_destroy_cq_resp resp;
2076 u16 total_cnq_events;
2077 u16 cmd_flags = 0;
2078 int rc;
2080 RCFW_CMD_PREP(req, DESTROY_CQ, cmd_flags);
2082 req.cq_cid = cpu_to_le32(cq->id);
2083 rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
2084 (void *)&resp, NULL, 0);
2085 if (rc)
2086 return rc;
2087 total_cnq_events = le16_to_cpu(resp.total_cnq_events);
2088 __wait_for_all_nqes(cq, total_cnq_events);
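/*
 * Waiting here for the reported number of CNQ events to drain keeps
 * the NQ handler from touching this CQ after its queue memory is
 * freed below.
 */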
2089 bnxt_qplib_free_hwq(res, &cq->hwq);
2090 return 0;
2093 static int __flush_sq(struct bnxt_qplib_q *sq, struct bnxt_qplib_qp *qp,
2094 struct bnxt_qplib_cqe **pcqe, int *budget)
2096 u32 sw_prod, sw_cons;
2097 struct bnxt_qplib_cqe *cqe;
2098 int rc = 0;
2100 /* Now complete all outstanding SQEs with FLUSHED_ERR */
2101 sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
2102 cqe = *pcqe;
2103 while (*budget) {
2104 sw_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
2105 if (sw_cons == sw_prod) {
2106 break;
2108 /* Skip the FENCE WQE completions */
2109 if (sq->swq[sw_cons].wr_id == BNXT_QPLIB_FENCE_WRID) {
2110 bnxt_qplib_cancel_phantom_processing(qp);
2111 goto skip_compl;
2113 memset(cqe, 0, sizeof(*cqe));
2114 cqe->status = CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR;
2115 cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2116 cqe->qp_handle = (u64)(unsigned long)qp;
2117 cqe->wr_id = sq->swq[sw_cons].wr_id;
2118 cqe->src_qp = qp->id;
2119 cqe->type = sq->swq[sw_cons].type;
2120 cqe++;
2121 (*budget)--;
2122 skip_compl:
2123 sq->hwq.cons++;
2125 *pcqe = cqe;
2126 if (!(*budget) && HWQ_CMP(sq->hwq.cons, &sq->hwq) != sw_prod)
2127 /* Out of budget */
2128 rc = -EAGAIN;
2130 return rc;
2133 static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
2134 struct bnxt_qplib_cqe **pcqe, int *budget)
2136 struct bnxt_qplib_cqe *cqe;
2137 u32 sw_prod, sw_cons;
2138 int rc = 0;
2139 int opcode = 0;
2141 switch (qp->type) {
2142 case CMDQ_CREATE_QP1_TYPE_GSI:
2143 opcode = CQ_BASE_CQE_TYPE_RES_RAWETH_QP1;
2144 break;
2145 case CMDQ_CREATE_QP_TYPE_RC:
2146 opcode = CQ_BASE_CQE_TYPE_RES_RC;
2147 break;
2148 case CMDQ_CREATE_QP_TYPE_UD:
2149 case CMDQ_CREATE_QP_TYPE_GSI:
2150 opcode = CQ_BASE_CQE_TYPE_RES_UD;
2151 break;
2154 /* Flush the rest of the RQ */
2155 sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
2156 cqe = *pcqe;
2157 while (*budget) {
2158 sw_cons = HWQ_CMP(rq->hwq.cons, &rq->hwq);
2159 if (sw_cons == sw_prod)
2160 break;
2161 memset(cqe, 0, sizeof(*cqe));
2162 cqe->status =
2163 CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR;
2164 cqe->opcode = opcode;
2165 cqe->qp_handle = (u64)(unsigned long)qp;
2166 cqe->wr_id = rq->swq[sw_cons].wr_id;
2167 cqe++;
2168 (*budget)--;
2169 rq->hwq.cons++;
2171 *pcqe = cqe;
2172 if (!*budget && HWQ_CMP(rq->hwq.cons, &rq->hwq) != sw_prod)
2173 /* Out of budget */
2174 rc = -EAGAIN;
2176 return rc;
2179 void bnxt_qplib_mark_qp_error(void *qp_handle)
2181 struct bnxt_qplib_qp *qp = qp_handle;
2183 if (!qp)
2184 return;
2186 /* Must block new posting of SQ and RQ */
2187 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2188 bnxt_qplib_cancel_phantom_processing(qp);
2191 /* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive)
2192 * CQE is tracked from sw_cq_cons to max_elements but is valid only if VALID=1
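/*
 * Rough shape of the WA#9060 flow below, as implemented here: if this
 * WQE's psn_search entry still carries the marker bit, the phantom
 * (fence) completion may yet arrive, so the CQ is re-armed and we bail
 * out with -EAGAIN; while in that "condition" state we peek ahead in
 * the CQ ring until the phantom REQ CQE for the fence WRID shows up,
 * then resume completing in single-step mode.
 */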
2194 static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
2195 u32 cq_cons, u32 sw_sq_cons, u32 cqe_sq_cons)
2197 struct bnxt_qplib_q *sq = &qp->sq;
2198 struct bnxt_qplib_swq *swq;
2199 u32 peek_sw_cq_cons, peek_raw_cq_cons, peek_sq_cons_idx;
2200 struct cq_base *peek_hwcqe, **peek_hw_cqe_ptr;
2201 struct cq_req *peek_req_hwcqe;
2202 struct bnxt_qplib_qp *peek_qp;
2203 struct bnxt_qplib_q *peek_sq;
2204 int i, rc = 0;
2206 /* Normal mode */
2207 /* Check for the psn_search marking before completing */
2208 swq = &sq->swq[sw_sq_cons];
2209 if (swq->psn_search &&
2210 le32_to_cpu(swq->psn_search->flags_next_psn) & 0x80000000) {
2211 /* Unmark */
2212 swq->psn_search->flags_next_psn = cpu_to_le32
2213 (le32_to_cpu(swq->psn_search->flags_next_psn)
2214 & ~0x80000000);
2215 dev_dbg(&cq->hwq.pdev->dev,
2216 "FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n",
2217 cq_cons, qp->id, sw_sq_cons, cqe_sq_cons);
2218 sq->condition = true;
2219 sq->send_phantom = true;
2221 /* TODO: Only ARM if the previous SQE is ARMALL */
2222 bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMALL);
2223 rc = -EAGAIN;
2224 goto out;
2226 if (sq->condition) {
2227 /* Peek at the completions */
2228 peek_raw_cq_cons = cq->hwq.cons;
2229 peek_sw_cq_cons = cq_cons;
2230 i = cq->hwq.max_elements;
2231 while (i--) {
2232 peek_sw_cq_cons = HWQ_CMP((peek_sw_cq_cons), &cq->hwq);
2233 peek_hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
2234 peek_hwcqe = &peek_hw_cqe_ptr[CQE_PG(peek_sw_cq_cons)]
2235 [CQE_IDX(peek_sw_cq_cons)];
2236 /* If the next hwcqe is VALID */
2237 if (CQE_CMP_VALID(peek_hwcqe, peek_raw_cq_cons,
2238 cq->hwq.max_elements)) {
2240 * The valid test of the entry must be done before reading
2241 * any further.
2243 dma_rmb();
2244 /* If the next hwcqe is a REQ */
2245 if ((peek_hwcqe->cqe_type_toggle &
2246 CQ_BASE_CQE_TYPE_MASK) ==
2247 CQ_BASE_CQE_TYPE_REQ) {
2248 peek_req_hwcqe = (struct cq_req *)
2249 peek_hwcqe;
2250 peek_qp = (struct bnxt_qplib_qp *)
2251 ((unsigned long)
2252 le64_to_cpu
2253 (peek_req_hwcqe->qp_handle));
2254 peek_sq = &peek_qp->sq;
2255 peek_sq_cons_idx = HWQ_CMP(le16_to_cpu(
2256 peek_req_hwcqe->sq_cons_idx) - 1,
2257 &sq->hwq);
2258 /* If the hwcqe's sq's wr_id matches */
2259 if (peek_sq == sq &&
2260 sq->swq[peek_sq_cons_idx].wr_id ==
2261 BNXT_QPLIB_FENCE_WRID) {
2263 * Unbreak only if the phantom
2264 * comes back
2266 dev_dbg(&cq->hwq.pdev->dev,
2267 "FP: Got Phantom CQE\n");
2268 sq->condition = false;
2269 sq->single = true;
2270 rc = 0;
2271 goto out;
2274 /* Valid but not the phantom, so keep looping */
2275 } else {
2276 /* Not valid yet, just exit and wait */
2277 rc = -EINVAL;
2278 goto out;
2280 peek_sw_cq_cons++;
2281 peek_raw_cq_cons++;
2283 dev_err(&cq->hwq.pdev->dev,
2284 "Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x\n",
2285 cq_cons, qp->id, sw_sq_cons, cqe_sq_cons);
2286 rc = -EINVAL;
2288 out:
2289 return rc;
2292 static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
2293 struct cq_req *hwcqe,
2294 struct bnxt_qplib_cqe **pcqe, int *budget,
2295 u32 cq_cons, struct bnxt_qplib_qp **lib_qp)
2297 struct bnxt_qplib_qp *qp;
2298 struct bnxt_qplib_q *sq;
2299 struct bnxt_qplib_cqe *cqe;
2300 u32 sw_sq_cons, cqe_sq_cons;
2301 struct bnxt_qplib_swq *swq;
2302 int rc = 0;
2304 qp = (struct bnxt_qplib_qp *)((unsigned long)
2305 le64_to_cpu(hwcqe->qp_handle));
2306 if (!qp) {
2307 dev_err(&cq->hwq.pdev->dev,
2308 "FP: Process Req qp is NULL\n");
2309 return -EINVAL;
2311 sq = &qp->sq;
2313 cqe_sq_cons = HWQ_CMP(le16_to_cpu(hwcqe->sq_cons_idx), &sq->hwq);
2314 if (cqe_sq_cons > sq->hwq.max_elements) {
2315 dev_err(&cq->hwq.pdev->dev,
2316 "FP: CQ Process req reported sq_cons_idx 0x%x which exceeded max 0x%x\n",
2317 cqe_sq_cons, sq->hwq.max_elements);
2318 return -EINVAL;
2321 if (qp->sq.flushed) {
2322 dev_dbg(&cq->hwq.pdev->dev,
2323 "%s: QP in Flush QP = %p\n", __func__, qp);
2324 goto done;
2326 /* We need to walk the sq's swq to fabricate CQEs for all previously
2327 * signaled SWQEs, due to CQE aggregation, from the current sq cons
2328 * up to the cqe_sq_cons
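/*
 * E.g. with the sq cons currently at 5 and a hwcqe reporting
 * sq_cons_idx 9, CQEs are fabricated here for the signaled SWQEs 5
 * through 8 before the hwcqe itself is accounted.
 */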
2330 cqe = *pcqe;
2331 while (*budget) {
2332 sw_sq_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
2333 if (sw_sq_cons == cqe_sq_cons)
2334 /* Done */
2335 break;
2337 swq = &sq->swq[sw_sq_cons];
2338 memset(cqe, 0, sizeof(*cqe));
2339 cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2340 cqe->qp_handle = (u64)(unsigned long)qp;
2341 cqe->src_qp = qp->id;
2342 cqe->wr_id = swq->wr_id;
2343 if (cqe->wr_id == BNXT_QPLIB_FENCE_WRID)
2344 goto skip;
2345 cqe->type = swq->type;
2347 /* For the last CQE, check for status. For errors, regardless
2348 * of the request being signaled or not, it must complete with
2349 * the hwcqe error status
2351 if (HWQ_CMP((sw_sq_cons + 1), &sq->hwq) == cqe_sq_cons &&
2352 hwcqe->status != CQ_REQ_STATUS_OK) {
2353 cqe->status = hwcqe->status;
2354 dev_err(&cq->hwq.pdev->dev,
2355 "FP: CQ Processed Req wr_id[%d] = 0x%llx with status 0x%x\n",
2356 sw_sq_cons, cqe->wr_id, cqe->status);
2357 cqe++;
2358 (*budget)--;
2359 bnxt_qplib_mark_qp_error(qp);
2360 /* Add qp to flush list of the CQ */
2361 bnxt_qplib_add_flush_qp(qp);
2362 } else {
2363 /* Before we complete, do WA 9060 */
2364 if (do_wa9060(qp, cq, cq_cons, sw_sq_cons,
2365 cqe_sq_cons)) {
2366 *lib_qp = qp;
2367 goto out;
2369 if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
2370 cqe->status = CQ_REQ_STATUS_OK;
2371 cqe++;
2372 (*budget)--;
2375 skip:
2376 sq->hwq.cons++;
2377 if (sq->single)
2378 break;
2380 out:
2381 *pcqe = cqe;
2382 if (HWQ_CMP(sq->hwq.cons, &sq->hwq) != cqe_sq_cons) {
2383 /* Out of budget */
2384 rc = -EAGAIN;
2385 goto done;
2388 * Back to normal completion mode only after it has completed all of
2389 * the WC for this CQE
2391 sq->single = false;
2392 done:
2393 return rc;
2396 static void bnxt_qplib_release_srqe(struct bnxt_qplib_srq *srq, u32 tag)
2398 spin_lock(&srq->hwq.lock);
2399 srq->swq[srq->last_idx].next_idx = (int)tag;
2400 srq->last_idx = (int)tag;
2401 srq->swq[srq->last_idx].next_idx = -1;
2402 srq->hwq.cons++; /* Support for SRQE counter */
2403 spin_unlock(&srq->hwq.lock);
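/*
 * The swq entries form a singly linked free list of SRQ tags: the
 * released tag is appended after last_idx and becomes the new tail
 * (next_idx = -1), while cons is bumped only to keep the SRQE count
 * honest, per the comment above.
 */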
2406 static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
2407 struct cq_res_rc *hwcqe,
2408 struct bnxt_qplib_cqe **pcqe,
2409 int *budget)
2411 struct bnxt_qplib_qp *qp;
2412 struct bnxt_qplib_q *rq;
2413 struct bnxt_qplib_srq *srq;
2414 struct bnxt_qplib_cqe *cqe;
2415 u32 wr_id_idx;
2416 int rc = 0;
2418 qp = (struct bnxt_qplib_qp *)((unsigned long)
2419 le64_to_cpu(hwcqe->qp_handle));
2420 if (!qp) {
2421 dev_err(&cq->hwq.pdev->dev, "process_cq RC qp is NULL\n");
2422 return -EINVAL;
2424 if (qp->rq.flushed) {
2425 dev_dbg(&cq->hwq.pdev->dev,
2426 "%s: QP in Flush QP = %p\n", __func__, qp);
2427 goto done;
2430 cqe = *pcqe;
2431 cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2432 cqe->length = le32_to_cpu(hwcqe->length);
2433 cqe->invrkey = le32_to_cpu(hwcqe->imm_data_or_inv_r_key);
2434 cqe->mr_handle = le64_to_cpu(hwcqe->mr_handle);
2435 cqe->flags = le16_to_cpu(hwcqe->flags);
2436 cqe->status = hwcqe->status;
2437 cqe->qp_handle = (u64)(unsigned long)qp;
2439 wr_id_idx = le32_to_cpu(hwcqe->srq_or_rq_wr_id) &
2440 CQ_RES_RC_SRQ_OR_RQ_WR_ID_MASK;
2441 if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
2442 srq = qp->srq;
2443 if (!srq)
2444 return -EINVAL;
2445 if (wr_id_idx >= srq->hwq.max_elements) {
2446 dev_err(&cq->hwq.pdev->dev,
2447 "FP: CQ Process RC wr_id idx 0x%x exceeded SRQ max 0x%x\n",
2448 wr_id_idx, srq->hwq.max_elements);
2449 return -EINVAL;
2451 cqe->wr_id = srq->swq[wr_id_idx].wr_id;
2452 bnxt_qplib_release_srqe(srq, wr_id_idx);
2453 cqe++;
2454 (*budget)--;
2455 *pcqe = cqe;
2456 } else {
2457 rq = &qp->rq;
2458 if (wr_id_idx >= rq->hwq.max_elements) {
2459 dev_err(&cq->hwq.pdev->dev,
2460 "FP: CQ Process RC wr_id idx 0x%x exceeded RQ max 0x%x\n",
2461 wr_id_idx, rq->hwq.max_elements);
2462 return -EINVAL;
2464 cqe->wr_id = rq->swq[wr_id_idx].wr_id;
2465 cqe++;
2466 (*budget)--;
2467 rq->hwq.cons++;
2468 *pcqe = cqe;
2470 if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2471 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2472 /* Add qp to flush list of the CQ */
2473 bnxt_qplib_add_flush_qp(qp);
2477 done:
2478 return rc;
2481 static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
2482 struct cq_res_ud *hwcqe,
2483 struct bnxt_qplib_cqe **pcqe,
2484 int *budget)
2486 struct bnxt_qplib_qp *qp;
2487 struct bnxt_qplib_q *rq;
2488 struct bnxt_qplib_srq *srq;
2489 struct bnxt_qplib_cqe *cqe;
2490 u32 wr_id_idx;
2491 int rc = 0;
2493 qp = (struct bnxt_qplib_qp *)((unsigned long)
2494 le64_to_cpu(hwcqe->qp_handle));
2495 if (!qp) {
2496 dev_err(&cq->hwq.pdev->dev, "process_cq UD qp is NULL\n");
2497 return -EINVAL;
2499 if (qp->rq.flushed) {
2500 dev_dbg(&cq->hwq.pdev->dev,
2501 "%s: QP in Flush QP = %p\n", __func__, qp);
2502 goto done;
2504 cqe = *pcqe;
2505 cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2506 cqe->length = le16_to_cpu(hwcqe->length) & CQ_RES_UD_LENGTH_MASK;
2507 cqe->cfa_meta = le16_to_cpu(hwcqe->cfa_metadata);
2508 cqe->invrkey = le32_to_cpu(hwcqe->imm_data);
2509 cqe->flags = le16_to_cpu(hwcqe->flags);
2510 cqe->status = hwcqe->status;
2511 cqe->qp_handle = (u64)(unsigned long)qp;
2512 /* FIXME: Endianness fix needed for smac */
2513 memcpy(cqe->smac, hwcqe->src_mac, ETH_ALEN);
2514 wr_id_idx = le32_to_cpu(hwcqe->src_qp_high_srq_or_rq_wr_id)
2515 & CQ_RES_UD_SRQ_OR_RQ_WR_ID_MASK;
2516 cqe->src_qp = le16_to_cpu(hwcqe->src_qp_low) |
2517 ((le32_to_cpu(
2518 hwcqe->src_qp_high_srq_or_rq_wr_id) &
2519 CQ_RES_UD_SRC_QP_HIGH_MASK) >> 8);
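/*
 * The source QPN arrives split across the UD CQE: the low 16 bits in
 * src_qp_low and (assuming CQ_RES_UD_SRC_QP_HIGH_MASK selects the top
 * byte of the wr_id word) the high 8 bits in bits 31:24, which the
 * >> 8 above moves down into bits 23:16 of the 24-bit QPN.
 */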
2521 if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
2522 srq = qp->srq;
2523 if (!srq)
2524 return -EINVAL;
2526 if (wr_id_idx >= srq->hwq.max_elements) {
2527 dev_err(&cq->hwq.pdev->dev,
2528 "FP: CQ Process UD wr_id idx 0x%x exceeded SRQ max 0x%x\n",
2529 wr_id_idx, srq->hwq.max_elements);
2530 return -EINVAL;
2532 cqe->wr_id = srq->swq[wr_id_idx].wr_id;
2533 bnxt_qplib_release_srqe(srq, wr_id_idx);
2534 cqe++;
2535 (*budget)--;
2536 *pcqe = cqe;
2537 } else {
2538 rq = &qp->rq;
2539 if (wr_id_idx >= rq->hwq.max_elements) {
2540 dev_err(&cq->hwq.pdev->dev,
2541 "FP: CQ Process UD wr_id idx 0x%x exceeded RQ max 0x%x\n",
2542 wr_id_idx, rq->hwq.max_elements);
2543 return -EINVAL;
2546 cqe->wr_id = rq->swq[wr_id_idx].wr_id;
2547 cqe++;
2548 (*budget)--;
2549 rq->hwq.cons++;
2550 *pcqe = cqe;
2552 if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2553 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2554 /* Add qp to flush list of the CQ */
2555 bnxt_qplib_add_flush_qp(qp);
2558 done:
2559 return rc;
2562 bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
2564 struct cq_base *hw_cqe, **hw_cqe_ptr;
2565 u32 sw_cons, raw_cons;
2566 bool rc = true;
2568 raw_cons = cq->hwq.cons;
2569 sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
2570 hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
2571 hw_cqe = &hw_cqe_ptr[CQE_PG(sw_cons)][CQE_IDX(sw_cons)];
2573 /* Check for Valid bit. If the CQE is valid, return false */
2574 rc = !CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements);
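/*
 * CQE_CMP_VALID presumably matches the CQE's toggle bit against the
 * phase expected for this pass over the ring (derived from raw_cons
 * vs. max_elements); the producer flips the bit on each wrap, so
 * stale entries from the previous pass fail the test.
 */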
2575 return rc;
2578 static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
2579 struct cq_res_raweth_qp1 *hwcqe,
2580 struct bnxt_qplib_cqe **pcqe,
2581 int *budget)
2583 struct bnxt_qplib_qp *qp;
2584 struct bnxt_qplib_q *rq;
2585 struct bnxt_qplib_srq *srq;
2586 struct bnxt_qplib_cqe *cqe;
2587 u32 wr_id_idx;
2588 int rc = 0;
2590 qp = (struct bnxt_qplib_qp *)((unsigned long)
2591 le64_to_cpu(hwcqe->qp_handle));
2592 if (!qp) {
2593 dev_err(&cq->hwq.pdev->dev, "process_cq Raw/QP1 qp is NULL\n");
2594 return -EINVAL;
2596 if (qp->rq.flushed) {
2597 dev_dbg(&cq->hwq.pdev->dev,
2598 "%s: QP in Flush QP = %p\n", __func__, qp);
2599 goto done;
2601 cqe = *pcqe;
2602 cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2603 cqe->flags = le16_to_cpu(hwcqe->flags);
2604 cqe->qp_handle = (u64)(unsigned long)qp;
2606 wr_id_idx =
2607 le32_to_cpu(hwcqe->raweth_qp1_payload_offset_srq_or_rq_wr_id)
2608 & CQ_RES_RAWETH_QP1_SRQ_OR_RQ_WR_ID_MASK;
2609 cqe->src_qp = qp->id;
2610 if (qp->id == 1 && !cqe->length) {
2611 /* Add workaround for the length misdetection */
2612 cqe->length = 296;
2613 } else {
2614 cqe->length = le16_to_cpu(hwcqe->length);
2616 cqe->pkey_index = qp->pkey_index;
2617 memcpy(cqe->smac, qp->smac, 6);
2619 cqe->raweth_qp1_flags = le16_to_cpu(hwcqe->raweth_qp1_flags);
2620 cqe->raweth_qp1_flags2 = le32_to_cpu(hwcqe->raweth_qp1_flags2);
2621 cqe->raweth_qp1_metadata = le32_to_cpu(hwcqe->raweth_qp1_metadata);
2623 if (cqe->flags & CQ_RES_RAWETH_QP1_FLAGS_SRQ_SRQ) {
2624 srq = qp->srq;
2625 if (!srq) {
2626 dev_err(&cq->hwq.pdev->dev,
2627 "FP: SRQ used but not defined??\n");
2628 return -EINVAL;
2630 if (wr_id_idx >= srq->hwq.max_elements) {
2631 dev_err(&cq->hwq.pdev->dev,
2632 "FP: CQ Process Raw/QP1 wr_id idx 0x%x exceeded SRQ max 0x%x\n",
2633 wr_id_idx, srq->hwq.max_elements);
2634 return -EINVAL;
2636 cqe->wr_id = srq->swq[wr_id_idx].wr_id;
2637 bnxt_qplib_release_srqe(srq, wr_id_idx);
2638 cqe++;
2639 (*budget)--;
2640 *pcqe = cqe;
2641 } else {
2642 rq = &qp->rq;
2643 if (wr_id_idx >= rq->hwq.max_elements) {
2644 dev_err(&cq->hwq.pdev->dev,
2645 "FP: CQ Process Raw/QP1 RQ wr_id idx 0x%x exceeded RQ max 0x%x\n",
2646 wr_id_idx, rq->hwq.max_elements);
2647 return -EINVAL;
2649 cqe->wr_id = rq->swq[wr_id_idx].wr_id;
2650 cqe++;
2651 (*budget)--;
2652 rq->hwq.cons++;
2653 *pcqe = cqe;
2655 if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2656 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2657 /* Add qp to flush list of the CQ */
2658 bnxt_qplib_add_flush_qp(qp);
2662 done:
2663 return rc;
2666 static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
2667 struct cq_terminal *hwcqe,
2668 struct bnxt_qplib_cqe **pcqe,
2669 int *budget)
2671 struct bnxt_qplib_qp *qp;
2672 struct bnxt_qplib_q *sq, *rq;
2673 struct bnxt_qplib_cqe *cqe;
2674 u32 sw_cons = 0, cqe_cons;
2675 int rc = 0;
2677 /* Check the Status */
2678 if (hwcqe->status != CQ_TERMINAL_STATUS_OK)
2679 dev_warn(&cq->hwq.pdev->dev,
2680 "FP: CQ Process Terminal Error status = 0x%x\n",
2681 hwcqe->status);
2683 qp = (struct bnxt_qplib_qp *)((unsigned long)
2684 le64_to_cpu(hwcqe->qp_handle));
2685 if (!qp) {
2686 dev_err(&cq->hwq.pdev->dev,
2687 "FP: CQ Process terminal qp is NULL\n");
2688 return -EINVAL;
2691 /* Must block new posting of SQ and RQ */
2692 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2694 sq = &qp->sq;
2695 rq = &qp->rq;
2697 cqe_cons = le16_to_cpu(hwcqe->sq_cons_idx);
2698 if (cqe_cons == 0xFFFF)
2699 goto do_rq;
2701 if (cqe_cons > sq->hwq.max_elements) {
2702 dev_err(&cq->hwq.pdev->dev,
2703 "FP: CQ Process terminal reported sq_cons_idx 0x%x which exceeded max 0x%x\n",
2704 cqe_cons, sq->hwq.max_elements);
2705 goto do_rq;
2708 if (qp->sq.flushed) {
2709 dev_dbg(&cq->hwq.pdev->dev,
2710 "%s: QP in Flush QP = %p\n", __func__, qp);
2711 goto sq_done;
2714 /* A terminal CQE can also subsume successful CQEs aggregated before it,
2715 * so we must complete all CQEs from the current sq's cons up to the
2716 * cq_cons with status OK
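/*
 * Concretely: the SQ is drained with status OK up to sq_cons_idx
 * (0xFFFF meaning "no SQ index"), while the RQ side is handled by
 * simply queueing the QP on the CQ's flush list further below.
 */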
2718 cqe = *pcqe;
2719 while (*budget) {
2720 sw_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
2721 if (sw_cons == cqe_cons)
2722 break;
2723 if (sq->swq[sw_cons].flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
2724 memset(cqe, 0, sizeof(*cqe));
2725 cqe->status = CQ_REQ_STATUS_OK;
2726 cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2727 cqe->qp_handle = (u64)(unsigned long)qp;
2728 cqe->src_qp = qp->id;
2729 cqe->wr_id = sq->swq[sw_cons].wr_id;
2730 cqe->type = sq->swq[sw_cons].type;
2731 cqe++;
2732 (*budget)--;
2734 sq->hwq.cons++;
2736 *pcqe = cqe;
2737 if (!(*budget) && sw_cons != cqe_cons) {
2738 /* Out of budget */
2739 rc = -EAGAIN;
2740 goto sq_done;
2742 sq_done:
2743 if (rc)
2744 return rc;
2745 do_rq:
2746 cqe_cons = le16_to_cpu(hwcqe->rq_cons_idx);
2747 if (cqe_cons == 0xFFFF) {
2748 goto done;
2749 } else if (cqe_cons > rq->hwq.max_elements) {
2750 dev_err(&cq->hwq.pdev->dev,
2751 "FP: CQ Processed terminal reported rq_cons_idx 0x%x exceeds max 0x%x\n",
2752 cqe_cons, rq->hwq.max_elements);
2753 goto done;
2756 if (qp->rq.flushed) {
2757 dev_dbg(&cq->hwq.pdev->dev,
2758 "%s: QP in Flush QP = %p\n", __func__, qp);
2759 rc = 0;
2760 goto done;
2763 /* Terminal CQE requires all posted RQEs to complete with FLUSHED_ERR
2764 * from the current rq->cons to the rq->prod, regardless of the
2765 * rq->cons value the terminal CQE indicates
2768 /* Add qp to flush list of the CQ */
2769 bnxt_qplib_add_flush_qp(qp);
2770 done:
2771 return rc;
2774 static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq,
2775 struct cq_cutoff *hwcqe)
2777 /* Check the Status */
2778 if (hwcqe->status != CQ_CUTOFF_STATUS_OK) {
2779 dev_err(&cq->hwq.pdev->dev,
2780 "FP: CQ Process Cutoff Error status = 0x%x\n",
2781 hwcqe->status);
2782 return -EINVAL;
2784 clear_bit(CQ_FLAGS_RESIZE_IN_PROG, &cq->flags);
2785 wake_up_interruptible(&cq->waitq);
2787 return 0;
2790 int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
2791 struct bnxt_qplib_cqe *cqe,
2792 int num_cqes)
2794 struct bnxt_qplib_qp *qp = NULL;
2795 u32 budget = num_cqes;
2796 unsigned long flags;
2798 spin_lock_irqsave(&cq->flush_lock, flags);
2799 list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
2800 dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing SQ QP= %p\n", qp);
2801 __flush_sq(&qp->sq, qp, &cqe, &budget);
2804 list_for_each_entry(qp, &cq->rqf_head, rq_flush) {
2805 dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing RQ QP= %p\n", qp);
2806 __flush_rq(&qp->rq, qp, &cqe, &budget);
2808 spin_unlock_irqrestore(&cq->flush_lock, flags);
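/*
 * budget counts the room left in the caller's cqe array, so
 * num_cqes - budget is how many flush CQEs __flush_sq()/__flush_rq()
 * actually produced.
 */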
2810 return num_cqes - budget;
2813 int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
2814 int num_cqes, struct bnxt_qplib_qp **lib_qp)
2816 struct cq_base *hw_cqe, **hw_cqe_ptr;
2817 u32 sw_cons, raw_cons;
2818 int budget, rc = 0;
2820 raw_cons = cq->hwq.cons;
2821 budget = num_cqes;
2823 while (budget) {
2824 sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
2825 hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
2826 hw_cqe = &hw_cqe_ptr[CQE_PG(sw_cons)][CQE_IDX(sw_cons)];
2828 /* Check for Valid bit */
2829 if (!CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements))
2830 break;
2833 * The valid test of the entry must be done before reading
2834 * any further.
2836 dma_rmb();
2837 /* Convert the device-specific CQE format to qplib_wc */
2838 switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
2839 case CQ_BASE_CQE_TYPE_REQ:
2840 rc = bnxt_qplib_cq_process_req(cq,
2841 (struct cq_req *)hw_cqe,
2842 &cqe, &budget,
2843 sw_cons, lib_qp);
2844 break;
2845 case CQ_BASE_CQE_TYPE_RES_RC:
2846 rc = bnxt_qplib_cq_process_res_rc(cq,
2847 (struct cq_res_rc *)
2848 hw_cqe, &cqe,
2849 &budget);
2850 break;
2851 case CQ_BASE_CQE_TYPE_RES_UD:
2852 rc = bnxt_qplib_cq_process_res_ud
2853 (cq, (struct cq_res_ud *)hw_cqe, &cqe,
2854 &budget);
2855 break;
2856 case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
2857 rc = bnxt_qplib_cq_process_res_raweth_qp1
2858 (cq, (struct cq_res_raweth_qp1 *)
2859 hw_cqe, &cqe, &budget);
2860 break;
2861 case CQ_BASE_CQE_TYPE_TERMINAL:
2862 rc = bnxt_qplib_cq_process_terminal
2863 (cq, (struct cq_terminal *)hw_cqe,
2864 &cqe, &budget);
2865 break;
2866 case CQ_BASE_CQE_TYPE_CUT_OFF:
2867 bnxt_qplib_cq_process_cutoff
2868 (cq, (struct cq_cutoff *)hw_cqe);
2869 /* Done processing this CQ */
2870 goto exit;
2871 default:
2872 dev_err(&cq->hwq.pdev->dev,
2873 "process_cq unknown type 0x%lx\n",
2874 hw_cqe->cqe_type_toggle &
2875 CQ_BASE_CQE_TYPE_MASK);
2876 rc = -EINVAL;
2877 break;
2879 if (rc < 0) {
2880 if (rc == -EAGAIN)
2881 break;
2882 /* Error while processing the CQE, just skip to the
2883 * next one
2885 dev_err(&cq->hwq.pdev->dev,
2886 "process_cqe error rc = 0x%x\n", rc);
2888 raw_cons++;
2890 if (cq->hwq.cons != raw_cons) {
2891 cq->hwq.cons = raw_cons;
2892 bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ);
2894 exit:
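/*
 * As with the flush path, the return value is the number of entries
 * of the caller's cqe array that were filled; the doorbell above
 * acknowledges everything consumed on this pass.
 */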
2895 return num_cqes - budget;
2898 void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
2900 if (arm_type)
2901 bnxt_qplib_ring_db(&cq->dbinfo, arm_type);
2902 /* Use the cq->arm_state variable to track whether the cq handler needs to run */
2903 atomic_set(&cq->arm_state, 1);
2906 void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp)
2908 flush_workqueue(qp->scq->nq->cqn_wq);
2909 if (qp->scq != qp->rcq)
2910 flush_workqueue(qp->rcq->nq->cqn_wq);