1 /*
2 * Broadcom NetXtreme-E RoCE driver.
4 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
5 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * BSD license below:
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
17 * 1. Redistributions of source code must retain the above copyright
18 * notice, this list of conditions and the following disclaimer.
19 * 2. Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the
22 * distribution.
24 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
26 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
31 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
32 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
33 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
34 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 * Description: Fast Path Operators
37 */
39 #define dev_fmt(fmt) "QPLIB: " fmt
41 #include <linux/interrupt.h>
42 #include <linux/spinlock.h>
43 #include <linux/sched.h>
44 #include <linux/slab.h>
45 #include <linux/pci.h>
46 #include <linux/prefetch.h>
47 #include <linux/if_ether.h>
49 #include "roce_hsi.h"
51 #include "qplib_res.h"
52 #include "qplib_rcfw.h"
53 #include "qplib_sp.h"
54 #include "qplib_fp.h"
56 static void bnxt_qplib_arm_cq_enable(struct bnxt_qplib_cq *cq);
57 static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);
58 static void bnxt_qplib_arm_srq(struct bnxt_qplib_srq *srq, u32 arm_type);
60 static void bnxt_qplib_cancel_phantom_processing(struct bnxt_qplib_qp *qp)
62 qp->sq.condition = false;
63 qp->sq.send_phantom = false;
64 qp->sq.single = false;
67 /* Flush list */
68 static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
70 struct bnxt_qplib_cq *scq, *rcq;
72 scq = qp->scq;
73 rcq = qp->rcq;
75 if (!qp->sq.flushed) {
76 dev_dbg(&scq->hwq.pdev->dev,
77 "FP: Adding to SQ Flush list = %p\n", qp);
78 bnxt_qplib_cancel_phantom_processing(qp);
79 list_add_tail(&qp->sq_flush, &scq->sqf_head);
80 qp->sq.flushed = true;
82 if (!qp->srq) {
83 if (!qp->rq.flushed) {
84 dev_dbg(&rcq->hwq.pdev->dev,
85 "FP: Adding to RQ Flush list = %p\n", qp);
86 list_add_tail(&qp->rq_flush, &rcq->rqf_head);
87 qp->rq.flushed = true;
92 static void bnxt_qplib_acquire_cq_flush_locks(struct bnxt_qplib_qp *qp,
93 unsigned long *flags)
94 __acquires(&qp->scq->flush_lock) __acquires(&qp->rcq->flush_lock)
96 spin_lock_irqsave(&qp->scq->flush_lock, *flags);
97 if (qp->scq == qp->rcq)
98 __acquire(&qp->rcq->flush_lock);
99 else
100 spin_lock(&qp->rcq->flush_lock);
103 static void bnxt_qplib_release_cq_flush_locks(struct bnxt_qplib_qp *qp,
104 unsigned long *flags)
105 __releases(&qp->scq->flush_lock) __releases(&qp->rcq->flush_lock)
107 if (qp->scq == qp->rcq)
108 __release(&qp->rcq->flush_lock);
109 else
110 spin_unlock(&qp->rcq->flush_lock);
111 spin_unlock_irqrestore(&qp->scq->flush_lock, *flags);
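/*
 * Illustrative usage sketch (not new driver code): a QP's SQ and RQ may
 * complete on the same CQ, so unconditionally taking both scq->flush_lock
 * and rcq->flush_lock would self-deadlock when scq == rcq. The helpers
 * above take the second lock only when the CQs differ; the
 * __acquire()/__release() annotations keep sparse's lock-balance checking
 * consistent for the shared-CQ case:
 *
 *	unsigned long flags;
 *
 *	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
 *	// ...walk scq->sqf_head / rcq->rqf_head under both locks...
 *	bnxt_qplib_release_cq_flush_locks(qp, &flags);
 */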
114 void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
116 unsigned long flags;
118 bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
119 __bnxt_qplib_add_flush_qp(qp);
120 bnxt_qplib_release_cq_flush_locks(qp, &flags);
123 static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
125 if (qp->sq.flushed) {
126 qp->sq.flushed = false;
127 list_del(&qp->sq_flush);
129 if (!qp->srq) {
130 if (qp->rq.flushed) {
131 qp->rq.flushed = false;
132 list_del(&qp->rq_flush);
137 void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp)
139 unsigned long flags;
141 bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
142 __clean_cq(qp->scq, (u64)(unsigned long)qp);
143 qp->sq.hwq.prod = 0;
144 qp->sq.hwq.cons = 0;
145 __clean_cq(qp->rcq, (u64)(unsigned long)qp);
146 qp->rq.hwq.prod = 0;
147 qp->rq.hwq.cons = 0;
149 __bnxt_qplib_del_flush_qp(qp);
150 bnxt_qplib_release_cq_flush_locks(qp, &flags);
153 static void bnxt_qpn_cqn_sched_task(struct work_struct *work)
155 struct bnxt_qplib_nq_work *nq_work =
156 container_of(work, struct bnxt_qplib_nq_work, work);
158 struct bnxt_qplib_cq *cq = nq_work->cq;
159 struct bnxt_qplib_nq *nq = nq_work->nq;
161 if (cq && nq) {
162 spin_lock_bh(&cq->compl_lock);
163 if (atomic_read(&cq->arm_state) && nq->cqn_handler) {
164 dev_dbg(&nq->pdev->dev,
165 "%s:Trigger cq = %p event nq = %p\n",
166 __func__, cq, nq);
167 nq->cqn_handler(nq, cq);
169 spin_unlock_bh(&cq->compl_lock);
171 kfree(nq_work);
174 static void bnxt_qplib_free_qp_hdr_buf(struct bnxt_qplib_res *res,
175 struct bnxt_qplib_qp *qp)
177 struct bnxt_qplib_q *rq = &qp->rq;
178 struct bnxt_qplib_q *sq = &qp->sq;
180 if (qp->rq_hdr_buf)
181 dma_free_coherent(&res->pdev->dev,
182 rq->hwq.max_elements * qp->rq_hdr_buf_size,
183 qp->rq_hdr_buf, qp->rq_hdr_buf_map);
184 if (qp->sq_hdr_buf)
185 dma_free_coherent(&res->pdev->dev,
186 sq->hwq.max_elements * qp->sq_hdr_buf_size,
187 qp->sq_hdr_buf, qp->sq_hdr_buf_map);
188 qp->rq_hdr_buf = NULL;
189 qp->sq_hdr_buf = NULL;
190 qp->rq_hdr_buf_map = 0;
191 qp->sq_hdr_buf_map = 0;
192 qp->sq_hdr_buf_size = 0;
193 qp->rq_hdr_buf_size = 0;
196 static int bnxt_qplib_alloc_qp_hdr_buf(struct bnxt_qplib_res *res,
197 struct bnxt_qplib_qp *qp)
199 struct bnxt_qplib_q *rq = &qp->rq;
200 struct bnxt_qplib_q *sq = &qp->sq;
201 int rc = 0;
203 if (qp->sq_hdr_buf_size && sq->hwq.max_elements) {
204 qp->sq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
205 sq->hwq.max_elements *
206 qp->sq_hdr_buf_size,
207 &qp->sq_hdr_buf_map, GFP_KERNEL);
208 if (!qp->sq_hdr_buf) {
209 rc = -ENOMEM;
210 dev_err(&res->pdev->dev,
211 "Failed to create sq_hdr_buf\n");
212 goto fail;
216 if (qp->rq_hdr_buf_size && rq->hwq.max_elements) {
217 qp->rq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
218 rq->hwq.max_elements *
219 qp->rq_hdr_buf_size,
220 &qp->rq_hdr_buf_map,
221 GFP_KERNEL);
222 if (!qp->rq_hdr_buf) {
223 rc = -ENOMEM;
224 dev_err(&res->pdev->dev,
225 "Failed to create rq_hdr_buf\n");
226 goto fail;
229 return 0;
231 fail:
232 bnxt_qplib_free_qp_hdr_buf(res, qp);
233 return rc;
236 static void bnxt_qplib_service_nq(unsigned long data)
238 struct bnxt_qplib_nq *nq = (struct bnxt_qplib_nq *)data;
239 struct bnxt_qplib_hwq *hwq = &nq->hwq;
240 struct nq_base *nqe, **nq_ptr;
241 struct bnxt_qplib_cq *cq;
242 int num_cqne_processed = 0;
243 int num_srqne_processed = 0;
244 u32 sw_cons, raw_cons;
245 u16 type;
246 int budget = nq->budget;
247 uintptr_t q_handle;
248 bool gen_p5 = bnxt_qplib_is_chip_gen_p5(nq->res->cctx);
250 /* Service the NQ until empty */
251 raw_cons = hwq->cons;
252 while (budget--) {
253 sw_cons = HWQ_CMP(raw_cons, hwq);
254 nq_ptr = (struct nq_base **)hwq->pbl_ptr;
255 nqe = &nq_ptr[NQE_PG(sw_cons)][NQE_IDX(sw_cons)];
256 if (!NQE_CMP_VALID(nqe, raw_cons, hwq->max_elements))
257 break;
259 /*
260 * The valid test of the entry must be done first before
261 * reading any further.
262 */
263 dma_rmb();
265 type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
266 switch (type) {
267 case NQ_BASE_TYPE_CQ_NOTIFICATION:
269 struct nq_cn *nqcne = (struct nq_cn *)nqe;
271 q_handle = le32_to_cpu(nqcne->cq_handle_low);
272 q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
273 << 32;
274 cq = (struct bnxt_qplib_cq *)(unsigned long)q_handle;
275 bnxt_qplib_arm_cq_enable(cq);
276 spin_lock_bh(&cq->compl_lock);
277 atomic_set(&cq->arm_state, 0);
278 if (!nq->cqn_handler(nq, (cq)))
279 num_cqne_processed++;
280 else
281 dev_warn(&nq->pdev->dev,
282 "cqn - type 0x%x not handled\n", type);
283 spin_unlock_bh(&cq->compl_lock);
284 break;
286 case NQ_BASE_TYPE_SRQ_EVENT:
288 struct nq_srq_event *nqsrqe =
289 (struct nq_srq_event *)nqe;
291 q_handle = le32_to_cpu(nqsrqe->srq_handle_low);
292 q_handle |= (u64)le32_to_cpu(nqsrqe->srq_handle_high)
293 << 32;
294 bnxt_qplib_arm_srq((struct bnxt_qplib_srq *)q_handle,
295 DBC_DBC_TYPE_SRQ_ARMENA);
296 if (!nq->srqn_handler(nq,
297 (struct bnxt_qplib_srq *)q_handle,
298 nqsrqe->event))
299 num_srqne_processed++;
300 else
301 dev_warn(&nq->pdev->dev,
302 "SRQ event 0x%x not handled\n",
303 nqsrqe->event);
304 break;
306 case NQ_BASE_TYPE_DBQ_EVENT:
307 break;
308 default:
309 dev_warn(&nq->pdev->dev,
310 "nqe with type = 0x%x not handled\n", type);
311 break;
313 raw_cons++;
315 if (hwq->cons != raw_cons) {
316 hwq->cons = raw_cons;
317 bnxt_qplib_ring_nq_db_rearm(nq->bar_reg_iomem, hwq->cons,
318 hwq->max_elements, nq->ring_id,
319 gen_p5);
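/*
 * Consumer-loop sketch: NQE_CMP_VALID() implements the usual toggle-bit
 * scheme for a ring whose producer flips a valid bit on every wrap. In
 * generic form (the names below are illustrative, not the driver macros):
 *
 *	static bool entry_valid(u32 raw_cons, u32 depth, bool entry_toggle)
 *	{
 *		// Consumer pass number: flips once per full wrap of the ring.
 *		bool pass = !!((raw_cons / depth) & 1);
 *
 *		return entry_toggle != pass; // or ==, per hardware polarity
 *	}
 *
 * The dma_rmb() after a successful validity check orders the valid-bit
 * read before the reads of the rest of the entry.
 */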
323 static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
325 struct bnxt_qplib_nq *nq = dev_instance;
326 struct bnxt_qplib_hwq *hwq = &nq->hwq;
327 struct nq_base **nq_ptr;
328 u32 sw_cons;
330 /* Prefetch the NQ element */
331 sw_cons = HWQ_CMP(hwq->cons, hwq);
332 nq_ptr = (struct nq_base **)nq->hwq.pbl_ptr;
333 prefetch(&nq_ptr[NQE_PG(sw_cons)][NQE_IDX(sw_cons)]);
335 /* Fan out to CPU affinitized kthreads? */
336 tasklet_schedule(&nq->worker);
338 return IRQ_HANDLED;
341 void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill)
343 bool gen_p5 = bnxt_qplib_is_chip_gen_p5(nq->res->cctx);
344 tasklet_disable(&nq->worker);
345 /* Mask h/w interrupt */
346 bnxt_qplib_ring_nq_db(nq->bar_reg_iomem, nq->hwq.cons,
347 nq->hwq.max_elements, nq->ring_id, gen_p5);
348 /* Sync with last running IRQ handler */
349 synchronize_irq(nq->vector);
350 if (kill)
351 tasklet_kill(&nq->worker);
352 if (nq->requested) {
353 irq_set_affinity_hint(nq->vector, NULL);
354 free_irq(nq->vector, nq);
355 nq->requested = false;
359 void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
361 if (nq->cqn_wq) {
362 destroy_workqueue(nq->cqn_wq);
363 nq->cqn_wq = NULL;
366 /* Make sure the HW is stopped! */
367 if (nq->requested)
368 bnxt_qplib_nq_stop_irq(nq, true);
370 if (nq->bar_reg_iomem)
371 iounmap(nq->bar_reg_iomem);
372 nq->bar_reg_iomem = NULL;
374 nq->cqn_handler = NULL;
375 nq->srqn_handler = NULL;
376 nq->vector = 0;
379 int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
380 int msix_vector, bool need_init)
382 bool gen_p5 = bnxt_qplib_is_chip_gen_p5(nq->res->cctx);
383 int rc;
385 if (nq->requested)
386 return -EFAULT;
388 nq->vector = msix_vector;
389 if (need_init)
390 tasklet_init(&nq->worker, bnxt_qplib_service_nq,
391 (unsigned long)nq);
392 else
393 tasklet_enable(&nq->worker);
395 snprintf(nq->name, sizeof(nq->name), "bnxt_qplib_nq-%d", nq_indx);
396 rc = request_irq(nq->vector, bnxt_qplib_nq_irq, 0, nq->name, nq);
397 if (rc)
398 return rc;
400 cpumask_clear(&nq->mask);
401 cpumask_set_cpu(nq_indx, &nq->mask);
402 rc = irq_set_affinity_hint(nq->vector, &nq->mask);
403 if (rc) {
404 dev_warn(&nq->pdev->dev,
405 "set affinity failed; vector: %d nq_idx: %d\n",
406 nq->vector, nq_indx);
408 nq->requested = true;
409 bnxt_qplib_ring_nq_db_rearm(nq->bar_reg_iomem, nq->hwq.cons,
410 nq->hwq.max_elements, nq->ring_id, gen_p5);
412 return rc;
415 int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
416 int nq_idx, int msix_vector, int bar_reg_offset,
417 int (*cqn_handler)(struct bnxt_qplib_nq *nq,
418 struct bnxt_qplib_cq *),
419 int (*srqn_handler)(struct bnxt_qplib_nq *nq,
420 struct bnxt_qplib_srq *,
421 u8 event))
423 resource_size_t nq_base;
424 int rc = -1;
426 if (cqn_handler)
427 nq->cqn_handler = cqn_handler;
429 if (srqn_handler)
430 nq->srqn_handler = srqn_handler;
432 /* Have a task to schedule CQ notifiers in the post-send case */
433 nq->cqn_wq = create_singlethread_workqueue("bnxt_qplib_nq");
434 if (!nq->cqn_wq)
435 return -ENOMEM;
437 nq->bar_reg = NQ_CONS_PCI_BAR_REGION;
438 nq->bar_reg_off = bar_reg_offset;
439 nq_base = pci_resource_start(pdev, nq->bar_reg);
440 if (!nq_base) {
441 rc = -ENOMEM;
442 goto fail;
444 /* Unconditionally map 8 bytes to support 57500 series */
445 nq->bar_reg_iomem = ioremap(nq_base + nq->bar_reg_off, 8);
446 if (!nq->bar_reg_iomem) {
447 rc = -ENOMEM;
448 goto fail;
451 rc = bnxt_qplib_nq_start_irq(nq, nq_idx, msix_vector, true);
452 if (rc) {
453 dev_err(&nq->pdev->dev,
454 "Failed to request irq for nq-idx %d\n", nq_idx);
455 goto fail;
458 return 0;
459 fail:
460 bnxt_qplib_disable_nq(nq);
461 return rc;
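/*
 * Typical bring-up order for an NQ (sketch; my_cqn_handler and
 * my_srqn_handler are hypothetical caller-supplied callbacks, everything
 * else is the API above):
 *
 *	rc = bnxt_qplib_alloc_nq(pdev, nq);
 *	if (!rc)
 *		rc = bnxt_qplib_enable_nq(pdev, nq, nq_idx, msix_vector,
 *					  bar_reg_offset, my_cqn_handler,
 *					  my_srqn_handler);
 *
 * Teardown reverses it: bnxt_qplib_disable_nq() then bnxt_qplib_free_nq().
 */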
464 void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq)
466 if (nq->hwq.max_elements) {
467 bnxt_qplib_free_hwq(nq->pdev, &nq->hwq);
468 nq->hwq.max_elements = 0;
472 int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq)
474 u8 hwq_type;
476 nq->pdev = pdev;
477 if (!nq->hwq.max_elements ||
478 nq->hwq.max_elements > BNXT_QPLIB_NQE_MAX_CNT)
479 nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;
480 hwq_type = bnxt_qplib_get_hwq_type(nq->res);
481 if (bnxt_qplib_alloc_init_hwq(nq->pdev, &nq->hwq, NULL,
482 &nq->hwq.max_elements,
483 BNXT_QPLIB_MAX_NQE_ENTRY_SIZE, 0,
484 PAGE_SIZE, hwq_type))
485 return -ENOMEM;
487 nq->budget = 8;
488 return 0;
491 /* SRQ */
492 static void bnxt_qplib_arm_srq(struct bnxt_qplib_srq *srq, u32 arm_type)
494 struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
495 void __iomem *db;
496 u32 sw_prod;
497 u64 val = 0;
499 /* Ring DB */
500 sw_prod = (arm_type == DBC_DBC_TYPE_SRQ_ARM) ?
501 srq->threshold : HWQ_CMP(srq_hwq->prod, srq_hwq);
502 db = (arm_type == DBC_DBC_TYPE_SRQ_ARMENA) ? srq->dbr_base :
503 srq->dpi->dbr;
504 val = ((srq->id << DBC_DBC_XID_SFT) & DBC_DBC_XID_MASK) | arm_type;
505 val <<= 32;
506 val |= (sw_prod << DBC_DBC_INDEX_SFT) & DBC_DBC_INDEX_MASK;
507 writeq(val, db);
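/*
 * Doorbell layout sketch: the 64-bit value packs the control word into the
 * high dword and the ring index into the low dword. For example, with
 * srq->id == 5 and arm_type == DBC_DBC_TYPE_SRQ_ARM (illustrative values):
 *
 *	val = ((5 << DBC_DBC_XID_SFT) & DBC_DBC_XID_MASK) | arm_type;
 *	val <<= 32;
 *	val |= (srq->threshold << DBC_DBC_INDEX_SFT) & DBC_DBC_INDEX_MASK;
 *
 * A single writeq() then updates XID, type and index atomically from the
 * hardware's point of view.
 */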
510 void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
511 struct bnxt_qplib_srq *srq)
513 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
514 struct cmdq_destroy_srq req;
515 struct creq_destroy_srq_resp resp;
516 u16 cmd_flags = 0;
517 int rc;
519 RCFW_CMD_PREP(req, DESTROY_SRQ, cmd_flags);
521 /* Configure the request */
522 req.srq_cid = cpu_to_le32(srq->id);
524 rc = bnxt_qplib_rcfw_send_message(rcfw, (struct cmdq_base *)&req,
525 (struct creq_base *)&resp, NULL, 0);
526 kfree(srq->swq);
527 if (rc)
528 return;
529 bnxt_qplib_free_hwq(res->pdev, &srq->hwq);
532 int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
533 struct bnxt_qplib_srq *srq)
535 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
536 struct cmdq_create_srq req;
537 struct creq_create_srq_resp resp;
538 struct bnxt_qplib_pbl *pbl;
539 u16 cmd_flags = 0;
540 int rc, idx;
542 srq->hwq.max_elements = srq->max_wqe;
543 rc = bnxt_qplib_alloc_init_hwq(res->pdev, &srq->hwq, &srq->sg_info,
544 &srq->hwq.max_elements,
545 BNXT_QPLIB_MAX_RQE_ENTRY_SIZE, 0,
546 PAGE_SIZE, HWQ_TYPE_QUEUE);
547 if (rc)
548 goto exit;
550 srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
551 GFP_KERNEL);
552 if (!srq->swq) {
553 rc = -ENOMEM;
554 goto fail;
557 RCFW_CMD_PREP(req, CREATE_SRQ, cmd_flags);
559 /* Configure the request */
560 req.dpi = cpu_to_le32(srq->dpi->dpi);
561 req.srq_handle = cpu_to_le64((uintptr_t)srq);
563 req.srq_size = cpu_to_le16((u16)srq->hwq.max_elements);
564 pbl = &srq->hwq.pbl[PBL_LVL_0];
565 req.pg_size_lvl = cpu_to_le16((((u16)srq->hwq.level &
566 CMDQ_CREATE_SRQ_LVL_MASK) <<
567 CMDQ_CREATE_SRQ_LVL_SFT) |
568 (pbl->pg_size == ROCE_PG_SIZE_4K ?
569 CMDQ_CREATE_SRQ_PG_SIZE_PG_4K :
570 pbl->pg_size == ROCE_PG_SIZE_8K ?
571 CMDQ_CREATE_SRQ_PG_SIZE_PG_8K :
572 pbl->pg_size == ROCE_PG_SIZE_64K ?
573 CMDQ_CREATE_SRQ_PG_SIZE_PG_64K :
574 pbl->pg_size == ROCE_PG_SIZE_2M ?
575 CMDQ_CREATE_SRQ_PG_SIZE_PG_2M :
576 pbl->pg_size == ROCE_PG_SIZE_8M ?
577 CMDQ_CREATE_SRQ_PG_SIZE_PG_8M :
578 pbl->pg_size == ROCE_PG_SIZE_1G ?
579 CMDQ_CREATE_SRQ_PG_SIZE_PG_1G :
580 CMDQ_CREATE_SRQ_PG_SIZE_PG_4K));
581 req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
582 req.pd_id = cpu_to_le32(srq->pd->id);
583 req.eventq_id = cpu_to_le16(srq->eventq_hw_ring_id);
585 rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
586 (void *)&resp, NULL, 0);
587 if (rc)
588 goto fail;
590 spin_lock_init(&srq->lock);
591 srq->start_idx = 0;
592 srq->last_idx = srq->hwq.max_elements - 1;
593 for (idx = 0; idx < srq->hwq.max_elements; idx++)
594 srq->swq[idx].next_idx = idx + 1;
595 srq->swq[srq->last_idx].next_idx = -1;
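/*
 * The swq array is threaded into a singly linked free list through
 * next_idx, e.g. for max_elements == 4: 0 -> 1 -> 2 -> 3 -> -1.
 * bnxt_qplib_post_srq_recv() pops entries from start_idx and treats
 * start_idx == last_idx as "SRQ full"; indices are returned to the list
 * as receives complete.
 */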
597 srq->id = le32_to_cpu(resp.xid);
598 srq->dbr_base = res->dpi_tbl.dbr_bar_reg_iomem;
599 if (srq->threshold)
600 bnxt_qplib_arm_srq(srq, DBC_DBC_TYPE_SRQ_ARMENA);
601 srq->arm_req = false;
603 return 0;
604 fail:
605 bnxt_qplib_free_hwq(res->pdev, &srq->hwq);
606 kfree(srq->swq);
607 exit:
608 return rc;
611 int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
612 struct bnxt_qplib_srq *srq)
614 struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
615 u32 sw_prod, sw_cons, count = 0;
617 sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
618 sw_cons = HWQ_CMP(srq_hwq->cons, srq_hwq);
620 count = sw_prod > sw_cons ? sw_prod - sw_cons :
621 srq_hwq->max_elements - sw_cons + sw_prod;
622 if (count > srq->threshold) {
623 srq->arm_req = false;
624 bnxt_qplib_arm_srq(srq, DBC_DBC_TYPE_SRQ_ARM);
625 } else {
626 /* Deferred arming */
627 srq->arm_req = true;
630 return 0;
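/*
 * Occupancy arithmetic used above (worked example): with
 * max_elements == 8, sw_prod == 2 and sw_cons == 6 the producer has
 * wrapped, so count = 8 - 6 + 2 = 4 outstanding entries. The SRQ is armed
 * immediately only while count > threshold; otherwise arming is deferred
 * until enough receives are posted.
 */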
633 int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
634 struct bnxt_qplib_srq *srq)
636 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
637 struct cmdq_query_srq req;
638 struct creq_query_srq_resp resp;
639 struct bnxt_qplib_rcfw_sbuf *sbuf;
640 struct creq_query_srq_resp_sb *sb;
641 u16 cmd_flags = 0;
642 int rc = 0;
644 RCFW_CMD_PREP(req, QUERY_SRQ, cmd_flags);
645 req.srq_cid = cpu_to_le32(srq->id);
647 /* Configure the request */
648 sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
649 if (!sbuf)
650 return -ENOMEM;
651 sb = sbuf->sb;
652 rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
653 (void *)sbuf, 0);
654 srq->threshold = le16_to_cpu(sb->srq_limit);
655 bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
657 return rc;
660 int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
661 struct bnxt_qplib_swqe *wqe)
663 struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
664 struct rq_wqe *srqe, **srqe_ptr;
665 struct sq_sge *hw_sge;
666 u32 sw_prod, sw_cons, count = 0;
667 int i, rc = 0, next;
669 spin_lock(&srq_hwq->lock);
670 if (srq->start_idx == srq->last_idx) {
671 dev_err(&srq_hwq->pdev->dev,
672 "FP: SRQ (0x%x) is full!\n", srq->id);
673 rc = -EINVAL;
674 spin_unlock(&srq_hwq->lock);
675 goto done;
677 next = srq->start_idx;
678 srq->start_idx = srq->swq[next].next_idx;
679 spin_unlock(&srq_hwq->lock);
681 sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
682 srqe_ptr = (struct rq_wqe **)srq_hwq->pbl_ptr;
683 srqe = &srqe_ptr[RQE_PG(sw_prod)][RQE_IDX(sw_prod)];
684 memset(srqe, 0, BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
685 /* Fill in the SGEs and calculate wqe_size */
686 for (i = 0, hw_sge = (struct sq_sge *)srqe->data;
687 i < wqe->num_sge; i++, hw_sge++) {
688 hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
689 hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
690 hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
692 srqe->wqe_type = wqe->type;
693 srqe->flags = wqe->flags;
694 srqe->wqe_size = wqe->num_sge +
695 ((offsetof(typeof(*srqe), data) + 15) >> 4);
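/*
 * wqe_size is expressed in 16-byte units (worked example, assuming a
 * 32-byte fixed header, i.e. offsetof(typeof(*srqe), data) == 32): with
 * 2 SGEs, wqe_size = 2 + ((32 + 15) >> 4) = 4, i.e. 64 bytes, since each
 * struct sq_sge also occupies exactly one 16-byte slot.
 */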
696 srqe->wr_id[0] = cpu_to_le32((u32)next);
697 srq->swq[next].wr_id = wqe->wr_id;
699 srq_hwq->prod++;
701 spin_lock(&srq_hwq->lock);
702 sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
703 /* Retain srq_hwq->cons for this logic;
704 * the lock is actually only required to
705 * read srq_hwq->cons.
706 */
707 sw_cons = HWQ_CMP(srq_hwq->cons, srq_hwq);
708 count = sw_prod > sw_cons ? sw_prod - sw_cons :
709 srq_hwq->max_elements - sw_cons + sw_prod;
710 spin_unlock(&srq_hwq->lock);
711 /* Ring DB */
712 bnxt_qplib_arm_srq(srq, DBC_DBC_TYPE_SRQ);
713 if (srq->arm_req && count > srq->threshold) {
714 srq->arm_req = false;
715 bnxt_qplib_arm_srq(srq, DBC_DBC_TYPE_SRQ_ARM);
717 done:
718 return rc;
721 /* QP */
722 int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
724 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
725 struct cmdq_create_qp1 req;
726 struct creq_create_qp1_resp resp;
727 struct bnxt_qplib_pbl *pbl;
728 struct bnxt_qplib_q *sq = &qp->sq;
729 struct bnxt_qplib_q *rq = &qp->rq;
730 int rc;
731 u16 cmd_flags = 0;
732 u32 qp_flags = 0;
734 RCFW_CMD_PREP(req, CREATE_QP1, cmd_flags);
736 /* General */
737 req.type = qp->type;
738 req.dpi = cpu_to_le32(qp->dpi->dpi);
739 req.qp_handle = cpu_to_le64(qp->qp_handle);
741 /* SQ */
742 sq->hwq.max_elements = sq->max_wqe;
743 rc = bnxt_qplib_alloc_init_hwq(res->pdev, &sq->hwq, NULL,
744 &sq->hwq.max_elements,
745 BNXT_QPLIB_MAX_SQE_ENTRY_SIZE, 0,
746 PAGE_SIZE, HWQ_TYPE_QUEUE);
747 if (rc)
748 goto exit;
750 sq->swq = kcalloc(sq->hwq.max_elements, sizeof(*sq->swq), GFP_KERNEL);
751 if (!sq->swq) {
752 rc = -ENOMEM;
753 goto fail_sq;
755 pbl = &sq->hwq.pbl[PBL_LVL_0];
756 req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
757 req.sq_pg_size_sq_lvl =
758 ((sq->hwq.level & CMDQ_CREATE_QP1_SQ_LVL_MASK)
759 << CMDQ_CREATE_QP1_SQ_LVL_SFT) |
760 (pbl->pg_size == ROCE_PG_SIZE_4K ?
761 CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_4K :
762 pbl->pg_size == ROCE_PG_SIZE_8K ?
763 CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_8K :
764 pbl->pg_size == ROCE_PG_SIZE_64K ?
765 CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_64K :
766 pbl->pg_size == ROCE_PG_SIZE_2M ?
767 CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_2M :
768 pbl->pg_size == ROCE_PG_SIZE_8M ?
769 CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_8M :
770 pbl->pg_size == ROCE_PG_SIZE_1G ?
771 CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_1G :
772 CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_4K);
774 if (qp->scq)
775 req.scq_cid = cpu_to_le32(qp->scq->id);
777 qp_flags |= CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE;
779 /* RQ */
780 if (rq->max_wqe) {
781 rq->hwq.max_elements = qp->rq.max_wqe;
782 rc = bnxt_qplib_alloc_init_hwq(res->pdev, &rq->hwq, NULL,
783 &rq->hwq.max_elements,
784 BNXT_QPLIB_MAX_RQE_ENTRY_SIZE, 0,
785 PAGE_SIZE, HWQ_TYPE_QUEUE);
786 if (rc)
787 goto fail_sq;
789 rq->swq = kcalloc(rq->hwq.max_elements, sizeof(*rq->swq),
790 GFP_KERNEL);
791 if (!rq->swq) {
792 rc = -ENOMEM;
793 goto fail_rq;
795 pbl = &rq->hwq.pbl[PBL_LVL_0];
796 req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
797 req.rq_pg_size_rq_lvl =
798 ((rq->hwq.level & CMDQ_CREATE_QP1_RQ_LVL_MASK) <<
799 CMDQ_CREATE_QP1_RQ_LVL_SFT) |
800 (pbl->pg_size == ROCE_PG_SIZE_4K ?
801 CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_4K :
802 pbl->pg_size == ROCE_PG_SIZE_8K ?
803 CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_8K :
804 pbl->pg_size == ROCE_PG_SIZE_64K ?
805 CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_64K :
806 pbl->pg_size == ROCE_PG_SIZE_2M ?
807 CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_2M :
808 pbl->pg_size == ROCE_PG_SIZE_8M ?
809 CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_8M :
810 pbl->pg_size == ROCE_PG_SIZE_1G ?
811 CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_1G :
812 CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_4K);
813 if (qp->rcq)
814 req.rcq_cid = cpu_to_le32(qp->rcq->id);
817 /* Header buffer - allow hdr_buf to be passed in */
818 rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
819 if (rc) {
820 rc = -ENOMEM;
821 goto fail;
823 req.qp_flags = cpu_to_le32(qp_flags);
824 req.sq_size = cpu_to_le32(sq->hwq.max_elements);
825 req.rq_size = cpu_to_le32(rq->hwq.max_elements);
827 req.sq_fwo_sq_sge =
828 cpu_to_le16((sq->max_sge & CMDQ_CREATE_QP1_SQ_SGE_MASK) <<
829 CMDQ_CREATE_QP1_SQ_SGE_SFT);
830 req.rq_fwo_rq_sge =
831 cpu_to_le16((rq->max_sge & CMDQ_CREATE_QP1_RQ_SGE_MASK) <<
832 CMDQ_CREATE_QP1_RQ_SGE_SFT);
834 req.pd_id = cpu_to_le32(qp->pd->id);
836 rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
837 (void *)&resp, NULL, 0);
838 if (rc)
839 goto fail;
841 qp->id = le32_to_cpu(resp.xid);
842 qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
843 rcfw->qp_tbl[qp->id].qp_id = qp->id;
844 rcfw->qp_tbl[qp->id].qp_handle = (void *)qp;
846 return 0;
848 fail:
849 bnxt_qplib_free_qp_hdr_buf(res, qp);
850 fail_rq:
851 bnxt_qplib_free_hwq(res->pdev, &rq->hwq);
852 kfree(rq->swq);
853 fail_sq:
854 bnxt_qplib_free_hwq(res->pdev, &sq->hwq);
855 kfree(sq->swq);
856 exit:
857 return rc;
860 int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
862 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
863 unsigned long int psn_search, poff = 0;
864 struct sq_psn_search **psn_search_ptr;
865 struct bnxt_qplib_q *sq = &qp->sq;
866 struct bnxt_qplib_q *rq = &qp->rq;
867 int i, rc, req_size, psn_sz = 0;
868 struct sq_send **hw_sq_send_ptr;
869 struct creq_create_qp_resp resp;
870 struct bnxt_qplib_hwq *xrrq;
871 u16 cmd_flags = 0, max_ssge;
872 struct cmdq_create_qp req;
873 struct bnxt_qplib_pbl *pbl;
874 u32 qp_flags = 0;
875 u16 max_rsge;
877 RCFW_CMD_PREP(req, CREATE_QP, cmd_flags);
879 /* General */
880 req.type = qp->type;
881 req.dpi = cpu_to_le32(qp->dpi->dpi);
882 req.qp_handle = cpu_to_le64(qp->qp_handle);
884 /* SQ */
885 if (qp->type == CMDQ_CREATE_QP_TYPE_RC) {
886 psn_sz = bnxt_qplib_is_chip_gen_p5(res->cctx) ?
887 sizeof(struct sq_psn_search_ext) :
888 sizeof(struct sq_psn_search);
890 sq->hwq.max_elements = sq->max_wqe;
891 rc = bnxt_qplib_alloc_init_hwq(res->pdev, &sq->hwq, &sq->sg_info,
892 &sq->hwq.max_elements,
893 BNXT_QPLIB_MAX_SQE_ENTRY_SIZE,
894 psn_sz,
895 PAGE_SIZE, HWQ_TYPE_QUEUE);
896 if (rc)
897 goto exit;
899 sq->swq = kcalloc(sq->hwq.max_elements, sizeof(*sq->swq), GFP_KERNEL);
900 if (!sq->swq) {
901 rc = -ENOMEM;
902 goto fail_sq;
904 hw_sq_send_ptr = (struct sq_send **)sq->hwq.pbl_ptr;
905 if (psn_sz) {
906 psn_search_ptr = (struct sq_psn_search **)
907 &hw_sq_send_ptr[get_sqe_pg
908 (sq->hwq.max_elements)];
909 psn_search = (unsigned long int)
910 &hw_sq_send_ptr[get_sqe_pg(sq->hwq.max_elements)]
911 [get_sqe_idx(sq->hwq.max_elements)];
912 if (psn_search & ~PAGE_MASK) {
913 /* If the psn_search does not start on a page boundary,
914 * then calculate the offset
915 */
916 poff = (psn_search & ~PAGE_MASK) /
917 BNXT_QPLIB_MAX_PSNE_ENTRY_SIZE;
919 for (i = 0; i < sq->hwq.max_elements; i++) {
920 sq->swq[i].psn_search =
921 &psn_search_ptr[get_psne_pg(i + poff)]
922 [get_psne_idx(i + poff)];
923 /* psn_ext will be used only for P5 chips. */
924 sq->swq[i].psn_ext =
925 (struct sq_psn_search_ext *)
926 &psn_search_ptr[get_psne_pg(i + poff)]
927 [get_psne_idx(i + poff)];
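/*
 * Layout sketch: the PSN search area begins immediately after the last
 * SQE slot. If that boundary is not page aligned, poff converts the byte
 * offset into whole PSN-entry slots, so SQ entry i maps to PSN slot
 * (i + poff) via the get_psne_pg()/get_psne_idx() helpers.
 */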
930 pbl = &sq->hwq.pbl[PBL_LVL_0];
931 req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
932 req.sq_pg_size_sq_lvl =
933 ((sq->hwq.level & CMDQ_CREATE_QP_SQ_LVL_MASK)
934 << CMDQ_CREATE_QP_SQ_LVL_SFT) |
935 (pbl->pg_size == ROCE_PG_SIZE_4K ?
936 CMDQ_CREATE_QP_SQ_PG_SIZE_PG_4K :
937 pbl->pg_size == ROCE_PG_SIZE_8K ?
938 CMDQ_CREATE_QP_SQ_PG_SIZE_PG_8K :
939 pbl->pg_size == ROCE_PG_SIZE_64K ?
940 CMDQ_CREATE_QP_SQ_PG_SIZE_PG_64K :
941 pbl->pg_size == ROCE_PG_SIZE_2M ?
942 CMDQ_CREATE_QP_SQ_PG_SIZE_PG_2M :
943 pbl->pg_size == ROCE_PG_SIZE_8M ?
944 CMDQ_CREATE_QP_SQ_PG_SIZE_PG_8M :
945 pbl->pg_size == ROCE_PG_SIZE_1G ?
946 CMDQ_CREATE_QP_SQ_PG_SIZE_PG_1G :
947 CMDQ_CREATE_QP_SQ_PG_SIZE_PG_4K);
949 if (qp->scq)
950 req.scq_cid = cpu_to_le32(qp->scq->id);
952 qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_RESERVED_LKEY_ENABLE;
953 qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FR_PMR_ENABLED;
954 if (qp->sig_type)
955 qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION;
957 /* RQ */
958 if (rq->max_wqe) {
959 rq->hwq.max_elements = rq->max_wqe;
960 rc = bnxt_qplib_alloc_init_hwq(res->pdev, &rq->hwq,
961 &rq->sg_info,
962 &rq->hwq.max_elements,
963 BNXT_QPLIB_MAX_RQE_ENTRY_SIZE, 0,
964 PAGE_SIZE, HWQ_TYPE_QUEUE);
965 if (rc)
966 goto fail_sq;
968 rq->swq = kcalloc(rq->hwq.max_elements, sizeof(*rq->swq),
969 GFP_KERNEL);
970 if (!rq->swq) {
971 rc = -ENOMEM;
972 goto fail_rq;
974 pbl = &rq->hwq.pbl[PBL_LVL_0];
975 req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
976 req.rq_pg_size_rq_lvl =
977 ((rq->hwq.level & CMDQ_CREATE_QP_RQ_LVL_MASK) <<
978 CMDQ_CREATE_QP_RQ_LVL_SFT) |
979 (pbl->pg_size == ROCE_PG_SIZE_4K ?
980 CMDQ_CREATE_QP_RQ_PG_SIZE_PG_4K :
981 pbl->pg_size == ROCE_PG_SIZE_8K ?
982 CMDQ_CREATE_QP_RQ_PG_SIZE_PG_8K :
983 pbl->pg_size == ROCE_PG_SIZE_64K ?
984 CMDQ_CREATE_QP_RQ_PG_SIZE_PG_64K :
985 pbl->pg_size == ROCE_PG_SIZE_2M ?
986 CMDQ_CREATE_QP_RQ_PG_SIZE_PG_2M :
987 pbl->pg_size == ROCE_PG_SIZE_8M ?
988 CMDQ_CREATE_QP_RQ_PG_SIZE_PG_8M :
989 pbl->pg_size == ROCE_PG_SIZE_1G ?
990 CMDQ_CREATE_QP_RQ_PG_SIZE_PG_1G :
991 CMDQ_CREATE_QP_RQ_PG_SIZE_PG_4K);
992 } else {
993 /* SRQ */
994 if (qp->srq) {
995 qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_SRQ_USED;
996 req.srq_cid = cpu_to_le32(qp->srq->id);
1000 if (qp->rcq)
1001 req.rcq_cid = cpu_to_le32(qp->rcq->id);
1002 req.qp_flags = cpu_to_le32(qp_flags);
1003 req.sq_size = cpu_to_le32(sq->hwq.max_elements);
1004 req.rq_size = cpu_to_le32(rq->hwq.max_elements);
1005 qp->sq_hdr_buf = NULL;
1006 qp->rq_hdr_buf = NULL;
1008 rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
1009 if (rc)
1010 goto fail_rq;
1012 /* CTRL-22434: Irrespective of the requested SGE count on the SQ
1013 * always create the QP with max send sges possible if the requested
1014 * inline size is greater than 0.
1015 */
1016 max_ssge = qp->max_inline_data ? 6 : sq->max_sge;
1017 req.sq_fwo_sq_sge = cpu_to_le16(
1018 ((max_ssge & CMDQ_CREATE_QP_SQ_SGE_MASK)
1019 << CMDQ_CREATE_QP_SQ_SGE_SFT) | 0);
1020 max_rsge = bnxt_qplib_is_chip_gen_p5(res->cctx) ? 6 : rq->max_sge;
1021 req.rq_fwo_rq_sge = cpu_to_le16(
1022 ((max_rsge & CMDQ_CREATE_QP_RQ_SGE_MASK)
1023 << CMDQ_CREATE_QP_RQ_SGE_SFT) | 0);
1024 /* ORRQ and IRRQ */
1025 if (psn_sz) {
1026 xrrq = &qp->orrq;
1027 xrrq->max_elements =
1028 ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
1029 req_size = xrrq->max_elements *
1030 BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE + PAGE_SIZE - 1;
1031 req_size &= ~(PAGE_SIZE - 1);
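/*
 * Page round-up arithmetic (worked example, PAGE_SIZE == 4096): for a raw
 * size of 5000 bytes, (5000 + 4095) & ~4095 == 8192, so the ORRQ (and the
 * IRRQ below) are always sized in whole pages for the context-memory
 * allocation.
 */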
1032 rc = bnxt_qplib_alloc_init_hwq(res->pdev, xrrq, NULL,
1033 &xrrq->max_elements,
1034 BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE,
1035 0, req_size, HWQ_TYPE_CTX);
1036 if (rc)
1037 goto fail_buf_free;
1038 pbl = &xrrq->pbl[PBL_LVL_0];
1039 req.orrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);
1041 xrrq = &qp->irrq;
1042 xrrq->max_elements = IRD_LIMIT_TO_IRRQ_SLOTS(
1043 qp->max_dest_rd_atomic);
1044 req_size = xrrq->max_elements *
1045 BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE + PAGE_SIZE - 1;
1046 req_size &= ~(PAGE_SIZE - 1);
1048 rc = bnxt_qplib_alloc_init_hwq(res->pdev, xrrq, NULL,
1049 &xrrq->max_elements,
1050 BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE,
1051 0, req_size, HWQ_TYPE_CTX);
1052 if (rc)
1053 goto fail_orrq;
1055 pbl = &xrrq->pbl[PBL_LVL_0];
1056 req.irrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);
1058 req.pd_id = cpu_to_le32(qp->pd->id);
1060 rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
1061 (void *)&resp, NULL, 0);
1062 if (rc)
1063 goto fail;
1065 qp->id = le32_to_cpu(resp.xid);
1066 qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
1067 qp->cctx = res->cctx;
1068 INIT_LIST_HEAD(&qp->sq_flush);
1069 INIT_LIST_HEAD(&qp->rq_flush);
1070 rcfw->qp_tbl[qp->id].qp_id = qp->id;
1071 rcfw->qp_tbl[qp->id].qp_handle = (void *)qp;
1073 return 0;
1075 fail:
1076 if (qp->irrq.max_elements)
1077 bnxt_qplib_free_hwq(res->pdev, &qp->irrq);
1078 fail_orrq:
1079 if (qp->orrq.max_elements)
1080 bnxt_qplib_free_hwq(res->pdev, &qp->orrq);
1081 fail_buf_free:
1082 bnxt_qplib_free_qp_hdr_buf(res, qp);
1083 fail_rq:
1084 bnxt_qplib_free_hwq(res->pdev, &rq->hwq);
1085 kfree(rq->swq);
1086 fail_sq:
1087 bnxt_qplib_free_hwq(res->pdev, &sq->hwq);
1088 kfree(sq->swq);
1089 exit:
1090 return rc;
1093 static void __modify_flags_from_init_state(struct bnxt_qplib_qp *qp)
1095 switch (qp->state) {
1096 case CMDQ_MODIFY_QP_NEW_STATE_RTR:
1097 /* INIT->RTR, configure the path_mtu to the default
1098 * 2048 if not being requested
1099 */
1100 if (!(qp->modify_flags &
1101 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)) {
1102 qp->modify_flags |=
1103 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
1104 qp->path_mtu =
1105 CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
1107 qp->modify_flags &=
1108 ~CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
1109 /* Bono FW requires the max_dest_rd_atomic to be >= 1 */
1110 if (qp->max_dest_rd_atomic < 1)
1111 qp->max_dest_rd_atomic = 1;
1112 qp->modify_flags &= ~CMDQ_MODIFY_QP_MODIFY_MASK_SRC_MAC;
1113 /* Bono FW 20.6.5 requires SGID_INDEX configuration */
1114 if (!(qp->modify_flags &
1115 CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)) {
1116 qp->modify_flags |=
1117 CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX;
1118 qp->ah.sgid_index = 0;
1120 break;
1121 default:
1122 break;
1126 static void __modify_flags_from_rtr_state(struct bnxt_qplib_qp *qp)
1128 switch (qp->state) {
1129 case CMDQ_MODIFY_QP_NEW_STATE_RTS:
1130 /* Bono FW requires the max_rd_atomic to be >= 1 */
1131 if (qp->max_rd_atomic < 1)
1132 qp->max_rd_atomic = 1;
1133 /* Bono FW does not allow PKEY_INDEX,
1134 * DGID, FLOW_LABEL, SGID_INDEX, HOP_LIMIT,
1135 * TRAFFIC_CLASS, DEST_MAC, PATH_MTU, RQ_PSN,
1136 * MIN_RNR_TIMER, MAX_DEST_RD_ATOMIC, DEST_QP_ID
1137 * modification
1138 */
1139 qp->modify_flags &=
1140 ~(CMDQ_MODIFY_QP_MODIFY_MASK_PKEY |
1141 CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
1142 CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
1143 CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
1144 CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
1145 CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
1146 CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
1147 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU |
1148 CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN |
1149 CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER |
1150 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC |
1151 CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID);
1152 break;
1153 default:
1154 break;
1158 static void __filter_modify_flags(struct bnxt_qplib_qp *qp)
1160 switch (qp->cur_qp_state) {
1161 case CMDQ_MODIFY_QP_NEW_STATE_RESET:
1162 break;
1163 case CMDQ_MODIFY_QP_NEW_STATE_INIT:
1164 __modify_flags_from_init_state(qp);
1165 break;
1166 case CMDQ_MODIFY_QP_NEW_STATE_RTR:
1167 __modify_flags_from_rtr_state(qp);
1168 break;
1169 case CMDQ_MODIFY_QP_NEW_STATE_RTS:
1170 break;
1171 case CMDQ_MODIFY_QP_NEW_STATE_SQD:
1172 break;
1173 case CMDQ_MODIFY_QP_NEW_STATE_SQE:
1174 break;
1175 case CMDQ_MODIFY_QP_NEW_STATE_ERR:
1176 break;
1177 default:
1178 break;
1182 int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
1184 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1185 struct cmdq_modify_qp req;
1186 struct creq_modify_qp_resp resp;
1187 u16 cmd_flags = 0, pkey;
1188 u32 temp32[4];
1189 u32 bmask;
1190 int rc;
1192 RCFW_CMD_PREP(req, MODIFY_QP, cmd_flags);
1194 /* Filter out the qp_attr_mask based on the state->new transition */
1195 __filter_modify_flags(qp);
1196 bmask = qp->modify_flags;
1197 req.modify_mask = cpu_to_le32(qp->modify_flags);
1198 req.qp_cid = cpu_to_le32(qp->id);
1199 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_STATE) {
1200 req.network_type_en_sqd_async_notify_new_state =
1201 (qp->state & CMDQ_MODIFY_QP_NEW_STATE_MASK) |
1202 (qp->en_sqd_async_notify ?
1203 CMDQ_MODIFY_QP_EN_SQD_ASYNC_NOTIFY : 0);
1205 req.network_type_en_sqd_async_notify_new_state |= qp->nw_type;
1207 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS)
1208 req.access = qp->access;
1210 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY) {
1211 if (!bnxt_qplib_get_pkey(res, &res->pkey_tbl,
1212 qp->pkey_index, &pkey))
1213 req.pkey = cpu_to_le16(pkey);
1215 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_QKEY)
1216 req.qkey = cpu_to_le32(qp->qkey);
1218 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DGID) {
1219 memcpy(temp32, qp->ah.dgid.data, sizeof(struct bnxt_qplib_gid));
1220 req.dgid[0] = cpu_to_le32(temp32[0]);
1221 req.dgid[1] = cpu_to_le32(temp32[1]);
1222 req.dgid[2] = cpu_to_le32(temp32[2]);
1223 req.dgid[3] = cpu_to_le32(temp32[3]);
1225 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL)
1226 req.flow_label = cpu_to_le32(qp->ah.flow_label);
1228 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)
1229 req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id
1230 [qp->ah.sgid_index]);
1232 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT)
1233 req.hop_limit = qp->ah.hop_limit;
1235 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS)
1236 req.traffic_class = qp->ah.traffic_class;
1238 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC)
1239 memcpy(req.dest_mac, qp->ah.dmac, 6);
1241 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)
1242 req.path_mtu = qp->path_mtu;
1244 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT)
1245 req.timeout = qp->timeout;
1247 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT)
1248 req.retry_cnt = qp->retry_cnt;
1250 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY)
1251 req.rnr_retry = qp->rnr_retry;
1253 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER)
1254 req.min_rnr_timer = qp->min_rnr_timer;
1256 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN)
1257 req.rq_psn = cpu_to_le32(qp->rq.psn);
1259 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN)
1260 req.sq_psn = cpu_to_le32(qp->sq.psn);
1262 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC)
1263 req.max_rd_atomic =
1264 ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
1266 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC)
1267 req.max_dest_rd_atomic =
1268 IRD_LIMIT_TO_IRRQ_SLOTS(qp->max_dest_rd_atomic);
1270 req.sq_size = cpu_to_le32(qp->sq.hwq.max_elements);
1271 req.rq_size = cpu_to_le32(qp->rq.hwq.max_elements);
1272 req.sq_sge = cpu_to_le16(qp->sq.max_sge);
1273 req.rq_sge = cpu_to_le16(qp->rq.max_sge);
1274 req.max_inline_data = cpu_to_le32(qp->max_inline_data);
1275 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID)
1276 req.dest_qp_id = cpu_to_le32(qp->dest_qpn);
1278 req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id);
1280 rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
1281 (void *)&resp, NULL, 0);
1282 if (rc)
1283 return rc;
1284 qp->cur_qp_state = qp->state;
1285 return 0;
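/*
 * Caller pattern for the mask-driven modify (sketch; the values are
 * illustrative): only fields whose bit is set in modify_flags are sent
 * to FW, after __filter_modify_flags() prunes bits the current state
 * transition does not allow.
 *
 *	qp->modify_flags = CMDQ_MODIFY_QP_MODIFY_MASK_STATE |
 *			   CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
 *	qp->state = CMDQ_MODIFY_QP_NEW_STATE_RTR;
 *	qp->path_mtu = CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
 *	rc = bnxt_qplib_modify_qp(res, qp);
 */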
1288 int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
1290 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1291 struct cmdq_query_qp req;
1292 struct creq_query_qp_resp resp;
1293 struct bnxt_qplib_rcfw_sbuf *sbuf;
1294 struct creq_query_qp_resp_sb *sb;
1295 u16 cmd_flags = 0;
1296 u32 temp32[4];
1297 int i, rc = 0;
1299 RCFW_CMD_PREP(req, QUERY_QP, cmd_flags);
1301 sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
1302 if (!sbuf)
1303 return -ENOMEM;
1304 sb = sbuf->sb;
1306 req.qp_cid = cpu_to_le32(qp->id);
1307 req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
1308 rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
1309 (void *)sbuf, 0);
1310 if (rc)
1311 goto bail;
1312 /* Extract the context from the side buffer */
1313 qp->state = sb->en_sqd_async_notify_state &
1314 CREQ_QUERY_QP_RESP_SB_STATE_MASK;
1315 qp->en_sqd_async_notify = sb->en_sqd_async_notify_state &
1316 CREQ_QUERY_QP_RESP_SB_EN_SQD_ASYNC_NOTIFY ?
1317 true : false;
1318 qp->access = sb->access;
1319 qp->pkey_index = le16_to_cpu(sb->pkey);
1320 qp->qkey = le32_to_cpu(sb->qkey);
1322 temp32[0] = le32_to_cpu(sb->dgid[0]);
1323 temp32[1] = le32_to_cpu(sb->dgid[1]);
1324 temp32[2] = le32_to_cpu(sb->dgid[2]);
1325 temp32[3] = le32_to_cpu(sb->dgid[3]);
1326 memcpy(qp->ah.dgid.data, temp32, sizeof(qp->ah.dgid.data));
1328 qp->ah.flow_label = le32_to_cpu(sb->flow_label);
1330 qp->ah.sgid_index = 0;
1331 for (i = 0; i < res->sgid_tbl.max; i++) {
1332 if (res->sgid_tbl.hw_id[i] == le16_to_cpu(sb->sgid_index)) {
1333 qp->ah.sgid_index = i;
1334 break;
1337 if (i == res->sgid_tbl.max)
1338 dev_warn(&res->pdev->dev, "SGID not found\n");
1340 qp->ah.hop_limit = sb->hop_limit;
1341 qp->ah.traffic_class = sb->traffic_class;
1342 memcpy(qp->ah.dmac, sb->dest_mac, 6);
1343 qp->ah.vlan_id = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
1344 CREQ_QUERY_QP_RESP_SB_VLAN_ID_MASK) >>
1345 CREQ_QUERY_QP_RESP_SB_VLAN_ID_SFT;
1346 qp->path_mtu = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
1347 CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) >>
1348 CREQ_QUERY_QP_RESP_SB_PATH_MTU_SFT;
1349 qp->timeout = sb->timeout;
1350 qp->retry_cnt = sb->retry_cnt;
1351 qp->rnr_retry = sb->rnr_retry;
1352 qp->min_rnr_timer = sb->min_rnr_timer;
1353 qp->rq.psn = le32_to_cpu(sb->rq_psn);
1354 qp->max_rd_atomic = ORRQ_SLOTS_TO_ORD_LIMIT(sb->max_rd_atomic);
1355 qp->sq.psn = le32_to_cpu(sb->sq_psn);
1356 qp->max_dest_rd_atomic =
1357 IRRQ_SLOTS_TO_IRD_LIMIT(sb->max_dest_rd_atomic);
1358 qp->sq.max_wqe = qp->sq.hwq.max_elements;
1359 qp->rq.max_wqe = qp->rq.hwq.max_elements;
1360 qp->sq.max_sge = le16_to_cpu(sb->sq_sge);
1361 qp->rq.max_sge = le16_to_cpu(sb->rq_sge);
1362 qp->max_inline_data = le32_to_cpu(sb->max_inline_data);
1363 qp->dest_qpn = le32_to_cpu(sb->dest_qp_id);
1364 memcpy(qp->smac, sb->src_mac, 6);
1365 qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id);
1366 bail:
1367 bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
1368 return rc;
1371 static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
1373 struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
1374 struct cq_base *hw_cqe, **hw_cqe_ptr;
1375 int i;
1377 for (i = 0; i < cq_hwq->max_elements; i++) {
1378 hw_cqe_ptr = (struct cq_base **)cq_hwq->pbl_ptr;
1379 hw_cqe = &hw_cqe_ptr[CQE_PG(i)][CQE_IDX(i)];
1380 if (!CQE_CMP_VALID(hw_cqe, i, cq_hwq->max_elements))
1381 continue;
1382 /*
1383 * The valid test of the entry must be done first before
1384 * reading any further.
1385 */
1386 dma_rmb();
1387 switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
1388 case CQ_BASE_CQE_TYPE_REQ:
1389 case CQ_BASE_CQE_TYPE_TERMINAL:
1391 struct cq_req *cqe = (struct cq_req *)hw_cqe;
1393 if (qp == le64_to_cpu(cqe->qp_handle))
1394 cqe->qp_handle = 0;
1395 break;
1397 case CQ_BASE_CQE_TYPE_RES_RC:
1398 case CQ_BASE_CQE_TYPE_RES_UD:
1399 case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
1401 struct cq_res_rc *cqe = (struct cq_res_rc *)hw_cqe;
1403 if (qp == le64_to_cpu(cqe->qp_handle))
1404 cqe->qp_handle = 0;
1405 break;
1407 default:
1408 break;
1413 int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
1414 struct bnxt_qplib_qp *qp)
1416 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1417 struct cmdq_destroy_qp req;
1418 struct creq_destroy_qp_resp resp;
1419 u16 cmd_flags = 0;
1420 int rc;
1422 rcfw->qp_tbl[qp->id].qp_id = BNXT_QPLIB_QP_ID_INVALID;
1423 rcfw->qp_tbl[qp->id].qp_handle = NULL;
1425 RCFW_CMD_PREP(req, DESTROY_QP, cmd_flags);
1427 req.qp_cid = cpu_to_le32(qp->id);
1428 rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
1429 (void *)&resp, NULL, 0);
1430 if (rc) {
1431 rcfw->qp_tbl[qp->id].qp_id = qp->id;
1432 rcfw->qp_tbl[qp->id].qp_handle = qp;
1433 return rc;
1436 return 0;
1439 void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res,
1440 struct bnxt_qplib_qp *qp)
1442 bnxt_qplib_free_qp_hdr_buf(res, qp);
1443 bnxt_qplib_free_hwq(res->pdev, &qp->sq.hwq);
1444 kfree(qp->sq.swq);
1446 bnxt_qplib_free_hwq(res->pdev, &qp->rq.hwq);
1447 kfree(qp->rq.swq);
1449 if (qp->irrq.max_elements)
1450 bnxt_qplib_free_hwq(res->pdev, &qp->irrq);
1451 if (qp->orrq.max_elements)
1452 bnxt_qplib_free_hwq(res->pdev, &qp->orrq);
1456 void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
1457 struct bnxt_qplib_sge *sge)
1459 struct bnxt_qplib_q *sq = &qp->sq;
1460 u32 sw_prod;
1462 memset(sge, 0, sizeof(*sge));
1464 if (qp->sq_hdr_buf) {
1465 sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
1466 sge->addr = (dma_addr_t)(qp->sq_hdr_buf_map +
1467 sw_prod * qp->sq_hdr_buf_size);
1468 sge->lkey = 0xFFFFFFFF;
1469 sge->size = qp->sq_hdr_buf_size;
1470 return qp->sq_hdr_buf + sw_prod * sge->size;
1472 return NULL;
1475 u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp)
1477 struct bnxt_qplib_q *rq = &qp->rq;
1479 return HWQ_CMP(rq->hwq.prod, &rq->hwq);
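/*
 * HWQ_CMP() sketch: prod and cons are free-running counters. Assuming a
 * power-of-two ring depth, the macro reduces to something like
 *
 *	#define HWQ_CMP(idx, hwq) ((idx) & ((hwq)->max_elements - 1))
 *
 * so callers index and compare with the wrapped value while the raw
 * counters keep increasing monotonically.
 */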
1482 dma_addr_t bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp, u32 index)
1484 return (qp->rq_hdr_buf_map + index * qp->rq_hdr_buf_size);
1487 void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
1488 struct bnxt_qplib_sge *sge)
1490 struct bnxt_qplib_q *rq = &qp->rq;
1491 u32 sw_prod;
1493 memset(sge, 0, sizeof(*sge));
1495 if (qp->rq_hdr_buf) {
1496 sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
1497 sge->addr = (dma_addr_t)(qp->rq_hdr_buf_map +
1498 sw_prod * qp->rq_hdr_buf_size);
1499 sge->lkey = 0xFFFFFFFF;
1500 sge->size = qp->rq_hdr_buf_size;
1501 return qp->rq_hdr_buf + sw_prod * sge->size;
1503 return NULL;
1506 void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp)
1508 struct bnxt_qplib_q *sq = &qp->sq;
1509 u32 sw_prod;
1510 u64 val = 0;
1512 val = (((qp->id << DBC_DBC_XID_SFT) & DBC_DBC_XID_MASK) |
1513 DBC_DBC_TYPE_SQ);
1514 val <<= 32;
1515 sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
1516 val |= (sw_prod << DBC_DBC_INDEX_SFT) & DBC_DBC_INDEX_MASK;
1517 /* Flush all the WQE writes to HW */
1518 writeq(val, qp->dpi->dbr);
1521 int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
1522 struct bnxt_qplib_swqe *wqe)
1524 struct bnxt_qplib_q *sq = &qp->sq;
1525 struct bnxt_qplib_swq *swq;
1526 struct sq_send *hw_sq_send_hdr, **hw_sq_send_ptr;
1527 struct sq_sge *hw_sge;
1528 struct bnxt_qplib_nq_work *nq_work = NULL;
1529 bool sch_handler = false;
1530 u32 sw_prod;
1531 u8 wqe_size16;
1532 int i, rc = 0, data_len = 0, pkt_num = 0;
1533 __le32 temp32;
1535 if (qp->state != CMDQ_MODIFY_QP_NEW_STATE_RTS) {
1536 if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1537 sch_handler = true;
1538 dev_dbg(&sq->hwq.pdev->dev,
1539 "%s Error QP. Scheduling for poll_cq\n",
1540 __func__);
1541 goto queue_err;
1545 if (bnxt_qplib_queue_full(sq)) {
1546 dev_err(&sq->hwq.pdev->dev,
1547 "prod = %#x cons = %#x qdepth = %#x delta = %#x\n",
1548 sq->hwq.prod, sq->hwq.cons, sq->hwq.max_elements,
1549 sq->q_full_delta);
1550 rc = -ENOMEM;
1551 goto done;
1553 sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
1554 swq = &sq->swq[sw_prod];
1555 swq->wr_id = wqe->wr_id;
1556 swq->type = wqe->type;
1557 swq->flags = wqe->flags;
1558 if (qp->sig_type)
1559 swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;
1560 swq->start_psn = sq->psn & BTH_PSN_MASK;
1562 hw_sq_send_ptr = (struct sq_send **)sq->hwq.pbl_ptr;
1563 hw_sq_send_hdr = &hw_sq_send_ptr[get_sqe_pg(sw_prod)]
1564 [get_sqe_idx(sw_prod)];
1566 memset(hw_sq_send_hdr, 0, BNXT_QPLIB_MAX_SQE_ENTRY_SIZE);
1568 if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE) {
1569 /* Copy the inline data */
1570 if (wqe->inline_len > BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) {
1571 dev_warn(&sq->hwq.pdev->dev,
1572 "Inline data length > 96 detected\n");
1573 data_len = BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH;
1574 } else {
1575 data_len = wqe->inline_len;
1577 memcpy(hw_sq_send_hdr->data, wqe->inline_data, data_len);
1578 wqe_size16 = (data_len + 15) >> 4;
1579 } else {
1580 for (i = 0, hw_sge = (struct sq_sge *)hw_sq_send_hdr->data;
1581 i < wqe->num_sge; i++, hw_sge++) {
1582 hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
1583 hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
1584 hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
1585 data_len += wqe->sg_list[i].size;
1587 /* Each SGE entry = 1 WQE size16 */
1588 wqe_size16 = wqe->num_sge;
1589 /* HW requires the wqe size to have room for at least one SGE
1590 * even if none was supplied by the ULP
1591 */
1592 if (!wqe->num_sge)
1593 wqe_size16++;
1596 /* Specifics */
1597 switch (wqe->type) {
1598 case BNXT_QPLIB_SWQE_TYPE_SEND:
1599 if (qp->type == CMDQ_CREATE_QP1_TYPE_GSI) {
1600 /* Assemble info for Raw Ethertype QPs */
1601 struct sq_send_raweth_qp1 *sqe =
1602 (struct sq_send_raweth_qp1 *)hw_sq_send_hdr;
1604 sqe->wqe_type = wqe->type;
1605 sqe->flags = wqe->flags;
1606 sqe->wqe_size = wqe_size16 +
1607 ((offsetof(typeof(*sqe), data) + 15) >> 4);
1608 sqe->cfa_action = cpu_to_le16(wqe->rawqp1.cfa_action);
1609 sqe->lflags = cpu_to_le16(wqe->rawqp1.lflags);
1610 sqe->length = cpu_to_le32(data_len);
1611 sqe->cfa_meta = cpu_to_le32((wqe->rawqp1.cfa_meta &
1612 SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_MASK) <<
1613 SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_SFT);
1615 break;
1617 /* fall through */
1618 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
1619 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
1621 struct sq_send *sqe = (struct sq_send *)hw_sq_send_hdr;
1623 sqe->wqe_type = wqe->type;
1624 sqe->flags = wqe->flags;
1625 sqe->wqe_size = wqe_size16 +
1626 ((offsetof(typeof(*sqe), data) + 15) >> 4);
1627 sqe->inv_key_or_imm_data = cpu_to_le32(
1628 wqe->send.inv_key);
1629 if (qp->type == CMDQ_CREATE_QP_TYPE_UD ||
1630 qp->type == CMDQ_CREATE_QP_TYPE_GSI) {
1631 sqe->q_key = cpu_to_le32(wqe->send.q_key);
1632 sqe->dst_qp = cpu_to_le32(
1633 wqe->send.dst_qp & SQ_SEND_DST_QP_MASK);
1634 sqe->length = cpu_to_le32(data_len);
1635 sqe->avid = cpu_to_le32(wqe->send.avid &
1636 SQ_SEND_AVID_MASK);
1637 sq->psn = (sq->psn + 1) & BTH_PSN_MASK;
1638 } else {
1639 sqe->length = cpu_to_le32(data_len);
1640 sqe->dst_qp = 0;
1641 sqe->avid = 0;
1642 if (qp->mtu)
1643 pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
1644 if (!pkt_num)
1645 pkt_num = 1;
1646 sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
1648 break;
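/*
 * PSN accounting (worked example): for an RC send with qp->mtu == 1024
 * and data_len == 3000, pkt_num = (3000 + 1023) / 1024 = 3, so the SQ PSN
 * advances by one per MTU-sized packet and is masked to the 24-bit BTH
 * PSN space; UD/GSI sends above consume exactly one PSN.
 */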
1650 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
1651 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
1652 case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
1654 struct sq_rdma *sqe = (struct sq_rdma *)hw_sq_send_hdr;
1656 sqe->wqe_type = wqe->type;
1657 sqe->flags = wqe->flags;
1658 sqe->wqe_size = wqe_size16 +
1659 ((offsetof(typeof(*sqe), data) + 15) >> 4);
1660 sqe->imm_data = cpu_to_le32(wqe->rdma.inv_key);
1661 sqe->length = cpu_to_le32((u32)data_len);
1662 sqe->remote_va = cpu_to_le64(wqe->rdma.remote_va);
1663 sqe->remote_key = cpu_to_le32(wqe->rdma.r_key);
1664 if (qp->mtu)
1665 pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
1666 if (!pkt_num)
1667 pkt_num = 1;
1668 sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
1669 break;
1671 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
1672 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
1674 struct sq_atomic *sqe = (struct sq_atomic *)hw_sq_send_hdr;
1676 sqe->wqe_type = wqe->type;
1677 sqe->flags = wqe->flags;
1678 sqe->remote_key = cpu_to_le32(wqe->atomic.r_key);
1679 sqe->remote_va = cpu_to_le64(wqe->atomic.remote_va);
1680 sqe->swap_data = cpu_to_le64(wqe->atomic.swap_data);
1681 sqe->cmp_data = cpu_to_le64(wqe->atomic.cmp_data);
1682 if (qp->mtu)
1683 pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
1684 if (!pkt_num)
1685 pkt_num = 1;
1686 sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
1687 break;
1689 case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
1691 struct sq_localinvalidate *sqe =
1692 (struct sq_localinvalidate *)hw_sq_send_hdr;
1694 sqe->wqe_type = wqe->type;
1695 sqe->flags = wqe->flags;
1696 sqe->inv_l_key = cpu_to_le32(wqe->local_inv.inv_l_key);
1698 break;
1700 case BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR:
1702 struct sq_fr_pmr *sqe = (struct sq_fr_pmr *)hw_sq_send_hdr;
1704 sqe->wqe_type = wqe->type;
1705 sqe->flags = wqe->flags;
1706 sqe->access_cntl = wqe->frmr.access_cntl |
1707 SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
1708 sqe->zero_based_page_size_log =
1709 (wqe->frmr.pg_sz_log & SQ_FR_PMR_PAGE_SIZE_LOG_MASK) <<
1710 SQ_FR_PMR_PAGE_SIZE_LOG_SFT |
1711 (wqe->frmr.zero_based ? SQ_FR_PMR_ZERO_BASED : 0);
1712 sqe->l_key = cpu_to_le32(wqe->frmr.l_key);
1713 temp32 = cpu_to_le32(wqe->frmr.length);
1714 memcpy(sqe->length, &temp32, sizeof(wqe->frmr.length));
1715 sqe->numlevels_pbl_page_size_log =
1716 ((wqe->frmr.pbl_pg_sz_log <<
1717 SQ_FR_PMR_PBL_PAGE_SIZE_LOG_SFT) &
1718 SQ_FR_PMR_PBL_PAGE_SIZE_LOG_MASK) |
1719 ((wqe->frmr.levels << SQ_FR_PMR_NUMLEVELS_SFT) &
1720 SQ_FR_PMR_NUMLEVELS_MASK);
1722 for (i = 0; i < wqe->frmr.page_list_len; i++)
1723 wqe->frmr.pbl_ptr[i] = cpu_to_le64(
1724 wqe->frmr.page_list[i] |
1725 PTU_PTE_VALID);
1726 sqe->pblptr = cpu_to_le64(wqe->frmr.pbl_dma_ptr);
1727 sqe->va = cpu_to_le64(wqe->frmr.va);
1729 break;
1731 case BNXT_QPLIB_SWQE_TYPE_BIND_MW:
1733 struct sq_bind *sqe = (struct sq_bind *)hw_sq_send_hdr;
1735 sqe->wqe_type = wqe->type;
1736 sqe->flags = wqe->flags;
1737 sqe->access_cntl = wqe->bind.access_cntl;
1738 sqe->mw_type_zero_based = wqe->bind.mw_type |
1739 (wqe->bind.zero_based ? SQ_BIND_ZERO_BASED : 0);
1740 sqe->parent_l_key = cpu_to_le32(wqe->bind.parent_l_key);
1741 sqe->l_key = cpu_to_le32(wqe->bind.r_key);
1742 sqe->va = cpu_to_le64(wqe->bind.va);
1743 temp32 = cpu_to_le32(wqe->bind.length);
1744 memcpy(&sqe->length, &temp32, sizeof(wqe->bind.length));
1745 break;
1747 default:
1748 /* Bad wqe, return error */
1749 rc = -EINVAL;
1750 goto done;
1752 swq->next_psn = sq->psn & BTH_PSN_MASK;
1753 if (swq->psn_search) {
1754 u32 opcd_spsn;
1755 u32 flg_npsn;
1757 opcd_spsn = ((swq->start_psn << SQ_PSN_SEARCH_START_PSN_SFT) &
1758 SQ_PSN_SEARCH_START_PSN_MASK);
1759 opcd_spsn |= ((wqe->type << SQ_PSN_SEARCH_OPCODE_SFT) &
1760 SQ_PSN_SEARCH_OPCODE_MASK);
1761 flg_npsn = ((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) &
1762 SQ_PSN_SEARCH_NEXT_PSN_MASK);
1763 if (bnxt_qplib_is_chip_gen_p5(qp->cctx)) {
1764 swq->psn_ext->opcode_start_psn =
1765 cpu_to_le32(opcd_spsn);
1766 swq->psn_ext->flags_next_psn =
1767 cpu_to_le32(flg_npsn);
1768 } else {
1769 swq->psn_search->opcode_start_psn =
1770 cpu_to_le32(opcd_spsn);
1771 swq->psn_search->flags_next_psn =
1772 cpu_to_le32(flg_npsn);
1775 queue_err:
1776 if (sch_handler) {
1777 /* Store the ULP info in the software structures */
1778 sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
1779 swq = &sq->swq[sw_prod];
1780 swq->wr_id = wqe->wr_id;
1781 swq->type = wqe->type;
1782 swq->flags = wqe->flags;
1783 if (qp->sig_type)
1784 swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;
1785 swq->start_psn = sq->psn & BTH_PSN_MASK;
1787 sq->hwq.prod++;
1788 qp->wqe_cnt++;
1790 done:
1791 if (sch_handler) {
1792 nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
1793 if (nq_work) {
1794 nq_work->cq = qp->scq;
1795 nq_work->nq = qp->scq->nq;
1796 INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
1797 queue_work(qp->scq->nq->cqn_wq, &nq_work->work);
1798 } else {
1799 dev_err(&sq->hwq.pdev->dev,
1800 "FP: Failed to allocate SQ nq_work!\n");
1801 rc = -ENOMEM;
1804 return rc;
1807 void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp)
1809 struct bnxt_qplib_q *rq = &qp->rq;
1810 u32 sw_prod;
1811 u64 val = 0;
1813 val = (((qp->id << DBC_DBC_XID_SFT) & DBC_DBC_XID_MASK) |
1814 DBC_DBC_TYPE_RQ);
1815 val <<= 32;
1816 sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
1817 val |= (sw_prod << DBC_DBC_INDEX_SFT) & DBC_DBC_INDEX_MASK;
1818 /* Flush the writes to the HW Rx WQE before ringing the Rx DB */
1819 writeq(val, qp->dpi->dbr);
1822 int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
1823 struct bnxt_qplib_swqe *wqe)
1825 struct bnxt_qplib_q *rq = &qp->rq;
1826 struct rq_wqe *rqe, **rqe_ptr;
1827 struct sq_sge *hw_sge;
1828 struct bnxt_qplib_nq_work *nq_work = NULL;
1829 bool sch_handler = false;
1830 u32 sw_prod;
1831 int i, rc = 0;
1833 if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1834 sch_handler = true;
1835 dev_dbg(&rq->hwq.pdev->dev,
1836 "%s: Error QP. Scheduling for poll_cq\n", __func__);
1837 goto queue_err;
1839 if (bnxt_qplib_queue_full(rq)) {
1840 dev_err(&rq->hwq.pdev->dev,
1841 "FP: QP (0x%x) RQ is full!\n", qp->id);
1842 rc = -EINVAL;
1843 goto done;
1845 sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
1846 rq->swq[sw_prod].wr_id = wqe->wr_id;
1848 rqe_ptr = (struct rq_wqe **)rq->hwq.pbl_ptr;
1849 rqe = &rqe_ptr[RQE_PG(sw_prod)][RQE_IDX(sw_prod)];
1851 memset(rqe, 0, BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
1853 /* Fill in the SGEs and calculate wqe_size */
1854 for (i = 0, hw_sge = (struct sq_sge *)rqe->data;
1855 i < wqe->num_sge; i++, hw_sge++) {
1856 hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
1857 hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
1858 hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
1860 rqe->wqe_type = wqe->type;
1861 rqe->flags = wqe->flags;
1862 rqe->wqe_size = wqe->num_sge +
1863 ((offsetof(typeof(*rqe), data) + 15) >> 4);
1864 /* HW requires the wqe size to have room for at least one SGE
1865 * even if none was supplied by the ULP
1866 */
1867 if (!wqe->num_sge)
1868 rqe->wqe_size++;
1870 /* Supply the rqe->wr_id index to the wr_id_tbl for now */
1871 rqe->wr_id[0] = cpu_to_le32(sw_prod);
1873 queue_err:
1874 if (sch_handler) {
1875 /* Store the ULP info in the software structures */
1876 sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
1877 rq->swq[sw_prod].wr_id = wqe->wr_id;
1880 rq->hwq.prod++;
1881 if (sch_handler) {
1882 nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
1883 if (nq_work) {
1884 nq_work->cq = qp->rcq;
1885 nq_work->nq = qp->rcq->nq;
1886 INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
1887 queue_work(qp->rcq->nq->cqn_wq, &nq_work->work);
1888 } else {
1889 dev_err(&rq->hwq.pdev->dev,
1890 "FP: Failed to allocate RQ nq_work!\n");
1891 rc = -ENOMEM;
1894 done:
1895 return rc;
1898 /* CQ */
1900 /* Spinlock must be held */
1901 static void bnxt_qplib_arm_cq_enable(struct bnxt_qplib_cq *cq)
1903 u64 val = 0;
1905 val = ((cq->id << DBC_DBC_XID_SFT) & DBC_DBC_XID_MASK) |
1906 DBC_DBC_TYPE_CQ_ARMENA;
1907 val <<= 32;
1908 /* Flush memory writes before enabling the CQ */
1909 writeq(val, cq->dbr_base);
1912 static void bnxt_qplib_arm_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
1914 struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
1915 u32 sw_cons;
1916 u64 val = 0;
1918 /* Ring DB */
1919 val = ((cq->id << DBC_DBC_XID_SFT) & DBC_DBC_XID_MASK) | arm_type;
1920 val <<= 32;
1921 sw_cons = HWQ_CMP(cq_hwq->cons, cq_hwq);
1922 val |= (sw_cons << DBC_DBC_INDEX_SFT) & DBC_DBC_INDEX_MASK;
1923 /* flush memory writes before arming the CQ */
1924 writeq(val, cq->dpi->dbr);
1925 }
1927 int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
1928 {
1929 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1930 struct cmdq_create_cq req;
1931 struct creq_create_cq_resp resp;
1932 struct bnxt_qplib_pbl *pbl;
1933 u16 cmd_flags = 0;
1934 int rc;
1936 cq->hwq.max_elements = cq->max_wqe;
1937 rc = bnxt_qplib_alloc_init_hwq(res->pdev, &cq->hwq, &cq->sg_info,
1938 &cq->hwq.max_elements,
1939 BNXT_QPLIB_MAX_CQE_ENTRY_SIZE, 0,
1940 PAGE_SIZE, HWQ_TYPE_QUEUE);
1941 if (rc)
1942 goto exit;
1944 RCFW_CMD_PREP(req, CREATE_CQ, cmd_flags);
1946 if (!cq->dpi) {
1947 dev_err(&rcfw->pdev->dev,
1948 "FP: CREATE_CQ failed due to NULL DPI\n");
1949 rc = -EINVAL; /* free the hwq allocated above instead of leaking it */
1950 goto fail;
}
1951 req.dpi = cpu_to_le32(cq->dpi->dpi);
1952 req.cq_handle = cpu_to_le64(cq->cq_handle);
1954 req.cq_size = cpu_to_le32(cq->hwq.max_elements);
1955 pbl = &cq->hwq.pbl[PBL_LVL_0];
1956 req.pg_size_lvl = cpu_to_le32(
1957 ((cq->hwq.level & CMDQ_CREATE_CQ_LVL_MASK) <<
1958 CMDQ_CREATE_CQ_LVL_SFT) |
1959 (pbl->pg_size == ROCE_PG_SIZE_4K ? CMDQ_CREATE_CQ_PG_SIZE_PG_4K :
1960 pbl->pg_size == ROCE_PG_SIZE_8K ? CMDQ_CREATE_CQ_PG_SIZE_PG_8K :
1961 pbl->pg_size == ROCE_PG_SIZE_64K ? CMDQ_CREATE_CQ_PG_SIZE_PG_64K :
1962 pbl->pg_size == ROCE_PG_SIZE_2M ? CMDQ_CREATE_CQ_PG_SIZE_PG_2M :
1963 pbl->pg_size == ROCE_PG_SIZE_8M ? CMDQ_CREATE_CQ_PG_SIZE_PG_8M :
1964 pbl->pg_size == ROCE_PG_SIZE_1G ? CMDQ_CREATE_CQ_PG_SIZE_PG_1G :
1965 CMDQ_CREATE_CQ_PG_SIZE_PG_4K));
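/* The conditional chain above maps the level-0 PBL page size to its
 * CMDQ_CREATE_CQ_PG_SIZE_* encoding, falling back to 4K for any
 * unrecognized size.
 */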
1967 req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
1969 req.cq_fco_cnq_id = cpu_to_le32(
1970 (cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) <<
1971 CMDQ_CREATE_CQ_CNQ_ID_SFT);
1973 rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
1974 (void *)&resp, NULL, 0);
1975 if (rc)
1976 goto fail;
1978 cq->id = le32_to_cpu(resp.xid);
1979 cq->dbr_base = res->dpi_tbl.dbr_bar_reg_iomem;
1980 cq->period = BNXT_QPLIB_QUEUE_START_PERIOD;
1981 init_waitqueue_head(&cq->waitq);
1982 INIT_LIST_HEAD(&cq->sqf_head);
1983 INIT_LIST_HEAD(&cq->rqf_head);
1984 spin_lock_init(&cq->compl_lock);
1985 spin_lock_init(&cq->flush_lock);
1987 bnxt_qplib_arm_cq_enable(cq);
1988 return 0;
1990 fail:
1991 bnxt_qplib_free_hwq(res->pdev, &cq->hwq);
1992 exit:
1993 return rc;
1994 }
1996 int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
1997 {
1998 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1999 struct cmdq_destroy_cq req;
2000 struct creq_destroy_cq_resp resp;
2001 u16 cmd_flags = 0;
2002 int rc;
2004 RCFW_CMD_PREP(req, DESTROY_CQ, cmd_flags);
2006 req.cq_cid = cpu_to_le32(cq->id);
2007 rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
2008 (void *)&resp, NULL, 0);
2009 if (rc)
2010 return rc;
2011 bnxt_qplib_free_hwq(res->pdev, &cq->hwq);
2012 return 0;
2013 }
2015 static int __flush_sq(struct bnxt_qplib_q *sq, struct bnxt_qplib_qp *qp,
2016 struct bnxt_qplib_cqe **pcqe, int *budget)
2017 {
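/* *pcqe and *budget are in/out cursors shared with the caller: every
 * fabricated flush CQE advances the CQE buffer and consumes budget,
 * and -EAGAIN asks the caller to retry with fresh budget.
 */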
2018 u32 sw_prod, sw_cons;
2019 struct bnxt_qplib_cqe *cqe;
2020 int rc = 0;
2022 /* Now complete all outstanding SQEs with FLUSHED_ERR */
2023 sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
2024 cqe = *pcqe;
2025 while (*budget) {
2026 sw_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
2027 if (sw_cons == sw_prod) {
2028 break;
2029 }
2030 /* Skip the FENCE WQE completions */
2031 if (sq->swq[sw_cons].wr_id == BNXT_QPLIB_FENCE_WRID) {
2032 bnxt_qplib_cancel_phantom_processing(qp);
2033 goto skip_compl;
2034 }
2035 memset(cqe, 0, sizeof(*cqe));
2036 cqe->status = CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR;
2037 cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2038 cqe->qp_handle = (u64)(unsigned long)qp;
2039 cqe->wr_id = sq->swq[sw_cons].wr_id;
2040 cqe->src_qp = qp->id;
2041 cqe->type = sq->swq[sw_cons].type;
2042 cqe++;
2043 (*budget)--;
2044 skip_compl:
2045 sq->hwq.cons++;
2046 }
2047 *pcqe = cqe;
2048 if (!(*budget) && HWQ_CMP(sq->hwq.cons, &sq->hwq) != sw_prod)
2049 /* Out of budget */
2050 rc = -EAGAIN;
2052 return rc;
2053 }
2055 static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
2056 struct bnxt_qplib_cqe **pcqe, int *budget)
2057 {
2058 struct bnxt_qplib_cqe *cqe;
2059 u32 sw_prod, sw_cons;
2060 int rc = 0;
2061 int opcode = 0;
2063 switch (qp->type) {
2064 case CMDQ_CREATE_QP1_TYPE_GSI:
2065 opcode = CQ_BASE_CQE_TYPE_RES_RAWETH_QP1;
2066 break;
2067 case CMDQ_CREATE_QP_TYPE_RC:
2068 opcode = CQ_BASE_CQE_TYPE_RES_RC;
2069 break;
2070 case CMDQ_CREATE_QP_TYPE_UD:
2071 case CMDQ_CREATE_QP_TYPE_GSI:
2072 opcode = CQ_BASE_CQE_TYPE_RES_UD;
2073 break;
2074 }
2076 /* Flush the rest of the RQ */
2077 sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
2078 cqe = *pcqe;
2079 while (*budget) {
2080 sw_cons = HWQ_CMP(rq->hwq.cons, &rq->hwq);
2081 if (sw_cons == sw_prod)
2082 break;
2083 memset(cqe, 0, sizeof(*cqe));
2084 cqe->status =
2085 CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR;
2086 cqe->opcode = opcode;
2087 cqe->qp_handle = (unsigned long)qp;
2088 cqe->wr_id = rq->swq[sw_cons].wr_id;
2089 cqe++;
2090 (*budget)--;
2091 rq->hwq.cons++;
2092 }
2093 *pcqe = cqe;
2094 if (!*budget && HWQ_CMP(rq->hwq.cons, &rq->hwq) != sw_prod)
2095 /* Out of budget */
2096 rc = -EAGAIN;
2098 return rc;
2099 }
2101 void bnxt_qplib_mark_qp_error(void *qp_handle)
2102 {
2103 struct bnxt_qplib_qp *qp = qp_handle;
2105 if (!qp)
2106 return;
2108 /* Must block new posting of SQ and RQ */
2109 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2110 bnxt_qplib_cancel_phantom_processing(qp);
2111 }
2113 /* Note: SQEs are valid from sw_sq_cons up to cqe_sq_cons (exclusive);
2114 * CQEs are tracked from sw_cq_cons to max_element but are valid only if VALID=1
2115 */
2116 static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
2117 u32 cq_cons, u32 sw_sq_cons, u32 cqe_sq_cons)
2118 {
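/* Sketch of the flow below: a marked psn_search entry indicates a
 * fence (phantom) WQE was queued, so completion is parked with
 * -EAGAIN until the phantom CQE is peeked in the CQ; after that, a
 * single completion is let through via sq->single.
 */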
2119 struct bnxt_qplib_q *sq = &qp->sq;
2120 struct bnxt_qplib_swq *swq;
2121 u32 peek_sw_cq_cons, peek_raw_cq_cons, peek_sq_cons_idx;
2122 struct cq_base *peek_hwcqe, **peek_hw_cqe_ptr;
2123 struct cq_req *peek_req_hwcqe;
2124 struct bnxt_qplib_qp *peek_qp;
2125 struct bnxt_qplib_q *peek_sq;
2126 int i, rc = 0;
2128 /* Normal mode */
2129 /* Check for the psn_search marking before completing */
2130 swq = &sq->swq[sw_sq_cons];
2131 if (swq->psn_search &&
2132 le32_to_cpu(swq->psn_search->flags_next_psn) & 0x80000000) {
2133 /* Unmark */
2134 swq->psn_search->flags_next_psn = cpu_to_le32
2135 (le32_to_cpu(swq->psn_search->flags_next_psn)
2136 & ~0x80000000);
2137 dev_dbg(&cq->hwq.pdev->dev,
2138 "FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n",
2139 cq_cons, qp->id, sw_sq_cons, cqe_sq_cons);
2140 sq->condition = true;
2141 sq->send_phantom = true;
2143 /* TODO: Only ARM if the previous SQE is ARMALL */
2144 bnxt_qplib_arm_cq(cq, DBC_DBC_TYPE_CQ_ARMALL);
2146 rc = -EAGAIN;
2147 goto out;
2148 }
2149 if (sq->condition) {
2150 /* Peek at the completions */
2151 peek_raw_cq_cons = cq->hwq.cons;
2152 peek_sw_cq_cons = cq_cons;
2153 i = cq->hwq.max_elements;
2154 while (i--) {
2155 peek_sw_cq_cons = HWQ_CMP((peek_sw_cq_cons), &cq->hwq);
2156 peek_hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
2157 peek_hwcqe = &peek_hw_cqe_ptr[CQE_PG(peek_sw_cq_cons)]
2158 [CQE_IDX(peek_sw_cq_cons)];
2159 /* If the next hwcqe is VALID */
2160 if (CQE_CMP_VALID(peek_hwcqe, peek_raw_cq_cons,
2161 cq->hwq.max_elements)) {
2162 /*
2163 * The valid test of the entry must be done first before
2164 * reading any further.
2165 */
2166 dma_rmb();
2167 /* If the next hwcqe is a REQ */
2168 if ((peek_hwcqe->cqe_type_toggle &
2169 CQ_BASE_CQE_TYPE_MASK) ==
2170 CQ_BASE_CQE_TYPE_REQ) {
2171 peek_req_hwcqe = (struct cq_req *)
2172 peek_hwcqe;
2173 peek_qp = (struct bnxt_qplib_qp *)
2174 ((unsigned long)
2175 le64_to_cpu
2176 (peek_req_hwcqe->qp_handle));
2177 peek_sq = &peek_qp->sq;
2178 peek_sq_cons_idx = HWQ_CMP(le16_to_cpu(
2179 peek_req_hwcqe->sq_cons_idx) - 1,
2180 &sq->hwq);
2181 /* If the hwcqe's sq's wr_id matches */
2182 if (peek_sq == sq &&
2183 sq->swq[peek_sq_cons_idx].wr_id ==
2184 BNXT_QPLIB_FENCE_WRID) {
2185 /*
2186 * Unbreak only if the phantom
2187 * comes back
2188 */
2189 dev_dbg(&cq->hwq.pdev->dev,
2190 "FP: Got Phantom CQE\n");
2191 sq->condition = false;
2192 sq->single = true;
2193 rc = 0;
2194 goto out;
2195 }
2196 }
2197 /* Valid but not the phantom, so keep looping */
2198 } else {
2199 /* Not valid yet, just exit and wait */
2200 rc = -EINVAL;
2201 goto out;
2203 peek_sw_cq_cons++;
2204 peek_raw_cq_cons++;
2205 }
2206 dev_err(&cq->hwq.pdev->dev,
2207 "Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x\n",
2208 cq_cons, qp->id, sw_sq_cons, cqe_sq_cons);
2209 rc = -EINVAL;
2210 }
2211 out:
2212 return rc;
2213 }
2215 static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
2216 struct cq_req *hwcqe,
2217 struct bnxt_qplib_cqe **pcqe, int *budget,
2218 u32 cq_cons, struct bnxt_qplib_qp **lib_qp)
2219 {
2220 struct bnxt_qplib_qp *qp;
2221 struct bnxt_qplib_q *sq;
2222 struct bnxt_qplib_cqe *cqe;
2223 u32 sw_sq_cons, cqe_sq_cons;
2224 struct bnxt_qplib_swq *swq;
2225 int rc = 0;
2227 qp = (struct bnxt_qplib_qp *)((unsigned long)
2228 le64_to_cpu(hwcqe->qp_handle));
2229 if (!qp) {
2230 dev_err(&cq->hwq.pdev->dev,
2231 "FP: Process Req qp is NULL\n");
2232 return -EINVAL;
2233 }
2234 sq = &qp->sq;
2236 cqe_sq_cons = HWQ_CMP(le16_to_cpu(hwcqe->sq_cons_idx), &sq->hwq);
2237 if (cqe_sq_cons > sq->hwq.max_elements) {
2238 dev_err(&cq->hwq.pdev->dev,
2239 "FP: CQ Process req reported sq_cons_idx 0x%x which exceeded max 0x%x\n",
2240 cqe_sq_cons, sq->hwq.max_elements);
2241 return -EINVAL;
2242 }
2244 if (qp->sq.flushed) {
2245 dev_dbg(&cq->hwq.pdev->dev,
2246 "%s: QP in Flush QP = %p\n", __func__, qp);
2247 goto done;
2248 }
2249 /* We must walk the sq's swq to fabricate CQEs for all previously
2250 * signaled SWQEs due to CQE aggregation, from the current sq cons
2251 * to the cqe_sq_cons
2252 */
2253 cqe = *pcqe;
2254 while (*budget) {
2255 sw_sq_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
2256 if (sw_sq_cons == cqe_sq_cons)
2257 /* Done */
2258 break;
2260 swq = &sq->swq[sw_sq_cons];
2261 memset(cqe, 0, sizeof(*cqe));
2262 cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2263 cqe->qp_handle = (u64)(unsigned long)qp;
2264 cqe->src_qp = qp->id;
2265 cqe->wr_id = swq->wr_id;
2266 if (cqe->wr_id == BNXT_QPLIB_FENCE_WRID)
2267 goto skip;
2268 cqe->type = swq->type;
2270 /* For the last CQE, check for status. For errors, regardless
2271 * of the request being signaled or not, it must complete with
2272 * the hwcqe error status
2273 */
2274 if (HWQ_CMP((sw_sq_cons + 1), &sq->hwq) == cqe_sq_cons &&
2275 hwcqe->status != CQ_REQ_STATUS_OK) {
2276 cqe->status = hwcqe->status;
2277 dev_err(&cq->hwq.pdev->dev,
2278 "FP: CQ Processed Req wr_id[%d] = 0x%llx with status 0x%x\n",
2279 sw_sq_cons, cqe->wr_id, cqe->status);
2280 cqe++;
2281 (*budget)--;
2282 bnxt_qplib_mark_qp_error(qp);
2283 /* Add qp to flush list of the CQ */
2284 bnxt_qplib_add_flush_qp(qp);
2285 } else {
2286 /* Before we complete, do WA 9060 */
2287 if (do_wa9060(qp, cq, cq_cons, sw_sq_cons,
2288 cqe_sq_cons)) {
2289 *lib_qp = qp;
2290 goto out;
2291 }
2292 if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
2293 cqe->status = CQ_REQ_STATUS_OK;
2294 cqe++;
2295 (*budget)--;
2296 }
2297 }
2298 skip:
2299 sq->hwq.cons++;
2300 if (sq->single)
2301 break;
2302 }
2303 out:
2304 *pcqe = cqe;
2305 if (HWQ_CMP(sq->hwq.cons, &sq->hwq) != cqe_sq_cons) {
2306 /* Out of budget */
2307 rc = -EAGAIN;
2308 goto done;
2309 }
2310 /*
2311 * Back to normal completion mode only after it has completed all of
2312 * the WC for this CQE
2313 */
2314 sq->single = false;
2315 done:
2316 return rc;
2317 }
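/* bnxt_qplib_release_srqe() returns a consumed SRQE tag to the
 * software free list: the tag is linked in after last_idx and becomes
 * the new tail, with next_idx == -1 terminating the list.
 */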
2319 static void bnxt_qplib_release_srqe(struct bnxt_qplib_srq *srq, u32 tag)
2320 {
2321 spin_lock(&srq->hwq.lock);
2322 srq->swq[srq->last_idx].next_idx = (int)tag;
2323 srq->last_idx = (int)tag;
2324 srq->swq[srq->last_idx].next_idx = -1;
2325 srq->hwq.cons++; /* Support for SRQE counter */
2326 spin_unlock(&srq->hwq.lock);
2327 }
2329 static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
2330 struct cq_res_rc *hwcqe,
2331 struct bnxt_qplib_cqe **pcqe,
2332 int *budget)
2333 {
2334 struct bnxt_qplib_qp *qp;
2335 struct bnxt_qplib_q *rq;
2336 struct bnxt_qplib_srq *srq;
2337 struct bnxt_qplib_cqe *cqe;
2338 u32 wr_id_idx;
2339 int rc = 0;
2341 qp = (struct bnxt_qplib_qp *)((unsigned long)
2342 le64_to_cpu(hwcqe->qp_handle));
2343 if (!qp) {
2344 dev_err(&cq->hwq.pdev->dev, "process_cq RC qp is NULL\n");
2345 return -EINVAL;
2346 }
2347 if (qp->rq.flushed) {
2348 dev_dbg(&cq->hwq.pdev->dev,
2349 "%s: QP in Flush QP = %p\n", __func__, qp);
2350 goto done;
2351 }
2353 cqe = *pcqe;
2354 cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2355 cqe->length = le32_to_cpu(hwcqe->length);
2356 cqe->invrkey = le32_to_cpu(hwcqe->imm_data_or_inv_r_key);
2357 cqe->mr_handle = le64_to_cpu(hwcqe->mr_handle);
2358 cqe->flags = le16_to_cpu(hwcqe->flags);
2359 cqe->status = hwcqe->status;
2360 cqe->qp_handle = (u64)(unsigned long)qp;
2362 wr_id_idx = le32_to_cpu(hwcqe->srq_or_rq_wr_id) &
2363 CQ_RES_RC_SRQ_OR_RQ_WR_ID_MASK;
2364 if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
2365 srq = qp->srq;
2366 if (!srq)
2367 return -EINVAL;
2368 if (wr_id_idx >= srq->hwq.max_elements) {
2369 dev_err(&cq->hwq.pdev->dev,
2370 "FP: CQ Process RC wr_id idx 0x%x exceeded SRQ max 0x%x\n",
2371 wr_id_idx, srq->hwq.max_elements);
2372 return -EINVAL;
2373 }
2374 cqe->wr_id = srq->swq[wr_id_idx].wr_id;
2375 bnxt_qplib_release_srqe(srq, wr_id_idx);
2376 cqe++;
2377 (*budget)--;
2378 *pcqe = cqe;
2379 } else {
2380 rq = &qp->rq;
2381 if (wr_id_idx >= rq->hwq.max_elements) {
2382 dev_err(&cq->hwq.pdev->dev,
2383 "FP: CQ Process RC wr_id idx 0x%x exceeded RQ max 0x%x\n",
2384 wr_id_idx, rq->hwq.max_elements);
2385 return -EINVAL;
2386 }
2387 cqe->wr_id = rq->swq[wr_id_idx].wr_id;
2388 cqe++;
2389 (*budget)--;
2390 rq->hwq.cons++;
2391 *pcqe = cqe;
2393 if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2394 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2395 /* Add qp to flush list of the CQ */
2396 bnxt_qplib_add_flush_qp(qp);
2397 }
2398 }
2400 done:
2401 return rc;
2402 }
2404 static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
2405 struct cq_res_ud *hwcqe,
2406 struct bnxt_qplib_cqe **pcqe,
2407 int *budget)
2408 {
2409 struct bnxt_qplib_qp *qp;
2410 struct bnxt_qplib_q *rq;
2411 struct bnxt_qplib_srq *srq;
2412 struct bnxt_qplib_cqe *cqe;
2413 u32 wr_id_idx;
2414 int rc = 0;
2416 qp = (struct bnxt_qplib_qp *)((unsigned long)
2417 le64_to_cpu(hwcqe->qp_handle));
2418 if (!qp) {
2419 dev_err(&cq->hwq.pdev->dev, "process_cq UD qp is NULL\n");
2420 return -EINVAL;
2421 }
2422 if (qp->rq.flushed) {
2423 dev_dbg(&cq->hwq.pdev->dev,
2424 "%s: QP in Flush QP = %p\n", __func__, qp);
2425 goto done;
2426 }
2427 cqe = *pcqe;
2428 cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2429 cqe->length = (u32)le16_to_cpu(hwcqe->length);
2430 cqe->cfa_meta = le16_to_cpu(hwcqe->cfa_metadata);
2431 cqe->invrkey = le32_to_cpu(hwcqe->imm_data);
2432 cqe->flags = le16_to_cpu(hwcqe->flags);
2433 cqe->status = hwcqe->status;
2434 cqe->qp_handle = (u64)(unsigned long)qp;
2435 /* FIXME: Endianness fix needed for smac */
2436 memcpy(cqe->smac, hwcqe->src_mac, ETH_ALEN);
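/* The source QP number of a UD completion arrives split: the low 16
 * bits in src_qp_low and, as the mask below suggests, the upper bits
 * share the src_qp_high_srq_or_rq_wr_id word with the wr_id index.
 */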
2437 wr_id_idx = le32_to_cpu(hwcqe->src_qp_high_srq_or_rq_wr_id)
2438 & CQ_RES_UD_SRQ_OR_RQ_WR_ID_MASK;
2439 cqe->src_qp = le16_to_cpu(hwcqe->src_qp_low) |
2440 ((le32_to_cpu(
2441 hwcqe->src_qp_high_srq_or_rq_wr_id) &
2442 CQ_RES_UD_SRC_QP_HIGH_MASK) >> 8);
2444 if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
2445 srq = qp->srq;
2446 if (!srq)
2447 return -EINVAL;
2449 if (wr_id_idx >= srq->hwq.max_elements) {
2450 dev_err(&cq->hwq.pdev->dev,
2451 "FP: CQ Process UD wr_id idx 0x%x exceeded SRQ max 0x%x\n",
2452 wr_id_idx, srq->hwq.max_elements);
2453 return -EINVAL;
2454 }
2455 cqe->wr_id = srq->swq[wr_id_idx].wr_id;
2456 bnxt_qplib_release_srqe(srq, wr_id_idx);
2457 cqe++;
2458 (*budget)--;
2459 *pcqe = cqe;
2460 } else {
2461 rq = &qp->rq;
2462 if (wr_id_idx >= rq->hwq.max_elements) {
2463 dev_err(&cq->hwq.pdev->dev,
2464 "FP: CQ Process UD wr_id idx 0x%x exceeded RQ max 0x%x\n",
2465 wr_id_idx, rq->hwq.max_elements);
2466 return -EINVAL;
2467 }
2469 cqe->wr_id = rq->swq[wr_id_idx].wr_id;
2470 cqe++;
2471 (*budget)--;
2472 rq->hwq.cons++;
2473 *pcqe = cqe;
2475 if (hwcqe->status != CQ_RES_UD_STATUS_OK) {
2476 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2477 /* Add qp to flush list of the CQ */
2478 bnxt_qplib_add_flush_qp(qp);
2479 }
2480 }
2481 done:
2482 return rc;
2483 }
2485 bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
2486 {
2487 struct cq_base *hw_cqe, **hw_cqe_ptr;
2488 u32 sw_cons, raw_cons;
2489 bool rc = true;
2491 raw_cons = cq->hwq.cons;
2492 sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
2493 hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
2494 hw_cqe = &hw_cqe_ptr[CQE_PG(sw_cons)][CQE_IDX(sw_cons)];
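/* CQE_CMP_VALID() checks the CQE's toggle bit against the phase
 * implied by the raw consumer count, so a stale entry left over from
 * the previous pass around the ring is not mistaken for a new one.
 */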
2496 /* Check for Valid bit. If the CQE is valid, return false */
2497 rc = !CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements);
2498 return rc;
2499 }
2501 static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
2502 struct cq_res_raweth_qp1 *hwcqe,
2503 struct bnxt_qplib_cqe **pcqe,
2504 int *budget)
2505 {
2506 struct bnxt_qplib_qp *qp;
2507 struct bnxt_qplib_q *rq;
2508 struct bnxt_qplib_srq *srq;
2509 struct bnxt_qplib_cqe *cqe;
2510 u32 wr_id_idx;
2511 int rc = 0;
2513 qp = (struct bnxt_qplib_qp *)((unsigned long)
2514 le64_to_cpu(hwcqe->qp_handle));
2515 if (!qp) {
2516 dev_err(&cq->hwq.pdev->dev, "process_cq Raw/QP1 qp is NULL\n");
2517 return -EINVAL;
2518 }
2519 if (qp->rq.flushed) {
2520 dev_dbg(&cq->hwq.pdev->dev,
2521 "%s: QP in Flush QP = %p\n", __func__, qp);
2522 goto done;
2523 }
2524 cqe = *pcqe;
2525 cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2526 cqe->flags = le16_to_cpu(hwcqe->flags);
2527 cqe->qp_handle = (u64)(unsigned long)qp;
2529 wr_id_idx =
2530 le32_to_cpu(hwcqe->raweth_qp1_payload_offset_srq_or_rq_wr_id)
2531 & CQ_RES_RAWETH_QP1_SRQ_OR_RQ_WR_ID_MASK;
2532 cqe->src_qp = qp->id;
2533 if (qp->id == 1 && !cqe->length) {
2534 /* Add workaround for the length misdetection */
2535 cqe->length = 296;
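/* Hedged note: 296 matches a 40-byte GRH plus a 256-byte MAD, the
 * fixed on-wire size of a QP1 packet, which would explain the
 * hardcoded value used when HW misreports a zero length.
 */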
2536 } else {
2537 cqe->length = le16_to_cpu(hwcqe->length);
2538 }
2539 cqe->pkey_index = qp->pkey_index;
2540 memcpy(cqe->smac, qp->smac, ETH_ALEN);
2542 cqe->raweth_qp1_flags = le16_to_cpu(hwcqe->raweth_qp1_flags);
2543 cqe->raweth_qp1_flags2 = le32_to_cpu(hwcqe->raweth_qp1_flags2);
2544 cqe->raweth_qp1_metadata = le32_to_cpu(hwcqe->raweth_qp1_metadata);
2546 if (cqe->flags & CQ_RES_RAWETH_QP1_FLAGS_SRQ_SRQ) {
2547 srq = qp->srq;
2548 if (!srq) {
2549 dev_err(&cq->hwq.pdev->dev,
2550 "FP: SRQ used but not defined??\n");
2551 return -EINVAL;
2552 }
2553 if (wr_id_idx >= srq->hwq.max_elements) {
2554 dev_err(&cq->hwq.pdev->dev,
2555 "FP: CQ Process Raw/QP1 wr_id idx 0x%x exceeded SRQ max 0x%x\n",
2556 wr_id_idx, srq->hwq.max_elements);
2557 return -EINVAL;
2558 }
2559 cqe->wr_id = srq->swq[wr_id_idx].wr_id;
2560 bnxt_qplib_release_srqe(srq, wr_id_idx);
2561 cqe++;
2562 (*budget)--;
2563 *pcqe = cqe;
2564 } else {
2565 rq = &qp->rq;
2566 if (wr_id_idx >= rq->hwq.max_elements) {
2567 dev_err(&cq->hwq.pdev->dev,
2568 "FP: CQ Process Raw/QP1 RQ wr_id idx 0x%x exceeded RQ max 0x%x\n",
2569 wr_id_idx, rq->hwq.max_elements);
2570 return -EINVAL;
2571 }
2572 cqe->wr_id = rq->swq[wr_id_idx].wr_id;
2573 cqe++;
2574 (*budget)--;
2575 rq->hwq.cons++;
2576 *pcqe = cqe;
2578 if (hwcqe->status != CQ_RES_RAWETH_QP1_STATUS_OK) {
2579 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2580 /* Add qp to flush list of the CQ */
2581 bnxt_qplib_add_flush_qp(qp);
2582 }
2583 }
2585 done:
2586 return rc;
2587 }
2589 static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
2590 struct cq_terminal *hwcqe,
2591 struct bnxt_qplib_cqe **pcqe,
2592 int *budget)
2593 {
2594 struct bnxt_qplib_qp *qp;
2595 struct bnxt_qplib_q *sq, *rq;
2596 struct bnxt_qplib_cqe *cqe;
2597 u32 sw_cons = 0, cqe_cons;
2598 int rc = 0;
2600 /* Check the Status */
2601 if (hwcqe->status != CQ_TERMINAL_STATUS_OK)
2602 dev_warn(&cq->hwq.pdev->dev,
2603 "FP: CQ Process Terminal Error status = 0x%x\n",
2604 hwcqe->status);
2606 qp = (struct bnxt_qplib_qp *)((unsigned long)
2607 le64_to_cpu(hwcqe->qp_handle));
2608 if (!qp) {
2609 dev_err(&cq->hwq.pdev->dev,
2610 "FP: CQ Process terminal qp is NULL\n");
2611 return -EINVAL;
2612 }
2614 /* Must block new posting of SQ and RQ */
2615 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2617 sq = &qp->sq;
2618 rq = &qp->rq;
2620 cqe_cons = le16_to_cpu(hwcqe->sq_cons_idx);
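/* 0xFFFF appears to be the sentinel HW uses when no SQ (or, below,
 * RQ) consumer index is being reported by the terminal CQE.
 */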
2621 if (cqe_cons == 0xFFFF)
2622 goto do_rq;
2624 if (cqe_cons > sq->hwq.max_elements) {
2625 dev_err(&cq->hwq.pdev->dev,
2626 "FP: CQ Process terminal reported sq_cons_idx 0x%x which exceeded max 0x%x\n",
2627 cqe_cons, sq->hwq.max_elements);
2628 goto do_rq;
2629 }
2631 if (qp->sq.flushed) {
2632 dev_dbg(&cq->hwq.pdev->dev,
2633 "%s: QP in Flush QP = %p\n", __func__, qp);
2634 goto sq_done;
2635 }
2637 /* Terminal CQE can also include aggregated successful CQEs prior.
2638 * So we must complete all CQEs from the current sq's cons to the
2639 * cq_cons with status OK
2640 */
2641 cqe = *pcqe;
2642 while (*budget) {
2643 sw_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
2644 if (sw_cons == cqe_cons)
2645 break;
2646 if (sq->swq[sw_cons].flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
2647 memset(cqe, 0, sizeof(*cqe));
2648 cqe->status = CQ_REQ_STATUS_OK;
2649 cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2650 cqe->qp_handle = (u64)(unsigned long)qp;
2651 cqe->src_qp = qp->id;
2652 cqe->wr_id = sq->swq[sw_cons].wr_id;
2653 cqe->type = sq->swq[sw_cons].type;
2654 cqe++;
2655 (*budget)--;
2656 }
2657 sq->hwq.cons++;
2658 }
2659 *pcqe = cqe;
2660 if (!(*budget) && sw_cons != cqe_cons) {
2661 /* Out of budget */
2662 rc = -EAGAIN;
2663 goto sq_done;
2664 }
2665 sq_done:
2666 if (rc)
2667 return rc;
2668 do_rq:
2669 cqe_cons = le16_to_cpu(hwcqe->rq_cons_idx);
2670 if (cqe_cons == 0xFFFF) {
2671 goto done;
2672 } else if (cqe_cons > rq->hwq.max_elements) {
2673 dev_err(&cq->hwq.pdev->dev,
2674 "FP: CQ Processed terminal reported rq_cons_idx 0x%x exceeds max 0x%x\n",
2675 cqe_cons, rq->hwq.max_elements);
2676 goto done;
2677 }
2679 if (qp->rq.flushed) {
2680 dev_dbg(&cq->hwq.pdev->dev,
2681 "%s: QP in Flush QP = %p\n", __func__, qp);
2682 rc = 0;
2683 goto done;
2684 }
2686 /* A terminal CQE requires all posted RQEs to complete with FLUSHED_ERR
2687 * from the current rq->cons to the rq->prod, regardless of what
2688 * rq->cons the terminal CQE indicates
2689 */
2691 /* Add qp to flush list of the CQ */
2692 bnxt_qplib_add_flush_qp(qp);
2693 done:
2694 return rc;
2695 }
2697 static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq,
2698 struct cq_cutoff *hwcqe)
2699 {
2700 /* Check the Status */
2701 if (hwcqe->status != CQ_CUTOFF_STATUS_OK) {
2702 dev_err(&cq->hwq.pdev->dev,
2703 "FP: CQ Process Cutoff Error status = 0x%x\n",
2704 hwcqe->status);
2705 return -EINVAL;
2706 }
2707 clear_bit(CQ_FLAGS_RESIZE_IN_PROG, &cq->flags);
2708 wake_up_interruptible(&cq->waitq);
2710 return 0;
2711 }
2713 int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
2714 struct bnxt_qplib_cqe *cqe,
2715 int num_cqes)
2716 {
2717 struct bnxt_qplib_qp *qp = NULL;
2718 u32 budget = num_cqes;
2719 unsigned long flags;
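/* Every QP parked on this CQ's flush lists has its outstanding SQ/RQ
 * entries completed as FLUSHED_ERR via __flush_sq()/__flush_rq(); the
 * return value is the number of CQE slots consumed.
 */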
2721 spin_lock_irqsave(&cq->flush_lock, flags);
2722 list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
2723 dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing SQ QP= %p\n", qp);
2724 __flush_sq(&qp->sq, qp, &cqe, &budget);
2725 }
2727 list_for_each_entry(qp, &cq->rqf_head, rq_flush) {
2728 dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing RQ QP= %p\n", qp);
2729 __flush_rq(&qp->rq, qp, &cqe, &budget);
2730 }
2731 spin_unlock_irqrestore(&cq->flush_lock, flags);
2733 return num_cqes - budget;
2734 }
2736 int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
2737 int num_cqes, struct bnxt_qplib_qp **lib_qp)
2738 {
2739 struct cq_base *hw_cqe, **hw_cqe_ptr;
2740 u32 sw_cons, raw_cons;
2741 int budget, rc = 0;
2743 raw_cons = cq->hwq.cons;
2744 budget = num_cqes;
2746 while (budget) {
2747 sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
2748 hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
2749 hw_cqe = &hw_cqe_ptr[CQE_PG(sw_cons)][CQE_IDX(sw_cons)];
2751 /* Check for Valid bit */
2752 if (!CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements))
2753 break;
2755 /*
2756 * The valid test of the entry must be done first before
2757 * reading any further.
2758 */
2759 dma_rmb();
2760 /* Translate from the device's CQE format to qplib_wc */
2761 switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
2762 case CQ_BASE_CQE_TYPE_REQ:
2763 rc = bnxt_qplib_cq_process_req(cq,
2764 (struct cq_req *)hw_cqe,
2765 &cqe, &budget,
2766 sw_cons, lib_qp);
2767 break;
2768 case CQ_BASE_CQE_TYPE_RES_RC:
2769 rc = bnxt_qplib_cq_process_res_rc(cq,
2770 (struct cq_res_rc *)
2771 hw_cqe, &cqe,
2772 &budget);
2773 break;
2774 case CQ_BASE_CQE_TYPE_RES_UD:
2775 rc = bnxt_qplib_cq_process_res_ud
2776 (cq, (struct cq_res_ud *)hw_cqe, &cqe,
2777 &budget);
2778 break;
2779 case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
2780 rc = bnxt_qplib_cq_process_res_raweth_qp1
2781 (cq, (struct cq_res_raweth_qp1 *)
2782 hw_cqe, &cqe, &budget);
2783 break;
2784 case CQ_BASE_CQE_TYPE_TERMINAL:
2785 rc = bnxt_qplib_cq_process_terminal
2786 (cq, (struct cq_terminal *)hw_cqe,
2787 &cqe, &budget);
2788 break;
2789 case CQ_BASE_CQE_TYPE_CUT_OFF:
2790 bnxt_qplib_cq_process_cutoff
2791 (cq, (struct cq_cutoff *)hw_cqe);
2792 /* Done processing this CQ */
2793 goto exit;
2794 default:
2795 dev_err(&cq->hwq.pdev->dev,
2796 "process_cq unknown type 0x%lx\n",
2797 hw_cqe->cqe_type_toggle &
2798 CQ_BASE_CQE_TYPE_MASK);
2799 rc = -EINVAL;
2800 break;
2801 }
2802 if (rc < 0) {
2803 if (rc == -EAGAIN)
2804 break;
2805 /* Error while processing the CQE, just skip to the
2806 * next one
2807 */
2808 dev_err(&cq->hwq.pdev->dev,
2809 "process_cqe error rc = 0x%x\n", rc);
2810 }
2811 raw_cons++;
2812 }
2813 if (cq->hwq.cons != raw_cons) {
2814 cq->hwq.cons = raw_cons;
2815 bnxt_qplib_arm_cq(cq, DBC_DBC_TYPE_CQ);
2816 }
2817 exit:
2818 return num_cqes - budget;
2819 }
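/*
 * Hypothetical caller sketch (not from this file): a ULP poll loop
 * would typically drain live CQEs first and then the flush lists,
 * e.g.:
 *
 *	cnt = bnxt_qplib_poll_cq(cq, cqe, num_cqes, &lib_qp);
 *	if (cnt < num_cqes)
 *		cnt += bnxt_qplib_process_flush_list(cq, cqe + cnt,
 *						     num_cqes - cnt);
 */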
2821 void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
2822 {
2823 if (arm_type)
2824 bnxt_qplib_arm_cq(cq, arm_type);
2825 /* Using cq->arm_state variable to track whether to issue cq handler */
2826 atomic_set(&cq->arm_state, 1);
2827 }
2829 void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp)
2830 {
2831 flush_workqueue(qp->scq->nq->cqn_wq);
2832 if (qp->scq != qp->rcq)
2833 flush_workqueue(qp->rcq->nq->cqn_wq);
2834 }