/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: Fast Path Operators
 */
#define dev_fmt(fmt) "QPLIB: " fmt

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/prefetch.h>
#include <linux/if_ether.h>

#include "roce_hsi.h"

#include "qplib_res.h"
#include "qplib_rcfw.h"
#include "qplib_sp.h"
#include "qplib_fp.h"
static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);
static void bnxt_qplib_cancel_phantom_processing(struct bnxt_qplib_qp *qp)
{
	qp->sq.condition = false;
	qp->sq.send_phantom = false;
	qp->sq.single = false;
}
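/* Flush handling: when a QP is moved to the error state it is linked onto
 * the flush lists of its send and receive CQs (sqf_head/rqf_head) so that
 * any outstanding WQEs can later be completed with a FLUSHED_ERR status.
 */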
static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_cq *scq, *rcq;

	scq = qp->scq;
	rcq = qp->rcq;

	if (!qp->sq.flushed) {
		dev_dbg(&scq->hwq.pdev->dev,
			"FP: Adding to SQ Flush list = %p\n", qp);
		bnxt_qplib_cancel_phantom_processing(qp);
		list_add_tail(&qp->sq_flush, &scq->sqf_head);
		qp->sq.flushed = true;
	}
	if (!qp->rq.flushed) {
		dev_dbg(&rcq->hwq.pdev->dev,
			"FP: Adding to RQ Flush list = %p\n", qp);
		list_add_tail(&qp->rq_flush, &rcq->rqf_head);
		qp->rq.flushed = true;
	}
}
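/* The SCQ flush_lock is always taken before the RCQ flush_lock.  When the
 * send and receive completion queues are the same CQ only the single lock
 * is taken; the sparse annotations are balanced with __acquire/__release.
 */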
static void bnxt_qplib_acquire_cq_flush_locks(struct bnxt_qplib_qp *qp,
					      unsigned long *flags)
	__acquires(&qp->scq->flush_lock) __acquires(&qp->rcq->flush_lock)
{
	spin_lock_irqsave(&qp->scq->flush_lock, *flags);
	if (qp->scq == qp->rcq)
		__acquire(&qp->rcq->flush_lock);
	else
		spin_lock(&qp->rcq->flush_lock);
}
static void bnxt_qplib_release_cq_flush_locks(struct bnxt_qplib_qp *qp,
					      unsigned long *flags)
	__releases(&qp->scq->flush_lock) __releases(&qp->rcq->flush_lock)
{
	if (qp->scq == qp->rcq)
		__release(&qp->rcq->flush_lock);
	else
		spin_unlock(&qp->rcq->flush_lock);
	spin_unlock_irqrestore(&qp->scq->flush_lock, *flags);
}
void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
{
	unsigned long flags;

	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
	__bnxt_qplib_add_flush_qp(qp);
	bnxt_qplib_release_cq_flush_locks(qp, &flags);
}
static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
{
	if (qp->sq.flushed) {
		qp->sq.flushed = false;
		list_del(&qp->sq_flush);
	}
	if (qp->rq.flushed) {
		qp->rq.flushed = false;
		list_del(&qp->rq_flush);
	}
}
void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp)
{
	unsigned long flags;

	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
	__clean_cq(qp->scq, (u64)(unsigned long)qp);
	__clean_cq(qp->rcq, (u64)(unsigned long)qp);

	__bnxt_qplib_del_flush_qp(qp);
	bnxt_qplib_release_cq_flush_locks(qp, &flags);
}
static void bnxt_qpn_cqn_sched_task(struct work_struct *work)
{
	struct bnxt_qplib_nq_work *nq_work =
			container_of(work, struct bnxt_qplib_nq_work, work);

	struct bnxt_qplib_cq *cq = nq_work->cq;
	struct bnxt_qplib_nq *nq = nq_work->nq;

	if (cq && nq) {
		spin_lock_bh(&cq->compl_lock);
		if (atomic_read(&cq->arm_state) && nq->cqn_handler) {
			dev_dbg(&nq->pdev->dev,
				"%s:Trigger cq  = %p event nq = %p\n",
				__func__, cq, nq);
			nq->cqn_handler(nq, cq);
		}
		spin_unlock_bh(&cq->compl_lock);
	}
	kfree(nq_work);
}
static void bnxt_qplib_free_qp_hdr_buf(struct bnxt_qplib_res *res,
				       struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	struct bnxt_qplib_q *sq = &qp->sq;

	if (qp->rq_hdr_buf)
		dma_free_coherent(&res->pdev->dev,
				  rq->max_wqe * qp->rq_hdr_buf_size,
				  qp->rq_hdr_buf, qp->rq_hdr_buf_map);
	if (qp->sq_hdr_buf)
		dma_free_coherent(&res->pdev->dev,
				  sq->max_wqe * qp->sq_hdr_buf_size,
				  qp->sq_hdr_buf, qp->sq_hdr_buf_map);
	qp->rq_hdr_buf = NULL;
	qp->sq_hdr_buf = NULL;
	qp->rq_hdr_buf_map = 0;
	qp->sq_hdr_buf_map = 0;
	qp->sq_hdr_buf_size = 0;
	qp->rq_hdr_buf_size = 0;
}
static int bnxt_qplib_alloc_qp_hdr_buf(struct bnxt_qplib_res *res,
				       struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	struct bnxt_qplib_q *sq = &qp->sq;
	int rc = 0;

	if (qp->sq_hdr_buf_size && sq->max_wqe) {
		qp->sq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
					sq->max_wqe * qp->sq_hdr_buf_size,
					&qp->sq_hdr_buf_map, GFP_KERNEL);
		if (!qp->sq_hdr_buf) {
			rc = -ENOMEM;
			dev_err(&res->pdev->dev,
				"Failed to create sq_hdr_buf\n");
			goto fail;
		}
	}

	if (qp->rq_hdr_buf_size && rq->max_wqe) {
		qp->rq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
						    rq->max_wqe *
						    qp->rq_hdr_buf_size,
						    &qp->rq_hdr_buf_map,
						    GFP_KERNEL);
		if (!qp->rq_hdr_buf) {
			rc = -ENOMEM;
			dev_err(&res->pdev->dev,
				"Failed to create rq_hdr_buf\n");
			goto fail;
		}
	}
	return 0;

fail:
	bnxt_qplib_free_qp_hdr_buf(res, qp);
	return rc;
}
static void clean_nq(struct bnxt_qplib_nq *nq, struct bnxt_qplib_cq *cq)
{
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	struct nq_base *nqe, **nq_ptr;
	int budget = nq->budget;
	u32 sw_cons, raw_cons;
	uintptr_t q_handle;
	u16 type;

	spin_lock_bh(&hwq->lock);
	/* Service the NQ until empty */
	raw_cons = hwq->cons;
	while (budget--) {
		sw_cons = HWQ_CMP(raw_cons, hwq);
		nq_ptr = (struct nq_base **)hwq->pbl_ptr;
		nqe = &nq_ptr[NQE_PG(sw_cons)][NQE_IDX(sw_cons)];
		if (!NQE_CMP_VALID(nqe, raw_cons, hwq->max_elements))
			break;

		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();

		type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
		switch (type) {
		case NQ_BASE_TYPE_CQ_NOTIFICATION:
		{
			struct nq_cn *nqcne = (struct nq_cn *)nqe;

			q_handle = le32_to_cpu(nqcne->cq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
						     << 32;
			if ((unsigned long)cq == q_handle) {
				nqcne->cq_handle_low = 0;
				nqcne->cq_handle_high = 0;
				cq->cnq_events++;
			}
			break;
		}
		default:
			break;
		}
		raw_cons++;
	}
	spin_unlock_bh(&hwq->lock);
}
/* Wait for receiving all NQEs for this CQ and clean the NQEs associated with
 * this CQ.
 */
static void __wait_for_all_nqes(struct bnxt_qplib_cq *cq, u16 cnq_events)
{
	u32 retry_cnt = 100;

	while (retry_cnt--) {
		if (cnq_events == cq->cnq_events)
			return;
		usleep_range(50, 100);
		clean_nq(cq->nq, cq);
	}
}
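/* NQ tasklet: walk the valid notification entries, re-arm the CQ/SRQ
 * doorbell enable, dispatch each event to the registered cqn/srqn handler
 * and finally ring the NQ doorbell with the updated consumer index.
 */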
static void bnxt_qplib_service_nq(struct tasklet_struct *t)
{
	struct bnxt_qplib_nq *nq = from_tasklet(nq, t, nq_tasklet);
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	int num_srqne_processed = 0;
	int num_cqne_processed = 0;
	struct bnxt_qplib_cq *cq;
	int budget = nq->budget;
	u32 sw_cons, raw_cons;
	struct nq_base *nqe;
	uintptr_t q_handle;
	u16 type;

	spin_lock_bh(&hwq->lock);
	/* Service the NQ until empty */
	raw_cons = hwq->cons;
	while (budget--) {
		sw_cons = HWQ_CMP(raw_cons, hwq);
		nqe = bnxt_qplib_get_qe(hwq, sw_cons, NULL);
		if (!NQE_CMP_VALID(nqe, raw_cons, hwq->max_elements))
			break;

		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();

		type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
		switch (type) {
		case NQ_BASE_TYPE_CQ_NOTIFICATION:
		{
			struct nq_cn *nqcne = (struct nq_cn *)nqe;

			q_handle = le32_to_cpu(nqcne->cq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
						     << 32;
			cq = (struct bnxt_qplib_cq *)(unsigned long)q_handle;
			if (!cq)
				break;
			bnxt_qplib_armen_db(&cq->dbinfo,
					    DBC_DBC_TYPE_CQ_ARMENA);
			spin_lock_bh(&cq->compl_lock);
			atomic_set(&cq->arm_state, 0);
			if (!nq->cqn_handler(nq, (cq)))
				num_cqne_processed++;
			else
				dev_warn(&nq->pdev->dev,
					 "cqn - type 0x%x not handled\n", type);
			cq->cnq_events++;
			spin_unlock_bh(&cq->compl_lock);
			break;
		}
		case NQ_BASE_TYPE_SRQ_EVENT:
		{
			struct bnxt_qplib_srq *srq;
			struct nq_srq_event *nqsrqe =
						(struct nq_srq_event *)nqe;

			q_handle = le32_to_cpu(nqsrqe->srq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqsrqe->srq_handle_high)
				     << 32;
			srq = (struct bnxt_qplib_srq *)q_handle;
			bnxt_qplib_armen_db(&srq->dbinfo,
					    DBC_DBC_TYPE_SRQ_ARMENA);
			if (!nq->srqn_handler(nq,
					      (struct bnxt_qplib_srq *)q_handle,
					      nqsrqe->event))
				num_srqne_processed++;
			else
				dev_warn(&nq->pdev->dev,
					 "SRQ event 0x%x not handled\n",
					 nqsrqe->event);
			break;
		}
		case NQ_BASE_TYPE_DBQ_EVENT:
			break;
		default:
			dev_warn(&nq->pdev->dev,
				 "nqe with type = 0x%x not handled\n", type);
			break;
		}
		raw_cons++;
	}
	if (hwq->cons != raw_cons) {
		hwq->cons = raw_cons;
		bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, true);
	}
	spin_unlock_bh(&hwq->lock);
}
static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
{
	struct bnxt_qplib_nq *nq = dev_instance;
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	u32 sw_cons;

	/* Prefetch the NQ element */
	sw_cons = HWQ_CMP(hwq->cons, hwq);
	prefetch(bnxt_qplib_get_qe(hwq, sw_cons, NULL));

	/* Fan out to CPU affinitized kthreads? */
	tasklet_schedule(&nq->nq_tasklet);

	return IRQ_HANDLED;
}
void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill)
{
	tasklet_disable(&nq->nq_tasklet);
	/* Mask h/w interrupt */
	bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, false);
	/* Sync with last running IRQ handler */
	synchronize_irq(nq->msix_vec);
	if (kill)
		tasklet_kill(&nq->nq_tasklet);
	if (nq->requested) {
		irq_set_affinity_hint(nq->msix_vec, NULL);
		free_irq(nq->msix_vec, nq);
		nq->requested = false;
	}
}
void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
{
	if (nq->cqn_wq) {
		destroy_workqueue(nq->cqn_wq);
		nq->cqn_wq = NULL;
	}

	/* Make sure the HW is stopped! */
	bnxt_qplib_nq_stop_irq(nq, true);

	if (nq->nq_db.reg.bar_reg) {
		iounmap(nq->nq_db.reg.bar_reg);
		nq->nq_db.reg.bar_reg = NULL;
	}

	nq->cqn_handler = NULL;
	nq->srqn_handler = NULL;
}
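/* (Re)register the NQ MSI-X vector: set up or re-enable the tasklet,
 * request the interrupt, pin it to the requested CPU via an affinity hint
 * and re-enable the NQ doorbell so notifications can flow again.
 */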
int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
			    int msix_vector, bool need_init)
{
	int rc;

	nq->msix_vec = msix_vector;
	if (need_init)
		tasklet_setup(&nq->nq_tasklet, bnxt_qplib_service_nq);
	else
		tasklet_enable(&nq->nq_tasklet);

	snprintf(nq->name, sizeof(nq->name), "bnxt_qplib_nq-%d", nq_indx);
	rc = request_irq(nq->msix_vec, bnxt_qplib_nq_irq, 0, nq->name, nq);
	if (rc)
		return rc;

	cpumask_clear(&nq->mask);
	cpumask_set_cpu(nq_indx, &nq->mask);
	rc = irq_set_affinity_hint(nq->msix_vec, &nq->mask);
	if (rc) {
		dev_warn(&nq->pdev->dev,
			 "set affinity failed; vector: %d nq_idx: %d\n",
			 nq->msix_vec, nq_indx);
	}
	nq->requested = true;
	bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, true);

	return rc;
}
static int bnxt_qplib_map_nq_db(struct bnxt_qplib_nq *nq,  u32 reg_offt)
{
	resource_size_t reg_base;
	struct bnxt_qplib_nq_db *nq_db;
	struct pci_dev *pdev;
	int rc = 0;

	pdev = nq->pdev;
	nq_db = &nq->nq_db;

	nq_db->reg.bar_id = NQ_CONS_PCI_BAR_REGION;
	nq_db->reg.bar_base = pci_resource_start(pdev, nq_db->reg.bar_id);
	if (!nq_db->reg.bar_base) {
		dev_err(&pdev->dev, "QPLIB: NQ BAR region %d resc start is 0!",
			nq_db->reg.bar_id);
		rc = -ENOMEM;
		goto fail;
	}

	reg_base = nq_db->reg.bar_base + reg_offt;
	/* Unconditionally map 8 bytes to support 57500 series */
	nq_db->reg.len = 8;
	nq_db->reg.bar_reg = ioremap(reg_base, nq_db->reg.len);
	if (!nq_db->reg.bar_reg) {
		dev_err(&pdev->dev, "QPLIB: NQ BAR region %d mapping failed",
			nq_db->reg.bar_id);
		rc = -ENOMEM;
		goto fail;
	}

	nq_db->dbinfo.db = nq_db->reg.bar_reg;
	nq_db->dbinfo.hwq = &nq->hwq;
	nq_db->dbinfo.xid = nq->ring_id;
fail:
	return rc;
}
int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
			 int nq_idx, int msix_vector, int bar_reg_offset,
			 cqn_handler_t cqn_handler,
			 srqn_handler_t srqn_handler)
{
	int rc;

	nq->pdev = pdev;
	nq->cqn_handler = cqn_handler;
	nq->srqn_handler = srqn_handler;

	/* Have a task to schedule CQ notifiers in post send case */
	nq->cqn_wq  = create_singlethread_workqueue("bnxt_qplib_nq");
	if (!nq->cqn_wq)
		return -ENOMEM;

	rc = bnxt_qplib_map_nq_db(nq, bar_reg_offset);
	if (rc)
		goto fail;

	rc = bnxt_qplib_nq_start_irq(nq, nq_idx, msix_vector, true);
	if (rc) {
		dev_err(&nq->pdev->dev,
			"Failed to request irq for nq-idx %d\n", nq_idx);
		goto fail;
	}

	return 0;
fail:
	bnxt_qplib_disable_nq(nq);
	return rc;
}
void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq)
{
	if (nq->hwq.max_elements) {
		bnxt_qplib_free_hwq(nq->res, &nq->hwq);
		nq->hwq.max_elements = 0;
	}
}
int bnxt_qplib_alloc_nq(struct bnxt_qplib_res *res, struct bnxt_qplib_nq *nq)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};

	nq->pdev = res->pdev;
	nq->res = res;
	if (!nq->hwq.max_elements ||
	    nq->hwq.max_elements > BNXT_QPLIB_NQE_MAX_CNT)
		nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;

	sginfo.pgsize = PAGE_SIZE;
	sginfo.pgshft = PAGE_SHIFT;
	hwq_attr.res = res;
	hwq_attr.sginfo = &sginfo;
	hwq_attr.depth = nq->hwq.max_elements;
	hwq_attr.stride = sizeof(struct nq_base);
	hwq_attr.type = bnxt_qplib_get_hwq_type(nq->res);
	if (bnxt_qplib_alloc_init_hwq(&nq->hwq, &hwq_attr)) {
		dev_err(&nq->pdev->dev, "FP NQ allocation failed");
		return -ENOMEM;
	}
	nq->budget = 8;
	return 0;
}
void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
			    struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_destroy_srq req;
	struct creq_destroy_srq_resp resp;
	u16 cmd_flags = 0;
	int rc;

	RCFW_CMD_PREP(req, DESTROY_SRQ, cmd_flags);

	/* Configure the request */
	req.srq_cid = cpu_to_le32(srq->id);

	rc = bnxt_qplib_rcfw_send_message(rcfw, (struct cmdq_base *)&req,
					  (struct creq_base *)&resp, NULL, 0);
	kfree(srq->swq);
	if (rc)
		return;
	bnxt_qplib_free_hwq(res, &srq->hwq);
}
int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct creq_create_srq_resp resp;
	struct cmdq_create_srq req;
	struct bnxt_qplib_pbl *pbl;
	u16 cmd_flags = 0;
	u16 pg_sz_lvl;
	int rc, idx;

	hwq_attr.res = res;
	hwq_attr.sginfo = &srq->sg_info;
	hwq_attr.depth = srq->max_wqe;
	hwq_attr.stride = srq->wqe_size;
	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&srq->hwq, &hwq_attr);
	if (rc)
		goto exit;

	srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
			   GFP_KERNEL);
	if (!srq->swq) {
		rc = -ENOMEM;
		goto fail;
	}

	RCFW_CMD_PREP(req, CREATE_SRQ, cmd_flags);

	/* Configure the request */
	req.dpi = cpu_to_le32(srq->dpi->dpi);
	req.srq_handle = cpu_to_le64((uintptr_t)srq);

	req.srq_size = cpu_to_le16((u16)srq->hwq.max_elements);
	pbl = &srq->hwq.pbl[PBL_LVL_0];
	pg_sz_lvl = ((u16)bnxt_qplib_base_pg_size(&srq->hwq) <<
		     CMDQ_CREATE_SRQ_PG_SIZE_SFT);
	pg_sz_lvl |= (srq->hwq.level & CMDQ_CREATE_SRQ_LVL_MASK) <<
		      CMDQ_CREATE_SRQ_LVL_SFT;
	req.pg_size_lvl = cpu_to_le16(pg_sz_lvl);
	req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	req.pd_id = cpu_to_le32(srq->pd->id);
	req.eventq_id = cpu_to_le16(srq->eventq_hw_ring_id);

	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc)
		goto fail;

	spin_lock_init(&srq->lock);
	srq->start_idx = 0;
	srq->last_idx = srq->hwq.max_elements - 1;
	for (idx = 0; idx < srq->hwq.max_elements; idx++)
		srq->swq[idx].next_idx = idx + 1;
	srq->swq[srq->last_idx].next_idx = -1;

	srq->id = le32_to_cpu(resp.xid);
	srq->dbinfo.hwq = &srq->hwq;
	srq->dbinfo.xid = srq->id;
	srq->dbinfo.db = srq->dpi->dbr;
	srq->dbinfo.max_slot = 1;
	srq->dbinfo.priv_db = res->dpi_tbl.dbr_bar_reg_iomem;
	if (srq->threshold)
		bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA);
	srq->arm_req = false;

	return 0;
fail:
	bnxt_qplib_free_hwq(res, &srq->hwq);
	kfree(srq->swq);
exit:
	return rc;
}
int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
	u32 sw_prod, sw_cons, count = 0;

	sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
	sw_cons = HWQ_CMP(srq_hwq->cons, srq_hwq);

	count = sw_prod > sw_cons ? sw_prod - sw_cons :
				    srq_hwq->max_elements - sw_cons + sw_prod;
	if (count > srq->threshold) {
		srq->arm_req = false;
		bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
	} else {
		/* Deferred arming */
		srq->arm_req = true;
	}

	return 0;
}
int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_query_srq req;
	struct creq_query_srq_resp resp;
	struct bnxt_qplib_rcfw_sbuf *sbuf;
	struct creq_query_srq_resp_sb *sb;
	u16 cmd_flags = 0;
	int rc = 0;

	RCFW_CMD_PREP(req, QUERY_SRQ, cmd_flags);
	req.srq_cid = cpu_to_le32(srq->id);

	/* Configure the request */
	sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
	if (!sbuf)
		return -ENOMEM;
	sb = sbuf->sb;
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
					  (void *)sbuf, 0);
	srq->threshold = le16_to_cpu(sb->srq_limit);
	bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);

	return rc;
}
725 int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq
*srq
,
726 struct bnxt_qplib_swqe
*wqe
)
728 struct bnxt_qplib_hwq
*srq_hwq
= &srq
->hwq
;
730 struct sq_sge
*hw_sge
;
731 u32 sw_prod
, sw_cons
, count
= 0;
734 spin_lock(&srq_hwq
->lock
);
735 if (srq
->start_idx
== srq
->last_idx
) {
736 dev_err(&srq_hwq
->pdev
->dev
,
737 "FP: SRQ (0x%x) is full!\n", srq
->id
);
739 spin_unlock(&srq_hwq
->lock
);
742 next
= srq
->start_idx
;
743 srq
->start_idx
= srq
->swq
[next
].next_idx
;
744 spin_unlock(&srq_hwq
->lock
);
746 sw_prod
= HWQ_CMP(srq_hwq
->prod
, srq_hwq
);
747 srqe
= bnxt_qplib_get_qe(srq_hwq
, sw_prod
, NULL
);
748 memset(srqe
, 0, srq
->wqe_size
);
749 /* Calculate wqe_size16 and data_len */
750 for (i
= 0, hw_sge
= (struct sq_sge
*)srqe
->data
;
751 i
< wqe
->num_sge
; i
++, hw_sge
++) {
752 hw_sge
->va_or_pa
= cpu_to_le64(wqe
->sg_list
[i
].addr
);
753 hw_sge
->l_key
= cpu_to_le32(wqe
->sg_list
[i
].lkey
);
754 hw_sge
->size
= cpu_to_le32(wqe
->sg_list
[i
].size
);
756 srqe
->wqe_type
= wqe
->type
;
757 srqe
->flags
= wqe
->flags
;
758 srqe
->wqe_size
= wqe
->num_sge
+
759 ((offsetof(typeof(*srqe
), data
) + 15) >> 4);
760 srqe
->wr_id
[0] = cpu_to_le32((u32
)next
);
761 srq
->swq
[next
].wr_id
= wqe
->wr_id
;
765 spin_lock(&srq_hwq
->lock
);
766 sw_prod
= HWQ_CMP(srq_hwq
->prod
, srq_hwq
);
767 /* retaining srq_hwq->cons for this logic
768 * actually the lock is only required to
769 * read srq_hwq->cons.
771 sw_cons
= HWQ_CMP(srq_hwq
->cons
, srq_hwq
);
772 count
= sw_prod
> sw_cons
? sw_prod
- sw_cons
:
773 srq_hwq
->max_elements
- sw_cons
+ sw_prod
;
774 spin_unlock(&srq_hwq
->lock
);
776 bnxt_qplib_ring_prod_db(&srq
->dbinfo
, DBC_DBC_TYPE_SRQ
);
777 if (srq
->arm_req
== true && count
> srq
->threshold
) {
778 srq
->arm_req
= false;
779 bnxt_qplib_srq_arm_db(&srq
->dbinfo
, srq
->threshold
);
787 static int bnxt_qplib_alloc_init_swq(struct bnxt_qplib_q
*que
)
792 que
->swq
= kcalloc(que
->max_wqe
, sizeof(*que
->swq
), GFP_KERNEL
);
799 que
->swq_last
= que
->max_wqe
- 1;
800 for (indx
= 0; indx
< que
->max_wqe
; indx
++)
801 que
->swq
[indx
].next_idx
= indx
+ 1;
802 que
->swq
[que
->swq_last
].next_idx
= 0; /* Make it circular */
int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_q *rq = &qp->rq;
	struct creq_create_qp1_resp resp;
	struct cmdq_create_qp1 req;
	struct bnxt_qplib_pbl *pbl;
	u16 cmd_flags = 0;
	u32 qp_flags = 0;
	u8 pg_sz_lvl;
	u32 tbl_indx;
	int rc;

	RCFW_CMD_PREP(req, CREATE_QP1, cmd_flags);

	/* General */
	req.type = qp->type;
	req.dpi = cpu_to_le32(qp->dpi->dpi);
	req.qp_handle = cpu_to_le64(qp->qp_handle);

	/* SQ */
	hwq_attr.res = res;
	hwq_attr.sginfo = &sq->sg_info;
	hwq_attr.stride = sizeof(struct sq_sge);
	hwq_attr.depth = bnxt_qplib_get_depth(sq);
	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
	if (rc)
		goto exit;

	rc = bnxt_qplib_alloc_init_swq(sq);
	if (rc)
		goto fail_sq;

	req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
	pbl = &sq->hwq.pbl[PBL_LVL_0];
	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
		     CMDQ_CREATE_QP1_SQ_PG_SIZE_SFT);
	pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP1_SQ_LVL_MASK);
	req.sq_pg_size_sq_lvl = pg_sz_lvl;
	req.sq_fwo_sq_sge =
		cpu_to_le16((sq->max_sge & CMDQ_CREATE_QP1_SQ_SGE_MASK) <<
			    CMDQ_CREATE_QP1_SQ_SGE_SFT);
	req.scq_cid = cpu_to_le32(qp->scq->id);

	/* RQ */
	hwq_attr.res = res;
	hwq_attr.sginfo = &rq->sg_info;
	hwq_attr.stride = sizeof(struct sq_sge);
	hwq_attr.depth = bnxt_qplib_get_depth(rq);
	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
	if (rc)
		goto sq_swq;
	rc = bnxt_qplib_alloc_init_swq(rq);
	if (rc)
		goto fail_rq;
	req.rq_size = cpu_to_le32(rq->max_wqe);
	pbl = &rq->hwq.pbl[PBL_LVL_0];
	req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
		     CMDQ_CREATE_QP1_RQ_PG_SIZE_SFT);
	pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP1_RQ_LVL_MASK);
	req.rq_pg_size_rq_lvl = pg_sz_lvl;
	req.rq_fwo_rq_sge =
		cpu_to_le16((rq->max_sge &
			     CMDQ_CREATE_QP1_RQ_SGE_MASK) <<
			    CMDQ_CREATE_QP1_RQ_SGE_SFT);
	req.rcq_cid = cpu_to_le32(qp->rcq->id);
	/* Header buffer - allow hdr_buf pass in */
	rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
	if (rc) {
		rc = -ENOMEM;
		goto rq_swq;
	}
	qp_flags |= CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE;
	req.qp_flags = cpu_to_le32(qp_flags);
	req.pd_id = cpu_to_le32(qp->pd->id);

	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc)
		goto fail;

	qp->id = le32_to_cpu(resp.xid);
	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
	qp->cctx = res->cctx;
	sq->dbinfo.hwq = &sq->hwq;
	sq->dbinfo.xid = qp->id;
	sq->dbinfo.db = qp->dpi->dbr;
	sq->dbinfo.max_slot = bnxt_qplib_set_sq_max_slot(qp->wqe_mode);
	rq->dbinfo.hwq = &rq->hwq;
	rq->dbinfo.xid = qp->id;
	rq->dbinfo.db = qp->dpi->dbr;
	rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
	rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
	rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;

	return 0;

fail:
	bnxt_qplib_free_qp_hdr_buf(res, qp);
rq_swq:
	kfree(rq->swq);
fail_rq:
	bnxt_qplib_free_hwq(res, &rq->hwq);
sq_swq:
	kfree(sq->swq);
fail_sq:
	bnxt_qplib_free_hwq(res, &sq->hwq);
exit:
	return rc;
}
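/* The PSN search area lives in the aux portion of the SQ HWQ, immediately
 * after the WQE slots (index hwq->depth).  Record the page, the starting
 * offset and the per-entry stride so producers can locate their PSN entry.
 */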
static void bnxt_qplib_init_psn_ptr(struct bnxt_qplib_qp *qp, int size)
{
	struct bnxt_qplib_hwq *hwq;
	struct bnxt_qplib_q *sq;
	u64 fpsne = 0;
	u32 indx_pad = 0;
	u64 psn_pg;

	sq = &qp->sq;
	hwq = &sq->hwq;
	/* First psn entry */
	fpsne = (u64)bnxt_qplib_get_qe(hwq, hwq->depth, &psn_pg);
	if (!IS_ALIGNED(fpsne, PAGE_SIZE))
		indx_pad = (fpsne & ~PAGE_MASK) / size;
	hwq->pad_pgofft = indx_pad;
	hwq->pad_pg = (u64 *)psn_pg;
	hwq->pad_stride = size;
}
int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_q *rq = &qp->rq;
	struct creq_create_qp_resp resp;
	int rc, req_size, psn_sz = 0;
	struct bnxt_qplib_hwq *xrrq;
	struct bnxt_qplib_pbl *pbl;
	struct cmdq_create_qp req;
	u16 cmd_flags = 0;
	u32 qp_flags = 0;
	u8 pg_sz_lvl;
	u32 tbl_indx;
	u16 nsge;

	RCFW_CMD_PREP(req, CREATE_QP, cmd_flags);

	/* General */
	req.type = qp->type;
	req.dpi = cpu_to_le32(qp->dpi->dpi);
	req.qp_handle = cpu_to_le64(qp->qp_handle);

	/* SQ */
	if (qp->type == CMDQ_CREATE_QP_TYPE_RC) {
		psn_sz = bnxt_qplib_is_chip_gen_p5(res->cctx) ?
			 sizeof(struct sq_psn_search_ext) :
			 sizeof(struct sq_psn_search);
	}

	hwq_attr.res = res;
	hwq_attr.sginfo = &sq->sg_info;
	hwq_attr.stride = sizeof(struct sq_sge);
	hwq_attr.depth = bnxt_qplib_get_depth(sq);
	hwq_attr.aux_stride = psn_sz;
	hwq_attr.aux_depth = bnxt_qplib_set_sq_size(sq, qp->wqe_mode);
	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
	if (rc)
		goto exit;

	rc = bnxt_qplib_alloc_init_swq(sq);
	if (rc)
		goto fail_sq;

	if (psn_sz)
		bnxt_qplib_init_psn_ptr(qp, psn_sz);

	req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
	pbl = &sq->hwq.pbl[PBL_LVL_0];
	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
		     CMDQ_CREATE_QP_SQ_PG_SIZE_SFT);
	pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP_SQ_LVL_MASK);
	req.sq_pg_size_sq_lvl = pg_sz_lvl;
	req.sq_fwo_sq_sge =
		cpu_to_le16(((sq->max_sge & CMDQ_CREATE_QP_SQ_SGE_MASK) <<
			     CMDQ_CREATE_QP_SQ_SGE_SFT) | 0);
	req.scq_cid = cpu_to_le32(qp->scq->id);

	/* RQ */
	if (!qp->srq) {
		hwq_attr.res = res;
		hwq_attr.sginfo = &rq->sg_info;
		hwq_attr.stride = sizeof(struct sq_sge);
		hwq_attr.depth = bnxt_qplib_get_depth(rq);
		hwq_attr.aux_stride = 0;
		hwq_attr.aux_depth = 0;
		hwq_attr.type = HWQ_TYPE_QUEUE;
		rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
		if (rc)
			goto sq_swq;
		rc = bnxt_qplib_alloc_init_swq(rq);
		if (rc)
			goto fail_rq;

		req.rq_size = cpu_to_le32(rq->max_wqe);
		pbl = &rq->hwq.pbl[PBL_LVL_0];
		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
		pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
			     CMDQ_CREATE_QP_RQ_PG_SIZE_SFT);
		pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP_RQ_LVL_MASK);
		req.rq_pg_size_rq_lvl = pg_sz_lvl;
		nsge = (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
			6 : rq->max_sge;
		req.rq_fwo_rq_sge =
			cpu_to_le16(((nsge &
				      CMDQ_CREATE_QP_RQ_SGE_MASK) <<
				     CMDQ_CREATE_QP_RQ_SGE_SFT) | 0);
	} else {
		/* SRQ */
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_SRQ_USED;
		req.srq_cid = cpu_to_le32(qp->srq->id);
	}
	req.rcq_cid = cpu_to_le32(qp->rcq->id);

	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_RESERVED_LKEY_ENABLE;
	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FR_PMR_ENABLED;
	if (qp->sig_type)
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION;
	if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_VARIABLE_SIZED_WQE_ENABLED;
	req.qp_flags = cpu_to_le32(qp_flags);

	/* ORRQ and IRRQ */
	if (psn_sz) {
		xrrq = &qp->orrq;
		xrrq->max_elements =
			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
		req_size = xrrq->max_elements *
			   BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE + PAGE_SIZE - 1;
		req_size &= ~(PAGE_SIZE - 1);
		sginfo.pgsize = req_size;
		sginfo.pgshft = PAGE_SHIFT;

		hwq_attr.res = res;
		hwq_attr.sginfo = &sginfo;
		hwq_attr.depth = xrrq->max_elements;
		hwq_attr.stride = BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE;
		hwq_attr.aux_stride = 0;
		hwq_attr.aux_depth = 0;
		hwq_attr.type = HWQ_TYPE_CTX;
		rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
		if (rc)
			goto rq_swq;
		pbl = &xrrq->pbl[PBL_LVL_0];
		req.orrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);

		xrrq = &qp->irrq;
		xrrq->max_elements = IRD_LIMIT_TO_IRRQ_SLOTS(
						qp->max_dest_rd_atomic);
		req_size = xrrq->max_elements *
			   BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE + PAGE_SIZE - 1;
		req_size &= ~(PAGE_SIZE - 1);
		sginfo.pgsize = req_size;
		hwq_attr.depth =  xrrq->max_elements;
		hwq_attr.stride = BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE;
		rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
		if (rc)
			goto fail_orrq;

		pbl = &xrrq->pbl[PBL_LVL_0];
		req.irrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);
	}
	req.pd_id = cpu_to_le32(qp->pd->id);

	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc)
		goto fail;

	qp->id = le32_to_cpu(resp.xid);
	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
	INIT_LIST_HEAD(&qp->sq_flush);
	INIT_LIST_HEAD(&qp->rq_flush);
	qp->cctx = res->cctx;
	sq->dbinfo.hwq = &sq->hwq;
	sq->dbinfo.xid = qp->id;
	sq->dbinfo.db = qp->dpi->dbr;
	sq->dbinfo.max_slot = bnxt_qplib_set_sq_max_slot(qp->wqe_mode);
	if (rq->max_wqe) {
		rq->dbinfo.hwq = &rq->hwq;
		rq->dbinfo.xid = qp->id;
		rq->dbinfo.db = qp->dpi->dbr;
		rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
	}
	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
	rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
	rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;

	return 0;
fail:
	bnxt_qplib_free_hwq(res, &qp->irrq);
fail_orrq:
	bnxt_qplib_free_hwq(res, &qp->orrq);
rq_swq:
	kfree(rq->swq);
fail_rq:
	bnxt_qplib_free_hwq(res, &rq->hwq);
sq_swq:
	kfree(sq->swq);
fail_sq:
	bnxt_qplib_free_hwq(res, &sq->hwq);
exit:
	return rc;
}
static void __modify_flags_from_init_state(struct bnxt_qplib_qp *qp)
{
	switch (qp->state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
		/* INIT->RTR, configure the path_mtu to the default
		 * 2048 if not being requested
		 */
		if (!(qp->modify_flags &
		    CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)) {
			qp->modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
			qp->path_mtu =
				CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
		}
		qp->modify_flags &=
			~CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
		/* Bono FW require the max_dest_rd_atomic to be >= 1 */
		if (qp->max_dest_rd_atomic < 1)
			qp->max_dest_rd_atomic = 1;
		qp->modify_flags &= ~CMDQ_MODIFY_QP_MODIFY_MASK_SRC_MAC;
		/* Bono FW 20.6.5 requires SGID_INDEX configuration */
		if (!(qp->modify_flags &
		    CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)) {
			qp->modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX;
			qp->ah.sgid_index = 0;
		}
		break;
	default:
		break;
	}
}
static void __modify_flags_from_rtr_state(struct bnxt_qplib_qp *qp)
{
	switch (qp->state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
		/* Bono FW requires the max_rd_atomic to be >= 1 */
		if (qp->max_rd_atomic < 1)
			qp->max_rd_atomic = 1;
		/* Bono FW does not allow PKEY_INDEX,
		 * DGID, FLOW_LABEL, SGID_INDEX, HOP_LIMIT,
		 * TRAFFIC_CLASS, DEST_MAC, PATH_MTU, RQ_PSN,
		 * MIN_RNR_TIMER, MAX_DEST_RD_ATOMIC, DEST_QP_ID
		 * modification.
		 */
		qp->modify_flags &=
			~(CMDQ_MODIFY_QP_MODIFY_MASK_PKEY |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
			  CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
			  CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
			  CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
			  CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
			  CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU |
			  CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN |
			  CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER |
			  CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID);
		break;
	default:
		break;
	}
}
static void __filter_modify_flags(struct bnxt_qplib_qp *qp)
{
	switch (qp->cur_qp_state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RESET:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_INIT:
		__modify_flags_from_init_state(qp);
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
		__modify_flags_from_rtr_state(qp);
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_SQD:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_SQE:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_ERR:
		break;
	default:
		break;
	}
}
int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_modify_qp req;
	struct creq_modify_qp_resp resp;
	u16 cmd_flags = 0, pkey;
	u32 temp32[4];
	u32 bmask;
	int rc;

	RCFW_CMD_PREP(req, MODIFY_QP, cmd_flags);

	/* Filter out the qp_attr_mask based on the state->new transition */
	__filter_modify_flags(qp);
	bmask = qp->modify_flags;
	req.modify_mask = cpu_to_le32(qp->modify_flags);
	req.qp_cid = cpu_to_le32(qp->id);
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_STATE) {
		req.network_type_en_sqd_async_notify_new_state =
				(qp->state & CMDQ_MODIFY_QP_NEW_STATE_MASK) |
				(qp->en_sqd_async_notify ?
					CMDQ_MODIFY_QP_EN_SQD_ASYNC_NOTIFY : 0);
	}
	req.network_type_en_sqd_async_notify_new_state |= qp->nw_type;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS)
		req.access = qp->access;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY) {
		if (!bnxt_qplib_get_pkey(res, &res->pkey_tbl,
					 qp->pkey_index, &pkey))
			req.pkey = cpu_to_le16(pkey);
	}
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_QKEY)
		req.qkey = cpu_to_le32(qp->qkey);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DGID) {
		memcpy(temp32, qp->ah.dgid.data, sizeof(struct bnxt_qplib_gid));
		req.dgid[0] = cpu_to_le32(temp32[0]);
		req.dgid[1] = cpu_to_le32(temp32[1]);
		req.dgid[2] = cpu_to_le32(temp32[2]);
		req.dgid[3] = cpu_to_le32(temp32[3]);
	}
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL)
		req.flow_label = cpu_to_le32(qp->ah.flow_label);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)
		req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id
					     [qp->ah.sgid_index]);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT)
		req.hop_limit = qp->ah.hop_limit;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS)
		req.traffic_class = qp->ah.traffic_class;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC)
		memcpy(req.dest_mac, qp->ah.dmac, 6);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)
		req.path_mtu = qp->path_mtu;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT)
		req.timeout = qp->timeout;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT)
		req.retry_cnt = qp->retry_cnt;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY)
		req.rnr_retry = qp->rnr_retry;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER)
		req.min_rnr_timer = qp->min_rnr_timer;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN)
		req.rq_psn = cpu_to_le32(qp->rq.psn);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN)
		req.sq_psn = cpu_to_le32(qp->sq.psn);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC)
		req.max_rd_atomic =
			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC)
		req.max_dest_rd_atomic =
			IRD_LIMIT_TO_IRRQ_SLOTS(qp->max_dest_rd_atomic);

	req.sq_size = cpu_to_le32(qp->sq.hwq.max_elements);
	req.rq_size = cpu_to_le32(qp->rq.hwq.max_elements);
	req.sq_sge = cpu_to_le16(qp->sq.max_sge);
	req.rq_sge = cpu_to_le16(qp->rq.max_sge);
	req.max_inline_data = cpu_to_le32(qp->max_inline_data);
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID)
		req.dest_qp_id = cpu_to_le32(qp->dest_qpn);

	req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id);

	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc)
		return rc;
	qp->cur_qp_state = qp->state;
	return 0;
}
*res
, struct bnxt_qplib_qp
*qp
)
1334 struct bnxt_qplib_rcfw
*rcfw
= res
->rcfw
;
1335 struct cmdq_query_qp req
;
1336 struct creq_query_qp_resp resp
;
1337 struct bnxt_qplib_rcfw_sbuf
*sbuf
;
1338 struct creq_query_qp_resp_sb
*sb
;
1343 RCFW_CMD_PREP(req
, QUERY_QP
, cmd_flags
);
1345 sbuf
= bnxt_qplib_rcfw_alloc_sbuf(rcfw
, sizeof(*sb
));
1350 req
.qp_cid
= cpu_to_le32(qp
->id
);
1351 req
.resp_size
= sizeof(*sb
) / BNXT_QPLIB_CMDQE_UNITS
;
1352 rc
= bnxt_qplib_rcfw_send_message(rcfw
, (void *)&req
, (void *)&resp
,
1356 /* Extract the context from the side buffer */
1357 qp
->state
= sb
->en_sqd_async_notify_state
&
1358 CREQ_QUERY_QP_RESP_SB_STATE_MASK
;
1359 qp
->en_sqd_async_notify
= sb
->en_sqd_async_notify_state
&
1360 CREQ_QUERY_QP_RESP_SB_EN_SQD_ASYNC_NOTIFY
?
1362 qp
->access
= sb
->access
;
1363 qp
->pkey_index
= le16_to_cpu(sb
->pkey
);
1364 qp
->qkey
= le32_to_cpu(sb
->qkey
);
1366 temp32
[0] = le32_to_cpu(sb
->dgid
[0]);
1367 temp32
[1] = le32_to_cpu(sb
->dgid
[1]);
1368 temp32
[2] = le32_to_cpu(sb
->dgid
[2]);
1369 temp32
[3] = le32_to_cpu(sb
->dgid
[3]);
1370 memcpy(qp
->ah
.dgid
.data
, temp32
, sizeof(qp
->ah
.dgid
.data
));
1372 qp
->ah
.flow_label
= le32_to_cpu(sb
->flow_label
);
1374 qp
->ah
.sgid_index
= 0;
1375 for (i
= 0; i
< res
->sgid_tbl
.max
; i
++) {
1376 if (res
->sgid_tbl
.hw_id
[i
] == le16_to_cpu(sb
->sgid_index
)) {
1377 qp
->ah
.sgid_index
= i
;
1381 if (i
== res
->sgid_tbl
.max
)
1382 dev_warn(&res
->pdev
->dev
, "SGID not found??\n");
1384 qp
->ah
.hop_limit
= sb
->hop_limit
;
1385 qp
->ah
.traffic_class
= sb
->traffic_class
;
1386 memcpy(qp
->ah
.dmac
, sb
->dest_mac
, 6);
1387 qp
->ah
.vlan_id
= (le16_to_cpu(sb
->path_mtu_dest_vlan_id
) &
1388 CREQ_QUERY_QP_RESP_SB_VLAN_ID_MASK
) >>
1389 CREQ_QUERY_QP_RESP_SB_VLAN_ID_SFT
;
1390 qp
->path_mtu
= (le16_to_cpu(sb
->path_mtu_dest_vlan_id
) &
1391 CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK
) >>
1392 CREQ_QUERY_QP_RESP_SB_PATH_MTU_SFT
;
1393 qp
->timeout
= sb
->timeout
;
1394 qp
->retry_cnt
= sb
->retry_cnt
;
1395 qp
->rnr_retry
= sb
->rnr_retry
;
1396 qp
->min_rnr_timer
= sb
->min_rnr_timer
;
1397 qp
->rq
.psn
= le32_to_cpu(sb
->rq_psn
);
1398 qp
->max_rd_atomic
= ORRQ_SLOTS_TO_ORD_LIMIT(sb
->max_rd_atomic
);
1399 qp
->sq
.psn
= le32_to_cpu(sb
->sq_psn
);
1400 qp
->max_dest_rd_atomic
=
1401 IRRQ_SLOTS_TO_IRD_LIMIT(sb
->max_dest_rd_atomic
);
1402 qp
->sq
.max_wqe
= qp
->sq
.hwq
.max_elements
;
1403 qp
->rq
.max_wqe
= qp
->rq
.hwq
.max_elements
;
1404 qp
->sq
.max_sge
= le16_to_cpu(sb
->sq_sge
);
1405 qp
->rq
.max_sge
= le16_to_cpu(sb
->rq_sge
);
1406 qp
->max_inline_data
= le32_to_cpu(sb
->max_inline_data
);
1407 qp
->dest_qpn
= le32_to_cpu(sb
->dest_qp_id
);
1408 memcpy(qp
->smac
, sb
->src_mac
, 6);
1409 qp
->vlan_id
= le16_to_cpu(sb
->vlan_pcp_vlan_dei_vlan_id
);
1411 bnxt_qplib_rcfw_free_sbuf(rcfw
, sbuf
);
static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
{
	struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
	struct cq_base *hw_cqe;
	int i;

	for (i = 0; i < cq_hwq->max_elements; i++) {
		hw_cqe = bnxt_qplib_get_qe(cq_hwq, i, NULL);
		if (!CQE_CMP_VALID(hw_cqe, i, cq_hwq->max_elements))
			continue;
		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();
		switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
		case CQ_BASE_CQE_TYPE_REQ:
		case CQ_BASE_CQE_TYPE_TERMINAL:
		{
			struct cq_req *cqe = (struct cq_req *)hw_cqe;

			if (qp == le64_to_cpu(cqe->qp_handle))
				cqe->qp_handle = 0;
			break;
		}
		case CQ_BASE_CQE_TYPE_RES_RC:
		case CQ_BASE_CQE_TYPE_RES_UD:
		case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
		{
			struct cq_res_rc *cqe = (struct cq_res_rc *)hw_cqe;

			if (qp == le64_to_cpu(cqe->qp_handle))
				cqe->qp_handle = 0;
			break;
		}
		default:
			break;
		}
	}
}
int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_destroy_qp req;
	struct creq_destroy_qp_resp resp;
	u32 tbl_indx;
	u16 cmd_flags = 0;
	int rc;

	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
	rcfw->qp_tbl[tbl_indx].qp_id = BNXT_QPLIB_QP_ID_INVALID;
	rcfw->qp_tbl[tbl_indx].qp_handle = NULL;

	RCFW_CMD_PREP(req, DESTROY_QP, cmd_flags);

	req.qp_cid = cpu_to_le32(qp->id);
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc) {
		rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
		rcfw->qp_tbl[tbl_indx].qp_handle = qp;
		return rc;
	}

	return 0;
}
void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res,
			    struct bnxt_qplib_qp *qp)
{
	bnxt_qplib_free_qp_hdr_buf(res, qp);
	bnxt_qplib_free_hwq(res, &qp->sq.hwq);
	kfree(qp->sq.swq);

	bnxt_qplib_free_hwq(res, &qp->rq.hwq);
	kfree(qp->rq.swq);

	if (qp->irrq.max_elements)
		bnxt_qplib_free_hwq(res, &qp->irrq);
	if (qp->orrq.max_elements)
		bnxt_qplib_free_hwq(res, &qp->orrq);
}
void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge)
{
	struct bnxt_qplib_q *sq = &qp->sq;
	u32 sw_prod;

	memset(sge, 0, sizeof(*sge));

	if (qp->sq_hdr_buf) {
		sw_prod = sq->swq_start;
		sge->addr = (dma_addr_t)(qp->sq_hdr_buf_map +
					 sw_prod * qp->sq_hdr_buf_size);
		sge->lkey = 0xFFFFFFFF;
		sge->size = qp->sq_hdr_buf_size;
		return qp->sq_hdr_buf + sw_prod * sge->size;
	}
	return NULL;
}
bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp
*qp
)
1522 struct bnxt_qplib_q
*rq
= &qp
->rq
;
1524 return rq
->swq_start
;
dma_addr_t bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp, u32 index)
{
	return (qp->rq_hdr_buf_map + index * qp->rq_hdr_buf_size);
}
void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	u32 sw_prod;

	memset(sge, 0, sizeof(*sge));

	if (qp->rq_hdr_buf) {
		sw_prod = rq->swq_start;
		sge->addr = (dma_addr_t)(qp->rq_hdr_buf_map +
					 sw_prod * qp->rq_hdr_buf_size);
		sge->lkey = 0xFFFFFFFF;
		sge->size = qp->rq_hdr_buf_size;
		return qp->rq_hdr_buf + sw_prod * sge->size;
	}
	return NULL;
}
static void bnxt_qplib_fill_psn_search(struct bnxt_qplib_qp *qp,
				       struct bnxt_qplib_swqe *wqe,
				       struct bnxt_qplib_swq *swq)
{
	struct sq_psn_search_ext *psns_ext;
	struct sq_psn_search *psns;
	u32 flg_npsn;
	u32 op_spsn;

	if (!swq->psn_search)
		return;
	psns = swq->psn_search;
	psns_ext = swq->psn_ext;

	op_spsn = ((swq->start_psn << SQ_PSN_SEARCH_START_PSN_SFT) &
		    SQ_PSN_SEARCH_START_PSN_MASK);
	op_spsn |= ((wqe->type << SQ_PSN_SEARCH_OPCODE_SFT) &
		     SQ_PSN_SEARCH_OPCODE_MASK);
	flg_npsn = ((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) &
		     SQ_PSN_SEARCH_NEXT_PSN_MASK);

	if (bnxt_qplib_is_chip_gen_p5(qp->cctx)) {
		psns_ext->opcode_start_psn = cpu_to_le32(op_spsn);
		psns_ext->flags_next_psn = cpu_to_le32(flg_npsn);
		psns_ext->start_slot_idx = cpu_to_le16(swq->slot_idx);
	} else {
		psns->opcode_start_psn = cpu_to_le32(op_spsn);
		psns->flags_next_psn = cpu_to_le32(flg_npsn);
	}
}
static int bnxt_qplib_put_inline(struct bnxt_qplib_qp *qp,
				 struct bnxt_qplib_swqe *wqe,
				 u16 *idx)
{
	struct bnxt_qplib_hwq *hwq;
	int len, t_len, offt;
	bool pull_dst = true;
	void *il_dst = NULL;
	void *il_src = NULL;
	int t_cplen, cplen;
	int indx;

	hwq = &qp->sq.hwq;
	t_len = 0;
	for (indx = 0; indx < wqe->num_sge; indx++) {
		len = wqe->sg_list[indx].size;
		il_src = (void *)wqe->sg_list[indx].addr;
		t_len += len;
		if (t_len > qp->max_inline_data)
			return -ENOMEM;
		while (len) {
			if (pull_dst) {
				pull_dst = false;
				il_dst = bnxt_qplib_get_prod_qe(hwq, *idx);
				(*idx)++;
				t_cplen = 0;
				offt = 0;
			}
			cplen = min_t(int, len, sizeof(struct sq_sge));
			cplen = min_t(int, cplen,
					(sizeof(struct sq_sge) - offt));
			memcpy(il_dst, il_src, cplen);
			t_cplen += cplen;
			il_src += cplen;
			il_dst += cplen;
			offt += cplen;
			len -= cplen;
			if (t_cplen == sizeof(struct sq_sge))
				pull_dst = true;
		}
	}

	return t_len;
}
static u32 bnxt_qplib_put_sges(struct bnxt_qplib_hwq *hwq,
			       struct bnxt_qplib_sge *ssge,
			       u16 nsge, u16 *idx)
{
	struct sq_sge *dsge;
	int indx, len = 0;

	for (indx = 0; indx < nsge; indx++, (*idx)++) {
		dsge = bnxt_qplib_get_prod_qe(hwq, *idx);
		dsge->va_or_pa = cpu_to_le64(ssge[indx].addr);
		dsge->l_key = cpu_to_le32(ssge[indx].lkey);
		dsge->size = cpu_to_le32(ssge[indx].size);
		len += ssge[indx].size;
	}

	return len;
}
bnxt_qplib_required_slots(struct bnxt_qplib_qp
*qp
,
1648 struct bnxt_qplib_swqe
*wqe
,
1649 u16
*wqe_sz
, u16
*qdf
, u8 mode
)
1655 nsge
= wqe
->num_sge
;
1656 /* Adding sq_send_hdr is a misnomer, for rq also hdr size is same. */
1657 bytes
= sizeof(struct sq_send_hdr
) + nsge
* sizeof(struct sq_sge
);
1658 if (wqe
->flags
& BNXT_QPLIB_SWQE_FLAGS_INLINE
) {
1659 ilsize
= bnxt_qplib_calc_ilsize(wqe
, qp
->max_inline_data
);
1660 bytes
= ALIGN(ilsize
, sizeof(struct sq_sge
));
1661 bytes
+= sizeof(struct sq_send_hdr
);
1664 *qdf
= __xlate_qfd(qp
->sq
.q_full_delta
, bytes
);
1667 if (mode
== BNXT_QPLIB_WQE_MODE_STATIC
)
1672 static void bnxt_qplib_pull_psn_buff(struct bnxt_qplib_q
*sq
,
1673 struct bnxt_qplib_swq
*swq
)
1675 struct bnxt_qplib_hwq
*hwq
;
1676 u32 pg_num
, pg_indx
;
1683 tail
= swq
->slot_idx
/ sq
->dbinfo
.max_slot
;
1684 pg_num
= (tail
+ hwq
->pad_pgofft
) / (PAGE_SIZE
/ hwq
->pad_stride
);
1685 pg_indx
= (tail
+ hwq
->pad_pgofft
) % (PAGE_SIZE
/ hwq
->pad_stride
);
1686 buff
= (void *)(hwq
->pad_pg
[pg_num
] + pg_indx
* hwq
->pad_stride
);
1687 swq
->psn_ext
= buff
;
1688 swq
->psn_search
= buff
;
void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *sq = &qp->sq;

	bnxt_qplib_ring_prod_db(&sq->dbinfo, DBC_DBC_TYPE_SQ);
}
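/* Post one send WQE: reserve the software WQE, build the two header slots
 * followed by inline data or SGEs, fill the opcode specific fields, update
 * the PSN bookkeeping and advance the HWQ producer.  For a QP already in
 * the error state the WQE is only queued and a completion is scheduled
 * through the NQ worker so it can be flushed from poll_cq.
 */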
int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
			 struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_qplib_nq_work *nq_work = NULL;
	int i, rc = 0, data_len = 0, pkt_num = 0;
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_hwq *hwq;
	struct bnxt_qplib_swq *swq;
	bool sch_handler = false;
	u16 wqe_sz, qdf = 0;
	void *base_hdr;
	void *ext_hdr;
	__le32 temp32;
	u32 wqe_idx;
	u32 slots;
	u16 idx;

	hwq = &sq->hwq;
	if (qp->state != CMDQ_MODIFY_QP_NEW_STATE_RTS &&
	    qp->state != CMDQ_MODIFY_QP_NEW_STATE_ERR) {
		dev_err(&hwq->pdev->dev,
			"QPLIB: FP: QP (0x%x) is in the 0x%x state",
			qp->id, qp->state);
		rc = -EINVAL;
		goto done;
	}

	slots = bnxt_qplib_required_slots(qp, wqe, &wqe_sz, &qdf, qp->wqe_mode);
	if (bnxt_qplib_queue_full(sq, slots + qdf)) {
		dev_err(&hwq->pdev->dev,
			"prod = %#x cons = %#x qdepth = %#x delta = %#x\n",
			hwq->prod, hwq->cons, hwq->depth, sq->q_full_delta);
		rc = -ENOMEM;
		goto done;
	}

	swq = bnxt_qplib_get_swqe(sq, &wqe_idx);
	bnxt_qplib_pull_psn_buff(sq, swq);

	idx = 0;
	swq->slot_idx = hwq->prod;
	swq->slots = slots;
	swq->wr_id = wqe->wr_id;
	swq->type = wqe->type;
	swq->flags = wqe->flags;
	swq->start_psn = sq->psn & BTH_PSN_MASK;
	if (qp->sig_type)
		swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;

	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
		sch_handler = true;
		dev_dbg(&hwq->pdev->dev,
			"%s Error QP. Scheduling for poll_cq\n", __func__);
		goto queue_err;
	}

	base_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
	ext_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
	memset(base_hdr, 0, sizeof(struct sq_sge));
	memset(ext_hdr, 0, sizeof(struct sq_sge));

	if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE)
		/* Copy the inline data */
		data_len = bnxt_qplib_put_inline(qp, wqe, &idx);
	else
		data_len = bnxt_qplib_put_sges(hwq, wqe->sg_list, wqe->num_sge,
					       &idx);
	if (data_len < 0)
		goto queue_err;
	/* Specifics */
	switch (wqe->type) {
	case BNXT_QPLIB_SWQE_TYPE_SEND:
		if (qp->type == CMDQ_CREATE_QP1_TYPE_GSI) {
			struct sq_send_raweth_qp1_hdr *sqe = base_hdr;
			struct sq_raw_ext_hdr *ext_sqe = ext_hdr;
			/* Assemble info for Raw Ethertype QPs */

			sqe->wqe_type = wqe->type;
			sqe->flags = wqe->flags;
			sqe->wqe_size = wqe_sz;
			sqe->cfa_action = cpu_to_le16(wqe->rawqp1.cfa_action);
			sqe->lflags = cpu_to_le16(wqe->rawqp1.lflags);
			sqe->length = cpu_to_le32(data_len);
			ext_sqe->cfa_meta = cpu_to_le32((wqe->rawqp1.cfa_meta &
				SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_MASK) <<
				SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_SFT);

			break;
		}
		fallthrough;
	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
	{
		struct sq_ud_ext_hdr *ext_sqe = ext_hdr;
		struct sq_send_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->wqe_size = wqe_sz;
		sqe->inv_key_or_imm_data = cpu_to_le32(wqe->send.inv_key);
		if (qp->type == CMDQ_CREATE_QP_TYPE_UD ||
		    qp->type == CMDQ_CREATE_QP_TYPE_GSI) {
			sqe->q_key = cpu_to_le32(wqe->send.q_key);
			sqe->length = cpu_to_le32(data_len);
			sq->psn = (sq->psn + 1) & BTH_PSN_MASK;
			ext_sqe->dst_qp = cpu_to_le32(wqe->send.dst_qp &
						      SQ_SEND_DST_QP_MASK);
			ext_sqe->avid = cpu_to_le32(wqe->send.avid &
						    SQ_SEND_AVID_MASK);
		} else {
			sqe->length = cpu_to_le32(data_len);
			if (qp->mtu)
				pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
			if (!pkt_num)
				pkt_num = 1;
			sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
		}
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
	case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
	{
		struct sq_rdma_ext_hdr *ext_sqe = ext_hdr;
		struct sq_rdma_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->wqe_size = wqe_sz;
		sqe->imm_data = cpu_to_le32(wqe->rdma.inv_key);
		sqe->length = cpu_to_le32((u32)data_len);
		ext_sqe->remote_va = cpu_to_le64(wqe->rdma.remote_va);
		ext_sqe->remote_key = cpu_to_le32(wqe->rdma.r_key);
		if (qp->mtu)
			pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
		if (!pkt_num)
			pkt_num = 1;
		sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
	{
		struct sq_atomic_ext_hdr *ext_sqe = ext_hdr;
		struct sq_atomic_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->remote_key = cpu_to_le32(wqe->atomic.r_key);
		sqe->remote_va = cpu_to_le64(wqe->atomic.remote_va);
		ext_sqe->swap_data = cpu_to_le64(wqe->atomic.swap_data);
		ext_sqe->cmp_data = cpu_to_le64(wqe->atomic.cmp_data);
		if (qp->mtu)
			pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
		if (!pkt_num)
			pkt_num = 1;
		sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
	{
		struct sq_localinvalidate *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->inv_l_key = cpu_to_le32(wqe->local_inv.inv_l_key);

		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR:
	{
		struct sq_fr_pmr_ext_hdr *ext_sqe = ext_hdr;
		struct sq_fr_pmr_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->access_cntl = wqe->frmr.access_cntl |
				   SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
		sqe->zero_based_page_size_log =
			(wqe->frmr.pg_sz_log & SQ_FR_PMR_PAGE_SIZE_LOG_MASK) <<
			SQ_FR_PMR_PAGE_SIZE_LOG_SFT |
			(wqe->frmr.zero_based ? SQ_FR_PMR_ZERO_BASED : 0);
		sqe->l_key = cpu_to_le32(wqe->frmr.l_key);
		temp32 = cpu_to_le32(wqe->frmr.length);
		memcpy(sqe->length, &temp32, sizeof(wqe->frmr.length));
		sqe->numlevels_pbl_page_size_log =
			((wqe->frmr.pbl_pg_sz_log <<
					SQ_FR_PMR_PBL_PAGE_SIZE_LOG_SFT) &
					SQ_FR_PMR_PBL_PAGE_SIZE_LOG_MASK) |
			((wqe->frmr.levels << SQ_FR_PMR_NUMLEVELS_SFT) &
					SQ_FR_PMR_NUMLEVELS_MASK);

		for (i = 0; i < wqe->frmr.page_list_len; i++)
			wqe->frmr.pbl_ptr[i] = cpu_to_le64(
						wqe->frmr.page_list[i] |
						PTU_PTE_VALID);
		ext_sqe->pblptr = cpu_to_le64(wqe->frmr.pbl_dma_ptr);
		ext_sqe->va = cpu_to_le64(wqe->frmr.va);

		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_BIND_MW:
	{
		struct sq_bind_ext_hdr *ext_sqe = ext_hdr;
		struct sq_bind_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->access_cntl = wqe->bind.access_cntl;
		sqe->mw_type_zero_based = wqe->bind.mw_type |
			(wqe->bind.zero_based ? SQ_BIND_ZERO_BASED : 0);
		sqe->parent_l_key = cpu_to_le32(wqe->bind.parent_l_key);
		sqe->l_key = cpu_to_le32(wqe->bind.r_key);
		ext_sqe->va = cpu_to_le64(wqe->bind.va);
		ext_sqe->length_lo = cpu_to_le32(wqe->bind.length);
		break;
	}
	default:
		/* Bad wqe, return error */
		rc = -EINVAL;
		goto done;
	}
	swq->next_psn = sq->psn & BTH_PSN_MASK;
	bnxt_qplib_fill_psn_search(qp, wqe, swq);
queue_err:
	bnxt_qplib_swq_mod_start(sq, wqe_idx);
	bnxt_qplib_hwq_incr_prod(hwq, swq->slots);
done:
	if (sch_handler) {
		nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
		if (nq_work) {
			nq_work->cq = qp->scq;
			nq_work->nq = qp->scq->nq;
			INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
			queue_work(qp->scq->nq->cqn_wq, &nq_work->work);
		} else {
			dev_err(&hwq->pdev->dev,
				"FP: Failed to allocate SQ nq_work!\n");
			rc = -ENOMEM;
		}
	}
	return rc;
}
*qp
)
1945 struct bnxt_qplib_q
*rq
= &qp
->rq
;
1947 bnxt_qplib_ring_prod_db(&rq
->dbinfo
, DBC_DBC_TYPE_RQ
);
1950 int bnxt_qplib_post_recv(struct bnxt_qplib_qp
*qp
,
1951 struct bnxt_qplib_swqe
*wqe
)
1953 struct bnxt_qplib_nq_work
*nq_work
= NULL
;
1954 struct bnxt_qplib_q
*rq
= &qp
->rq
;
1955 struct rq_wqe_hdr
*base_hdr
;
1956 struct rq_ext_hdr
*ext_hdr
;
1957 struct bnxt_qplib_hwq
*hwq
;
1958 struct bnxt_qplib_swq
*swq
;
1959 bool sch_handler
= false;
1965 if (qp
->state
== CMDQ_MODIFY_QP_NEW_STATE_RESET
) {
1966 dev_err(&hwq
->pdev
->dev
,
1967 "QPLIB: FP: QP (0x%x) is in the 0x%x state",
1973 if (bnxt_qplib_queue_full(rq
, rq
->dbinfo
.max_slot
)) {
1974 dev_err(&hwq
->pdev
->dev
,
1975 "FP: QP (0x%x) RQ is full!\n", qp
->id
);
1980 swq
= bnxt_qplib_get_swqe(rq
, &wqe_idx
);
1981 swq
->wr_id
= wqe
->wr_id
;
1982 swq
->slots
= rq
->dbinfo
.max_slot
;
1984 if (qp
->state
== CMDQ_MODIFY_QP_NEW_STATE_ERR
) {
1986 dev_dbg(&hwq
->pdev
->dev
,
1987 "%s: Error QP. Scheduling for poll_cq\n", __func__
);
1992 base_hdr
= bnxt_qplib_get_prod_qe(hwq
, idx
++);
1993 ext_hdr
= bnxt_qplib_get_prod_qe(hwq
, idx
++);
1994 memset(base_hdr
, 0, sizeof(struct sq_sge
));
1995 memset(ext_hdr
, 0, sizeof(struct sq_sge
));
1996 wqe_sz
= (sizeof(struct rq_wqe_hdr
) +
1997 wqe
->num_sge
* sizeof(struct sq_sge
)) >> 4;
1998 bnxt_qplib_put_sges(hwq
, wqe
->sg_list
, wqe
->num_sge
, &idx
);
1999 if (!wqe
->num_sge
) {
2002 sge
= bnxt_qplib_get_prod_qe(hwq
, idx
++);
2006 base_hdr
->wqe_type
= wqe
->type
;
2007 base_hdr
->flags
= wqe
->flags
;
2008 base_hdr
->wqe_size
= wqe_sz
;
2009 base_hdr
->wr_id
[0] = cpu_to_le32(wqe_idx
);
2011 bnxt_qplib_swq_mod_start(rq
, wqe_idx
);
2012 bnxt_qplib_hwq_incr_prod(hwq
, swq
->slots
);
2015 nq_work
= kzalloc(sizeof(*nq_work
), GFP_ATOMIC
);
2017 nq_work
->cq
= qp
->rcq
;
2018 nq_work
->nq
= qp
->rcq
->nq
;
2019 INIT_WORK(&nq_work
->work
, bnxt_qpn_cqn_sched_task
);
2020 queue_work(qp
->rcq
->nq
->cqn_wq
, &nq_work
->work
);
2022 dev_err(&hwq
->pdev
->dev
,
2023 "FP: Failed to allocate RQ nq_work!\n");
2032 int bnxt_qplib_create_cq(struct bnxt_qplib_res
*res
, struct bnxt_qplib_cq
*cq
)
2034 struct bnxt_qplib_rcfw
*rcfw
= res
->rcfw
;
2035 struct bnxt_qplib_hwq_attr hwq_attr
= {};
2036 struct creq_create_cq_resp resp
;
2037 struct bnxt_qplib_pbl
*pbl
;
2038 struct cmdq_create_cq req
;
2044 hwq_attr
.depth
= cq
->max_wqe
;
2045 hwq_attr
.stride
= sizeof(struct cq_base
);
2046 hwq_attr
.type
= HWQ_TYPE_QUEUE
;
2047 hwq_attr
.sginfo
= &cq
->sg_info
;
2048 rc
= bnxt_qplib_alloc_init_hwq(&cq
->hwq
, &hwq_attr
);
2052 RCFW_CMD_PREP(req
, CREATE_CQ
, cmd_flags
);
2055 dev_err(&rcfw
->pdev
->dev
,
2056 "FP: CREATE_CQ failed due to NULL DPI\n");
2059 req
.dpi
= cpu_to_le32(cq
->dpi
->dpi
);
2060 req
.cq_handle
= cpu_to_le64(cq
->cq_handle
);
2061 req
.cq_size
= cpu_to_le32(cq
->hwq
.max_elements
);
2062 pbl
= &cq
->hwq
.pbl
[PBL_LVL_0
];
2063 pg_sz_lvl
= (bnxt_qplib_base_pg_size(&cq
->hwq
) <<
2064 CMDQ_CREATE_CQ_PG_SIZE_SFT
);
2065 pg_sz_lvl
|= (cq
->hwq
.level
& CMDQ_CREATE_CQ_LVL_MASK
);
2066 req
.pg_size_lvl
= cpu_to_le32(pg_sz_lvl
);
2067 req
.pbl
= cpu_to_le64(pbl
->pg_map_arr
[0]);
2068 req
.cq_fco_cnq_id
= cpu_to_le32(
2069 (cq
->cnq_hw_ring_id
& CMDQ_CREATE_CQ_CNQ_ID_MASK
) <<
2070 CMDQ_CREATE_CQ_CNQ_ID_SFT
);
2072 rc
= bnxt_qplib_rcfw_send_message(rcfw
, (void *)&req
,
2073 (void *)&resp
, NULL
, 0);
2077 cq
->id
= le32_to_cpu(resp
.xid
);
2078 cq
->period
= BNXT_QPLIB_QUEUE_START_PERIOD
;
2079 init_waitqueue_head(&cq
->waitq
);
2080 INIT_LIST_HEAD(&cq
->sqf_head
);
2081 INIT_LIST_HEAD(&cq
->rqf_head
);
2082 spin_lock_init(&cq
->compl_lock
);
2083 spin_lock_init(&cq
->flush_lock
);
2085 cq
->dbinfo
.hwq
= &cq
->hwq
;
2086 cq
->dbinfo
.xid
= cq
->id
;
2087 cq
->dbinfo
.db
= cq
->dpi
->dbr
;
2088 cq
->dbinfo
.priv_db
= res
->dpi_tbl
.dbr_bar_reg_iomem
;
2090 bnxt_qplib_armen_db(&cq
->dbinfo
, DBC_DBC_TYPE_CQ_ARMENA
);
2095 bnxt_qplib_free_hwq(res
, &cq
->hwq
);
2100 int bnxt_qplib_destroy_cq(struct bnxt_qplib_res
*res
, struct bnxt_qplib_cq
*cq
)
2102 struct bnxt_qplib_rcfw
*rcfw
= res
->rcfw
;
2103 struct cmdq_destroy_cq req
;
2104 struct creq_destroy_cq_resp resp
;
2105 u16 total_cnq_events
;
2109 RCFW_CMD_PREP(req
, DESTROY_CQ
, cmd_flags
);
2111 req
.cq_cid
= cpu_to_le32(cq
->id
);
2112 rc
= bnxt_qplib_rcfw_send_message(rcfw
, (void *)&req
,
2113 (void *)&resp
, NULL
, 0);
2116 total_cnq_events
= le16_to_cpu(resp
.total_cnq_events
);
2117 __wait_for_all_nqes(cq
, total_cnq_events
);
2118 bnxt_qplib_free_hwq(res
, &cq
->hwq
);
2122 static int __flush_sq(struct bnxt_qplib_q
*sq
, struct bnxt_qplib_qp
*qp
,
2123 struct bnxt_qplib_cqe
**pcqe
, int *budget
)
2125 struct bnxt_qplib_cqe
*cqe
;
2129 /* Now complete all outstanding SQEs with FLUSHED_ERR */
2130 start
= sq
->swq_start
;
2133 last
= sq
->swq_last
;
2136 /* Skip the FENCE WQE completions */
2137 if (sq
->swq
[last
].wr_id
== BNXT_QPLIB_FENCE_WRID
) {
2138 bnxt_qplib_cancel_phantom_processing(qp
);
2141 memset(cqe
, 0, sizeof(*cqe
));
2142 cqe
->status
= CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR
;
2143 cqe
->opcode
= CQ_BASE_CQE_TYPE_REQ
;
2144 cqe
->qp_handle
= (u64
)(unsigned long)qp
;
2145 cqe
->wr_id
= sq
->swq
[last
].wr_id
;
2146 cqe
->src_qp
= qp
->id
;
2147 cqe
->type
= sq
->swq
[last
].type
;
2151 bnxt_qplib_hwq_incr_cons(&sq
->hwq
, sq
->swq
[last
].slots
);
2152 sq
->swq_last
= sq
->swq
[last
].next_idx
;
2155 if (!(*budget
) && sq
->swq_last
!= start
)
2162 static int __flush_rq(struct bnxt_qplib_q
*rq
, struct bnxt_qplib_qp
*qp
,
2163 struct bnxt_qplib_cqe
**pcqe
, int *budget
)
2165 struct bnxt_qplib_cqe
*cqe
;
2171 case CMDQ_CREATE_QP1_TYPE_GSI
:
2172 opcode
= CQ_BASE_CQE_TYPE_RES_RAWETH_QP1
;
2174 case CMDQ_CREATE_QP_TYPE_RC
:
2175 opcode
= CQ_BASE_CQE_TYPE_RES_RC
;
2177 case CMDQ_CREATE_QP_TYPE_UD
:
2178 case CMDQ_CREATE_QP_TYPE_GSI
:
2179 opcode
= CQ_BASE_CQE_TYPE_RES_UD
;
2183 /* Flush the rest of the RQ */
2184 start
= rq
->swq_start
;
2187 last
= rq
->swq_last
;
2190 memset(cqe
, 0, sizeof(*cqe
));
2192 CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR
;
2193 cqe
->opcode
= opcode
;
2194 cqe
->qp_handle
= (unsigned long)qp
;
2195 cqe
->wr_id
= rq
->swq
[last
].wr_id
;
2198 bnxt_qplib_hwq_incr_cons(&rq
->hwq
, rq
->swq
[last
].slots
);
2199 rq
->swq_last
= rq
->swq
[last
].next_idx
;
2202 if (!*budget
&& rq
->swq_last
!= start
)
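
/* Move the QP into the ERROR state so no new work is posted while the
 * flush logic drains the queues, and drop any pending phantom tracking.
 */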
void bnxt_qplib_mark_qp_error(void *qp_handle)
{
	struct bnxt_qplib_qp *qp = qp_handle;

	if (!qp)
		return;

	/* Must block new posting of SQ and RQ */
	qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
	bnxt_qplib_cancel_phantom_processing(qp);
}
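
/* Hardware workaround (WA 9060) for phantom (unsignaled fence) completions:
 * if the SWQE being completed carries the psn_search mark, defer the
 * completion, re-arm the CQ and remember that a phantom CQE is expected.
 * On later polls, peek ahead through valid CQEs looking for the REQ CQE
 * whose consumer index points at the fence WRID before resuming normal
 * completion processing.  A non-zero return tells the caller to stop and
 * retry this CQE later.
 */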
/* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive)
 * CQE is tracked from sw_cq_cons to max_element but valid only if VALID=1
 */
static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
		     u32 cq_cons, u32 swq_last, u32 cqe_sq_cons)
{
	u32 peek_sw_cq_cons, peek_raw_cq_cons, peek_sq_cons_idx;
	struct bnxt_qplib_q *sq = &qp->sq;
	struct cq_req *peek_req_hwcqe;
	struct bnxt_qplib_qp *peek_qp;
	struct bnxt_qplib_q *peek_sq;
	struct bnxt_qplib_swq *swq;
	struct cq_base *peek_hwcqe;
	int i, rc = 0;

	/* Check for the psn_search marking before completing */
	swq = &sq->swq[swq_last];
	if (swq->psn_search &&
	    le32_to_cpu(swq->psn_search->flags_next_psn) & 0x80000000) {
		/* Unmark */
		swq->psn_search->flags_next_psn = cpu_to_le32
			(le32_to_cpu(swq->psn_search->flags_next_psn)
				     & ~0x80000000);
		dev_dbg(&cq->hwq.pdev->dev,
			"FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n",
			cq_cons, qp->id, swq_last, cqe_sq_cons);
		sq->condition = true;
		sq->send_phantom = true;

		/* TODO: Only ARM if the previous SQE is ARMALL */
		bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMALL);
		rc = -EAGAIN;
		goto out;
	}
	if (sq->condition) {
		/* Peek at the completions */
		peek_raw_cq_cons = cq->hwq.cons;
		peek_sw_cq_cons = cq_cons;
		i = cq->hwq.max_elements;
		while (i--) {
			peek_sw_cq_cons = HWQ_CMP((peek_sw_cq_cons), &cq->hwq);
			peek_hwcqe = bnxt_qplib_get_qe(&cq->hwq,
						       peek_sw_cq_cons, NULL);
			/* If the next hwcqe is VALID */
			if (CQE_CMP_VALID(peek_hwcqe, peek_raw_cq_cons,
					  cq->hwq.max_elements)) {
				/*
				 * The valid test of the entry must be done
				 * first before reading any further.
				 */
				dma_rmb();
				/* If the next hwcqe is a REQ */
				if ((peek_hwcqe->cqe_type_toggle &
				    CQ_BASE_CQE_TYPE_MASK) ==
				    CQ_BASE_CQE_TYPE_REQ) {
					peek_req_hwcqe = (struct cq_req *)
							 peek_hwcqe;
					peek_qp = (struct bnxt_qplib_qp *)
						((unsigned long)
						 le64_to_cpu
						 (peek_req_hwcqe->qp_handle));
					peek_sq = &peek_qp->sq;
					peek_sq_cons_idx =
						((le16_to_cpu(
						  peek_req_hwcqe->sq_cons_idx)
						  - 1) % sq->max_wqe);
					/* If the hwcqe's sq's wr_id matches */
					if (peek_sq == sq &&
					    sq->swq[peek_sq_cons_idx].wr_id ==
					    BNXT_QPLIB_FENCE_WRID) {
						/*
						 * Unbreak only if the phantom
						 * comes back
						 */
						dev_dbg(&cq->hwq.pdev->dev,
							"FP: Got Phantom CQE\n");
						sq->condition = false;
						sq->single = true;
						rc = 0;
						goto out;
					}
				}
				/* Valid but not the phantom, so keep looping */
			} else {
				/* Not valid yet, just exit and wait */
				rc = 0;
				goto out;
			}
			peek_sw_cq_cons++;
			peek_raw_cq_cons++;
		}
		dev_err(&cq->hwq.pdev->dev,
			"Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x\n",
			cq_cons, qp->id, swq_last, cqe_sq_cons);
		rc = -EINVAL;
	}
out:
	return rc;
}
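
/* Process a REQ (send-side) CQE.  Because the hardware can aggregate
 * completions, walk the SQ's software queue from the current consumer up to
 * the consumer index reported in the CQE and fabricate a CQE for every
 * signaled WQE in between; an error status is applied only to the last one
 * and moves the QP onto the CQ's flush list.
 */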
static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
				     struct cq_req *hwcqe,
				     struct bnxt_qplib_cqe **pcqe, int *budget,
				     u32 cq_cons, struct bnxt_qplib_qp **lib_qp)
{
	struct bnxt_qplib_swq *swq;
	struct bnxt_qplib_cqe *cqe;
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *sq;
	u32 cqe_sq_cons;
	int rc = 0;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev,
			"FP: Process Req qp is NULL\n");
		return -EINVAL;
	}
	sq = &qp->sq;

	cqe_sq_cons = le16_to_cpu(hwcqe->sq_cons_idx) % sq->max_wqe;
	if (qp->sq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		goto done;
	}
	/* Require to walk the sq's swq to fabricate CQEs for all previously
	 * signaled SWQEs due to CQE aggregation from the current sq cons
	 * to the cqe_sq_cons
	 */
	cqe = *pcqe;
	while (*budget) {
		if (sq->swq_last == cqe_sq_cons)
			/* Done */
			break;

		swq = &sq->swq[sq->swq_last];
		memset(cqe, 0, sizeof(*cqe));
		cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
		cqe->qp_handle = (u64)(unsigned long)qp;
		cqe->src_qp = qp->id;
		cqe->wr_id = swq->wr_id;
		if (cqe->wr_id == BNXT_QPLIB_FENCE_WRID)
			goto skip;
		cqe->type = swq->type;

		/* For the last CQE, check for status.  For errors, regardless
		 * of the request being signaled or not, it must complete with
		 * the hwcqe error status
		 */
		if (swq->next_idx == cqe_sq_cons &&
		    hwcqe->status != CQ_REQ_STATUS_OK) {
			cqe->status = hwcqe->status;
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Processed Req wr_id[%d] = 0x%llx with status 0x%x\n",
				sq->swq_last, cqe->wr_id, cqe->status);
			cqe++;
			(*budget)--;
			bnxt_qplib_mark_qp_error(qp);
			/* Add qp to flush list of the CQ */
			bnxt_qplib_add_flush_qp(qp);
		} else {
			/* Before we complete, do WA 9060 */
			if (do_wa9060(qp, cq, cq_cons, sq->swq_last,
				      cqe_sq_cons)) {
				*lib_qp = qp;
				goto out;
			}
			if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
				cqe->status = CQ_REQ_STATUS_OK;
				cqe++;
				(*budget)--;
			}
		}
skip:
		bnxt_qplib_hwq_incr_cons(&sq->hwq, swq->slots);
		sq->swq_last = swq->next_idx;
		if (sq->single)
			break;
	}
out:
	*pcqe = cqe;
	if (sq->swq_last != cqe_sq_cons) {
		/* Out of budget */
		rc = -EAGAIN;
		goto done;
	}
	/*
	 * Back to normal completion mode only after it has completed all of
	 * the WC for this CQE
	 */
	sq->single = false;
done:
	return rc;
}
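
/* Return a consumed SRQ entry to the SRQ's free list so that its tag can be
 * reused by a later SRQ receive posting.
 */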
static void bnxt_qplib_release_srqe(struct bnxt_qplib_srq *srq, u32 tag)
{
	spin_lock(&srq->hwq.lock);
	srq->swq[srq->last_idx].next_idx = (int)tag;
	srq->last_idx = (int)tag;
	srq->swq[srq->last_idx].next_idx = -1;
	srq->hwq.cons++; /* Support for SRQE counter */
	spin_unlock(&srq->hwq.lock);
}
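
/* Process an RC responder CQE.  The work request id either indexes the SRQ
 * (when the SRQ flag is set) or must match the RQ's current software
 * consumer.  A bad completion status moves the QP onto the CQ's flush list.
 */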
static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
					struct cq_res_rc *hwcqe,
					struct bnxt_qplib_cqe **pcqe,
					int *budget)
{
	struct bnxt_qplib_srq *srq;
	struct bnxt_qplib_cqe *cqe;
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *rq;
	u32 wr_id_idx;
	int rc = 0;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev, "process_cq RC qp is NULL\n");
		return -EINVAL;
	}
	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		goto done;
	}

	cqe = *pcqe;
	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
	cqe->length = le32_to_cpu(hwcqe->length);
	cqe->invrkey = le32_to_cpu(hwcqe->imm_data_or_inv_r_key);
	cqe->mr_handle = le64_to_cpu(hwcqe->mr_handle);
	cqe->flags = le16_to_cpu(hwcqe->flags);
	cqe->status = hwcqe->status;
	cqe->qp_handle = (u64)(unsigned long)qp;

	wr_id_idx = le32_to_cpu(hwcqe->srq_or_rq_wr_id) &
				CQ_RES_RC_SRQ_OR_RQ_WR_ID_MASK;
	if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
		srq = qp->srq;
		if (!srq)
			return -EINVAL;
		if (wr_id_idx >= srq->hwq.max_elements) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process RC wr_id idx 0x%x exceeded SRQ max 0x%x\n",
				wr_id_idx, srq->hwq.max_elements);
			return -EINVAL;
		}
		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
		bnxt_qplib_release_srqe(srq, wr_id_idx);
		cqe++;
		(*budget)--;
		*pcqe = cqe;
	} else {
		struct bnxt_qplib_swq *swq;

		rq = &qp->rq;
		if (wr_id_idx > (rq->max_wqe - 1)) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process RC wr_id idx 0x%x exceeded RQ max 0x%x\n",
				wr_id_idx, rq->max_wqe);
			return -EINVAL;
		}
		if (wr_id_idx != rq->swq_last)
			return -EINVAL;
		swq = &rq->swq[rq->swq_last];
		cqe->wr_id = swq->wr_id;
		cqe++;
		(*budget)--;
		bnxt_qplib_hwq_incr_cons(&rq->hwq, swq->slots);
		rq->swq_last = swq->next_idx;
		*pcqe = cqe;

		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
			/* Add qp to flush list of the CQ */
			bnxt_qplib_add_flush_qp(qp);
		}
	}

done:
	return rc;
}
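
/* Process a UD responder CQE.  In addition to the RC handling, the source
 * QP number and source MAC address are recovered from the CQE.
 */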
static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
					struct cq_res_ud *hwcqe,
					struct bnxt_qplib_cqe **pcqe,
					int *budget)
{
	struct bnxt_qplib_srq *srq;
	struct bnxt_qplib_cqe *cqe;
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *rq;
	u32 wr_id_idx;
	int rc = 0;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev, "process_cq UD qp is NULL\n");
		return -EINVAL;
	}
	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		goto done;
	}
	cqe = *pcqe;
	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
	cqe->length = le16_to_cpu(hwcqe->length) & CQ_RES_UD_LENGTH_MASK;
	cqe->cfa_meta = le16_to_cpu(hwcqe->cfa_metadata);
	cqe->invrkey = le32_to_cpu(hwcqe->imm_data);
	cqe->flags = le16_to_cpu(hwcqe->flags);
	cqe->status = hwcqe->status;
	cqe->qp_handle = (u64)(unsigned long)qp;
	/*FIXME: Endianness fix needed for smace */
	memcpy(cqe->smac, hwcqe->src_mac, ETH_ALEN);
	wr_id_idx = le32_to_cpu(hwcqe->src_qp_high_srq_or_rq_wr_id)
				& CQ_RES_UD_SRQ_OR_RQ_WR_ID_MASK;
	cqe->src_qp = le16_to_cpu(hwcqe->src_qp_low) |
				  ((le32_to_cpu(
				  hwcqe->src_qp_high_srq_or_rq_wr_id) &
				 CQ_RES_UD_SRC_QP_HIGH_MASK) >> 8);

	if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
		srq = qp->srq;
		if (!srq)
			return -EINVAL;

		if (wr_id_idx >= srq->hwq.max_elements) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process UD wr_id idx 0x%x exceeded SRQ max 0x%x\n",
				wr_id_idx, srq->hwq.max_elements);
			return -EINVAL;
		}
		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
		bnxt_qplib_release_srqe(srq, wr_id_idx);
		cqe++;
		(*budget)--;
		*pcqe = cqe;
	} else {
		struct bnxt_qplib_swq *swq;

		rq = &qp->rq;
		if (wr_id_idx > (rq->max_wqe - 1)) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process UD wr_id idx 0x%x exceeded RQ max 0x%x\n",
				wr_id_idx, rq->max_wqe);
			return -EINVAL;
		}

		if (rq->swq_last != wr_id_idx)
			return -EINVAL;
		swq = &rq->swq[rq->swq_last];
		cqe->wr_id = swq->wr_id;
		cqe++;
		(*budget)--;
		bnxt_qplib_hwq_incr_cons(&rq->hwq, swq->slots);
		rq->swq_last = swq->next_idx;
		*pcqe = cqe;

		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
			/* Add qp to flush list of the CQ */
			bnxt_qplib_add_flush_qp(qp);
		}
	}
done:
	return rc;
}
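
/* Return true when the CQE at the current consumer index does not have its
 * valid bit set, i.e. the CQ currently holds no work to poll.
 */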
bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
{
	struct cq_base *hw_cqe;
	u32 sw_cons, raw_cons;
	bool rc = true;

	raw_cons = cq->hwq.cons;
	sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
	hw_cqe = bnxt_qplib_get_qe(&cq->hwq, sw_cons, NULL);
	/* Check for Valid bit. If the CQE is valid, return false */
	rc = !CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements);

	return rc;
}
static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
						struct cq_res_raweth_qp1 *hwcqe,
						struct bnxt_qplib_cqe **pcqe,
						int *budget)
{
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *rq;
	struct bnxt_qplib_srq *srq;
	struct bnxt_qplib_cqe *cqe;
	u32 wr_id_idx;
	int rc = 0;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev, "process_cq Raw/QP1 qp is NULL\n");
		return -EINVAL;
	}
	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		goto done;
	}
	cqe = *pcqe;
	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
	cqe->flags = le16_to_cpu(hwcqe->flags);
	cqe->qp_handle = (u64)(unsigned long)qp;

	wr_id_idx =
		le32_to_cpu(hwcqe->raweth_qp1_payload_offset_srq_or_rq_wr_id)
				& CQ_RES_RAWETH_QP1_SRQ_OR_RQ_WR_ID_MASK;
	cqe->src_qp = qp->id;
	if (qp->id == 1 && !cqe->length) {
		/* Add workaround for the length misdetection */
		cqe->length = 296;
	} else {
		cqe->length = le16_to_cpu(hwcqe->length);
	}
	cqe->pkey_index = qp->pkey_index;
	memcpy(cqe->smac, qp->smac, 6);

	cqe->raweth_qp1_flags = le16_to_cpu(hwcqe->raweth_qp1_flags);
	cqe->raweth_qp1_flags2 = le32_to_cpu(hwcqe->raweth_qp1_flags2);
	cqe->raweth_qp1_metadata = le32_to_cpu(hwcqe->raweth_qp1_metadata);

	if (cqe->flags & CQ_RES_RAWETH_QP1_FLAGS_SRQ_SRQ) {
		srq = qp->srq;
		if (!srq) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: SRQ used but not defined??\n");
			return -EINVAL;
		}
		if (wr_id_idx >= srq->hwq.max_elements) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process Raw/QP1 wr_id idx 0x%x exceeded SRQ max 0x%x\n",
				wr_id_idx, srq->hwq.max_elements);
			return -EINVAL;
		}
		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
		bnxt_qplib_release_srqe(srq, wr_id_idx);
		cqe++;
		(*budget)--;
		*pcqe = cqe;
	} else {
		struct bnxt_qplib_swq *swq;

		rq = &qp->rq;
		if (wr_id_idx > (rq->max_wqe - 1)) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process Raw/QP1 RQ wr_id idx 0x%x exceeded RQ max 0x%x\n",
				wr_id_idx, rq->max_wqe);
			return -EINVAL;
		}
		if (rq->swq_last != wr_id_idx)
			return -EINVAL;
		swq = &rq->swq[rq->swq_last];
		cqe->wr_id = swq->wr_id;
		cqe++;
		(*budget)--;
		bnxt_qplib_hwq_incr_cons(&rq->hwq, swq->slots);
		rq->swq_last = swq->next_idx;
		*pcqe = cqe;

		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
			/* Add qp to flush list of the CQ */
			bnxt_qplib_add_flush_qp(qp);
		}
	}

done:
	return rc;
}
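
/* Process a TERMINAL CQE.  The QP is forced into the ERROR state, any
 * aggregated successful send completions up to the reported SQ consumer
 * index are generated with status OK, and the QP is queued on the CQ's
 * flush list so the remaining RQEs complete with FLUSHED_ERR.
 */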
static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
					  struct cq_terminal *hwcqe,
					  struct bnxt_qplib_cqe **pcqe,
					  int *budget)
{
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *sq, *rq;
	struct bnxt_qplib_cqe *cqe;
	u32 swq_last = 0, cqe_cons;
	int rc = 0;

	/* Check the Status */
	if (hwcqe->status != CQ_TERMINAL_STATUS_OK)
		dev_warn(&cq->hwq.pdev->dev,
			 "FP: CQ Process Terminal Error status = 0x%x\n",
			 hwcqe->status);

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev,
			"FP: CQ Process terminal qp is NULL\n");
		return -EINVAL;
	}

	/* Must block new posting of SQ and RQ */
	qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;

	sq = &qp->sq;
	rq = &qp->rq;

	cqe_cons = le16_to_cpu(hwcqe->sq_cons_idx);
	if (cqe_cons == 0xFFFF)
		goto do_rq;
	cqe_cons %= sq->max_wqe;

	if (qp->sq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		goto sq_done;
	}

	/* Terminal CQE can also include aggregated successful CQEs prior.
	 * So we must complete all CQEs from the current sq's cons to the
	 * cq_cons with status OK
	 */
	cqe = *pcqe;
	while (*budget) {
		swq_last = sq->swq_last;
		if (swq_last == cqe_cons)
			break;
		if (sq->swq[swq_last].flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
			memset(cqe, 0, sizeof(*cqe));
			cqe->status = CQ_REQ_STATUS_OK;
			cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
			cqe->qp_handle = (u64)(unsigned long)qp;
			cqe->src_qp = qp->id;
			cqe->wr_id = sq->swq[swq_last].wr_id;
			cqe->type = sq->swq[swq_last].type;
			cqe++;
			(*budget)--;
		}
		bnxt_qplib_hwq_incr_cons(&sq->hwq, sq->swq[swq_last].slots);
		sq->swq_last = sq->swq[swq_last].next_idx;
	}
	*pcqe = cqe;
	if (!(*budget) && swq_last != cqe_cons) {
		/* Out of budget */
		rc = -EAGAIN;
		goto sq_done;
	}
sq_done:
	if (rc)
		return rc;
do_rq:
	cqe_cons = le16_to_cpu(hwcqe->rq_cons_idx);
	if (cqe_cons == 0xFFFF) {
		goto done;
	} else if (cqe_cons > rq->max_wqe - 1) {
		dev_err(&cq->hwq.pdev->dev,
			"FP: CQ Processed terminal reported rq_cons_idx 0x%x exceeds max 0x%x\n",
			cqe_cons, rq->max_wqe);
		goto done;
	}

	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		rc = 0;
		goto done;
	}

	/* Terminal CQE requires all posted RQEs to complete with FLUSHED_ERR
	 * from the current rq->cons to the rq->prod regardless what the
	 * rq->cons the terminal CQE indicates
	 */

	/* Add qp to flush list of the CQ */
	bnxt_qplib_add_flush_qp(qp);
done:
	return rc;
}
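
/* A CUT_OFF CQE marks the end of CQE delivery on the old queue during a CQ
 * resize: clear the resize-in-progress flag and wake up the waiter.
 */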
static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq,
					struct cq_cutoff *hwcqe)
{
	/* Check the Status */
	if (hwcqe->status != CQ_CUTOFF_STATUS_OK) {
		dev_err(&cq->hwq.pdev->dev,
			"FP: CQ Process Cutoff Error status = 0x%x\n",
			hwcqe->status);
		return -EINVAL;
	}
	clear_bit(CQ_FLAGS_RESIZE_IN_PROG, &cq->flags);
	wake_up_interruptible(&cq->waitq);

	return 0;
}
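
/* Drain the CQ's flush lists under flush_lock, producing flush-error
 * completions for every QP queued on the SQ and RQ flush lists.  Returns the
 * number of CQEs written into the caller's array.
 */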
int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
				  struct bnxt_qplib_cqe *cqe,
				  int num_cqes)
{
	struct bnxt_qplib_qp *qp = NULL;
	u32 budget = num_cqes;
	unsigned long flags;

	spin_lock_irqsave(&cq->flush_lock, flags);
	list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
		dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing SQ QP= %p\n", qp);
		__flush_sq(&qp->sq, qp, &cqe, &budget);
	}

	list_for_each_entry(qp, &cq->rqf_head, rq_flush) {
		dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing RQ QP= %p\n", qp);
		__flush_rq(&qp->rq, qp, &cqe, &budget);
	}
	spin_unlock_irqrestore(&cq->flush_lock, flags);

	return num_cqes - budget;
}
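
/* Main poll loop: consume valid CQEs up to num_cqes, dispatching each one to
 * its type-specific handler.  The doorbell is rung with the final consumer
 * index only if at least one CQE was consumed.  Returns the number of
 * library CQEs produced.
 */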
int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
		       int num_cqes, struct bnxt_qplib_qp **lib_qp)
{
	struct cq_base *hw_cqe;
	u32 sw_cons, raw_cons;
	int budget, rc = 0;

	raw_cons = cq->hwq.cons;
	budget = num_cqes;

	while (budget) {
		sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
		hw_cqe = bnxt_qplib_get_qe(&cq->hwq, sw_cons, NULL);

		/* Check for Valid bit */
		if (!CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements))
			break;

		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();

		/* From the device's respective CQE format to qplib_wc*/
		switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
		case CQ_BASE_CQE_TYPE_REQ:
			rc = bnxt_qplib_cq_process_req(cq,
						       (struct cq_req *)hw_cqe,
						       &cqe, &budget,
						       sw_cons, lib_qp);
			break;
		case CQ_BASE_CQE_TYPE_RES_RC:
			rc = bnxt_qplib_cq_process_res_rc(cq,
							  (struct cq_res_rc *)
							  hw_cqe, &cqe,
							  &budget);
			break;
		case CQ_BASE_CQE_TYPE_RES_UD:
			rc = bnxt_qplib_cq_process_res_ud
					(cq, (struct cq_res_ud *)hw_cqe, &cqe,
					 &budget);
			break;
		case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
			rc = bnxt_qplib_cq_process_res_raweth_qp1
					(cq, (struct cq_res_raweth_qp1 *)
					 hw_cqe, &cqe, &budget);
			break;
		case CQ_BASE_CQE_TYPE_TERMINAL:
			rc = bnxt_qplib_cq_process_terminal
					(cq, (struct cq_terminal *)hw_cqe,
					 &cqe, &budget);
			break;
		case CQ_BASE_CQE_TYPE_CUT_OFF:
			bnxt_qplib_cq_process_cutoff
					(cq, (struct cq_cutoff *)hw_cqe);
			/* Done processing this CQ */
			goto exit;
		default:
			dev_err(&cq->hwq.pdev->dev,
				"process_cq unknown type 0x%lx\n",
				hw_cqe->cqe_type_toggle &
				CQ_BASE_CQE_TYPE_MASK);
			rc = -EINVAL;
			break;
		}
		if (rc < 0) {
			if (rc == -EAGAIN)
				break;
			/* Error while processing the CQE, just skip to the
			 * next one
			 */
			dev_err(&cq->hwq.pdev->dev,
				"process_cqe error rc = 0x%x\n", rc);
		}
		raw_cons++;
	}
	if (cq->hwq.cons != raw_cons) {
		cq->hwq.cons = raw_cons;
		bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ);
	}
exit:
	return num_cqes - budget;
}
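
/* Re-arm the CQ with the requested arm type and record that the completion
 * handler is armed for this CQ.
 */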
void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
{
	if (arm_type)
		bnxt_qplib_ring_db(&cq->dbinfo, arm_type);
	/* Using cq->arm_state variable to track whether to issue cq handler */
	atomic_set(&cq->arm_state, 1);
}
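
/* Flush the CQ notification workqueues of both CQs attached to the QP so
 * that no CQN work remains in flight for this QP.
 */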
void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp)
{
	flush_workqueue(qp->scq->nq->cqn_wq);
	if (qp->scq != qp->rcq)
		flush_workqueue(qp->rcq->nq->cqn_wq);
}