/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: Fast Path Operators
 */
#define dev_fmt(fmt) "QPLIB: " fmt

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/prefetch.h>
#include <linux/if_ether.h>
#include <rdma/ib_mad.h>

#include "roce_hsi.h"

#include "qplib_res.h"
#include "qplib_rcfw.h"
#include "qplib_sp.h"
#include "qplib_fp.h"
#include <rdma/ib_addr.h>
static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);

static void bnxt_qplib_cancel_phantom_processing(struct bnxt_qplib_qp *qp)
{
	qp->sq.condition = false;
	qp->sq.send_phantom = false;
	qp->sq.single = false;
}
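
/* Flush-list handling: when a QP moves to the error state its outstanding
 * work requests must be completed with a flush status.  The helpers below
 * queue the QP on the send/receive flush lists of its completion queues
 * (scq/rcq) while holding both CQ flush locks, so that the poll path can
 * later generate the flushed completions for this QP.
 */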
static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_cq *scq, *rcq;

	scq = qp->scq;
	rcq = qp->rcq;

	if (!qp->sq.flushed) {
		dev_dbg(&scq->hwq.pdev->dev,
			"FP: Adding to SQ Flush list = %p\n", qp);
		bnxt_qplib_cancel_phantom_processing(qp);
		list_add_tail(&qp->sq_flush, &scq->sqf_head);
		qp->sq.flushed = true;
	}
	if (!qp->rq.flushed) {
		dev_dbg(&rcq->hwq.pdev->dev,
			"FP: Adding to RQ Flush list = %p\n", qp);
		list_add_tail(&qp->rq_flush, &rcq->rqf_head);
		qp->rq.flushed = true;
	}
}

static void bnxt_qplib_acquire_cq_flush_locks(struct bnxt_qplib_qp *qp,
					      unsigned long *flags)
	__acquires(&qp->scq->flush_lock) __acquires(&qp->rcq->flush_lock)
{
	spin_lock_irqsave(&qp->scq->flush_lock, *flags);
	if (qp->scq == qp->rcq)
		__acquire(&qp->rcq->flush_lock);
	else
		spin_lock(&qp->rcq->flush_lock);
}

static void bnxt_qplib_release_cq_flush_locks(struct bnxt_qplib_qp *qp,
					      unsigned long *flags)
	__releases(&qp->scq->flush_lock) __releases(&qp->rcq->flush_lock)
{
	if (qp->scq == qp->rcq)
		__release(&qp->rcq->flush_lock);
	else
		spin_unlock(&qp->rcq->flush_lock);
	spin_unlock_irqrestore(&qp->scq->flush_lock, *flags);
}

void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
{
	unsigned long flags;

	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
	__bnxt_qplib_add_flush_qp(qp);
	bnxt_qplib_release_cq_flush_locks(qp, &flags);
}

static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
{
	if (qp->sq.flushed) {
		qp->sq.flushed = false;
		list_del(&qp->sq_flush);
	}
	if (qp->rq.flushed) {
		qp->rq.flushed = false;
		list_del(&qp->rq_flush);
	}
}

void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp)
{
	unsigned long flags;

	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
	__clean_cq(qp->scq, (u64)(unsigned long)qp);
	__clean_cq(qp->rcq, (u64)(unsigned long)qp);

	__bnxt_qplib_del_flush_qp(qp);
	bnxt_qplib_release_cq_flush_locks(qp, &flags);
}
static void bnxt_qpn_cqn_sched_task(struct work_struct *work)
{
	struct bnxt_qplib_nq_work *nq_work =
			container_of(work, struct bnxt_qplib_nq_work, work);
	struct bnxt_qplib_cq *cq = nq_work->cq;
	struct bnxt_qplib_nq *nq = nq_work->nq;

	if (cq && nq) {
		spin_lock_bh(&cq->compl_lock);
		if (atomic_read(&cq->arm_state) && nq->cqn_handler) {
			dev_dbg(&nq->pdev->dev,
				"%s:Trigger cq  = %p event nq = %p\n",
				__func__, cq, nq);
			nq->cqn_handler(nq, cq);
		}
		spin_unlock_bh(&cq->compl_lock);
	}
	kfree(nq_work);
}

static void bnxt_qplib_free_qp_hdr_buf(struct bnxt_qplib_res *res,
				       struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	struct bnxt_qplib_q *sq = &qp->sq;

	if (qp->rq_hdr_buf)
		dma_free_coherent(&res->pdev->dev,
				  rq->max_wqe * qp->rq_hdr_buf_size,
				  qp->rq_hdr_buf, qp->rq_hdr_buf_map);
	if (qp->sq_hdr_buf)
		dma_free_coherent(&res->pdev->dev,
				  sq->max_wqe * qp->sq_hdr_buf_size,
				  qp->sq_hdr_buf, qp->sq_hdr_buf_map);
	qp->rq_hdr_buf = NULL;
	qp->sq_hdr_buf = NULL;
	qp->rq_hdr_buf_map = 0;
	qp->sq_hdr_buf_map = 0;
	qp->sq_hdr_buf_size = 0;
	qp->rq_hdr_buf_size = 0;
}

static int bnxt_qplib_alloc_qp_hdr_buf(struct bnxt_qplib_res *res,
				       struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	struct bnxt_qplib_q *sq = &qp->sq;
	int rc = 0;

	if (qp->sq_hdr_buf_size && sq->max_wqe) {
		qp->sq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
					sq->max_wqe * qp->sq_hdr_buf_size,
					&qp->sq_hdr_buf_map, GFP_KERNEL);
		if (!qp->sq_hdr_buf) {
			rc = -ENOMEM;
			dev_err(&res->pdev->dev,
				"Failed to create sq_hdr_buf\n");
			goto fail;
		}
	}

	if (qp->rq_hdr_buf_size && rq->max_wqe) {
		qp->rq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
						    rq->max_wqe *
						    qp->rq_hdr_buf_size,
						    &qp->rq_hdr_buf_map,
						    GFP_KERNEL);
		if (!qp->rq_hdr_buf) {
			rc = -ENOMEM;
			dev_err(&res->pdev->dev,
				"Failed to create rq_hdr_buf\n");
			goto fail;
		}
	}
	return 0;

fail:
	bnxt_qplib_free_qp_hdr_buf(res, qp);
	return rc;
}
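
/* Notification queue (NQ) entries carry a validity indication that
 * alternates on each pass over the ring; NQE_CMP_VALID() checks it against
 * the flags kept in nq->nq_db.dbinfo.  clean_nq() below walks the ring and
 * zeroes the CQ handle of any notification that targets the CQ being torn
 * down, so that a late notification cannot reference a freed CQ.
 */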
static void clean_nq(struct bnxt_qplib_nq *nq, struct bnxt_qplib_cq *cq)
{
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	struct nq_base *nqe, **nq_ptr;
	int budget = nq->budget;
	u64 q_handle;
	u16 type;

	spin_lock_bh(&hwq->lock);
	/* Service the NQ until empty */
	while (budget--) {
		nq_ptr = (struct nq_base **)hwq->pbl_ptr;
		nqe = &nq_ptr[NQE_PG(hwq->cons)][NQE_IDX(hwq->cons)];
		if (!NQE_CMP_VALID(nqe, nq->nq_db.dbinfo.flags))
			break;

		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();

		type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
		switch (type) {
		case NQ_BASE_TYPE_CQ_NOTIFICATION:
		{
			struct nq_cn *nqcne = (struct nq_cn *)nqe;

			q_handle = le32_to_cpu(nqcne->cq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
						     << 32;
			if ((unsigned long)cq == q_handle) {
				nqcne->cq_handle_low = 0;
				nqcne->cq_handle_high = 0;
				cq->cnq_events++;
			}
			break;
		}
		default:
			break;
		}
		bnxt_qplib_hwq_incr_cons(hwq->max_elements, &hwq->cons,
					 1, &nq->nq_db.dbinfo.flags);
	}
	spin_unlock_bh(&hwq->lock);
}

/* Wait for receiving all NQEs for this CQ and clean the NQEs associated with
 * this CQ.
 */
static void __wait_for_all_nqes(struct bnxt_qplib_cq *cq, u16 cnq_events)
{
	u32 retry_cnt = 100;

	while (retry_cnt--) {
		if (cnq_events == cq->cnq_events)
			return;
		usleep_range(50, 100);
		clean_nq(cq->nq, cq);
	}
}
static void bnxt_qplib_service_nq(struct tasklet_struct *t)
{
	struct bnxt_qplib_nq *nq = from_tasklet(nq, t, nq_tasklet);
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	struct bnxt_qplib_cq *cq;
	int budget = nq->budget;
	struct nq_base *nqe;
	u64 q_handle;
	u32 hw_polled = 0;
	u16 type;

	spin_lock_bh(&hwq->lock);
	/* Service the NQ until empty */
	while (budget--) {
		nqe = bnxt_qplib_get_qe(hwq, hwq->cons, NULL);
		if (!NQE_CMP_VALID(nqe, nq->nq_db.dbinfo.flags))
			break;

		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();

		type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
		switch (type) {
		case NQ_BASE_TYPE_CQ_NOTIFICATION:
		{
			struct nq_cn *nqcne = (struct nq_cn *)nqe;
			struct bnxt_re_cq *cq_p;

			q_handle = le32_to_cpu(nqcne->cq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
						     << 32;
			cq = (struct bnxt_qplib_cq *)(unsigned long)q_handle;
			if (!cq)
				break;
			cq->toggle = (le16_to_cpu(nqe->info10_type) &
				      NQ_CN_TOGGLE_MASK) >> NQ_CN_TOGGLE_SFT;
			cq->dbinfo.toggle = cq->toggle;
			cq_p = container_of(cq, struct bnxt_re_cq, qplib_cq);
			if (cq_p->uctx_cq_page)
				*((u32 *)cq_p->uctx_cq_page) = cq->toggle;

			bnxt_qplib_armen_db(&cq->dbinfo,
					    DBC_DBC_TYPE_CQ_ARMENA);
			spin_lock_bh(&cq->compl_lock);
			atomic_set(&cq->arm_state, 0);
			if (nq->cqn_handler(nq, (cq)))
				dev_warn(&nq->pdev->dev,
					 "cqn - type 0x%x not handled\n", type);
			cq->cnq_events++;
			spin_unlock_bh(&cq->compl_lock);
			break;
		}
		case NQ_BASE_TYPE_SRQ_EVENT:
		{
			struct bnxt_qplib_srq *srq;
			struct bnxt_re_srq *srq_p;
			struct nq_srq_event *nqsrqe =
						(struct nq_srq_event *)nqe;

			q_handle = le32_to_cpu(nqsrqe->srq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqsrqe->srq_handle_high)
				     << 32;
			srq = (struct bnxt_qplib_srq *)q_handle;
			srq->toggle = (le16_to_cpu(nqe->info10_type) & NQ_CN_TOGGLE_MASK)
				      >> NQ_CN_TOGGLE_SFT;
			srq->dbinfo.toggle = srq->toggle;
			srq_p = container_of(srq, struct bnxt_re_srq, qplib_srq);
			if (srq_p->uctx_srq_page)
				*((u32 *)srq_p->uctx_srq_page) = srq->toggle;
			bnxt_qplib_armen_db(&srq->dbinfo,
					    DBC_DBC_TYPE_SRQ_ARMENA);
			if (nq->srqn_handler(nq,
					     (struct bnxt_qplib_srq *)q_handle,
					     nqsrqe->event))
				dev_warn(&nq->pdev->dev,
					 "SRQ event 0x%x not handled\n",
					 nqsrqe->event);
			break;
		}
		case NQ_BASE_TYPE_DBQ_EVENT:
			break;
		default:
			dev_warn(&nq->pdev->dev,
				 "nqe with type = 0x%x not handled\n", type);
			break;
		}
		hw_polled++;
		bnxt_qplib_hwq_incr_cons(hwq->max_elements, &hwq->cons,
					 1, &nq->nq_db.dbinfo.flags);
	}
	if (hw_polled)
		bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, true);
	spin_unlock_bh(&hwq->lock);
}

/* bnxt_re_synchronize_nq - self polling notification queue.
 * @nq      -     notification queue pointer
 *
 * This function will start polling entries of a given notification queue
 * for all pending entries.
 * This function is useful to synchronize notification entries while resources
 * are going away.
 */
void bnxt_re_synchronize_nq(struct bnxt_qplib_nq *nq)
{
	int budget = nq->budget;

	nq->budget = nq->hwq.max_elements;
	bnxt_qplib_service_nq(&nq->nq_tasklet);
	nq->budget = budget;
}
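
/* Interrupt path: the MSI-X handler only prefetches the next NQ entry and
 * schedules the NQ tasklet; all event decoding happens in
 * bnxt_qplib_service_nq() in softirq context.
 */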
static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
{
	struct bnxt_qplib_nq *nq = dev_instance;
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	u32 sw_cons;

	/* Prefetch the NQ element */
	sw_cons = HWQ_CMP(hwq->cons, hwq);
	prefetch(bnxt_qplib_get_qe(hwq, sw_cons, NULL));

	/* Fan out to CPU affinitized kthreads? */
	tasklet_schedule(&nq->nq_tasklet);

	return IRQ_HANDLED;
}

void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill)
{
	if (!nq->requested)
		return;

	nq->requested = false;
	/* Mask h/w interrupt */
	bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, false);
	/* Sync with last running IRQ handler */
	synchronize_irq(nq->msix_vec);
	irq_set_affinity_hint(nq->msix_vec, NULL);
	free_irq(nq->msix_vec, nq);
	kfree(nq->name);
	nq->name = NULL;

	if (kill)
		tasklet_kill(&nq->nq_tasklet);
	tasklet_disable(&nq->nq_tasklet);
}

void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
{
	if (nq->cqn_wq) {
		destroy_workqueue(nq->cqn_wq);
		nq->cqn_wq = NULL;
	}

	/* Make sure the HW is stopped! */
	bnxt_qplib_nq_stop_irq(nq, true);

	if (nq->nq_db.reg.bar_reg) {
		iounmap(nq->nq_db.reg.bar_reg);
		nq->nq_db.reg.bar_reg = NULL;
	}

	nq->cqn_handler = NULL;
	nq->srqn_handler = NULL;
	nq->msix_vec = 0;
}

int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
			    int msix_vector, bool need_init)
{
	struct bnxt_qplib_res *res = nq->res;
	int rc;

	if (nq->requested)
		return -EFAULT;

	nq->msix_vec = msix_vector;
	if (need_init)
		tasklet_setup(&nq->nq_tasklet, bnxt_qplib_service_nq);
	else
		tasklet_enable(&nq->nq_tasklet);

	nq->name = kasprintf(GFP_KERNEL, "bnxt_re-nq-%d@pci:%s",
			     nq_indx, pci_name(res->pdev));
	if (!nq->name)
		return -ENOMEM;
	rc = request_irq(nq->msix_vec, bnxt_qplib_nq_irq, 0, nq->name, nq);
	if (rc) {
		kfree(nq->name);
		nq->name = NULL;
		tasklet_disable(&nq->nq_tasklet);
		return rc;
	}

	cpumask_clear(&nq->mask);
	cpumask_set_cpu(nq_indx, &nq->mask);
	rc = irq_set_affinity_hint(nq->msix_vec, &nq->mask);
	if (rc)
		dev_warn(&nq->pdev->dev,
			 "set affinity failed; vector: %d nq_idx: %d\n",
			 nq->msix_vec, nq_indx);
	nq->requested = true;
	bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, res->cctx, true);

	return rc;
}

static int bnxt_qplib_map_nq_db(struct bnxt_qplib_nq *nq, u32 reg_offt)
{
	resource_size_t reg_base;
	struct bnxt_qplib_nq_db *nq_db;
	struct pci_dev *pdev;

	pdev = nq->pdev;
	nq_db = &nq->nq_db;

	nq_db->dbinfo.flags = 0;
	nq_db->reg.bar_id = NQ_CONS_PCI_BAR_REGION;
	nq_db->reg.bar_base = pci_resource_start(pdev, nq_db->reg.bar_id);
	if (!nq_db->reg.bar_base) {
		dev_err(&pdev->dev, "QPLIB: NQ BAR region %d resc start is 0!",
			nq_db->reg.bar_id);
		return -ENOMEM;
	}

	reg_base = nq_db->reg.bar_base + reg_offt;
	/* Unconditionally map 8 bytes to support 57500 series */
	nq_db->reg.len = 8;
	nq_db->reg.bar_reg = ioremap(reg_base, nq_db->reg.len);
	if (!nq_db->reg.bar_reg) {
		dev_err(&pdev->dev, "QPLIB: NQ BAR region %d mapping failed",
			nq_db->reg.bar_id);
		return -ENOMEM;
	}

	nq_db->dbinfo.db = nq_db->reg.bar_reg;
	nq_db->dbinfo.hwq = &nq->hwq;
	nq_db->dbinfo.xid = nq->ring_id;

	return 0;
}
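
/* bnxt_qplib_enable_nq() wires up one notification queue: it creates the
 * CQN workqueue, maps the NQ doorbell from the consumer BAR and then
 * requests the MSI-X vector.  A rough caller sketch (argument values are
 * illustrative only, not taken from this file):
 *
 *	rc = bnxt_qplib_enable_nq(pdev, nq, nq_idx, msix_vec, db_offset,
 *				  my_cqn_handler, my_srqn_handler);
 *	if (rc)
 *		goto cleanup;
 */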
int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
			 int nq_idx, int msix_vector, int bar_reg_offset,
			 cqn_handler_t cqn_handler,
			 srqn_handler_t srqn_handler)
{
	int rc;

	nq->pdev = pdev;
	nq->cqn_handler = cqn_handler;
	nq->srqn_handler = srqn_handler;

	/* Have a task to schedule CQ notifiers in post send case */
	nq->cqn_wq  = create_singlethread_workqueue("bnxt_qplib_nq");
	if (!nq->cqn_wq)
		return -ENOMEM;

	rc = bnxt_qplib_map_nq_db(nq, bar_reg_offset);
	if (rc)
		goto fail;

	rc = bnxt_qplib_nq_start_irq(nq, nq_idx, msix_vector, true);
	if (rc) {
		dev_err(&nq->pdev->dev,
			"Failed to request irq for nq-idx %d\n", nq_idx);
		goto fail;
	}

	return 0;
fail:
	bnxt_qplib_disable_nq(nq);
	return rc;
}

void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq)
{
	if (nq->hwq.max_elements) {
		bnxt_qplib_free_hwq(nq->res, &nq->hwq);
		nq->hwq.max_elements = 0;
	}
}

int bnxt_qplib_alloc_nq(struct bnxt_qplib_res *res, struct bnxt_qplib_nq *nq)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};

	nq->pdev = res->pdev;
	nq->res = res;
	if (!nq->hwq.max_elements ||
	    nq->hwq.max_elements > BNXT_QPLIB_NQE_MAX_CNT)
		nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;

	sginfo.pgsize = PAGE_SIZE;
	sginfo.pgshft = PAGE_SHIFT;
	hwq_attr.res = res;
	hwq_attr.sginfo = &sginfo;
	hwq_attr.depth = nq->hwq.max_elements;
	hwq_attr.stride = sizeof(struct nq_base);
	hwq_attr.type = bnxt_qplib_get_hwq_type(nq->res);
	if (bnxt_qplib_alloc_init_hwq(&nq->hwq, &hwq_attr)) {
		dev_err(&nq->pdev->dev, "FP NQ allocation failed");
		return -ENOMEM;
	}
	nq->budget = 8;
	return 0;
}

void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
			    struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_destroy_srq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_destroy_srq req = {};
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_DESTROY_SRQ,
				 sizeof(req));

	/* Configure the request */
	req.srq_cid = cpu_to_le32(srq->id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	kfree(srq->swq);
	if (rc)
		return;
	bnxt_qplib_free_hwq(res, &srq->hwq);
}
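
/* SRQ software queue: srq->swq is a simple free list linked through
 * next_idx.  bnxt_qplib_create_srq() initialises the chain,
 * bnxt_qplib_post_srq_recv() pops the head from start_idx, and the
 * completion path identifies the entry again via the index carried back
 * in wr_id[0] of the hardware WQE.
 */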
int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct creq_create_srq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_create_srq req = {};
	struct bnxt_qplib_pbl *pbl;
	u16 pg_sz_lvl;
	int rc, idx;

	hwq_attr.res = res;
	hwq_attr.sginfo = &srq->sg_info;
	hwq_attr.depth = srq->max_wqe;
	hwq_attr.stride = srq->wqe_size;
	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&srq->hwq, &hwq_attr);
	if (rc)
		return rc;

	srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
			   GFP_KERNEL);
	if (!srq->swq) {
		rc = -ENOMEM;
		goto fail;
	}
	srq->dbinfo.flags = 0;
	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_CREATE_SRQ,
				 sizeof(req));

	/* Configure the request */
	req.dpi = cpu_to_le32(srq->dpi->dpi);
	req.srq_handle = cpu_to_le64((uintptr_t)srq);

	req.srq_size = cpu_to_le16((u16)srq->hwq.max_elements);
	pbl = &srq->hwq.pbl[PBL_LVL_0];
	pg_sz_lvl = ((u16)bnxt_qplib_base_pg_size(&srq->hwq) <<
		     CMDQ_CREATE_SRQ_PG_SIZE_SFT);
	pg_sz_lvl |= (srq->hwq.level & CMDQ_CREATE_SRQ_LVL_MASK) <<
		      CMDQ_CREATE_SRQ_LVL_SFT;
	req.pg_size_lvl = cpu_to_le16(pg_sz_lvl);
	req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	req.pd_id = cpu_to_le32(srq->pd->id);
	req.eventq_id = cpu_to_le16(srq->eventq_hw_ring_id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto fail;

	spin_lock_init(&srq->lock);
	srq->start_idx = 0;
	srq->last_idx = srq->hwq.max_elements - 1;
	for (idx = 0; idx < srq->hwq.max_elements; idx++)
		srq->swq[idx].next_idx = idx + 1;
	srq->swq[srq->last_idx].next_idx = -1;

	srq->id = le32_to_cpu(resp.xid);
	srq->dbinfo.hwq = &srq->hwq;
	srq->dbinfo.xid = srq->id;
	srq->dbinfo.db = srq->dpi->dbr;
	srq->dbinfo.max_slot = 1;
	srq->dbinfo.priv_db = res->dpi_tbl.priv_db;

	bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA);
	srq->arm_req = false;

	return 0;
fail:
	bnxt_qplib_free_hwq(res, &srq->hwq);
	kfree(srq->swq);

	return rc;
}
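
/* SRQ limit (arming) protocol: the consumer arms the SRQ with a threshold;
 * once the number of available entries drops to that threshold the firmware
 * raises an SRQ event.  If there is currently enough room, modify_srq()
 * arms the doorbell immediately; otherwise arm_req is set and the arming is
 * deferred until post_srq_recv() has refilled the queue past the threshold.
 */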
int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
	u32 count;

	count = __bnxt_qplib_get_avail(srq_hwq);
	if (count > srq->threshold) {
		srq->arm_req = false;
		bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
	} else {
		/* Deferred arming */
		srq->arm_req = true;
	}

	return 0;
}

int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_query_srq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_rcfw_sbuf sbuf;
	struct creq_query_srq_resp_sb *sb;
	struct cmdq_query_srq req = {};
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_QUERY_SRQ,
				 sizeof(req));

	/* Configure the request */
	sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
	sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
				     &sbuf.dma_addr, GFP_KERNEL);
	if (!sbuf.sb)
		return -ENOMEM;
	req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
	req.srq_cid = cpu_to_le32(srq->id);
	sb = sbuf.sb;
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (!rc)
		srq->threshold = le16_to_cpu(sb->srq_limit);
	dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
			  sbuf.sb, sbuf.dma_addr);

	return rc;
}

int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
			     struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
	struct rq_wqe *srqe;
	struct sq_sge *hw_sge;
	u32 count = 0;
	int i, next;

	spin_lock(&srq_hwq->lock);
	if (srq->start_idx == srq->last_idx) {
		dev_err(&srq_hwq->pdev->dev,
			"FP: SRQ (0x%x) is full!\n", srq->id);
		spin_unlock(&srq_hwq->lock);
		return -EINVAL;
	}
	next = srq->start_idx;
	srq->start_idx = srq->swq[next].next_idx;
	spin_unlock(&srq_hwq->lock);

	srqe = bnxt_qplib_get_qe(srq_hwq, srq_hwq->prod, NULL);
	memset(srqe, 0, srq->wqe_size);
	/* Calculate wqe_size16 and data_len */
	for (i = 0, hw_sge = (struct sq_sge *)srqe->data;
	     i < wqe->num_sge; i++, hw_sge++) {
		hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
		hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
		hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
	}
	srqe->wqe_type = wqe->type;
	srqe->flags = wqe->flags;
	srqe->wqe_size = wqe->num_sge +
			((offsetof(typeof(*srqe), data) + 15) >> 4);
	srqe->wr_id[0] = cpu_to_le32((u32)next);
	srq->swq[next].wr_id = wqe->wr_id;

	bnxt_qplib_hwq_incr_prod(&srq->dbinfo, srq_hwq, srq->dbinfo.max_slot);

	spin_lock(&srq_hwq->lock);
	count = __bnxt_qplib_get_avail(srq_hwq);
	spin_unlock(&srq_hwq->lock);
	/* Ring DB */
	bnxt_qplib_ring_prod_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ);
	if (srq->arm_req == true && count > srq->threshold) {
		srq->arm_req = false;
		bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
	}

	return 0;
}

static int bnxt_qplib_alloc_init_swq(struct bnxt_qplib_q *que)
{
	int indx;

	que->swq = kcalloc(que->max_sw_wqe, sizeof(*que->swq), GFP_KERNEL);
	if (!que->swq)
		return -ENOMEM;

	que->swq_start = 0;
	que->swq_last = que->max_sw_wqe - 1;
	for (indx = 0; indx < que->max_sw_wqe; indx++)
		que->swq[indx].next_idx = indx + 1;
	que->swq[que->swq_last].next_idx = 0; /* Make it circular */
	que->swq_last = 0;

	return 0;
}
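
/* QP creation: each queue is backed by a hardware queue (hwq) allocated
 * through bnxt_qplib_alloc_init_hwq() plus a software shadow queue (swq)
 * that tracks wr_id, slot index and PSN information per WQE.  QP1 (GSI)
 * additionally gets DMA-coherent header buffers for the software-built
 * UD/MAD headers.
 */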
int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_create_qp1_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_q *rq = &qp->rq;
	struct cmdq_create_qp1 req = {};
	struct bnxt_qplib_pbl *pbl;
	u32 qp_flags = 0;
	u8 pg_sz_lvl;
	u32 tbl_indx;
	int rc;

	sq->dbinfo.flags = 0;
	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_CREATE_QP1,
				 sizeof(req));
	/* General */
	req.type = qp->type;
	req.dpi = cpu_to_le32(qp->dpi->dpi);
	req.qp_handle = cpu_to_le64(qp->qp_handle);

	/* SQ */
	hwq_attr.res = res;
	hwq_attr.sginfo = &sq->sg_info;
	hwq_attr.stride = sizeof(struct sq_sge);
	hwq_attr.depth = bnxt_qplib_get_depth(sq, qp->wqe_mode, false);
	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
	if (rc)
		return rc;

	rc = bnxt_qplib_alloc_init_swq(sq);
	if (rc)
		goto fail_sq;

	req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
	pbl = &sq->hwq.pbl[PBL_LVL_0];
	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
		     CMDQ_CREATE_QP1_SQ_PG_SIZE_SFT);
	pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP1_SQ_LVL_MASK);
	req.sq_pg_size_sq_lvl = pg_sz_lvl;
	req.sq_fwo_sq_sge =
		cpu_to_le16((sq->max_sge & CMDQ_CREATE_QP1_SQ_SGE_MASK) <<
			    CMDQ_CREATE_QP1_SQ_SGE_SFT);
	req.scq_cid = cpu_to_le32(qp->scq->id);

	/* RQ */
	rq->dbinfo.flags = 0;
	hwq_attr.res = res;
	hwq_attr.sginfo = &rq->sg_info;
	hwq_attr.stride = sizeof(struct sq_sge);
	hwq_attr.depth = bnxt_qplib_get_depth(rq, qp->wqe_mode, false);
	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
	if (rc)
		goto sq_swq;
	rc = bnxt_qplib_alloc_init_swq(rq);
	if (rc)
		goto fail_rq;
	req.rq_size = cpu_to_le32(rq->max_wqe);
	pbl = &rq->hwq.pbl[PBL_LVL_0];
	req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
		     CMDQ_CREATE_QP1_RQ_PG_SIZE_SFT);
	pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP1_RQ_LVL_MASK);
	req.rq_pg_size_rq_lvl = pg_sz_lvl;
	req.rq_fwo_rq_sge =
		cpu_to_le16((rq->max_sge &
			     CMDQ_CREATE_QP1_RQ_SGE_MASK) <<
			    CMDQ_CREATE_QP1_RQ_SGE_SFT);

	req.rcq_cid = cpu_to_le32(qp->rcq->id);
	/* Header buffer - allow hdr_buf pass in */
	rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
	if (rc) {
		rc = -ENOMEM;
		goto fail;
	}
	qp_flags |= CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE;
	req.qp_flags = cpu_to_le32(qp_flags);
	req.pd_id = cpu_to_le32(qp->pd->id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto fail;

	qp->id = le32_to_cpu(resp.xid);
	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
	qp->cctx = res->cctx;
	sq->dbinfo.hwq = &sq->hwq;
	sq->dbinfo.xid = qp->id;
	sq->dbinfo.db = qp->dpi->dbr;
	sq->dbinfo.max_slot = bnxt_qplib_set_sq_max_slot(qp->wqe_mode);

	rq->dbinfo.hwq = &rq->hwq;
	rq->dbinfo.xid = qp->id;
	rq->dbinfo.db = qp->dpi->dbr;
	rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);

	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
	rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
	rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;

	return 0;

fail:
	bnxt_qplib_free_qp_hdr_buf(res, qp);
fail_rq:
	bnxt_qplib_free_hwq(res, &rq->hwq);
	kfree(rq->swq);
sq_swq:
	kfree(sq->swq);
fail_sq:
	bnxt_qplib_free_hwq(res, &sq->hwq);
	return rc;
}

static void bnxt_qplib_init_psn_ptr(struct bnxt_qplib_qp *qp, int size)
{
	struct bnxt_qplib_hwq *hwq;
	struct bnxt_qplib_q *sq;
	u64 fpsne = 0;
	u32 indx_pad = 0;
	void *psn_pg;

	sq = &qp->sq;
	hwq = &sq->hwq;
	/* First psn entry */
	fpsne = (u64)bnxt_qplib_get_qe(hwq, hwq->depth, &psn_pg);
	if (!IS_ALIGNED(fpsne, PAGE_SIZE))
		indx_pad = (fpsne & ~PAGE_MASK) / size;
	hwq->pad_pgofft = indx_pad;
	hwq->pad_pg = (u64 *)psn_pg;
	hwq->pad_stride = size;
}
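
/* For RC QPs the SQ carries an auxiliary per-WQE search structure used for
 * retransmission: older chips use sq_psn_search/sq_psn_search_ext, while
 * adapters that keep the MSN table in host memory (is_host_msn_tbl) use
 * sq_msn_search entries, with the table depth rounded up to a power of two.
 */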
int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};
	struct creq_create_qp_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_q *rq = &qp->rq;
	struct cmdq_create_qp req = {};
	int rc, req_size, psn_sz = 0;
	struct bnxt_qplib_hwq *xrrq;
	struct bnxt_qplib_pbl *pbl;
	u32 qp_flags = 0;
	u8 pg_sz_lvl;
	u32 tbl_indx;
	u16 nsge;

	qp->is_host_msn_tbl = _is_host_msn_table(res->dattr->dev_cap_flags2);

	sq->dbinfo.flags = 0;
	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_CREATE_QP,
				 sizeof(req));

	/* General */
	req.type = qp->type;
	req.dpi = cpu_to_le32(qp->dpi->dpi);
	req.qp_handle = cpu_to_le64(qp->qp_handle);

	/* SQ */
	if (qp->type == CMDQ_CREATE_QP_TYPE_RC) {
		psn_sz = bnxt_qplib_is_chip_gen_p5_p7(res->cctx) ?
			 sizeof(struct sq_psn_search_ext) :
			 sizeof(struct sq_psn_search);

		if (qp->is_host_msn_tbl) {
			psn_sz = sizeof(struct sq_msn_search);
			qp->msn = 0;
		}
	}

	hwq_attr.res = res;
	hwq_attr.sginfo = &sq->sg_info;
	hwq_attr.stride = sizeof(struct sq_sge);
	hwq_attr.depth = bnxt_qplib_get_depth(sq, qp->wqe_mode, true);
	hwq_attr.aux_stride = psn_sz;
	hwq_attr.aux_depth = psn_sz ? bnxt_qplib_set_sq_size(sq, qp->wqe_mode)
				    : 0;
	/* Update msn tbl size */
	if (qp->is_host_msn_tbl && psn_sz) {
		hwq_attr.aux_depth = roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
		qp->msn_tbl_sz = hwq_attr.aux_depth;
		qp->msn = 0;
	}

	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
	if (rc)
		return rc;

	rc = bnxt_qplib_alloc_init_swq(sq);
	if (rc)
		goto fail_sq;

	if (psn_sz)
		bnxt_qplib_init_psn_ptr(qp, psn_sz);

	req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
	pbl = &sq->hwq.pbl[PBL_LVL_0];
	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
		     CMDQ_CREATE_QP_SQ_PG_SIZE_SFT);
	pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP_SQ_LVL_MASK);
	req.sq_pg_size_sq_lvl = pg_sz_lvl;
	req.sq_fwo_sq_sge =
		cpu_to_le16(((sq->max_sge & CMDQ_CREATE_QP_SQ_SGE_MASK) <<
			     CMDQ_CREATE_QP_SQ_SGE_SFT) | 0);
	req.scq_cid = cpu_to_le32(qp->scq->id);

	/* RQ */
	if (!qp->srq) {
		rq->dbinfo.flags = 0;
		hwq_attr.res = res;
		hwq_attr.sginfo = &rq->sg_info;
		hwq_attr.stride = sizeof(struct sq_sge);
		hwq_attr.depth = bnxt_qplib_get_depth(rq, qp->wqe_mode, false);
		hwq_attr.aux_stride = 0;
		hwq_attr.aux_depth = 0;
		hwq_attr.type = HWQ_TYPE_QUEUE;
		rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
		if (rc)
			goto sq_swq;
		rc = bnxt_qplib_alloc_init_swq(rq);
		if (rc)
			goto fail_rq;

		req.rq_size = cpu_to_le32(rq->max_wqe);
		pbl = &rq->hwq.pbl[PBL_LVL_0];
		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
		pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
			     CMDQ_CREATE_QP_RQ_PG_SIZE_SFT);
		pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP_RQ_LVL_MASK);
		req.rq_pg_size_rq_lvl = pg_sz_lvl;
		nsge = (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
			6 : rq->max_sge;
		req.rq_fwo_rq_sge =
			cpu_to_le16(((nsge &
				      CMDQ_CREATE_QP_RQ_SGE_MASK) <<
				     CMDQ_CREATE_QP_RQ_SGE_SFT) | 0);
	} else {
		/* SRQ */
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_SRQ_USED;
		req.srq_cid = cpu_to_le32(qp->srq->id);
	}
	req.rcq_cid = cpu_to_le32(qp->rcq->id);

	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_RESERVED_LKEY_ENABLE;
	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FR_PMR_ENABLED;
	if (qp->sig_type)
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION;
	if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_VARIABLE_SIZED_WQE_ENABLED;
	if (_is_ext_stats_supported(res->dattr->dev_cap_flags) && !res->is_vf)
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_EXT_STATS_ENABLED;

	req.qp_flags = cpu_to_le32(qp_flags);

	/* ORRQ and IRRQ */
	if (psn_sz) {
		xrrq = &qp->orrq;
		xrrq->max_elements =
			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
		req_size = xrrq->max_elements *
			   BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE + PAGE_SIZE - 1;
		req_size &= ~(PAGE_SIZE - 1);
		sginfo.pgsize = req_size;
		sginfo.pgshft = PAGE_SHIFT;

		hwq_attr.res = res;
		hwq_attr.sginfo = &sginfo;
		hwq_attr.depth = xrrq->max_elements;
		hwq_attr.stride = BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE;
		hwq_attr.aux_stride = 0;
		hwq_attr.aux_depth = 0;
		hwq_attr.type = HWQ_TYPE_CTX;
		rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
		if (rc)
			goto rq_swq;
		pbl = &xrrq->pbl[PBL_LVL_0];
		req.orrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);

		xrrq = &qp->irrq;
		xrrq->max_elements = IRD_LIMIT_TO_IRRQ_SLOTS(
						qp->max_dest_rd_atomic);
		req_size = xrrq->max_elements *
			   BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE + PAGE_SIZE - 1;
		req_size &= ~(PAGE_SIZE - 1);
		sginfo.pgsize = req_size;
		hwq_attr.depth = xrrq->max_elements;
		hwq_attr.stride = BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE;
		rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
		if (rc)
			goto fail_orrq;

		pbl = &xrrq->pbl[PBL_LVL_0];
		req.irrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);
	}
	req.pd_id = cpu_to_le32(qp->pd->id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto fail;

	qp->id = le32_to_cpu(resp.xid);
	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
	INIT_LIST_HEAD(&qp->sq_flush);
	INIT_LIST_HEAD(&qp->rq_flush);
	qp->cctx = res->cctx;
	sq->dbinfo.hwq = &sq->hwq;
	sq->dbinfo.xid = qp->id;
	sq->dbinfo.db = qp->dpi->dbr;
	sq->dbinfo.max_slot = bnxt_qplib_set_sq_max_slot(qp->wqe_mode);

	rq->dbinfo.hwq = &rq->hwq;
	rq->dbinfo.xid = qp->id;
	rq->dbinfo.db = qp->dpi->dbr;
	rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);

	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
	rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
	rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;

	return 0;
fail:
	bnxt_qplib_free_hwq(res, &qp->irrq);
fail_orrq:
	bnxt_qplib_free_hwq(res, &qp->orrq);
rq_swq:
	kfree(rq->swq);
fail_rq:
	bnxt_qplib_free_hwq(res, &rq->hwq);
sq_swq:
	kfree(sq->swq);
fail_sq:
	bnxt_qplib_free_hwq(res, &sq->hwq);
	return rc;
}

static void __modify_flags_from_init_state(struct bnxt_qplib_qp *qp)
{
	switch (qp->state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
		/* INIT->RTR, configure the path_mtu to the default
		 * 2048 if not being requested
		 */
		if (!(qp->modify_flags &
		      CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)) {
			qp->modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
			qp->path_mtu =
				CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
		}
		qp->modify_flags &=
			~CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
		/* Bono FW require the max_dest_rd_atomic to be >= 1 */
		if (qp->max_dest_rd_atomic < 1)
			qp->max_dest_rd_atomic = 1;
		qp->modify_flags &= ~CMDQ_MODIFY_QP_MODIFY_MASK_SRC_MAC;
		/* Bono FW 20.6.5 requires SGID_INDEX configuration */
		if (!(qp->modify_flags &
		      CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)) {
			qp->modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX;
			qp->ah.sgid_index = 0;
		}
		break;
	default:
		break;
	}
}

static void __modify_flags_from_rtr_state(struct bnxt_qplib_qp *qp)
{
	switch (qp->state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
		/* Bono FW requires the max_rd_atomic to be >= 1 */
		if (qp->max_rd_atomic < 1)
			qp->max_rd_atomic = 1;
		/* Bono FW does not allow PKEY_INDEX,
		 * DGID, FLOW_LABEL, SGID_INDEX, HOP_LIMIT,
		 * TRAFFIC_CLASS, DEST_MAC, PATH_MTU, RQ_PSN,
		 * MIN_RNR_TIMER, MAX_DEST_RD_ATOMIC, DEST_QP_ID
		 * modification
		 */
		qp->modify_flags &=
			~(CMDQ_MODIFY_QP_MODIFY_MASK_PKEY |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
			  CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
			  CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
			  CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
			  CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
			  CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU |
			  CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN |
			  CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER |
			  CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID);
		break;
	default:
		break;
	}
}

static void __filter_modify_flags(struct bnxt_qplib_qp *qp)
{
	switch (qp->cur_qp_state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RESET:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_INIT:
		__modify_flags_from_init_state(qp);
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
		__modify_flags_from_rtr_state(qp);
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_SQD:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_SQE:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_ERR:
		break;
	default:
		break;
	}
}

static void bnxt_set_mandatory_attributes(struct bnxt_qplib_qp *qp,
					  struct cmdq_modify_qp *req)
{
	u32 mandatory_flags = 0;

	if (qp->type == CMDQ_MODIFY_QP_QP_TYPE_RC)
		mandatory_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS;

	if (qp->cur_qp_state == CMDQ_MODIFY_QP_NEW_STATE_INIT &&
	    qp->state == CMDQ_MODIFY_QP_NEW_STATE_RTR) {
		if (qp->type == CMDQ_MODIFY_QP_QP_TYPE_RC && qp->srq)
			req->flags = cpu_to_le16(CMDQ_MODIFY_QP_FLAGS_SRQ_USED);
		mandatory_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
	}

	if (qp->type == CMDQ_MODIFY_QP_QP_TYPE_UD ||
	    qp->type == CMDQ_MODIFY_QP_QP_TYPE_GSI)
		mandatory_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;

	qp->modify_flags |= mandatory_flags;
	req->qp_type = qp->type;
}

static bool is_optimized_state_transition(struct bnxt_qplib_qp *qp)
{
	if ((qp->cur_qp_state == CMDQ_MODIFY_QP_NEW_STATE_INIT &&
	     qp->state == CMDQ_MODIFY_QP_NEW_STATE_RTR) ||
	    (qp->cur_qp_state == CMDQ_MODIFY_QP_NEW_STATE_RTR &&
	     qp->state == CMDQ_MODIFY_QP_NEW_STATE_RTS))
		return true;

	return false;
}
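
/* bnxt_qplib_modify_qp() first trims the caller's modify mask according to
 * the current->new state transition (__filter_modify_flags) and, when the
 * firmware supports optimized transitions, adds the attributes that are
 * mandatory for INIT->RTR and RTR->RTS before issuing MODIFY_QP.
 */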
int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_modify_qp_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_modify_qp req = {};
	u16 vlan_pcp_vlan_dei_vlan_id;
	u32 temp32[4];
	u32 bmask;
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_MODIFY_QP,
				 sizeof(req));

	/* Filter out the qp_attr_mask based on the state->new transition */
	__filter_modify_flags(qp);
	if (qp->modify_flags & CMDQ_MODIFY_QP_MODIFY_MASK_STATE) {
		/* Set mandatory attributes for INIT -> RTR and RTR -> RTS transition */
		if (_is_optimize_modify_qp_supported(res->dattr->dev_cap_flags2) &&
		    is_optimized_state_transition(qp))
			bnxt_set_mandatory_attributes(qp, &req);
	}
	bmask = qp->modify_flags;
	req.modify_mask = cpu_to_le32(qp->modify_flags);
	req.qp_cid = cpu_to_le32(qp->id);
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_STATE) {
		req.network_type_en_sqd_async_notify_new_state =
				(qp->state & CMDQ_MODIFY_QP_NEW_STATE_MASK) |
				(qp->en_sqd_async_notify ?
					CMDQ_MODIFY_QP_EN_SQD_ASYNC_NOTIFY : 0);
	}
	req.network_type_en_sqd_async_notify_new_state |= qp->nw_type;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS)
		req.access = qp->access;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY)
		req.pkey = cpu_to_le16(IB_DEFAULT_PKEY_FULL);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_QKEY)
		req.qkey = cpu_to_le32(qp->qkey);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DGID) {
		memcpy(temp32, qp->ah.dgid.data, sizeof(struct bnxt_qplib_gid));
		req.dgid[0] = cpu_to_le32(temp32[0]);
		req.dgid[1] = cpu_to_le32(temp32[1]);
		req.dgid[2] = cpu_to_le32(temp32[2]);
		req.dgid[3] = cpu_to_le32(temp32[3]);
	}
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL)
		req.flow_label = cpu_to_le32(qp->ah.flow_label);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)
		req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id
					     [qp->ah.sgid_index]);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT)
		req.hop_limit = qp->ah.hop_limit;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS)
		req.traffic_class = qp->ah.traffic_class;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC)
		memcpy(req.dest_mac, qp->ah.dmac, 6);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)
		req.path_mtu_pingpong_push_enable |= qp->path_mtu;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT)
		req.timeout = qp->timeout;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT)
		req.retry_cnt = qp->retry_cnt;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY)
		req.rnr_retry = qp->rnr_retry;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER)
		req.min_rnr_timer = qp->min_rnr_timer;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN)
		req.rq_psn = cpu_to_le32(qp->rq.psn);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN)
		req.sq_psn = cpu_to_le32(qp->sq.psn);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC)
		req.max_rd_atomic =
			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC)
		req.max_dest_rd_atomic =
			IRD_LIMIT_TO_IRRQ_SLOTS(qp->max_dest_rd_atomic);

	req.sq_size = cpu_to_le32(qp->sq.hwq.max_elements);
	req.rq_size = cpu_to_le32(qp->rq.hwq.max_elements);
	req.sq_sge = cpu_to_le16(qp->sq.max_sge);
	req.rq_sge = cpu_to_le16(qp->rq.max_sge);
	req.max_inline_data = cpu_to_le32(qp->max_inline_data);
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID)
		req.dest_qp_id = cpu_to_le32(qp->dest_qpn);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID) {
		vlan_pcp_vlan_dei_vlan_id =
			((res->sgid_tbl.tbl[qp->ah.sgid_index].vlan_id <<
			  CMDQ_MODIFY_QP_VLAN_ID_SFT) &
			 CMDQ_MODIFY_QP_VLAN_ID_MASK);
		vlan_pcp_vlan_dei_vlan_id |=
			((qp->ah.sl << CMDQ_MODIFY_QP_VLAN_PCP_SFT) &
			 CMDQ_MODIFY_QP_VLAN_PCP_MASK);
		req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(vlan_pcp_vlan_dei_vlan_id);
	}

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		return rc;
	qp->cur_qp_state = qp->state;

	return 0;
}
*res
, struct bnxt_qplib_qp
*qp
)
1444 struct bnxt_qplib_rcfw
*rcfw
= res
->rcfw
;
1445 struct creq_query_qp_resp resp
= {};
1446 struct bnxt_qplib_cmdqmsg msg
= {};
1447 struct bnxt_qplib_rcfw_sbuf sbuf
;
1448 struct creq_query_qp_resp_sb
*sb
;
1449 struct cmdq_query_qp req
= {};
1453 sbuf
.size
= ALIGN(sizeof(*sb
), BNXT_QPLIB_CMDQE_UNITS
);
1454 sbuf
.sb
= dma_alloc_coherent(&rcfw
->pdev
->dev
, sbuf
.size
,
1455 &sbuf
.dma_addr
, GFP_KERNEL
);
1460 bnxt_qplib_rcfw_cmd_prep((struct cmdq_base
*)&req
,
1461 CMDQ_BASE_OPCODE_QUERY_QP
,
1464 req
.qp_cid
= cpu_to_le32(qp
->id
);
1465 req
.resp_size
= sbuf
.size
/ BNXT_QPLIB_CMDQE_UNITS
;
1466 bnxt_qplib_fill_cmdqmsg(&msg
, &req
, &resp
, &sbuf
, sizeof(req
),
1468 rc
= bnxt_qplib_rcfw_send_message(rcfw
, &msg
);
1471 /* Extract the context from the side buffer */
1472 qp
->state
= sb
->en_sqd_async_notify_state
&
1473 CREQ_QUERY_QP_RESP_SB_STATE_MASK
;
1474 qp
->en_sqd_async_notify
= sb
->en_sqd_async_notify_state
&
1475 CREQ_QUERY_QP_RESP_SB_EN_SQD_ASYNC_NOTIFY
;
1476 qp
->access
= sb
->access
;
1477 qp
->pkey_index
= le16_to_cpu(sb
->pkey
);
1478 qp
->qkey
= le32_to_cpu(sb
->qkey
);
1480 temp32
[0] = le32_to_cpu(sb
->dgid
[0]);
1481 temp32
[1] = le32_to_cpu(sb
->dgid
[1]);
1482 temp32
[2] = le32_to_cpu(sb
->dgid
[2]);
1483 temp32
[3] = le32_to_cpu(sb
->dgid
[3]);
1484 memcpy(qp
->ah
.dgid
.data
, temp32
, sizeof(qp
->ah
.dgid
.data
));
1486 qp
->ah
.flow_label
= le32_to_cpu(sb
->flow_label
);
1488 qp
->ah
.sgid_index
= 0;
1489 for (i
= 0; i
< res
->sgid_tbl
.max
; i
++) {
1490 if (res
->sgid_tbl
.hw_id
[i
] == le16_to_cpu(sb
->sgid_index
)) {
1491 qp
->ah
.sgid_index
= i
;
1495 if (i
== res
->sgid_tbl
.max
)
1496 dev_warn(&res
->pdev
->dev
, "SGID not found??\n");
1498 qp
->ah
.hop_limit
= sb
->hop_limit
;
1499 qp
->ah
.traffic_class
= sb
->traffic_class
;
1500 memcpy(qp
->ah
.dmac
, sb
->dest_mac
, 6);
1501 qp
->ah
.vlan_id
= (le16_to_cpu(sb
->path_mtu_dest_vlan_id
) &
1502 CREQ_QUERY_QP_RESP_SB_VLAN_ID_MASK
) >>
1503 CREQ_QUERY_QP_RESP_SB_VLAN_ID_SFT
;
1504 qp
->path_mtu
= (le16_to_cpu(sb
->path_mtu_dest_vlan_id
) &
1505 CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK
) >>
1506 CREQ_QUERY_QP_RESP_SB_PATH_MTU_SFT
;
1507 qp
->timeout
= sb
->timeout
;
1508 qp
->retry_cnt
= sb
->retry_cnt
;
1509 qp
->rnr_retry
= sb
->rnr_retry
;
1510 qp
->min_rnr_timer
= sb
->min_rnr_timer
;
1511 qp
->rq
.psn
= le32_to_cpu(sb
->rq_psn
);
1512 qp
->max_rd_atomic
= ORRQ_SLOTS_TO_ORD_LIMIT(sb
->max_rd_atomic
);
1513 qp
->sq
.psn
= le32_to_cpu(sb
->sq_psn
);
1514 qp
->max_dest_rd_atomic
=
1515 IRRQ_SLOTS_TO_IRD_LIMIT(sb
->max_dest_rd_atomic
);
1516 qp
->sq
.max_wqe
= qp
->sq
.hwq
.max_elements
;
1517 qp
->rq
.max_wqe
= qp
->rq
.hwq
.max_elements
;
1518 qp
->sq
.max_sge
= le16_to_cpu(sb
->sq_sge
);
1519 qp
->rq
.max_sge
= le16_to_cpu(sb
->rq_sge
);
1520 qp
->max_inline_data
= le32_to_cpu(sb
->max_inline_data
);
1521 qp
->dest_qpn
= le32_to_cpu(sb
->dest_qp_id
);
1522 memcpy(qp
->smac
, sb
->src_mac
, 6);
1523 qp
->vlan_id
= le16_to_cpu(sb
->vlan_pcp_vlan_dei_vlan_id
);
1525 dma_free_coherent(&rcfw
->pdev
->dev
, sbuf
.size
,
1526 sbuf
.sb
, sbuf
.dma_addr
);
static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
{
	struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
	u32 peek_flags, peek_cons;
	struct cq_base *hw_cqe;
	int i;

	peek_flags = cq->dbinfo.flags;
	peek_cons = cq_hwq->cons;
	for (i = 0; i < cq_hwq->max_elements; i++) {
		hw_cqe = bnxt_qplib_get_qe(cq_hwq, peek_cons, NULL);
		if (!CQE_CMP_VALID(hw_cqe, peek_flags))
			continue;
		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();
		switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
		case CQ_BASE_CQE_TYPE_REQ:
		case CQ_BASE_CQE_TYPE_TERMINAL:
		{
			struct cq_req *cqe = (struct cq_req *)hw_cqe;

			if (qp == le64_to_cpu(cqe->qp_handle))
				cqe->qp_handle = 0;
			break;
		}
		case CQ_BASE_CQE_TYPE_RES_RC:
		case CQ_BASE_CQE_TYPE_RES_UD:
		case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
		{
			struct cq_res_rc *cqe = (struct cq_res_rc *)hw_cqe;

			if (qp == le64_to_cpu(cqe->qp_handle))
				cqe->qp_handle = 0;
			break;
		}
		default:
			break;
		}
		bnxt_qplib_hwq_incr_cons(cq_hwq->max_elements, &peek_cons,
					 1, &peek_flags);
	}
}

int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_destroy_qp_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_destroy_qp req = {};
	u32 tbl_indx;
	int rc;

	spin_lock_bh(&rcfw->tbl_lock);
	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
	rcfw->qp_tbl[tbl_indx].qp_id = BNXT_QPLIB_QP_ID_INVALID;
	rcfw->qp_tbl[tbl_indx].qp_handle = NULL;
	spin_unlock_bh(&rcfw->tbl_lock);

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_DESTROY_QP,
				 sizeof(req));

	req.qp_cid = cpu_to_le32(qp->id);
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc) {
		spin_lock_bh(&rcfw->tbl_lock);
		rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
		rcfw->qp_tbl[tbl_indx].qp_handle = qp;
		spin_unlock_bh(&rcfw->tbl_lock);
		return rc;
	}

	return 0;
}

void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res,
			    struct bnxt_qplib_qp *qp)
{
	bnxt_qplib_free_qp_hdr_buf(res, qp);
	bnxt_qplib_free_hwq(res, &qp->sq.hwq);
	kfree(qp->sq.swq);

	bnxt_qplib_free_hwq(res, &qp->rq.hwq);
	kfree(qp->rq.swq);

	if (qp->irrq.max_elements)
		bnxt_qplib_free_hwq(res, &qp->irrq);
	if (qp->orrq.max_elements)
		bnxt_qplib_free_hwq(res, &qp->orrq);
}
void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge)
{
	struct bnxt_qplib_q *sq = &qp->sq;
	u32 sw_prod;

	memset(sge, 0, sizeof(*sge));

	if (qp->sq_hdr_buf) {
		sw_prod = sq->swq_start;
		sge->addr = (dma_addr_t)(qp->sq_hdr_buf_map +
					 sw_prod * qp->sq_hdr_buf_size);
		sge->lkey = 0xFFFFFFFF;
		sge->size = qp->sq_hdr_buf_size;
		return qp->sq_hdr_buf + sw_prod * sge->size;
	}
	return NULL;
}

u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;

	return rq->swq_start;
}

dma_addr_t bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp, u32 index)
{
	return (qp->rq_hdr_buf_map + index * qp->rq_hdr_buf_size);
}

void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	u32 sw_prod;

	memset(sge, 0, sizeof(*sge));

	if (qp->rq_hdr_buf) {
		sw_prod = rq->swq_start;
		sge->addr = (dma_addr_t)(qp->rq_hdr_buf_map +
					 sw_prod * qp->rq_hdr_buf_size);
		sge->lkey = 0xFFFFFFFF;
		sge->size = qp->rq_hdr_buf_size;
		return qp->rq_hdr_buf + sw_prod * sge->size;
	}
	return NULL;
}
/* Fill the MSN table into the next psn row */
static void bnxt_qplib_fill_msn_search(struct bnxt_qplib_qp *qp,
				       struct bnxt_qplib_swqe *wqe,
				       struct bnxt_qplib_swq *swq)
{
	struct sq_msn_search *msns;
	u32 start_psn, next_psn;
	u16 start_idx;

	msns = (struct sq_msn_search *)swq->psn_search;
	msns->start_idx_next_psn_start_psn = 0;

	start_psn = swq->start_psn;
	next_psn = swq->next_psn;
	start_idx = swq->slot_idx;
	msns->start_idx_next_psn_start_psn |=
		bnxt_re_update_msn_tbl(start_idx, next_psn, start_psn);
	qp->msn++;
	qp->msn %= qp->msn_tbl_sz;
}

static void bnxt_qplib_fill_psn_search(struct bnxt_qplib_qp *qp,
				       struct bnxt_qplib_swqe *wqe,
				       struct bnxt_qplib_swq *swq)
{
	struct sq_psn_search_ext *psns_ext;
	struct sq_psn_search *psns;
	u32 flg_npsn;
	u32 op_spsn;

	if (!swq->psn_search)
		return;
	/* Handle MSN differently on cap flags */
	if (qp->is_host_msn_tbl) {
		bnxt_qplib_fill_msn_search(qp, wqe, swq);
		return;
	}
	psns = swq->psn_search;
	psns_ext = swq->psn_ext;

	op_spsn = ((swq->start_psn << SQ_PSN_SEARCH_START_PSN_SFT) &
		    SQ_PSN_SEARCH_START_PSN_MASK);
	op_spsn |= ((wqe->type << SQ_PSN_SEARCH_OPCODE_SFT) &
		     SQ_PSN_SEARCH_OPCODE_MASK);
	flg_npsn = ((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) &
		     SQ_PSN_SEARCH_NEXT_PSN_MASK);

	if (bnxt_qplib_is_chip_gen_p5_p7(qp->cctx)) {
		psns_ext->opcode_start_psn = cpu_to_le32(op_spsn);
		psns_ext->flags_next_psn = cpu_to_le32(flg_npsn);
		psns_ext->start_slot_idx = cpu_to_le16(swq->slot_idx);
	} else {
		psns->opcode_start_psn = cpu_to_le32(op_spsn);
		psns->flags_next_psn = cpu_to_le32(flg_npsn);
	}
}
static int bnxt_qplib_put_inline(struct bnxt_qplib_qp *qp,
				 struct bnxt_qplib_swqe *wqe,
				 u16 *idx)
{
	struct bnxt_qplib_hwq *hwq;
	int len, t_len, offt;
	bool pull_dst = true;
	void *il_dst = NULL;
	void *il_src = NULL;
	int t_cplen, cplen;
	int indx;

	hwq = &qp->sq.hwq;
	t_len = 0;
	for (indx = 0; indx < wqe->num_sge; indx++) {
		len = wqe->sg_list[indx].size;
		il_src = (void *)wqe->sg_list[indx].addr;
		t_len += len;
		if (t_len > qp->max_inline_data)
			return -ENOMEM;
		while (len) {
			if (pull_dst) {
				pull_dst = false;
				il_dst = bnxt_qplib_get_prod_qe(hwq, *idx);
				(*idx)++;
				t_cplen = 0;
				offt = 0;
			}
			cplen = min_t(int, len, sizeof(struct sq_sge));
			cplen = min_t(int, cplen,
					(sizeof(struct sq_sge) - offt));
			memcpy(il_dst, il_src, cplen);
			t_cplen += cplen;
			il_src += cplen;
			il_dst += cplen;
			offt += cplen;
			len -= cplen;
			if (t_cplen == sizeof(struct sq_sge))
				pull_dst = true;
		}
	}

	return t_len;
}

static u32 bnxt_qplib_put_sges(struct bnxt_qplib_hwq *hwq,
			       struct bnxt_qplib_sge *ssge,
			       u16 nsge, u16 *idx)
{
	struct sq_sge *dsge;
	int indx, len = 0;

	for (indx = 0; indx < nsge; indx++, (*idx)++) {
		dsge = bnxt_qplib_get_prod_qe(hwq, *idx);
		dsge->va_or_pa = cpu_to_le64(ssge[indx].addr);
		dsge->l_key = cpu_to_le32(ssge[indx].lkey);
		dsge->size = cpu_to_le32(ssge[indx].size);
		len += ssge[indx].size;
	}

	return len;
}

static u16 bnxt_qplib_required_slots(struct bnxt_qplib_qp *qp,
				     struct bnxt_qplib_swqe *wqe,
				     u16 *wqe_sz, u16 *qdf, u8 mode)
{
	u32 ilsize, bytes;
	u16 nsge;
	u16 slot;

	nsge = wqe->num_sge;
	/* Adding sq_send_hdr is a misnomer, for rq also hdr size is same. */
	bytes = sizeof(struct sq_send_hdr) + nsge * sizeof(struct sq_sge);
	if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE) {
		ilsize = bnxt_qplib_calc_ilsize(wqe, qp->max_inline_data);
		bytes = ALIGN(ilsize, sizeof(struct sq_sge));
		bytes += sizeof(struct sq_send_hdr);
	}

	*qdf = __xlate_qfd(qp->sq.q_full_delta, bytes);
	slot = bytes >> 4;
	*wqe_sz = slot;
	if (mode == BNXT_QPLIB_WQE_MODE_STATIC)
		slot = 8;
	return slot;
}

static void bnxt_qplib_pull_psn_buff(struct bnxt_qplib_qp *qp, struct bnxt_qplib_q *sq,
				     struct bnxt_qplib_swq *swq, bool hw_retx)
{
	struct bnxt_qplib_hwq *hwq;
	u32 pg_num, pg_indx;
	void *buff;
	u32 tail;

	hwq = &sq->hwq;
	if (!hwq->pad_pg)
		return;
	tail = swq->slot_idx / sq->dbinfo.max_slot;
	if (hw_retx) {
		/* For HW retx use qp msn index */
		tail = qp->msn;
		tail %= qp->msn_tbl_sz;
	}
	pg_num = (tail + hwq->pad_pgofft) / (PAGE_SIZE / hwq->pad_stride);
	pg_indx = (tail + hwq->pad_pgofft) % (PAGE_SIZE / hwq->pad_stride);
	buff = (void *)(hwq->pad_pg[pg_num] + pg_indx * hwq->pad_stride);
	swq->psn_ext = buff;
	swq->psn_search = buff;
}

void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *sq = &qp->sq;

	bnxt_qplib_ring_prod_db(&sq->dbinfo, DBC_DBC_TYPE_SQ);
}
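
/* Send WQE layout: every request starts with a 16-byte base header slot and
 * a 16-byte extended header slot, followed by SGE slots (or inline data).
 * A minimal post sketch (fields shown are illustrative, not a complete
 * initialisation):
 *
 *	wqe.type = BNXT_QPLIB_SWQE_TYPE_SEND;
 *	wqe.num_sge = 1;
 *	rc = bnxt_qplib_post_send(qp, &wqe);
 *	if (!rc)
 *		bnxt_qplib_post_send_db(qp);
 */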
1855 int bnxt_qplib_post_send(struct bnxt_qplib_qp
*qp
,
1856 struct bnxt_qplib_swqe
*wqe
)
1858 struct bnxt_qplib_nq_work
*nq_work
= NULL
;
1859 int i
, rc
= 0, data_len
= 0, pkt_num
= 0;
1860 struct bnxt_qplib_q
*sq
= &qp
->sq
;
1861 struct bnxt_qplib_hwq
*hwq
;
1862 struct bnxt_qplib_swq
*swq
;
1863 bool sch_handler
= false;
1864 u16 wqe_sz
, qdf
= 0;
1874 if (qp
->state
!= CMDQ_MODIFY_QP_NEW_STATE_RTS
&&
1875 qp
->state
!= CMDQ_MODIFY_QP_NEW_STATE_ERR
) {
1876 dev_err(&hwq
->pdev
->dev
,
1877 "QPLIB: FP: QP (0x%x) is in the 0x%x state",
1883 slots
= bnxt_qplib_required_slots(qp
, wqe
, &wqe_sz
, &qdf
, qp
->wqe_mode
);
1884 if (bnxt_qplib_queue_full(sq
, slots
+ qdf
)) {
1885 dev_err(&hwq
->pdev
->dev
,
1886 "prod = %#x cons = %#x qdepth = %#x delta = %#x\n",
1887 hwq
->prod
, hwq
->cons
, hwq
->depth
, sq
->q_full_delta
);
1892 swq
= bnxt_qplib_get_swqe(sq
, &wqe_idx
);
1893 bnxt_qplib_pull_psn_buff(qp
, sq
, swq
, qp
->is_host_msn_tbl
);
1896 swq
->slot_idx
= hwq
->prod
;
1898 swq
->wr_id
= wqe
->wr_id
;
1899 swq
->type
= wqe
->type
;
1900 swq
->flags
= wqe
->flags
;
1901 swq
->start_psn
= sq
->psn
& BTH_PSN_MASK
;
1903 swq
->flags
|= SQ_SEND_FLAGS_SIGNAL_COMP
;
1905 if (qp
->state
== CMDQ_MODIFY_QP_NEW_STATE_ERR
) {
1907 dev_dbg(&hwq
->pdev
->dev
,
1908 "%s Error QP. Scheduling for poll_cq\n", __func__
);
1912 base_hdr
= bnxt_qplib_get_prod_qe(hwq
, idx
++);
1913 ext_hdr
= bnxt_qplib_get_prod_qe(hwq
, idx
++);
1914 memset(base_hdr
, 0, sizeof(struct sq_sge
));
1915 memset(ext_hdr
, 0, sizeof(struct sq_sge
));
1917 if (wqe
->flags
& BNXT_QPLIB_SWQE_FLAGS_INLINE
)
1918 /* Copy the inline data */
1919 data_len
= bnxt_qplib_put_inline(qp
, wqe
, &idx
);
1921 data_len
= bnxt_qplib_put_sges(hwq
, wqe
->sg_list
, wqe
->num_sge
,
1925 /* Make sure we update MSN table only for wired wqes */
1928 switch (wqe
->type
) {
1929 case BNXT_QPLIB_SWQE_TYPE_SEND
:
1930 if (qp
->type
== CMDQ_CREATE_QP1_TYPE_GSI
) {
1931 struct sq_send_raweth_qp1_hdr
*sqe
= base_hdr
;
1932 struct sq_raw_ext_hdr
*ext_sqe
= ext_hdr
;
1933 /* Assemble info for Raw Ethertype QPs */
1935 sqe
->wqe_type
= wqe
->type
;
1936 sqe
->flags
= wqe
->flags
;
1937 sqe
->wqe_size
= wqe_sz
;
1938 sqe
->cfa_action
= cpu_to_le16(wqe
->rawqp1
.cfa_action
);
1939 sqe
->lflags
= cpu_to_le16(wqe
->rawqp1
.lflags
);
1940 sqe
->length
= cpu_to_le32(data_len
);
1941 ext_sqe
->cfa_meta
= cpu_to_le32((wqe
->rawqp1
.cfa_meta
&
1942 SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_MASK
) <<
1943 SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_SFT
);
1948 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM
:
1949 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV
:
1951 struct sq_ud_ext_hdr
*ext_sqe
= ext_hdr
;
1952 struct sq_send_hdr
*sqe
= base_hdr
;
1954 sqe
->wqe_type
= wqe
->type
;
1955 sqe
->flags
= wqe
->flags
;
1956 sqe
->wqe_size
= wqe_sz
;
1957 sqe
->inv_key_or_imm_data
= cpu_to_le32(wqe
->send
.inv_key
);
1958 if (qp
->type
== CMDQ_CREATE_QP_TYPE_UD
||
1959 qp
->type
== CMDQ_CREATE_QP_TYPE_GSI
) {
1960 sqe
->q_key
= cpu_to_le32(wqe
->send
.q_key
);
1961 sqe
->length
= cpu_to_le32(data_len
);
1962 sq
->psn
= (sq
->psn
+ 1) & BTH_PSN_MASK
;
1963 ext_sqe
->dst_qp
= cpu_to_le32(wqe
->send
.dst_qp
&
1964 SQ_SEND_DST_QP_MASK
);
1965 ext_sqe
->avid
= cpu_to_le32(wqe
->send
.avid
&
1969 sqe
->length
= cpu_to_le32(data_len
);
1971 pkt_num
= (data_len
+ qp
->mtu
- 1) / qp
->mtu
;
1974 sq
->psn
= (sq
->psn
+ pkt_num
) & BTH_PSN_MASK
;
1978 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE
:
1979 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM
:
1980 case BNXT_QPLIB_SWQE_TYPE_RDMA_READ
:
1982 struct sq_rdma_ext_hdr
*ext_sqe
= ext_hdr
;
1983 struct sq_rdma_hdr
*sqe
= base_hdr
;
		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->wqe_size = wqe_sz;
		sqe->imm_data = cpu_to_le32(wqe->rdma.inv_key);
		sqe->length = cpu_to_le32((u32)data_len);
		ext_sqe->remote_va = cpu_to_le64(wqe->rdma.remote_va);
		ext_sqe->remote_key = cpu_to_le32(wqe->rdma.r_key);

		pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
		sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
	{
		struct sq_atomic_ext_hdr *ext_sqe = ext_hdr;
		struct sq_atomic_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->remote_key = cpu_to_le32(wqe->atomic.r_key);
		sqe->remote_va = cpu_to_le64(wqe->atomic.remote_va);
		ext_sqe->swap_data = cpu_to_le64(wqe->atomic.swap_data);
		ext_sqe->cmp_data = cpu_to_le64(wqe->atomic.cmp_data);

		pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
		sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
	{
		struct sq_localinvalidate *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->inv_l_key = cpu_to_le32(wqe->local_inv.inv_l_key);
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR:
	{
		struct sq_fr_pmr_ext_hdr *ext_sqe = ext_hdr;
		struct sq_fr_pmr_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->access_cntl = wqe->frmr.access_cntl |
				   SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
		sqe->zero_based_page_size_log =
			(wqe->frmr.pg_sz_log & SQ_FR_PMR_PAGE_SIZE_LOG_MASK) <<
			SQ_FR_PMR_PAGE_SIZE_LOG_SFT |
			(wqe->frmr.zero_based ? SQ_FR_PMR_ZERO_BASED : 0);
		sqe->l_key = cpu_to_le32(wqe->frmr.l_key);
		temp32 = cpu_to_le32(wqe->frmr.length);
		memcpy(sqe->length, &temp32, sizeof(wqe->frmr.length));
		sqe->numlevels_pbl_page_size_log =
			((wqe->frmr.pbl_pg_sz_log <<
			  SQ_FR_PMR_PBL_PAGE_SIZE_LOG_SFT) &
			 SQ_FR_PMR_PBL_PAGE_SIZE_LOG_MASK) |
			((wqe->frmr.levels << SQ_FR_PMR_NUMLEVELS_SFT) &
			 SQ_FR_PMR_NUMLEVELS_MASK);

		for (i = 0; i < wqe->frmr.page_list_len; i++)
			wqe->frmr.pbl_ptr[i] = cpu_to_le64(
						wqe->frmr.page_list[i] |
						PTU_PTE_VALID);
		ext_sqe->pblptr = cpu_to_le64(wqe->frmr.pbl_dma_ptr);
		ext_sqe->va = cpu_to_le64(wqe->frmr.va);
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_BIND_MW:
	{
		struct sq_bind_ext_hdr *ext_sqe = ext_hdr;
		struct sq_bind_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->access_cntl = wqe->bind.access_cntl;
		sqe->mw_type_zero_based = wqe->bind.mw_type |
			(wqe->bind.zero_based ? SQ_BIND_ZERO_BASED : 0);
		sqe->parent_l_key = cpu_to_le32(wqe->bind.parent_l_key);
		sqe->l_key = cpu_to_le32(wqe->bind.r_key);
		ext_sqe->va = cpu_to_le64(wqe->bind.va);
		ext_sqe->length_lo = cpu_to_le32(wqe->bind.length);
		break;
	}
	default:
		/* Bad wqe, return error */
		rc = -EINVAL;
		goto done;
	}

	if (!qp->is_host_msn_tbl || msn_update) {
		swq->next_psn = sq->psn & BTH_PSN_MASK;
		bnxt_qplib_fill_psn_search(qp, wqe, swq);
	}

	bnxt_qplib_swq_mod_start(sq, wqe_idx);
	bnxt_qplib_hwq_incr_prod(&sq->dbinfo, hwq, swq->slots);
done:
	if (sch_handler) {
		nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
		if (nq_work) {
			nq_work->cq = qp->scq;
			nq_work->nq = qp->scq->nq;
			INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
			queue_work(qp->scq->nq->cqn_wq, &nq_work->work);
		} else {
			dev_err(&hwq->pdev->dev,
				"FP: Failed to allocate SQ nq_work!\n");
			rc = -ENOMEM;
		}
	}
	return rc;
}

void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;

	bnxt_qplib_ring_prod_db(&rq->dbinfo, DBC_DBC_TYPE_RQ);
}

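/* Build and post one receive WQE.  The RQ entry occupies a fixed number of
 * 16B slots (rq->dbinfo.max_slot): a base header, an extended header and the
 * caller's SGEs.  For a QP already in the error state the entry is only
 * book-kept and a CQN work item is queued so the poll path can flush it.
 */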
int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
			 struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_qplib_nq_work *nq_work = NULL;
	struct bnxt_qplib_q *rq = &qp->rq;
	struct rq_wqe_hdr *base_hdr;
	struct rq_ext_hdr *ext_hdr;
	struct bnxt_qplib_hwq *hwq;
	struct bnxt_qplib_swq *swq;
	bool sch_handler = false;
	u16 wqe_sz, idx;
	u32 wqe_idx;
	int rc = 0;

	hwq = &rq->hwq;
	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
		dev_err(&hwq->pdev->dev,
			"QPLIB: FP: QP (0x%x) is in the 0x%x state",
			qp->id, qp->state);
		rc = -EINVAL;
		goto done;
	}

	if (bnxt_qplib_queue_full(rq, rq->dbinfo.max_slot)) {
		dev_err(&hwq->pdev->dev,
			"FP: QP (0x%x) RQ is full!\n", qp->id);
		rc = -EINVAL;
		goto done;
	}

	swq = bnxt_qplib_get_swqe(rq, &wqe_idx);
	swq->wr_id = wqe->wr_id;
	swq->slots = rq->dbinfo.max_slot;

	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
		sch_handler = true;
		dev_dbg(&hwq->pdev->dev,
			"%s: Error QP. Scheduling for poll_cq\n", __func__);
		goto queue_err;
	}

	idx = 0;
	base_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
	ext_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
	memset(base_hdr, 0, sizeof(struct sq_sge));
	memset(ext_hdr, 0, sizeof(struct sq_sge));
	wqe_sz = (sizeof(struct rq_wqe_hdr) +
		  wqe->num_sge * sizeof(struct sq_sge)) >> 4;
	bnxt_qplib_put_sges(hwq, wqe->sg_list, wqe->num_sge, &idx);
	if (!wqe->num_sge) {
		struct sq_sge *sge;

		sge = bnxt_qplib_get_prod_qe(hwq, idx++);
		sge->size = 0;
		wqe_sz++;
	}
	base_hdr->wqe_type = wqe->type;
	base_hdr->flags = wqe->flags;
	base_hdr->wqe_size = wqe_sz;
	base_hdr->wr_id[0] = cpu_to_le32(wqe_idx);
queue_err:
	bnxt_qplib_swq_mod_start(rq, wqe_idx);
	bnxt_qplib_hwq_incr_prod(&rq->dbinfo, hwq, swq->slots);
done:
	if (sch_handler) {
		nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
		if (nq_work) {
			nq_work->cq = qp->rcq;
			nq_work->nq = qp->rcq->nq;
			INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
			queue_work(qp->rcq->nq->cqn_wq, &nq_work->work);
		} else {
			dev_err(&hwq->pdev->dev,
				"FP: Failed to allocate RQ nq_work!\n");
			rc = -ENOMEM;
		}
	}
	return rc;
}

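/* Allocate the host ring for a CQ and create it in firmware via the
 * CMDQ_BASE_OPCODE_CREATE_CQ command.  On success the doorbell context and
 * the SQ/RQ flush lists are initialised and the CQ is armed for completion
 * notifications (ARMENA).
 */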
int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct creq_create_cq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_create_cq req = {};
	struct bnxt_qplib_pbl *pbl;
	u32 coalescing = 0;
	u32 pg_sz_lvl;
	int rc;

	if (!cq->dpi) {
		dev_err(&rcfw->pdev->dev,
			"FP: CREATE_CQ failed due to NULL DPI\n");
		return -EINVAL;
	}

	cq->dbinfo.flags = 0;
	hwq_attr.res = res;
	hwq_attr.depth = cq->max_wqe;
	hwq_attr.stride = sizeof(struct cq_base);
	hwq_attr.type = HWQ_TYPE_QUEUE;
	hwq_attr.sginfo = &cq->sg_info;
	rc = bnxt_qplib_alloc_init_hwq(&cq->hwq, &hwq_attr);
	if (rc)
		return rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_CREATE_CQ,
				 sizeof(req));

	req.dpi = cpu_to_le32(cq->dpi->dpi);
	req.cq_handle = cpu_to_le64(cq->cq_handle);
	req.cq_size = cpu_to_le32(cq->max_wqe);

	if (_is_cq_coalescing_supported(res->dattr->dev_cap_flags2)) {
		req.flags |= cpu_to_le16(CMDQ_CREATE_CQ_FLAGS_COALESCING_VALID);
		coalescing |= ((cq->coalescing->buf_maxtime <<
				CMDQ_CREATE_CQ_BUF_MAXTIME_SFT) &
			       CMDQ_CREATE_CQ_BUF_MAXTIME_MASK);
		coalescing |= ((cq->coalescing->normal_maxbuf <<
				CMDQ_CREATE_CQ_NORMAL_MAXBUF_SFT) &
			       CMDQ_CREATE_CQ_NORMAL_MAXBUF_MASK);
		coalescing |= ((cq->coalescing->during_maxbuf <<
				CMDQ_CREATE_CQ_DURING_MAXBUF_SFT) &
			       CMDQ_CREATE_CQ_DURING_MAXBUF_MASK);
		if (cq->coalescing->en_ring_idle_mode)
			coalescing |= CMDQ_CREATE_CQ_ENABLE_RING_IDLE_MODE;
		else
			coalescing &= ~CMDQ_CREATE_CQ_ENABLE_RING_IDLE_MODE;
		req.coalescing = cpu_to_le32(coalescing);
	}

	pbl = &cq->hwq.pbl[PBL_LVL_0];
	pg_sz_lvl = (bnxt_qplib_base_pg_size(&cq->hwq) <<
		     CMDQ_CREATE_CQ_PG_SIZE_SFT);
	pg_sz_lvl |= (cq->hwq.level & CMDQ_CREATE_CQ_LVL_MASK);
	req.pg_size_lvl = cpu_to_le32(pg_sz_lvl);
	req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	req.cq_fco_cnq_id = cpu_to_le32(
			(cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) <<
			 CMDQ_CREATE_CQ_CNQ_ID_SFT);
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto fail;

	cq->id = le32_to_cpu(resp.xid);
	cq->period = BNXT_QPLIB_QUEUE_START_PERIOD;
	init_waitqueue_head(&cq->waitq);
	INIT_LIST_HEAD(&cq->sqf_head);
	INIT_LIST_HEAD(&cq->rqf_head);
	spin_lock_init(&cq->compl_lock);
	spin_lock_init(&cq->flush_lock);

	cq->dbinfo.hwq = &cq->hwq;
	cq->dbinfo.xid = cq->id;
	cq->dbinfo.db = cq->dpi->dbr;
	cq->dbinfo.priv_db = res->dpi_tbl.priv_db;
	cq->dbinfo.flags = 0;
	cq->dbinfo.toggle = 0;

	bnxt_qplib_armen_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMENA);

	return 0;

fail:
	bnxt_qplib_free_hwq(res, &cq->hwq);
	return rc;
}

void bnxt_qplib_resize_cq_complete(struct bnxt_qplib_res *res,
				   struct bnxt_qplib_cq *cq)
{
	bnxt_qplib_free_hwq(res, &cq->hwq);
	memcpy(&cq->hwq, &cq->resize_hwq, sizeof(cq->hwq));
	/* Reset only the cons bit in the flags */
	cq->dbinfo.flags &= ~(1UL << BNXT_QPLIB_FLAG_EPOCH_CONS_SHIFT);
}

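/* Resize allocates a second ring (cq->resize_hwq) and hands it to firmware
 * with RESIZE_CQ; the switch-over to the new ring is finished later by
 * bnxt_qplib_resize_cq_complete() above, typically once the CUT_OFF CQE for
 * the old ring has been processed.
 */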
int bnxt_qplib_resize_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq,
			 int new_cqes)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_resize_cq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_resize_cq req = {};
	struct bnxt_qplib_pbl *pbl;
	u32 pg_sz, lvl, new_sz;
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_RESIZE_CQ,
				 sizeof(req));
	hwq_attr.sginfo = &cq->sg_info;
	hwq_attr.res = res;
	hwq_attr.depth = new_cqes;
	hwq_attr.stride = sizeof(struct cq_base);
	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&cq->resize_hwq, &hwq_attr);
	if (rc)
		return rc;

	req.cq_cid = cpu_to_le32(cq->id);
	pbl = &cq->resize_hwq.pbl[PBL_LVL_0];
	pg_sz = bnxt_qplib_base_pg_size(&cq->resize_hwq);
	lvl = (cq->resize_hwq.level << CMDQ_RESIZE_CQ_LVL_SFT) &
	       CMDQ_RESIZE_CQ_LVL_MASK;
	new_sz = (new_cqes << CMDQ_RESIZE_CQ_NEW_CQ_SIZE_SFT) &
		  CMDQ_RESIZE_CQ_NEW_CQ_SIZE_MASK;
	req.new_cq_size_pg_size_lvl = cpu_to_le32(new_sz | pg_sz | lvl);
	req.new_pbl = cpu_to_le64(pbl->pg_map_arr[0]);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	return rc;
}

int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_destroy_cq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_destroy_cq req = {};
	u16 total_cnq_events;
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_DESTROY_CQ,
				 sizeof(req));

	req.cq_cid = cpu_to_le32(cq->id);
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		return rc;
	total_cnq_events = le16_to_cpu(resp.total_cnq_events);
	__wait_for_all_nqes(cq, total_cnq_events);
	bnxt_qplib_free_hwq(res, &cq->hwq);
	return 0;
}

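/* Flush helpers: complete every outstanding SQ/RQ WQE with a FLUSHED_ERR
 * status, consuming at most *budget CQE slots per call and remembering
 * where to resume (-EAGAIN) when the budget runs out.
 */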
static int __flush_sq(struct bnxt_qplib_q *sq, struct bnxt_qplib_qp *qp,
		      struct bnxt_qplib_cqe **pcqe, int *budget)
{
	struct bnxt_qplib_cqe *cqe;
	u32 start, last;
	int rc = 0;

	/* Now complete all outstanding SQEs with FLUSHED_ERR */
	start = sq->swq_start;
	cqe = *pcqe;
	while (*budget) {
		last = sq->swq_last;
		if (start == last)
			break;
		/* Skip the FENCE WQE completions */
		if (sq->swq[last].wr_id == BNXT_QPLIB_FENCE_WRID) {
			bnxt_qplib_cancel_phantom_processing(qp);
			goto skip_compl;
		}
		memset(cqe, 0, sizeof(*cqe));
		cqe->status = CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR;
		cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
		cqe->qp_handle = (u64)(unsigned long)qp;
		cqe->wr_id = sq->swq[last].wr_id;
		cqe->src_qp = qp->id;
		cqe->type = sq->swq[last].type;
		cqe++;
		(*budget)--;
skip_compl:
		bnxt_qplib_hwq_incr_cons(sq->hwq.max_elements, &sq->hwq.cons,
					 sq->swq[last].slots, &sq->dbinfo.flags);
		sq->swq_last = sq->swq[last].next_idx;
	}
	*pcqe = cqe;
	if (!(*budget) && sq->swq_last != start)
		/* Out of budget */
		rc = -EAGAIN;

	return rc;
}

static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
		      struct bnxt_qplib_cqe **pcqe, int *budget)
{
	struct bnxt_qplib_cqe *cqe;
	u32 start, last;
	int opcode = 0;
	int rc = 0;

	switch (qp->type) {
	case CMDQ_CREATE_QP1_TYPE_GSI:
		opcode = CQ_BASE_CQE_TYPE_RES_RAWETH_QP1;
		break;
	case CMDQ_CREATE_QP_TYPE_RC:
		opcode = CQ_BASE_CQE_TYPE_RES_RC;
		break;
	case CMDQ_CREATE_QP_TYPE_UD:
	case CMDQ_CREATE_QP_TYPE_GSI:
		opcode = CQ_BASE_CQE_TYPE_RES_UD;
		break;
	}

	/* Flush the rest of the RQ */
	start = rq->swq_start;
	cqe = *pcqe;
	while (*budget) {
		last = rq->swq_last;
		if (last == start)
			break;
		memset(cqe, 0, sizeof(*cqe));
		cqe->status =
		    CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR;
		cqe->opcode = opcode;
		cqe->qp_handle = (unsigned long)qp;
		cqe->wr_id = rq->swq[last].wr_id;
		cqe++;
		(*budget)--;
		bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
					 rq->swq[last].slots, &rq->dbinfo.flags);
		rq->swq_last = rq->swq[last].next_idx;
	}
	*pcqe = cqe;
	if (!*budget && rq->swq_last != start)
		/* Out of budget */
		rc = -EAGAIN;

	return rc;
}

void bnxt_qplib_mark_qp_error(void *qp_handle)
{
	struct bnxt_qplib_qp *qp = qp_handle;

	if (!qp)
		return;

	/* Must block new posting of SQ and RQ */
	qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
	bnxt_qplib_cancel_phantom_processing(qp);
}

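/* WA 9060: when the psn_search entry of the WQE being completed carries the
 * marker bit, completion is deferred (sq->condition and sq->send_phantom are
 * set and the CQ is re-armed); while the condition holds, later REQ CQEs are
 * peeked until the phantom fence WQE (BNXT_QPLIB_FENCE_WRID) comes back.
 */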
/* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive)
 *       CQE is tracked from sw_cq_cons to max_element but valid only if VALID=1
 */
static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
		     u32 cq_cons, u32 swq_last, u32 cqe_sq_cons)
{
	u32 peek_sw_cq_cons, peek_sq_cons_idx, peek_flags;
	struct bnxt_qplib_q *sq = &qp->sq;
	struct cq_req *peek_req_hwcqe;
	struct bnxt_qplib_qp *peek_qp;
	struct bnxt_qplib_q *peek_sq;
	struct bnxt_qplib_swq *swq;
	struct cq_base *peek_hwcqe;
	int i, rc = 0;

	/* Check for the psn_search marking before completing */
	swq = &sq->swq[swq_last];
	if (swq->psn_search &&
	    le32_to_cpu(swq->psn_search->flags_next_psn) & 0x80000000) {
		/* Unmark */
		swq->psn_search->flags_next_psn = cpu_to_le32
			(le32_to_cpu(swq->psn_search->flags_next_psn)
			 & ~0x80000000);
		dev_dbg(&cq->hwq.pdev->dev,
			"FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n",
			cq_cons, qp->id, swq_last, cqe_sq_cons);
		sq->condition = true;
		sq->send_phantom = true;

		/* TODO: Only ARM if the previous SQE is ARMALL */
		bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMALL);
		rc = -EAGAIN;
		goto out;
	}
	if (sq->condition) {
		/* Peek at the completions */
		peek_flags = cq->dbinfo.flags;
		peek_sw_cq_cons = cq_cons;
		i = cq->hwq.max_elements;
		while (i--) {
			peek_hwcqe = bnxt_qplib_get_qe(&cq->hwq,
						       peek_sw_cq_cons, NULL);
			/* If the next hwcqe is VALID */
			if (CQE_CMP_VALID(peek_hwcqe, peek_flags)) {
				/*
				 * The valid test of the entry must be done first
				 * before reading any further.
				 */
				dma_rmb();
				/* If the next hwcqe is a REQ */
				if ((peek_hwcqe->cqe_type_toggle &
				    CQ_BASE_CQE_TYPE_MASK) ==
				    CQ_BASE_CQE_TYPE_REQ) {
					peek_req_hwcqe = (struct cq_req *)
							 peek_hwcqe;
					peek_qp = (struct bnxt_qplib_qp *)
						((unsigned long)le64_to_cpu
						 (peek_req_hwcqe->qp_handle));
					peek_sq = &peek_qp->sq;
					peek_sq_cons_idx =
						((le16_to_cpu(
						  peek_req_hwcqe->sq_cons_idx)
						  - 1) % sq->max_wqe);
					/* If the hwcqe's sq's wr_id matches */
					if (peek_sq == sq &&
					    sq->swq[peek_sq_cons_idx].wr_id ==
					    BNXT_QPLIB_FENCE_WRID) {
						/*
						 * Unbreak only if the phantom
						 * comes back
						 */
						dev_dbg(&cq->hwq.pdev->dev,
							"FP: Got Phantom CQE\n");
						sq->condition = false;
						sq->single = true;
						rc = 0;
						goto out;
					}
				}
				/* Valid but not the phantom, so keep looping */
			} else {
				/* Not valid yet, just exit and wait */
				rc = 0;
				goto out;
			}
			bnxt_qplib_hwq_incr_cons(cq->hwq.max_elements,
						 &peek_sw_cq_cons,
						 1, &peek_flags);
		}
		dev_err(&cq->hwq.pdev->dev,
			"Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x\n",
			cq_cons, qp->id, swq_last, cqe_sq_cons);
		rc = -EINVAL;
	}
out:
	return rc;
}

static int bnxt_qplib_get_cqe_sq_cons(struct bnxt_qplib_q *sq, u32 cqe_slot)
{
	struct bnxt_qplib_hwq *sq_hwq;
	struct bnxt_qplib_swq *swq;
	int cqe_sq_cons = -1;
	u32 start, last;

	sq_hwq = &sq->hwq;

	start = sq->swq_start;
	last = sq->swq_last;

	while (last != start) {
		swq = &sq->swq[last];
		if (swq->slot_idx == cqe_slot) {
			cqe_sq_cons = swq->next_idx;
			dev_err(&sq_hwq->pdev->dev, "%s: Found cons wqe = %d slot = %d\n",
				__func__, cqe_sq_cons, cqe_slot);
			break;
		}

		last = swq->next_idx;
	}
	return cqe_sq_cons;
}

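/* A single REQ CQE can complete several SQ WQEs at once (CQE aggregation),
 * so the SQ's swq is walked from the current consumer up to the reported
 * sq_cons_idx and a work completion is fabricated for every signaled WQE.
 */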
static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
				     struct cq_req *hwcqe,
				     struct bnxt_qplib_cqe **pcqe, int *budget,
				     u32 cq_cons, struct bnxt_qplib_qp **lib_qp)
{
	struct bnxt_qplib_swq *swq;
	struct bnxt_qplib_cqe *cqe;
	u32 cqe_sq_cons, slot_num;
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *sq;
	int cqe_cons;
	int rc = 0;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev,
			"FP: Process Req qp is NULL\n");
		return -EINVAL;
	}
	sq = &qp->sq;

	cqe_sq_cons = le16_to_cpu(hwcqe->sq_cons_idx) % sq->max_sw_wqe;
	if (qp->sq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		goto done;
	}

	if (__is_err_cqe_for_var_wqe(qp, hwcqe->status)) {
		slot_num = le16_to_cpu(hwcqe->sq_cons_idx);
		cqe_cons = bnxt_qplib_get_cqe_sq_cons(sq, slot_num);
		if (cqe_cons < 0) {
			dev_err(&cq->hwq.pdev->dev, "%s: Wrong SQ cons cqe_slot_indx = %d\n",
				__func__, slot_num);
			goto done;
		}
		cqe_sq_cons = cqe_cons;
		dev_err(&cq->hwq.pdev->dev, "%s: cqe_sq_cons = %d swq_last = %d swq_start = %d\n",
			__func__, cqe_sq_cons, sq->swq_last, sq->swq_start);
	}

	/* Require to walk the sq's swq to fabricate CQEs for all previously
	 * signaled SWQEs due to CQE aggregation from the current sq cons
	 * to the cqe_sq_cons
	 */
	cqe = *pcqe;
	while (*budget) {
		if (sq->swq_last == cqe_sq_cons)
			/* Done */
			break;

		swq = &sq->swq[sq->swq_last];
		memset(cqe, 0, sizeof(*cqe));
		cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
		cqe->qp_handle = (u64)(unsigned long)qp;
		cqe->src_qp = qp->id;
		cqe->wr_id = swq->wr_id;
		if (cqe->wr_id == BNXT_QPLIB_FENCE_WRID)
			goto skip;
		cqe->type = swq->type;

		/* For the last CQE, check for status.  For errors, regardless
		 * of the request being signaled or not, it must complete with
		 * the hwcqe error status
		 */
		if (swq->next_idx == cqe_sq_cons &&
		    hwcqe->status != CQ_REQ_STATUS_OK) {
			cqe->status = hwcqe->status;
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Processed Req wr_id[%d] = 0x%llx with status 0x%x\n",
				sq->swq_last, cqe->wr_id, cqe->status);
			cqe++;
			(*budget)--;
			bnxt_qplib_mark_qp_error(qp);
			/* Add qp to flush list of the CQ */
			bnxt_qplib_add_flush_qp(qp);
		} else {
			/* Before we complete, do WA 9060 */
			if (do_wa9060(qp, cq, cq_cons, sq->swq_last,
				      cqe_sq_cons)) {
				*lib_qp = qp;
				goto out;
			}
			if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
				cqe->status = CQ_REQ_STATUS_OK;
				cqe++;
				(*budget)--;
			}
		}
skip:
		bnxt_qplib_hwq_incr_cons(sq->hwq.max_elements, &sq->hwq.cons,
					 swq->slots, &sq->dbinfo.flags);
		sq->swq_last = swq->next_idx;
		if (sq->single)
			break;
	}
out:
	*pcqe = cqe;
	if (sq->swq_last != cqe_sq_cons) {
		/* Out of budget */
		rc = -EAGAIN;
		goto done;
	}
	/*
	 * Back to normal completion mode only after it has completed all of
	 * the WC for this CQE
	 */
	sq->single = false;
done:
	return rc;
}

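/* Return a consumed SRQ entry to the SRQ's software free list and advance
 * the SRQ hardware-queue consumer index, under the hwq lock.
 */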
static void bnxt_qplib_release_srqe(struct bnxt_qplib_srq *srq, u32 tag)
{
	spin_lock(&srq->hwq.lock);
	srq->swq[srq->last_idx].next_idx = (int)tag;
	srq->last_idx = (int)tag;
	srq->swq[srq->last_idx].next_idx = -1;
	bnxt_qplib_hwq_incr_cons(srq->hwq.max_elements, &srq->hwq.cons,
				 srq->dbinfo.max_slot, &srq->dbinfo.flags);
	spin_unlock(&srq->hwq.lock);
}

static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
					struct cq_res_rc *hwcqe,
					struct bnxt_qplib_cqe **pcqe,
					int *budget)
{
	struct bnxt_qplib_srq *srq;
	struct bnxt_qplib_cqe *cqe;
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *rq;
	u32 wr_id_idx;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev, "process_cq RC qp is NULL\n");
		return -EINVAL;
	}
	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		return 0;
	}

	cqe = *pcqe;
	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
	cqe->length = le32_to_cpu(hwcqe->length);
	cqe->invrkey = le32_to_cpu(hwcqe->imm_data_or_inv_r_key);
	cqe->mr_handle = le64_to_cpu(hwcqe->mr_handle);
	cqe->flags = le16_to_cpu(hwcqe->flags);
	cqe->status = hwcqe->status;
	cqe->qp_handle = (u64)(unsigned long)qp;

	wr_id_idx = le32_to_cpu(hwcqe->srq_or_rq_wr_id) &
				CQ_RES_RC_SRQ_OR_RQ_WR_ID_MASK;
	if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
		srq = qp->srq;
		if (!srq)
			return -EINVAL;
		if (wr_id_idx >= srq->hwq.max_elements) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process RC wr_id idx 0x%x exceeded SRQ max 0x%x\n",
				wr_id_idx, srq->hwq.max_elements);
			return -EINVAL;
		}
		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
		bnxt_qplib_release_srqe(srq, wr_id_idx);
		cqe++;
		(*budget)--;
		*pcqe = cqe;
	} else {
		struct bnxt_qplib_swq *swq;

		rq = &qp->rq;
		if (wr_id_idx > (rq->max_wqe - 1)) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process RC wr_id idx 0x%x exceeded RQ max 0x%x\n",
				wr_id_idx, rq->max_wqe);
			return -EINVAL;
		}
		if (wr_id_idx != rq->swq_last)
			return -EINVAL;
		swq = &rq->swq[rq->swq_last];
		cqe->wr_id = swq->wr_id;
		cqe++;
		(*budget)--;
		bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
					 swq->slots, &rq->dbinfo.flags);
		rq->swq_last = swq->next_idx;
		*pcqe = cqe;

		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
			/* Add qp to flush list of the CQ */
			bnxt_qplib_add_flush_qp(qp);
		}
	}

	return 0;
}

static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
					struct cq_res_ud *hwcqe,
					struct bnxt_qplib_cqe **pcqe,
					int *budget)
{
	struct bnxt_qplib_srq *srq;
	struct bnxt_qplib_cqe *cqe;
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *rq;
	u32 wr_id_idx;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev, "process_cq UD qp is NULL\n");
		return -EINVAL;
	}
	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		return 0;
	}
	cqe = *pcqe;
	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
	cqe->length = le16_to_cpu(hwcqe->length) & CQ_RES_UD_LENGTH_MASK;
	cqe->cfa_meta = le16_to_cpu(hwcqe->cfa_metadata);
	cqe->invrkey = le32_to_cpu(hwcqe->imm_data);
	cqe->flags = le16_to_cpu(hwcqe->flags);
	cqe->status = hwcqe->status;
	cqe->qp_handle = (u64)(unsigned long)qp;
	/*FIXME: Endianness fix needed for smace */
	memcpy(cqe->smac, hwcqe->src_mac, ETH_ALEN);
	wr_id_idx = le32_to_cpu(hwcqe->src_qp_high_srq_or_rq_wr_id)
				& CQ_RES_UD_SRQ_OR_RQ_WR_ID_MASK;
	cqe->src_qp = le16_to_cpu(hwcqe->src_qp_low) |
				  ((le32_to_cpu(
				  hwcqe->src_qp_high_srq_or_rq_wr_id) &
				  CQ_RES_UD_SRC_QP_HIGH_MASK) >> 8);

	if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
		srq = qp->srq;
		if (!srq)
			return -EINVAL;

		if (wr_id_idx >= srq->hwq.max_elements) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process UD wr_id idx 0x%x exceeded SRQ max 0x%x\n",
				wr_id_idx, srq->hwq.max_elements);
			return -EINVAL;
		}
		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
		bnxt_qplib_release_srqe(srq, wr_id_idx);
		cqe++;
		(*budget)--;
		*pcqe = cqe;
	} else {
		struct bnxt_qplib_swq *swq;

		rq = &qp->rq;
		if (wr_id_idx > (rq->max_wqe - 1)) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process UD wr_id idx 0x%x exceeded RQ max 0x%x\n",
				wr_id_idx, rq->max_wqe);
			return -EINVAL;
		}

		if (rq->swq_last != wr_id_idx)
			return -EINVAL;
		swq = &rq->swq[rq->swq_last];
		cqe->wr_id = swq->wr_id;
		cqe++;
		(*budget)--;
		bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
					 swq->slots, &rq->dbinfo.flags);
		rq->swq_last = swq->next_idx;
		*pcqe = cqe;

		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
			/* Add qp to flush list of the CQ */
			bnxt_qplib_add_flush_qp(qp);
		}
	}

	return 0;
}

bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
{
	struct cq_base *hw_cqe;
	bool rc = true;

	hw_cqe = bnxt_qplib_get_qe(&cq->hwq, cq->hwq.cons, NULL);
	/* Check for Valid bit. If the CQE is valid, return false */
	rc = !CQE_CMP_VALID(hw_cqe, cq->dbinfo.flags);
	return rc;
}

static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
						struct cq_res_raweth_qp1 *hwcqe,
						struct bnxt_qplib_cqe **pcqe,
						int *budget)
{
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *rq;
	struct bnxt_qplib_srq *srq;
	struct bnxt_qplib_cqe *cqe;
	u32 wr_id_idx;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev, "process_cq Raw/QP1 qp is NULL\n");
		return -EINVAL;
	}
	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		return 0;
	}
	cqe = *pcqe;
	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
	cqe->flags = le16_to_cpu(hwcqe->flags);
	cqe->qp_handle = (u64)(unsigned long)qp;

	wr_id_idx =
		le32_to_cpu(hwcqe->raweth_qp1_payload_offset_srq_or_rq_wr_id)
				& CQ_RES_RAWETH_QP1_SRQ_OR_RQ_WR_ID_MASK;
	cqe->src_qp = qp->id;
	if (qp->id == 1 && !cqe->length) {
		/* Add workaround for the length misdetection */
		cqe->length = 296;
	} else {
		cqe->length = le16_to_cpu(hwcqe->length);
	}
	cqe->pkey_index = qp->pkey_index;
	memcpy(cqe->smac, qp->smac, 6);

	cqe->raweth_qp1_flags = le16_to_cpu(hwcqe->raweth_qp1_flags);
	cqe->raweth_qp1_flags2 = le32_to_cpu(hwcqe->raweth_qp1_flags2);
	cqe->raweth_qp1_metadata = le32_to_cpu(hwcqe->raweth_qp1_metadata);

	if (cqe->flags & CQ_RES_RAWETH_QP1_FLAGS_SRQ_SRQ) {
		srq = qp->srq;
		if (!srq) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: SRQ used but not defined??\n");
			return -EINVAL;
		}
		if (wr_id_idx >= srq->hwq.max_elements) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process Raw/QP1 wr_id idx 0x%x exceeded SRQ max 0x%x\n",
				wr_id_idx, srq->hwq.max_elements);
			return -EINVAL;
		}
		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
		bnxt_qplib_release_srqe(srq, wr_id_idx);
		cqe++;
		(*budget)--;
		*pcqe = cqe;
	} else {
		struct bnxt_qplib_swq *swq;

		rq = &qp->rq;
		if (wr_id_idx > (rq->max_wqe - 1)) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process Raw/QP1 RQ wr_id idx 0x%x exceeded RQ max 0x%x\n",
				wr_id_idx, rq->max_wqe);
			return -EINVAL;
		}
		if (rq->swq_last != wr_id_idx)
			return -EINVAL;
		swq = &rq->swq[rq->swq_last];
		cqe->wr_id = swq->wr_id;
		cqe++;
		(*budget)--;
		bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
					 swq->slots, &rq->dbinfo.flags);
		rq->swq_last = swq->next_idx;
		*pcqe = cqe;

		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
			/* Add qp to flush list of the CQ */
			bnxt_qplib_add_flush_qp(qp);
		}
	}

	return 0;
}

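/* A terminal CQE moves the QP to the error state.  Any aggregated
 * successful completions up to the reported sq_cons_idx are emitted first;
 * the RQ side is simply queued for flushing.
 */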
static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
					  struct cq_terminal *hwcqe,
					  struct bnxt_qplib_cqe **pcqe,
					  int *budget)
{
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *sq, *rq;
	struct bnxt_qplib_cqe *cqe;
	u32 swq_last = 0, cqe_cons;
	int rc = 0;

	/* Check the Status */
	if (hwcqe->status != CQ_TERMINAL_STATUS_OK)
		dev_warn(&cq->hwq.pdev->dev,
			 "FP: CQ Process Terminal Error status = 0x%x\n",
			 hwcqe->status);

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp)
		return -EINVAL;

	/* Must block new posting of SQ and RQ */
	qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;

	sq = &qp->sq;
	rq = &qp->rq;

	cqe_cons = le16_to_cpu(hwcqe->sq_cons_idx);
	if (cqe_cons == 0xFFFF)
		goto do_rq;
	cqe_cons %= sq->max_sw_wqe;

	if (qp->sq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		goto sq_done;
	}

	/* Terminal CQE can also include aggregated successful CQEs prior.
	 * So we must complete all CQEs from the current sq's cons to the
	 * cq_cons with status OK
	 */
	cqe = *pcqe;
	while (*budget) {
		swq_last = sq->swq_last;
		if (swq_last == cqe_cons)
			break;
		if (sq->swq[swq_last].flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
			memset(cqe, 0, sizeof(*cqe));
			cqe->status = CQ_REQ_STATUS_OK;
			cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
			cqe->qp_handle = (u64)(unsigned long)qp;
			cqe->src_qp = qp->id;
			cqe->wr_id = sq->swq[swq_last].wr_id;
			cqe->type = sq->swq[swq_last].type;
			cqe++;
			(*budget)--;
		}
		bnxt_qplib_hwq_incr_cons(sq->hwq.max_elements, &sq->hwq.cons,
					 sq->swq[swq_last].slots, &sq->dbinfo.flags);
		sq->swq_last = sq->swq[swq_last].next_idx;
	}
	*pcqe = cqe;
	if (!(*budget) && swq_last != cqe_cons) {
		/* Out of budget */
		rc = -EAGAIN;
		goto sq_done;
	}
sq_done:
	if (rc)
		return rc;
do_rq:
	cqe_cons = le16_to_cpu(hwcqe->rq_cons_idx);
	if (cqe_cons == 0xFFFF) {
		goto done;
	} else if (cqe_cons > rq->max_wqe - 1) {
		dev_err(&cq->hwq.pdev->dev,
			"FP: CQ Processed terminal reported rq_cons_idx 0x%x exceeds max 0x%x\n",
			cqe_cons, rq->max_wqe);
		goto done;
	}

	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		rc = 0;
		goto done;
	}

	/* Terminal CQE requires all posted RQEs to complete with FLUSHED_ERR
	 * from the current rq->cons to the rq->prod regardless what the
	 * rq->cons the terminal CQE indicates
	 */

	/* Add qp to flush list of the CQ */
	bnxt_qplib_add_flush_qp(qp);
done:
	return rc;
}

static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq,
					struct cq_cutoff *hwcqe)
{
	/* Check the Status */
	if (hwcqe->status != CQ_CUTOFF_STATUS_OK) {
		dev_err(&cq->hwq.pdev->dev,
			"FP: CQ Process Cutoff Error status = 0x%x\n",
			hwcqe->status);
		return -EINVAL;
	}
	clear_bit(CQ_FLAGS_RESIZE_IN_PROG, &cq->flags);
	wake_up_interruptible(&cq->waitq);

	return 0;
}

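/* Generate flush completions for every QP currently linked on this CQ's
 * SQ/RQ flush lists; returns how many CQEs were produced.
 */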
int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
				  struct bnxt_qplib_cqe *cqe,
				  int num_cqes)
{
	struct bnxt_qplib_qp *qp = NULL;
	u32 budget = num_cqes;
	unsigned long flags;

	spin_lock_irqsave(&cq->flush_lock, flags);
	list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
		dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing SQ QP= %p\n", qp);
		__flush_sq(&qp->sq, qp, &cqe, &budget);
	}

	list_for_each_entry(qp, &cq->rqf_head, rq_flush) {
		dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing RQ QP= %p\n", qp);
		__flush_rq(&qp->rq, qp, &cqe, &budget);
	}
	spin_unlock_irqrestore(&cq->flush_lock, flags);

	return num_cqes - budget;
}

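/* Main poll loop: consume valid CQEs from the hardware ring, dispatch each
 * one by type to the helpers above, and ring the CQ doorbell for whatever
 * was consumed.  Returns the number of work completions written to 'cqe'.
 */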
int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
		       int num_cqes, struct bnxt_qplib_qp **lib_qp)
{
	struct cq_base *hw_cqe;
	int budget, rc = 0;
	u32 hw_polled = 0;
	u8 type;

	budget = num_cqes;
	while (budget) {
		hw_cqe = bnxt_qplib_get_qe(&cq->hwq, cq->hwq.cons, NULL);

		/* Check for Valid bit */
		if (!CQE_CMP_VALID(hw_cqe, cq->dbinfo.flags))
			break;

		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();
		/* From the device's respective CQE format to qplib_wc*/
		type = hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
		switch (type) {
		case CQ_BASE_CQE_TYPE_REQ:
			rc = bnxt_qplib_cq_process_req(cq,
						       (struct cq_req *)hw_cqe,
						       &cqe, &budget,
						       cq->hwq.cons, lib_qp);
			break;
		case CQ_BASE_CQE_TYPE_RES_RC:
			rc = bnxt_qplib_cq_process_res_rc(cq,
							  (struct cq_res_rc *)
							  hw_cqe, &cqe,
							  &budget);
			break;
		case CQ_BASE_CQE_TYPE_RES_UD:
			rc = bnxt_qplib_cq_process_res_ud
					(cq, (struct cq_res_ud *)hw_cqe, &cqe,
					 &budget);
			break;
		case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
			rc = bnxt_qplib_cq_process_res_raweth_qp1
					(cq, (struct cq_res_raweth_qp1 *)
					 hw_cqe, &cqe, &budget);
			break;
		case CQ_BASE_CQE_TYPE_TERMINAL:
			rc = bnxt_qplib_cq_process_terminal
					(cq, (struct cq_terminal *)hw_cqe,
					 &cqe, &budget);
			break;
		case CQ_BASE_CQE_TYPE_CUT_OFF:
			bnxt_qplib_cq_process_cutoff
					(cq, (struct cq_cutoff *)hw_cqe);
			/* Done processing this CQ */
			goto exit;
		default:
			dev_err(&cq->hwq.pdev->dev,
				"process_cq unknown type 0x%lx\n",
				hw_cqe->cqe_type_toggle &
				CQ_BASE_CQE_TYPE_MASK);
			rc = -EINVAL;
			break;
		}
		if (rc < 0) {
			if (rc == -EAGAIN)
				break;
			/* Error while processing the CQE, just skip to the
			 * next one
			 */
			if (type != CQ_BASE_CQE_TYPE_TERMINAL)
				dev_err(&cq->hwq.pdev->dev,
					"process_cqe error rc = 0x%x\n", rc);
		}
		hw_polled++;
		bnxt_qplib_hwq_incr_cons(cq->hwq.max_elements, &cq->hwq.cons,
					 1, &cq->dbinfo.flags);
	}
	if (hw_polled)
		bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ);
exit:
	return num_cqes - budget;
}

void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
{
	cq->dbinfo.toggle = cq->toggle;
	if (arm_type)
		bnxt_qplib_ring_db(&cq->dbinfo, arm_type);
	/* Using cq->arm_state variable to track whether to issue cq handler */
	atomic_set(&cq->arm_state, 1);
}

void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp)
{
	flush_workqueue(qp->scq->nq->cqn_wq);
	if (qp->scq != qp->rcq)
		flush_workqueue(qp->rcq->nq->cqn_wq);
}