/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: Fast Path Operators (header)
 */

#ifndef __BNXT_QPLIB_FP_H__
#define __BNXT_QPLIB_FP_H__

#include <rdma/bnxt_re-abi.h>

/* A few helper structures are defined here temporarily; they should be
 * removed once roce_hsi.h is updated in the original code base.
 */
struct sq_ud_ext_hdr {
	__le32	dst_qp;
	__le32	avid;
	__le64	rsvd;
};

struct sq_raw_ext_hdr {
	__le32	cfa_meta;
	__le32	rsvd0;
	__le64	rsvd1;
};

struct sq_rdma_ext_hdr {
	__le64	remote_va;
	__le32	remote_key;
	__le32	rsvd;
};

struct sq_atomic_ext_hdr {
	__le64	swap_data;
	__le64	cmp_data;
};

struct sq_fr_pmr_ext_hdr {
	__le64	pblptr;
	__le64	va;
};

struct sq_bind_ext_hdr {
	__le64	va;
	__le32	length_lo;
	__le32	length_hi;
};

/* Helper structures end */

struct bnxt_qplib_srq {
	struct bnxt_qplib_pd		*pd;
	struct bnxt_qplib_dpi		*dpi;
	struct bnxt_qplib_db_info	dbinfo;
	u64				srq_handle;
	u32				id;
	u16				wqe_size;
	u32				max_wqe;
	u32				max_sge;
	u32				threshold;
	bool				arm_req;
	struct bnxt_qplib_cq		*cq;
	struct bnxt_qplib_hwq		hwq;
	struct bnxt_qplib_swq		*swq;
	int				start_idx;
	int				last_idx;
	struct bnxt_qplib_sg_info	sg_info;
	u16				eventq_hw_ring_id;
	spinlock_t			lock; /* protect SRQE link list */
};

struct bnxt_qplib_sge {
	u64				addr;
	u32				lkey;
	u32				size;
};

#define BNXT_QPLIB_QP_MAX_SGL	6
struct bnxt_qplib_swq {
	u64				wr_id;
	int				next_idx;
	u8				type;
	u8				flags;
	u32				start_psn;
	u32				next_psn;
	u32				slot_idx;
	u8				slots;
	struct sq_psn_search		*psn_search;
	struct sq_psn_search_ext	*psn_ext;
};

struct bnxt_qplib_swqe {
	/* General */
#define	BNXT_QPLIB_FENCE_WRID	0x46454E43	/* "FENC" */
	u64				wr_id;
	u8				type;
#define BNXT_QPLIB_SWQE_TYPE_SEND			0
#define BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM		1
#define BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV		2
#define BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE			4
#define BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM	5
#define BNXT_QPLIB_SWQE_TYPE_RDMA_READ			6
#define BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP		8
#define BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD	11
#define BNXT_QPLIB_SWQE_TYPE_LOCAL_INV			12
#define BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR		13
#define BNXT_QPLIB_SWQE_TYPE_REG_MR			13
#define BNXT_QPLIB_SWQE_TYPE_BIND_MW			14
#define BNXT_QPLIB_SWQE_TYPE_RECV			128
#define BNXT_QPLIB_SWQE_TYPE_RECV_RDMA_IMM		129
	u8				flags;
#define BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP		BIT(0)
#define BNXT_QPLIB_SWQE_FLAGS_RD_ATOMIC_FENCE		BIT(1)
#define BNXT_QPLIB_SWQE_FLAGS_UC_FENCE			BIT(2)
#define BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT		BIT(3)
#define BNXT_QPLIB_SWQE_FLAGS_INLINE			BIT(4)
	struct bnxt_qplib_sge		sg_list[BNXT_QPLIB_QP_MAX_SGL];
	int				num_sge;
	/* Max inline data is 96 bytes */
#define BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH		96
	u8		inline_data[BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH];

	union {
		/* Send, with imm, inval key */
		struct {
			union {
				__be32	imm_data;
				u32	inv_key;
			};
			u32		q_key;
			u32		dst_qp;
			u16		avid;
		} send;

		/* Send Raw Ethernet and QP1 */
		struct {
			u16		lflags;
			u16		cfa_action;
			u32		cfa_meta;
		} rawqp1;

		/* RDMA write, with imm, read */
		struct {
			union {
				__be32	imm_data;
				u32	inv_key;
			};
			u64		remote_va;
			u32		r_key;
		} rdma;

		/* Atomic cmp/swap, fetch/add */
		struct {
			u64		remote_va;
			u32		r_key;
			u64		swap_data;
			u64		cmp_data;
		} atomic;

		/* Local Invalidate */
		struct {
			u32		inv_l_key;
		} local;

		/* FR-PMR */
		struct {
			u8		access_cntl;
			u8		pg_sz_log;
#define BNXT_QPLIB_SWQE_PAGE_SIZE_4K			0
#define BNXT_QPLIB_SWQE_PAGE_SIZE_8K			1
#define BNXT_QPLIB_SWQE_PAGE_SIZE_64K			4
#define BNXT_QPLIB_SWQE_PAGE_SIZE_256K			6
#define BNXT_QPLIB_SWQE_PAGE_SIZE_1M			8
#define BNXT_QPLIB_SWQE_PAGE_SIZE_2M			9
#define BNXT_QPLIB_SWQE_PAGE_SIZE_4M			10
#define BNXT_QPLIB_SWQE_PAGE_SIZE_1G			18
			u8		levels;
#define PAGE_SHIFT_4K	12
			__le64		*pbl_ptr;
			dma_addr_t	pbl_dma_ptr;
			u64		*page_list;
			u16		page_list_len;
			bool		zero_based;
			u32		l_key;
			u32		length;
			u64		va;
		} frmr;

		/* Bind */
		struct {
			u8		access_cntl;
#define BNXT_QPLIB_BIND_SWQE_ACCESS_LOCAL_WRITE		BIT(0)
#define BNXT_QPLIB_BIND_SWQE_ACCESS_REMOTE_READ		BIT(1)
#define BNXT_QPLIB_BIND_SWQE_ACCESS_REMOTE_WRITE	BIT(2)
#define BNXT_QPLIB_BIND_SWQE_ACCESS_REMOTE_ATOMIC	BIT(3)
#define BNXT_QPLIB_BIND_SWQE_ACCESS_WINDOW_BIND		BIT(4)
			bool		zero_based;
			u8		mw_type;
			u32		parent_l_key;
			u32		r_key;
			u64		va;
			u32		length;
		} bind;
	};
};
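
/*
 * Editor's note: the BNXT_QPLIB_SWQE_PAGE_SIZE_* encodings above are the
 * page shift relative to 4K, i.e. log2(page size) - 12: 4K -> 0, 8K -> 1,
 * 64K -> 4, ..., 1G -> 18, which is also why PAGE_SHIFT_4K is 12.
 *
 * Illustrative sketch (not from the driver): posting a signaled SEND with
 * one SGE using this WQE layout. The cookie, DMA address, lkey and length
 * below are hypothetical caller-provided values.
 *
 *	struct bnxt_qplib_swqe wqe = {};
 *	int rc;
 *
 *	wqe.type = BNXT_QPLIB_SWQE_TYPE_SEND;
 *	wqe.flags = BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
 *	wqe.wr_id = cookie;
 *	wqe.sg_list[0].addr = dma_addr;
 *	wqe.sg_list[0].lkey = lkey;
 *	wqe.sg_list[0].size = len;
 *	wqe.num_sge = 1;
 *	rc = bnxt_qplib_post_send(qp, &wqe);
 *	if (!rc)
 *		bnxt_qplib_post_send_db(qp);	// ring the SQ doorbell
 */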

struct bnxt_qplib_q {
	struct bnxt_qplib_hwq		hwq;
	struct bnxt_qplib_swq		*swq;
	struct bnxt_qplib_db_info	dbinfo;
	struct bnxt_qplib_sg_info	sg_info;
	u32				max_wqe;
	u16				wqe_size;
	u16				q_full_delta;
	u16				max_sge;
	u32				psn;
	bool				condition;
	bool				single;
	bool				send_phantom;
	u32				phantom_wqe_cnt;
	u32				phantom_cqe_cnt;
	u32				next_cq_cons;
	bool				flushed;
	u32				swq_start;
	u32				swq_last;
};

struct bnxt_qplib_qp {
	struct bnxt_qplib_pd		*pd;
	struct bnxt_qplib_dpi		*dpi;
	struct bnxt_qplib_chip_ctx	*cctx;
	u64				qp_handle;
#define BNXT_QPLIB_QP_ID_INVALID	0xFFFFFFFF
	u32				id;
	u8				type;
	u8				sig_type;
	u8				wqe_mode;
	u8				state;
	u8				cur_qp_state;
	bool				en_sqd_async_notify;
	u32				max_rd_atomic;
	u32				max_dest_rd_atomic;
	struct bnxt_qplib_ah		ah;

#define BTH_PSN_MASK			((1 << 24) - 1)
	/* SQ */
	struct bnxt_qplib_q		sq;
	/* RQ */
	struct bnxt_qplib_q		rq;
	/* SRQ */
	struct bnxt_qplib_srq		*srq;
	/* CQ */
	struct bnxt_qplib_cq		*scq;
	struct bnxt_qplib_cq		*rcq;
	/* IRRQ and ORRQ */
	struct bnxt_qplib_hwq		irrq;
	struct bnxt_qplib_hwq		orrq;
	/* Header buffer for QP1 */
	int				sq_hdr_buf_size;
	int				rq_hdr_buf_size;
/*
 * Buffer space for ETH(14), IP or GRH(40), UDP header(8)
 * and ib_bth + ib_deth (20).
 * Max required is 82 when RoCE V2 is enabled; the define below is 86,
 * presumably also covering the 4-byte iCRC.
 */
#define BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2	86
	/* Ethernet header	=  14 */
	/* ib_grh		=  40 (provided by MAD) */
	/* ib_bth + ib_deth	=  20 */
	/* MAD			= 256 (provided by MAD) */
	/* iCRC			=   4 */
#define BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE	14
#define BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2	512
#define BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4	20
#define BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6	40
#define BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE	20
	void				*sq_hdr_buf;
	dma_addr_t			sq_hdr_buf_map;
	void				*rq_hdr_buf;
	dma_addr_t			rq_hdr_buf_map;
	struct list_head		sq_flush;
	struct list_head		rq_flush;
	u32				msn;
	u32				msn_tbl_sz;
	bool				is_host_msn_tbl;
};

#define BNXT_QPLIB_MAX_CQE_ENTRY_SIZE	sizeof(struct cq_base)

#define CQE_CNT_PER_PG		(PAGE_SIZE / BNXT_QPLIB_MAX_CQE_ENTRY_SIZE)
#define CQE_MAX_IDX_PER_PG	(CQE_CNT_PER_PG - 1)
#define CQE_PG(x)		(((x) & ~CQE_MAX_IDX_PER_PG) / CQE_CNT_PER_PG)
#define CQE_IDX(x)		((x) & CQE_MAX_IDX_PER_PG)
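
/*
 * Worked example (editor's note, assuming the usual 32-byte cq_base and
 * 4K PAGE_SIZE): CQE_CNT_PER_PG = 4096 / 32 = 128, so for consumer index
 * 200, CQE_PG(200) = (200 & ~127) / 128 = 1 and CQE_IDX(200) = 200 & 127
 * = 72, i.e. the 73rd CQE on the 2nd page.
 */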

#define ROCE_CQE_CMP_V			0
#define CQE_CMP_VALID(hdr, pass)					\
	(!!((hdr)->cqe_type_toggle & CQ_BASE_TOGGLE) ==			\
	 !((pass) & BNXT_QPLIB_FLAG_EPOCH_CONS_MASK))
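
/*
 * Editor's note: a CQE is valid when its hardware toggle bit is the
 * inverse of the consumer's epoch bit carried in "pass". The epoch bit
 * flips on every wraparound of the consumer index, which flips the
 * toggle value the consumer expects and keeps stale entries from being
 * re-read.
 */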

static inline u32 __bnxt_qplib_get_avail(struct bnxt_qplib_hwq *hwq)
{
	int cons, prod, avail;

	cons = hwq->cons;
	prod = hwq->prod;
	avail = cons - prod;
	if (cons <= prod)
		avail += hwq->depth;
	return avail;
}

static inline bool bnxt_qplib_queue_full(struct bnxt_qplib_q *que,
					 u8 slots)
{
	struct bnxt_qplib_hwq *hwq;
	int avail;

	hwq = &que->hwq;
	/* False full is possible, retrying post-send makes sense */
	avail = hwq->cons - hwq->prod;
	if (hwq->cons <= hwq->prod)
		avail += hwq->depth;
	return avail <= slots;
}
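
/*
 * Worked example (editor's note): with depth = 1024, prod = 1000 and
 * cons = 10, avail = 10 - 1000 + 1024 = 34 free slots, so a request for
 * up to 33 slots proceeds. "False full" can happen because the cached
 * consumer index may lag the hardware; callers simply retry the post.
 */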

/* CQ coalescing parameters */
struct bnxt_qplib_cq_coal_param {
	u16	buf_maxtime;
	u8	normal_maxbuf;
	u8	during_maxbuf;
	u8	en_ring_idle_mode;
};

#define BNXT_QPLIB_CQ_COAL_DEF_BUF_MAXTIME		0x1
#define BNXT_QPLIB_CQ_COAL_DEF_NORMAL_MAXBUF_P7	0x8
#define BNXT_QPLIB_CQ_COAL_DEF_DURING_MAXBUF_P7	0x8
#define BNXT_QPLIB_CQ_COAL_DEF_NORMAL_MAXBUF_P5	0x1
#define BNXT_QPLIB_CQ_COAL_DEF_DURING_MAXBUF_P5	0x1
#define BNXT_QPLIB_CQ_COAL_DEF_EN_RING_IDLE_MODE	0x1
#define BNXT_QPLIB_CQ_COAL_MAX_BUF_MAXTIME		0x1bf
#define BNXT_QPLIB_CQ_COAL_MAX_NORMAL_MAXBUF		0x1f
#define BNXT_QPLIB_CQ_COAL_MAX_DURING_MAXBUF		0x1f
#define BNXT_QPLIB_CQ_COAL_MAX_EN_RING_IDLE_MODE	0x1

struct bnxt_qplib_cqe {
	u8				status;
	u8				type;
	u8				opcode;
	u32				length;
	u64				wr_id;
	union {
		__be32			immdata;
		u32			invrkey;
	};
	u64				qp_handle;
	u64				mr_handle;
	u16				flags;
	u8				smac[6];
	u32				src_qp;
	u16				raweth_qp1_flags;
	u16				raweth_qp1_errors;
	u16				raweth_qp1_cfa_code;
	u32				raweth_qp1_flags2;
	u32				raweth_qp1_metadata;
	u8				raweth_qp1_payload_offset;
	u16				pkey_index;
};

#define BNXT_QPLIB_QUEUE_START_PERIOD		0x01
struct bnxt_qplib_cq {
	struct bnxt_qplib_dpi		*dpi;
	struct bnxt_qplib_db_info	dbinfo;
	u32				max_wqe;
	u32				id;
	u16				count;
	u16				period;
	struct bnxt_qplib_hwq		hwq;
	struct bnxt_qplib_hwq		resize_hwq;
	u32				cnq_hw_ring_id;
	struct bnxt_qplib_nq		*nq;
	bool				resize_in_progress;
	struct bnxt_qplib_sg_info	sg_info;
	u64				cq_handle;

#define CQ_RESIZE_WAIT_TIME_MS		500
	unsigned long			flags;
#define CQ_FLAGS_RESIZE_IN_PROG		1
	wait_queue_head_t		waitq;
	struct list_head		sqf_head, rqf_head;
	atomic_t			arm_state;
	spinlock_t			compl_lock; /* synch CQ handlers */
/* A QP can move to error state from modify_qp, an async error event or an
 * error CQE as part of poll_cq. When a QP is moved to error state, it gets
 * added to two flush lists, one each for SQ and RQ.
 * Each flush list is protected by qplib_cq->flush_lock. Both scq and rcq
 * flush_locks should be acquired when a QP is moved to error. The control
 * path operations (modify_qp and async error events) are synchronized with
 * poll_cq using the upper-level CQ locks (bnxt_re_cq->cq_lock) of both SCQ
 * and RCQ. The qplib_cq->flush_lock is required to synchronize two
 * instances of poll_cq of the same QP while manipulating the flush list.
 */
	spinlock_t			flush_lock; /* QP flush management */
	u16				cnq_events;
	struct bnxt_qplib_cq_coal_param	*coalescing;
};

#define BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE	sizeof(struct xrrq_irrq)
#define BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE	sizeof(struct xrrq_orrq)
#define IRD_LIMIT_TO_IRRQ_SLOTS(x)	(2 * (x) + 2)
#define IRRQ_SLOTS_TO_IRD_LIMIT(s)	(((s) >> 1) - 1)
#define ORD_LIMIT_TO_ORRQ_SLOTS(x)	((x) + 1)
#define ORRQ_SLOTS_TO_ORD_LIMIT(s)	((s) - 1)
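
/*
 * Editor's note: the two pairs above are inverses of each other. For
 * example, an IRD limit of 8 needs IRD_LIMIT_TO_IRRQ_SLOTS(8) =
 * 2 * 8 + 2 = 18 IRRQ slots, and IRRQ_SLOTS_TO_IRD_LIMIT(18) =
 * (18 >> 1) - 1 = 8 recovers the limit; likewise an ORD limit of 8 maps
 * to 9 ORRQ slots and back.
 */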

#define BNXT_QPLIB_MAX_NQE_ENTRY_SIZE	sizeof(struct nq_base)

#define NQE_CNT_PER_PG		(PAGE_SIZE / BNXT_QPLIB_MAX_NQE_ENTRY_SIZE)
#define NQE_MAX_IDX_PER_PG	(NQE_CNT_PER_PG - 1)
#define NQE_PG(x)		(((x) & ~NQE_MAX_IDX_PER_PG) / NQE_CNT_PER_PG)
#define NQE_IDX(x)		((x) & NQE_MAX_IDX_PER_PG)

#define NQE_CMP_VALID(hdr, pass)					\
	(!!(le32_to_cpu((hdr)->info63_v[0]) & NQ_BASE_V) ==		\
	 !((pass) & BNXT_QPLIB_FLAG_EPOCH_CONS_MASK))
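
/*
 * Same page/index split and epoch-based validity scheme as the CQE
 * macros above, applied to notification-queue entries via the NQ_BASE_V
 * valid bit.
 */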

#define BNXT_QPLIB_NQE_MAX_CNT		(128 * 1024)

#define NQ_CONS_PCI_BAR_REGION		2
#define NQ_DB_KEY_CP			(0x2 << CMPL_DOORBELL_KEY_SFT)
#define NQ_DB_IDX_VALID			CMPL_DOORBELL_IDX_VALID
#define NQ_DB_IRQ_DIS			CMPL_DOORBELL_MASK
#define NQ_DB_CP_FLAGS_REARM		(NQ_DB_KEY_CP |			\
					 NQ_DB_IDX_VALID)
#define NQ_DB_CP_FLAGS			(NQ_DB_KEY_CP |			\
					 NQ_DB_IDX_VALID |		\
					 NQ_DB_IRQ_DIS)

struct bnxt_qplib_nq_db {
	struct bnxt_qplib_reg_desc	reg;
	struct bnxt_qplib_db_info	dbinfo;
};

typedef int (*cqn_handler_t)(struct bnxt_qplib_nq *nq,
			     struct bnxt_qplib_cq *cq);
typedef int (*srqn_handler_t)(struct bnxt_qplib_nq *nq,
			      struct bnxt_qplib_srq *srq, u8 event);

struct bnxt_qplib_nq {
	struct pci_dev			*pdev;
	struct bnxt_qplib_res		*res;
	struct bnxt_qplib_hwq		hwq;
	struct bnxt_qplib_nq_db		nq_db;
	struct tasklet_struct		nq_tasklet;
	cqn_handler_t			cqn_handler;
	srqn_handler_t			srqn_handler;
	struct workqueue_struct		*cqn_wq;
};

struct bnxt_qplib_nq_work {
	struct work_struct	work;
	struct bnxt_qplib_nq	*nq;
	struct bnxt_qplib_cq	*cq;
};

void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill);
void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq);
int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
			    int msix_vector, bool need_init);
int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
			 int nq_idx, int msix_vector, int bar_reg_offset,
			 cqn_handler_t cqn_handler,
			 srqn_handler_t srq_handler);
int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_srq *srq);
int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_srq *srq);
int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_srq *srq);
void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
			    struct bnxt_qplib_srq *srq);
int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
			     struct bnxt_qplib_swqe *wqe);
int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp);
void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res,
			    struct bnxt_qplib_qp *qp);
void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge);
void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge);
u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp);
dma_addr_t bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp,
					    u32 index);
void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp);
int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
			 struct bnxt_qplib_swqe *wqe);
void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp);
int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
			 struct bnxt_qplib_swqe *wqe);
int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
int bnxt_qplib_resize_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq,
			 int new_cqes);
void bnxt_qplib_resize_cq_complete(struct bnxt_qplib_res *res,
				   struct bnxt_qplib_cq *cq);
int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
		       int num, struct bnxt_qplib_qp **qp);
bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq);
void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type);
void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq);
int bnxt_qplib_alloc_nq(struct bnxt_qplib_res *res, struct bnxt_qplib_nq *nq);
void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp);
void bnxt_qplib_acquire_cq_locks(struct bnxt_qplib_qp *qp,
				 unsigned long *flags);
void bnxt_qplib_release_cq_locks(struct bnxt_qplib_qp *qp,
				 unsigned long *flags);
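
/*
 * Illustrative sketch (not from the driver): a control-path caller that
 * moves a QP to the flush lists takes both CQ flush locks around the
 * update, matching the locking scheme described for bnxt_qplib_cq:
 *
 *	unsigned long flags;
 *
 *	bnxt_qplib_acquire_cq_locks(qp, &flags);
 *	bnxt_qplib_add_flush_qp(qp);
 *	bnxt_qplib_release_cq_locks(qp, &flags);
 */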

int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
				  struct bnxt_qplib_cqe *cqe,
				  int num_cqes);
void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp);
void bnxt_re_synchronize_nq(struct bnxt_qplib_nq *nq);

static inline void *bnxt_qplib_get_swqe(struct bnxt_qplib_q *que, u32 *swq_idx)
{
	u32 idx;

	idx = que->swq_start;
	if (swq_idx)
		*swq_idx = idx;
	return &que->swq[idx];
}

static inline void bnxt_qplib_swq_mod_start(struct bnxt_qplib_q *que, u32 idx)
{
	que->swq_start = que->swq[idx].next_idx;
}
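
/*
 * Editor's note: the swq array behaves as a free list linked through
 * next_idx. bnxt_qplib_get_swqe() peeks at the entry at swq_start, and
 * once the caller has consumed it, bnxt_qplib_swq_mod_start() advances
 * swq_start to that entry's successor.
 */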

static inline u32 bnxt_qplib_get_depth(struct bnxt_qplib_q *que, u8 wqe_mode,
				       bool is_sq)
{
	u32 slots;

	/* Queue depth is the number of slots. */
	slots = (que->wqe_size * que->max_wqe) / sizeof(struct sq_sge);
	/* For variable WQE mode, need to align the slots to 256 */
	if (wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE && is_sq)
		slots = ALIGN(slots, BNXT_VAR_MAX_SLOT_ALIGN);
	return slots;
}
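
/*
 * Worked example (editor's note): the 16-byte sq_sge is the slot unit,
 * so with wqe_size = 128 and max_wqe = 256 the depth is
 * (128 * 256) / 16 = 2048 slots; in variable WQE mode the SQ result is
 * then rounded up to a multiple of BNXT_VAR_MAX_SLOT_ALIGN.
 */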

static inline u32 bnxt_qplib_set_sq_size(struct bnxt_qplib_q *que, u8 wqe_mode)
{
	return (wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
		que->max_wqe : bnxt_qplib_get_depth(que, wqe_mode, true);
}

static inline u32 bnxt_qplib_set_sq_max_slot(u8 wqe_mode)
{
	return (wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
		sizeof(struct sq_send) / sizeof(struct sq_sge) : 1;
}
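
/*
 * Editor's note: in static WQE mode every SQ WQE occupies a fixed
 * sizeof(struct sq_send) / sizeof(struct sq_sge) slots (8 if sq_send is
 * the usual 128 bytes and sq_sge 16 bytes); in variable mode WQEs are
 * sized per post, so the slot granule reported here is 1.
 */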

static inline u32 bnxt_qplib_set_rq_max_slot(u32 wqe_size)
{
	return (wqe_size / sizeof(struct sq_sge));
}

static inline u16 __xlate_qfd(u16 delta, u16 wqe_bytes)
{
	/* For Cu/Wh delta = 128, stride = 16, wqe_bytes = 128.
	 * For Gen-p5 B/C mode delta = 0, stride = 16, wqe_bytes = 128.
	 * For Gen-p5 delta = 0, stride = 16, 32 <= wqe_bytes <= 512
	 * when 8916 is disabled.
	 */
	return (delta * wqe_bytes) / sizeof(struct sq_sge);
}
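
/*
 * Worked example (editor's note): this translates the queue-full delta
 * from WQE units to 16-byte slots. For Cu/Wh, delta = 128 WQEs of
 * 128 bytes gives (128 * 128) / 16 = 1024 slots; for Gen-p5, delta = 0
 * and the result is 0 regardless of wqe_bytes.
 */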

static inline u16 bnxt_qplib_calc_ilsize(struct bnxt_qplib_swqe *wqe, u16 max)
{
	u16 size = 0;
	int indx;

	for (indx = 0; indx < wqe->num_sge; indx++)
		size += wqe->sg_list[indx].size;
	if (size > max)
		size = max;

	return size;
}
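
/*
 * Editor's note: the inline size is just the sum of the SGE lengths,
 * clamped to the caller's limit. E.g. two SGEs of 64 and 48 bytes sum to
 * 112, which is clamped to 96 when max is
 * BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH.
 */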

/* MSN table update inline */
static inline __le64 bnxt_re_update_msn_tbl(u32 st_idx, u32 npsn, u32 start_psn)
{
	return cpu_to_le64((((u64)(st_idx) << SQ_MSN_SEARCH_START_IDX_SFT) &
			    SQ_MSN_SEARCH_START_IDX_MASK) |
			   (((u64)(npsn) << SQ_MSN_SEARCH_NEXT_PSN_SFT) &
			    SQ_MSN_SEARCH_NEXT_PSN_MASK) |
			   (((start_psn) << SQ_MSN_SEARCH_START_PSN_SFT) &
			    SQ_MSN_SEARCH_START_PSN_MASK));
}
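
/*
 * Editor's note: this packs an MSN-table entry into a single 64-bit word
 * using the SQ_MSN_SEARCH_* shifts and masks from roce_hsi.h: the SQ
 * slot index of the WQE (st_idx), the PSN following the WQE (npsn) and
 * the WQE's starting PSN (start_psn), so hardware can map an incoming
 * response PSN back to its WQE.
 */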

static inline bool __is_var_wqe(struct bnxt_qplib_qp *qp)
{
	return (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE);
}

static inline bool __is_err_cqe_for_var_wqe(struct bnxt_qplib_qp *qp, u8 status)
{
	return (status != CQ_REQ_STATUS_OK) && __is_var_wqe(qp);
}

#endif /* __BNXT_QPLIB_FP_H__ */