/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: Fast Path Operators (header)
 */
#ifndef __BNXT_QPLIB_FP_H__
#define __BNXT_QPLIB_FP_H__
struct bnxt_qplib_srq {
	struct bnxt_qplib_pd		*pd;
	struct bnxt_qplib_dpi		*dpi;
	void __iomem			*dbr_base;
	struct bnxt_qplib_cq		*cq;
	struct bnxt_qplib_hwq		hwq;
	struct bnxt_qplib_swq		*swq;
	struct bnxt_qplib_sg_info	sg_info;
	u16				eventq_hw_ring_id;
	spinlock_t			lock; /* protect SRQE link list */
};
struct bnxt_qplib_sge {
	u64				addr;
	u32				lkey;
	u32				size;
};
#define BNXT_QPLIB_MAX_SQE_ENTRY_SIZE	sizeof(struct sq_send)

#define SQE_CNT_PER_PG		(PAGE_SIZE / BNXT_QPLIB_MAX_SQE_ENTRY_SIZE)
#define SQE_MAX_IDX_PER_PG	(SQE_CNT_PER_PG - 1)
static inline u32 get_sqe_pg(u32 val)
{
	return ((val & ~SQE_MAX_IDX_PER_PG) / SQE_CNT_PER_PG);
}

static inline u32 get_sqe_idx(u32 val)
{
	return (val & SQE_MAX_IDX_PER_PG);
}
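
/*
 * Worked example (hypothetical entry size, purely for illustration): if an
 * SQE occupied 64 bytes on a 4K page, SQE_CNT_PER_PG would be 64 and
 * SQE_MAX_IDX_PER_PG 63, so producer index 130 would map to page
 * get_sqe_pg(130) = 2, slot get_sqe_idx(130) = 2. The mask trick only works
 * because the per-page count is a power of two; the PSNE, RQE, CQE and NQE
 * helpers and macros below follow the same pattern.
 */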
#define BNXT_QPLIB_MAX_PSNE_ENTRY_SIZE	sizeof(struct sq_psn_search)

#define PSNE_CNT_PER_PG		(PAGE_SIZE / BNXT_QPLIB_MAX_PSNE_ENTRY_SIZE)
#define PSNE_MAX_IDX_PER_PG	(PSNE_CNT_PER_PG - 1)

static inline u32 get_psne_pg(u32 val)
{
	return ((val & ~PSNE_MAX_IDX_PER_PG) / PSNE_CNT_PER_PG);
}

static inline u32 get_psne_idx(u32 val)
{
	return (val & PSNE_MAX_IDX_PER_PG);
}
#define BNXT_QPLIB_QP_MAX_SGL	6
struct bnxt_qplib_swq {
	struct sq_psn_search		*psn_search;
	struct sq_psn_search_ext	*psn_ext;
};
struct bnxt_qplib_swqe {
#define BNXT_QPLIB_FENCE_WRID	0x46454E43 /* "FENC" */
	u64				wr_id;
	u8				type;
#define BNXT_QPLIB_SWQE_TYPE_SEND			0
#define BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM		1
#define BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV		2
#define BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE			4
#define BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM	5
#define BNXT_QPLIB_SWQE_TYPE_RDMA_READ			6
#define BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP		8
#define BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD	11
#define BNXT_QPLIB_SWQE_TYPE_LOCAL_INV			12
#define BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR		13
#define BNXT_QPLIB_SWQE_TYPE_REG_MR			13
#define BNXT_QPLIB_SWQE_TYPE_BIND_MW			14
#define BNXT_QPLIB_SWQE_TYPE_RECV			128
#define BNXT_QPLIB_SWQE_TYPE_RECV_RDMA_IMM		129
	u8				flags;
#define BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP		BIT(0)
#define BNXT_QPLIB_SWQE_FLAGS_RD_ATOMIC_FENCE		BIT(1)
#define BNXT_QPLIB_SWQE_FLAGS_UC_FENCE			BIT(2)
#define BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT		BIT(3)
#define BNXT_QPLIB_SWQE_FLAGS_INLINE			BIT(4)
	struct bnxt_qplib_sge		sg_list[BNXT_QPLIB_QP_MAX_SGL];
	/* Max inline data is 96 bytes */
#define BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH	96
	u8		inline_data[BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH];
	/*
	 * Per-opcode payload follows: send (with imm/inval key), raw
	 * Ethernet and QP1 send, RDMA write (with imm)/read, atomic
	 * cmp/swap and fetch/add, and local invalidate.
	 */
#define BNXT_QPLIB_SWQE_PAGE_SIZE_4K		0
#define BNXT_QPLIB_SWQE_PAGE_SIZE_8K		1
#define BNXT_QPLIB_SWQE_PAGE_SIZE_64K		4
#define BNXT_QPLIB_SWQE_PAGE_SIZE_256K		6
#define BNXT_QPLIB_SWQE_PAGE_SIZE_1M		8
#define BNXT_QPLIB_SWQE_PAGE_SIZE_2M		9
#define BNXT_QPLIB_SWQE_PAGE_SIZE_4M		10
#define BNXT_QPLIB_SWQE_PAGE_SIZE_1G		18
#define PAGE_SHIFT_4K	12
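	/*
	 * The page-size encodings above are the page shift relative to a 4K
	 * base: value = log2(page_size) - PAGE_SHIFT_4K, e.g. 64K -> 16 - 12
	 * = 4 and 1G -> 30 - 12 = 18.
	 */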
	dma_addr_t			pbl_dma_ptr;
#define BNXT_QPLIB_BIND_SWQE_ACCESS_LOCAL_WRITE		BIT(0)
#define BNXT_QPLIB_BIND_SWQE_ACCESS_REMOTE_READ		BIT(1)
#define BNXT_QPLIB_BIND_SWQE_ACCESS_REMOTE_WRITE	BIT(2)
#define BNXT_QPLIB_BIND_SWQE_ACCESS_REMOTE_ATOMIC	BIT(3)
#define BNXT_QPLIB_BIND_SWQE_ACCESS_WINDOW_BIND		BIT(4)
};
#define BNXT_QPLIB_MAX_RQE_ENTRY_SIZE	sizeof(struct rq_wqe)

#define RQE_CNT_PER_PG		(PAGE_SIZE / BNXT_QPLIB_MAX_RQE_ENTRY_SIZE)
#define RQE_MAX_IDX_PER_PG	(RQE_CNT_PER_PG - 1)
#define RQE_PG(x)		(((x) & ~RQE_MAX_IDX_PER_PG) / RQE_CNT_PER_PG)
#define RQE_IDX(x)		((x) & RQE_MAX_IDX_PER_PG)
struct bnxt_qplib_q {
	struct bnxt_qplib_hwq		hwq;
	struct bnxt_qplib_swq		*swq;
	struct bnxt_qplib_sg_info	sg_info;
};
struct bnxt_qplib_qp {
	struct bnxt_qplib_pd		*pd;
	struct bnxt_qplib_dpi		*dpi;
	struct bnxt_qplib_chip_ctx	*cctx;
#define BNXT_QPLIB_QP_ID_INVALID	0xFFFFFFFF
	bool				en_sqd_async_notify;
	u32				max_dest_rd_atomic;
	struct bnxt_qplib_ah		ah;
#define BTH_PSN_MASK			((1 << 24) - 1)
	struct bnxt_qplib_q		sq;
	struct bnxt_qplib_q		rq;
	struct bnxt_qplib_srq		*srq;
	struct bnxt_qplib_cq		*scq;
	struct bnxt_qplib_cq		*rcq;
	struct bnxt_qplib_hwq		irrq;
	struct bnxt_qplib_hwq		orrq;
	/* Header buffer for QP1 */
	/*
	 * Buffer space for ETH(14), IP or GRH(40), UDP header(8)
	 * and ib_bth + ib_deth (20).
	 * Max required is 82 when RoCE V2 is enabled.
	 */
#define BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2	86
	/* Ethernet header	=  14 */
	/* ib_grh		=  40 (provided by MAD) */
	/* ib_bth + ib_deth	=  20 */
	/* MAD			= 256 (provided by MAD) */
#define BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE	14
#define BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2	512
#define BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4	20
#define BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6	40
#define BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE	20
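	/*
	 * Worked out: 14 (ETH) + 40 (GRH) + 8 (UDP) + 20 (BTH + DETH) = 82
	 * bytes; the SQ define above rounds this up to 86, presumably as
	 * alignment headroom.
	 */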
	dma_addr_t			sq_hdr_buf_map;
	dma_addr_t			rq_hdr_buf_map;
	struct list_head		sq_flush;
	struct list_head		rq_flush;
};
#define BNXT_QPLIB_MAX_CQE_ENTRY_SIZE	sizeof(struct cq_base)

#define CQE_CNT_PER_PG		(PAGE_SIZE / BNXT_QPLIB_MAX_CQE_ENTRY_SIZE)
#define CQE_MAX_IDX_PER_PG	(CQE_CNT_PER_PG - 1)
#define CQE_PG(x)		(((x) & ~CQE_MAX_IDX_PER_PG) / CQE_CNT_PER_PG)
#define CQE_IDX(x)		((x) & CQE_MAX_IDX_PER_PG)
#define ROCE_CQE_CMP_V			0
#define CQE_CMP_VALID(hdr, raw_cons, cp_bit)			\
	(!!((hdr)->cqe_type_toggle & CQ_BASE_TOGGLE) ==		\
	   !((raw_cons) & (cp_bit)))
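
/*
 * How the check reads: hardware flips the CQE toggle bit on each pass over
 * the ring, and the ring-size bit (cp_bit) of the driver's raw consumer
 * index flips on each wrap, so a CQE is valid exactly when the two phases
 * agree. This lets new entries be detected without ever zeroing consumed
 * ones.
 */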
static inline bool bnxt_qplib_queue_full(struct bnxt_qplib_q *qplib_q)
{
	return HWQ_CMP((qplib_q->hwq.prod + qplib_q->q_full_delta),
		       &qplib_q->hwq) == HWQ_CMP(qplib_q->hwq.cons,
						 &qplib_q->hwq);
}
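
/*
 * The queue is reported full once the producer, padded by q_full_delta
 * spare slots, would land on the consumer (HWQ_CMP reduces an index modulo
 * the ring size), so the delta acts as reserved headroom between producer
 * and consumer.
 */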
struct bnxt_qplib_cqe {
	u16				raweth_qp1_flags;
	u16				raweth_qp1_errors;
	u16				raweth_qp1_cfa_code;
	u32				raweth_qp1_flags2;
	u32				raweth_qp1_metadata;
	u8				raweth_qp1_payload_offset;
};
#define BNXT_QPLIB_QUEUE_START_PERIOD		0x01
struct bnxt_qplib_cq {
	struct bnxt_qplib_dpi		*dpi;
	void __iomem			*dbr_base;
	struct bnxt_qplib_hwq		hwq;
	struct bnxt_qplib_nq		*nq;
	bool				resize_in_progress;
	struct bnxt_qplib_sg_info	sg_info;
#define CQ_RESIZE_WAIT_TIME_MS		500
#define CQ_FLAGS_RESIZE_IN_PROG		1
	wait_queue_head_t		waitq;
	struct list_head		sqf_head, rqf_head;
	spinlock_t			compl_lock; /* synch CQ handlers */
	/*
	 * A QP can move to the error state via modify_qp, an async error
	 * event, or an error CQE seen in poll_cq. When a QP moves to error,
	 * it is added to two flush lists, one each for the SQ and the RQ.
	 * Each flush list is protected by qplib_cq->flush_lock, and the
	 * flush_locks of both the scq and the rcq must be held while moving
	 * a QP to error. The control path operations (modify_qp and async
	 * error events) are synchronized with poll_cq using the upper-level
	 * CQ locks (bnxt_re_cq->cq_lock) of both the SCQ and the RCQ.
	 * qplib_cq->flush_lock is still required to synchronize two
	 * instances of poll_cq on the same QP while they manipulate the
	 * flush list.
	 */
	spinlock_t			flush_lock; /* QP flush management */
};
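
/*
 * A minimal usage sketch of the scheme above (illustrative only): code that
 * moves a QP to error or edits its flush membership brackets the work with
 * the helpers declared at the bottom of this header:
 *
 *	unsigned long flags;
 *
 *	bnxt_qplib_acquire_cq_locks(qp, &flags);
 *	// move QP to error / manipulate qp->sq_flush and qp->rq_flush
 *	bnxt_qplib_release_cq_locks(qp, &flags);
 */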
#define BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE	sizeof(struct xrrq_irrq)
#define BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE	sizeof(struct xrrq_orrq)
#define IRD_LIMIT_TO_IRRQ_SLOTS(x)	(2 * (x) + 2)
#define IRRQ_SLOTS_TO_IRD_LIMIT(s)	(((s) >> 1) - 1)
#define ORD_LIMIT_TO_ORRQ_SLOTS(x)	((x) + 1)
#define ORRQ_SLOTS_TO_ORD_LIMIT(s)	((s) - 1)
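
/*
 * Worked numbers: an IRD limit of 7 needs IRD_LIMIT_TO_IRRQ_SLOTS(7) =
 * 2 * 7 + 2 = 16 IRRQ slots, and IRRQ_SLOTS_TO_IRD_LIMIT(16) =
 * (16 >> 1) - 1 = 7 recovers it; an ORD limit of 15 likewise maps to 16
 * ORRQ slots and back.
 */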
#define BNXT_QPLIB_MAX_NQE_ENTRY_SIZE	sizeof(struct nq_base)

#define NQE_CNT_PER_PG		(PAGE_SIZE / BNXT_QPLIB_MAX_NQE_ENTRY_SIZE)
#define NQE_MAX_IDX_PER_PG	(NQE_CNT_PER_PG - 1)
#define NQE_PG(x)		(((x) & ~NQE_MAX_IDX_PER_PG) / NQE_CNT_PER_PG)
#define NQE_IDX(x)		((x) & NQE_MAX_IDX_PER_PG)

#define NQE_CMP_VALID(hdr, raw_cons, cp_bit)			\
	(!!(le32_to_cpu((hdr)->info63_v[0]) & NQ_BASE_V) ==	\
	   !((raw_cons) & (cp_bit)))
#define BNXT_QPLIB_NQE_MAX_CNT		(128 * 1024)

#define NQ_CONS_PCI_BAR_REGION		2
#define NQ_DB_KEY_CP			(0x2 << CMPL_DOORBELL_KEY_SFT)
#define NQ_DB_IDX_VALID			CMPL_DOORBELL_IDX_VALID
#define NQ_DB_IRQ_DIS			CMPL_DOORBELL_MASK
#define NQ_DB_CP_FLAGS_REARM		(NQ_DB_KEY_CP |		\
					 NQ_DB_IDX_VALID)
#define NQ_DB_CP_FLAGS			(NQ_DB_KEY_CP |		\
					 NQ_DB_IDX_VALID |	\
					 NQ_DB_IRQ_DIS)
static inline void bnxt_qplib_ring_nq_db64(void __iomem *db, u32 index,
					   u32 xid, bool arm)
{
	u64 val;

	val = xid & DBC_DBC_XID_MASK;
	val |= DBC_DBC_PATH_ROCE;
	val |= arm ? DBC_DBC_TYPE_NQ_ARM : DBC_DBC_TYPE_NQ;
	val <<= 32;
	val |= index & DBC_DBC_INDEX_MASK;
	writeq(val, db);
}
static inline void bnxt_qplib_ring_nq_db_rearm(void __iomem *db, u32 raw_cons,
					       u32 max_elements, u32 xid,
					       bool gen_p5)
{
	u32 index = raw_cons & (max_elements - 1);

	if (gen_p5)
		bnxt_qplib_ring_nq_db64(db, index, xid, true);
	else
		writel(NQ_DB_CP_FLAGS_REARM | (index & DBC_DBC32_XID_MASK), db);
}
static inline void bnxt_qplib_ring_nq_db(void __iomem *db, u32 raw_cons,
					 u32 max_elements, u32 xid,
					 bool gen_p5)
{
	u32 index = raw_cons & (max_elements - 1);

	if (gen_p5)
		bnxt_qplib_ring_nq_db64(db, index, xid, false);
	else
		writel(NQ_DB_CP_FLAGS | (index & DBC_DBC32_XID_MASK), db);
}
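
/*
 * Typical use (sketch with illustrative variable names): once an NQ
 * interrupt handler has consumed entries and advanced its raw consumer
 * index, it re-arms notifications with
 *
 *	bnxt_qplib_ring_nq_db_rearm(nq->bar_reg_iomem, raw_cons,
 *				    nq->hwq.max_elements, ring_id, gen_p5);
 *
 * where ring_id and gen_p5 stand in for however the caller tracks the NQ's
 * doorbell XID and whether the device uses the 64-bit doorbell format.
 */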
struct bnxt_qplib_nq {
	struct pci_dev			*pdev;
	struct bnxt_qplib_res		*res;
	struct tasklet_struct		worker;
	struct bnxt_qplib_hwq		hwq;
	void __iomem			*bar_reg_iomem;
	int				(*cqn_handler)(struct bnxt_qplib_nq *nq,
						       struct bnxt_qplib_cq *cq);
	int				(*srqn_handler)(struct bnxt_qplib_nq *nq,
							struct bnxt_qplib_srq *srq,
							u8 event);
	struct workqueue_struct		*cqn_wq;
};
struct bnxt_qplib_nq_work {
	struct work_struct	work;
	struct bnxt_qplib_nq	*nq;
	struct bnxt_qplib_cq	*cq;
};
void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill);
void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq);
int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
			    int msix_vector, bool need_init);
int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
			 int nq_idx, int msix_vector, int bar_reg_offset,
			 int (*cqn_handler)(struct bnxt_qplib_nq *nq,
					    struct bnxt_qplib_cq *cq),
			 int (*srqn_handler)(struct bnxt_qplib_nq *nq,
					     struct bnxt_qplib_srq *srq,
					     u8 event));
int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_srq *srq);
int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_srq *srq);
int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_srq *srq);
void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
			    struct bnxt_qplib_srq *srq);
int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
			     struct bnxt_qplib_swqe *wqe);
int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp);
void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res,
			    struct bnxt_qplib_qp *qp);
void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge);
void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge);
u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp);
dma_addr_t bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp,
					    u32 index);
void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp);
int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
			 struct bnxt_qplib_swqe *wqe);
void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp);
int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
			 struct bnxt_qplib_swqe *wqe);
int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
		       int num, struct bnxt_qplib_qp **qp);
bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq);
void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type);
void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq);
int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq);
void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp);
void bnxt_qplib_acquire_cq_locks(struct bnxt_qplib_qp *qp,
				 unsigned long *flags);
void bnxt_qplib_release_cq_locks(struct bnxt_qplib_qp *qp,
				 unsigned long *flags);
int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
				  struct bnxt_qplib_cqe *cqe,
				  int num_cqes);
void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp);
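
/*
 * A minimal sketch of the post/poll fast path these entry points expose
 * (illustrative only; locking, SGE setup and error handling omitted):
 *
 *	struct bnxt_qplib_swqe wqe = {};
 *	struct bnxt_qplib_cqe cqe[16];
 *	struct bnxt_qplib_qp *cur_qp = NULL;
 *	int polled;
 *
 *	wqe.type = BNXT_QPLIB_SWQE_TYPE_SEND;
 *	wqe.flags = BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
 *	// fill wqe.sg_list[] with the payload SGEs
 *	if (!bnxt_qplib_post_send(qp, &wqe))
 *		bnxt_qplib_post_send_db(qp);
 *	polled = bnxt_qplib_poll_cq(cq, cqe, 16, &cur_qp);
 */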
#endif /* __BNXT_QPLIB_FP_H__ */