/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: IB Verbs interpreter (header)
 */

#ifndef __BNXT_RE_IB_VERBS_H__
#define __BNXT_RE_IB_VERBS_H__

struct bnxt_re_gid_ctx {
	u32			idx;
	u32			refcnt;
};

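/*
 * bnxt_re_gid_ctx is the per-entry cookie stored through the 'context'
 * argument of bnxt_re_add_gid()/bnxt_re_del_gid() declared below.  The
 * fence data that follows bundles a driver-owned buffer, the MR covering
 * it and a pre-built bind WQE used when a fence operation must be posted.
 */
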
#define BNXT_RE_FENCE_BYTES	64
struct bnxt_re_fence_data {
	u32 size;
	u8 va[BNXT_RE_FENCE_BYTES];
	dma_addr_t dma_addr;
	struct bnxt_re_mr *mr;
	struct ib_mw *mw;
	struct bnxt_qplib_swqe bind_wqe;
	u32 bind_rkey;
};

struct bnxt_re_pd {
	struct ib_pd		ib_pd;
	struct bnxt_re_dev	*rdev;
	struct bnxt_qplib_pd	qplib_pd;
	struct bnxt_re_fence_data fence;
};

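/*
 * The same layout repeats for the resource objects below: the ib_* object
 * handed to the RDMA core is paired with its bnxt_qplib_* counterpart used
 * by the qplib/firmware layer, plus a back-pointer to the owning
 * bnxt_re_dev.
 */
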
struct bnxt_re_ah {
	struct ib_ah		ib_ah;
	struct bnxt_re_dev	*rdev;
	struct bnxt_qplib_ah	qplib_ah;
};

struct bnxt_re_srq {
	struct ib_srq		ib_srq;
	struct bnxt_re_dev	*rdev;
	u32			srq_limit;
	struct bnxt_qplib_srq	qplib_srq;
	struct ib_umem		*umem;
	spinlock_t		lock;		/* protect srq */
};

struct bnxt_re_qp {
	struct list_head	list;
	struct bnxt_re_dev	*rdev;
	struct ib_qp		ib_qp;
	spinlock_t		sq_lock;	/* protect sq */
	spinlock_t		rq_lock;	/* protect rq */
	struct bnxt_qplib_qp	qplib_qp;
	struct ib_umem		*sumem;
	struct ib_umem		*rumem;
	/* QP1 */
	u32			send_psn;
	struct ib_ud_header	qp1_hdr;
	struct bnxt_re_cq	*scq;
	struct bnxt_re_cq	*rcq;
};

struct bnxt_re_cq {
	struct ib_cq		ib_cq;
	struct bnxt_re_dev	*rdev;
	spinlock_t		cq_lock;	/* protect cq */
	u16			cq_count;
	u16			cq_period;
	struct bnxt_qplib_cq	qplib_cq;
	struct bnxt_qplib_cqe	*cql;
#define MAX_CQL_PER_POLL	1024
	u32			max_cql;
	struct ib_umem		*umem;
};

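/*
 * cql is a scratch array of qplib CQEs consumed while polling;
 * MAX_CQL_PER_POLL caps how many completions are budgeted for a single
 * bnxt_re_poll_cq() call.
 */
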
struct bnxt_re_mr {
	struct bnxt_re_dev	*rdev;
	struct ib_mr		ib_mr;
	struct ib_umem		*ib_umem;
	struct bnxt_qplib_mrw	qplib_mr;
	u32			npages;
	u64			*pages;
	struct bnxt_qplib_frpl	qplib_frpl;
};

struct bnxt_re_frpl {
	struct bnxt_re_dev		*rdev;
	struct bnxt_qplib_frpl		qplib_frpl;
	u64				*page_list;
};

struct bnxt_re_mw {
	struct bnxt_re_dev	*rdev;
	struct ib_mw		ib_mw;
	struct bnxt_qplib_mrw	qplib_mw;
};

struct bnxt_re_ucontext {
	struct ib_ucontext	ib_uctx;
	struct bnxt_re_dev	*rdev;
	struct bnxt_qplib_dpi	dpi;
	void			*shpg;
	spinlock_t		sh_lock;	/* protect shpg */
};

static inline u16 bnxt_re_get_swqe_size(int nsge)
{
	return sizeof(struct sq_send_hdr) + nsge * sizeof(struct sq_sge);
}

static inline u16 bnxt_re_get_rwqe_size(int nsge)
{
	return sizeof(struct rq_wqe_hdr) + (nsge * sizeof(struct sq_sge));
}

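/*
 * Example: a send WQE with 2 SGEs occupies
 * sizeof(struct sq_send_hdr) + 2 * sizeof(struct sq_sge) bytes; receive
 * WQEs are sized the same way from struct rq_wqe_hdr.
 */
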
int bnxt_re_query_device(struct ib_device *ibdev,
			 struct ib_device_attr *ib_attr,
			 struct ib_udata *udata);
int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
		       struct ib_port_attr *port_attr);
int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num,
			       struct ib_port_immutable *immutable);
void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str);
int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num,
		       u16 index, u16 *pkey);
int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context);
int bnxt_re_add_gid(const struct ib_gid_attr *attr, void **context);
int bnxt_re_query_gid(struct ib_device *ibdev, u8 port_num,
		      int index, union ib_gid *gid);
enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
					    u8 port_num);
int bnxt_re_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
int bnxt_re_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
int bnxt_re_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
		      struct ib_udata *udata);
int bnxt_re_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
int bnxt_re_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
int bnxt_re_destroy_ah(struct ib_ah *ah, u32 flags);
int bnxt_re_create_srq(struct ib_srq *srq,
		       struct ib_srq_init_attr *srq_init_attr,
		       struct ib_udata *udata);
int bnxt_re_modify_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr,
		       enum ib_srq_attr_mask srq_attr_mask,
		       struct ib_udata *udata);
int bnxt_re_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
int bnxt_re_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
int bnxt_re_post_srq_recv(struct ib_srq *srq, const struct ib_recv_wr *recv_wr,
			  const struct ib_recv_wr **bad_recv_wr);
struct ib_qp *bnxt_re_create_qp(struct ib_pd *pd,
				struct ib_qp_init_attr *qp_init_attr,
				struct ib_udata *udata);
int bnxt_re_modify_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
		      int qp_attr_mask, struct ib_udata *udata);
int bnxt_re_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
		     int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
int bnxt_re_destroy_qp(struct ib_qp *qp, struct ib_udata *udata);
int bnxt_re_post_send(struct ib_qp *qp, const struct ib_send_wr *send_wr,
		      const struct ib_send_wr **bad_send_wr);
int bnxt_re_post_recv(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
		      const struct ib_recv_wr **bad_recv_wr);
int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		      struct ib_udata *udata);
int bnxt_re_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
int bnxt_re_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
int bnxt_re_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *pd, int mr_access_flags);

int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset);
struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type mr_type,
			       u32 max_num_sg, struct ib_udata *udata);
int bnxt_re_dereg_mr(struct ib_mr *mr, struct ib_udata *udata);
struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
			       struct ib_udata *udata);
int bnxt_re_dealloc_mw(struct ib_mw *mw);
struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int mr_access_flags,
				  struct ib_udata *udata);
int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata);
void bnxt_re_dealloc_ucontext(struct ib_ucontext *context);
int bnxt_re_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);

unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp);
void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp, unsigned long flags);

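/*
 * bnxt_re_lock_cqs() takes the CQ locks of a QP and returns the saved
 * flags word, which must be handed back to bnxt_re_unlock_cqs(), e.g.:
 *
 *	unsigned long flags;
 *
 *	flags = bnxt_re_lock_cqs(qp);
 *	... touch state shared with qp->scq / qp->rcq ...
 *	bnxt_re_unlock_cqs(qp, flags);
 */
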
#endif /* __BNXT_RE_IB_VERBS_H__ */