/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: IB Verbs interpreter (header)
 */

#ifndef __BNXT_RE_IB_VERBS_H__
#define __BNXT_RE_IB_VERBS_H__

struct bnxt_re_gid_ctx {
	u32			idx;
	u32			refcnt;
};

#define BNXT_RE_FENCE_BYTES	64
struct bnxt_re_fence_data {
	u32 size;
	u8 va[BNXT_RE_FENCE_BYTES];
	dma_addr_t dma_addr;
	struct bnxt_re_mr *mr;
	struct ib_mw *mw;
	struct bnxt_qplib_swqe bind_wqe;
	u32 bind_rkey;
};
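
/*
 * Added note: the fence data backs the driver's software fence -- a
 * reserved MR/MW pair whose bind_wqe can be posted ahead of fenced
 * work requests (see the fence helpers in ib_verbs.c).
 */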

struct bnxt_re_pd {
	struct ib_pd		ib_pd;
	struct bnxt_re_dev	*rdev;
	struct bnxt_qplib_pd	qplib_pd;
	struct bnxt_re_fence_data fence;
	struct rdma_user_mmap_entry *pd_db_mmap;
	struct rdma_user_mmap_entry *pd_wcdb_mmap;
};

struct bnxt_re_ah {
	struct ib_ah		ib_ah;
	struct bnxt_re_dev	*rdev;
	struct bnxt_qplib_ah	qplib_ah;
};

struct bnxt_re_srq {
	struct ib_srq		ib_srq;
	struct bnxt_re_dev	*rdev;
	u32			srq_limit;
	struct bnxt_qplib_srq	qplib_srq;
	struct ib_umem		*umem;
	spinlock_t		lock;		/* protect srq */
	void			*uctx_srq_page;
	struct hlist_node	hash_entry;
};

struct bnxt_re_qp {
	struct ib_qp		ib_qp;
	struct list_head	list;
	struct bnxt_re_dev	*rdev;
	spinlock_t		sq_lock;	/* protect sq */
	spinlock_t		rq_lock;	/* protect rq */
	struct bnxt_qplib_qp	qplib_qp;
	struct ib_umem		*sumem;
	struct ib_umem		*rumem;
	/* QP1 */
	u32			send_psn;
	struct ib_ud_header	qp1_hdr;
	struct bnxt_re_cq	*scq;
	struct bnxt_re_cq	*rcq;
	struct dentry		*dentry;
};

struct bnxt_re_cq {
	struct ib_cq		ib_cq;
	struct bnxt_re_dev	*rdev;
	spinlock_t		cq_lock;	/* protect cq */
	u16			cq_count;
	u16			cq_period;
	struct bnxt_qplib_cq	qplib_cq;
	struct bnxt_qplib_cqe	*cql;
#define MAX_CQL_PER_POLL	1024
	u32			max_cql;
	struct ib_umem		*umem;
	struct ib_umem		*resize_umem;
	int			resize_cqe;
	void			*uctx_cq_page;
	struct hlist_node	hash_entry;
};
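
/*
 * Added note: cql is a per-CQ scratch array -- each bnxt_re_poll_cq()
 * call drains up to MAX_CQL_PER_POLL qplib CQEs into it before
 * translating them into ib_wc entries for the caller.
 */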

struct bnxt_re_mr {
	struct bnxt_re_dev	*rdev;
	struct ib_mr		ib_mr;
	struct ib_umem		*ib_umem;
	struct bnxt_qplib_mrw	qplib_mr;
	u32			npages;
	u64			*pages;
	struct bnxt_qplib_frpl	qplib_frpl;
};

struct bnxt_re_frpl {
	struct bnxt_re_dev	*rdev;
	struct bnxt_qplib_frpl	qplib_frpl;
	u64			*page_list;
};

struct bnxt_re_mw {
	struct bnxt_re_dev	*rdev;
	struct ib_mw		ib_mw;
	struct bnxt_qplib_mrw	qplib_mw;
};

struct bnxt_re_ucontext {
	struct ib_ucontext	ib_uctx;
	struct bnxt_re_dev	*rdev;
	struct bnxt_qplib_dpi	dpi;
	struct bnxt_qplib_dpi	wcdpi;
	void			*shpg;
	spinlock_t		sh_lock;	/* protect shpg */
	struct rdma_user_mmap_entry *shpage_mmap;
	u64 cmask;
};

enum bnxt_re_mmap_flag {
	BNXT_RE_MMAP_SH_PAGE,
	BNXT_RE_MMAP_UC_DB,
	BNXT_RE_MMAP_WC_DB,
	BNXT_RE_MMAP_DBR_PAGE,
	BNXT_RE_MMAP_DBR_BAR,
	BNXT_RE_MMAP_TOGGLE_PAGE,
};

struct bnxt_re_user_mmap_entry {
	struct rdma_user_mmap_entry rdma_entry;
	struct bnxt_re_ucontext *uctx;
	u64 mem_offset;
	u8 mmap_flag;
};
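
/*
 * Added note: mmap_flag selects what bnxt_re_mmap() maps for a given
 * offset. A minimal demux sketch (illustrative, not the full
 * implementation):
 *
 *	struct bnxt_re_user_mmap_entry *entry =
 *		container_of(rdma_entry,
 *			     struct bnxt_re_user_mmap_entry, rdma_entry);
 *
 *	switch (entry->mmap_flag) {
 *	case BNXT_RE_MMAP_SH_PAGE:	map the shared page
 *	case BNXT_RE_MMAP_WC_DB:	map the doorbell write-combining
 *	...
 *	}
 */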

static inline u16 bnxt_re_get_swqe_size(int nsge)
{
	return sizeof(struct sq_send_hdr) + nsge * sizeof(struct sq_sge);
}

static inline u16 bnxt_re_get_rwqe_size(int nsge)
{
	return sizeof(struct rq_wqe_hdr) + (nsge * sizeof(struct sq_sge));
}
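
/*
 * Usage sketch (hypothetical caller): queue element sizes are derived
 * from the requested SGE counts, one struct sq_sge per scatter/gather
 * entry appended to the fixed WQE header:
 *
 *	u16 swqe_sz = bnxt_re_get_swqe_size(init_attr->cap.max_send_sge);
 *	u16 rwqe_sz = bnxt_re_get_rwqe_size(init_attr->cap.max_recv_sge);
 */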

enum {
	BNXT_RE_UCNTX_CAP_POW2_DISABLED = 0x1ULL,
	BNXT_RE_UCNTX_CAP_VAR_WQE_ENABLED = 0x2ULL,
};

static inline u32 bnxt_re_init_depth(u32 ent, struct bnxt_re_ucontext *uctx)
{
	return uctx ? (uctx->cmask & BNXT_RE_UCNTX_CAP_POW2_DISABLED) ?
		ent : roundup_pow_of_two(ent) : ent;
}
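
/*
 * For example, bnxt_re_init_depth(500, uctx) yields 512 unless the
 * user context set BNXT_RE_UCNTX_CAP_POW2_DISABLED, in which case the
 * requested depth of 500 is kept; kernel consumers (uctx == NULL) also
 * get the depth unchanged.
 */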

static inline bool bnxt_re_is_var_size_supported(struct bnxt_re_dev *rdev,
						 struct bnxt_re_ucontext *uctx)
{
	if (uctx)
		return uctx->cmask & BNXT_RE_UCNTX_CAP_VAR_WQE_ENABLED;
	else
		return rdev->chip_ctx->modes.wqe_mode;
}
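
/*
 * Added note: without a user context the chip's configured WQE mode
 * decides; assuming the qplib convention that a nonzero wqe_mode means
 * variable-sized WQEs, this reports variable-WQE support for kernel
 * consumers as well.
 */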

int bnxt_re_query_device(struct ib_device *ibdev,
			 struct ib_device_attr *ib_attr,
			 struct ib_udata *udata);
int bnxt_re_modify_device(struct ib_device *ibdev,
			  int device_modify_mask,
			  struct ib_device_modify *device_modify);
int bnxt_re_query_port(struct ib_device *ibdev, u32 port_num,
		       struct ib_port_attr *port_attr);
int bnxt_re_get_port_immutable(struct ib_device *ibdev, u32 port_num,
			       struct ib_port_immutable *immutable);
void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str);
int bnxt_re_query_pkey(struct ib_device *ibdev, u32 port_num,
		       u16 index, u16 *pkey);
int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context);
int bnxt_re_add_gid(const struct ib_gid_attr *attr, void **context);
int bnxt_re_query_gid(struct ib_device *ibdev, u32 port_num,
		      int index, union ib_gid *gid);
enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
					    u32 port_num);
int bnxt_re_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
int bnxt_re_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
int bnxt_re_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
		      struct ib_udata *udata);
int bnxt_re_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
int bnxt_re_destroy_ah(struct ib_ah *ah, u32 flags);
int bnxt_re_create_srq(struct ib_srq *srq,
		       struct ib_srq_init_attr *srq_init_attr,
		       struct ib_udata *udata);
int bnxt_re_modify_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr,
		       enum ib_srq_attr_mask srq_attr_mask,
		       struct ib_udata *udata);
int bnxt_re_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
int bnxt_re_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
int bnxt_re_post_srq_recv(struct ib_srq *srq, const struct ib_recv_wr *recv_wr,
			  const struct ib_recv_wr **bad_recv_wr);
int bnxt_re_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *qp_init_attr,
		      struct ib_udata *udata);
int bnxt_re_modify_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
		      int qp_attr_mask, struct ib_udata *udata);
int bnxt_re_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
		     int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
int bnxt_re_destroy_qp(struct ib_qp *qp, struct ib_udata *udata);
int bnxt_re_post_send(struct ib_qp *qp, const struct ib_send_wr *send_wr,
		      const struct ib_send_wr **bad_send_wr);
int bnxt_re_post_recv(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
		      const struct ib_recv_wr **bad_recv_wr);
int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		      struct uverbs_attr_bundle *attrs);
int bnxt_re_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
int bnxt_re_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
int bnxt_re_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
int bnxt_re_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *pd, int mr_access_flags);

int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset);
struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type mr_type,
			       u32 max_num_sg);
int bnxt_re_dereg_mr(struct ib_mr *mr, struct ib_udata *udata);
struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
			       struct ib_udata *udata);
int bnxt_re_dealloc_mw(struct ib_mw *mw);
struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int mr_access_flags,
				  struct ib_udata *udata);
struct ib_mr *bnxt_re_reg_user_mr_dmabuf(struct ib_pd *ib_pd, u64 start,
					 u64 length, u64 virt_addr,
					 int fd, int mr_access_flags,
					 struct uverbs_attr_bundle *attrs);
int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata);
void bnxt_re_dealloc_ucontext(struct ib_ucontext *context);
int bnxt_re_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
void bnxt_re_mmap_free(struct rdma_user_mmap_entry *rdma_entry);

unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp);
void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp, unsigned long flags);
#endif /* __BNXT_RE_IB_VERBS_H__ */