/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: IB Verbs interpreter
 */
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_cache.h>

#include "qplib_res.h"
#include "qplib_rcfw.h"

#include <rdma/bnxt_re-abi.h>
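/*
 * __from_ib_access_flags() / __to_ib_access_flags() translate between the
 * IB core access-flag bits (IB_ACCESS_*) and the driver's qplib flag bits
 * (BNXT_QPLIB_ACCESS_*), one bit at a time in each direction.
 */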
static int __from_ib_access_flags(int iflags)
{
	int qflags = 0;

	if (iflags & IB_ACCESS_LOCAL_WRITE)
		qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
	if (iflags & IB_ACCESS_REMOTE_READ)
		qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ;
	if (iflags & IB_ACCESS_REMOTE_WRITE)
		qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE;
	if (iflags & IB_ACCESS_REMOTE_ATOMIC)
		qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC;
	if (iflags & IB_ACCESS_MW_BIND)
		qflags |= BNXT_QPLIB_ACCESS_MW_BIND;
	if (iflags & IB_ZERO_BASED)
		qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED;
	if (iflags & IB_ACCESS_ON_DEMAND)
		qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND;
	return qflags;
}
static enum ib_access_flags __to_ib_access_flags(int qflags)
{
	enum ib_access_flags iflags = 0;

	if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE)
		iflags |= IB_ACCESS_LOCAL_WRITE;
	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE)
		iflags |= IB_ACCESS_REMOTE_WRITE;
	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ)
		iflags |= IB_ACCESS_REMOTE_READ;
	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC)
		iflags |= IB_ACCESS_REMOTE_ATOMIC;
	if (qflags & BNXT_QPLIB_ACCESS_MW_BIND)
		iflags |= IB_ACCESS_MW_BIND;
	if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED)
		iflags |= IB_ZERO_BASED;
	if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND)
		iflags |= IB_ACCESS_ON_DEMAND;
	return iflags;
}
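/*
 * Copy an array of ib_sge entries into the qplib SGE format used by the
 * firmware and return the total payload length covered by the list.
 */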
static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list,
			     struct bnxt_qplib_sge *sg_list, int num)
{
	int i, total = 0;

	for (i = 0; i < num; i++) {
		sg_list[i].addr = ib_sg_list[i].addr;
		sg_list[i].lkey = ib_sg_list[i].lkey;
		sg_list[i].size = ib_sg_list[i].length;
		total += sg_list[i].size;
	}
	return total;
}
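/* Return the netdev associated with this RoCE device, if any. */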
struct net_device *bnxt_re_get_netdev(struct ib_device *ibdev, u8 port_num)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct net_device *netdev = NULL;

	rcu_read_lock();
	if (rdev)
		netdev = rdev->netdev;
	if (netdev)
		dev_hold(netdev);

	rcu_read_unlock();
	return netdev;
}
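/*
 * Report device attributes to the IB core.  Most limits come straight from
 * the queried firmware capabilities cached in rdev->dev_attr; identifiers
 * (vendor, part, hw revision) come from the underlying PCI device.
 */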
int bnxt_re_query_device(struct ib_device *ibdev,
			 struct ib_device_attr *ib_attr,
			 struct ib_udata *udata)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;

	memset(ib_attr, 0, sizeof(*ib_attr));
	memcpy(&ib_attr->fw_ver, dev_attr->fw_ver,
	       min(sizeof(dev_attr->fw_ver),
		   sizeof(ib_attr->fw_ver)));
	bnxt_qplib_get_guid(rdev->netdev->dev_addr,
			    (u8 *)&ib_attr->sys_image_guid);
	ib_attr->max_mr_size = BNXT_RE_MAX_MR_SIZE;
	ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_4K | BNXT_RE_PAGE_SIZE_2M;

	ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
	ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
	ib_attr->hw_ver = rdev->en_dev->pdev->subsystem_device;
	ib_attr->max_qp = dev_attr->max_qp;
	ib_attr->max_qp_wr = dev_attr->max_qp_wqes;
	ib_attr->device_cap_flags =
				    IB_DEVICE_CURR_QP_STATE_MOD
				    | IB_DEVICE_RC_RNR_NAK_GEN
				    | IB_DEVICE_SHUTDOWN_PORT
				    | IB_DEVICE_SYS_IMAGE_GUID
				    | IB_DEVICE_LOCAL_DMA_LKEY
				    | IB_DEVICE_RESIZE_MAX_WR
				    | IB_DEVICE_PORT_ACTIVE_EVENT
				    | IB_DEVICE_N_NOTIFY_CQ
				    | IB_DEVICE_MEM_WINDOW
				    | IB_DEVICE_MEM_WINDOW_TYPE_2B
				    | IB_DEVICE_MEM_MGT_EXTENSIONS;
	ib_attr->max_sge = dev_attr->max_qp_sges;
	ib_attr->max_sge_rd = dev_attr->max_qp_sges;
	ib_attr->max_cq = dev_attr->max_cq;
	ib_attr->max_cqe = dev_attr->max_cq_wqes;
	ib_attr->max_mr = dev_attr->max_mr;
	ib_attr->max_pd = dev_attr->max_pd;
	ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom;
	ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom;
	if (dev_attr->is_atomic) {
		ib_attr->atomic_cap = IB_ATOMIC_HCA;
		ib_attr->masked_atomic_cap = IB_ATOMIC_HCA;
	}

	ib_attr->max_ee_rd_atom = 0;
	ib_attr->max_res_rd_atom = 0;
	ib_attr->max_ee_init_rd_atom = 0;
	ib_attr->max_ee = 0;
	ib_attr->max_rdd = 0;
	ib_attr->max_mw = dev_attr->max_mw;
	ib_attr->max_raw_ipv6_qp = 0;
	ib_attr->max_raw_ethy_qp = dev_attr->max_raw_ethy_qp;
	ib_attr->max_mcast_grp = 0;
	ib_attr->max_mcast_qp_attach = 0;
	ib_attr->max_total_mcast_qp_attach = 0;
	ib_attr->max_ah = dev_attr->max_ah;

	ib_attr->max_fmr = 0;
	ib_attr->max_map_per_fmr = 0;

	ib_attr->max_srq = dev_attr->max_srq;
	ib_attr->max_srq_wr = dev_attr->max_srq_wqes;
	ib_attr->max_srq_sge = dev_attr->max_srq_sges;

	ib_attr->max_fast_reg_page_list_len = MAX_PBL_LVL_1_PGS;

	ib_attr->max_pkeys = 1;
	ib_attr->local_ca_ack_delay = BNXT_RE_DEFAULT_ACK_DELAY;
	return 0;
}
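/*
 * Device modification: the system image GUID and node description are
 * treated as read-only here, so no hardware update is performed.
 */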
int bnxt_re_modify_device(struct ib_device *ibdev,
			  int device_modify_mask,
			  struct ib_device_modify *device_modify)
{
	switch (device_modify_mask) {
	case IB_DEVICE_MODIFY_SYS_IMAGE_GUID:
		/* Modify the GUID requires the modification of the GID table */
		/* GUID should be made as READ-ONLY */
		break;
	case IB_DEVICE_MODIFY_NODE_DESC:
		/* Node Desc should be made as READ-ONLY */
		break;
	default:
		break;
	}
	return 0;
}
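/*
 * Port attributes are derived from the netdev state: the port is reported
 * ACTIVE only while the netdev is running with carrier, and the active MTU
 * tracks the netdev MTU.
 */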
int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
		       struct ib_port_attr *port_attr)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;

	memset(port_attr, 0, sizeof(*port_attr));

	if (netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev)) {
		port_attr->state = IB_PORT_ACTIVE;
		port_attr->phys_state = 5;
	} else {
		port_attr->state = IB_PORT_DOWN;
		port_attr->phys_state = 3;
	}
	port_attr->max_mtu = IB_MTU_4096;
	port_attr->active_mtu = iboe_get_mtu(rdev->netdev->mtu);
	port_attr->gid_tbl_len = dev_attr->max_sgid;
	port_attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
				    IB_PORT_DEVICE_MGMT_SUP |
				    IB_PORT_VENDOR_CLASS_SUP |
				    IB_PORT_IP_BASED_GIDS;

	port_attr->max_msg_sz = (u32)BNXT_RE_MAX_MR_SIZE_LOW;
	port_attr->bad_pkey_cntr = 0;
	port_attr->qkey_viol_cntr = 0;
	port_attr->pkey_tbl_len = dev_attr->max_pkey;
	port_attr->lid = 0;
	port_attr->sm_lid = 0;
	port_attr->lmc = 0;
	port_attr->max_vl_num = 4;
	port_attr->sm_sl = 0;
	port_attr->subnet_timeout = 0;
	port_attr->init_type_reply = 0;
	port_attr->active_speed = rdev->active_speed;
	port_attr->active_width = rdev->active_width;

	return 0;
}
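/* Immutable port data: RoCE (v1 and v2) with the standard MAD size. */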
int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num,
			       struct ib_port_immutable *immutable)
{
	struct ib_port_attr port_attr;

	if (bnxt_re_query_port(ibdev, port_num, &port_attr))
		return -EINVAL;

	immutable->pkey_tbl_len = port_attr.pkey_tbl_len;
	immutable->gid_tbl_len = port_attr.gid_tbl_len;
	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
	immutable->core_cap_flags |= RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;
	return 0;
}
void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);

	snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d",
		 rdev->dev_attr.fw_ver[0], rdev->dev_attr.fw_ver[1],
		 rdev->dev_attr.fw_ver[2], rdev->dev_attr.fw_ver[3]);
}
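/*
 * P_Key and GID queries are served from the qplib shadow tables; port_num
 * is ignored since the device exposes a single port.
 */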
int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num,
		       u16 index, u16 *pkey)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);

	/* Ignore port_num */

	memset(pkey, 0, sizeof(*pkey));
	return bnxt_qplib_get_pkey(&rdev->qplib_res,
				   &rdev->qplib_res.pkey_tbl, index, pkey);
}
int bnxt_re_query_gid(struct ib_device *ibdev, u8 port_num,
		      int index, union ib_gid *gid)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	int rc = 0;

	/* Ignore port_num */
	memset(gid, 0, sizeof(*gid));
	rc = bnxt_qplib_get_sgid(&rdev->qplib_res,
				 &rdev->qplib_res.sgid_tbl, index,
				 (struct bnxt_qplib_gid *)gid);
	return rc;
}
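/*
 * GID table management.  Each hardware GID entry is reference counted via a
 * bnxt_re_gid_ctx so that duplicate add/del requests from the stack map onto
 * a single firmware entry.
 */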
int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num,
		    unsigned int index, void **context)
{
	int rc = 0;
	struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
	struct bnxt_qplib_gid *gid_to_del;

	/* Delete the entry from the hardware */
	ctx = *context;
	if (!ctx)
		return -EINVAL;

	if (sgid_tbl && sgid_tbl->active) {
		if (ctx->idx >= sgid_tbl->max)
			return -EINVAL;
		gid_to_del = &sgid_tbl->tbl[ctx->idx];
		/* DEL_GID is called in WQ context (netdevice_event_work_handler)
		 * or via the ib_unregister_device path. In the former case QP1
		 * may not be destroyed yet, in which case just return as FW
		 * needs that entry to be present and will fail its deletion.
		 * We could get invoked again after QP1 is destroyed OR get an
		 * ADD_GID call with a different GID value for the same index
		 * where we issue MODIFY_GID cmd to update the GID entry -- TBD
		 */
		if (ctx->idx == 0 &&
		    rdma_link_local_addr((struct in6_addr *)gid_to_del) &&
		    ctx->refcnt == 1 && rdev->qp1_sqp) {
			dev_dbg(rdev_to_dev(rdev),
				"Trying to delete GID0 while QP1 is alive\n");
			return -EFAULT;
		}
		ctx->refcnt--;
		if (!ctx->refcnt) {
			rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del, true);
			if (rc) {
				dev_err(rdev_to_dev(rdev),
					"Failed to remove GID: %#x", rc);
			} else {
				ctx_tbl = sgid_tbl->ctx;
				ctx_tbl[ctx->idx] = NULL;
				kfree(ctx);
			}
		}
	} else {
		return -EINVAL;
	}
	return rc;
}
int bnxt_re_add_gid(struct ib_device *ibdev, u8 port_num,
		    unsigned int index, const union ib_gid *gid,
		    const struct ib_gid_attr *attr, void **context)
{
	int rc;
	u32 tbl_idx = 0;
	u16 vlan_id = 0xFFFF;
	struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;

	if ((attr->ndev) && is_vlan_dev(attr->ndev))
		vlan_id = vlan_dev_vlan_id(attr->ndev);

	rc = bnxt_qplib_add_sgid(sgid_tbl, (struct bnxt_qplib_gid *)gid,
				 rdev->qplib_res.netdev->dev_addr,
				 vlan_id, true, &tbl_idx);
	if (rc == -EALREADY) {
		ctx_tbl = sgid_tbl->ctx;
		ctx_tbl[tbl_idx]->refcnt++;
		*context = ctx_tbl[tbl_idx];
		return 0;
	}

	if (rc < 0) {
		dev_err(rdev_to_dev(rdev), "Failed to add GID: %#x", rc);
		return rc;
	}

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	ctx_tbl = sgid_tbl->ctx;
	ctx->idx = tbl_idx;
	ctx->refcnt = 1;
	ctx_tbl[tbl_idx] = ctx;
	*context = ctx;

	return rc;
}
enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
					    u8 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}
#define	BNXT_RE_FENCE_PBL_SIZE	DIV_ROUND_UP(BNXT_RE_FENCE_BYTES, PAGE_SIZE)
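/*
 * Fence MR/MW machinery: a small DMA-mapped buffer per PD is registered as
 * an MR, wrapped by a type-1 MW, and a pre-built BIND_MW work request is
 * posted with a fresh rkey whenever a fence is required on a QP.
 */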
static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd)
{
	struct bnxt_re_fence_data *fence = &pd->fence;
	struct ib_mr *ib_mr = &fence->mr->ib_mr;
	struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;

	memset(wqe, 0, sizeof(*wqe));
	wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;
	wqe->wr_id = BNXT_QPLIB_FENCE_WRID;
	wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
	wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
	wqe->bind.zero_based = false;
	wqe->bind.parent_l_key = ib_mr->lkey;
	wqe->bind.va = (u64)(unsigned long)fence->va;
	wqe->bind.length = fence->size;
	wqe->bind.access_cntl = __from_ib_access_flags(IB_ACCESS_REMOTE_READ);
	wqe->bind.mw_type = SQ_BIND_MW_TYPE_TYPE1;

	/* Save the initial rkey in fence structure for now;
	 * wqe->bind.r_key will be set at (re)bind time.
	 */
	fence->bind_rkey = ib_inc_rkey(fence->mw->rkey);
}
static int bnxt_re_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp)
{
	struct bnxt_re_qp *qp = container_of(qplib_qp, struct bnxt_re_qp,
					     qplib_qp);
	struct ib_pd *ib_pd = qp->ib_qp.pd;
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_fence_data *fence = &pd->fence;
	struct bnxt_qplib_swqe *fence_wqe = &fence->bind_wqe;
	struct bnxt_qplib_swqe wqe;
	int rc;

	memcpy(&wqe, fence_wqe, sizeof(wqe));
	wqe.bind.r_key = fence->bind_rkey;
	fence->bind_rkey = ib_inc_rkey(fence->bind_rkey);

	dev_dbg(rdev_to_dev(qp->rdev),
		"Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n",
		wqe.bind.r_key, qp->qplib_qp.id, pd);
	rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
	if (rc) {
		dev_err(rdev_to_dev(qp->rdev), "Failed to bind fence-WQE\n");
		return rc;
	}
	bnxt_qplib_post_send_db(&qp->qplib_qp);

	return rc;
}
static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd)
{
	struct bnxt_re_fence_data *fence = &pd->fence;
	struct bnxt_re_dev *rdev = pd->rdev;
	struct device *dev = &rdev->en_dev->pdev->dev;
	struct bnxt_re_mr *mr = fence->mr;

	if (fence->mw) {
		bnxt_re_dealloc_mw(fence->mw);
		fence->mw = NULL;
	}
	if (mr) {
		if (mr->ib_mr.rkey)
			bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr,
					     true);
		if (mr->ib_mr.lkey)
			bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
		kfree(mr);
		fence->mr = NULL;
	}
	if (fence->dma_addr) {
		dma_unmap_single(dev, fence->dma_addr, BNXT_RE_FENCE_BYTES,
				 DMA_BIDIRECTIONAL);
		fence->dma_addr = 0;
	}
}
static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
{
	int mr_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_MW_BIND;
	struct bnxt_re_fence_data *fence = &pd->fence;
	struct bnxt_re_dev *rdev = pd->rdev;
	struct device *dev = &rdev->en_dev->pdev->dev;
	struct bnxt_re_mr *mr = NULL;
	dma_addr_t dma_addr = 0;
	struct ib_mw *mw;
	u64 pbl_tbl;
	int rc;

	dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES,
				  DMA_BIDIRECTIONAL);
	rc = dma_mapping_error(dev, dma_addr);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Failed to dma-map fence-MR-mem\n");
		rc = -EIO;
		fence->dma_addr = 0;
		goto fail;
	}
	fence->dma_addr = dma_addr;

	/* Allocate a MR */
	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr) {
		rc = -ENOMEM;
		goto fail;
	}
	fence->mr = mr;
	mr->rdev = rdev;
	mr->qplib_mr.pd = &pd->qplib_pd;
	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
	mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Failed to alloc fence-HW-MR\n");
		goto fail;
	}

	/* Register MR */
	mr->ib_mr.lkey = mr->qplib_mr.lkey;
	mr->qplib_mr.va = (u64)(unsigned long)fence->va;
	mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES;
	pbl_tbl = dma_addr;
	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl_tbl,
			       BNXT_RE_FENCE_PBL_SIZE, false, PAGE_SIZE);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Failed to register fence-MR\n");
		goto fail;
	}
	mr->ib_mr.rkey = mr->qplib_mr.rkey;

	/* Create a fence MW only for kernel consumers */
	mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL);
	if (IS_ERR(mw)) {
		dev_err(rdev_to_dev(rdev),
			"Failed to create fence-MW for PD: %p\n", pd);
		rc = PTR_ERR(mw);
		goto fail;
	}
	fence->mw = mw;

	bnxt_re_create_fence_wqe(pd);
	return 0;

fail:
	bnxt_re_destroy_fence_mr(pd);
	return rc;
}
/* Protection Domains */
int bnxt_re_dealloc_pd(struct ib_pd *ib_pd)
{
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_dev *rdev = pd->rdev;
	int rc;

	bnxt_re_destroy_fence_mr(pd);

	if (pd->qplib_pd.id) {
		rc = bnxt_qplib_dealloc_pd(&rdev->qplib_res,
					   &rdev->qplib_res.pd_tbl,
					   &pd->qplib_pd);
		if (rc)
			dev_err(rdev_to_dev(rdev), "Failed to deallocate HW PD");
	}

	kfree(pd);
	return 0;
}
struct ib_pd *bnxt_re_alloc_pd(struct ib_device *ibdev,
			       struct ib_ucontext *ucontext,
			       struct ib_udata *udata)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct bnxt_re_ucontext *ucntx = container_of(ucontext,
						      struct bnxt_re_ucontext,
						      ib_uctx);
	struct bnxt_re_pd *pd;
	int rc;

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	pd->rdev = rdev;
	if (bnxt_qplib_alloc_pd(&rdev->qplib_res.pd_tbl, &pd->qplib_pd)) {
		dev_err(rdev_to_dev(rdev), "Failed to allocate HW PD");
		rc = -ENOMEM;
		goto fail;
	}

	if (udata) {
		struct bnxt_re_pd_resp resp;

		if (!ucntx->dpi.dbr) {
			/* Allocate DPI in alloc_pd to avoid failures of
			 * ibv_devinfo and similar applications when DPIs
			 * are depleted.
			 */
			if (bnxt_qplib_alloc_dpi(&rdev->qplib_res.dpi_tbl,
						 &ucntx->dpi, ucntx)) {
				rc = -ENOMEM;
				goto dbfail;
			}
		}

		resp.pdid = pd->qplib_pd.id;
		/* Still allow mapping this DBR to the new user PD. */
		resp.dpi = ucntx->dpi.dpi;
		resp.dbr = (u64)ucntx->dpi.umdbr;

		rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
		if (rc) {
			dev_err(rdev_to_dev(rdev),
				"Failed to copy user response\n");
			goto dbfail;
		}
	}

	if (!udata)
		if (bnxt_re_create_fence_mr(pd))
			dev_warn(rdev_to_dev(rdev),
				 "Failed to create Fence-MR\n");
	return &pd->ib_pd;
dbfail:
	(void)bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
				    &pd->qplib_pd);
fail:
	kfree(pd);
	return ERR_PTR(rc);
}
/* Address Handles */
int bnxt_re_destroy_ah(struct ib_ah *ib_ah)
{
	struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
	struct bnxt_re_dev *rdev = ah->rdev;
	int rc;

	rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Failed to destroy HW AH");
		return rc;
	}
	kfree(ah);
	return 0;
}
struct ib_ah *bnxt_re_create_ah(struct ib_pd *ib_pd,
				struct rdma_ah_attr *ah_attr,
				struct ib_udata *udata)
{
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_re_ah *ah;
	const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
	int rc;
	u8 nw_type;

	struct ib_gid_attr sgid_attr;

	if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) {
		dev_err(rdev_to_dev(rdev), "Failed to alloc AH: GRH not set");
		return ERR_PTR(-EINVAL);
	}
	ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
	if (!ah)
		return ERR_PTR(-ENOMEM);

	ah->rdev = rdev;
	ah->qplib_ah.pd = &pd->qplib_pd;

	/* Supply the configuration for the HW */
	memcpy(ah->qplib_ah.dgid.data, grh->dgid.raw,
	       sizeof(union ib_gid));
	/*
	 * If RoCE V2 is enabled, stack will have two entries for
	 * each GID entry. Avoiding this duplicate entry in HW. Dividing
	 * the GID index by 2 for RoCE V2
	 */
	ah->qplib_ah.sgid_index = grh->sgid_index / 2;
	ah->qplib_ah.host_sgid_index = grh->sgid_index;
	ah->qplib_ah.traffic_class = grh->traffic_class;
	ah->qplib_ah.flow_label = grh->flow_label;
	ah->qplib_ah.hop_limit = grh->hop_limit;
	ah->qplib_ah.sl = rdma_ah_get_sl(ah_attr);
	if (ib_pd->uobject &&
	    !rdma_is_multicast_addr((struct in6_addr *)
				    grh->dgid.raw) &&
	    !rdma_link_local_addr((struct in6_addr *)
				  grh->dgid.raw)) {
		union ib_gid sgid;

		rc = ib_get_cached_gid(&rdev->ibdev, 1,
				       grh->sgid_index, &sgid,
				       &sgid_attr);
		if (rc) {
			dev_err(rdev_to_dev(rdev),
				"Failed to query gid at index %d",
				grh->sgid_index);
			goto fail;
		}
		if (sgid_attr.ndev)
			dev_put(sgid_attr.ndev);
		/* Get network header type for this GID */
		nw_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
		switch (nw_type) {
		case RDMA_NETWORK_IPV4:
			ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V2IPV4;
			break;
		case RDMA_NETWORK_IPV6:
			ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V2IPV6;
			break;
		default:
			ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V1;
			break;
		}
	}

	memcpy(ah->qplib_ah.dmac, ah_attr->roce.dmac, ETH_ALEN);
	rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Failed to allocate HW AH");
		goto fail;
	}

	/* Write AVID to shared page. */
	if (ib_pd->uobject) {
		struct ib_ucontext *ib_uctx = ib_pd->uobject->context;
		struct bnxt_re_ucontext *uctx;
		unsigned long flag;
		u32 *wrptr;

		uctx = container_of(ib_uctx, struct bnxt_re_ucontext, ib_uctx);
		spin_lock_irqsave(&uctx->sh_lock, flag);
		wrptr = (u32 *)(uctx->shpg + BNXT_RE_AVID_OFFT);
		*wrptr = ah->qplib_ah.id;
		wmb();	/* make sure cache is updated. */
		spin_unlock_irqrestore(&uctx->sh_lock, flag);
	}

	return &ah->ib_ah;

fail:
	kfree(ah);
	return ERR_PTR(rc);
}
int bnxt_re_modify_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
{
	return 0;
}
int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
{
	struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);

	ah_attr->type = ib_ah->type;
	rdma_ah_set_sl(ah_attr, ah->qplib_ah.sl);
	memcpy(ah_attr->roce.dmac, ah->qplib_ah.dmac, ETH_ALEN);
	rdma_ah_set_grh(ah_attr, NULL, 0,
			ah->qplib_ah.host_sgid_index,
			0, ah->qplib_ah.traffic_class);
	rdma_ah_set_dgid_raw(ah_attr, ah->qplib_ah.dgid.data);
	rdma_ah_set_port_num(ah_attr, 1);
	rdma_ah_set_static_rate(ah_attr, 0);
	return 0;
}
int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
{
	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
	struct bnxt_re_dev *rdev = qp->rdev;
	int rc;

	bnxt_qplib_flush_cqn_wq(&qp->qplib_qp);
	bnxt_qplib_del_flush_qp(&qp->qplib_qp);
	rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Failed to destroy HW QP");
		return rc;
	}
	if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp) {
		rc = bnxt_qplib_destroy_ah(&rdev->qplib_res,
					   &rdev->sqp_ah->qplib_ah);
		if (rc) {
			dev_err(rdev_to_dev(rdev),
				"Failed to destroy HW AH for shadow QP");
			return rc;
		}

		bnxt_qplib_del_flush_qp(&qp->qplib_qp);
		rc = bnxt_qplib_destroy_qp(&rdev->qplib_res,
					   &rdev->qp1_sqp->qplib_qp);
		if (rc) {
			dev_err(rdev_to_dev(rdev),
				"Failed to destroy Shadow QP");
			return rc;
		}
		mutex_lock(&rdev->qp_lock);
		list_del(&rdev->qp1_sqp->list);
		atomic_dec(&rdev->qp_count);
		mutex_unlock(&rdev->qp_lock);

		kfree(rdev->sqp_ah);
		kfree(rdev->qp1_sqp);
		rdev->qp1_sqp = NULL;
		rdev->sqp_ah = NULL;
	}

	if (!IS_ERR_OR_NULL(qp->rumem))
		ib_umem_release(qp->rumem);
	if (!IS_ERR_OR_NULL(qp->sumem))
		ib_umem_release(qp->sumem);

	mutex_lock(&rdev->qp_lock);
	list_del(&qp->list);
	atomic_dec(&rdev->qp_count);
	mutex_unlock(&rdev->qp_lock);
	kfree(qp);
	return 0;
}
static u8 __from_ib_qp_type(enum ib_qp_type type)
{
	switch (type) {
	case IB_QPT_GSI:
		return CMDQ_CREATE_QP1_TYPE_GSI;
	case IB_QPT_RC:
		return CMDQ_CREATE_QP_TYPE_RC;
	case IB_QPT_UD:
		return CMDQ_CREATE_QP_TYPE_UD;
	default:
		return IB_QPT_MAX;
	}
}
static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
				struct bnxt_re_qp *qp, struct ib_udata *udata)
{
	struct bnxt_re_qp_req ureq;
	struct bnxt_qplib_qp *qplib_qp = &qp->qplib_qp;
	struct ib_umem *umem;
	int bytes = 0;
	struct ib_ucontext *context = pd->ib_pd.uobject->context;
	struct bnxt_re_ucontext *cntx = container_of(context,
						     struct bnxt_re_ucontext,
						     ib_uctx);
	if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
		return -EFAULT;

	bytes = (qplib_qp->sq.max_wqe * BNXT_QPLIB_MAX_SQE_ENTRY_SIZE);
	/* Consider mapping PSN search memory only for RC QPs. */
	if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC)
		bytes += (qplib_qp->sq.max_wqe * sizeof(struct sq_psn_search));
	bytes = PAGE_ALIGN(bytes);
	umem = ib_umem_get(context, ureq.qpsva, bytes,
			   IB_ACCESS_LOCAL_WRITE, 1);
	if (IS_ERR(umem))
		return PTR_ERR(umem);

	qp->sumem = umem;
	qplib_qp->sq.sglist = umem->sg_head.sgl;
	qplib_qp->sq.nmap = umem->nmap;
	qplib_qp->qp_handle = ureq.qp_handle;

	if (!qp->qplib_qp.srq) {
		bytes = (qplib_qp->rq.max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
		bytes = PAGE_ALIGN(bytes);
		umem = ib_umem_get(context, ureq.qprva, bytes,
				   IB_ACCESS_LOCAL_WRITE, 1);
		if (IS_ERR(umem))
			goto rqfail;
		qp->rumem = umem;
		qplib_qp->rq.sglist = umem->sg_head.sgl;
		qplib_qp->rq.nmap = umem->nmap;
	}

	qplib_qp->dpi = &cntx->dpi;
	return 0;
rqfail:
	ib_umem_release(qp->sumem);
	qp->sumem = NULL;
	qplib_qp->sq.sglist = NULL;
	qplib_qp->sq.nmap = 0;

	return PTR_ERR(umem);
}
static struct bnxt_re_ah *bnxt_re_create_shadow_qp_ah
				(struct bnxt_re_pd *pd,
				 struct bnxt_qplib_res *qp1_res,
				 struct bnxt_qplib_qp *qp1_qp)
{
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_re_ah *ah;
	union ib_gid sgid;
	int rc;

	ah = kzalloc(sizeof(*ah), GFP_KERNEL);
	if (!ah)
		return NULL;

	ah->rdev = rdev;
	ah->qplib_ah.pd = &pd->qplib_pd;

	rc = bnxt_re_query_gid(&rdev->ibdev, 1, 0, &sgid);
	if (rc)
		goto fail;

	/* supply the dgid data same as sgid */
	memcpy(ah->qplib_ah.dgid.data, &sgid.raw,
	       sizeof(union ib_gid));
	ah->qplib_ah.sgid_index = 0;

	ah->qplib_ah.traffic_class = 0;
	ah->qplib_ah.flow_label = 0;
	ah->qplib_ah.hop_limit = 1;
	ah->qplib_ah.sl = 0;
	/* Have DMAC same as SMAC */
	ether_addr_copy(ah->qplib_ah.dmac, rdev->netdev->dev_addr);

	rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah);
	if (rc) {
		dev_err(rdev_to_dev(rdev),
			"Failed to allocate HW AH for Shadow QP");
		goto fail;
	}

	return ah;

fail:
	kfree(ah);
	return NULL;
}
static struct bnxt_re_qp *bnxt_re_create_shadow_qp
				(struct bnxt_re_pd *pd,
				 struct bnxt_qplib_res *qp1_res,
				 struct bnxt_qplib_qp *qp1_qp)
{
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_re_qp *qp;
	int rc;

	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp)
		return NULL;

	qp->rdev = rdev;

	/* Initialize the shadow QP structure from the QP1 values */
	ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);

	qp->qplib_qp.pd = &pd->qplib_pd;
	qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
	qp->qplib_qp.type = IB_QPT_UD;

	qp->qplib_qp.max_inline_data = 0;
	qp->qplib_qp.sig_type = true;

	/* Shadow QP SQ depth should be same as QP1 RQ depth */
	qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
	qp->qplib_qp.sq.max_sge = 2;
	/* Q full delta can be 1 since it is internal QP */
	qp->qplib_qp.sq.q_full_delta = 1;

	qp->qplib_qp.scq = qp1_qp->scq;
	qp->qplib_qp.rcq = qp1_qp->rcq;

	qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
	qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
	/* Q full delta can be 1 since it is internal QP */
	qp->qplib_qp.rq.q_full_delta = 1;

	qp->qplib_qp.mtu = qp1_qp->mtu;

	qp->qplib_qp.sq_hdr_buf_size = 0;
	qp->qplib_qp.rq_hdr_buf_size = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
	qp->qplib_qp.dpi = &rdev->dpi_privileged;

	rc = bnxt_qplib_create_qp(qp1_res, &qp->qplib_qp);
	if (rc)
		goto fail;

	rdev->sqp_id = qp->qplib_qp.id;

	spin_lock_init(&qp->sq_lock);
	INIT_LIST_HEAD(&qp->list);
	mutex_lock(&rdev->qp_lock);
	list_add_tail(&qp->list, &rdev->qp_list);
	atomic_inc(&rdev->qp_count);
	mutex_unlock(&rdev->qp_lock);
	return qp;
fail:
	kfree(qp);
	return NULL;
}
struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
				struct ib_qp_init_attr *qp_init_attr,
				struct ib_udata *udata)
{
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
	struct bnxt_re_qp *qp;
	struct bnxt_re_cq *cq;
	struct bnxt_re_srq *srq;
	int rc, entries;

	if ((qp_init_attr->cap.max_send_wr > dev_attr->max_qp_wqes) ||
	    (qp_init_attr->cap.max_recv_wr > dev_attr->max_qp_wqes) ||
	    (qp_init_attr->cap.max_send_sge > dev_attr->max_qp_sges) ||
	    (qp_init_attr->cap.max_recv_sge > dev_attr->max_qp_sges) ||
	    (qp_init_attr->cap.max_inline_data > dev_attr->max_inline_data))
		return ERR_PTR(-EINVAL);

	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp)
		return ERR_PTR(-ENOMEM);

	qp->rdev = rdev;
	ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);
	qp->qplib_qp.pd = &pd->qplib_pd;
	qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
	qp->qplib_qp.type = __from_ib_qp_type(qp_init_attr->qp_type);
	if (qp->qplib_qp.type == IB_QPT_MAX) {
		dev_err(rdev_to_dev(rdev), "QP type 0x%x not supported",
			qp->qplib_qp.type);
		rc = -EINVAL;
		goto fail;
	}
	qp->qplib_qp.max_inline_data = qp_init_attr->cap.max_inline_data;
	qp->qplib_qp.sig_type = ((qp_init_attr->sq_sig_type ==
				  IB_SIGNAL_ALL_WR) ? true : false);

	qp->qplib_qp.sq.max_sge = qp_init_attr->cap.max_send_sge;
	if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
		qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;

	if (qp_init_attr->send_cq) {
		cq = container_of(qp_init_attr->send_cq, struct bnxt_re_cq,
				  ib_cq);
		if (!cq) {
			dev_err(rdev_to_dev(rdev), "Send CQ not found");
			rc = -EINVAL;
			goto fail;
		}
		qp->qplib_qp.scq = &cq->qplib_cq;
	}

	if (qp_init_attr->recv_cq) {
		cq = container_of(qp_init_attr->recv_cq, struct bnxt_re_cq,
				  ib_cq);
		if (!cq) {
			dev_err(rdev_to_dev(rdev), "Receive CQ not found");
			rc = -EINVAL;
			goto fail;
		}
		qp->qplib_qp.rcq = &cq->qplib_cq;
	}

	if (qp_init_attr->srq) {
		srq = container_of(qp_init_attr->srq, struct bnxt_re_srq,
				   ib_srq);
		if (!srq) {
			dev_err(rdev_to_dev(rdev), "SRQ not found");
			rc = -EINVAL;
			goto fail;
		}
		qp->qplib_qp.srq = &srq->qplib_srq;
		qp->qplib_qp.rq.max_wqe = 0;
	} else {
		/* Allocate 1 more than what's provided so posting max doesn't
		 * mean empty
		 */
		entries = roundup_pow_of_two(qp_init_attr->cap.max_recv_wr + 1);
		qp->qplib_qp.rq.max_wqe = min_t(u32, entries,
						dev_attr->max_qp_wqes + 1);

		qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
						qp_init_attr->cap.max_recv_wr;

		qp->qplib_qp.rq.max_sge = qp_init_attr->cap.max_recv_sge;
		if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
			qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
	}

	qp->qplib_qp.mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));

	if (qp_init_attr->qp_type == IB_QPT_GSI) {
		/* Allocate 1 more than what's provided */
		entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1);
		qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
						dev_attr->max_qp_wqes + 1);
		qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
						qp_init_attr->cap.max_send_wr;
		qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
		if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
			qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
		qp->qplib_qp.sq.max_sge++;
		if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
			qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;

		qp->qplib_qp.rq_hdr_buf_size =
					BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;

		qp->qplib_qp.sq_hdr_buf_size =
					BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2;
		qp->qplib_qp.dpi = &rdev->dpi_privileged;
		rc = bnxt_qplib_create_qp1(&rdev->qplib_res, &qp->qplib_qp);
		if (rc) {
			dev_err(rdev_to_dev(rdev), "Failed to create HW QP1");
			goto fail;
		}
		/* Create a shadow QP to handle the QP1 traffic */
		rdev->qp1_sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res,
							 &qp->qplib_qp);
		if (!rdev->qp1_sqp) {
			rc = -EINVAL;
			dev_err(rdev_to_dev(rdev),
				"Failed to create Shadow QP for QP1");
			goto qp_destroy;
		}
		rdev->sqp_ah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res,
							   &qp->qplib_qp);
		if (!rdev->sqp_ah) {
			bnxt_qplib_destroy_qp(&rdev->qplib_res,
					      &rdev->qp1_sqp->qplib_qp);
			rc = -EINVAL;
			dev_err(rdev_to_dev(rdev),
				"Failed to create AH entry for ShadowQP");
			goto qp_destroy;
		}

	} else {
		/* Allocate 128 + 1 more than what's provided */
		entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr +
					     BNXT_QPLIB_RESERVED_QP_WRS + 1);
		qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
						dev_attr->max_qp_wqes +
						BNXT_QPLIB_RESERVED_QP_WRS + 1);
		qp->qplib_qp.sq.q_full_delta = BNXT_QPLIB_RESERVED_QP_WRS + 1;

		/*
		 * Reserving one slot for Phantom WQE. Application can
		 * post one extra entry in this case. But allowing this to avoid
		 * unexpected Queue full condition
		 */

		qp->qplib_qp.sq.q_full_delta -= 1;

		qp->qplib_qp.max_rd_atomic = dev_attr->max_qp_rd_atom;
		qp->qplib_qp.max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
		if (udata) {
			rc = bnxt_re_init_user_qp(rdev, pd, qp, udata);
			if (rc)
				goto fail;
		} else {
			qp->qplib_qp.dpi = &rdev->dpi_privileged;
		}

		rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp);
		if (rc) {
			dev_err(rdev_to_dev(rdev), "Failed to create HW QP");
			goto fail;
		}
	}

	qp->ib_qp.qp_num = qp->qplib_qp.id;
	spin_lock_init(&qp->sq_lock);
	spin_lock_init(&qp->rq_lock);

	if (udata) {
		struct bnxt_re_qp_resp resp;

		resp.qpid = qp->ib_qp.qp_num;
		resp.rsvd = 0;
		rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
		if (rc) {
			dev_err(rdev_to_dev(rdev), "Failed to copy QP udata");
			goto qp_destroy;
		}
	}
	INIT_LIST_HEAD(&qp->list);
	mutex_lock(&rdev->qp_lock);
	list_add_tail(&qp->list, &rdev->qp_list);
	atomic_inc(&rdev->qp_count);
	mutex_unlock(&rdev->qp_lock);

	return &qp->ib_qp;
qp_destroy:
	bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
fail:
	kfree(qp);
	return ERR_PTR(rc);
}
static u8 __from_ib_qp_state(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:
		return CMDQ_MODIFY_QP_NEW_STATE_RESET;
	case IB_QPS_INIT:
		return CMDQ_MODIFY_QP_NEW_STATE_INIT;
	case IB_QPS_RTR:
		return CMDQ_MODIFY_QP_NEW_STATE_RTR;
	case IB_QPS_RTS:
		return CMDQ_MODIFY_QP_NEW_STATE_RTS;
	case IB_QPS_SQD:
		return CMDQ_MODIFY_QP_NEW_STATE_SQD;
	case IB_QPS_SQE:
		return CMDQ_MODIFY_QP_NEW_STATE_SQE;
	case IB_QPS_ERR:
	default:
		return CMDQ_MODIFY_QP_NEW_STATE_ERR;
	}
}
static enum ib_qp_state __to_ib_qp_state(u8 state)
{
	switch (state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RESET:
		return IB_QPS_RESET;
	case CMDQ_MODIFY_QP_NEW_STATE_INIT:
		return IB_QPS_INIT;
	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
		return IB_QPS_RTR;
	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
		return IB_QPS_RTS;
	case CMDQ_MODIFY_QP_NEW_STATE_SQD:
		return IB_QPS_SQD;
	case CMDQ_MODIFY_QP_NEW_STATE_SQE:
		return IB_QPS_SQE;
	case CMDQ_MODIFY_QP_NEW_STATE_ERR:
	default:
		return IB_QPS_ERR;
	}
}
static u32 __from_ib_mtu(enum ib_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_256;
	case IB_MTU_512:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_512;
	case IB_MTU_1024:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_1024;
	case IB_MTU_2048:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
	case IB_MTU_4096:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_4096;
	default:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
	}
}
static enum ib_mtu __to_ib_mtu(u32 mtu)
{
	switch (mtu & CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) {
	case CMDQ_MODIFY_QP_PATH_MTU_MTU_256:
		return IB_MTU_256;
	case CMDQ_MODIFY_QP_PATH_MTU_MTU_512:
		return IB_MTU_512;
	case CMDQ_MODIFY_QP_PATH_MTU_MTU_1024:
		return IB_MTU_1024;
	case CMDQ_MODIFY_QP_PATH_MTU_MTU_2048:
		return IB_MTU_2048;
	case CMDQ_MODIFY_QP_PATH_MTU_MTU_4096:
		return IB_MTU_4096;
	default:
		return IB_MTU_2048;
	}
}
/* Shared Receive Queues */
int bnxt_re_destroy_srq(struct ib_srq *ib_srq)
{
	struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
					       ib_srq);
	struct bnxt_re_dev *rdev = srq->rdev;
	struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
	struct bnxt_qplib_nq *nq = NULL;
	int rc;

	if (qplib_srq->cq)
		nq = qplib_srq->cq->nq;
	rc = bnxt_qplib_destroy_srq(&rdev->qplib_res, qplib_srq);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Destroy HW SRQ failed!");
		return rc;
	}

	if (srq->umem && !IS_ERR(srq->umem))
		ib_umem_release(srq->umem);
	kfree(srq);
	atomic_dec(&rdev->srq_count);
	if (nq)
		nq->budget--;
	return 0;
}
static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
				 struct bnxt_re_pd *pd,
				 struct bnxt_re_srq *srq,
				 struct ib_udata *udata)
{
	struct bnxt_re_srq_req ureq;
	struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
	struct ib_umem *umem;
	int bytes = 0;
	struct ib_ucontext *context = pd->ib_pd.uobject->context;
	struct bnxt_re_ucontext *cntx = container_of(context,
						     struct bnxt_re_ucontext,
						     ib_uctx);
	if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
		return -EFAULT;

	bytes = (qplib_srq->max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
	bytes = PAGE_ALIGN(bytes);
	umem = ib_umem_get(context, ureq.srqva, bytes,
			   IB_ACCESS_LOCAL_WRITE, 1);
	if (IS_ERR(umem))
		return PTR_ERR(umem);

	srq->umem = umem;
	qplib_srq->nmap = umem->nmap;
	qplib_srq->sglist = umem->sg_head.sgl;
	qplib_srq->srq_handle = ureq.srq_handle;
	qplib_srq->dpi = &cntx->dpi;

	return 0;
}
struct ib_srq *bnxt_re_create_srq(struct ib_pd *ib_pd,
				  struct ib_srq_init_attr *srq_init_attr,
				  struct ib_udata *udata)
{
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
	struct bnxt_re_srq *srq;
	struct bnxt_qplib_nq *nq = NULL;
	int rc, entries;

	if (srq_init_attr->attr.max_wr >= dev_attr->max_srq_wqes) {
		dev_err(rdev_to_dev(rdev), "Create SRQ failed - max exceeded");
		rc = -EINVAL;
		goto exit;
	}

	if (srq_init_attr->srq_type != IB_SRQT_BASIC) {
		rc = -ENOTSUPP;
		goto exit;
	}

	srq = kzalloc(sizeof(*srq), GFP_KERNEL);
	if (!srq) {
		rc = -ENOMEM;
		goto exit;
	}
	srq->rdev = rdev;
	srq->qplib_srq.pd = &pd->qplib_pd;
	srq->qplib_srq.dpi = &rdev->dpi_privileged;
	/* Allocate 1 more than what's provided so posting max doesn't
	 * mean empty
	 */
	entries = roundup_pow_of_two(srq_init_attr->attr.max_wr + 1);
	if (entries > dev_attr->max_srq_wqes + 1)
		entries = dev_attr->max_srq_wqes + 1;

	srq->qplib_srq.max_wqe = entries;
	srq->qplib_srq.max_sge = srq_init_attr->attr.max_sge;
	srq->qplib_srq.threshold = srq_init_attr->attr.srq_limit;
	srq->srq_limit = srq_init_attr->attr.srq_limit;
	srq->qplib_srq.eventq_hw_ring_id = rdev->nq[0].ring_id;
	nq = &rdev->nq[0];

	if (udata) {
		rc = bnxt_re_init_user_srq(rdev, pd, srq, udata);
		if (rc)
			goto fail;
	}

	rc = bnxt_qplib_create_srq(&rdev->qplib_res, &srq->qplib_srq);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Create HW SRQ failed!");
		goto fail;
	}

	if (udata) {
		struct bnxt_re_srq_resp resp;

		resp.srqid = srq->qplib_srq.id;
		rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
		if (rc) {
			dev_err(rdev_to_dev(rdev), "SRQ copy to udata failed!");
			bnxt_qplib_destroy_srq(&rdev->qplib_res,
					       &srq->qplib_srq);
			goto exit;
		}
	}
	if (nq)
		nq->budget++;
	atomic_inc(&rdev->srq_count);

	return &srq->ib_srq;

fail:
	if (srq->umem && !IS_ERR(srq->umem))
		ib_umem_release(srq->umem);
	kfree(srq);
exit:
	return ERR_PTR(rc);
}
int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
		       enum ib_srq_attr_mask srq_attr_mask,
		       struct ib_udata *udata)
{
	struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
					       ib_srq);
	struct bnxt_re_dev *rdev = srq->rdev;
	int rc;

	switch (srq_attr_mask) {
	case IB_SRQ_MAX_WR:
		/* SRQ resize is not supported */
		break;
	case IB_SRQ_LIMIT:
		/* Change the SRQ threshold */
		if (srq_attr->srq_limit > srq->qplib_srq.max_wqe)
			return -EINVAL;

		srq->qplib_srq.threshold = srq_attr->srq_limit;
		rc = bnxt_qplib_modify_srq(&rdev->qplib_res, &srq->qplib_srq);
		if (rc) {
			dev_err(rdev_to_dev(rdev), "Modify HW SRQ failed!");
			return rc;
		}
		/* On success, update the shadow */
		srq->srq_limit = srq_attr->srq_limit;
		/* No need to Build and send response back to udata */
		break;
	default:
		dev_err(rdev_to_dev(rdev),
			"Unsupported srq_attr_mask 0x%x", srq_attr_mask);
		return -EINVAL;
	}
	return 0;
}
int bnxt_re_query_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr)
{
	struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
					       ib_srq);
	struct bnxt_re_srq tsrq;
	struct bnxt_re_dev *rdev = srq->rdev;
	int rc;

	/* Get live SRQ attr */
	tsrq.qplib_srq.id = srq->qplib_srq.id;
	rc = bnxt_qplib_query_srq(&rdev->qplib_res, &tsrq.qplib_srq);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Query HW SRQ failed!");
		return rc;
	}
	srq_attr->max_wr = srq->qplib_srq.max_wqe;
	srq_attr->max_sge = srq->qplib_srq.max_sge;
	srq_attr->srq_limit = tsrq.qplib_srq.threshold;

	return 0;
}
int bnxt_re_post_srq_recv(struct ib_srq *ib_srq, struct ib_recv_wr *wr,
			  struct ib_recv_wr **bad_wr)
{
	struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
					       ib_srq);
	struct bnxt_qplib_swqe wqe;
	unsigned long flags;
	int rc = 0, payload_sz = 0;

	spin_lock_irqsave(&srq->lock, flags);
	while (wr) {
		/* Transcribe each ib_recv_wr to qplib_swqe */
		wqe.num_sge = wr->num_sge;
		payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
					       wr->num_sge);
		wqe.wr_id = wr->wr_id;
		wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;

		rc = bnxt_qplib_post_srq_recv(&srq->qplib_srq, &wqe);
		if (rc) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}
	spin_unlock_irqrestore(&srq->lock, flags);

	return rc;
}
static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
				    struct bnxt_re_qp *qp1_qp,
				    int qp_attr_mask)
{
	struct bnxt_re_qp *qp = rdev->qp1_sqp;
	int rc = 0;

	if (qp_attr_mask & IB_QP_STATE) {
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
		qp->qplib_qp.state = qp1_qp->qplib_qp.state;
	}
	if (qp_attr_mask & IB_QP_PKEY_INDEX) {
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
		qp->qplib_qp.pkey_index = qp1_qp->qplib_qp.pkey_index;
	}

	if (qp_attr_mask & IB_QP_QKEY) {
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
		/* Using a Random QKEY */
		qp->qplib_qp.qkey = 0x81818181;
	}
	if (qp_attr_mask & IB_QP_SQ_PSN) {
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
		qp->qplib_qp.sq.psn = qp1_qp->qplib_qp.sq.psn;
	}

	rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
	if (rc)
		dev_err(rdev_to_dev(rdev),
			"Failed to modify Shadow QP for QP1");
	return rc;
}
int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
		      int qp_attr_mask, struct ib_udata *udata)
{
	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
	struct bnxt_re_dev *rdev = qp->rdev;
	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
	enum ib_qp_state curr_qp_state, new_qp_state;
	int rc, entries;
	int status;
	union ib_gid sgid;
	struct ib_gid_attr sgid_attr;
	u8 nw_type;

	qp->qplib_qp.modify_flags = 0;
	if (qp_attr_mask & IB_QP_STATE) {
		curr_qp_state = __to_ib_qp_state(qp->qplib_qp.cur_qp_state);
		new_qp_state = qp_attr->qp_state;
		if (!ib_modify_qp_is_ok(curr_qp_state, new_qp_state,
					ib_qp->qp_type, qp_attr_mask,
					IB_LINK_LAYER_ETHERNET)) {
			dev_err(rdev_to_dev(rdev),
				"Invalid attribute mask: %#x specified ",
				qp_attr_mask);
			dev_err(rdev_to_dev(rdev),
				"for qpn: %#x type: %#x",
				ib_qp->qp_num, ib_qp->qp_type);
			dev_err(rdev_to_dev(rdev),
				"curr_qp_state=0x%x, new_qp_state=0x%x\n",
				curr_qp_state, new_qp_state);
			return -EINVAL;
		}
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
		qp->qplib_qp.state = __from_ib_qp_state(qp_attr->qp_state);

		if (!qp->sumem &&
		    qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
			dev_dbg(rdev_to_dev(rdev),
				"Move QP = %p to flush list\n",
				qp);
			bnxt_qplib_add_flush_qp(&qp->qplib_qp);
		}
		if (!qp->sumem &&
		    qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
			dev_dbg(rdev_to_dev(rdev),
				"Move QP = %p out of flush list\n",
				qp);
			bnxt_qplib_del_flush_qp(&qp->qplib_qp);
		}
	}
	if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
		qp->qplib_qp.modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_EN_SQD_ASYNC_NOTIFY;
		qp->qplib_qp.en_sqd_async_notify = true;
	}
	if (qp_attr_mask & IB_QP_ACCESS_FLAGS) {
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS;
		qp->qplib_qp.access =
			__from_ib_access_flags(qp_attr->qp_access_flags);
		/* LOCAL_WRITE access must be set to allow RC receive */
		qp->qplib_qp.access |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
	}
	if (qp_attr_mask & IB_QP_PKEY_INDEX) {
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
		qp->qplib_qp.pkey_index = qp_attr->pkey_index;
	}
	if (qp_attr_mask & IB_QP_QKEY) {
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
		qp->qplib_qp.qkey = qp_attr->qkey;
	}
	if (qp_attr_mask & IB_QP_AV) {
		const struct ib_global_route *grh =
			rdma_ah_read_grh(&qp_attr->ah_attr);

		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
				     CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
				     CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
				     CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
				     CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
				     CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
				     CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
		memcpy(qp->qplib_qp.ah.dgid.data, grh->dgid.raw,
		       sizeof(qp->qplib_qp.ah.dgid.data));
		qp->qplib_qp.ah.flow_label = grh->flow_label;
		/* If RoCE V2 is enabled, stack will have two entries for
		 * each GID entry. Avoiding this duplicate entry in HW. Dividing
		 * the GID index by 2 for RoCE V2
		 */
		qp->qplib_qp.ah.sgid_index = grh->sgid_index / 2;
		qp->qplib_qp.ah.host_sgid_index = grh->sgid_index;
		qp->qplib_qp.ah.hop_limit = grh->hop_limit;
		qp->qplib_qp.ah.traffic_class = grh->traffic_class;
		qp->qplib_qp.ah.sl = rdma_ah_get_sl(&qp_attr->ah_attr);
		ether_addr_copy(qp->qplib_qp.ah.dmac,
				qp_attr->ah_attr.roce.dmac);

		status = ib_get_cached_gid(&rdev->ibdev, 1,
					   grh->sgid_index,
					   &sgid, &sgid_attr);
		if (!status && sgid_attr.ndev) {
			memcpy(qp->qplib_qp.smac, sgid_attr.ndev->dev_addr,
			       ETH_ALEN);
			dev_put(sgid_attr.ndev);
			nw_type = ib_gid_to_network_type(sgid_attr.gid_type,
							 &sgid);
			switch (nw_type) {
			case RDMA_NETWORK_IPV4:
				qp->qplib_qp.nw_type =
					CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4;
				break;
			case RDMA_NETWORK_IPV6:
				qp->qplib_qp.nw_type =
					CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6;
				break;
			default:
				qp->qplib_qp.nw_type =
					CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV1;
				break;
			}
		}
	}

	if (qp_attr_mask & IB_QP_PATH_MTU) {
		qp->qplib_qp.modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
		qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu);
		qp->qplib_qp.mtu = ib_mtu_enum_to_int(qp_attr->path_mtu);
	} else if (qp_attr->qp_state == IB_QPS_RTR) {
		qp->qplib_qp.modify_flags |=
			CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
		qp->qplib_qp.path_mtu =
			__from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu));
		qp->qplib_qp.mtu =
			ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
	}

	if (qp_attr_mask & IB_QP_TIMEOUT) {
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT;
		qp->qplib_qp.timeout = qp_attr->timeout;
	}
	if (qp_attr_mask & IB_QP_RETRY_CNT) {
		qp->qplib_qp.modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT;
		qp->qplib_qp.retry_cnt = qp_attr->retry_cnt;
	}
	if (qp_attr_mask & IB_QP_RNR_RETRY) {
		qp->qplib_qp.modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY;
		qp->qplib_qp.rnr_retry = qp_attr->rnr_retry;
	}
	if (qp_attr_mask & IB_QP_MIN_RNR_TIMER) {
		qp->qplib_qp.modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER;
		qp->qplib_qp.min_rnr_timer = qp_attr->min_rnr_timer;
	}
	if (qp_attr_mask & IB_QP_RQ_PSN) {
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN;
		qp->qplib_qp.rq.psn = qp_attr->rq_psn;
	}
	if (qp_attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		qp->qplib_qp.modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC;
		/* Cap the max_rd_atomic to device max */
		qp->qplib_qp.max_rd_atomic = min_t(u32, qp_attr->max_rd_atomic,
						   dev_attr->max_qp_rd_atom);
	}
	if (qp_attr_mask & IB_QP_SQ_PSN) {
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
		qp->qplib_qp.sq.psn = qp_attr->sq_psn;
	}
	if (qp_attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		if (qp_attr->max_dest_rd_atomic >
		    dev_attr->max_qp_init_rd_atom) {
			dev_err(rdev_to_dev(rdev),
				"max_dest_rd_atomic requested%d is > dev_max%d",
				qp_attr->max_dest_rd_atomic,
				dev_attr->max_qp_init_rd_atom);
			return -EINVAL;
		}

		qp->qplib_qp.modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC;
		qp->qplib_qp.max_dest_rd_atomic = qp_attr->max_dest_rd_atomic;
	}
	if (qp_attr_mask & IB_QP_CAP) {
		qp->qplib_qp.modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SIZE |
				CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SIZE |
				CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SGE |
				CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SGE |
				CMDQ_MODIFY_QP_MODIFY_MASK_MAX_INLINE_DATA;
		if ((qp_attr->cap.max_send_wr >= dev_attr->max_qp_wqes) ||
		    (qp_attr->cap.max_recv_wr >= dev_attr->max_qp_wqes) ||
		    (qp_attr->cap.max_send_sge >= dev_attr->max_qp_sges) ||
		    (qp_attr->cap.max_recv_sge >= dev_attr->max_qp_sges) ||
		    (qp_attr->cap.max_inline_data >=
						dev_attr->max_inline_data)) {
			dev_err(rdev_to_dev(rdev),
				"Create QP failed - max exceeded");
			return -EINVAL;
		}
		entries = roundup_pow_of_two(qp_attr->cap.max_send_wr);
		qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
						dev_attr->max_qp_wqes + 1);
		qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
						qp_attr->cap.max_send_wr;
		/*
		 * Reserving one slot for Phantom WQE. Some application can
		 * post one extra entry in this case. Allowing this to avoid
		 * unexpected Queue full condition
		 */
		qp->qplib_qp.sq.q_full_delta -= 1;
		qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge;
		if (qp->qplib_qp.rq.max_wqe) {
			entries = roundup_pow_of_two(qp_attr->cap.max_recv_wr);
			qp->qplib_qp.rq.max_wqe =
				min_t(u32, entries, dev_attr->max_qp_wqes + 1);
			qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
						       qp_attr->cap.max_recv_wr;
			qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge;
		} else {
			/* SRQ was used prior, just ignore the RQ caps */
		}
	}
	if (qp_attr_mask & IB_QP_DEST_QPN) {
		qp->qplib_qp.modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID;
		qp->qplib_qp.dest_qpn = qp_attr->dest_qp_num;
	}
	rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Failed to modify HW QP");
		return rc;
	}
	if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp)
		rc = bnxt_re_modify_shadow_qp(rdev, qp, qp_attr_mask);
	return rc;
}
int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
		     int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
	struct bnxt_re_dev *rdev = qp->rdev;
	struct bnxt_qplib_qp *qplib_qp;
	int rc;

	qplib_qp = kzalloc(sizeof(*qplib_qp), GFP_KERNEL);
	if (!qplib_qp)
		return -ENOMEM;

	qplib_qp->id = qp->qplib_qp.id;
	qplib_qp->ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index;

	rc = bnxt_qplib_query_qp(&rdev->qplib_res, qplib_qp);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Failed to query HW QP");
		goto out;
	}
	qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state);
	qp_attr->en_sqd_async_notify = qplib_qp->en_sqd_async_notify ? 1 : 0;
	qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp->access);
	qp_attr->pkey_index = qplib_qp->pkey_index;
	qp_attr->qkey = qplib_qp->qkey;
	qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
	rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp->ah.flow_label,
			qplib_qp->ah.host_sgid_index,
			qplib_qp->ah.hop_limit,
			qplib_qp->ah.traffic_class);
	rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp->ah.dgid.data);
	rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp->ah.sl);
	ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp->ah.dmac);
	qp_attr->path_mtu = __to_ib_mtu(qplib_qp->path_mtu);
	qp_attr->timeout = qplib_qp->timeout;
	qp_attr->retry_cnt = qplib_qp->retry_cnt;
	qp_attr->rnr_retry = qplib_qp->rnr_retry;
	qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer;
	qp_attr->rq_psn = qplib_qp->rq.psn;
	qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic;
	qp_attr->sq_psn = qplib_qp->sq.psn;
	qp_attr->max_dest_rd_atomic = qplib_qp->max_dest_rd_atomic;
	qp_init_attr->sq_sig_type = qplib_qp->sig_type ? IB_SIGNAL_ALL_WR :
							 IB_SIGNAL_REQ_WR;
	qp_attr->dest_qp_num = qplib_qp->dest_qpn;

	qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe;
	qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge;
	qp_attr->cap.max_recv_wr = qp->qplib_qp.rq.max_wqe;
	qp_attr->cap.max_recv_sge = qp->qplib_qp.rq.max_sge;
	qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data;
	qp_init_attr->cap = qp_attr->cap;

out:
	kfree(qplib_qp);
	return rc;
}
/* Routine for sending QP1 packets for RoCE V1 and V2
 */
static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
				     struct ib_send_wr *wr,
				     struct bnxt_qplib_swqe *wqe,
				     int payload_size)
{
	struct ib_device *ibdev = &qp->rdev->ibdev;
	struct bnxt_re_ah *ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah,
					     ib_ah);
	struct bnxt_qplib_ah *qplib_ah = &ah->qplib_ah;
	struct bnxt_qplib_sge sge;
	union ib_gid sgid;
	u8 nw_type;
	u16 ether_type;
	struct ib_gid_attr sgid_attr;
	union ib_gid dgid;
	bool is_eth = false;
	bool is_vlan = false;
	bool is_grh = false;
	bool is_udp = false;
	u8 ip_version = 0;
	u16 vlan_id = 0xFFFF;
	void *buf;
	int i, rc = 0;

	memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr));

	rc = ib_get_cached_gid(ibdev, 1,
			       qplib_ah->host_sgid_index, &sgid,
			       &sgid_attr);
	if (rc) {
		dev_err(rdev_to_dev(qp->rdev),
			"Failed to query gid at index %d",
			qplib_ah->host_sgid_index);
		return rc;
	}
	if (sgid_attr.ndev) {
		if (is_vlan_dev(sgid_attr.ndev))
			vlan_id = vlan_dev_vlan_id(sgid_attr.ndev);
		dev_put(sgid_attr.ndev);
	}
	/* Get network header type for this GID */
	nw_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
	switch (nw_type) {
	case RDMA_NETWORK_IPV4:
		nw_type = BNXT_RE_ROCEV2_IPV4_PACKET;
		break;
	case RDMA_NETWORK_IPV6:
		nw_type = BNXT_RE_ROCEV2_IPV6_PACKET;
		break;
	default:
		nw_type = BNXT_RE_ROCE_V1_PACKET;
		break;
	}
	memcpy(&dgid.raw, &qplib_ah->dgid, 16);
	is_udp = sgid_attr.gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP;
	if (is_udp) {
		if (ipv6_addr_v4mapped((struct in6_addr *)&sgid)) {
			ip_version = 4;
			ether_type = ETH_P_IP;
		} else {
			ip_version = 6;
			ether_type = ETH_P_IPV6;
		}
		is_grh = false;
	} else {
		ether_type = ETH_P_IBOE;
		is_grh = true;
	}

	is_eth = true;
	is_vlan = (vlan_id && (vlan_id < 0x1000)) ? true : false;

	ib_ud_header_init(payload_size, !is_eth, is_eth, is_vlan, is_grh,
			  ip_version, is_udp, 0, &qp->qp1_hdr);

	/* ETH */
	ether_addr_copy(qp->qp1_hdr.eth.dmac_h, ah->qplib_ah.dmac);
	ether_addr_copy(qp->qp1_hdr.eth.smac_h, qp->qplib_qp.smac);

	/* For vlan, check the sgid for vlan existence */
	if (!is_vlan) {
		qp->qp1_hdr.eth.type = cpu_to_be16(ether_type);
	} else {
		qp->qp1_hdr.vlan.type = cpu_to_be16(ether_type);
		qp->qp1_hdr.vlan.tag = cpu_to_be16(vlan_id);
	}

	if (is_grh || (ip_version == 6)) {
		memcpy(qp->qp1_hdr.grh.source_gid.raw, sgid.raw, sizeof(sgid));
		memcpy(qp->qp1_hdr.grh.destination_gid.raw, qplib_ah->dgid.data,
		       sizeof(sgid));
		qp->qp1_hdr.grh.hop_limit = qplib_ah->hop_limit;
	}

	if (ip_version == 4) {
		qp->qp1_hdr.ip4.tos = 0;
		qp->qp1_hdr.ip4.id = 0;
		qp->qp1_hdr.ip4.frag_off = htons(IP_DF);
		qp->qp1_hdr.ip4.ttl = qplib_ah->hop_limit;

		memcpy(&qp->qp1_hdr.ip4.saddr, sgid.raw + 12, 4);
		memcpy(&qp->qp1_hdr.ip4.daddr, qplib_ah->dgid.data + 12, 4);
		qp->qp1_hdr.ip4.check = ib_ud_ip4_csum(&qp->qp1_hdr);
	}

	if (is_udp) {
		qp->qp1_hdr.udp.dport = htons(ROCE_V2_UDP_DPORT);
		qp->qp1_hdr.udp.sport = htons(0x8CD1);
		qp->qp1_hdr.udp.csum = 0;
	}

	/* BTH */
	if (wr->opcode == IB_WR_SEND_WITH_IMM) {
		qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
		qp->qp1_hdr.immediate_present = 1;
	} else {
		qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
	}
	if (wr->send_flags & IB_SEND_SOLICITED)
		qp->qp1_hdr.bth.solicited_event = 1;
	/* pad_count */
	qp->qp1_hdr.bth.pad_count = (4 - payload_size) & 3;

	/* P_key for QP1 is for all members */
	qp->qp1_hdr.bth.pkey = cpu_to_be16(0xFFFF);
	qp->qp1_hdr.bth.destination_qpn = IB_QP1;
	qp->qp1_hdr.bth.ack_req = 0;
	qp->send_psn++;
	qp->send_psn &= BTH_PSN_MASK;
	qp->qp1_hdr.bth.psn = cpu_to_be32(qp->send_psn);
	/* DETH */
	/* Use the privileged Q_Key for QP1 */
	qp->qp1_hdr.deth.qkey = cpu_to_be32(IB_QP1_QKEY);
	qp->qp1_hdr.deth.source_qpn = IB_QP1;

	/* Pack the QP1 header into the transmit buffer */
	buf = bnxt_qplib_get_qp1_sq_buf(&qp->qplib_qp, &sge);
	if (buf) {
		ib_ud_header_pack(&qp->qp1_hdr, buf);
		for (i = wqe->num_sge; i; i--) {
			wqe->sg_list[i].addr = wqe->sg_list[i - 1].addr;
			wqe->sg_list[i].lkey = wqe->sg_list[i - 1].lkey;
			wqe->sg_list[i].size = wqe->sg_list[i - 1].size;
		}

		/*
		 * Max Header buf size for IPV6 RoCE V2 is 86,
		 * which is same as the QP1 SQ header buffer.
		 * Header buf size for IPV4 RoCE V2 can be 66.
		 * ETH(14) + VLAN(4)+ IP(20) + UDP (8) + BTH(20).
		 * Subtract 20 bytes from QP1 SQ header buf size
		 */
		if (is_udp && ip_version == 4)
			sge.size -= 20;
		/*
		 * Max Header buf size for RoCE V1 is 78.
		 * ETH(14) + VLAN(4) + GRH(40) + BTH(20).
		 * Subtract 8 bytes from QP1 SQ header buf size
		 */
		if (!is_udp)
			sge.size -= 8;

		/* Subtract 4 bytes for non vlan packets */
		if (!is_vlan)
			sge.size -= 4;

		wqe->sg_list[0].addr = sge.addr;
		wqe->sg_list[0].lkey = sge.lkey;
		wqe->sg_list[0].size = sge.size;
		wqe->num_sge++;

	} else {
		dev_err(rdev_to_dev(qp->rdev), "QP1 buffer is empty!");
		rc = -ENOMEM;
	}
	return rc;
}
/* For the MAD layer, it only provides the recv SGE the size of
 * ib_grh + MAD datagram.  No Ethernet headers, Ethertype, BTH, DETH,
 * nor RoCE iCRC.  The Cu+ solution must provide buffer for the entire
 * receive packet (334 bytes) with no VLAN and then copy the GRH
 * and the MAD datagram out to the provided SGE.
 */
static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp,
					    struct ib_recv_wr *wr,
					    struct bnxt_qplib_swqe *wqe,
					    int payload_size)
{
	struct bnxt_qplib_sge ref, sge;
	u32 rq_prod_index;
	struct bnxt_re_sqp_entries *sqp_entry;

	rq_prod_index = bnxt_qplib_get_rq_prod_index(&qp->qplib_qp);

	if (!bnxt_qplib_get_qp1_rq_buf(&qp->qplib_qp, &sge))
		return -ENOMEM;

	/* Create 1 SGE to receive the entire
	 * ethernet packet
	 */
	/* Save the reference from ULP */
	ref.addr = wqe->sg_list[0].addr;
	ref.lkey = wqe->sg_list[0].lkey;
	ref.size = wqe->sg_list[0].size;

	sqp_entry = &qp->rdev->sqp_tbl[rq_prod_index];

	/* SGE 1 */
	wqe->sg_list[0].addr = sge.addr;
	wqe->sg_list[0].lkey = sge.lkey;
	wqe->sg_list[0].size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
	sge.size -= wqe->sg_list[0].size;

	sqp_entry->sge.addr = ref.addr;
	sqp_entry->sge.lkey = ref.lkey;
	sqp_entry->sge.size = ref.size;
	/* Store the wrid for reporting completion */
	sqp_entry->wrid = wqe->wr_id;
	/* change the wqe->wrid to table index */
	wqe->wr_id = rq_prod_index;
	return 0;
}
static int is_ud_qp(struct bnxt_re_qp *qp)
{
	return qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD;
}
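
/* The bnxt_re_build_*_wqe() helpers below translate a generic ib_send_wr
 * into the driver's bnxt_qplib_swqe form: the IB opcode selects the SWQE
 * type, opcode-specific fields (immediate data, invalidate rkeys, remote
 * address/rkey) are copied across, and the IB_SEND_* flags are mapped to
 * the corresponding BNXT_QPLIB_SWQE_FLAGS_* bits.
 */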
static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp,
				  struct ib_send_wr *wr,
				  struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_re_ah *ah = NULL;

	if (is_ud_qp(qp)) {
		ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah, ib_ah);
		wqe->send.q_key = ud_wr(wr)->remote_qkey;
		wqe->send.dst_qp = ud_wr(wr)->remote_qpn;
		wqe->send.avid = ah->qplib_ah.id;
	}
	switch (wr->opcode) {
	case IB_WR_SEND:
		wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND;
		break;
	case IB_WR_SEND_WITH_IMM:
		wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM;
		wqe->send.imm_data = wr->ex.imm_data;
		break;
	case IB_WR_SEND_WITH_INV:
		wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV;
		wqe->send.inv_key = wr->ex.invalidate_rkey;
		break;
	default:
		return -EINVAL;
	}
	if (wr->send_flags & IB_SEND_SIGNALED)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
	if (wr->send_flags & IB_SEND_FENCE)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
	if (wr->send_flags & IB_SEND_SOLICITED)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
	if (wr->send_flags & IB_SEND_INLINE)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;

	return 0;
}
static int bnxt_re_build_rdma_wqe(struct ib_send_wr *wr,
				  struct bnxt_qplib_swqe *wqe)
{
	switch (wr->opcode) {
	case IB_WR_RDMA_WRITE:
		wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE;
		break;
	case IB_WR_RDMA_WRITE_WITH_IMM:
		wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM;
		wqe->rdma.imm_data = wr->ex.imm_data;
		break;
	case IB_WR_RDMA_READ:
		wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_READ;
		wqe->rdma.inv_key = wr->ex.invalidate_rkey;
		break;
	default:
		return -EINVAL;
	}
	wqe->rdma.remote_va = rdma_wr(wr)->remote_addr;
	wqe->rdma.r_key = rdma_wr(wr)->rkey;
	if (wr->send_flags & IB_SEND_SIGNALED)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
	if (wr->send_flags & IB_SEND_FENCE)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
	if (wr->send_flags & IB_SEND_SOLICITED)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
	if (wr->send_flags & IB_SEND_INLINE)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;

	return 0;
}
static int bnxt_re_build_atomic_wqe(struct ib_send_wr *wr,
				    struct bnxt_qplib_swqe *wqe)
{
	switch (wr->opcode) {
	case IB_WR_ATOMIC_CMP_AND_SWP:
		wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP;
		wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
		wqe->atomic.swap_data = atomic_wr(wr)->swap;
		break;
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD;
		wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
		break;
	default:
		return -EINVAL;
	}
	wqe->atomic.remote_va = atomic_wr(wr)->remote_addr;
	wqe->atomic.r_key = atomic_wr(wr)->rkey;
	if (wr->send_flags & IB_SEND_SIGNALED)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
	if (wr->send_flags & IB_SEND_FENCE)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
	if (wr->send_flags & IB_SEND_SOLICITED)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;

	return 0;
}
static int bnxt_re_build_inv_wqe(struct ib_send_wr *wr,
				 struct bnxt_qplib_swqe *wqe)
{
	wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
	wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey;

	if (wr->send_flags & IB_SEND_SIGNALED)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
	if (wr->send_flags & IB_SEND_FENCE)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
	if (wr->send_flags & IB_SEND_SOLICITED)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;

	return 0;
}
static int bnxt_re_build_reg_wqe(struct ib_reg_wr *wr,
				 struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_re_mr *mr = container_of(wr->mr, struct bnxt_re_mr, ib_mr);
	struct bnxt_qplib_frpl *qplib_frpl = &mr->qplib_frpl;
	int access = wr->access;

	wqe->frmr.pbl_ptr = (__le64 *)qplib_frpl->hwq.pbl_ptr[0];
	wqe->frmr.pbl_dma_ptr = qplib_frpl->hwq.pbl_dma_ptr[0];
	wqe->frmr.page_list = mr->pages;
	wqe->frmr.page_list_len = mr->npages;
	wqe->frmr.levels = qplib_frpl->hwq.level + 1;
	wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;

	if (wr->wr.send_flags & IB_SEND_FENCE)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
	if (wr->wr.send_flags & IB_SEND_SIGNALED)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;

	if (access & IB_ACCESS_LOCAL_WRITE)
		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
	if (access & IB_ACCESS_REMOTE_READ)
		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_READ;
	if (access & IB_ACCESS_REMOTE_WRITE)
		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_WRITE;
	if (access & IB_ACCESS_REMOTE_ATOMIC)
		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_ATOMIC;
	if (access & IB_ACCESS_MW_BIND)
		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_WINDOW_BIND;

	wqe->frmr.l_key = wr->key;
	wqe->frmr.length = wr->mr->length;
	wqe->frmr.pbl_pg_sz_log = (wr->mr->page_size >> PAGE_SHIFT_4K) - 1;
	wqe->frmr.va = wr->mr->iova;
	return 0;
}
static int bnxt_re_copy_inline_data(struct bnxt_re_dev *rdev,
				    struct ib_send_wr *wr,
				    struct bnxt_qplib_swqe *wqe)
{
	/* Copy the inline data to the data field */
	u8 *in_data;
	u32 i, sge_len;
	void *sge_addr;

	in_data = wqe->inline_data;
	for (i = 0; i < wr->num_sge; i++) {
		sge_addr = (void *)(unsigned long)
				wr->sg_list[i].addr;
		sge_len = wr->sg_list[i].length;

		if ((sge_len + wqe->inline_len) >
		    BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) {
			dev_err(rdev_to_dev(rdev),
				"Inline data size requested > supported value");
			return -EINVAL;
		}
		sge_len = wr->sg_list[i].length;

		memcpy(in_data, sge_addr, sge_len);
		in_data += wr->sg_list[i].length;
		wqe->inline_len += wr->sg_list[i].length;
	}
	return wqe->inline_len;
}
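
/* Inline sends have their payload copied straight into the SWQE by
 * bnxt_re_copy_inline_data(); everything else is described by an SGL
 * built from the caller's sg_list. A negative return means the inline
 * data exceeded BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH.
 */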
static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev,
				   struct ib_send_wr *wr,
				   struct bnxt_qplib_swqe *wqe)
{
	int payload_sz = 0;

	if (wr->send_flags & IB_SEND_INLINE)
		payload_sz = bnxt_re_copy_inline_data(rdev, wr, wqe);
	else
		payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe->sg_list,
					       wqe->num_sge);

	return payload_sz;
}
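
/* Workaround for a UD/GSI/raw-Ethertype QP hardware stall: once wqe_cnt
 * reaches BNXT_RE_UD_QP_HW_STALL, the QP is modified (back) to RTS, which
 * kicks the hardware, and the counter is reset.
 */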
static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp)
{
	if ((qp->ib_qp.qp_type == IB_QPT_UD ||
	     qp->ib_qp.qp_type == IB_QPT_GSI ||
	     qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) &&
	    qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) {
		int qp_attr_mask;
		struct ib_qp_attr qp_attr;

		qp_attr_mask = IB_QP_STATE;
		qp_attr.qp_state = IB_QPS_RTS;
		bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL);
		qp->qplib_qp.wqe_cnt = 0;
	}
}
static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
				       struct bnxt_re_qp *qp,
				       struct ib_send_wr *wr)
{
	struct bnxt_qplib_swqe wqe;
	int rc = 0, payload_sz = 0;
	unsigned long flags;

	spin_lock_irqsave(&qp->sq_lock, flags);
	memset(&wqe, 0, sizeof(wqe));
	while (wr) {
		/* House keeping */
		memset(&wqe, 0, sizeof(wqe));

		/* Common */
		wqe.num_sge = wr->num_sge;
		if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
			dev_err(rdev_to_dev(rdev),
				"Limit exceeded for Send SGEs");
			rc = -EINVAL;
			goto bad;
		}

		payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
		if (payload_sz < 0) {
			rc = -EINVAL;
			goto bad;
		}
		wqe.wr_id = wr->wr_id;

		wqe.type = BNXT_QPLIB_SWQE_TYPE_SEND;

		rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
		if (!rc)
			rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
bad:
		if (rc) {
			dev_err(rdev_to_dev(rdev),
				"Post send failed opcode = %#x rc = %d",
				wr->opcode, rc);
			break;
		}
		wr = wr->next;
	}
	bnxt_qplib_post_send_db(&qp->qplib_qp);
	bnxt_ud_qp_hw_stall_workaround(qp);
	spin_unlock_irqrestore(&qp->sq_lock, flags);
	return rc;
}
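
/* Verbs post_send entry point: walk the WR chain under sq_lock, build one
 * SWQE per WR (with extra QP1/GSI header handling for sends), hand it to
 * the qplib layer and ring the SQ doorbell once after the loop.
 */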
int bnxt_re_post_send(struct ib_qp *ib_qp, struct ib_send_wr *wr,
		      struct ib_send_wr **bad_wr)
{
	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
	struct bnxt_qplib_swqe wqe;
	int rc = 0, payload_sz = 0;
	unsigned long flags;

	spin_lock_irqsave(&qp->sq_lock, flags);
	while (wr) {
		/* House keeping */
		memset(&wqe, 0, sizeof(wqe));

		/* Common */
		wqe.num_sge = wr->num_sge;
		if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
			dev_err(rdev_to_dev(qp->rdev),
				"Limit exceeded for Send SGEs");
			rc = -EINVAL;
			goto bad;
		}

		payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
		if (payload_sz < 0) {
			rc = -EINVAL;
			goto bad;
		}
		wqe.wr_id = wr->wr_id;

		switch (wr->opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_IMM:
			if (ib_qp->qp_type == IB_QPT_GSI) {
				rc = bnxt_re_build_qp1_send_v2(qp, wr, &wqe,
							       payload_sz);
				if (rc)
					goto bad;
				wqe.rawqp1.lflags |=
					SQ_SEND_RAWETH_QP1_LFLAGS_ROCE_CRC;
			}
			switch (wr->send_flags) {
			case IB_SEND_IP_CSUM:
				wqe.rawqp1.lflags |=
					SQ_SEND_RAWETH_QP1_LFLAGS_IP_CHKSUM;
				break;
			default:
				break;
			}
			/* Fall thru to build the wqe */
		case IB_WR_SEND_WITH_INV:
			rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
			break;
		case IB_WR_RDMA_WRITE:
		case IB_WR_RDMA_WRITE_WITH_IMM:
		case IB_WR_RDMA_READ:
			rc = bnxt_re_build_rdma_wqe(wr, &wqe);
			break;
		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			rc = bnxt_re_build_atomic_wqe(wr, &wqe);
			break;
		case IB_WR_RDMA_READ_WITH_INV:
			dev_err(rdev_to_dev(qp->rdev),
				"RDMA Read with Invalidate is not supported");
			rc = -EINVAL;
			goto bad;
		case IB_WR_LOCAL_INV:
			rc = bnxt_re_build_inv_wqe(wr, &wqe);
			break;
		case IB_WR_REG_MR:
			rc = bnxt_re_build_reg_wqe(reg_wr(wr), &wqe);
			break;
		default:
			/* Unsupported WRs */
			dev_err(rdev_to_dev(qp->rdev),
				"WR (%#x) is not supported", wr->opcode);
			rc = -EINVAL;
			goto bad;
		}
		if (!rc)
			rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
bad:
		if (rc) {
			dev_err(rdev_to_dev(qp->rdev),
				"post_send failed op:%#x qps = %#x rc = %d\n",
				wr->opcode, qp->qplib_qp.state, rc);
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}
	bnxt_qplib_post_send_db(&qp->qplib_qp);
	bnxt_ud_qp_hw_stall_workaround(qp);
	spin_unlock_irqrestore(&qp->sq_lock, flags);

	return rc;
}
static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
				       struct bnxt_re_qp *qp,
				       struct ib_recv_wr *wr)
{
	struct bnxt_qplib_swqe wqe;
	int rc = 0;

	memset(&wqe, 0, sizeof(wqe));
	while (wr) {
		/* House keeping */
		memset(&wqe, 0, sizeof(wqe));

		/* Common */
		wqe.num_sge = wr->num_sge;
		if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
			dev_err(rdev_to_dev(rdev),
				"Limit exceeded for Receive SGEs");
			rc = -EINVAL;
			break;
		}
		bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge);
		wqe.wr_id = wr->wr_id;
		wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;

		rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
		if (rc)
			break;

		wr = wr->next;
	}
	if (!rc)
		bnxt_qplib_post_recv_db(&qp->qplib_qp);
	return rc;
}
int bnxt_re_post_recv(struct ib_qp *ib_qp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr)
{
	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
	struct bnxt_qplib_swqe wqe;
	int rc = 0, payload_sz = 0;
	unsigned long flags;
	u32 count = 0;

	spin_lock_irqsave(&qp->rq_lock, flags);
	while (wr) {
		/* House keeping */
		memset(&wqe, 0, sizeof(wqe));

		/* Common */
		wqe.num_sge = wr->num_sge;
		if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
			dev_err(rdev_to_dev(qp->rdev),
				"Limit exceeded for Receive SGEs");
			rc = -EINVAL;
			*bad_wr = wr;
			break;
		}

		payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
					       wr->num_sge);
		wqe.wr_id = wr->wr_id;
		wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;

		if (ib_qp->qp_type == IB_QPT_GSI)
			rc = bnxt_re_build_qp1_shadow_qp_recv(qp, wr, &wqe,
							      payload_sz);
		if (!rc)
			rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
		if (rc) {
			*bad_wr = wr;
			break;
		}

		/* Ring DB if the RQEs posted reaches a threshold value */
		if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) {
			bnxt_qplib_post_recv_db(&qp->qplib_qp);
			count = 0;
		}

		wr = wr->next;
	}

	if (count)
		bnxt_qplib_post_recv_db(&qp->qplib_qp);

	spin_unlock_irqrestore(&qp->rq_lock, flags);

	return rc;
}
/* Completion Queues */
int bnxt_re_destroy_cq(struct ib_cq *ib_cq)
{
	int rc;
	struct bnxt_re_cq *cq;
	struct bnxt_qplib_nq *nq;
	struct bnxt_re_dev *rdev;

	cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
	rdev = cq->rdev;
	nq = cq->qplib_cq.nq;

	rc = bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Failed to destroy HW CQ");
		return rc;
	}
	if (!IS_ERR_OR_NULL(cq->umem))
		ib_umem_release(cq->umem);

	atomic_dec(&rdev->cq_count);
	nq->budget--;
	kfree(cq->cql);
	kfree(cq);

	return 0;
}
struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
				const struct ib_cq_init_attr *attr,
				struct ib_ucontext *context,
				struct ib_udata *udata)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
	struct bnxt_re_cq *cq = NULL;
	int rc, entries;
	int cqe = attr->cqe;
	struct bnxt_qplib_nq *nq = NULL;
	unsigned int nq_alloc_cnt;

	/* Validate CQ fields */
	if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
		dev_err(rdev_to_dev(rdev), "Failed to create CQ -max exceeded");
		return ERR_PTR(-EINVAL);
	}
	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	cq->rdev = rdev;
	cq->qplib_cq.cq_handle = (u64)(unsigned long)(&cq->qplib_cq);

	entries = roundup_pow_of_two(cqe + 1);
	if (entries > dev_attr->max_cq_wqes + 1)
		entries = dev_attr->max_cq_wqes + 1;

	if (context) {
		struct bnxt_re_cq_req req;
		struct bnxt_re_ucontext *uctx = container_of
						(context,
						 struct bnxt_re_ucontext,
						 ib_uctx);
		if (ib_copy_from_udata(&req, udata, sizeof(req))) {
			rc = -EFAULT;
			goto fail;
		}

		cq->umem = ib_umem_get(context, req.cq_va,
				       entries * sizeof(struct cq_base),
				       IB_ACCESS_LOCAL_WRITE, 1);
		if (IS_ERR(cq->umem)) {
			rc = PTR_ERR(cq->umem);
			goto fail;
		}
		cq->qplib_cq.sghead = cq->umem->sg_head.sgl;
		cq->qplib_cq.nmap = cq->umem->nmap;
		cq->qplib_cq.dpi = &uctx->dpi;
	} else {
		cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
		cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe),
				  GFP_KERNEL);
		if (!cq->cql) {
			rc = -ENOMEM;
			goto fail;
		}

		cq->qplib_cq.dpi = &rdev->dpi_privileged;
		cq->qplib_cq.sghead = NULL;
		cq->qplib_cq.nmap = 0;
	}
	/*
	 * Allocate the NQ in a round robin fashion. nq_alloc_cnt is used
	 * for getting the NQ index.
	 */
	nq_alloc_cnt = atomic_inc_return(&rdev->nq_alloc_cnt);
	nq = &rdev->nq[nq_alloc_cnt % (rdev->num_msix - 1)];
	cq->qplib_cq.max_wqe = entries;
	cq->qplib_cq.cnq_hw_ring_id = nq->ring_id;
	cq->qplib_cq.nq = nq;

	rc = bnxt_qplib_create_cq(&rdev->qplib_res, &cq->qplib_cq);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Failed to create HW CQ");
		goto fail;
	}

	cq->ib_cq.cqe = entries;
	cq->cq_period = cq->qplib_cq.period;
	nq->budget++;

	atomic_inc(&rdev->cq_count);

	if (context) {
		struct bnxt_re_cq_resp resp;

		resp.cqid = cq->qplib_cq.id;
		resp.tail = cq->qplib_cq.hwq.cons;
		resp.phase = cq->qplib_cq.period;
		rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
		if (rc) {
			dev_err(rdev_to_dev(rdev), "Failed to copy CQ udata");
			bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
			goto c2fail;
		}
	}

	return &cq->ib_cq;

c2fail:
	if (context)
		ib_umem_release(cq->umem);
fail:
	kfree(cq->cql);
	kfree(cq);
	return ERR_PTR(rc);
}
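
/* The next three helpers map hardware CQE status codes (requester, raw
 * QP1 responder and RC responder respectively) onto ib_wc_status values;
 * anything unrecognised is reported as IB_WC_GENERAL_ERR.
 */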
static u8 __req_to_ib_wc_status(u8 qstatus)
{
	switch (qstatus) {
	case CQ_REQ_STATUS_OK:
		return IB_WC_SUCCESS;
	case CQ_REQ_STATUS_BAD_RESPONSE_ERR:
		return IB_WC_BAD_RESP_ERR;
	case CQ_REQ_STATUS_LOCAL_LENGTH_ERR:
		return IB_WC_LOC_LEN_ERR;
	case CQ_REQ_STATUS_LOCAL_QP_OPERATION_ERR:
		return IB_WC_LOC_QP_OP_ERR;
	case CQ_REQ_STATUS_LOCAL_PROTECTION_ERR:
		return IB_WC_LOC_PROT_ERR;
	case CQ_REQ_STATUS_MEMORY_MGT_OPERATION_ERR:
		return IB_WC_GENERAL_ERR;
	case CQ_REQ_STATUS_REMOTE_INVALID_REQUEST_ERR:
		return IB_WC_REM_INV_REQ_ERR;
	case CQ_REQ_STATUS_REMOTE_ACCESS_ERR:
		return IB_WC_REM_ACCESS_ERR;
	case CQ_REQ_STATUS_REMOTE_OPERATION_ERR:
		return IB_WC_REM_OP_ERR;
	case CQ_REQ_STATUS_RNR_NAK_RETRY_CNT_ERR:
		return IB_WC_RNR_RETRY_EXC_ERR;
	case CQ_REQ_STATUS_TRANSPORT_RETRY_CNT_ERR:
		return IB_WC_RETRY_EXC_ERR;
	case CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR:
		return IB_WC_WR_FLUSH_ERR;
	default:
		return IB_WC_GENERAL_ERR;
	}
}
static u8 __rawqp1_to_ib_wc_status(u8 qstatus)
{
	switch (qstatus) {
	case CQ_RES_RAWETH_QP1_STATUS_OK:
		return IB_WC_SUCCESS;
	case CQ_RES_RAWETH_QP1_STATUS_LOCAL_ACCESS_ERROR:
		return IB_WC_LOC_ACCESS_ERR;
	case CQ_RES_RAWETH_QP1_STATUS_HW_LOCAL_LENGTH_ERR:
		return IB_WC_LOC_LEN_ERR;
	case CQ_RES_RAWETH_QP1_STATUS_LOCAL_PROTECTION_ERR:
		return IB_WC_LOC_PROT_ERR;
	case CQ_RES_RAWETH_QP1_STATUS_LOCAL_QP_OPERATION_ERR:
		return IB_WC_LOC_QP_OP_ERR;
	case CQ_RES_RAWETH_QP1_STATUS_MEMORY_MGT_OPERATION_ERR:
		return IB_WC_GENERAL_ERR;
	case CQ_RES_RAWETH_QP1_STATUS_WORK_REQUEST_FLUSHED_ERR:
		return IB_WC_WR_FLUSH_ERR;
	case CQ_RES_RAWETH_QP1_STATUS_HW_FLUSH_ERR:
		return IB_WC_WR_FLUSH_ERR;
	default:
		return IB_WC_GENERAL_ERR;
	}
}
static u8 __rc_to_ib_wc_status(u8 qstatus)
{
	switch (qstatus) {
	case CQ_RES_RC_STATUS_OK:
		return IB_WC_SUCCESS;
	case CQ_RES_RC_STATUS_LOCAL_ACCESS_ERROR:
		return IB_WC_LOC_ACCESS_ERR;
	case CQ_RES_RC_STATUS_LOCAL_LENGTH_ERR:
		return IB_WC_LOC_LEN_ERR;
	case CQ_RES_RC_STATUS_LOCAL_PROTECTION_ERR:
		return IB_WC_LOC_PROT_ERR;
	case CQ_RES_RC_STATUS_LOCAL_QP_OPERATION_ERR:
		return IB_WC_LOC_QP_OP_ERR;
	case CQ_RES_RC_STATUS_MEMORY_MGT_OPERATION_ERR:
		return IB_WC_GENERAL_ERR;
	case CQ_RES_RC_STATUS_REMOTE_INVALID_REQUEST_ERR:
		return IB_WC_REM_INV_REQ_ERR;
	case CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR:
		return IB_WC_WR_FLUSH_ERR;
	case CQ_RES_RC_STATUS_HW_FLUSH_ERR:
		return IB_WC_WR_FLUSH_ERR;
	default:
		return IB_WC_GENERAL_ERR;
	}
}
static void bnxt_re_process_req_wc(struct ib_wc *wc, struct bnxt_qplib_cqe *cqe)
{
	switch (cqe->type) {
	case BNXT_QPLIB_SWQE_TYPE_SEND:
		wc->opcode = IB_WC_SEND;
		break;
	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
		wc->opcode = IB_WC_SEND;
		wc->wc_flags |= IB_WC_WITH_IMM;
		break;
	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
		wc->opcode = IB_WC_SEND;
		wc->wc_flags |= IB_WC_WITH_INVALIDATE;
		break;
	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
		wc->opcode = IB_WC_RDMA_WRITE;
		break;
	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
		wc->opcode = IB_WC_RDMA_WRITE;
		wc->wc_flags |= IB_WC_WITH_IMM;
		break;
	case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
		wc->opcode = IB_WC_RDMA_READ;
		break;
	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
		wc->opcode = IB_WC_COMP_SWAP;
		break;
	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
		wc->opcode = IB_WC_FETCH_ADD;
		break;
	case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
		wc->opcode = IB_WC_LOCAL_INV;
		break;
	case BNXT_QPLIB_SWQE_TYPE_REG_MR:
		wc->opcode = IB_WC_REG_MR;
		break;
	default:
		wc->opcode = IB_WC_SEND;
		break;
	}

	wc->status = __req_to_ib_wc_status(cqe->status);
}
static int bnxt_re_check_packet_type(u16 raweth_qp1_flags,
				     u16 raweth_qp1_flags2)
{
	bool is_ipv6 = false, is_ipv4 = false;

	/* raweth_qp1_flags Bit 9-6 indicates itype */
	if ((raweth_qp1_flags & CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
	    != CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
		return -1;

	if (raweth_qp1_flags2 &
	    CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_CS_CALC &&
	    raweth_qp1_flags2 &
	    CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_L4_CS_CALC) {
		/* raweth_qp1_flags2 Bit 8 indicates ip_type. 0-v4 1 - v6 */
		(raweth_qp1_flags2 &
		 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_TYPE) ?
			(is_ipv6 = true) : (is_ipv4 = true);
		return ((is_ipv6) ?
			BNXT_RE_ROCEV2_IPV6_PACKET :
			BNXT_RE_ROCEV2_IPV4_PACKET);
	}
	return BNXT_RE_ROCE_V1_PACKET;
}
static int bnxt_re_to_ib_nw_type(int nw_type)
{
	u8 nw_hdr_type = 0xFF;

	switch (nw_type) {
	case BNXT_RE_ROCE_V1_PACKET:
		nw_hdr_type = RDMA_NETWORK_ROCE_V1;
		break;
	case BNXT_RE_ROCEV2_IPV4_PACKET:
		nw_hdr_type = RDMA_NETWORK_IPV4;
		break;
	case BNXT_RE_ROCEV2_IPV6_PACKET:
		nw_hdr_type = RDMA_NETWORK_IPV6;
		break;
	}
	return nw_hdr_type;
}
static bool bnxt_re_is_loopback_packet(struct bnxt_re_dev *rdev,
				       void *rq_hdr_buf)
{
	u8 *tmp_buf = NULL;
	struct ethhdr *eth_hdr;
	u16 eth_type;
	bool rc = false;

	tmp_buf = (u8 *)rq_hdr_buf;
	/*
	 * If dest mac is not same as I/F mac, this could be a
	 * loopback address or multicast address, check whether
	 * it is a loopback packet
	 */
	if (!ether_addr_equal(tmp_buf, rdev->netdev->dev_addr)) {
		tmp_buf += 4;
		/* Check the ether type */
		eth_hdr = (struct ethhdr *)tmp_buf;
		eth_type = ntohs(eth_hdr->h_proto);
		switch (eth_type) {
		case ETH_P_IBOE:
			rc = true;
			break;
		case ETH_P_IP:
		case ETH_P_IPV6: {
			u32 len;
			struct udphdr *udp_hdr;

			len = (eth_type == ETH_P_IP ? sizeof(struct iphdr) :
						      sizeof(struct ipv6hdr));
			tmp_buf += sizeof(struct ethhdr) + len;
			udp_hdr = (struct udphdr *)tmp_buf;
			if (ntohs(udp_hdr->dest) ==
				    ROCE_V2_UDP_DPORT)
				rc = true;
			break;
			}
		default:
			break;
		}
	}

	return rc;
}
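
/* GSI (QP1) receive relay: a raw QP1 packet lands in the QP1 RQ header
 * buffer and is re-posted through the shadow QP so the ULP eventually
 * sees a plain GRH + MAD payload. The send SGEs skip the Ethernet (and,
 * for RoCEv2, IP/UDP) headers, while the receive side reuses the SGE that
 * bnxt_re_build_qp1_shadow_qp_recv() stashed in sqp_tbl[].
 */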
static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *qp1_qp,
					 struct bnxt_qplib_cqe *cqe)
{
	struct bnxt_re_dev *rdev = qp1_qp->rdev;
	struct bnxt_re_sqp_entries *sqp_entry = NULL;
	struct bnxt_re_qp *qp = rdev->qp1_sqp;
	struct ib_send_wr *swr;
	struct ib_ud_wr udwr;
	struct ib_recv_wr rwr;
	int pkt_type = 0;
	u32 tbl_idx;
	void *rq_hdr_buf;
	dma_addr_t rq_hdr_buf_map;
	dma_addr_t shrq_hdr_buf_map;
	u32 offset = 0;
	u32 skip_bytes = 0;
	struct ib_sge s_sge[2];
	struct ib_sge r_sge[2];
	int rc;

	memset(&udwr, 0, sizeof(udwr));
	memset(&rwr, 0, sizeof(rwr));
	memset(&s_sge, 0, sizeof(s_sge));
	memset(&r_sge, 0, sizeof(r_sge));

	swr = &udwr.wr;
	tbl_idx = cqe->wr_id;

	rq_hdr_buf = qp1_qp->qplib_qp.rq_hdr_buf +
			(tbl_idx * qp1_qp->qplib_qp.rq_hdr_buf_size);
	rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp1_qp->qplib_qp,
							  tbl_idx);

	/* Shadow QP header buffer */
	shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp->qplib_qp,
							    tbl_idx);
	sqp_entry = &rdev->sqp_tbl[tbl_idx];

	/* Store this cqe */
	memcpy(&sqp_entry->cqe, cqe, sizeof(struct bnxt_qplib_cqe));
	sqp_entry->qp1_qp = qp1_qp;

	/* Find packet type from the cqe */
	pkt_type = bnxt_re_check_packet_type(cqe->raweth_qp1_flags,
					     cqe->raweth_qp1_flags2);
	if (pkt_type < 0) {
		dev_err(rdev_to_dev(rdev), "Invalid packet\n");
		return -EINVAL;
	}

	/* Adjust the offset for the user buffer and post in the rq */
	if (pkt_type == BNXT_RE_ROCEV2_IPV4_PACKET)
		offset = 20;

	/*
	 * QP1 loopback packet has 4 bytes of internal header before
	 * ether header. Skip these four bytes.
	 */
	if (bnxt_re_is_loopback_packet(rdev, rq_hdr_buf))
		skip_bytes = 4;

	/* First send SGE . Skip the ether header*/
	s_sge[0].addr = rq_hdr_buf_map + BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE
			+ skip_bytes;
	s_sge[0].lkey = 0xFFFFFFFF;
	s_sge[0].length = offset ? BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4 :
				BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;

	/* Second Send SGE */
	s_sge[1].addr = s_sge[0].addr + s_sge[0].length +
			BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE;
	if (pkt_type != BNXT_RE_ROCE_V1_PACKET)
		s_sge[1].addr += 8;
	s_sge[1].lkey = 0xFFFFFFFF;
	s_sge[1].length = 256;

	/* First recv SGE */
	r_sge[0].addr = shrq_hdr_buf_map;
	r_sge[0].lkey = 0xFFFFFFFF;
	r_sge[0].length = 40;

	r_sge[1].addr = sqp_entry->sge.addr + offset;
	r_sge[1].lkey = sqp_entry->sge.lkey;
	r_sge[1].length = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6 + 256 - offset;

	/* Create receive work request */
	rwr.num_sge = 2;
	rwr.sg_list = r_sge;
	rwr.wr_id = tbl_idx;
	rwr.next = NULL;

	rc = bnxt_re_post_recv_shadow_qp(rdev, qp, &rwr);
	if (rc) {
		dev_err(rdev_to_dev(rdev),
			"Failed to post Rx buffers to shadow QP");
		return -ENOMEM;
	}

	swr->num_sge = 2;
	swr->sg_list = s_sge;
	swr->wr_id = tbl_idx;
	swr->opcode = IB_WR_SEND;
	swr->next = NULL;

	udwr.ah = &rdev->sqp_ah->ib_ah;
	udwr.remote_qpn = rdev->qp1_sqp->qplib_qp.id;
	udwr.remote_qkey = rdev->qp1_sqp->qplib_qp.qkey;

	/* post data received in the send queue */
	rc = bnxt_re_post_send_shadow_qp(rdev, qp, swr);

	return 0;
}
static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc,
					  struct bnxt_qplib_cqe *cqe)
{
	wc->opcode = IB_WC_RECV;
	wc->status = __rawqp1_to_ib_wc_status(cqe->status);
	wc->wc_flags |= IB_WC_GRH;
}
static bool bnxt_re_is_vlan_pkt(struct bnxt_qplib_cqe *orig_cqe,
				u16 *vid, u8 *sl)
{
	bool ret = false;
	u32 metadata;
	u16 tpid;

	metadata = orig_cqe->raweth_qp1_metadata;
	if (orig_cqe->raweth_qp1_flags2 &
	    CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_VLAN) {
		tpid = ((metadata &
			 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_MASK) >>
			 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_SFT);
		if (tpid == ETH_P_8021Q) {
			*vid = metadata &
			       CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_VID_MASK;
			*sl = (metadata &
			       CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_MASK) >>
			       CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_SFT;
			ret = true;
		}
	}

	return ret;
}
static void bnxt_re_process_res_rc_wc(struct ib_wc *wc,
				      struct bnxt_qplib_cqe *cqe)
{
	wc->opcode = IB_WC_RECV;
	wc->status = __rc_to_ib_wc_status(cqe->status);

	if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
		wc->wc_flags |= IB_WC_WITH_IMM;
	if (cqe->flags & CQ_RES_RC_FLAGS_INV)
		wc->wc_flags |= IB_WC_WITH_INVALIDATE;
	if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
	    (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
		wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
}
static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *qp,
					     struct ib_wc *wc,
					     struct bnxt_qplib_cqe *cqe)
{
	struct bnxt_re_dev *rdev = qp->rdev;
	struct bnxt_re_qp *qp1_qp = NULL;
	struct bnxt_qplib_cqe *orig_cqe = NULL;
	struct bnxt_re_sqp_entries *sqp_entry = NULL;
	int nw_type;
	u32 tbl_idx;
	u16 vlan_id;
	u8 sl;

	tbl_idx = cqe->wr_id;

	sqp_entry = &rdev->sqp_tbl[tbl_idx];
	qp1_qp = sqp_entry->qp1_qp;
	orig_cqe = &sqp_entry->cqe;

	wc->wr_id = sqp_entry->wrid;
	wc->byte_len = orig_cqe->length;
	wc->qp = &qp1_qp->ib_qp;

	wc->ex.imm_data = orig_cqe->immdata;
	wc->src_qp = orig_cqe->src_qp;
	memcpy(wc->smac, orig_cqe->smac, ETH_ALEN);
	if (bnxt_re_is_vlan_pkt(orig_cqe, &vlan_id, &sl)) {
		wc->vlan_id = vlan_id;
		wc->sl = sl;
		wc->wc_flags |= IB_WC_WITH_VLAN;
	}
	wc->vendor_err = orig_cqe->status;

	wc->opcode = IB_WC_RECV;
	wc->status = __rawqp1_to_ib_wc_status(orig_cqe->status);
	wc->wc_flags |= IB_WC_GRH;

	nw_type = bnxt_re_check_packet_type(orig_cqe->raweth_qp1_flags,
					    orig_cqe->raweth_qp1_flags2);
	if (nw_type >= 0) {
		wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type);
		wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
	}
}
static void bnxt_re_process_res_ud_wc(struct ib_wc *wc,
				      struct bnxt_qplib_cqe *cqe)
{
	wc->opcode = IB_WC_RECV;
	wc->status = __rc_to_ib_wc_status(cqe->status);

	if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
		wc->wc_flags |= IB_WC_WITH_IMM;
	if (cqe->flags & CQ_RES_RC_FLAGS_INV)
		wc->wc_flags |= IB_WC_WITH_INVALIDATE;
	if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
	    (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
		wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
}
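
/* Post a fence memory-window bind as a "phantom" WQE on the SQ.
 * bnxt_re_poll_cq() calls this when the qplib layer sets send_phantom,
 * and only clears that flag once the bind was actually queued.
 */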
static int send_phantom_wqe(struct bnxt_re_qp *qp)
{
	struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp;
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&qp->sq_lock, flags);

	rc = bnxt_re_bind_fence_mw(lib_qp);
	if (!rc) {
		lib_qp->sq.phantom_wqe_cnt++;
		dev_dbg(&lib_qp->sq.hwq.pdev->dev,
			"qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n",
			lib_qp->id, lib_qp->sq.hwq.prod,
			HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq),
			lib_qp->sq.phantom_wqe_cnt);
	}

	spin_unlock_irqrestore(&qp->sq_lock, flags);
	return rc;
}
int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
{
	struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
	struct bnxt_re_qp *qp;
	struct bnxt_qplib_cqe *cqe;
	int i, ncqe, budget;
	struct bnxt_qplib_q *sq;
	struct bnxt_qplib_qp *lib_qp;
	u32 tbl_idx;
	struct bnxt_re_sqp_entries *sqp_entry = NULL;
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	budget = min_t(u32, num_entries, cq->max_cql);
	num_entries = budget;
	if (!cq->cql) {
		dev_err(rdev_to_dev(cq->rdev), "POLL CQ : no CQL to use");
		goto exit;
	}
	cqe = &cq->cql[0];
	while (budget) {
		lib_qp = NULL;
		ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp);
		if (lib_qp) {
			sq = &lib_qp->sq;
			if (sq->send_phantom) {
				qp = container_of(lib_qp,
						  struct bnxt_re_qp, qplib_qp);
				if (send_phantom_wqe(qp) == -ENOMEM)
					dev_err(rdev_to_dev(cq->rdev),
						"Phantom failed! Scheduled to send again\n");
				else
					sq->send_phantom = false;
			}
		}
		if (ncqe < budget)
			ncqe += bnxt_qplib_process_flush_list(&cq->qplib_cq,
							      cqe + ncqe,
							      budget - ncqe);

		if (!ncqe)
			break;

		for (i = 0; i < ncqe; i++, cqe++) {
			/* Transcribe each qplib_wqe back to ib_wc */
			memset(wc, 0, sizeof(*wc));

			wc->wr_id = cqe->wr_id;
			wc->byte_len = cqe->length;
			qp = container_of
				((struct bnxt_qplib_qp *)
				 (unsigned long)(cqe->qp_handle),
				 struct bnxt_re_qp, qplib_qp);
			if (!qp) {
				dev_err(rdev_to_dev(cq->rdev),
					"POLL CQ : bad QP handle");
				continue;
			}
			wc->qp = &qp->ib_qp;
			wc->ex.imm_data = cqe->immdata;
			wc->src_qp = cqe->src_qp;
			memcpy(wc->smac, cqe->smac, ETH_ALEN);
			wc->vendor_err = cqe->status;

			switch (cqe->opcode) {
			case CQ_BASE_CQE_TYPE_REQ:
				if (qp->qplib_qp.id ==
				    qp->rdev->qp1_sqp->qplib_qp.id) {
					/* Handle this completion with
					 * the stored completion
					 */
					memset(wc, 0, sizeof(*wc));
					continue;
				}
				bnxt_re_process_req_wc(wc, cqe);
				break;
			case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
				if (!cqe->status) {
					int rc = 0;

					rc = bnxt_re_process_raw_qp_pkt_rx
								(qp, cqe);
					if (!rc) {
						memset(wc, 0, sizeof(*wc));
						continue;
					}
				}
				/* Errors need not be looped back.
				 * But change the wr_id to the one
				 * stored in the table
				 */
				tbl_idx = cqe->wr_id;
				sqp_entry = &cq->rdev->sqp_tbl[tbl_idx];
				wc->wr_id = sqp_entry->wrid;
				bnxt_re_process_res_rawqp1_wc(wc, cqe);
				break;
			case CQ_BASE_CQE_TYPE_RES_RC:
				bnxt_re_process_res_rc_wc(wc, cqe);
				break;
			case CQ_BASE_CQE_TYPE_RES_UD:
				if (qp->qplib_qp.id ==
				    qp->rdev->qp1_sqp->qplib_qp.id) {
					/* Handle this completion with
					 * the stored completion
					 */
					bnxt_re_process_res_shadow_qp_wc
							(qp, wc, cqe);
					break;
				}
				bnxt_re_process_res_ud_wc(wc, cqe);
				break;
			default:
				dev_err(rdev_to_dev(cq->rdev),
					"POLL CQ : type 0x%x not handled",
					cqe->opcode);
				continue;
			}
			wc++;
			budget--;
		}
	}
exit:
	spin_unlock_irqrestore(&cq->cq_lock, flags);
	return num_entries - budget;
}
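
/* CQ arming: IB_CQ_NEXT_COMP maps to DBR_DBR_TYPE_CQ_ARMALL and
 * IB_CQ_SOLICITED to DBR_DBR_TYPE_CQ_ARMSE. With
 * IB_CQ_REPORT_MISSED_EVENTS the CQ is peeked first and 1 is returned if
 * completions are already pending.
 */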
int bnxt_re_req_notify_cq(struct ib_cq *ib_cq,
			  enum ib_cq_notify_flags ib_cqn_flags)
{
	struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
	int type = 0, rc = 0;
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	/* Trigger on the very next completion */
	if (ib_cqn_flags & IB_CQ_NEXT_COMP)
		type = DBR_DBR_TYPE_CQ_ARMALL;
	/* Trigger on the next solicited completion */
	else if (ib_cqn_flags & IB_CQ_SOLICITED)
		type = DBR_DBR_TYPE_CQ_ARMSE;

	/* Poll to see if there are missed events */
	if ((ib_cqn_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
	    !(bnxt_qplib_is_cq_empty(&cq->qplib_cq))) {
		rc = 1;
		goto exit;
	}
	bnxt_qplib_req_notify_cq(&cq->qplib_cq, type);

exit:
	spin_unlock_irqrestore(&cq->cq_lock, flags);
	return rc;
}
/* Memory Regions */
struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags)
{
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_re_mr *mr;
	u64 pbl = 0;
	int rc;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->rdev = rdev;
	mr->qplib_mr.pd = &pd->qplib_pd;
	mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;

	/* Allocate and register 0 as the address */
	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
	if (rc)
		goto fail;

	mr->qplib_mr.hwq.level = PBL_LVL_MAX;
	mr->qplib_mr.total_size = -1; /* Infinite length */
	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl, 0, false,
			       PAGE_SIZE);
	if (rc)
		goto fail_mr;

	mr->ib_mr.lkey = mr->qplib_mr.lkey;
	if (mr_access_flags & (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ |
			       IB_ACCESS_REMOTE_ATOMIC))
		mr->ib_mr.rkey = mr->ib_mr.lkey;
	atomic_inc(&rdev->mr_count);

	return &mr->ib_mr;

fail_mr:
	bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
fail:
	kfree(mr);
	return ERR_PTR(rc);
}
int bnxt_re_dereg_mr(struct ib_mr *ib_mr)
{
	struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
	struct bnxt_re_dev *rdev = mr->rdev;
	int rc;

	rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
	if (rc)
		dev_err(rdev_to_dev(rdev), "Dereg MR failed: %#x\n", rc);

	if (mr->pages) {
		rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
							&mr->qplib_frpl);
		kfree(mr->pages);
		mr->npages = 0;
		mr->pages = NULL;
	}
	if (!IS_ERR_OR_NULL(mr->ib_umem))
		ib_umem_release(mr->ib_umem);

	kfree(mr);
	atomic_dec(&rdev->mr_count);
	return rc;
}
static int bnxt_re_set_page(struct ib_mr *ib_mr, u64 addr)
{
	struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);

	if (unlikely(mr->npages == mr->qplib_frpl.max_pg_ptrs))
		return -ENOMEM;

	mr->pages[mr->npages++] = addr;
	return 0;
}
int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset)
{
	struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);

	mr->npages = 0;
	return ib_sg_to_pages(ib_mr, sg, sg_nents, sg_offset, bnxt_re_set_page);
}
struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
			       u32 max_num_sg)
{
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_re_mr *mr = NULL;
	int rc;

	if (type != IB_MR_TYPE_MEM_REG) {
		dev_dbg(rdev_to_dev(rdev), "MR type 0x%x not supported", type);
		return ERR_PTR(-EINVAL);
	}
	if (max_num_sg > MAX_PBL_LVL_1_PGS)
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->rdev = rdev;
	mr->qplib_mr.pd = &pd->qplib_pd;
	mr->qplib_mr.flags = BNXT_QPLIB_FR_PMR;
	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;

	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
	if (rc)
		goto bail;

	mr->ib_mr.lkey = mr->qplib_mr.lkey;
	mr->ib_mr.rkey = mr->ib_mr.lkey;

	mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
	if (!mr->pages) {
		rc = -ENOMEM;
		goto fail;
	}
	rc = bnxt_qplib_alloc_fast_reg_page_list(&rdev->qplib_res,
						 &mr->qplib_frpl, max_num_sg);
	if (rc) {
		dev_err(rdev_to_dev(rdev),
			"Failed to allocate HW FR page list");
		goto fail_mr;
	}

	atomic_inc(&rdev->mr_count);
	return &mr->ib_mr;

fail_mr:
	kfree(mr->pages);
fail:
	bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
bail:
	kfree(mr);
	return ERR_PTR(rc);
}
struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
			       struct ib_udata *udata)
{
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_re_mw *mw;
	int rc;

	mw = kzalloc(sizeof(*mw), GFP_KERNEL);
	if (!mw)
		return ERR_PTR(-ENOMEM);
	mw->rdev = rdev;
	mw->qplib_mw.pd = &pd->qplib_pd;

	mw->qplib_mw.type = (type == IB_MW_TYPE_1 ?
			       CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 :
			       CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B);
	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Allocate MW failed!");
		goto fail;
	}
	mw->ib_mw.rkey = mw->qplib_mw.rkey;

	atomic_inc(&rdev->mw_count);
	return &mw->ib_mw;

fail:
	kfree(mw);
	return ERR_PTR(rc);
}
int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
{
	struct bnxt_re_mw *mw = container_of(ib_mw, struct bnxt_re_mw, ib_mw);
	struct bnxt_re_dev *rdev = mw->rdev;
	int rc;

	rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Free MW failed: %#x\n", rc);
		return rc;
	}

	kfree(mw);
	atomic_dec(&rdev->mw_count);
	return rc;
}
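
/* Only page shifts that the firmware PBL interface can express
 * (CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_*) are accepted for user MRs.
 */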
static int bnxt_re_page_size_ok(int page_shift)
{
	switch (page_shift) {
	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4K:
	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_8K:
	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_64K:
	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_2M:
	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_256K:
	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1M:
	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4M:
	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1G:
		return 1;
	default:
		return 0;
	}
}
static int fill_umem_pbl_tbl(struct ib_umem *umem, u64 *pbl_tbl_orig,
			     int page_shift)
{
	u64 *pbl_tbl = pbl_tbl_orig;
	u64 paddr;
	u64 page_mask = (1ULL << page_shift) - 1;
	int i, pages;
	struct scatterlist *sg;
	int entry;

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		pages = sg_dma_len(sg) >> PAGE_SHIFT;
		for (i = 0; i < pages; i++) {
			paddr = sg_dma_address(sg) + (i << PAGE_SHIFT);
			if (pbl_tbl == pbl_tbl_orig)
				*pbl_tbl++ = paddr & ~page_mask;
			else if ((paddr & page_mask) == 0)
				*pbl_tbl++ = paddr;
		}
	}
	return pbl_tbl - pbl_tbl_orig;
}
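
/* User MR registration: allocate an MRW, pin the user range with
 * ib_umem_get(), validate the page size, flatten the umem SG list into a
 * PBL via fill_umem_pbl_tbl() and register the region with the firmware.
 */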
struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
				  u64 virt_addr, int mr_access_flags,
				  struct ib_udata *udata)
{
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_re_mr *mr;
	struct ib_umem *umem;
	u64 *pbl_tbl = NULL;
	int umem_pgs, page_shift, rc;

	if (length > BNXT_RE_MAX_MR_SIZE) {
		dev_err(rdev_to_dev(rdev), "MR Size: %lld > Max supported:%ld\n",
			length, BNXT_RE_MAX_MR_SIZE);
		return ERR_PTR(-ENOMEM);
	}

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->rdev = rdev;
	mr->qplib_mr.pd = &pd->qplib_pd;
	mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR;

	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Failed to allocate MR");
		goto free_mr;
	}
	/* The fixed portion of the rkey is the same as the lkey */
	mr->ib_mr.rkey = mr->qplib_mr.rkey;

	umem = ib_umem_get(ib_pd->uobject->context, start, length,
			   mr_access_flags, 0);
	if (IS_ERR(umem)) {
		dev_err(rdev_to_dev(rdev), "Failed to get umem");
		rc = -EFAULT;
		goto free_mrw;
	}
	mr->ib_umem = umem;

	mr->qplib_mr.va = virt_addr;
	umem_pgs = ib_umem_page_count(umem);
	if (!umem_pgs) {
		dev_err(rdev_to_dev(rdev), "umem is invalid!");
		rc = -EINVAL;
		goto free_umem;
	}
	mr->qplib_mr.total_size = length;

	pbl_tbl = kcalloc(umem_pgs, sizeof(u64 *), GFP_KERNEL);
	if (!pbl_tbl) {
		rc = -ENOMEM;
		goto free_umem;
	}

	page_shift = umem->page_shift;

	if (!bnxt_re_page_size_ok(page_shift)) {
		dev_err(rdev_to_dev(rdev), "umem page size unsupported!");
		rc = -EFAULT;
		goto fail;
	}

	if (!umem->hugetlb && length > BNXT_RE_MAX_MR_SIZE_LOW) {
		dev_err(rdev_to_dev(rdev), "Requested MR Sz:%llu Max sup:%llu",
			length, (u64)BNXT_RE_MAX_MR_SIZE_LOW);
		rc = -EINVAL;
		goto fail;
	}
	if (umem->hugetlb && length > BNXT_RE_PAGE_SIZE_2M) {
		page_shift = BNXT_RE_PAGE_SHIFT_2M;
		dev_warn(rdev_to_dev(rdev), "umem hugetlb set page_size %x",
			 1 << page_shift);
	}

	/* Map umem buf ptrs to the PBL */
	umem_pgs = fill_umem_pbl_tbl(umem, pbl_tbl, page_shift);
	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, pbl_tbl,
			       umem_pgs, false, 1 << page_shift);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Failed to register user MR");
		goto fail;
	}

	kfree(pbl_tbl);

	mr->ib_mr.lkey = mr->qplib_mr.lkey;
	mr->ib_mr.rkey = mr->qplib_mr.lkey;
	atomic_inc(&rdev->mr_count);

	return &mr->ib_mr;
fail:
	kfree(pbl_tbl);
free_umem:
	ib_umem_release(umem);
free_mrw:
	bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
free_mr:
	kfree(mr);
	return ERR_PTR(rc);
}
struct ib_ucontext *bnxt_re_alloc_ucontext(struct ib_device *ibdev,
					   struct ib_udata *udata)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct bnxt_re_uctx_resp resp;
	struct bnxt_re_ucontext *uctx;
	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
	int rc;

	dev_dbg(rdev_to_dev(rdev), "ABI version requested %d",
		ibdev->uverbs_abi_ver);

	if (ibdev->uverbs_abi_ver != BNXT_RE_ABI_VERSION) {
		dev_dbg(rdev_to_dev(rdev), " is different from the device %d ",
			BNXT_RE_ABI_VERSION);
		return ERR_PTR(-EPERM);
	}

	uctx = kzalloc(sizeof(*uctx), GFP_KERNEL);
	if (!uctx)
		return ERR_PTR(-ENOMEM);

	uctx->rdev = rdev;

	uctx->shpg = (void *)__get_free_page(GFP_KERNEL);
	if (!uctx->shpg) {
		rc = -ENOMEM;
		goto fail;
	}
	spin_lock_init(&uctx->sh_lock);

	resp.dev_id = rdev->en_dev->pdev->devfn; /*Temp, Use idr_alloc instead*/
	resp.max_qp = rdev->qplib_ctx.qpc_count;
	resp.pg_size = PAGE_SIZE;
	resp.cqe_sz = sizeof(struct cq_base);
	resp.max_cqd = dev_attr->max_cq_wqes;

	rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Failed to copy user context");
		rc = -EFAULT;
		goto cfail;
	}

	return &uctx->ib_uctx;
cfail:
	free_page((unsigned long)uctx->shpg);
	uctx->shpg = NULL;
fail:
	kfree(uctx);
	return ERR_PTR(rc);
}
int bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
{
	struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
						     struct bnxt_re_ucontext,
						     ib_uctx);
	struct bnxt_re_dev *rdev = uctx->rdev;
	int rc = 0;

	if (uctx->shpg)
		free_page((unsigned long)uctx->shpg);

	if (uctx->dpi.dbr) {
		/* Free DPI only if this is the first PD allocated by the
		 * application and mark the context dpi as NULL
		 */
		rc = bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
					    &rdev->qplib_res.dpi_tbl,
					    &uctx->dpi);
		if (rc)
			dev_err(rdev_to_dev(rdev), "Deallocate HW DPI failed!");
			/* Don't fail, continue*/
		uctx->dpi.dbr = NULL;
	}
/* Helper function to mmap the virtual memory from user app */
int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
{
	struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
						     struct bnxt_re_ucontext,
						     ib_uctx);
	struct bnxt_re_dev *rdev = uctx->rdev;
	u64 pfn;

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;

	if (vma->vm_pgoff) {
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
				       PAGE_SIZE, vma->vm_page_prot)) {
			dev_err(rdev_to_dev(rdev), "Failed to map DPI");
			return -EAGAIN;
		}
	} else {
		pfn = virt_to_phys(uctx->shpg) >> PAGE_SHIFT;
		if (remap_pfn_range(vma, vma->vm_start,
				    pfn, PAGE_SIZE, vma->vm_page_prot)) {
			dev_err(rdev_to_dev(rdev),
				"Failed to map shared page");