/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: Main component of the bnxt_re driver
 */

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <net/dcbnl.h>
#include <net/ipv6.h>
#include <net/addrconf.h>
#include <linux/if_ether.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>

#include "bnxt_ulp.h"
#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
#include "qplib_fp.h"
#include "qplib_rcfw.h"
#include "bnxt_re.h"
#include "ib_verbs.h"
#include <rdma/bnxt_re-abi.h>
#include "bnxt.h"
#include "hw_counters.h"

static char version[] =
		BNXT_RE_DESC "\n";

MODULE_AUTHOR("Eddie Wai <eddie.wai@broadcom.com>");
MODULE_DESCRIPTION(BNXT_RE_DESC " Driver");
MODULE_LICENSE("Dual BSD/GPL");

static struct list_head bnxt_re_dev_list = LIST_HEAD_INIT(bnxt_re_dev_list);
/* Mutex to protect the list of bnxt_re devices added */
static DEFINE_MUTEX(bnxt_re_dev_lock);
static struct workqueue_struct *bnxt_re_wq;
static void bnxt_re_remove_device(struct bnxt_re_dev *rdev);
static void bnxt_re_dealloc_driver(struct ib_device *ib_dev);
static void bnxt_re_stop_irq(void *handle);

static void bnxt_re_set_drv_mode(struct bnxt_re_dev *rdev, u8 mode)
{
	struct bnxt_qplib_chip_ctx *cctx;

	cctx = rdev->chip_ctx;
	cctx->modes.wqe_mode = bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
			       mode : BNXT_QPLIB_WQE_MODE_STATIC;
}

static void bnxt_re_destroy_chip_ctx(struct bnxt_re_dev *rdev)
{
	struct bnxt_qplib_chip_ctx *chip_ctx;

	if (!rdev->chip_ctx)
		return;
	chip_ctx = rdev->chip_ctx;
	rdev->chip_ctx = NULL;
	rdev->rcfw.res = NULL;
	rdev->qplib_res.cctx = NULL;
	rdev->qplib_res.pdev = NULL;
	rdev->qplib_res.netdev = NULL;
	kfree(chip_ctx);
}

static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev, u8 wqe_mode)
{
	struct bnxt_qplib_chip_ctx *chip_ctx;
	struct bnxt_en_dev *en_dev;
	struct bnxt *bp;

	en_dev = rdev->en_dev;
	bp = netdev_priv(en_dev->net);

	chip_ctx = kzalloc(sizeof(*chip_ctx), GFP_KERNEL);
	if (!chip_ctx)
		return -ENOMEM;
	chip_ctx->chip_num = bp->chip_num;
	rdev->chip_ctx = chip_ctx;
	/* rest members to follow eventually */

	rdev->qplib_res.cctx = rdev->chip_ctx;
	rdev->rcfw.res = &rdev->qplib_res;

	bnxt_re_set_drv_mode(rdev, wqe_mode);
	return 0;
}

/* SR-IOV helper functions */

static void bnxt_re_get_sriov_func_type(struct bnxt_re_dev *rdev)
{
	struct bnxt *bp;

	bp = netdev_priv(rdev->en_dev->net);
	if (BNXT_VF(bp))
		rdev->is_virtfn = 1;
}

/* Set the maximum number of each resource that the driver actually wants
 * to allocate. This may be up to the maximum number the firmware has
 * reserved for the function. The driver may choose to allocate fewer
 * resources than the firmware maximum.
 */
static void bnxt_re_limit_pf_res(struct bnxt_re_dev *rdev)
{
	struct bnxt_qplib_dev_attr *attr;
	struct bnxt_qplib_ctx *ctx;
	int i;

	attr = &rdev->dev_attr;
	ctx = &rdev->qplib_ctx;

	ctx->qpc_count = min_t(u32, BNXT_RE_MAX_QPC_COUNT,
			       attr->max_qp);
	ctx->mrw_count = BNXT_RE_MAX_MRW_COUNT_256K;
	/* Use max_mr from fw since max_mrw does not get set */
	ctx->mrw_count = min_t(u32, ctx->mrw_count, attr->max_mr);
	ctx->srqc_count = min_t(u32, BNXT_RE_MAX_SRQC_COUNT,
				attr->max_srq);
	ctx->cq_count = min_t(u32, BNXT_RE_MAX_CQ_COUNT, attr->max_cq);
	if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx))
		for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
			rdev->qplib_ctx.tqm_ctx.qcount[i] =
			rdev->dev_attr.tqm_alloc_reqs[i];
}

static void bnxt_re_limit_vf_res(struct bnxt_qplib_ctx *qplib_ctx, u32 num_vf)
{
	struct bnxt_qplib_vf_res *vf_res;
	u32 mrws = 0;
	u32 vf_pct;
	u32 nvfs;

	vf_res = &qplib_ctx->vf_res;
	/*
	 * Reserve a set of resources for the PF. Divide the remaining
	 * resources among the VFs
	 */
	vf_pct = 100 - BNXT_RE_PCT_RSVD_FOR_PF;
	nvfs = num_vf;
	num_vf = 100 * num_vf;
	vf_res->max_qp_per_vf = (qplib_ctx->qpc_count * vf_pct) / num_vf;
	vf_res->max_srq_per_vf = (qplib_ctx->srqc_count * vf_pct) / num_vf;
	vf_res->max_cq_per_vf = (qplib_ctx->cq_count * vf_pct) / num_vf;
	/*
	 * The driver allows many more MRs than other resources. If the
	 * firmware does also, then reserve a fixed amount for the PF and
	 * divide the rest among VFs. VFs may use many MRs for NFS
	 * mounts, ISER, NVME applications, etc. If the firmware severely
	 * restricts the number of MRs, then let PF have half and divide
	 * the rest among VFs, as for the other resource types.
	 */
	if (qplib_ctx->mrw_count < BNXT_RE_MAX_MRW_COUNT_64K) {
		mrws = qplib_ctx->mrw_count * vf_pct;
		nvfs = num_vf;
	} else {
		mrws = qplib_ctx->mrw_count - BNXT_RE_RESVD_MR_FOR_PF;
	}
	vf_res->max_mrw_per_vf = (mrws / nvfs);
	vf_res->max_gid_per_vf = BNXT_RE_MAX_GID_PER_VF;
}

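/*
 * Worked example of the split above (illustrative numbers only; the real
 * values of BNXT_RE_PCT_RSVD_FOR_PF and the firmware counts are defined
 * elsewhere): with a 35% PF reservation, vf_pct = 65, qpc_count = 1000 and
 * num_vf = 4, the scaled divisor is 100 * 4 = 400, so each VF receives
 * (1000 * 65) / 400 = 162 QPs while the PF keeps roughly its 35% share.
 */
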
static void bnxt_re_set_resource_limits(struct bnxt_re_dev *rdev)
{
	u32 num_vfs;

	memset(&rdev->qplib_ctx.vf_res, 0, sizeof(struct bnxt_qplib_vf_res));
	bnxt_re_limit_pf_res(rdev);

	num_vfs = bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
		  BNXT_RE_GEN_P5_MAX_VF : rdev->num_vfs;
	if (num_vfs)
		bnxt_re_limit_vf_res(&rdev->qplib_ctx, num_vfs);
}

/* for handling bnxt_en callbacks later */
static void bnxt_re_stop(void *p)
{
}

static void bnxt_re_start(void *p)
{
}

static void bnxt_re_sriov_config(void *p, int num_vfs)
{
	struct bnxt_re_dev *rdev = p;

	if (!rdev)
		return;

	rdev->num_vfs = num_vfs;
	if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) {
		bnxt_re_set_resource_limits(rdev);
		bnxt_qplib_set_func_resources(&rdev->qplib_res, &rdev->rcfw,
					      &rdev->qplib_ctx);
	}
}

static void bnxt_re_shutdown(void *p)
{
	struct bnxt_re_dev *rdev = p;

	if (!rdev)
		return;
	ASSERT_RTNL();
	/* Release the MSIx vectors before queuing unregister */
	bnxt_re_stop_irq(rdev);
	ib_unregister_device_queued(&rdev->ibdev);
}

static void bnxt_re_stop_irq(void *handle)
{
	struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
	struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw;
	struct bnxt_qplib_nq *nq;
	int indx;

	for (indx = BNXT_RE_NQ_IDX; indx < rdev->num_msix; indx++) {
		nq = &rdev->nq[indx - 1];
		bnxt_qplib_nq_stop_irq(nq, false);
	}

	bnxt_qplib_rcfw_stop_irq(rcfw, false);
}

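/*
 * Vector layout note (added for readability): the AEQ/rcfw channel owns
 * MSI-X entry BNXT_RE_AEQ_IDX, and notification queues start at
 * BNXT_RE_NQ_IDX, which is why rdev->nq[] is indexed with indx - 1 here
 * and in bnxt_re_start_irq() below.
 */
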
static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
{
	struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
	struct bnxt_msix_entry *msix_ent = rdev->msix_entries;
	struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw;
	struct bnxt_qplib_nq *nq;
	int indx, rc;

	if (!ent) {
		/* Not setting the f/w timeout bit in rcfw.
		 * During the driver unload the first command
		 * to f/w will timeout and that will set the
		 * timeout bit.
		 */
		ibdev_err(&rdev->ibdev, "Failed to re-start IRQs\n");
		return;
	}

	/* Vectors may change after restart, so update with new vectors
	 * in device structure.
	 */
	for (indx = 0; indx < rdev->num_msix; indx++)
		rdev->msix_entries[indx].vector = ent[indx].vector;

	bnxt_qplib_rcfw_start_irq(rcfw, msix_ent[BNXT_RE_AEQ_IDX].vector,
				  false);
	for (indx = BNXT_RE_NQ_IDX; indx < rdev->num_msix; indx++) {
		nq = &rdev->nq[indx - 1];
		rc = bnxt_qplib_nq_start_irq(nq, indx - 1,
					     msix_ent[indx].vector, false);
		if (rc)
			ibdev_warn(&rdev->ibdev, "Failed to reinit NQ index %d\n",
				   indx - 1);
	}
}

static struct bnxt_ulp_ops bnxt_re_ulp_ops = {
	.ulp_async_notifier = NULL,
	.ulp_stop = bnxt_re_stop,
	.ulp_start = bnxt_re_start,
	.ulp_sriov_config = bnxt_re_sriov_config,
	.ulp_shutdown = bnxt_re_shutdown,
	.ulp_irq_stop = bnxt_re_stop_irq,
	.ulp_irq_restart = bnxt_re_start_irq
};

/* RoCE -> Net driver */

/* Driver registration routines used to let the networking driver (bnxt_en)
 * know that the RoCE driver is now installed
 */
static int bnxt_re_unregister_netdev(struct bnxt_re_dev *rdev)
{
	struct bnxt_en_dev *en_dev;
	int rc;

	if (!rdev)
		return -EINVAL;

	en_dev = rdev->en_dev;

	rc = en_dev->en_ops->bnxt_unregister_device(rdev->en_dev,
						    BNXT_ROCE_ULP);
	return rc;
}

static int bnxt_re_register_netdev(struct bnxt_re_dev *rdev)
{
	struct bnxt_en_dev *en_dev;
	int rc = 0;

	if (!rdev)
		return -EINVAL;

	en_dev = rdev->en_dev;

	rc = en_dev->en_ops->bnxt_register_device(en_dev, BNXT_ROCE_ULP,
						  &bnxt_re_ulp_ops, rdev);
	rdev->qplib_res.pdev = rdev->en_dev->pdev;
	return rc;
}

static int bnxt_re_free_msix(struct bnxt_re_dev *rdev)
{
	struct bnxt_en_dev *en_dev;
	int rc;

	if (!rdev)
		return -EINVAL;

	en_dev = rdev->en_dev;

	rc = en_dev->en_ops->bnxt_free_msix(rdev->en_dev, BNXT_ROCE_ULP);

	return rc;
}

static int bnxt_re_request_msix(struct bnxt_re_dev *rdev)
{
	int rc = 0, num_msix_want = BNXT_RE_MAX_MSIX, num_msix_got;
	struct bnxt_en_dev *en_dev;

	if (!rdev)
		return -EINVAL;

	en_dev = rdev->en_dev;

	num_msix_want = min_t(u32, BNXT_RE_MAX_MSIX, num_online_cpus());

	num_msix_got = en_dev->en_ops->bnxt_request_msix(en_dev, BNXT_ROCE_ULP,
							 rdev->msix_entries,
							 num_msix_want);
	if (num_msix_got < BNXT_RE_MIN_MSIX) {
		rc = -EINVAL;
		goto done;
	}
	if (num_msix_got != num_msix_want) {
		ibdev_warn(&rdev->ibdev,
			   "Requested %d MSI-X vectors, got %d\n",
			   num_msix_want, num_msix_got);
	}
	rdev->num_msix = num_msix_got;
done:
	return rc;
}

static void bnxt_re_init_hwrm_hdr(struct bnxt_re_dev *rdev, struct input *hdr,
				  u16 opcd, u16 crid, u16 trid)
{
	hdr->req_type = cpu_to_le16(opcd);
	hdr->cmpl_ring = cpu_to_le16(crid);
	hdr->target_id = cpu_to_le16(trid);
}

static void bnxt_re_fill_fw_msg(struct bnxt_fw_msg *fw_msg, void *msg,
				int msg_len, void *resp, int resp_max_len,
				int timeout)
{
	fw_msg->msg = msg;
	fw_msg->msg_len = msg_len;
	fw_msg->resp = resp;
	fw_msg->resp_max_len = resp_max_len;
	fw_msg->timeout = timeout;
}

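/*
 * Every HWRM request below follows the same envelope pattern: build the
 * request header with bnxt_re_init_hwrm_hdr(), point bnxt_fw_msg at the
 * request and response buffers with bnxt_re_fill_fw_msg(), then hand the
 * message to the L2 driver via en_ops->bnxt_send_fw_msg(). (Descriptive
 * note added for readability; it introduces no new behavior.)
 */
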
static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev,
				 u16 fw_ring_id, int type)
{
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct hwrm_ring_free_input req = {0};
	struct hwrm_ring_free_output resp;
	struct bnxt_fw_msg fw_msg;
	int rc = -EINVAL;

	if (!en_dev)
		return rc;

	memset(&fw_msg, 0, sizeof(fw_msg));

	bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_FREE, -1, -1);
	req.ring_type = type;
	req.ring_id = cpu_to_le16(fw_ring_id);
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (rc)
		ibdev_err(&rdev->ibdev, "Failed to free HW ring:%d :%#x",
			  req.ring_id, rc);
	return rc;
}

static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev,
				  struct bnxt_re_ring_attr *ring_attr,
				  u16 *fw_ring_id)
{
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct hwrm_ring_alloc_input req = {0};
	struct hwrm_ring_alloc_output resp;
	struct bnxt_fw_msg fw_msg;
	int rc = -EINVAL;

	if (!en_dev)
		return rc;

	memset(&fw_msg, 0, sizeof(fw_msg));
	bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_ALLOC, -1, -1);
	req.enables = 0;
	req.page_tbl_addr = cpu_to_le64(ring_attr->dma_arr[0]);
	if (ring_attr->pages > 1) {
		/* Page size is in log2 units */
		req.page_size = BNXT_PAGE_SHIFT;
		req.page_tbl_depth = 1;
	}
	req.fbo = 0;
	/* Association of ring index with doorbell index and MSIX number */
	req.logical_id = cpu_to_le16(ring_attr->lrid);
	req.length = cpu_to_le32(ring_attr->depth + 1);
	req.ring_type = ring_attr->type;
	req.int_mode = ring_attr->mode;
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (!rc)
		*fw_ring_id = le16_to_cpu(resp.ring_id);

	return rc;
}

static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev,
				      u32 fw_stats_ctx_id)
{
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct hwrm_stat_ctx_free_input req = {0};
	struct bnxt_fw_msg fw_msg;
	int rc = -EINVAL;

	if (!en_dev)
		return rc;

	memset(&fw_msg, 0, sizeof(fw_msg));

	bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_FREE, -1, -1);
	req.stat_ctx_id = cpu_to_le32(fw_stats_ctx_id);
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&req,
			    sizeof(req), DFLT_HWRM_CMD_TIMEOUT);
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (rc)
		ibdev_err(&rdev->ibdev, "Failed to free HW stats context %#x",
			  rc);

	return rc;
}

static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
				       dma_addr_t dma_map,
				       u32 *fw_stats_ctx_id)
{
	struct hwrm_stat_ctx_alloc_output resp = {0};
	struct hwrm_stat_ctx_alloc_input req = {0};
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct bnxt_fw_msg fw_msg;
	int rc = -EINVAL;

	*fw_stats_ctx_id = INVALID_STATS_CTX_ID;

	if (!en_dev)
		return rc;

	memset(&fw_msg, 0, sizeof(fw_msg));

	bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_ALLOC, -1, -1);
	req.update_period_ms = cpu_to_le32(1000);
	req.stats_dma_addr = cpu_to_le64(dma_map);
	req.stats_dma_length = cpu_to_le16(sizeof(struct ctx_hw_stats_ext));
	req.stat_ctx_flags = STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE;
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (!rc)
		*fw_stats_ctx_id = le32_to_cpu(resp.stat_ctx_id);

	return rc;
}

static bool is_bnxt_re_dev(struct net_device *netdev)
{
	struct ethtool_drvinfo drvinfo;

	if (netdev->ethtool_ops && netdev->ethtool_ops->get_drvinfo) {
		memset(&drvinfo, 0, sizeof(drvinfo));
		netdev->ethtool_ops->get_drvinfo(netdev, &drvinfo);

		if (strcmp(drvinfo.driver, "bnxt_en"))
			return false;
		return true;
	}
	return false;
}

static struct bnxt_re_dev *bnxt_re_from_netdev(struct net_device *netdev)
{
	struct ib_device *ibdev =
		ib_device_get_by_netdev(netdev, RDMA_DRIVER_BNXT_RE);
	if (!ibdev)
		return NULL;

	return container_of(ibdev, struct bnxt_re_dev, ibdev);
}

static void bnxt_re_dev_unprobe(struct net_device *netdev,
				struct bnxt_en_dev *en_dev)
{
	dev_put(netdev);
	module_put(en_dev->pdev->driver->driver.owner);
}

static struct bnxt_en_dev *bnxt_re_dev_probe(struct net_device *netdev)
{
	struct bnxt *bp = netdev_priv(netdev);
	struct bnxt_en_dev *en_dev;
	struct pci_dev *pdev;

	/* Call bnxt_en's RoCE probe via indirect API */
	if (!bp->ulp_probe)
		return ERR_PTR(-EINVAL);

	en_dev = bp->ulp_probe(netdev);
	if (IS_ERR(en_dev))
		return en_dev;

	pdev = en_dev->pdev;
	if (!pdev)
		return ERR_PTR(-EINVAL);

	if (!(en_dev->flags & BNXT_EN_FLAG_ROCE_CAP)) {
		dev_info(&pdev->dev,
			 "%s: probe error: RoCE is not supported on this device",
			 ROCE_DRV_MODULE_NAME);
		return ERR_PTR(-ENODEV);
	}

	/* Bump net device reference count */
	if (!try_module_get(pdev->driver->driver.owner))
		return ERR_PTR(-ENODEV);

	dev_hold(netdev);

	return en_dev;
}

static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
			   char *buf)
{
	struct bnxt_re_dev *rdev =
		rdma_device_to_drv_device(device, struct bnxt_re_dev, ibdev);

	return sysfs_emit(buf, "0x%x\n", rdev->en_dev->pdev->vendor);
}
static DEVICE_ATTR_RO(hw_rev);

static ssize_t hca_type_show(struct device *device,
			     struct device_attribute *attr, char *buf)
{
	struct bnxt_re_dev *rdev =
		rdma_device_to_drv_device(device, struct bnxt_re_dev, ibdev);

	return sysfs_emit(buf, "%s\n", rdev->ibdev.node_desc);
}
static DEVICE_ATTR_RO(hca_type);

static struct attribute *bnxt_re_attributes[] = {
	&dev_attr_hw_rev.attr,
	&dev_attr_hca_type.attr,
	NULL
};

static const struct attribute_group bnxt_re_dev_attr_group = {
	.attrs = bnxt_re_attributes,
};

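/*
 * These attributes surface through the IB device's sysfs directory,
 * typically /sys/class/infiniband/bnxt_re<N>/hw_rev and hca_type, once
 * rdma_set_device_sysfs_group() registers the group during
 * bnxt_re_register_ib() below (path shown for illustration).
 */
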
static const struct ib_device_ops bnxt_re_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_BNXT_RE,
	.uverbs_abi_ver = BNXT_RE_ABI_VERSION,

	.add_gid = bnxt_re_add_gid,
	.alloc_hw_stats = bnxt_re_ib_alloc_hw_stats,
	.alloc_mr = bnxt_re_alloc_mr,
	.alloc_pd = bnxt_re_alloc_pd,
	.alloc_ucontext = bnxt_re_alloc_ucontext,
	.create_ah = bnxt_re_create_ah,
	.create_cq = bnxt_re_create_cq,
	.create_qp = bnxt_re_create_qp,
	.create_srq = bnxt_re_create_srq,
	.create_user_ah = bnxt_re_create_ah,
	.dealloc_driver = bnxt_re_dealloc_driver,
	.dealloc_pd = bnxt_re_dealloc_pd,
	.dealloc_ucontext = bnxt_re_dealloc_ucontext,
	.del_gid = bnxt_re_del_gid,
	.dereg_mr = bnxt_re_dereg_mr,
	.destroy_ah = bnxt_re_destroy_ah,
	.destroy_cq = bnxt_re_destroy_cq,
	.destroy_qp = bnxt_re_destroy_qp,
	.destroy_srq = bnxt_re_destroy_srq,
	.get_dev_fw_str = bnxt_re_query_fw_str,
	.get_dma_mr = bnxt_re_get_dma_mr,
	.get_hw_stats = bnxt_re_ib_get_hw_stats,
	.get_link_layer = bnxt_re_get_link_layer,
	.get_port_immutable = bnxt_re_get_port_immutable,
	.map_mr_sg = bnxt_re_map_mr_sg,
	.mmap = bnxt_re_mmap,
	.modify_ah = bnxt_re_modify_ah,
	.modify_qp = bnxt_re_modify_qp,
	.modify_srq = bnxt_re_modify_srq,
	.poll_cq = bnxt_re_poll_cq,
	.post_recv = bnxt_re_post_recv,
	.post_send = bnxt_re_post_send,
	.post_srq_recv = bnxt_re_post_srq_recv,
	.query_ah = bnxt_re_query_ah,
	.query_device = bnxt_re_query_device,
	.query_pkey = bnxt_re_query_pkey,
	.query_port = bnxt_re_query_port,
	.query_qp = bnxt_re_query_qp,
	.query_srq = bnxt_re_query_srq,
	.reg_user_mr = bnxt_re_reg_user_mr,
	.req_notify_cq = bnxt_re_req_notify_cq,
	INIT_RDMA_OBJ_SIZE(ib_ah, bnxt_re_ah, ib_ah),
	INIT_RDMA_OBJ_SIZE(ib_cq, bnxt_re_cq, ib_cq),
	INIT_RDMA_OBJ_SIZE(ib_pd, bnxt_re_pd, ib_pd),
	INIT_RDMA_OBJ_SIZE(ib_srq, bnxt_re_srq, ib_srq),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, bnxt_re_ucontext, ib_uctx),
};

static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
{
	struct ib_device *ibdev = &rdev->ibdev;
	int ret;

	/* ib device init */
	ibdev->node_type = RDMA_NODE_IB_CA;
	strlcpy(ibdev->node_desc, BNXT_RE_DESC " HCA",
		strlen(BNXT_RE_DESC) + 5);
	ibdev->phys_port_cnt = 1;

	bnxt_qplib_get_guid(rdev->netdev->dev_addr, (u8 *)&ibdev->node_guid);

	ibdev->num_comp_vectors = rdev->num_msix - 1;
	ibdev->dev.parent = &rdev->en_dev->pdev->dev;
	ibdev->local_dma_lkey = BNXT_QPLIB_RSVD_LKEY;

	rdma_set_device_sysfs_group(ibdev, &bnxt_re_dev_attr_group);
	ib_set_device_ops(ibdev, &bnxt_re_dev_ops);
	ret = ib_device_set_netdev(&rdev->ibdev, rdev->netdev, 1);
	if (ret)
		return ret;

	dma_set_max_seg_size(&rdev->en_dev->pdev->dev, UINT_MAX);
	return ib_register_device(ibdev, "bnxt_re%d", &rdev->en_dev->pdev->dev);
}

static void bnxt_re_dev_remove(struct bnxt_re_dev *rdev)
{
	dev_put(rdev->netdev);
	rdev->netdev = NULL;
	mutex_lock(&bnxt_re_dev_lock);
	list_del_rcu(&rdev->list);
	mutex_unlock(&bnxt_re_dev_lock);

	synchronize_rcu();
}

static struct bnxt_re_dev *bnxt_re_dev_add(struct net_device *netdev,
					   struct bnxt_en_dev *en_dev)
{
	struct bnxt_re_dev *rdev;

	/* Allocate bnxt_re_dev instance here */
	rdev = ib_alloc_device(bnxt_re_dev, ibdev);
	if (!rdev) {
		ibdev_err(NULL, "%s: bnxt_re_dev allocation failure!",
			  ROCE_DRV_MODULE_NAME);
		return NULL;
	}
	/* Default values */
	rdev->netdev = netdev;
	dev_hold(rdev->netdev);
	rdev->en_dev = en_dev;
	rdev->id = rdev->en_dev->pdev->devfn;
	INIT_LIST_HEAD(&rdev->qp_list);
	mutex_init(&rdev->qp_lock);
	atomic_set(&rdev->qp_count, 0);
	atomic_set(&rdev->cq_count, 0);
	atomic_set(&rdev->srq_count, 0);
	atomic_set(&rdev->mr_count, 0);
	atomic_set(&rdev->mw_count, 0);
	rdev->cosq[0] = 0xFFFF;
	rdev->cosq[1] = 0xFFFF;

	mutex_lock(&bnxt_re_dev_lock);
	list_add_tail_rcu(&rdev->list, &bnxt_re_dev_list);
	mutex_unlock(&bnxt_re_dev_lock);
	return rdev;
}

static int bnxt_re_handle_unaffi_async_event(struct creq_func_event
					     *unaffi_async)
{
	switch (unaffi_async->event) {
	case CREQ_FUNC_EVENT_EVENT_TX_WQE_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_TX_DATA_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_RX_WQE_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_RX_DATA_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CQ_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_TQM_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCQ_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCS_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCC_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCM_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_TIM_ERROR:
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static int bnxt_re_handle_qp_async_event(struct creq_qp_event *qp_event,
					 struct bnxt_re_qp *qp)
{
	struct ib_event event;
	unsigned int flags;

	if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR &&
	    rdma_is_kernel_res(&qp->ib_qp.res)) {
		flags = bnxt_re_lock_cqs(qp);
		bnxt_qplib_add_flush_qp(&qp->qplib_qp);
		bnxt_re_unlock_cqs(qp, flags);
	}

	memset(&event, 0, sizeof(event));
	if (qp->qplib_qp.srq) {
		event.device = &qp->rdev->ibdev;
		event.element.qp = &qp->ib_qp;
		event.event = IB_EVENT_QP_LAST_WQE_REACHED;
	}

	if (event.device && qp->ib_qp.event_handler)
		qp->ib_qp.event_handler(&event, qp->ib_qp.qp_context);

	return 0;
}

static int bnxt_re_handle_affi_async_event(struct creq_qp_event *affi_async,
					   void *obj)
{
	int rc = 0;
	u8 event;

	if (!obj)
		return rc; /* QP was already dead, still return success */

	event = affi_async->event;
	if (event == CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION) {
		struct bnxt_qplib_qp *lib_qp = obj;
		struct bnxt_re_qp *qp = container_of(lib_qp, struct bnxt_re_qp,
						     qplib_qp);
		rc = bnxt_re_handle_qp_async_event(affi_async, qp);
	}
	return rc;
}

static int bnxt_re_aeq_handler(struct bnxt_qplib_rcfw *rcfw,
			       void *aeqe, void *obj)
{
	struct creq_qp_event *affi_async;
	struct creq_func_event *unaffi_async;
	u8 type;
	int rc;

	type = ((struct creq_base *)aeqe)->type;
	if (type == CREQ_BASE_TYPE_FUNC_EVENT) {
		unaffi_async = aeqe;
		rc = bnxt_re_handle_unaffi_async_event(unaffi_async);
	} else {
		affi_async = aeqe;
		rc = bnxt_re_handle_affi_async_event(affi_async, obj);
	}

	return rc;
}

static int bnxt_re_srqn_handler(struct bnxt_qplib_nq *nq,
				struct bnxt_qplib_srq *handle, u8 event)
{
	struct bnxt_re_srq *srq = container_of(handle, struct bnxt_re_srq,
					       qplib_srq);
	struct ib_event ib_event;
	int rc = 0;

	if (!srq) {
		ibdev_err(NULL, "%s: SRQ is NULL, SRQN not handled",
			  ROCE_DRV_MODULE_NAME);
		rc = -EINVAL;
		goto done;
	}
	ib_event.device = &srq->rdev->ibdev;
	ib_event.element.srq = &srq->ib_srq;
	if (event == NQ_SRQ_EVENT_EVENT_SRQ_THRESHOLD_EVENT)
		ib_event.event = IB_EVENT_SRQ_LIMIT_REACHED;
	else
		ib_event.event = IB_EVENT_SRQ_ERR;

	if (srq->ib_srq.event_handler) {
		/* Lock event_handler? */
		(*srq->ib_srq.event_handler)(&ib_event,
					     srq->ib_srq.srq_context);
	}
done:
	return rc;
}

static int bnxt_re_cqn_handler(struct bnxt_qplib_nq *nq,
			       struct bnxt_qplib_cq *handle)
{
	struct bnxt_re_cq *cq = container_of(handle, struct bnxt_re_cq,
					     qplib_cq);

	if (!cq) {
		ibdev_err(NULL, "%s: CQ is NULL, CQN not handled",
			  ROCE_DRV_MODULE_NAME);
		return -EINVAL;
	}
	if (cq->ib_cq.comp_handler) {
		/* Lock comp_handler? */
		(*cq->ib_cq.comp_handler)(&cq->ib_cq, cq->ib_cq.cq_context);
	}

	return 0;
}

#define BNXT_RE_GEN_P5_PF_NQ_DB		0x10000
#define BNXT_RE_GEN_P5_VF_NQ_DB		0x4000
static u32 bnxt_re_get_nqdb_offset(struct bnxt_re_dev *rdev, u16 indx)
{
	return bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
		(rdev->is_virtfn ? BNXT_RE_GEN_P5_VF_NQ_DB :
				   BNXT_RE_GEN_P5_PF_NQ_DB) :
		rdev->msix_entries[indx].db_offset;
}

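/*
 * Reading note: on Gen P5 chips the NQ doorbell lives at a fixed BAR
 * offset that differs between PF (0x10000) and VF (0x4000), while on
 * older chips the per-vector db_offset reported in the MSI-X entry is
 * used instead. (Comment added for readability; the values are the
 * macros defined just above.)
 */
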
static void bnxt_re_cleanup_res(struct bnxt_re_dev *rdev)
{
	int i;

	for (i = 1; i < rdev->num_msix; i++)
		bnxt_qplib_disable_nq(&rdev->nq[i - 1]);

	if (rdev->qplib_res.rcfw)
		bnxt_qplib_cleanup_res(&rdev->qplib_res);
}

static int bnxt_re_init_res(struct bnxt_re_dev *rdev)
{
	int num_vec_enabled = 0;
	int rc = 0, i;
	u32 db_offt;

	bnxt_qplib_init_res(&rdev->qplib_res);

	for (i = 1; i < rdev->num_msix; i++) {
		db_offt = bnxt_re_get_nqdb_offset(rdev, i);
		rc = bnxt_qplib_enable_nq(rdev->en_dev->pdev, &rdev->nq[i - 1],
					  i - 1, rdev->msix_entries[i].vector,
					  db_offt, &bnxt_re_cqn_handler,
					  &bnxt_re_srqn_handler);
		if (rc) {
			ibdev_err(&rdev->ibdev,
				  "Failed to enable NQ with rc = 0x%x", rc);
			goto fail;
		}
		num_vec_enabled++;
	}
	return 0;
fail:
	for (i = num_vec_enabled; i >= 0; i--)
		bnxt_qplib_disable_nq(&rdev->nq[i]);
	return rc;
}

static void bnxt_re_free_nq_res(struct bnxt_re_dev *rdev)
{
	u8 type;
	int i;

	for (i = 0; i < rdev->num_msix - 1; i++) {
		type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
		bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id, type);
		bnxt_qplib_free_nq(&rdev->nq[i]);
		rdev->nq[i].res = NULL;
	}
}

static void bnxt_re_free_res(struct bnxt_re_dev *rdev)
{
	bnxt_re_free_nq_res(rdev);

	if (rdev->qplib_res.dpi_tbl.max) {
		bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
				       &rdev->qplib_res.dpi_tbl,
				       &rdev->dpi_privileged);
	}
	if (rdev->qplib_res.rcfw) {
		bnxt_qplib_free_res(&rdev->qplib_res);
		rdev->qplib_res.rcfw = NULL;
	}
}

static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
{
	struct bnxt_re_ring_attr rattr = {};
	int num_vec_created = 0;
	int rc = 0, i;
	u8 type;

	/* Configure and allocate resources for qplib */
	rdev->qplib_res.rcfw = &rdev->rcfw;
	rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr,
				     rdev->is_virtfn);
	if (rc)
		goto fail;

	rc = bnxt_qplib_alloc_res(&rdev->qplib_res, rdev->en_dev->pdev,
				  rdev->netdev, &rdev->dev_attr);
	if (rc)
		goto fail;

	rc = bnxt_qplib_alloc_dpi(&rdev->qplib_res.dpi_tbl,
				  &rdev->dpi_privileged,
				  rdev);
	if (rc)
		goto dealloc_res;

	for (i = 0; i < rdev->num_msix - 1; i++) {
		struct bnxt_qplib_nq *nq;

		nq = &rdev->nq[i];
		nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;
		rc = bnxt_qplib_alloc_nq(&rdev->qplib_res, &rdev->nq[i]);
		if (rc) {
			ibdev_err(&rdev->ibdev, "Alloc Failed NQ%d rc:%#x",
				  i, rc);
			goto free_nq;
		}
		type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
		rattr.dma_arr = nq->hwq.pbl[PBL_LVL_0].pg_map_arr;
		rattr.pages = nq->hwq.pbl[rdev->nq[i].hwq.level].pg_count;
		rattr.type = type;
		rattr.mode = RING_ALLOC_REQ_INT_MODE_MSIX;
		rattr.depth = BNXT_QPLIB_NQE_MAX_CNT - 1;
		rattr.lrid = rdev->msix_entries[i + 1].ring_idx;
		rc = bnxt_re_net_ring_alloc(rdev, &rattr, &nq->ring_id);
		if (rc) {
			ibdev_err(&rdev->ibdev,
				  "Failed to allocate NQ fw id with rc = 0x%x",
				  rc);
			bnxt_qplib_free_nq(&rdev->nq[i]);
			goto free_nq;
		}
		num_vec_created++;
	}
	return 0;
free_nq:
	for (i = num_vec_created - 1; i >= 0; i--) {
		type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
		bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id, type);
		bnxt_qplib_free_nq(&rdev->nq[i]);
	}
	bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
			       &rdev->qplib_res.dpi_tbl,
			       &rdev->dpi_privileged);
dealloc_res:
	bnxt_qplib_free_res(&rdev->qplib_res);

fail:
	rdev->qplib_res.rcfw = NULL;
	return rc;
}

static void bnxt_re_dispatch_event(struct ib_device *ibdev, struct ib_qp *qp,
				   u8 port_num, enum ib_event_type event)
{
	struct ib_event ib_event;

	ib_event.device = ibdev;
	if (qp) {
		ib_event.element.qp = qp;
		ib_event.event = event;
		if (qp->event_handler)
			qp->event_handler(&ib_event, qp->qp_context);
	} else {
		ib_event.element.port_num = port_num;
		ib_event.event = event;
		ib_dispatch_event(&ib_event);
	}
}

#define HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_IVLAN      0x02
static int bnxt_re_query_hwrm_pri2cos(struct bnxt_re_dev *rdev, u8 dir,
				      u64 *cid_map)
{
	struct hwrm_queue_pri2cos_qcfg_input req = {0};
	struct bnxt *bp = netdev_priv(rdev->netdev);
	struct hwrm_queue_pri2cos_qcfg_output resp;
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct bnxt_fw_msg fw_msg;
	u32 flags = 0;
	u8 *qcfgmap, *tmp_map;
	int rc = 0, i;

	if (!cid_map)
		return -EINVAL;

	memset(&fw_msg, 0, sizeof(fw_msg));
	bnxt_re_init_hwrm_hdr(rdev, (void *)&req,
			      HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
	flags |= (dir & 0x01);
	flags |= HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_IVLAN;
	req.flags = cpu_to_le32(flags);
	req.port_id = bp->pf.port_id;

	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (rc)
		return rc;

	if (resp.queue_cfg_info) {
		ibdev_warn(&rdev->ibdev,
			   "Asymmetric cos queue configuration detected");
		ibdev_warn(&rdev->ibdev,
			   " on device, QoS may not be fully functional\n");
	}
	qcfgmap = &resp.pri0_cos_queue_id;
	tmp_map = (u8 *)cid_map;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
		tmp_map[i] = qcfgmap[i];

	return rc;
}

static bool bnxt_re_is_qp1_or_shadow_qp(struct bnxt_re_dev *rdev,
					struct bnxt_re_qp *qp)
{
	return (qp->ib_qp.qp_type == IB_QPT_GSI) ||
	       (qp == rdev->gsi_ctx.gsi_sqp);
}

static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev)
{
	int mask = IB_QP_STATE;
	struct ib_qp_attr qp_attr;
	struct bnxt_re_qp *qp;

	qp_attr.qp_state = IB_QPS_ERR;
	mutex_lock(&rdev->qp_lock);
	list_for_each_entry(qp, &rdev->qp_list, list) {
		/* Modify the state of all QPs except QP1/Shadow QP */
		if (!bnxt_re_is_qp1_or_shadow_qp(rdev, qp)) {
			if (qp->qplib_qp.state !=
			    CMDQ_MODIFY_QP_NEW_STATE_RESET &&
			    qp->qplib_qp.state !=
			    CMDQ_MODIFY_QP_NEW_STATE_ERR) {
				bnxt_re_dispatch_event(&rdev->ibdev, &qp->ib_qp,
						       1, IB_EVENT_QP_FATAL);
				bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, mask,
						  NULL);
			}
		}
	}
	mutex_unlock(&rdev->qp_lock);
}

static int bnxt_re_update_gid(struct bnxt_re_dev *rdev)
{
	struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
	struct bnxt_qplib_gid gid;
	u16 gid_idx, index;
	int rc = 0;

	if (!ib_device_try_get(&rdev->ibdev))
		return 0;

	if (!sgid_tbl) {
		ibdev_err(&rdev->ibdev, "QPLIB: SGID table not allocated");
		rc = -EINVAL;
		goto out;
	}

	for (index = 0; index < sgid_tbl->active; index++) {
		gid_idx = sgid_tbl->hw_id[index];

		if (!memcmp(&sgid_tbl->tbl[index], &bnxt_qplib_gid_zero,
			    sizeof(bnxt_qplib_gid_zero)))
			continue;
		/* need to modify the VLAN enable setting of non VLAN GID only
		 * as setting is done for VLAN GID while adding GID
		 */
		if (sgid_tbl->vlan[index])
			continue;

		memcpy(&gid, &sgid_tbl->tbl[index], sizeof(gid));

		rc = bnxt_qplib_update_sgid(sgid_tbl, &gid, gid_idx,
					    rdev->qplib_res.netdev->dev_addr);
	}
out:
	ib_device_put(&rdev->ibdev);
	return rc;
}

static u32 bnxt_re_get_priority_mask(struct bnxt_re_dev *rdev)
{
	u32 prio_map = 0, tmp_map = 0;
	struct net_device *netdev;
	struct dcb_app app;

	netdev = rdev->netdev;

	memset(&app, 0, sizeof(app));
	app.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE;
	app.protocol = ETH_P_IBOE;
	tmp_map = dcb_ieee_getapp_mask(netdev, &app);
	prio_map = tmp_map;

	app.selector = IEEE_8021QAZ_APP_SEL_DGRAM;
	app.protocol = ROCE_V2_UDP_DPORT;
	tmp_map = dcb_ieee_getapp_mask(netdev, &app);
	prio_map |= tmp_map;

	return prio_map;
}

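/*
 * Worked example (illustrative): if DCB maps the RoCE v1 ethertype to
 * priority 3 and the RoCE v2 UDP port to priority 4, the two
 * dcb_ieee_getapp_mask() calls return 0x08 and 0x10, and prio_map ends
 * up as 0x18 (one bit per 802.1p priority).
 */
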
static void bnxt_re_parse_cid_map(u8 prio_map, u8 *cid_map, u16 *cosq)
{
	u16 prio;
	u8 id;

	for (prio = 0, id = 0; prio < 8; prio++) {
		if (prio_map & (1 << prio)) {
			cosq[id] = cid_map[prio];
			id++;
			if (id == 2) /* Max 2 tcs supported */
				break;
		}
	}
}

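/*
 * Continuing the example above: with prio_map = 0x18 the loop copies
 * cid_map[3] into cosq[0] and cid_map[4] into cosq[1], then stops,
 * since at most two traffic classes are consumed.
 */
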
static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev)
{
	u8 prio_map = 0;
	u64 cid_map;
	int rc;

	/* Get priority for roce */
	prio_map = bnxt_re_get_priority_mask(rdev);

	if (prio_map == rdev->cur_prio_map)
		return 0;
	rdev->cur_prio_map = prio_map;
	/* Get cosq id for this priority */
	rc = bnxt_re_query_hwrm_pri2cos(rdev, 0, &cid_map);
	if (rc) {
		ibdev_warn(&rdev->ibdev, "no cos for p_mask %x\n", prio_map);
		return rc;
	}
	/* Parse CoS IDs for app priority */
	bnxt_re_parse_cid_map(prio_map, (u8 *)&cid_map, rdev->cosq);

	rc = bnxt_qplib_map_tc2cos(&rdev->qplib_res, rdev->cosq);
	if (rc) {
		ibdev_warn(&rdev->ibdev, "no tc for cos{%x, %x}\n",
			   rdev->cosq[0], rdev->cosq[1]);
		return rc;
	}

	/* Actual priorities are not programmed as they are already
	 * done by L2 driver; just enable or disable priority vlan tagging
	 */
	if ((prio_map == 0 && rdev->qplib_res.prio) ||
	    (prio_map != 0 && !rdev->qplib_res.prio)) {
		rdev->qplib_res.prio = prio_map ? true : false;

		bnxt_re_update_gid(rdev);
	}

	return 0;
}

static void bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev)
{
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct hwrm_ver_get_output resp = {0};
	struct hwrm_ver_get_input req = {0};
	struct bnxt_fw_msg fw_msg;
	int rc = 0;

	memset(&fw_msg, 0, sizeof(fw_msg));
	bnxt_re_init_hwrm_hdr(rdev, (void *)&req,
			      HWRM_VER_GET, -1, -1);
	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
	req.hwrm_intf_min = HWRM_VERSION_MINOR;
	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to query HW version, rc = 0x%x",
			  rc);
		return;
	}
	rdev->qplib_ctx.hwrm_intf_ver =
		(u64)le16_to_cpu(resp.hwrm_intf_major) << 48 |
		(u64)le16_to_cpu(resp.hwrm_intf_minor) << 32 |
		(u64)le16_to_cpu(resp.hwrm_intf_build) << 16 |
		le16_to_cpu(resp.hwrm_intf_patch);
}

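/*
 * The four 16-bit fields pack into one u64: e.g. HWRM interface version
 * 1.10.2.95 becomes 0x0001000a0002005f (major 1 << 48, minor 0xa << 32,
 * build 2 << 16, patch 0x5f). The version numbers here are purely
 * illustrative.
 */
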
static int bnxt_re_ib_init(struct bnxt_re_dev *rdev)
{
	int rc = 0;
	u32 event;

	/* Register ib dev */
	rc = bnxt_re_register_ib(rdev);
	if (rc) {
		pr_err("Failed to register with IB: %#x\n", rc);
		return rc;
	}
	dev_info(rdev_to_dev(rdev), "Device registered successfully");
	ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed,
			 &rdev->active_width);
	set_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags);

	event = netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev) ?
		IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;

	bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, event);

	return rc;
}

static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev)
{
	u8 type;
	int rc;

	if (test_and_clear_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags))
		cancel_delayed_work_sync(&rdev->worker);

	if (test_and_clear_bit(BNXT_RE_FLAG_RESOURCES_INITIALIZED,
			       &rdev->flags))
		bnxt_re_cleanup_res(rdev);
	if (test_and_clear_bit(BNXT_RE_FLAG_RESOURCES_ALLOCATED, &rdev->flags))
		bnxt_re_free_res(rdev);

	if (test_and_clear_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags)) {
		rc = bnxt_qplib_deinit_rcfw(&rdev->rcfw);
		if (rc)
			ibdev_warn(&rdev->ibdev,
				   "Failed to deinitialize RCFW: %#x", rc);
		bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
		bnxt_qplib_free_ctx(&rdev->qplib_res, &rdev->qplib_ctx);
		bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
		type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
		bnxt_re_net_ring_free(rdev, rdev->rcfw.creq.ring_id, type);
		bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
	}
	if (test_and_clear_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags)) {
		rc = bnxt_re_free_msix(rdev);
		if (rc)
			ibdev_warn(&rdev->ibdev,
				   "Failed to free MSI-X vectors: %#x", rc);
	}

	bnxt_re_destroy_chip_ctx(rdev);
	if (test_and_clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags)) {
		rc = bnxt_re_unregister_netdev(rdev);
		if (rc)
			ibdev_warn(&rdev->ibdev,
				   "Failed to unregister with netdev: %#x", rc);
	}
}

/* Worker thread for polling periodic events. Now used for QoS programming */
static void bnxt_re_worker(struct work_struct *work)
{
	struct bnxt_re_dev *rdev = container_of(work, struct bnxt_re_dev,
						worker.work);

	bnxt_re_setup_qos(rdev);
	schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
}

static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode)
{
	struct bnxt_qplib_creq_ctx *creq;
	struct bnxt_re_ring_attr rattr;
	u32 db_offt;
	int vid;
	u8 type;
	int rc;

	/* Register a new RoCE device instance to netdev */
	memset(&rattr, 0, sizeof(rattr));
	rc = bnxt_re_register_netdev(rdev);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to register with netdev: %#x\n", rc);
		return -EINVAL;
	}
	set_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);

	rc = bnxt_re_setup_chip_ctx(rdev, wqe_mode);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to get chip context\n");
		return -EINVAL;
	}

	/* Check whether VF or PF */
	bnxt_re_get_sriov_func_type(rdev);

	rc = bnxt_re_request_msix(rdev);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to get MSI-X vectors: %#x\n", rc);
		rc = -EINVAL;
		goto fail;
	}
	set_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags);

	bnxt_re_query_hwrm_intf_version(rdev);

	/* Establish RCFW Communication Channel to initialize the context
	 * memory for the function and all child VFs
	 */
	rc = bnxt_qplib_alloc_rcfw_channel(&rdev->qplib_res, &rdev->rcfw,
					   &rdev->qplib_ctx,
					   BNXT_RE_MAX_QPC_COUNT);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to allocate RCFW Channel: %#x\n", rc);
		goto fail;
	}

	type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
	creq = &rdev->rcfw.creq;
	rattr.dma_arr = creq->hwq.pbl[PBL_LVL_0].pg_map_arr;
	rattr.pages = creq->hwq.pbl[creq->hwq.level].pg_count;
	rattr.type = type;
	rattr.mode = RING_ALLOC_REQ_INT_MODE_MSIX;
	rattr.depth = BNXT_QPLIB_CREQE_MAX_CNT - 1;
	rattr.lrid = rdev->msix_entries[BNXT_RE_AEQ_IDX].ring_idx;
	rc = bnxt_re_net_ring_alloc(rdev, &rattr, &creq->ring_id);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to allocate CREQ: %#x\n", rc);
		goto free_rcfw;
	}
	db_offt = bnxt_re_get_nqdb_offset(rdev, BNXT_RE_AEQ_IDX);
	vid = rdev->msix_entries[BNXT_RE_AEQ_IDX].vector;
	rc = bnxt_qplib_enable_rcfw_channel(&rdev->rcfw,
					    vid, db_offt, rdev->is_virtfn,
					    &bnxt_re_aeq_handler);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to enable RCFW channel: %#x\n",
			  rc);
		goto free_ring;
	}

	rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr,
				     rdev->is_virtfn);
	if (rc)
		goto disable_rcfw;

	bnxt_re_set_resource_limits(rdev);

	rc = bnxt_qplib_alloc_ctx(&rdev->qplib_res, &rdev->qplib_ctx, 0,
				  bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx));
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to allocate QPLIB context: %#x\n", rc);
		goto disable_rcfw;
	}
	rc = bnxt_re_net_stats_ctx_alloc(rdev,
					 rdev->qplib_ctx.stats.dma_map,
					 &rdev->qplib_ctx.stats.fw_id);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to allocate stats context: %#x\n", rc);
		goto free_ctx;
	}

	rc = bnxt_qplib_init_rcfw(&rdev->rcfw, &rdev->qplib_ctx,
				  rdev->is_virtfn);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to initialize RCFW: %#x\n", rc);
		goto free_sctx;
	}
	set_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags);

	/* Resources based on the 'new' device caps */
	rc = bnxt_re_alloc_res(rdev);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to allocate resources: %#x\n", rc);
		goto fail;
	}
	set_bit(BNXT_RE_FLAG_RESOURCES_ALLOCATED, &rdev->flags);
	rc = bnxt_re_init_res(rdev);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to initialize resources: %#x\n", rc);
		goto fail;
	}

	set_bit(BNXT_RE_FLAG_RESOURCES_INITIALIZED, &rdev->flags);

	if (!rdev->is_virtfn) {
		rc = bnxt_re_setup_qos(rdev);
		if (rc)
			ibdev_info(&rdev->ibdev,
				   "RoCE priority not yet configured\n");

		INIT_DELAYED_WORK(&rdev->worker, bnxt_re_worker);
		set_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags);
		schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
	}

	return 0;
free_sctx:
	bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
free_ctx:
	bnxt_qplib_free_ctx(&rdev->qplib_res, &rdev->qplib_ctx);
disable_rcfw:
	bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
free_ring:
	type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
	bnxt_re_net_ring_free(rdev, rdev->rcfw.creq.ring_id, type);
free_rcfw:
	bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
fail:
	bnxt_re_dev_uninit(rdev);

	return rc;
}

static void bnxt_re_dev_unreg(struct bnxt_re_dev *rdev)
{
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct net_device *netdev = rdev->netdev;

	bnxt_re_dev_remove(rdev);

	if (netdev)
		bnxt_re_dev_unprobe(netdev, en_dev);
}

static int bnxt_re_dev_reg(struct bnxt_re_dev **rdev, struct net_device *netdev)
{
	struct bnxt_en_dev *en_dev;
	int rc = 0;

	if (!is_bnxt_re_dev(netdev))
		return -ENODEV;

	en_dev = bnxt_re_dev_probe(netdev);
	if (IS_ERR(en_dev)) {
		if (en_dev != ERR_PTR(-ENODEV))
			ibdev_err(&(*rdev)->ibdev, "%s: Failed to probe\n",
				  ROCE_DRV_MODULE_NAME);
		rc = PTR_ERR(en_dev);
		goto exit;
	}
	*rdev = bnxt_re_dev_add(netdev, en_dev);
	if (!*rdev) {
		rc = -ENOMEM;
		bnxt_re_dev_unprobe(netdev, en_dev);
		goto exit;
	}
exit:
	return rc;
}

static void bnxt_re_remove_device(struct bnxt_re_dev *rdev)
{
	bnxt_re_dev_uninit(rdev);
	pci_dev_put(rdev->en_dev->pdev);
	bnxt_re_dev_unreg(rdev);
}

static int bnxt_re_add_device(struct bnxt_re_dev **rdev,
			      struct net_device *netdev, u8 wqe_mode)
{
	int rc;

	rc = bnxt_re_dev_reg(rdev, netdev);
	if (rc == -ENODEV)
		return rc;
	if (rc) {
		pr_err("Failed to register with the device %s: %#x\n",
		       netdev->name, rc);
		return rc;
	}

	pci_dev_get((*rdev)->en_dev->pdev);
	rc = bnxt_re_dev_init(*rdev, wqe_mode);
	if (rc) {
		pci_dev_put((*rdev)->en_dev->pdev);
		bnxt_re_dev_unreg(*rdev);
	}

	return rc;
}

static void bnxt_re_dealloc_driver(struct ib_device *ib_dev)
{
	struct bnxt_re_dev *rdev =
		container_of(ib_dev, struct bnxt_re_dev, ibdev);

	dev_info(rdev_to_dev(rdev), "Unregistering Device");

	rtnl_lock();
	bnxt_re_remove_device(rdev);
	rtnl_unlock();
}

/* Handle all deferred netevents tasks */
static void bnxt_re_task(struct work_struct *work)
{
	struct bnxt_re_work *re_work;
	struct bnxt_re_dev *rdev;
	int rc = 0;

	re_work = container_of(work, struct bnxt_re_work, work);
	rdev = re_work->rdev;

	if (re_work->event == NETDEV_REGISTER) {
		rc = bnxt_re_ib_init(rdev);
		if (rc) {
			ibdev_err(&rdev->ibdev,
				  "Failed to register with IB: %#x", rc);
			rtnl_lock();
			bnxt_re_remove_device(rdev);
			rtnl_unlock();
			goto exit;
		}
		goto exit;
	}

	if (!ib_device_try_get(&rdev->ibdev))
		goto exit;

	switch (re_work->event) {
	case NETDEV_UP:
		bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
				       IB_EVENT_PORT_ACTIVE);
		break;
	case NETDEV_DOWN:
		bnxt_re_dev_stop(rdev);
		break;
	case NETDEV_CHANGE:
		if (!netif_carrier_ok(rdev->netdev))
			bnxt_re_dev_stop(rdev);
		else if (netif_carrier_ok(rdev->netdev))
			bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
					       IB_EVENT_PORT_ACTIVE);
		ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed,
				 &rdev->active_width);
		break;
	default:
		break;
	}
	ib_device_put(&rdev->ibdev);
exit:
	put_device(&rdev->ibdev.dev);
	kfree(re_work);
}

/*
 * "Notifier chain callback can be invoked for the same chain from
 * different CPUs at the same time".
 *
 * For cases when the netdev is already present, our call to the
 * register_netdevice_notifier() will actually get the rtnl_lock()
 * before sending NETDEV_REGISTER and (if up) NETDEV_UP
 * events.
 *
 * But for cases when the netdev is not already present, the notifier
 * chain can be invoked from different CPUs simultaneously.
 *
 * This is protected by the netdev_mutex.
 */

static int bnxt_re_netdev_event(struct notifier_block *notifier,
				unsigned long event, void *ptr)
{
	struct net_device *real_dev, *netdev = netdev_notifier_info_to_dev(ptr);
	struct bnxt_re_work *re_work;
	struct bnxt_re_dev *rdev;
	int rc = 0;
	bool sch_work = false;
	bool release = true;

	real_dev = rdma_vlan_dev_real_dev(netdev);
	if (!real_dev)
		real_dev = netdev;

	rdev = bnxt_re_from_netdev(real_dev);
	if (!rdev && event != NETDEV_REGISTER)
		return NOTIFY_OK;

	if (real_dev != netdev)
		goto exit;

	switch (event) {
	case NETDEV_REGISTER:
		if (rdev)
			break;
		rc = bnxt_re_add_device(&rdev, real_dev,
					BNXT_QPLIB_WQE_MODE_STATIC);
		if (!rc)
			sch_work = true;
		release = false;
		break;

	case NETDEV_UNREGISTER:
		ib_unregister_device_queued(&rdev->ibdev);
		break;

	default:
		sch_work = true;
		break;
	}
	if (sch_work) {
		/* Allocate for the deferred task */
		re_work = kzalloc(sizeof(*re_work), GFP_ATOMIC);
		if (re_work) {
			get_device(&rdev->ibdev.dev);
			re_work->rdev = rdev;
			re_work->event = event;
			re_work->vlan_dev = (real_dev == netdev ?
					     NULL : netdev);
			INIT_WORK(&re_work->work, bnxt_re_task);
			queue_work(bnxt_re_wq, &re_work->work);
		}
	}

exit:
	if (rdev && release)
		ib_device_put(&rdev->ibdev);

	return NOTIFY_DONE;
}

static struct notifier_block bnxt_re_netdev_notifier = {
	.notifier_call = bnxt_re_netdev_event
};

static int __init bnxt_re_mod_init(void)
{
	int rc = 0;

	pr_info("%s: %s", ROCE_DRV_MODULE_NAME, version);

	bnxt_re_wq = create_singlethread_workqueue("bnxt_re");
	if (!bnxt_re_wq)
		return -ENOMEM;

	INIT_LIST_HEAD(&bnxt_re_dev_list);

	rc = register_netdevice_notifier(&bnxt_re_netdev_notifier);
	if (rc) {
		pr_err("%s: Cannot register to netdevice_notifier",
		       ROCE_DRV_MODULE_NAME);
		goto err_netdev;
	}
	return 0;

err_netdev:
	destroy_workqueue(bnxt_re_wq);

	return rc;
}

static void __exit bnxt_re_mod_exit(void)
{
	struct bnxt_re_dev *rdev;

	unregister_netdevice_notifier(&bnxt_re_netdev_notifier);
	if (bnxt_re_wq)
		destroy_workqueue(bnxt_re_wq);
	list_for_each_entry(rdev, &bnxt_re_dev_list, list) {
		/* VF device removal should be called before the removal
		 * of PF device. Queue VFs unregister first, so that VFs
		 * shall be removed before the PF during the call of
		 * ib_unregister_driver.
		 */
		if (rdev->is_virtfn)
			ib_unregister_device(&rdev->ibdev);
	}
	ib_unregister_driver(RDMA_DRIVER_BNXT_RE);
}

module_init(bnxt_re_mod_init);
module_exit(bnxt_re_mod_exit);