/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: Main component of the bnxt_re driver
 */
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <net/dcbnl.h>
#include <net/addrconf.h>
#include <linux/if_ether.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>

#include "bnxt_ulp.h"
#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
#include "qplib_fp.h"
#include "qplib_rcfw.h"
#include "bnxt_re.h"
#include "ib_verbs.h"
#include <rdma/bnxt_re-abi.h>
#include "bnxt.h"
#include "hw_counters.h"
static char version[] =
		BNXT_RE_DESC "\n";

MODULE_AUTHOR("Eddie Wai <eddie.wai@broadcom.com>");
MODULE_DESCRIPTION(BNXT_RE_DESC " Driver");
MODULE_LICENSE("Dual BSD/GPL");
/* globals */
static struct list_head bnxt_re_dev_list = LIST_HEAD_INIT(bnxt_re_dev_list);
/* Mutex to protect the list of bnxt_re devices added */
static DEFINE_MUTEX(bnxt_re_dev_lock);
static struct workqueue_struct *bnxt_re_wq;
static void bnxt_re_remove_device(struct bnxt_re_dev *rdev);
static void bnxt_re_dealloc_driver(struct ib_device *ib_dev);
static void bnxt_re_stop_irq(void *handle);
static void bnxt_re_destroy_chip_ctx(struct bnxt_re_dev *rdev)
{
	struct bnxt_qplib_chip_ctx *chip_ctx;

	if (!rdev->chip_ctx)
		return;
	chip_ctx = rdev->chip_ctx;
	rdev->chip_ctx = NULL;
	rdev->rcfw.res = NULL;
	rdev->qplib_res.cctx = NULL;
	rdev->qplib_res.pdev = NULL;
	rdev->qplib_res.netdev = NULL;
	kfree(chip_ctx);
}
static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev)
{
	struct bnxt_qplib_chip_ctx *chip_ctx;
	struct bnxt_en_dev *en_dev;
	struct bnxt *bp;

	en_dev = rdev->en_dev;
	bp = netdev_priv(en_dev->net);

	chip_ctx = kzalloc(sizeof(*chip_ctx), GFP_KERNEL);
	if (!chip_ctx)
		return -ENOMEM;
	chip_ctx->chip_num = bp->chip_num;
	/* rest members to follow eventually */

	rdev->chip_ctx = chip_ctx;
	rdev->qplib_res.cctx = rdev->chip_ctx;
	rdev->rcfw.res = &rdev->qplib_res;

	return 0;
}
/* SR-IOV helper functions */

static void bnxt_re_get_sriov_func_type(struct bnxt_re_dev *rdev)
{
	struct bnxt *bp;

	bp = netdev_priv(rdev->en_dev->net);
	if (BNXT_VF(bp))
		rdev->is_virtfn = 1;
}
/* Set the maximum number of each resource that the driver actually wants
 * to allocate. This may be up to the maximum number the firmware has
 * reserved for the function. The driver may choose to allocate fewer
 * resources than the firmware maximum.
 */
static void bnxt_re_limit_pf_res(struct bnxt_re_dev *rdev)
{
	struct bnxt_qplib_dev_attr *attr;
	struct bnxt_qplib_ctx *ctx;
	int i;

	attr = &rdev->dev_attr;
	ctx = &rdev->qplib_ctx;

	ctx->qpc_count = min_t(u32, BNXT_RE_MAX_QPC_COUNT,
			       attr->max_qp);
	ctx->mrw_count = BNXT_RE_MAX_MRW_COUNT_256K;
	/* Use max_mr from fw since max_mrw does not get set */
	ctx->mrw_count = min_t(u32, ctx->mrw_count, attr->max_mr);
	ctx->srqc_count = min_t(u32, BNXT_RE_MAX_SRQC_COUNT,
				attr->max_srq);
	ctx->cq_count = min_t(u32, BNXT_RE_MAX_CQ_COUNT, attr->max_cq);
	if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx))
		for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
			rdev->qplib_ctx.tqm_ctx.qcount[i] =
			rdev->dev_attr.tqm_alloc_reqs[i];
}
static void bnxt_re_limit_vf_res(struct bnxt_qplib_ctx *qplib_ctx, u32 num_vf)
{
	struct bnxt_qplib_vf_res *vf_res;
	u32 mrws = 0;
	u32 nvfs, vf_pct;

	vf_res = &qplib_ctx->vf_res;
	/*
	 * Reserve a set of resources for the PF. Divide the remaining
	 * resources among the VFs
	 */
	vf_pct = 100 - BNXT_RE_PCT_RSVD_FOR_PF;
	nvfs = num_vf;
	num_vf = 100 * num_vf;
	vf_res->max_qp_per_vf = (qplib_ctx->qpc_count * vf_pct) / num_vf;
	vf_res->max_srq_per_vf = (qplib_ctx->srqc_count * vf_pct) / num_vf;
	vf_res->max_cq_per_vf = (qplib_ctx->cq_count * vf_pct) / num_vf;
	/*
	 * The driver allows many more MRs than other resources. If the
	 * firmware does also, then reserve a fixed amount for the PF and
	 * divide the rest among VFs. VFs may use many MRs for NFS
	 * mounts, ISER, NVME applications, etc. If the firmware severely
	 * restricts the number of MRs, then let PF have half and divide
	 * the rest among VFs, as for the other resource types.
	 */
	if (qplib_ctx->mrw_count < BNXT_RE_MAX_MRW_COUNT_64K) {
		mrws = qplib_ctx->mrw_count * vf_pct;
		nvfs = num_vf;
	} else {
		mrws = qplib_ctx->mrw_count - BNXT_RE_RESVD_MR_FOR_PF;
	}
	vf_res->max_mrw_per_vf = (mrws / nvfs);
	vf_res->max_gid_per_vf = BNXT_RE_MAX_GID_PER_VF;
}
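
/*
 * Worked example (illustrative numbers only, not taken from this file):
 * if BNXT_RE_PCT_RSVD_FOR_PF reserves, say, 35% for the PF, then vf_pct
 * is 65. With qpc_count = 65536 and num_vf = 2 the scaled divisor above
 * becomes 200, so max_qp_per_vf = (65536 * 65) / 200 = 21299 QPs per VF,
 * leaving roughly 35% of the QP contexts with the PF. SRQs and CQs are
 * split the same way.
 */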
static void bnxt_re_set_resource_limits(struct bnxt_re_dev *rdev)
{
	u32 num_vfs;

	memset(&rdev->qplib_ctx.vf_res, 0, sizeof(struct bnxt_qplib_vf_res));
	bnxt_re_limit_pf_res(rdev);

	num_vfs = bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
			BNXT_RE_GEN_P5_MAX_VF : rdev->num_vfs;
	if (num_vfs)
		bnxt_re_limit_vf_res(&rdev->qplib_ctx, num_vfs);
}
/* for handling bnxt_en callbacks later */
static void bnxt_re_stop(void *p)
{
}

static void bnxt_re_start(void *p)
{
}

static void bnxt_re_sriov_config(void *p, int num_vfs)
{
	struct bnxt_re_dev *rdev = p;

	if (!rdev)
		return;

	rdev->num_vfs = num_vfs;
	if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) {
		bnxt_re_set_resource_limits(rdev);
		bnxt_qplib_set_func_resources(&rdev->qplib_res, &rdev->rcfw,
					      &rdev->qplib_ctx);
	}
}
static void bnxt_re_shutdown(void *p)
{
	struct bnxt_re_dev *rdev = p;

	if (!rdev)
		return;
	/* Release the MSIx vectors before queuing unregister */
	bnxt_re_stop_irq(rdev);
	ib_unregister_device_queued(&rdev->ibdev);
}
static void bnxt_re_stop_irq(void *handle)
{
	struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
	struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw;
	struct bnxt_qplib_nq *nq;
	int indx;

	for (indx = BNXT_RE_NQ_IDX; indx < rdev->num_msix; indx++) {
		nq = &rdev->nq[indx - 1];
		bnxt_qplib_nq_stop_irq(nq, false);
	}

	bnxt_qplib_rcfw_stop_irq(rcfw, false);
}
static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
{
	struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
	struct bnxt_msix_entry *msix_ent = rdev->msix_entries;
	struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw;
	struct bnxt_qplib_nq *nq;
	int indx, rc;

	if (!ent) {
		/* Not setting the f/w timeout bit in rcfw.
		 * During the driver unload the first command
		 * to f/w will timeout and that will set the
		 * timeout bit.
		 */
		ibdev_err(&rdev->ibdev, "Failed to re-start IRQs\n");
		return;
	}

	/* Vectors may change after restart, so update with new vectors
	 * in device structure.
	 */
	for (indx = 0; indx < rdev->num_msix; indx++)
		rdev->msix_entries[indx].vector = ent[indx].vector;

	bnxt_qplib_rcfw_start_irq(rcfw, msix_ent[BNXT_RE_AEQ_IDX].vector,
				  false);
	for (indx = BNXT_RE_NQ_IDX; indx < rdev->num_msix; indx++) {
		nq = &rdev->nq[indx - 1];
		rc = bnxt_qplib_nq_start_irq(nq, indx - 1,
					     msix_ent[indx].vector, false);
		if (rc)
			ibdev_warn(&rdev->ibdev, "Failed to reinit NQ index %d\n",
				   indx - 1);
	}
}
static struct bnxt_ulp_ops bnxt_re_ulp_ops = {
	.ulp_async_notifier = NULL,
	.ulp_stop = bnxt_re_stop,
	.ulp_start = bnxt_re_start,
	.ulp_sriov_config = bnxt_re_sriov_config,
	.ulp_shutdown = bnxt_re_shutdown,
	.ulp_irq_stop = bnxt_re_stop_irq,
	.ulp_irq_restart = bnxt_re_start_irq
};

/* RoCE -> Net driver */
/* Driver registration routines used to let the networking driver (bnxt_en)
 * know that the RoCE driver is now installed
 */
static int bnxt_re_unregister_netdev(struct bnxt_re_dev *rdev)
{
	struct bnxt_en_dev *en_dev;
	int rc;

	if (!rdev)
		return -EINVAL;

	en_dev = rdev->en_dev;

	rc = en_dev->en_ops->bnxt_unregister_device(rdev->en_dev,
						    BNXT_ROCE_ULP);
	return rc;
}

static int bnxt_re_register_netdev(struct bnxt_re_dev *rdev)
{
	struct bnxt_en_dev *en_dev;
	int rc = 0;

	if (!rdev)
		return -EINVAL;

	en_dev = rdev->en_dev;

	rc = en_dev->en_ops->bnxt_register_device(en_dev, BNXT_ROCE_ULP,
						  &bnxt_re_ulp_ops, rdev);
	rdev->qplib_res.pdev = rdev->en_dev->pdev;
	return rc;
}
static int bnxt_re_free_msix(struct bnxt_re_dev *rdev)
{
	struct bnxt_en_dev *en_dev;
	int rc;

	if (!rdev)
		return -EINVAL;

	en_dev = rdev->en_dev;

	rc = en_dev->en_ops->bnxt_free_msix(rdev->en_dev, BNXT_ROCE_ULP);

	return rc;
}
static int bnxt_re_request_msix(struct bnxt_re_dev *rdev)
{
	int rc = 0, num_msix_want = BNXT_RE_MAX_MSIX, num_msix_got;
	struct bnxt_en_dev *en_dev;

	if (!rdev)
		return -EINVAL;

	en_dev = rdev->en_dev;

	num_msix_want = min_t(u32, BNXT_RE_MAX_MSIX, num_online_cpus());

	num_msix_got = en_dev->en_ops->bnxt_request_msix(en_dev, BNXT_ROCE_ULP,
							 rdev->msix_entries,
							 num_msix_want);
	if (num_msix_got < BNXT_RE_MIN_MSIX) {
		rc = -EINVAL;
		goto done;
	}
	if (num_msix_got != num_msix_want) {
		ibdev_warn(&rdev->ibdev,
			   "Requested %d MSI-X vectors, got %d\n",
			   num_msix_want, num_msix_got);
	}
	rdev->num_msix = num_msix_got;
done:
	return rc;
}
static void bnxt_re_init_hwrm_hdr(struct bnxt_re_dev *rdev, struct input *hdr,
				  u16 opcd, u16 crid, u16 trid)
{
	hdr->req_type = cpu_to_le16(opcd);
	hdr->cmpl_ring = cpu_to_le16(crid);
	hdr->target_id = cpu_to_le16(trid);
}
static void bnxt_re_fill_fw_msg(struct bnxt_fw_msg *fw_msg, void *msg,
				int msg_len, void *resp, int resp_max_len,
				int timeout)
{
	fw_msg->msg = msg;
	fw_msg->msg_len = msg_len;
	fw_msg->resp = resp;
	fw_msg->resp_max_len = resp_max_len;
	fw_msg->timeout = timeout;
}
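
/*
 * Every HWRM call below follows the same pattern; a minimal sketch
 * (mirroring the callers in this file, with hypothetical HWRM_FOO
 * opcode and hwrm_foo_input/output structures as placeholders):
 *
 *	struct hwrm_foo_input req = {0};
 *	struct hwrm_foo_output resp = {0};
 *	struct bnxt_fw_msg fw_msg;
 *
 *	memset(&fw_msg, 0, sizeof(fw_msg));
 *	bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_FOO, -1, -1);
 *	// fill request fields, converting to little endian
 *	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req),
 *			    (void *)&resp, sizeof(resp),
 *			    DFLT_HWRM_CMD_TIMEOUT);
 *	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
 *
 * The real opcodes and request/response layouts come from the bnxt_en
 * HSI headers; only the wrapping shown here is done locally.
 */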
static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev,
				 u16 fw_ring_id, int type)
{
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct hwrm_ring_free_input req = {0};
	struct hwrm_ring_free_output resp;
	struct bnxt_fw_msg fw_msg;
	int rc = -EINVAL;

	if (!en_dev)
		return rc;

	memset(&fw_msg, 0, sizeof(fw_msg));

	bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_FREE, -1, -1);
	req.ring_type = type;
	req.ring_id = cpu_to_le16(fw_ring_id);
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (rc)
		ibdev_err(&rdev->ibdev, "Failed to free HW ring:%d :%#x",
			  req.ring_id, rc);
	return rc;
}
static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev,
				  struct bnxt_re_ring_attr *ring_attr,
				  u16 *fw_ring_id)
{
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct hwrm_ring_alloc_input req = {0};
	struct hwrm_ring_alloc_output resp;
	struct bnxt_fw_msg fw_msg;
	int rc = -EINVAL;

	if (!en_dev)
		return rc;

	memset(&fw_msg, 0, sizeof(fw_msg));
	bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_ALLOC, -1, -1);
	req.page_tbl_addr =  cpu_to_le64(ring_attr->dma_arr[0]);
	if (ring_attr->pages > 1) {
		/* Page size is in log2 units */
		req.page_size = BNXT_PAGE_SHIFT;
		req.page_tbl_depth = 1;
	}
	/* Association of ring index with doorbell index and MSIX number */
	req.logical_id = cpu_to_le16(ring_attr->lrid);
	req.length = cpu_to_le32(ring_attr->depth + 1);
	req.ring_type = ring_attr->type;
	req.int_mode = ring_attr->mode;
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (!rc)
		*fw_ring_id = le16_to_cpu(resp.ring_id);

	return rc;
}
static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev,
				      u32 fw_stats_ctx_id)
{
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct hwrm_stat_ctx_free_input req = {0};
	struct bnxt_fw_msg fw_msg;
	int rc = -EINVAL;

	if (!en_dev)
		return rc;

	memset(&fw_msg, 0, sizeof(fw_msg));

	bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_FREE, -1, -1);
	req.stat_ctx_id = cpu_to_le32(fw_stats_ctx_id);
	/* The request buffer doubles as the response buffer here; the free
	 * request needs no response payload beyond the completion itself.
	 */
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&req,
			    sizeof(req), DFLT_HWRM_CMD_TIMEOUT);
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (rc)
		ibdev_err(&rdev->ibdev, "Failed to free HW stats context %#x",
			  rc);

	return rc;
}
static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
				       dma_addr_t dma_map,
				       u32 *fw_stats_ctx_id)
{
	struct hwrm_stat_ctx_alloc_output resp = {0};
	struct hwrm_stat_ctx_alloc_input req = {0};
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct bnxt_fw_msg fw_msg;
	int rc = -EINVAL;

	*fw_stats_ctx_id = INVALID_STATS_CTX_ID;

	if (!en_dev)
		return rc;

	memset(&fw_msg, 0, sizeof(fw_msg));

	bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_ALLOC, -1, -1);
	req.update_period_ms = cpu_to_le32(1000);
	req.stats_dma_addr = cpu_to_le64(dma_map);
	req.stats_dma_length = cpu_to_le16(sizeof(struct ctx_hw_stats_ext));
	req.stat_ctx_flags = STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE;
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (!rc)
		*fw_stats_ctx_id = le32_to_cpu(resp.stat_ctx_id);

	return rc;
}
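
/* Note: update_period_ms = 1000 above asks the firmware to DMA a fresh
 * snapshot of the RoCE counters into the buffer at @dma_map roughly once
 * per second; presumably this is the buffer the hw_counters code reads.
 */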
static bool is_bnxt_re_dev(struct net_device *netdev)
{
	struct ethtool_drvinfo drvinfo;

	if (netdev->ethtool_ops && netdev->ethtool_ops->get_drvinfo) {
		memset(&drvinfo, 0, sizeof(drvinfo));
		netdev->ethtool_ops->get_drvinfo(netdev, &drvinfo);

		if (strcmp(drvinfo.driver, "bnxt_en"))
			return false;
		return true;
	}
	return false;
}
static struct bnxt_re_dev *bnxt_re_from_netdev(struct net_device *netdev)
{
	struct ib_device *ibdev =
		ib_device_get_by_netdev(netdev, RDMA_DRIVER_BNXT_RE);
	if (!ibdev)
		return NULL;

	return container_of(ibdev, struct bnxt_re_dev, ibdev);
}
static void bnxt_re_dev_unprobe(struct net_device *netdev,
				struct bnxt_en_dev *en_dev)
{
	dev_put(netdev);
	module_put(en_dev->pdev->driver->driver.owner);
}
static struct bnxt_en_dev *bnxt_re_dev_probe(struct net_device *netdev)
{
	struct bnxt *bp = netdev_priv(netdev);
	struct bnxt_en_dev *en_dev;
	struct pci_dev *pdev;

	/* Call bnxt_en's RoCE probe via indirect API */
	if (!bp->ulp_probe)
		return ERR_PTR(-EINVAL);

	en_dev = bp->ulp_probe(netdev);
	if (IS_ERR(en_dev))
		return en_dev;

	pdev = en_dev->pdev;
	if (!pdev)
		return ERR_PTR(-EINVAL);

	if (!(en_dev->flags & BNXT_EN_FLAG_ROCE_CAP)) {
		dev_info(&pdev->dev,
			 "%s: probe error: RoCE is not supported on this device",
			 ROCE_DRV_MODULE_NAME);
		return ERR_PTR(-ENODEV);
	}

	/* Bump net device reference count */
	if (!try_module_get(pdev->driver->driver.owner))
		return ERR_PTR(-ENODEV);

	dev_hold(netdev);

	return en_dev;
}
static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
			   char *buf)
{
	struct bnxt_re_dev *rdev =
		rdma_device_to_drv_device(device, struct bnxt_re_dev, ibdev);

	return scnprintf(buf, PAGE_SIZE, "0x%x\n", rdev->en_dev->pdev->vendor);
}
static DEVICE_ATTR_RO(hw_rev);

static ssize_t hca_type_show(struct device *device,
			     struct device_attribute *attr, char *buf)
{
	struct bnxt_re_dev *rdev =
		rdma_device_to_drv_device(device, struct bnxt_re_dev, ibdev);

	return scnprintf(buf, PAGE_SIZE, "%s\n", rdev->ibdev.node_desc);
}
static DEVICE_ATTR_RO(hca_type);
static struct attribute *bnxt_re_attributes[] = {
	&dev_attr_hw_rev.attr,
	&dev_attr_hca_type.attr,
	NULL
};

static const struct attribute_group bnxt_re_dev_attr_group = {
	.attrs = bnxt_re_attributes,
};
static const struct ib_device_ops bnxt_re_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_BNXT_RE,
	.uverbs_abi_ver = BNXT_RE_ABI_VERSION,

	.add_gid = bnxt_re_add_gid,
	.alloc_hw_stats = bnxt_re_ib_alloc_hw_stats,
	.alloc_mr = bnxt_re_alloc_mr,
	.alloc_pd = bnxt_re_alloc_pd,
	.alloc_ucontext = bnxt_re_alloc_ucontext,
	.create_ah = bnxt_re_create_ah,
	.create_cq = bnxt_re_create_cq,
	.create_qp = bnxt_re_create_qp,
	.create_srq = bnxt_re_create_srq,
	.dealloc_driver = bnxt_re_dealloc_driver,
	.dealloc_pd = bnxt_re_dealloc_pd,
	.dealloc_ucontext = bnxt_re_dealloc_ucontext,
	.del_gid = bnxt_re_del_gid,
	.dereg_mr = bnxt_re_dereg_mr,
	.destroy_ah = bnxt_re_destroy_ah,
	.destroy_cq = bnxt_re_destroy_cq,
	.destroy_qp = bnxt_re_destroy_qp,
	.destroy_srq = bnxt_re_destroy_srq,
	.get_dev_fw_str = bnxt_re_query_fw_str,
	.get_dma_mr = bnxt_re_get_dma_mr,
	.get_hw_stats = bnxt_re_ib_get_hw_stats,
	.get_link_layer = bnxt_re_get_link_layer,
	.get_port_immutable = bnxt_re_get_port_immutable,
	.map_mr_sg = bnxt_re_map_mr_sg,
	.mmap = bnxt_re_mmap,
	.modify_ah = bnxt_re_modify_ah,
	.modify_qp = bnxt_re_modify_qp,
	.modify_srq = bnxt_re_modify_srq,
	.poll_cq = bnxt_re_poll_cq,
	.post_recv = bnxt_re_post_recv,
	.post_send = bnxt_re_post_send,
	.post_srq_recv = bnxt_re_post_srq_recv,
	.query_ah = bnxt_re_query_ah,
	.query_device = bnxt_re_query_device,
	.query_pkey = bnxt_re_query_pkey,
	.query_port = bnxt_re_query_port,
	.query_qp = bnxt_re_query_qp,
	.query_srq = bnxt_re_query_srq,
	.reg_user_mr = bnxt_re_reg_user_mr,
	.req_notify_cq = bnxt_re_req_notify_cq,
	INIT_RDMA_OBJ_SIZE(ib_ah, bnxt_re_ah, ib_ah),
	INIT_RDMA_OBJ_SIZE(ib_cq, bnxt_re_cq, ib_cq),
	INIT_RDMA_OBJ_SIZE(ib_pd, bnxt_re_pd, ib_pd),
	INIT_RDMA_OBJ_SIZE(ib_srq, bnxt_re_srq, ib_srq),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, bnxt_re_ucontext, ib_uctx),
};
static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
{
	struct ib_device *ibdev = &rdev->ibdev;
	int ret;

	/* ib device init */
	ibdev->node_type = RDMA_NODE_IB_CA;
	strlcpy(ibdev->node_desc, BNXT_RE_DESC " HCA",
		strlen(BNXT_RE_DESC) + 5);
	ibdev->phys_port_cnt = 1;

	bnxt_qplib_get_guid(rdev->netdev->dev_addr, (u8 *)&ibdev->node_guid);

	ibdev->num_comp_vectors	= rdev->num_msix - 1;
	ibdev->dev.parent = &rdev->en_dev->pdev->dev;
	ibdev->local_dma_lkey = BNXT_QPLIB_RSVD_LKEY;

	/* User space */
	ibdev->uverbs_cmd_mask =
			(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
			(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
			(1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
			(1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
			(1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
			(1ull << IB_USER_VERBS_CMD_REG_MR) |
			(1ull << IB_USER_VERBS_CMD_REREG_MR) |
			(1ull << IB_USER_VERBS_CMD_DEREG_MR) |
			(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
			(1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
			(1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
			(1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
			(1ull << IB_USER_VERBS_CMD_CREATE_QP) |
			(1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
			(1ull << IB_USER_VERBS_CMD_QUERY_QP) |
			(1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
			(1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
			(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
			(1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
			(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
			(1ull << IB_USER_VERBS_CMD_CREATE_AH) |
			(1ull << IB_USER_VERBS_CMD_MODIFY_AH) |
			(1ull << IB_USER_VERBS_CMD_QUERY_AH) |
			(1ull << IB_USER_VERBS_CMD_DESTROY_AH);
	/* POLL_CQ and REQ_NOTIFY_CQ is directly handled in libbnxt_re */

	rdma_set_device_sysfs_group(ibdev, &bnxt_re_dev_attr_group);
	ib_set_device_ops(ibdev, &bnxt_re_dev_ops);
	ret = ib_device_set_netdev(&rdev->ibdev, rdev->netdev, 1);
	if (ret)
		return ret;

	return ib_register_device(ibdev, "bnxt_re%d");
}
static void bnxt_re_dev_remove(struct bnxt_re_dev *rdev)
{
	dev_put(rdev->netdev);
	rdev->netdev = NULL;
	mutex_lock(&bnxt_re_dev_lock);
	list_del_rcu(&rdev->list);
	mutex_unlock(&bnxt_re_dev_lock);

	synchronize_rcu();
}
743 static struct bnxt_re_dev
*bnxt_re_dev_add(struct net_device
*netdev
,
744 struct bnxt_en_dev
*en_dev
)
746 struct bnxt_re_dev
*rdev
;
748 /* Allocate bnxt_re_dev instance here */
749 rdev
= ib_alloc_device(bnxt_re_dev
, ibdev
);
751 ibdev_err(NULL
, "%s: bnxt_re_dev allocation failure!",
752 ROCE_DRV_MODULE_NAME
);
756 rdev
->netdev
= netdev
;
757 dev_hold(rdev
->netdev
);
758 rdev
->en_dev
= en_dev
;
759 rdev
->id
= rdev
->en_dev
->pdev
->devfn
;
760 INIT_LIST_HEAD(&rdev
->qp_list
);
761 mutex_init(&rdev
->qp_lock
);
762 atomic_set(&rdev
->qp_count
, 0);
763 atomic_set(&rdev
->cq_count
, 0);
764 atomic_set(&rdev
->srq_count
, 0);
765 atomic_set(&rdev
->mr_count
, 0);
766 atomic_set(&rdev
->mw_count
, 0);
767 rdev
->cosq
[0] = 0xFFFF;
768 rdev
->cosq
[1] = 0xFFFF;
770 mutex_lock(&bnxt_re_dev_lock
);
771 list_add_tail_rcu(&rdev
->list
, &bnxt_re_dev_list
);
772 mutex_unlock(&bnxt_re_dev_lock
);
static int bnxt_re_handle_unaffi_async_event(struct creq_func_event
					     *unaffi_async)
{
	switch (unaffi_async->event) {
	case CREQ_FUNC_EVENT_EVENT_TX_WQE_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_TX_DATA_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_RX_WQE_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_RX_DATA_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CQ_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_TQM_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCQ_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCS_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCC_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCM_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_TIM_ERROR:
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
static int bnxt_re_handle_qp_async_event(struct creq_qp_event *qp_event,
					 struct bnxt_re_qp *qp)
{
	struct ib_event event;
	unsigned int flags;

	if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
		flags = bnxt_re_lock_cqs(qp);
		bnxt_qplib_add_flush_qp(&qp->qplib_qp);
		bnxt_re_unlock_cqs(qp, flags);
	}

	memset(&event, 0, sizeof(event));
	if (qp->qplib_qp.srq) {
		event.device = &qp->rdev->ibdev;
		event.element.qp = &qp->ib_qp;
		event.event = IB_EVENT_QP_LAST_WQE_REACHED;
	}

	if (event.device && qp->ib_qp.event_handler)
		qp->ib_qp.event_handler(&event, qp->ib_qp.qp_context);

	return 0;
}
static int bnxt_re_handle_affi_async_event(struct creq_qp_event *affi_async,
					   void *obj)
{
	int rc = 0;
	u8 event;

	if (!obj)
		return rc; /* QP was already dead, still return success */

	event = affi_async->event;
	if (event == CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION) {
		struct bnxt_qplib_qp *lib_qp = obj;
		struct bnxt_re_qp *qp = container_of(lib_qp, struct bnxt_re_qp,
						     qplib_qp);
		rc = bnxt_re_handle_qp_async_event(affi_async, qp);
	}
	return rc;
}
static int bnxt_re_aeq_handler(struct bnxt_qplib_rcfw *rcfw,
			       void *aeqe, void *obj)
{
	struct creq_qp_event *affi_async;
	struct creq_func_event *unaffi_async;
	u8 type;
	int rc;

	type = ((struct creq_base *)aeqe)->type;
	if (type == CREQ_BASE_TYPE_FUNC_EVENT) {
		unaffi_async = aeqe;
		rc = bnxt_re_handle_unaffi_async_event(unaffi_async);
	} else {
		affi_async = aeqe;
		rc = bnxt_re_handle_affi_async_event(affi_async, obj);
	}

	return rc;
}
static int bnxt_re_srqn_handler(struct bnxt_qplib_nq *nq,
				struct bnxt_qplib_srq *handle, u8 event)
{
	struct bnxt_re_srq *srq = container_of(handle, struct bnxt_re_srq,
					       qplib_srq);
	struct ib_event ib_event;
	int rc = 0;

	if (!srq) {
		ibdev_err(NULL, "%s: SRQ is NULL, SRQN not handled",
			  ROCE_DRV_MODULE_NAME);
		rc = -EINVAL;
		goto done;
	}
	ib_event.device = &srq->rdev->ibdev;
	ib_event.element.srq = &srq->ib_srq;
	if (event == NQ_SRQ_EVENT_EVENT_SRQ_THRESHOLD_EVENT)
		ib_event.event = IB_EVENT_SRQ_LIMIT_REACHED;
	else
		ib_event.event = IB_EVENT_SRQ_ERR;

	if (srq->ib_srq.event_handler) {
		/* Lock event_handler? */
		(*srq->ib_srq.event_handler)(&ib_event,
					     srq->ib_srq.srq_context);
	}
done:
	return rc;
}
static int bnxt_re_cqn_handler(struct bnxt_qplib_nq *nq,
			       struct bnxt_qplib_cq *handle)
{
	struct bnxt_re_cq *cq = container_of(handle, struct bnxt_re_cq,
					     qplib_cq);

	if (!cq) {
		ibdev_err(NULL, "%s: CQ is NULL, CQN not handled",
			  ROCE_DRV_MODULE_NAME);
		return -EINVAL;
	}
	if (cq->ib_cq.comp_handler) {
		/* Lock comp_handler? */
		(*cq->ib_cq.comp_handler)(&cq->ib_cq, cq->ib_cq.cq_context);
	}

	return 0;
}
#define BNXT_RE_GEN_P5_PF_NQ_DB		0x10000
#define BNXT_RE_GEN_P5_VF_NQ_DB		0x4000
static u32 bnxt_re_get_nqdb_offset(struct bnxt_re_dev *rdev, u16 indx)
{
	return bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
		(rdev->is_virtfn ? BNXT_RE_GEN_P5_VF_NQ_DB :
				   BNXT_RE_GEN_P5_PF_NQ_DB) :
				   rdev->msix_entries[indx].db_offset;
}
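
/*
 * Example: on a Gen P5 PF the NQ doorbell region starts at 0x10000 and on
 * a Gen P5 VF at 0x4000, regardless of the vector index; on older chips
 * the per-vector db_offset reported by bnxt_en is used instead.
 */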
static void bnxt_re_cleanup_res(struct bnxt_re_dev *rdev)
{
	int i;

	for (i = 1; i < rdev->num_msix; i++)
		bnxt_qplib_disable_nq(&rdev->nq[i - 1]);

	if (rdev->qplib_res.rcfw)
		bnxt_qplib_cleanup_res(&rdev->qplib_res);
}
static int bnxt_re_init_res(struct bnxt_re_dev *rdev)
{
	int num_vec_enabled = 0;
	int rc = 0, i;
	u32 db_offt;

	bnxt_qplib_init_res(&rdev->qplib_res);

	for (i = 1; i < rdev->num_msix; i++) {
		db_offt = bnxt_re_get_nqdb_offset(rdev, i);
		rc = bnxt_qplib_enable_nq(rdev->en_dev->pdev, &rdev->nq[i - 1],
					  i - 1, rdev->msix_entries[i].vector,
					  db_offt, &bnxt_re_cqn_handler,
					  &bnxt_re_srqn_handler);
		if (rc) {
			ibdev_err(&rdev->ibdev,
				  "Failed to enable NQ with rc = 0x%x", rc);
			goto fail;
		}
		num_vec_enabled++;
	}
	return 0;
fail:
	for (i = num_vec_enabled; i >= 0; i--)
		bnxt_qplib_disable_nq(&rdev->nq[i]);
	return rc;
}
static void bnxt_re_free_nq_res(struct bnxt_re_dev *rdev)
{
	u8 type;
	int i;

	for (i = 0; i < rdev->num_msix - 1; i++) {
		type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
		bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id, type);
		bnxt_qplib_free_nq(&rdev->nq[i]);
		rdev->nq[i].res = NULL;
	}
}
static void bnxt_re_free_res(struct bnxt_re_dev *rdev)
{
	bnxt_re_free_nq_res(rdev);

	if (rdev->qplib_res.dpi_tbl.max) {
		bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
				       &rdev->qplib_res.dpi_tbl,
				       &rdev->dpi_privileged);
	}
	if (rdev->qplib_res.rcfw) {
		bnxt_qplib_free_res(&rdev->qplib_res);
		rdev->qplib_res.rcfw = NULL;
	}
}
static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
{
	struct bnxt_re_ring_attr rattr = {};
	struct bnxt_qplib_ctx *qplib_ctx;
	int num_vec_created = 0;
	int rc = 0, i;
	u8 type;

	/* Configure and allocate resources for qplib */
	rdev->qplib_res.rcfw = &rdev->rcfw;
	rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr,
				     rdev->is_virtfn);
	if (rc)
		goto fail;

	rc = bnxt_qplib_alloc_res(&rdev->qplib_res, rdev->en_dev->pdev,
				  rdev->netdev, &rdev->dev_attr);
	if (rc)
		goto fail;

	rc = bnxt_qplib_alloc_dpi(&rdev->qplib_res.dpi_tbl,
				  &rdev->dpi_privileged,
				  rdev);
	if (rc)
		goto dealloc_res;

	qplib_ctx = &rdev->qplib_ctx;
	for (i = 0; i < rdev->num_msix - 1; i++) {
		struct bnxt_qplib_nq *nq;

		nq = &rdev->nq[i];
		nq->hwq.max_elements = (qplib_ctx->cq_count +
					qplib_ctx->srqc_count + 2);
		rc = bnxt_qplib_alloc_nq(&rdev->qplib_res, &rdev->nq[i]);
		if (rc) {
			ibdev_err(&rdev->ibdev, "Alloc Failed NQ%d rc:%#x",
				  i, rc);
			goto free_nq;
		}
		type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
		rattr.dma_arr = nq->hwq.pbl[PBL_LVL_0].pg_map_arr;
		rattr.pages = nq->hwq.pbl[rdev->nq[i].hwq.level].pg_count;
		rattr.type = type;
		rattr.mode = RING_ALLOC_REQ_INT_MODE_MSIX;
		rattr.depth = BNXT_QPLIB_NQE_MAX_CNT - 1;
		rattr.lrid = rdev->msix_entries[i + 1].ring_idx;
		rc = bnxt_re_net_ring_alloc(rdev, &rattr, &nq->ring_id);
		if (rc) {
			ibdev_err(&rdev->ibdev,
				  "Failed to allocate NQ fw id with rc = 0x%x",
				  rc);
			bnxt_qplib_free_nq(&rdev->nq[i]);
			goto free_nq;
		}
		num_vec_created++;
	}
	return 0;
free_nq:
	for (i = num_vec_created - 1; i >= 0; i--) {
		type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
		bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id, type);
		bnxt_qplib_free_nq(&rdev->nq[i]);
	}
	bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
			       &rdev->qplib_res.dpi_tbl,
			       &rdev->dpi_privileged);
dealloc_res:
	bnxt_qplib_free_res(&rdev->qplib_res);

fail:
	rdev->qplib_res.rcfw = NULL;
	return rc;
}
static void bnxt_re_dispatch_event(struct ib_device *ibdev, struct ib_qp *qp,
				   u8 port_num, enum ib_event_type event)
{
	struct ib_event ib_event;

	ib_event.device = ibdev;
	if (qp) {
		ib_event.element.qp = qp;
		ib_event.event = event;
		if (qp->event_handler)
			qp->event_handler(&ib_event, qp->qp_context);
	} else {
		ib_event.element.port_num = port_num;
		ib_event.event = event;
		ib_dispatch_event(&ib_event);
	}
}
#define HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_IVLAN      0x02
static int bnxt_re_query_hwrm_pri2cos(struct bnxt_re_dev *rdev, u8 dir,
				      u64 *cid_map)
{
	struct hwrm_queue_pri2cos_qcfg_input req = {0};
	struct bnxt *bp = netdev_priv(rdev->netdev);
	struct hwrm_queue_pri2cos_qcfg_output resp;
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct bnxt_fw_msg fw_msg;
	u32 flags = 0;
	u8 *qcfgmap, *tmp_map;
	int rc = 0, i;

	if (!cid_map)
		return -EINVAL;

	memset(&fw_msg, 0, sizeof(fw_msg));
	bnxt_re_init_hwrm_hdr(rdev, (void *)&req,
			      HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
	flags |= (dir & 0x01);
	flags |= HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_IVLAN;
	req.flags = cpu_to_le32(flags);
	req.port_id = bp->pf.port_id;

	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (rc)
		return rc;

	if (resp.queue_cfg_info) {
		ibdev_warn(&rdev->ibdev,
			   "Asymmetric cos queue configuration detected");
		ibdev_warn(&rdev->ibdev,
			   " on device, QoS may not be fully functional\n");
	}
	qcfgmap = &resp.pri0_cos_queue_id;
	tmp_map = (u8 *)cid_map;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
		tmp_map[i] = qcfgmap[i];

	return rc;
}
static bool bnxt_re_is_qp1_or_shadow_qp(struct bnxt_re_dev *rdev,
					struct bnxt_re_qp *qp)
{
	return (qp->ib_qp.qp_type == IB_QPT_GSI) ||
	       (qp == rdev->gsi_ctx.gsi_sqp);
}
static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev)
{
	int mask = IB_QP_STATE;
	struct ib_qp_attr qp_attr;
	struct bnxt_re_qp *qp;

	qp_attr.qp_state = IB_QPS_ERR;
	mutex_lock(&rdev->qp_lock);
	list_for_each_entry(qp, &rdev->qp_list, list) {
		/* Modify the state of all QPs except QP1/Shadow QP */
		if (!bnxt_re_is_qp1_or_shadow_qp(rdev, qp)) {
			if (qp->qplib_qp.state !=
			    CMDQ_MODIFY_QP_NEW_STATE_RESET &&
			    qp->qplib_qp.state !=
			    CMDQ_MODIFY_QP_NEW_STATE_ERR) {
				bnxt_re_dispatch_event(&rdev->ibdev, &qp->ib_qp,
						       1, IB_EVENT_QP_FATAL);
				bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, mask,
						  NULL);
			}
		}
	}
	mutex_unlock(&rdev->qp_lock);
}
static int bnxt_re_update_gid(struct bnxt_re_dev *rdev)
{
	struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
	struct bnxt_qplib_gid gid;
	u16 gid_idx, index;
	int rc = 0;

	if (!ib_device_try_get(&rdev->ibdev))
		return 0;

	if (!sgid_tbl) {
		ibdev_err(&rdev->ibdev, "QPLIB: SGID table not allocated");
		rc = -EINVAL;
		goto out;
	}

	for (index = 0; index < sgid_tbl->active; index++) {
		gid_idx = sgid_tbl->hw_id[index];

		if (!memcmp(&sgid_tbl->tbl[index], &bnxt_qplib_gid_zero,
			    sizeof(bnxt_qplib_gid_zero)))
			continue;
		/* need to modify the VLAN enable setting of non VLAN GID only
		 * as setting is done for VLAN GID while adding GID
		 */
		if (sgid_tbl->vlan[index])
			continue;

		memcpy(&gid, &sgid_tbl->tbl[index], sizeof(gid));

		rc = bnxt_qplib_update_sgid(sgid_tbl, &gid, gid_idx,
					    rdev->qplib_res.netdev->dev_addr);
	}
out:
	ib_device_put(&rdev->ibdev);
	return rc;
}
static u32 bnxt_re_get_priority_mask(struct bnxt_re_dev *rdev)
{
	u32 prio_map = 0, tmp_map = 0;
	struct net_device *netdev;
	struct dcb_app app;

	netdev = rdev->netdev;

	memset(&app, 0, sizeof(app));
	app.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE;
	app.protocol = ETH_P_IBOE;
	tmp_map = dcb_ieee_getapp_mask(netdev, &app);
	prio_map = tmp_map;

	app.selector = IEEE_8021QAZ_APP_SEL_DGRAM;
	app.protocol = ROCE_V2_UDP_DPORT;
	tmp_map = dcb_ieee_getapp_mask(netdev, &app);
	prio_map |= tmp_map;

	return prio_map;
}
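
/*
 * Example: ETH_P_IBOE (0x8915) selects the RoCE v1 ethertype entry and
 * ROCE_V2_UDP_DPORT (4791) the RoCE v2 UDP entry of the DCB application
 * table. If either maps to priority 3, the returned mask has bit 3 set
 * (prio_map = 0x08).
 */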
static void bnxt_re_parse_cid_map(u8 prio_map, u8 *cid_map, u16 *cosq)
{
	u16 prio;
	u8 id;

	for (prio = 0, id = 0; prio < 8; prio++) {
		if (prio_map & (1 << prio)) {
			cosq[id] = cid_map[prio];
			id++;
			if (id == 2) /* Max 2 tcs supported */
				break;
		}
	}
}
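
/*
 * Example: prio_map = 0x28 (priorities 3 and 5 set) yields
 * cosq[0] = cid_map[3] and cosq[1] = cid_map[5]; any further priorities
 * are ignored since at most two traffic classes are supported.
 */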
static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev)
{
	u8 prio_map = 0;
	u64 cid_map;
	int rc;

	/* Get priority for roce */
	prio_map = bnxt_re_get_priority_mask(rdev);

	if (prio_map == rdev->cur_prio_map)
		return 0;
	rdev->cur_prio_map = prio_map;
	/* Get cosq id for this priority */
	rc = bnxt_re_query_hwrm_pri2cos(rdev, 0, &cid_map);
	if (rc) {
		ibdev_warn(&rdev->ibdev, "no cos for p_mask %x\n", prio_map);
		return rc;
	}
	/* Parse CoS IDs for app priority */
	bnxt_re_parse_cid_map(prio_map, (u8 *)&cid_map, rdev->cosq);

	rc = bnxt_qplib_map_tc2cos(&rdev->qplib_res, rdev->cosq);
	if (rc) {
		ibdev_warn(&rdev->ibdev, "no tc for cos{%x, %x}\n",
			   rdev->cosq[0], rdev->cosq[1]);
		return rc;
	}

	/* Actual priorities are not programmed as they are already
	 * done by L2 driver; just enable or disable priority vlan tagging
	 */
	if ((prio_map == 0 && rdev->qplib_res.prio) ||
	    (prio_map != 0 && !rdev->qplib_res.prio)) {
		rdev->qplib_res.prio = prio_map ? true : false;

		bnxt_re_update_gid(rdev);
	}

	return 0;
}
static void bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev)
{
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct hwrm_ver_get_output resp = {0};
	struct hwrm_ver_get_input req = {0};
	struct bnxt_fw_msg fw_msg;
	int rc = 0;

	memset(&fw_msg, 0, sizeof(fw_msg));
	bnxt_re_init_hwrm_hdr(rdev, (void *)&req,
			      HWRM_VER_GET, -1, -1);
	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
	req.hwrm_intf_min = HWRM_VERSION_MINOR;
	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to query HW version, rc = 0x%x",
			  rc);
		return;
	}
	rdev->qplib_ctx.hwrm_intf_ver =
		(u64)le16_to_cpu(resp.hwrm_intf_major) << 48 |
		(u64)le16_to_cpu(resp.hwrm_intf_minor) << 32 |
		(u64)le16_to_cpu(resp.hwrm_intf_build) << 16 |
		le16_to_cpu(resp.hwrm_intf_patch);
}
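
/*
 * Example of the packing above: firmware interface 1.10.0.69 is stored as
 * (1ULL << 48) | (10ULL << 32) | (0ULL << 16) | 69 = 0x0001000a00000045,
 * so whole-version comparisons can be done with a single 64-bit compare.
 */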
static int bnxt_re_ib_init(struct bnxt_re_dev *rdev)
{
	int rc = 0;
	u32 event;

	/* Register ib dev */
	rc = bnxt_re_register_ib(rdev);
	if (rc) {
		pr_err("Failed to register with IB: %#x\n", rc);
		return rc;
	}
	dev_info(rdev_to_dev(rdev), "Device registered successfully");
	ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed,
			 &rdev->active_width);
	set_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags);

	event = netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev) ?
		IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;

	bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, event);

	return rc;
}
static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev)
{
	u8 type;
	int rc;

	if (test_and_clear_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags))
		cancel_delayed_work_sync(&rdev->worker);

	if (test_and_clear_bit(BNXT_RE_FLAG_RESOURCES_INITIALIZED,
			       &rdev->flags))
		bnxt_re_cleanup_res(rdev);
	if (test_and_clear_bit(BNXT_RE_FLAG_RESOURCES_ALLOCATED, &rdev->flags))
		bnxt_re_free_res(rdev);

	if (test_and_clear_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags)) {
		rc = bnxt_qplib_deinit_rcfw(&rdev->rcfw);
		if (rc)
			ibdev_warn(&rdev->ibdev,
				   "Failed to deinitialize RCFW: %#x", rc);
		bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
		bnxt_qplib_free_ctx(&rdev->qplib_res, &rdev->qplib_ctx);
		bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
		type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
		bnxt_re_net_ring_free(rdev, rdev->rcfw.creq.ring_id, type);
		bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
	}
	if (test_and_clear_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags)) {
		rc = bnxt_re_free_msix(rdev);
		if (rc)
			ibdev_warn(&rdev->ibdev,
				   "Failed to free MSI-X vectors: %#x", rc);
	}

	bnxt_re_destroy_chip_ctx(rdev);
	if (test_and_clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags)) {
		rc = bnxt_re_unregister_netdev(rdev);
		if (rc)
			ibdev_warn(&rdev->ibdev,
				   "Failed to unregister with netdev: %#x", rc);
	}
}
/* worker thread for polling periodic events. Now used for QoS programming */
static void bnxt_re_worker(struct work_struct *work)
{
	struct bnxt_re_dev *rdev = container_of(work, struct bnxt_re_dev,
						worker.work);

	bnxt_re_setup_qos(rdev);
	schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
}
static int bnxt_re_dev_init(struct bnxt_re_dev *rdev)
{
	struct bnxt_qplib_creq_ctx *creq;
	struct bnxt_re_ring_attr rattr;
	u32 db_offt;
	int vid;
	u8 type;
	int rc;

	/* Registered a new RoCE device instance to netdev */
	memset(&rattr, 0, sizeof(rattr));
	rc = bnxt_re_register_netdev(rdev);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to register with netdev: %#x\n", rc);
		return -EINVAL;
	}
	set_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);

	rc = bnxt_re_setup_chip_ctx(rdev);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to get chip context\n");
		return -EINVAL;
	}

	/* Check whether VF or PF */
	bnxt_re_get_sriov_func_type(rdev);

	rc = bnxt_re_request_msix(rdev);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to get MSI-X vectors: %#x\n", rc);
		rc = -EINVAL;
		goto fail;
	}
	set_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags);

	bnxt_re_query_hwrm_intf_version(rdev);

	/* Establish RCFW Communication Channel to initialize the context
	 * memory for the function and all child VFs
	 */
	rc = bnxt_qplib_alloc_rcfw_channel(&rdev->qplib_res, &rdev->rcfw,
					   &rdev->qplib_ctx,
					   BNXT_RE_MAX_QPC_COUNT);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to allocate RCFW Channel: %#x\n", rc);
		goto fail;
	}

	type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
	creq = &rdev->rcfw.creq;
	rattr.dma_arr = creq->hwq.pbl[PBL_LVL_0].pg_map_arr;
	rattr.pages = creq->hwq.pbl[creq->hwq.level].pg_count;
	rattr.type = type;
	rattr.mode = RING_ALLOC_REQ_INT_MODE_MSIX;
	rattr.depth = BNXT_QPLIB_CREQE_MAX_CNT - 1;
	rattr.lrid = rdev->msix_entries[BNXT_RE_AEQ_IDX].ring_idx;
	rc = bnxt_re_net_ring_alloc(rdev, &rattr, &creq->ring_id);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to allocate CREQ: %#x\n", rc);
		goto free_rcfw;
	}
	db_offt = bnxt_re_get_nqdb_offset(rdev, BNXT_RE_AEQ_IDX);
	vid = rdev->msix_entries[BNXT_RE_AEQ_IDX].vector;
	rc = bnxt_qplib_enable_rcfw_channel(&rdev->rcfw,
					    vid, db_offt, rdev->is_virtfn,
					    &bnxt_re_aeq_handler);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to enable RCFW channel: %#x\n",
			  rc);
		goto free_ring;
	}

	rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr,
				     rdev->is_virtfn);
	if (rc)
		goto disable_rcfw;

	bnxt_re_set_resource_limits(rdev);

	rc = bnxt_qplib_alloc_ctx(&rdev->qplib_res, &rdev->qplib_ctx, 0,
				  bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx));
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to allocate QPLIB context: %#x\n", rc);
		goto disable_rcfw;
	}
	rc = bnxt_re_net_stats_ctx_alloc(rdev,
					 rdev->qplib_ctx.stats.dma_map,
					 &rdev->qplib_ctx.stats.fw_id);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to allocate stats context: %#x\n", rc);
		goto free_ctx;
	}

	rc = bnxt_qplib_init_rcfw(&rdev->rcfw, &rdev->qplib_ctx,
				  rdev->is_virtfn);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to initialize RCFW: %#x\n", rc);
		goto free_sctx;
	}
	set_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags);

	/* Resources based on the 'new' device caps */
	rc = bnxt_re_alloc_res(rdev);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to allocate resources: %#x\n", rc);
		goto fail;
	}
	set_bit(BNXT_RE_FLAG_RESOURCES_ALLOCATED, &rdev->flags);
	rc = bnxt_re_init_res(rdev);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to initialize resources: %#x\n", rc);
		goto fail;
	}

	set_bit(BNXT_RE_FLAG_RESOURCES_INITIALIZED, &rdev->flags);

	if (!rdev->is_virtfn) {
		rc = bnxt_re_setup_qos(rdev);
		if (rc)
			ibdev_info(&rdev->ibdev,
				   "RoCE priority not yet configured\n");

		INIT_DELAYED_WORK(&rdev->worker, bnxt_re_worker);
		set_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags);
		schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
	}

	return 0;
free_sctx:
	bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
free_ctx:
	bnxt_qplib_free_ctx(&rdev->qplib_res, &rdev->qplib_ctx);
disable_rcfw:
	bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
free_ring:
	type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
	bnxt_re_net_ring_free(rdev, rdev->rcfw.creq.ring_id, type);
free_rcfw:
	bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
fail:
	bnxt_re_dev_uninit(rdev);

	return rc;
}
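
/*
 * Bring-up order implemented above, for reference: register with bnxt_en,
 * set up the chip context, request MSI-X, query the HWRM interface
 * version, allocate and enable the RCFW channel (CREQ ring first), read
 * device attributes, size the context memory, allocate a stats context,
 * initialize RCFW, then allocate and initialize the NQ resources; the
 * error labels unwind in exactly the reverse order.
 */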
static void bnxt_re_dev_unreg(struct bnxt_re_dev *rdev)
{
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct net_device *netdev = rdev->netdev;

	bnxt_re_dev_remove(rdev);

	if (netdev)
		bnxt_re_dev_unprobe(netdev, en_dev);
}
static int bnxt_re_dev_reg(struct bnxt_re_dev **rdev, struct net_device *netdev)
{
	struct bnxt_en_dev *en_dev;
	int rc = 0;

	if (!is_bnxt_re_dev(netdev))
		return -ENODEV;

	en_dev = bnxt_re_dev_probe(netdev);
	if (IS_ERR(en_dev)) {
		if (en_dev != ERR_PTR(-ENODEV))
			ibdev_err(&(*rdev)->ibdev, "%s: Failed to probe\n",
				  ROCE_DRV_MODULE_NAME);
		rc = PTR_ERR(en_dev);
		goto exit;
	}
	*rdev = bnxt_re_dev_add(netdev, en_dev);
	if (!*rdev) {
		rc = -ENOMEM;
		bnxt_re_dev_unprobe(netdev, en_dev);
		goto exit;
	}
exit:
	return rc;
}
static void bnxt_re_remove_device(struct bnxt_re_dev *rdev)
{
	bnxt_re_dev_uninit(rdev);
	pci_dev_put(rdev->en_dev->pdev);
	bnxt_re_dev_unreg(rdev);
}
static int bnxt_re_add_device(struct bnxt_re_dev **rdev,
			      struct net_device *netdev)
{
	int rc;

	rc = bnxt_re_dev_reg(rdev, netdev);
	if (rc == -ENODEV)
		return rc;
	if (rc) {
		pr_err("Failed to register with the device %s: %#x\n",
		       netdev->name, rc);
		return rc;
	}

	pci_dev_get((*rdev)->en_dev->pdev);
	rc = bnxt_re_dev_init(*rdev);
	if (rc) {
		pci_dev_put((*rdev)->en_dev->pdev);
		bnxt_re_dev_unreg(*rdev);
	}

	return rc;
}
static void bnxt_re_dealloc_driver(struct ib_device *ib_dev)
{
	struct bnxt_re_dev *rdev =
		container_of(ib_dev, struct bnxt_re_dev, ibdev);

	dev_info(rdev_to_dev(rdev), "Unregistering Device");

	rtnl_lock();
	bnxt_re_remove_device(rdev);
	rtnl_unlock();
}
/* Handle all deferred netevents tasks */
static void bnxt_re_task(struct work_struct *work)
{
	struct bnxt_re_work *re_work;
	struct bnxt_re_dev *rdev;
	int rc = 0;

	re_work = container_of(work, struct bnxt_re_work, work);
	rdev = re_work->rdev;

	if (re_work->event == NETDEV_REGISTER) {
		rc = bnxt_re_ib_init(rdev);
		if (rc) {
			ibdev_err(&rdev->ibdev,
				  "Failed to register with IB: %#x", rc);
			rtnl_lock();
			bnxt_re_remove_device(rdev);
			rtnl_unlock();
			goto exit;
		}
		goto exit;
	}

	if (!ib_device_try_get(&rdev->ibdev))
		goto exit;

	switch (re_work->event) {
	case NETDEV_UP:
		bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
				       IB_EVENT_PORT_ACTIVE);
		break;
	case NETDEV_DOWN:
		bnxt_re_dev_stop(rdev);
		break;
	case NETDEV_CHANGE:
		if (!netif_carrier_ok(rdev->netdev))
			bnxt_re_dev_stop(rdev);
		else if (netif_carrier_ok(rdev->netdev))
			bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
					       IB_EVENT_PORT_ACTIVE);
		ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed,
				 &rdev->active_width);
		break;
	default:
		break;
	}
	ib_device_put(&rdev->ibdev);
exit:
	put_device(&rdev->ibdev.dev);
	kfree(re_work);
}
1676 * "Notifier chain callback can be invoked for the same chain from
1677 * different CPUs at the same time".
1679 * For cases when the netdev is already present, our call to the
1680 * register_netdevice_notifier() will actually get the rtnl_lock()
1681 * before sending NETDEV_REGISTER and (if up) NETDEV_UP
1684 * But for cases when the netdev is not already present, the notifier
1685 * chain is subjected to be invoked from different CPUs simultaneously.
1687 * This is protected by the netdev_mutex.
1689 static int bnxt_re_netdev_event(struct notifier_block
*notifier
,
1690 unsigned long event
, void *ptr
)
1692 struct net_device
*real_dev
, *netdev
= netdev_notifier_info_to_dev(ptr
);
1693 struct bnxt_re_work
*re_work
;
1694 struct bnxt_re_dev
*rdev
;
1696 bool sch_work
= false;
1697 bool release
= true;
1699 real_dev
= rdma_vlan_dev_real_dev(netdev
);
1703 rdev
= bnxt_re_from_netdev(real_dev
);
1704 if (!rdev
&& event
!= NETDEV_REGISTER
)
1707 if (real_dev
!= netdev
)
1711 case NETDEV_REGISTER
:
1714 rc
= bnxt_re_add_device(&rdev
, real_dev
);
1720 case NETDEV_UNREGISTER
:
1721 ib_unregister_device_queued(&rdev
->ibdev
);
1729 /* Allocate for the deferred task */
1730 re_work
= kzalloc(sizeof(*re_work
), GFP_ATOMIC
);
1732 get_device(&rdev
->ibdev
.dev
);
1733 re_work
->rdev
= rdev
;
1734 re_work
->event
= event
;
1735 re_work
->vlan_dev
= (real_dev
== netdev
?
1737 INIT_WORK(&re_work
->work
, bnxt_re_task
);
1738 queue_work(bnxt_re_wq
, &re_work
->work
);
1743 if (rdev
&& release
)
1744 ib_device_put(&rdev
->ibdev
);
static struct notifier_block bnxt_re_netdev_notifier = {
	.notifier_call = bnxt_re_netdev_event
};

static int __init bnxt_re_mod_init(void)
{
	int rc = 0;

	pr_info("%s: %s", ROCE_DRV_MODULE_NAME, version);

	bnxt_re_wq = create_singlethread_workqueue("bnxt_re");
	if (!bnxt_re_wq)
		return -ENOMEM;

	INIT_LIST_HEAD(&bnxt_re_dev_list);

	rc = register_netdevice_notifier(&bnxt_re_netdev_notifier);
	if (rc) {
		pr_err("%s: Cannot register to netdevice_notifier",
		       ROCE_DRV_MODULE_NAME);
		goto err_netdev;
	}
	return 0;

err_netdev:
	destroy_workqueue(bnxt_re_wq);

	return rc;
}
static void __exit bnxt_re_mod_exit(void)
{
	struct bnxt_re_dev *rdev;

	unregister_netdevice_notifier(&bnxt_re_netdev_notifier);
	if (bnxt_re_wq)
		destroy_workqueue(bnxt_re_wq);
	list_for_each_entry(rdev, &bnxt_re_dev_list, list) {
		/* VF device removal should be called before the removal
		 * of PF device. Queue VFs unregister first, so that VFs
		 * shall be removed before the PF during the call of
		 * ib_unregister_driver.
		 */
		if (rdev->is_virtfn)
			ib_unregister_device(&rdev->ibdev);
	}
	ib_unregister_driver(RDMA_DRIVER_BNXT_RE);
}

module_init(bnxt_re_mod_init);
module_exit(bnxt_re_mod_exit);