/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: Main component of the bnxt_re driver
 */
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <net/dcbnl.h>
#include <net/ipv6.h>
#include <net/addrconf.h>
#include <linux/if_ether.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>

#include "bnxt_ulp.h"
#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
#include "qplib_fp.h"
#include "qplib_rcfw.h"
#include "bnxt_re.h"
#include "ib_verbs.h"
#include <rdma/bnxt_re-abi.h>
#include "bnxt.h"
#include "hw_counters.h"
static char version[] =
		BNXT_RE_DESC "\n";

MODULE_AUTHOR("Eddie Wai <eddie.wai@broadcom.com>");
MODULE_DESCRIPTION(BNXT_RE_DESC " Driver");
MODULE_LICENSE("Dual BSD/GPL");

static struct list_head bnxt_re_dev_list = LIST_HEAD_INIT(bnxt_re_dev_list);
/* Mutex to protect the list of bnxt_re devices added */
static DEFINE_MUTEX(bnxt_re_dev_lock);
static struct workqueue_struct *bnxt_re_wq;
static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev);
static void bnxt_re_destroy_chip_ctx(struct bnxt_re_dev *rdev)
{
	rdev->rcfw.res = NULL;
	rdev->qplib_res.cctx = NULL;
}
static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev)
{
	struct bnxt_en_dev *en_dev;
	struct bnxt *bp;

	en_dev = rdev->en_dev;
	bp = netdev_priv(en_dev->net);

	rdev->chip_ctx.chip_num = bp->chip_num;
	/* rest members to follow eventually */

	rdev->qplib_res.cctx = &rdev->chip_ctx;
	rdev->rcfw.res = &rdev->qplib_res;

	return 0;
}

/* SR-IOV helper functions */

static void bnxt_re_get_sriov_func_type(struct bnxt_re_dev *rdev)
{
	struct bnxt *bp;

	bp = netdev_priv(rdev->en_dev->net);
	if (BNXT_VF(bp))
		rdev->is_virtfn = 1;
}
/* Set the maximum number of each resource that the driver actually wants
 * to allocate. This may be up to the maximum number the firmware has
 * reserved for the function. The driver may choose to allocate fewer
 * resources than the firmware maximum.
 */
static void bnxt_re_limit_pf_res(struct bnxt_re_dev *rdev)
{
	struct bnxt_qplib_dev_attr *attr;
	struct bnxt_qplib_ctx *ctx;
	int i;

	attr = &rdev->dev_attr;
	ctx = &rdev->qplib_ctx;

	ctx->qpc_count = min_t(u32, BNXT_RE_MAX_QPC_COUNT,
			       attr->max_qp);
	ctx->mrw_count = BNXT_RE_MAX_MRW_COUNT_256K;
	/* Use max_mr from fw since max_mrw does not get set */
	ctx->mrw_count = min_t(u32, ctx->mrw_count, attr->max_mr);
	ctx->srqc_count = min_t(u32, BNXT_RE_MAX_SRQC_COUNT,
				attr->max_srq);
	ctx->cq_count = min_t(u32, BNXT_RE_MAX_CQ_COUNT, attr->max_cq);
	if (!bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx))
		for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
			rdev->qplib_ctx.tqm_count[i] =
				rdev->dev_attr.tqm_alloc_reqs[i];
}
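/*
 * Illustrative sketch only (the numbers below are hypothetical, not taken
 * from this driver): if the firmware reported attr->max_qp = 32768 while
 * BNXT_RE_MAX_QPC_COUNT compiled to 65536, the min_t() above would leave
 * qpc_count at 32768, i.e. the driver never asks for more of a resource
 * than the firmware says the function may own.
 */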
static void bnxt_re_limit_vf_res(struct bnxt_qplib_ctx *qplib_ctx, u32 num_vf)
{
	struct bnxt_qplib_vf_res *vf_res;
	u32 mrws = 0;
	u32 vf_pct;
	u32 nvfs;

	vf_res = &qplib_ctx->vf_res;
	/*
	 * Reserve a set of resources for the PF. Divide the remaining
	 * resources among the VFs
	 */
	vf_pct = 100 - BNXT_RE_PCT_RSVD_FOR_PF;
	nvfs = num_vf;
	num_vf = 100 * num_vf;
	vf_res->max_qp_per_vf = (qplib_ctx->qpc_count * vf_pct) / num_vf;
	vf_res->max_srq_per_vf = (qplib_ctx->srqc_count * vf_pct) / num_vf;
	vf_res->max_cq_per_vf = (qplib_ctx->cq_count * vf_pct) / num_vf;
	/*
	 * The driver allows many more MRs than other resources. If the
	 * firmware does also, then reserve a fixed amount for the PF and
	 * divide the rest among VFs. VFs may use many MRs for NFS
	 * mounts, ISER, NVME applications, etc. If the firmware severely
	 * restricts the number of MRs, then let PF have half and divide
	 * the rest among VFs, as for the other resource types.
	 */
	if (qplib_ctx->mrw_count < BNXT_RE_MAX_MRW_COUNT_64K) {
		mrws = qplib_ctx->mrw_count * vf_pct;
		nvfs = num_vf;
	} else {
		mrws = qplib_ctx->mrw_count - BNXT_RE_RESVD_MR_FOR_PF;
	}
	vf_res->max_mrw_per_vf = (mrws / nvfs);
	vf_res->max_gid_per_vf = BNXT_RE_MAX_GID_PER_VF;
}
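/*
 * Worked example of the split above, with made-up numbers (the real values
 * come from the BNXT_RE_* limits and the firmware caps): assuming
 * BNXT_RE_PCT_RSVD_FOR_PF were 35, with qpc_count = 60000 and num_vf = 4,
 * vf_pct is 65, num_vf is scaled to 400, and each VF is offered
 * (60000 * 65) / 400 = 9750 QP contexts, leaving 35% of them for the PF.
 */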
static void bnxt_re_set_resource_limits(struct bnxt_re_dev *rdev)
{
	u32 num_vfs;

	memset(&rdev->qplib_ctx.vf_res, 0, sizeof(struct bnxt_qplib_vf_res));
	bnxt_re_limit_pf_res(rdev);

	num_vfs =  bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx) ?
		       BNXT_RE_GEN_P5_MAX_VF : rdev->num_vfs;
	if (num_vfs)
		bnxt_re_limit_vf_res(&rdev->qplib_ctx, num_vfs);
}
/* for handling bnxt_en callbacks later */
static void bnxt_re_stop(void *p)
{
}

static void bnxt_re_start(void *p)
{
}

static void bnxt_re_sriov_config(void *p, int num_vfs)
{
	struct bnxt_re_dev *rdev = p;

	if (!rdev)
		return;

	rdev->num_vfs = num_vfs;
	if (!bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx)) {
		bnxt_re_set_resource_limits(rdev);
		bnxt_qplib_set_func_resources(&rdev->qplib_res, &rdev->rcfw,
					      &rdev->qplib_ctx);
	}
}

static void bnxt_re_shutdown(void *p)
{
	struct bnxt_re_dev *rdev = p;

	if (!rdev)
		return;

	bnxt_re_ib_unreg(rdev);
}
static void bnxt_re_stop_irq(void *handle)
{
	struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
	struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw;
	struct bnxt_qplib_nq *nq;
	int indx;

	for (indx = BNXT_RE_NQ_IDX; indx < rdev->num_msix; indx++) {
		nq = &rdev->nq[indx - 1];
		bnxt_qplib_nq_stop_irq(nq, false);
	}

	bnxt_qplib_rcfw_stop_irq(rcfw, false);
}
static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
{
	struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
	struct bnxt_msix_entry *msix_ent = rdev->msix_entries;
	struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw;
	struct bnxt_qplib_nq *nq;
	int indx, rc;

	if (!ent) {
		/* Not setting the f/w timeout bit in rcfw.
		 * During the driver unload the first command
		 * to f/w will timeout and that will set the
		 * timeout bit.
		 */
		dev_err(rdev_to_dev(rdev), "Failed to re-start IRQs\n");
		return;
	}

	/* Vectors may change after restart, so update with new vectors
	 * in the device structure.
	 */
	for (indx = 0; indx < rdev->num_msix; indx++)
		rdev->msix_entries[indx].vector = ent[indx].vector;

	bnxt_qplib_rcfw_start_irq(rcfw, msix_ent[BNXT_RE_AEQ_IDX].vector,
				  false);
	for (indx = BNXT_RE_NQ_IDX; indx < rdev->num_msix; indx++) {
		nq = &rdev->nq[indx - 1];
		rc = bnxt_qplib_nq_start_irq(nq, indx - 1,
					     msix_ent[indx].vector, false);
		if (rc)
			dev_warn(rdev_to_dev(rdev),
				 "Failed to reinit NQ index %d\n", indx - 1);
	}
}
static struct bnxt_ulp_ops bnxt_re_ulp_ops = {
	.ulp_async_notifier = NULL,
	.ulp_stop = bnxt_re_stop,
	.ulp_start = bnxt_re_start,
	.ulp_sriov_config = bnxt_re_sriov_config,
	.ulp_shutdown = bnxt_re_shutdown,
	.ulp_irq_stop = bnxt_re_stop_irq,
	.ulp_irq_restart = bnxt_re_start_irq
};
/* RoCE -> Net driver */

/* Driver registration routines used to let the networking driver (bnxt_en)
 * know that the RoCE driver is now installed
 */
static int bnxt_re_unregister_netdev(struct bnxt_re_dev *rdev)
{
	struct bnxt_en_dev *en_dev;
	int rc;

	if (!rdev)
		return -EINVAL;

	en_dev = rdev->en_dev;

	rc = en_dev->en_ops->bnxt_unregister_device(rdev->en_dev,
						    BNXT_ROCE_ULP);
	return rc;
}

static int bnxt_re_register_netdev(struct bnxt_re_dev *rdev)
{
	struct bnxt_en_dev *en_dev;
	int rc = 0;

	if (!rdev)
		return -EINVAL;

	en_dev = rdev->en_dev;

	rc = en_dev->en_ops->bnxt_register_device(en_dev, BNXT_ROCE_ULP,
						  &bnxt_re_ulp_ops, rdev);
	rdev->qplib_res.pdev = rdev->en_dev->pdev;
	return rc;
}

static int bnxt_re_free_msix(struct bnxt_re_dev *rdev)
{
	struct bnxt_en_dev *en_dev;
	int rc;

	if (!rdev)
		return -EINVAL;

	en_dev = rdev->en_dev;

	rc = en_dev->en_ops->bnxt_free_msix(rdev->en_dev, BNXT_ROCE_ULP);

	return rc;
}
static int bnxt_re_request_msix(struct bnxt_re_dev *rdev)
{
	int rc = 0, num_msix_want = BNXT_RE_MAX_MSIX, num_msix_got;
	struct bnxt_en_dev *en_dev;

	if (!rdev)
		return -EINVAL;

	en_dev = rdev->en_dev;

	num_msix_want = min_t(u32, BNXT_RE_MAX_MSIX, num_online_cpus());

	num_msix_got = en_dev->en_ops->bnxt_request_msix(en_dev, BNXT_ROCE_ULP,
							 rdev->msix_entries,
							 num_msix_want);
	if (num_msix_got < BNXT_RE_MIN_MSIX) {
		rc = -EINVAL;
		goto done;
	}
	if (num_msix_got != num_msix_want) {
		dev_warn(rdev_to_dev(rdev),
			 "Requested %d MSI-X vectors, got %d\n",
			 num_msix_want, num_msix_got);
	}
	rdev->num_msix = num_msix_got;
done:
	return rc;
}
static void bnxt_re_init_hwrm_hdr(struct bnxt_re_dev *rdev, struct input *hdr,
				  u16 opcd, u16 crid, u16 trid)
{
	hdr->req_type = cpu_to_le16(opcd);
	hdr->cmpl_ring = cpu_to_le16(crid);
	hdr->target_id = cpu_to_le16(trid);
}

static void bnxt_re_fill_fw_msg(struct bnxt_fw_msg *fw_msg, void *msg,
				int msg_len, void *resp, int resp_max_len,
				int timeout)
{
	fw_msg->msg = msg;
	fw_msg->msg_len = msg_len;
	fw_msg->resp = resp;
	fw_msg->resp_max_len = resp_max_len;
	fw_msg->timeout = timeout;
}
static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev,
				 u16 fw_ring_id, int type)
{
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct hwrm_ring_free_input req = {0};
	struct hwrm_ring_free_output resp;
	struct bnxt_fw_msg fw_msg;
	int rc = -EINVAL;

	if (!en_dev)
		return rc;

	memset(&fw_msg, 0, sizeof(fw_msg));

	bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_FREE, -1, -1);
	req.ring_type = type;
	req.ring_id = cpu_to_le16(fw_ring_id);
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (rc)
		dev_err(rdev_to_dev(rdev),
			"Failed to free HW ring:%d :%#x", req.ring_id, rc);

	return rc;
}
static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev, dma_addr_t *dma_arr,
				  int pages, int type, u32 ring_mask,
				  u32 map_index, u16 *fw_ring_id)
{
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct hwrm_ring_alloc_input req = {0};
	struct hwrm_ring_alloc_output resp;
	struct bnxt_fw_msg fw_msg;
	int rc = -EINVAL;

	if (!en_dev)
		return rc;

	memset(&fw_msg, 0, sizeof(fw_msg));
	bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_ALLOC, -1, -1);
	req.page_tbl_addr =  cpu_to_le64(dma_arr[0]);
	if (pages > 1) {
		/* Page size is in log2 units */
		req.page_size = BNXT_PAGE_SHIFT;
		req.page_tbl_depth = 1;
	}
	/* Association of ring index with doorbell index and MSIX number */
	req.logical_id = cpu_to_le16(map_index);
	req.length = cpu_to_le32(ring_mask + 1);
	req.ring_type = type;
	req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (!rc)
		*fw_ring_id = le16_to_cpu(resp.ring_id);

	return rc;
}
static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev,
				      u32 fw_stats_ctx_id)
{
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct hwrm_stat_ctx_free_input req = {0};
	struct bnxt_fw_msg fw_msg;
	int rc = -EINVAL;

	if (!en_dev)
		return rc;

	memset(&fw_msg, 0, sizeof(fw_msg));

	bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_FREE, -1, -1);
	req.stat_ctx_id = cpu_to_le32(fw_stats_ctx_id);
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&req,
			    sizeof(req), DFLT_HWRM_CMD_TIMEOUT);
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (rc)
		dev_err(rdev_to_dev(rdev),
			"Failed to free HW stats context %#x", rc);

	return rc;
}
static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
					dma_addr_t dma_map,
					u32 *fw_stats_ctx_id)
{
	struct hwrm_stat_ctx_alloc_output resp = {0};
	struct hwrm_stat_ctx_alloc_input req = {0};
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct bnxt_fw_msg fw_msg;
	int rc = -EINVAL;

	*fw_stats_ctx_id = INVALID_STATS_CTX_ID;

	if (!en_dev)
		return rc;

	memset(&fw_msg, 0, sizeof(fw_msg));

	bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_ALLOC, -1, -1);
	req.update_period_ms = cpu_to_le32(1000);
	req.stats_dma_addr = cpu_to_le64(dma_map);
	req.stats_dma_length = cpu_to_le16(sizeof(struct ctx_hw_stats_ext));
	req.stat_ctx_flags = STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE;
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (!rc)
		*fw_stats_ctx_id = le32_to_cpu(resp.stat_ctx_id);

	return rc;
}
static bool is_bnxt_re_dev(struct net_device *netdev)
{
	struct ethtool_drvinfo drvinfo;

	if (netdev->ethtool_ops && netdev->ethtool_ops->get_drvinfo) {
		memset(&drvinfo, 0, sizeof(drvinfo));
		netdev->ethtool_ops->get_drvinfo(netdev, &drvinfo);

		if (strcmp(drvinfo.driver, "bnxt_en"))
			return false;
		return true;
	}
	return false;
}

static struct bnxt_re_dev *bnxt_re_from_netdev(struct net_device *netdev)
{
	struct bnxt_re_dev *rdev;

	rcu_read_lock();
	list_for_each_entry_rcu(rdev, &bnxt_re_dev_list, list) {
		if (rdev->netdev == netdev) {
			rcu_read_unlock();
			return rdev;
		}
	}
	rcu_read_unlock();
	return NULL;
}

static void bnxt_re_dev_unprobe(struct net_device *netdev,
				struct bnxt_en_dev *en_dev)
{
	dev_put(netdev);
	module_put(en_dev->pdev->driver->driver.owner);
}
static struct bnxt_en_dev *bnxt_re_dev_probe(struct net_device *netdev)
{
	struct bnxt *bp = netdev_priv(netdev);
	struct bnxt_en_dev *en_dev;
	struct pci_dev *pdev;

	/* Call bnxt_en's RoCE probe via indirect API */
	if (!bp->ulp_probe)
		return ERR_PTR(-EINVAL);

	en_dev = bp->ulp_probe(netdev);
	if (IS_ERR(en_dev))
		return en_dev;

	pdev = en_dev->pdev;
	if (!pdev)
		return ERR_PTR(-EINVAL);

	if (!(en_dev->flags & BNXT_EN_FLAG_ROCE_CAP)) {
		dev_info(&pdev->dev,
			"%s: probe error: RoCE is not supported on this device",
			ROCE_DRV_MODULE_NAME);
		return ERR_PTR(-ENODEV);
	}

	/* Bump net device reference count */
	if (!try_module_get(pdev->driver->driver.owner))
		return ERR_PTR(-ENODEV);

	dev_hold(netdev);

	return en_dev;
}
static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
			   char *buf)
{
	struct bnxt_re_dev *rdev =
		rdma_device_to_drv_device(device, struct bnxt_re_dev, ibdev);

	return scnprintf(buf, PAGE_SIZE, "0x%x\n", rdev->en_dev->pdev->vendor);
}
static DEVICE_ATTR_RO(hw_rev);

static ssize_t hca_type_show(struct device *device,
			     struct device_attribute *attr, char *buf)
{
	struct bnxt_re_dev *rdev =
		rdma_device_to_drv_device(device, struct bnxt_re_dev, ibdev);

	return scnprintf(buf, PAGE_SIZE, "%s\n", rdev->ibdev.node_desc);
}
static DEVICE_ATTR_RO(hca_type);

static struct attribute *bnxt_re_attributes[] = {
	&dev_attr_hw_rev.attr,
	&dev_attr_hca_type.attr,
	NULL
};

static const struct attribute_group bnxt_re_dev_attr_group = {
	.attrs = bnxt_re_attributes,
};
static void bnxt_re_unregister_ib(struct bnxt_re_dev *rdev)
{
	ib_unregister_device(&rdev->ibdev);
}

static const struct ib_device_ops bnxt_re_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_BNXT_RE,
	.uverbs_abi_ver = BNXT_RE_ABI_VERSION,

	.add_gid = bnxt_re_add_gid,
	.alloc_hw_stats = bnxt_re_ib_alloc_hw_stats,
	.alloc_mr = bnxt_re_alloc_mr,
	.alloc_pd = bnxt_re_alloc_pd,
	.alloc_ucontext = bnxt_re_alloc_ucontext,
	.create_ah = bnxt_re_create_ah,
	.create_cq = bnxt_re_create_cq,
	.create_qp = bnxt_re_create_qp,
	.create_srq = bnxt_re_create_srq,
	.dealloc_pd = bnxt_re_dealloc_pd,
	.dealloc_ucontext = bnxt_re_dealloc_ucontext,
	.del_gid = bnxt_re_del_gid,
	.dereg_mr = bnxt_re_dereg_mr,
	.destroy_ah = bnxt_re_destroy_ah,
	.destroy_cq = bnxt_re_destroy_cq,
	.destroy_qp = bnxt_re_destroy_qp,
	.destroy_srq = bnxt_re_destroy_srq,
	.get_dev_fw_str = bnxt_re_query_fw_str,
	.get_dma_mr = bnxt_re_get_dma_mr,
	.get_hw_stats = bnxt_re_ib_get_hw_stats,
	.get_link_layer = bnxt_re_get_link_layer,
	.get_port_immutable = bnxt_re_get_port_immutable,
	.map_mr_sg = bnxt_re_map_mr_sg,
	.mmap = bnxt_re_mmap,
	.modify_ah = bnxt_re_modify_ah,
	.modify_qp = bnxt_re_modify_qp,
	.modify_srq = bnxt_re_modify_srq,
	.poll_cq = bnxt_re_poll_cq,
	.post_recv = bnxt_re_post_recv,
	.post_send = bnxt_re_post_send,
	.post_srq_recv = bnxt_re_post_srq_recv,
	.query_ah = bnxt_re_query_ah,
	.query_device = bnxt_re_query_device,
	.query_pkey = bnxt_re_query_pkey,
	.query_port = bnxt_re_query_port,
	.query_qp = bnxt_re_query_qp,
	.query_srq = bnxt_re_query_srq,
	.reg_user_mr = bnxt_re_reg_user_mr,
	.req_notify_cq = bnxt_re_req_notify_cq,
	INIT_RDMA_OBJ_SIZE(ib_ah, bnxt_re_ah, ib_ah),
	INIT_RDMA_OBJ_SIZE(ib_cq, bnxt_re_cq, ib_cq),
	INIT_RDMA_OBJ_SIZE(ib_pd, bnxt_re_pd, ib_pd),
	INIT_RDMA_OBJ_SIZE(ib_srq, bnxt_re_srq, ib_srq),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, bnxt_re_ucontext, ib_uctx),
};
static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
{
	struct ib_device *ibdev = &rdev->ibdev;
	int ret;

	ibdev->node_type = RDMA_NODE_IB_CA;
	strlcpy(ibdev->node_desc, BNXT_RE_DESC " HCA",
		strlen(BNXT_RE_DESC) + 5);
	ibdev->phys_port_cnt = 1;

	bnxt_qplib_get_guid(rdev->netdev->dev_addr, (u8 *)&ibdev->node_guid);

	ibdev->num_comp_vectors	= rdev->num_msix - 1;
	ibdev->dev.parent = &rdev->en_dev->pdev->dev;
	ibdev->local_dma_lkey = BNXT_QPLIB_RSVD_LKEY;

	/* User space verbs supported by the driver */
	ibdev->uverbs_cmd_mask =
			(1ull << IB_USER_VERBS_CMD_GET_CONTEXT)		|
			(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)	|
			(1ull << IB_USER_VERBS_CMD_QUERY_PORT)		|
			(1ull << IB_USER_VERBS_CMD_ALLOC_PD)		|
			(1ull << IB_USER_VERBS_CMD_DEALLOC_PD)		|
			(1ull << IB_USER_VERBS_CMD_REG_MR)		|
			(1ull << IB_USER_VERBS_CMD_REREG_MR)		|
			(1ull << IB_USER_VERBS_CMD_DEREG_MR)		|
			(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
			(1ull << IB_USER_VERBS_CMD_CREATE_CQ)		|
			(1ull << IB_USER_VERBS_CMD_RESIZE_CQ)		|
			(1ull << IB_USER_VERBS_CMD_DESTROY_CQ)		|
			(1ull << IB_USER_VERBS_CMD_CREATE_QP)		|
			(1ull << IB_USER_VERBS_CMD_MODIFY_QP)		|
			(1ull << IB_USER_VERBS_CMD_QUERY_QP)		|
			(1ull << IB_USER_VERBS_CMD_DESTROY_QP)		|
			(1ull << IB_USER_VERBS_CMD_CREATE_SRQ)		|
			(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)		|
			(1ull << IB_USER_VERBS_CMD_QUERY_SRQ)		|
			(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)		|
			(1ull << IB_USER_VERBS_CMD_CREATE_AH)		|
			(1ull << IB_USER_VERBS_CMD_MODIFY_AH)		|
			(1ull << IB_USER_VERBS_CMD_QUERY_AH)		|
			(1ull << IB_USER_VERBS_CMD_DESTROY_AH);
	/* POLL_CQ and REQ_NOTIFY_CQ is directly handled in libbnxt_re */

	rdma_set_device_sysfs_group(ibdev, &bnxt_re_dev_attr_group);
	ib_set_device_ops(ibdev, &bnxt_re_dev_ops);
	ret = ib_device_set_netdev(&rdev->ibdev, rdev->netdev, 1);
	if (ret)
		return ret;

	return ib_register_device(ibdev, "bnxt_re%d");
}
static void bnxt_re_dev_remove(struct bnxt_re_dev *rdev)
{
	dev_put(rdev->netdev);
	rdev->netdev = NULL;

	mutex_lock(&bnxt_re_dev_lock);
	list_del_rcu(&rdev->list);
	mutex_unlock(&bnxt_re_dev_lock);

	synchronize_rcu();

	ib_dealloc_device(&rdev->ibdev);
	/* rdev is gone */
}

static struct bnxt_re_dev *bnxt_re_dev_add(struct net_device *netdev,
					   struct bnxt_en_dev *en_dev)
{
	struct bnxt_re_dev *rdev;

	/* Allocate bnxt_re_dev instance here */
	rdev = ib_alloc_device(bnxt_re_dev, ibdev);
	if (!rdev) {
		dev_err(NULL, "%s: bnxt_re_dev allocation failure!",
			ROCE_DRV_MODULE_NAME);
		return NULL;
	}
	/* Default values */
	rdev->netdev = netdev;
	dev_hold(rdev->netdev);
	rdev->en_dev = en_dev;
	rdev->id = rdev->en_dev->pdev->devfn;
	INIT_LIST_HEAD(&rdev->qp_list);
	mutex_init(&rdev->qp_lock);
	atomic_set(&rdev->qp_count, 0);
	atomic_set(&rdev->cq_count, 0);
	atomic_set(&rdev->srq_count, 0);
	atomic_set(&rdev->mr_count, 0);
	atomic_set(&rdev->mw_count, 0);
	rdev->cosq[0] = 0xFFFF;
	rdev->cosq[1] = 0xFFFF;

	mutex_lock(&bnxt_re_dev_lock);
	list_add_tail_rcu(&rdev->list, &bnxt_re_dev_list);
	mutex_unlock(&bnxt_re_dev_lock);

	return rdev;
}
static int bnxt_re_handle_unaffi_async_event(struct creq_func_event
					     *unaffi_async)
{
	switch (unaffi_async->event) {
	case CREQ_FUNC_EVENT_EVENT_TX_WQE_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_TX_DATA_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_RX_WQE_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_RX_DATA_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CQ_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_TQM_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCQ_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCS_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCC_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCM_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_TIM_ERROR:
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
static int bnxt_re_handle_qp_async_event(struct creq_qp_event *qp_event,
					 struct bnxt_re_qp *qp)
{
	struct ib_event event;
	unsigned int flags;

	if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
		flags = bnxt_re_lock_cqs(qp);
		bnxt_qplib_add_flush_qp(&qp->qplib_qp);
		bnxt_re_unlock_cqs(qp, flags);
	}

	memset(&event, 0, sizeof(event));
	if (qp->qplib_qp.srq) {
		event.device = &qp->rdev->ibdev;
		event.element.qp = &qp->ib_qp;
		event.event = IB_EVENT_QP_LAST_WQE_REACHED;
	}

	if (event.device && qp->ib_qp.event_handler)
		qp->ib_qp.event_handler(&event, qp->ib_qp.qp_context);

	return 0;
}

static int bnxt_re_handle_affi_async_event(struct creq_qp_event *affi_async,
					   void *obj)
{
	int rc = 0;
	u8 event;

	if (!obj)
		return rc; /* QP was already dead, still return success */

	event = affi_async->event;
	if (event == CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION) {
		struct bnxt_qplib_qp *lib_qp = obj;
		struct bnxt_re_qp *qp = container_of(lib_qp, struct bnxt_re_qp,
						     qplib_qp);
		rc = bnxt_re_handle_qp_async_event(affi_async, qp);
	}
	return rc;
}
static int bnxt_re_aeq_handler(struct bnxt_qplib_rcfw *rcfw,
			       void *aeqe, void *obj)
{
	struct creq_qp_event *affi_async;
	struct creq_func_event *unaffi_async;
	u8 type;
	int rc;

	type = ((struct creq_base *)aeqe)->type;
	if (type == CREQ_BASE_TYPE_FUNC_EVENT) {
		unaffi_async = aeqe;
		rc = bnxt_re_handle_unaffi_async_event(unaffi_async);
	} else {
		affi_async = aeqe;
		rc = bnxt_re_handle_affi_async_event(affi_async, obj);
	}

	return rc;
}
static int bnxt_re_srqn_handler(struct bnxt_qplib_nq *nq,
				struct bnxt_qplib_srq *handle, u8 event)
{
	struct bnxt_re_srq *srq = container_of(handle, struct bnxt_re_srq,
					       qplib_srq);
	struct ib_event ib_event;
	int rc = 0;

	if (!srq) {
		dev_err(NULL, "%s: SRQ is NULL, SRQN not handled",
			ROCE_DRV_MODULE_NAME);
		rc = -EINVAL;
		goto done;
	}
	ib_event.device = &srq->rdev->ibdev;
	ib_event.element.srq = &srq->ib_srq;
	if (event == NQ_SRQ_EVENT_EVENT_SRQ_THRESHOLD_EVENT)
		ib_event.event = IB_EVENT_SRQ_LIMIT_REACHED;
	else
		ib_event.event = IB_EVENT_SRQ_ERR;

	if (srq->ib_srq.event_handler) {
		/* Lock event_handler? */
		(*srq->ib_srq.event_handler)(&ib_event,
					     srq->ib_srq.srq_context);
	}
done:
	return rc;
}

static int bnxt_re_cqn_handler(struct bnxt_qplib_nq *nq,
			       struct bnxt_qplib_cq *handle)
{
	struct bnxt_re_cq *cq = container_of(handle, struct bnxt_re_cq,
					     qplib_cq);

	if (!cq) {
		dev_err(NULL, "%s: CQ is NULL, CQN not handled",
			ROCE_DRV_MODULE_NAME);
		return -EINVAL;
	}
	if (cq->ib_cq.comp_handler) {
		/* Lock comp_handler? */
		(*cq->ib_cq.comp_handler)(&cq->ib_cq, cq->ib_cq.cq_context);
	}

	return 0;
}
#define BNXT_RE_GEN_P5_PF_NQ_DB		0x10000
#define BNXT_RE_GEN_P5_VF_NQ_DB		0x4000
static u32 bnxt_re_get_nqdb_offset(struct bnxt_re_dev *rdev, u16 indx)
{
	return bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx) ?
		(rdev->is_virtfn ? BNXT_RE_GEN_P5_VF_NQ_DB :
				   BNXT_RE_GEN_P5_PF_NQ_DB) :
				   rdev->msix_entries[indx].db_offset;
}
static void bnxt_re_cleanup_res(struct bnxt_re_dev *rdev)
{
	int i;

	for (i = 1; i < rdev->num_msix; i++)
		bnxt_qplib_disable_nq(&rdev->nq[i - 1]);

	if (rdev->qplib_res.rcfw)
		bnxt_qplib_cleanup_res(&rdev->qplib_res);
}

static int bnxt_re_init_res(struct bnxt_re_dev *rdev)
{
	int num_vec_enabled = 0;
	int rc = 0, i;
	u32 db_offt;

	bnxt_qplib_init_res(&rdev->qplib_res);

	for (i = 1; i < rdev->num_msix; i++) {
		db_offt = bnxt_re_get_nqdb_offset(rdev, i);
		rc = bnxt_qplib_enable_nq(rdev->en_dev->pdev, &rdev->nq[i - 1],
					  i - 1, rdev->msix_entries[i].vector,
					  db_offt, &bnxt_re_cqn_handler,
					  &bnxt_re_srqn_handler);
		if (rc) {
			dev_err(rdev_to_dev(rdev),
				"Failed to enable NQ with rc = 0x%x", rc);
			goto fail;
		}
		num_vec_enabled++;
	}
	return 0;
fail:
	for (i = num_vec_enabled; i >= 0; i--)
		bnxt_qplib_disable_nq(&rdev->nq[i]);
	return rc;
}
static void bnxt_re_free_nq_res(struct bnxt_re_dev *rdev)
{
	u8 type;
	int i;

	for (i = 0; i < rdev->num_msix - 1; i++) {
		type = bnxt_qplib_get_ring_type(&rdev->chip_ctx);
		bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id, type);
		rdev->nq[i].res = NULL;
		bnxt_qplib_free_nq(&rdev->nq[i]);
	}
}

static void bnxt_re_free_res(struct bnxt_re_dev *rdev)
{
	bnxt_re_free_nq_res(rdev);

	if (rdev->qplib_res.dpi_tbl.max) {
		bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
				       &rdev->qplib_res.dpi_tbl,
				       &rdev->dpi_privileged);
	}
	if (rdev->qplib_res.rcfw) {
		bnxt_qplib_free_res(&rdev->qplib_res);
		rdev->qplib_res.rcfw = NULL;
	}
}
static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
{
	int num_vec_created = 0;
	dma_addr_t *pg_map;
	int rc = 0, i;
	int pages;
	u8 type;

	/* Configure and allocate resources for qplib */
	rdev->qplib_res.rcfw = &rdev->rcfw;
	rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr,
				     rdev->is_virtfn);
	if (rc)
		goto fail;

	rc = bnxt_qplib_alloc_res(&rdev->qplib_res, rdev->en_dev->pdev,
				  rdev->netdev, &rdev->dev_attr);
	if (rc)
		goto fail;

	rc = bnxt_qplib_alloc_dpi(&rdev->qplib_res.dpi_tbl,
				  &rdev->dpi_privileged,
				  rdev);
	if (rc)
		goto dealloc_res;

	for (i = 0; i < rdev->num_msix - 1; i++) {
		rdev->nq[i].res = &rdev->qplib_res;
		rdev->nq[i].hwq.max_elements = BNXT_RE_MAX_CQ_COUNT +
			BNXT_RE_MAX_SRQC_COUNT + 2;
		rc = bnxt_qplib_alloc_nq(rdev->en_dev->pdev, &rdev->nq[i]);
		if (rc) {
			dev_err(rdev_to_dev(rdev), "Alloc Failed NQ%d rc:%#x",
				i, rc);
			goto free_nq;
		}
		type = bnxt_qplib_get_ring_type(&rdev->chip_ctx);
		pg_map = rdev->nq[i].hwq.pbl[PBL_LVL_0].pg_map_arr;
		pages = rdev->nq[i].hwq.pbl[rdev->nq[i].hwq.level].pg_count;
		rc = bnxt_re_net_ring_alloc(rdev, pg_map, pages, type,
					    BNXT_QPLIB_NQE_MAX_CNT - 1,
					    rdev->msix_entries[i + 1].ring_idx,
					    &rdev->nq[i].ring_id);
		if (rc) {
			dev_err(rdev_to_dev(rdev),
				"Failed to allocate NQ fw id with rc = 0x%x",
				rc);
			bnxt_qplib_free_nq(&rdev->nq[i]);
			goto free_nq;
		}
		num_vec_created++;
	}
	return 0;
free_nq:
	for (i = num_vec_created; i >= 0; i--) {
		type = bnxt_qplib_get_ring_type(&rdev->chip_ctx);
		bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id, type);
		bnxt_qplib_free_nq(&rdev->nq[i]);
	}
	bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
			       &rdev->qplib_res.dpi_tbl,
			       &rdev->dpi_privileged);
dealloc_res:
	bnxt_qplib_free_res(&rdev->qplib_res);

fail:
	rdev->qplib_res.rcfw = NULL;
	return rc;
}
static void bnxt_re_dispatch_event(struct ib_device *ibdev, struct ib_qp *qp,
				   u8 port_num, enum ib_event_type event)
{
	struct ib_event ib_event;

	ib_event.device = ibdev;
	if (qp) {
		ib_event.element.qp = qp;
		ib_event.event = event;
		if (qp->event_handler)
			qp->event_handler(&ib_event, qp->qp_context);
	} else {
		ib_event.element.port_num = port_num;
		ib_event.event = event;
		ib_dispatch_event(&ib_event);
	}
}
#define HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_IVLAN	0x02
static int bnxt_re_query_hwrm_pri2cos(struct bnxt_re_dev *rdev, u8 dir,
				      u64 *cid_map)
{
	struct hwrm_queue_pri2cos_qcfg_input req = {0};
	struct bnxt *bp = netdev_priv(rdev->netdev);
	struct hwrm_queue_pri2cos_qcfg_output resp;
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct bnxt_fw_msg fw_msg;
	u32 flags = 0;
	u8 *qcfgmap, *tmp_map;
	int rc = 0, i;

	if (!cid_map)
		return -EINVAL;

	memset(&fw_msg, 0, sizeof(fw_msg));
	bnxt_re_init_hwrm_hdr(rdev, (void *)&req,
			      HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
	flags |= (dir & 0x01);
	flags |= HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_IVLAN;
	req.flags = cpu_to_le32(flags);
	req.port_id = bp->pf.port_id;

	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (rc)
		return rc;

	if (resp.queue_cfg_info) {
		dev_warn(rdev_to_dev(rdev),
			 "Asymmetric cos queue configuration detected");
		dev_warn(rdev_to_dev(rdev),
			 " on device, QoS may not be fully functional\n");
	}
	qcfgmap = &resp.pri0_cos_queue_id;
	tmp_map = (u8 *)cid_map;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
		tmp_map[i] = qcfgmap[i];

	return rc;
}
static bool bnxt_re_is_qp1_or_shadow_qp(struct bnxt_re_dev *rdev,
					struct bnxt_re_qp *qp)
{
	return (qp->ib_qp.qp_type == IB_QPT_GSI) || (qp == rdev->qp1_sqp);
}

static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev)
{
	int mask = IB_QP_STATE;
	struct ib_qp_attr qp_attr;
	struct bnxt_re_qp *qp;

	qp_attr.qp_state = IB_QPS_ERR;
	mutex_lock(&rdev->qp_lock);
	list_for_each_entry(qp, &rdev->qp_list, list) {
		/* Modify the state of all QPs except QP1/Shadow QP */
		if (!bnxt_re_is_qp1_or_shadow_qp(rdev, qp)) {
			if (qp->qplib_qp.state !=
			    CMDQ_MODIFY_QP_NEW_STATE_RESET &&
			    qp->qplib_qp.state !=
			    CMDQ_MODIFY_QP_NEW_STATE_ERR) {
				bnxt_re_dispatch_event(&rdev->ibdev, &qp->ib_qp,
						       1, IB_EVENT_QP_FATAL);
				bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, mask,
						  NULL);
			}
		}
	}
	mutex_unlock(&rdev->qp_lock);
}
static int bnxt_re_update_gid(struct bnxt_re_dev *rdev)
{
	struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
	struct bnxt_qplib_gid gid;
	u16 gid_idx, index;
	int rc = 0;

	if (!test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags))
		return 0;

	if (!sgid_tbl) {
		dev_err(rdev_to_dev(rdev), "QPLIB: SGID table not allocated");
		return -EINVAL;
	}

	for (index = 0; index < sgid_tbl->active; index++) {
		gid_idx = sgid_tbl->hw_id[index];

		if (!memcmp(&sgid_tbl->tbl[index], &bnxt_qplib_gid_zero,
			    sizeof(bnxt_qplib_gid_zero)))
			continue;
		/* need to modify the VLAN enable setting of non VLAN GID only
		 * as setting is done for VLAN GID while adding GID
		 */
		if (sgid_tbl->vlan[index])
			continue;

		memcpy(&gid, &sgid_tbl->tbl[index], sizeof(gid));

		rc = bnxt_qplib_update_sgid(sgid_tbl, &gid, gid_idx,
					    rdev->qplib_res.netdev->dev_addr);
	}

	return rc;
}
static u32 bnxt_re_get_priority_mask(struct bnxt_re_dev *rdev)
{
	u32 prio_map = 0, tmp_map = 0;
	struct net_device *netdev;
	struct dcb_app app;

	netdev = rdev->netdev;

	memset(&app, 0, sizeof(app));
	app.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE;
	app.protocol = ETH_P_IBOE;
	tmp_map = dcb_ieee_getapp_mask(netdev, &app);
	prio_map = tmp_map;

	app.selector = IEEE_8021QAZ_APP_SEL_DGRAM;
	app.protocol = ROCE_V2_UDP_DPORT;
	tmp_map = dcb_ieee_getapp_mask(netdev, &app);
	prio_map |= tmp_map;

	return prio_map;
}

static void bnxt_re_parse_cid_map(u8 prio_map, u8 *cid_map, u16 *cosq)
{
	u16 prio;
	u8 id;

	for (prio = 0, id = 0; prio < 8; prio++) {
		if (prio_map & (1 << prio)) {
			cosq[id] = cid_map[prio];
			id++;
			if (id == 2) /* Max 2 tcs supported */
				break;
		}
	}
}
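/*
 * Example of the mapping above (values are hypothetical): with
 * prio_map = 0x28 (priorities 3 and 5 set) and a firmware map where
 * cid_map[3] = 4 and cid_map[5] = 7, the loop fills cosq[0] = 4 and
 * cosq[1] = 7 and then stops, since at most two traffic classes are used.
 */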
static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev)
{
	u8 prio_map = 0;
	u64 cid_map;
	int rc;

	/* Get priority for roce */
	prio_map = bnxt_re_get_priority_mask(rdev);

	if (prio_map == rdev->cur_prio_map)
		return 0;
	rdev->cur_prio_map = prio_map;
	/* Get cosq id for this priority */
	rc = bnxt_re_query_hwrm_pri2cos(rdev, 0, &cid_map);
	if (rc) {
		dev_warn(rdev_to_dev(rdev), "no cos for p_mask %x\n", prio_map);
		return rc;
	}
	/* Parse CoS IDs for app priority */
	bnxt_re_parse_cid_map(prio_map, (u8 *)&cid_map, rdev->cosq);

	rc = bnxt_qplib_map_tc2cos(&rdev->qplib_res, rdev->cosq);
	if (rc) {
		dev_warn(rdev_to_dev(rdev), "no tc for cos{%x, %x}\n",
			 rdev->cosq[0], rdev->cosq[1]);
		return rc;
	}

	/* Actual priorities are not programmed as they are already
	 * done by L2 driver; just enable or disable priority vlan tagging
	 */
	if ((prio_map == 0 && rdev->qplib_res.prio) ||
	    (prio_map != 0 && !rdev->qplib_res.prio)) {
		rdev->qplib_res.prio = prio_map ? true : false;

		bnxt_re_update_gid(rdev);
	}

	return 0;
}
static void bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev)
{
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct hwrm_ver_get_output resp = {0};
	struct hwrm_ver_get_input req = {0};
	struct bnxt_fw_msg fw_msg;
	int rc = 0;

	memset(&fw_msg, 0, sizeof(fw_msg));
	bnxt_re_init_hwrm_hdr(rdev, (void *)&req,
			      HWRM_VER_GET, -1, -1);
	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
	req.hwrm_intf_min = HWRM_VERSION_MINOR;
	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (rc) {
		dev_err(rdev_to_dev(rdev),
			"Failed to query HW version, rc = 0x%x", rc);
		return;
	}
	rdev->qplib_ctx.hwrm_intf_ver =
		(u64)le16_to_cpu(resp.hwrm_intf_major) << 48 |
		(u64)le16_to_cpu(resp.hwrm_intf_minor) << 32 |
		(u64)le16_to_cpu(resp.hwrm_intf_build) << 16 |
		le16_to_cpu(resp.hwrm_intf_patch);
}
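/*
 * The packing above places major/minor/build/patch into one u64, high to
 * low.  For example (version numbers are illustrative only), an interface
 * version 1.10.2.100 would be stored as
 * (1ULL << 48) | (10ULL << 32) | (2ULL << 16) | 100 = 0x0001000A00020064.
 */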
static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev)
{
	u8 type;
	int rc;

	if (test_and_clear_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags)) {
		/* Cleanup ib dev */
		bnxt_re_unregister_ib(rdev);
	}
	if (test_and_clear_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags))
		cancel_delayed_work_sync(&rdev->worker);

	if (test_and_clear_bit(BNXT_RE_FLAG_RESOURCES_INITIALIZED,
			       &rdev->flags))
		bnxt_re_cleanup_res(rdev);
	if (test_and_clear_bit(BNXT_RE_FLAG_RESOURCES_ALLOCATED, &rdev->flags))
		bnxt_re_free_res(rdev);

	if (test_and_clear_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags)) {
		rc = bnxt_qplib_deinit_rcfw(&rdev->rcfw);
		if (rc)
			dev_warn(rdev_to_dev(rdev),
				 "Failed to deinitialize RCFW: %#x", rc);
		bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
		bnxt_qplib_free_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx);
		bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
		type = bnxt_qplib_get_ring_type(&rdev->chip_ctx);
		bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id, type);
		bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
	}
	if (test_and_clear_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags)) {
		rc = bnxt_re_free_msix(rdev);
		if (rc)
			dev_warn(rdev_to_dev(rdev),
				 "Failed to free MSI-X vectors: %#x", rc);
	}

	bnxt_re_destroy_chip_ctx(rdev);
	if (test_and_clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags)) {
		rc = bnxt_re_unregister_netdev(rdev);
		if (rc)
			dev_warn(rdev_to_dev(rdev),
				 "Failed to unregister with netdev: %#x", rc);
	}
}
/* worker thread for polling periodic events. Now used for QoS programming */
static void bnxt_re_worker(struct work_struct *work)
{
	struct bnxt_re_dev *rdev = container_of(work, struct bnxt_re_dev,
						worker.work);

	bnxt_re_setup_qos(rdev);
	schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
}
static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
{
	dma_addr_t *pg_map;
	u32 db_offt, ridx;
	int pages, vid;
	bool locked;
	u8 type;
	int rc;

	/* Acquire rtnl lock throughout this function */
	rtnl_lock();
	locked = true;

	/* Registered a new RoCE device instance to netdev */
	rc = bnxt_re_register_netdev(rdev);
	if (rc) {
		rtnl_unlock();
		pr_err("Failed to register with netdev: %#x\n", rc);
		return -EINVAL;
	}
	set_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);

	rc = bnxt_re_setup_chip_ctx(rdev);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Failed to get chip context\n");
		return -EINVAL;
	}

	/* Check whether VF or PF */
	bnxt_re_get_sriov_func_type(rdev);

	rc = bnxt_re_request_msix(rdev);
	if (rc) {
		pr_err("Failed to get MSI-X vectors: %#x\n", rc);
		rc = -EINVAL;
		goto fail;
	}
	set_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags);

	bnxt_re_query_hwrm_intf_version(rdev);

	/* Establish RCFW Communication Channel to initialize the context
	 * memory for the function and all child VFs
	 */
	rc = bnxt_qplib_alloc_rcfw_channel(rdev->en_dev->pdev, &rdev->rcfw,
					   &rdev->qplib_ctx,
					   BNXT_RE_MAX_QPC_COUNT);
	if (rc) {
		pr_err("Failed to allocate RCFW Channel: %#x\n", rc);
		goto fail;
	}

	type = bnxt_qplib_get_ring_type(&rdev->chip_ctx);
	pg_map = rdev->rcfw.creq.pbl[PBL_LVL_0].pg_map_arr;
	pages = rdev->rcfw.creq.pbl[rdev->rcfw.creq.level].pg_count;
	ridx = rdev->msix_entries[BNXT_RE_AEQ_IDX].ring_idx;
	rc = bnxt_re_net_ring_alloc(rdev, pg_map, pages, type,
				    BNXT_QPLIB_CREQE_MAX_CNT - 1,
				    ridx, &rdev->rcfw.creq_ring_id);
	if (rc) {
		pr_err("Failed to allocate CREQ: %#x\n", rc);
		goto free_rcfw;
	}
	db_offt = bnxt_re_get_nqdb_offset(rdev, BNXT_RE_AEQ_IDX);
	vid = rdev->msix_entries[BNXT_RE_AEQ_IDX].vector;
	rc = bnxt_qplib_enable_rcfw_channel(rdev->en_dev->pdev, &rdev->rcfw,
					    vid, db_offt, rdev->is_virtfn,
					    &bnxt_re_aeq_handler);
	if (rc) {
		pr_err("Failed to enable RCFW channel: %#x\n", rc);
		goto free_ring;
	}

	rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr,
				     rdev->is_virtfn);
	if (rc)
		goto disable_rcfw;

	bnxt_re_set_resource_limits(rdev);

	rc = bnxt_qplib_alloc_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx, 0,
				  bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx));
	if (rc) {
		pr_err("Failed to allocate QPLIB context: %#x\n", rc);
		goto disable_rcfw;
	}
	rc = bnxt_re_net_stats_ctx_alloc(rdev,
					 rdev->qplib_ctx.stats.dma_map,
					 &rdev->qplib_ctx.stats.fw_id);
	if (rc) {
		pr_err("Failed to allocate stats context: %#x\n", rc);
		goto free_ctx;
	}

	rc = bnxt_qplib_init_rcfw(&rdev->rcfw, &rdev->qplib_ctx,
				  rdev->is_virtfn);
	if (rc) {
		pr_err("Failed to initialize RCFW: %#x\n", rc);
		goto free_sctx;
	}
	set_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags);

	/* Resources based on the 'new' device caps */
	rc = bnxt_re_alloc_res(rdev);
	if (rc) {
		pr_err("Failed to allocate resources: %#x\n", rc);
		goto fail;
	}
	set_bit(BNXT_RE_FLAG_RESOURCES_ALLOCATED, &rdev->flags);
	rc = bnxt_re_init_res(rdev);
	if (rc) {
		pr_err("Failed to initialize resources: %#x\n", rc);
		goto fail;
	}

	set_bit(BNXT_RE_FLAG_RESOURCES_INITIALIZED, &rdev->flags);

	if (!rdev->is_virtfn) {
		rc = bnxt_re_setup_qos(rdev);
		if (rc)
			pr_info("RoCE priority not yet configured\n");

		INIT_DELAYED_WORK(&rdev->worker, bnxt_re_worker);
		set_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags);
		schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
	}

	rtnl_unlock();
	locked = false;

	/* Register ib dev */
	rc = bnxt_re_register_ib(rdev);
	if (rc) {
		pr_err("Failed to register with IB: %#x\n", rc);
		goto fail;
	}
	set_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags);
	dev_info(rdev_to_dev(rdev), "Device registered successfully");
	ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed,
			 &rdev->active_width);
	set_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags);
	bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_PORT_ACTIVE);

	return 0;
free_sctx:
	bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
free_ctx:
	bnxt_qplib_free_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx);
disable_rcfw:
	bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
free_ring:
	type = bnxt_qplib_get_ring_type(&rdev->chip_ctx);
	bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id, type);
free_rcfw:
	bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
fail:
	if (!locked)
		rtnl_lock();
	bnxt_re_ib_unreg(rdev);
	rtnl_unlock();

	return rc;
}
static void bnxt_re_dev_unreg(struct bnxt_re_dev *rdev)
{
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct net_device *netdev = rdev->netdev;

	bnxt_re_dev_remove(rdev);

	if (netdev)
		bnxt_re_dev_unprobe(netdev, en_dev);
}

static int bnxt_re_dev_reg(struct bnxt_re_dev **rdev, struct net_device *netdev)
{
	struct bnxt_en_dev *en_dev;
	int rc = 0;

	if (!is_bnxt_re_dev(netdev))
		return -ENODEV;

	en_dev = bnxt_re_dev_probe(netdev);
	if (IS_ERR(en_dev)) {
		if (en_dev != ERR_PTR(-ENODEV))
			pr_err("%s: Failed to probe\n", ROCE_DRV_MODULE_NAME);
		rc = PTR_ERR(en_dev);
		goto exit;
	}
	*rdev = bnxt_re_dev_add(netdev, en_dev);
	if (!*rdev) {
		rc = -ENOMEM;
		bnxt_re_dev_unprobe(netdev, en_dev);
		goto exit;
	}
exit:
	return rc;
}

static void bnxt_re_remove_one(struct bnxt_re_dev *rdev)
{
	pci_dev_put(rdev->en_dev->pdev);
}
/* Handle all deferred netevent tasks */
static void bnxt_re_task(struct work_struct *work)
{
	struct bnxt_re_work *re_work;
	struct bnxt_re_dev *rdev;
	int rc = 0;

	re_work = container_of(work, struct bnxt_re_work, work);
	rdev = re_work->rdev;

	if (re_work->event != NETDEV_REGISTER &&
	    !test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags))
		return;

	switch (re_work->event) {
	case NETDEV_REGISTER:
		rc = bnxt_re_ib_reg(rdev);
		if (rc) {
			dev_err(rdev_to_dev(rdev),
				"Failed to register with IB: %#x", rc);
			bnxt_re_remove_one(rdev);
			bnxt_re_dev_unreg(rdev);
			goto exit;
		}
		break;
	case NETDEV_UP:
		bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
				       IB_EVENT_PORT_ACTIVE);
		break;
	case NETDEV_DOWN:
		bnxt_re_dev_stop(rdev);
		break;
	case NETDEV_CHANGE:
		if (!netif_carrier_ok(rdev->netdev))
			bnxt_re_dev_stop(rdev);
		else if (netif_carrier_ok(rdev->netdev))
			bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
					       IB_EVENT_PORT_ACTIVE);
		ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed,
				 &rdev->active_width);
		break;
	default:
		break;
	}
	smp_mb__before_atomic();
	atomic_dec(&rdev->sched_count);
exit:
	kfree(re_work);
}

static void bnxt_re_init_one(struct bnxt_re_dev *rdev)
{
	pci_dev_get(rdev->en_dev->pdev);
}
1616 * "Notifier chain callback can be invoked for the same chain from
1617 * different CPUs at the same time".
1619 * For cases when the netdev is already present, our call to the
1620 * register_netdevice_notifier() will actually get the rtnl_lock()
1621 * before sending NETDEV_REGISTER and (if up) NETDEV_UP
1624 * But for cases when the netdev is not already present, the notifier
1625 * chain is subjected to be invoked from different CPUs simultaneously.
1627 * This is protected by the netdev_mutex.
1629 static int bnxt_re_netdev_event(struct notifier_block
*notifier
,
1630 unsigned long event
, void *ptr
)
1632 struct net_device
*real_dev
, *netdev
= netdev_notifier_info_to_dev(ptr
);
1633 struct bnxt_re_work
*re_work
;
1634 struct bnxt_re_dev
*rdev
;
1636 bool sch_work
= false;
1638 real_dev
= rdma_vlan_dev_real_dev(netdev
);
1642 rdev
= bnxt_re_from_netdev(real_dev
);
1643 if (!rdev
&& event
!= NETDEV_REGISTER
)
1645 if (real_dev
!= netdev
)
1649 case NETDEV_REGISTER
:
1652 rc
= bnxt_re_dev_reg(&rdev
, real_dev
);
1656 pr_err("Failed to register with the device %s: %#x\n",
1657 real_dev
->name
, rc
);
1660 bnxt_re_init_one(rdev
);
1664 case NETDEV_UNREGISTER
:
1665 /* netdev notifier will call NETDEV_UNREGISTER again later since
1666 * we are still holding the reference to the netdev
1668 if (atomic_read(&rdev
->sched_count
) > 0)
1670 bnxt_re_ib_unreg(rdev
);
1671 bnxt_re_remove_one(rdev
);
1672 bnxt_re_dev_unreg(rdev
);
1680 /* Allocate for the deferred task */
1681 re_work
= kzalloc(sizeof(*re_work
), GFP_ATOMIC
);
1683 re_work
->rdev
= rdev
;
1684 re_work
->event
= event
;
1685 re_work
->vlan_dev
= (real_dev
== netdev
?
1687 INIT_WORK(&re_work
->work
, bnxt_re_task
);
1688 atomic_inc(&rdev
->sched_count
);
1689 queue_work(bnxt_re_wq
, &re_work
->work
);
static struct notifier_block bnxt_re_netdev_notifier = {
	.notifier_call = bnxt_re_netdev_event
};

static int __init bnxt_re_mod_init(void)
{
	int rc = 0;

	pr_info("%s: %s", ROCE_DRV_MODULE_NAME, version);

	bnxt_re_wq = create_singlethread_workqueue("bnxt_re");
	if (!bnxt_re_wq)
		return -ENOMEM;

	INIT_LIST_HEAD(&bnxt_re_dev_list);

	rc = register_netdevice_notifier(&bnxt_re_netdev_notifier);
	if (rc) {
		pr_err("%s: Cannot register to netdevice_notifier",
		       ROCE_DRV_MODULE_NAME);
		goto err_netdev;
	}
	return 0;

err_netdev:
	destroy_workqueue(bnxt_re_wq);

	return rc;
}
static void __exit bnxt_re_mod_exit(void)
{
	struct bnxt_re_dev *rdev, *next;
	LIST_HEAD(to_be_deleted);

	mutex_lock(&bnxt_re_dev_lock);
	/* Free all adapter allocated resources */
	if (!list_empty(&bnxt_re_dev_list))
		list_splice_init(&bnxt_re_dev_list, &to_be_deleted);
	mutex_unlock(&bnxt_re_dev_lock);
	/*
	 * Cleanup the devices in reverse order so that the VF device
	 * cleanup is done before PF cleanup
	 */
	list_for_each_entry_safe_reverse(rdev, next, &to_be_deleted, list) {
		dev_info(rdev_to_dev(rdev), "Unregistering Device");
		/*
		 * Flush out any scheduled tasks before destroying the
		 * resources
		 */
		flush_workqueue(bnxt_re_wq);
		bnxt_re_dev_stop(rdev);
		/* Acquire the rtnl_lock as the L2 resources are freed here */
		rtnl_lock();
		bnxt_re_ib_unreg(rdev);
		rtnl_unlock();
		bnxt_re_remove_one(rdev);
		bnxt_re_dev_unreg(rdev);
	}
	unregister_netdevice_notifier(&bnxt_re_netdev_notifier);
	if (bnxt_re_wq)
		destroy_workqueue(bnxt_re_wq);
}

module_init(bnxt_re_mod_init);
module_exit(bnxt_re_mod_exit);