/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: Main component of the bnxt_re driver
 */
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <net/dcbnl.h>
#include <net/ipv6.h>
#include <net/addrconf.h>
#include <linux/if_ether.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>

#include "bnxt_ulp.h"
#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
#include "qplib_fp.h"
#include "qplib_rcfw.h"
#include "bnxt_re.h"
#include "ib_verbs.h"
#include <rdma/bnxt_re-abi.h>
#include "bnxt.h"
#include "hw_counters.h"
static char version[] =
		BNXT_RE_DESC " v" ROCE_DRV_MODULE_VERSION "\n";

MODULE_AUTHOR("Eddie Wai <eddie.wai@broadcom.com>");
MODULE_DESCRIPTION(BNXT_RE_DESC " Driver");
MODULE_LICENSE("Dual BSD/GPL");
static struct list_head bnxt_re_dev_list = LIST_HEAD_INIT(bnxt_re_dev_list);
/* Mutex to protect the list of bnxt_re devices added */
static DEFINE_MUTEX(bnxt_re_dev_lock);
static struct workqueue_struct *bnxt_re_wq;
static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev, bool lock_wait);
/* SR-IOV helper functions */

static void bnxt_re_get_sriov_func_type(struct bnxt_re_dev *rdev)
{
	struct bnxt *bp;

	bp = netdev_priv(rdev->en_dev->net);
	if (BNXT_VF(bp))
		rdev->is_virtfn = 1;
}
/* Set the maximum number of each resource that the driver actually wants
 * to allocate. This may be up to the maximum number the firmware has
 * reserved for the function. The driver may choose to allocate fewer
 * resources than the firmware maximum.
 */
static void bnxt_re_set_resource_limits(struct bnxt_re_dev *rdev)
{
	u32 vf_qps = 0, vf_srqs = 0, vf_cqs = 0, vf_mrws = 0, vf_gids = 0;
	u32 i;
	u32 vf_pct;
	u32 num_vfs;
	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;

	rdev->qplib_ctx.qpc_count = min_t(u32, BNXT_RE_MAX_QPC_COUNT,
					  dev_attr->max_qp);

	rdev->qplib_ctx.mrw_count = BNXT_RE_MAX_MRW_COUNT_256K;
	/* Use max_mr from fw since max_mrw does not get set */
	rdev->qplib_ctx.mrw_count = min_t(u32, rdev->qplib_ctx.mrw_count,
					  dev_attr->max_mr);
	rdev->qplib_ctx.srqc_count = min_t(u32, BNXT_RE_MAX_SRQC_COUNT,
					   dev_attr->max_srq);
	rdev->qplib_ctx.cq_count = min_t(u32, BNXT_RE_MAX_CQ_COUNT,
					 dev_attr->max_cq);

	for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
		rdev->qplib_ctx.tqm_count[i] =
		rdev->dev_attr.tqm_alloc_reqs[i];

	if (rdev->num_vfs) {
		/*
		 * Reserve a set of resources for the PF. Divide the remaining
		 * resources among the VFs
		 */
		vf_pct = 100 - BNXT_RE_PCT_RSVD_FOR_PF;
		num_vfs = 100 * rdev->num_vfs;
		vf_qps = (rdev->qplib_ctx.qpc_count * vf_pct) / num_vfs;
		vf_srqs = (rdev->qplib_ctx.srqc_count * vf_pct) / num_vfs;
		vf_cqs = (rdev->qplib_ctx.cq_count * vf_pct) / num_vfs;
		/*
		 * The driver allows many more MRs than other resources. If the
		 * firmware does also, then reserve a fixed amount for the PF
		 * and divide the rest among VFs. VFs may use many MRs for NFS
		 * mounts, ISER, NVME applications, etc. If the firmware
		 * severely restricts the number of MRs, then let PF have
		 * half and divide the rest among VFs, as for the other
		 * resource types.
		 */
		if (rdev->qplib_ctx.mrw_count < BNXT_RE_MAX_MRW_COUNT_64K)
			vf_mrws = rdev->qplib_ctx.mrw_count * vf_pct / num_vfs;
		else
			vf_mrws = (rdev->qplib_ctx.mrw_count -
				   BNXT_RE_RESVD_MR_FOR_PF) / rdev->num_vfs;
		vf_gids = BNXT_RE_MAX_GID_PER_VF;
	}
	rdev->qplib_ctx.vf_res.max_mrw_per_vf = vf_mrws;
	rdev->qplib_ctx.vf_res.max_gid_per_vf = vf_gids;
	rdev->qplib_ctx.vf_res.max_qp_per_vf = vf_qps;
	rdev->qplib_ctx.vf_res.max_srq_per_vf = vf_srqs;
	rdev->qplib_ctx.vf_res.max_cq_per_vf = vf_cqs;
}
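/*
 * Worked example for the split above (numbers are purely illustrative,
 * not taken from any particular firmware): if BNXT_RE_PCT_RSVD_FOR_PF
 * holds back 10% and num_vfs is 8, then with qpc_count = 65536 each VF
 * gets (65536 * 90) / (100 * 8) = 7372 QPs. Folding the percentage and
 * the VF count into the single "num_vfs = 100 * rdev->num_vfs" divisor
 * keeps the whole calculation in integer arithmetic.
 */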
/* for handling bnxt_en callbacks later */
static void bnxt_re_stop(void *p)
{
}

static void bnxt_re_start(void *p)
{
}

static void bnxt_re_sriov_config(void *p, int num_vfs)
{
	struct bnxt_re_dev *rdev = p;

	if (!rdev)
		return;

	rdev->num_vfs = num_vfs;
	bnxt_re_set_resource_limits(rdev);
	bnxt_qplib_set_func_resources(&rdev->qplib_res, &rdev->rcfw,
				      &rdev->qplib_ctx);
}

static void bnxt_re_shutdown(void *p)
{
	struct bnxt_re_dev *rdev = p;

	if (!rdev)
		return;

	bnxt_re_ib_unreg(rdev, false);
}
static struct bnxt_ulp_ops bnxt_re_ulp_ops = {
	.ulp_async_notifier = NULL,
	.ulp_stop = bnxt_re_stop,
	.ulp_start = bnxt_re_start,
	.ulp_sriov_config = bnxt_re_sriov_config,
	.ulp_shutdown = bnxt_re_shutdown
};

/* RoCE -> Net driver */
/* Driver registration routines used to let the networking driver (bnxt_en)
 * know that the RoCE driver is now installed
 */
static int bnxt_re_unregister_netdev(struct bnxt_re_dev *rdev, bool lock_wait)
{
	struct bnxt_en_dev *en_dev;
	int rc;

	if (!rdev)
		return -EINVAL;

	en_dev = rdev->en_dev;
	/* Acquire rtnl lock if it is not invoked from netdev event */
	if (lock_wait)
		rtnl_lock();

	rc = en_dev->en_ops->bnxt_unregister_device(rdev->en_dev,
						    BNXT_ROCE_ULP);
	if (lock_wait)
		rtnl_unlock();
	return rc;
}
static int bnxt_re_register_netdev(struct bnxt_re_dev *rdev)
{
	struct bnxt_en_dev *en_dev;
	int rc = 0;

	if (!rdev)
		return -EINVAL;

	en_dev = rdev->en_dev;

	rtnl_lock();
	rc = en_dev->en_ops->bnxt_register_device(en_dev, BNXT_ROCE_ULP,
						  &bnxt_re_ulp_ops, rdev);
	rtnl_unlock();
	return rc;
}
static int bnxt_re_free_msix(struct bnxt_re_dev *rdev, bool lock_wait)
{
	struct bnxt_en_dev *en_dev;
	int rc;

	if (!rdev)
		return -EINVAL;

	en_dev = rdev->en_dev;

	if (lock_wait)
		rtnl_lock();

	rc = en_dev->en_ops->bnxt_free_msix(rdev->en_dev, BNXT_ROCE_ULP);

	if (lock_wait)
		rtnl_unlock();
	return rc;
}
static int bnxt_re_request_msix(struct bnxt_re_dev *rdev)
{
	int rc = 0, num_msix_want = BNXT_RE_MAX_MSIX, num_msix_got;
	struct bnxt_en_dev *en_dev;

	if (!rdev)
		return -EINVAL;

	en_dev = rdev->en_dev;

	num_msix_want = min_t(u32, BNXT_RE_MAX_MSIX, num_online_cpus());

	rtnl_lock();
	num_msix_got = en_dev->en_ops->bnxt_request_msix(en_dev, BNXT_ROCE_ULP,
							 rdev->msix_entries,
							 num_msix_want);
	if (num_msix_got < BNXT_RE_MIN_MSIX) {
		rc = -EINVAL;
		goto done;
	}
	if (num_msix_got != num_msix_want) {
		dev_warn(rdev_to_dev(rdev),
			 "Requested %d MSI-X vectors, got %d\n",
			 num_msix_want, num_msix_got);
	}
	rdev->num_msix = num_msix_got;
done:
	rtnl_unlock();
	return rc;
}
static void bnxt_re_init_hwrm_hdr(struct bnxt_re_dev *rdev, struct input *hdr,
				  u16 opcd, u16 crid, u16 trid)
{
	hdr->req_type = cpu_to_le16(opcd);
	hdr->cmpl_ring = cpu_to_le16(crid);
	hdr->target_id = cpu_to_le16(trid);
}
static void bnxt_re_fill_fw_msg(struct bnxt_fw_msg *fw_msg, void *msg,
				int msg_len, void *resp, int resp_max_len,
				int timeout)
{
	fw_msg->msg = msg;
	fw_msg->msg_len = msg_len;
	fw_msg->resp = resp;
	fw_msg->resp_max_len = resp_max_len;
	fw_msg->timeout = timeout;
}
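/*
 * Every HWRM exchange in this file follows the same recipe: build the
 * request on the stack, stamp the common header with
 * bnxt_re_init_hwrm_hdr(), describe the request and response buffers
 * with bnxt_re_fill_fw_msg(), then hand the message to the L2 driver
 * through en_ops->bnxt_send_fw_msg(), which owns the actual firmware
 * channel.
 */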
static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev, u16 fw_ring_id,
				 bool lock_wait)
{
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct hwrm_ring_free_input req = {0};
	struct hwrm_ring_free_output resp;
	struct bnxt_fw_msg fw_msg;
	bool do_unlock = false;
	int rc = -EINVAL;

	if (!en_dev)
		return rc;

	memset(&fw_msg, 0, sizeof(fw_msg));
	if (lock_wait) {
		rtnl_lock();
		do_unlock = true;
	}

	bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_FREE, -1, -1);
	req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
	req.ring_id = cpu_to_le16(fw_ring_id);
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (rc)
		dev_err(rdev_to_dev(rdev),
			"Failed to free HW ring:%d :%#x", req.ring_id, rc);
	if (do_unlock)
		rtnl_unlock();
	return rc;
}
static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev, dma_addr_t *dma_arr,
				  int pages, int type, u32 ring_mask,
				  u32 map_index, u16 *fw_ring_id)
{
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct hwrm_ring_alloc_input req = {0};
	struct hwrm_ring_alloc_output resp;
	struct bnxt_fw_msg fw_msg;
	int rc = -EINVAL;

	if (!en_dev)
		return rc;

	memset(&fw_msg, 0, sizeof(fw_msg));
	rtnl_lock();
	bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_ALLOC, -1, -1);
	req.enables = 0;
	req.page_tbl_addr =  cpu_to_le64(dma_arr[0]);
	if (pages > 1) {
		/* Page size is in log2 units */
		req.page_size = BNXT_PAGE_SHIFT;
		req.page_tbl_depth = 1;
	}
	req.fbo = 0;
	/* Association of ring index with doorbell index and MSIX number */
	req.logical_id = cpu_to_le16(map_index);
	req.length = cpu_to_le32(ring_mask + 1);
	req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
	req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (!rc)
		*fw_ring_id = le16_to_cpu(resp.ring_id);

	rtnl_unlock();
	return rc;
}
static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev,
				      u32 fw_stats_ctx_id, bool lock_wait)
{
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct hwrm_stat_ctx_free_input req = {0};
	struct bnxt_fw_msg fw_msg;
	bool do_unlock = false;
	int rc = -EINVAL;

	if (!en_dev)
		return rc;

	memset(&fw_msg, 0, sizeof(fw_msg));
	if (lock_wait) {
		rtnl_lock();
		do_unlock = true;
	}

	bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_FREE, -1, -1);
	req.stat_ctx_id = cpu_to_le32(fw_stats_ctx_id);
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&req,
			    sizeof(req), DFLT_HWRM_CMD_TIMEOUT);
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (rc)
		dev_err(rdev_to_dev(rdev),
			"Failed to free HW stats context %#x", rc);

	if (do_unlock)
		rtnl_unlock();
	return rc;
}
static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
				       dma_addr_t dma_map,
				       u32 *fw_stats_ctx_id)
{
	struct hwrm_stat_ctx_alloc_output resp = {0};
	struct hwrm_stat_ctx_alloc_input req = {0};
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct bnxt_fw_msg fw_msg;
	int rc = -EINVAL;

	*fw_stats_ctx_id = INVALID_STATS_CTX_ID;

	if (!en_dev)
		return rc;

	memset(&fw_msg, 0, sizeof(fw_msg));
	rtnl_lock();

	bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_ALLOC, -1, -1);
	req.update_period_ms = cpu_to_le32(1000);
	req.stats_dma_addr = cpu_to_le64(dma_map);
	req.stat_ctx_flags = STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE;
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (!rc)
		*fw_stats_ctx_id = le32_to_cpu(resp.stat_ctx_id);

	rtnl_unlock();
	return rc;
}
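/*
 * Firmware DMA-writes the RoCE counters into the buffer at @dma_map
 * once per update_period_ms (1000 ms above); the returned stat_ctx_id
 * is the handle later released in bnxt_re_net_stats_ctx_free().
 */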
static bool is_bnxt_re_dev(struct net_device *netdev)
{
	struct ethtool_drvinfo drvinfo;

	if (netdev->ethtool_ops && netdev->ethtool_ops->get_drvinfo) {
		memset(&drvinfo, 0, sizeof(drvinfo));
		netdev->ethtool_ops->get_drvinfo(netdev, &drvinfo);

		if (strcmp(drvinfo.driver, "bnxt_en"))
			return false;
		return true;
	}
	return false;
}
static struct bnxt_re_dev *bnxt_re_from_netdev(struct net_device *netdev)
{
	struct bnxt_re_dev *rdev;

	rcu_read_lock();
	list_for_each_entry_rcu(rdev, &bnxt_re_dev_list, list) {
		if (rdev->netdev == netdev) {
			rcu_read_unlock();
			return rdev;
		}
	}
	rcu_read_unlock();
	return NULL;
}
static void bnxt_re_dev_unprobe(struct net_device *netdev,
				struct bnxt_en_dev *en_dev)
{
	dev_put(netdev);
	module_put(en_dev->pdev->driver->driver.owner);
}
static struct bnxt_en_dev *bnxt_re_dev_probe(struct net_device *netdev)
{
	struct bnxt *bp = netdev_priv(netdev);
	struct bnxt_en_dev *en_dev;
	struct pci_dev *pdev;

	/* Call bnxt_en's RoCE probe via indirect API */
	if (!bp->ulp_probe)
		return ERR_PTR(-EINVAL);

	en_dev = bp->ulp_probe(netdev);
	if (IS_ERR(en_dev))
		return en_dev;

	pdev = en_dev->pdev;
	if (!pdev)
		return ERR_PTR(-EINVAL);

	if (!(en_dev->flags & BNXT_EN_FLAG_ROCE_CAP)) {
		dev_info(&pdev->dev,
			"%s: probe error: RoCE is not supported on this device",
			ROCE_DRV_MODULE_NAME);
		return ERR_PTR(-ENODEV);
	}

	/* Bump net device reference count */
	if (!try_module_get(pdev->driver->driver.owner))
		return ERR_PTR(-ENODEV);

	dev_hold(netdev);

	return en_dev;
}
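/*
 * bnxt_re_dev_probe() and bnxt_re_dev_unprobe() are strictly paired:
 * the probe takes a reference on the bnxt_en module and on the netdev,
 * and the unprobe drops both, so bnxt_en cannot be unloaded while a
 * RoCE device instance still points into it.
 */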
static void bnxt_re_unregister_ib(struct bnxt_re_dev *rdev)
{
	ib_unregister_device(&rdev->ibdev);
}
static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
{
	struct ib_device *ibdev = &rdev->ibdev;

	/* ib device init */
	ibdev->owner = THIS_MODULE;
	ibdev->node_type = RDMA_NODE_IB_CA;
	strlcpy(ibdev->name, "bnxt_re%d", IB_DEVICE_NAME_MAX);
	strlcpy(ibdev->node_desc, BNXT_RE_DESC " HCA",
		strlen(BNXT_RE_DESC) + 5);
	ibdev->phys_port_cnt = 1;

	bnxt_qplib_get_guid(rdev->netdev->dev_addr, (u8 *)&ibdev->node_guid);

	ibdev->num_comp_vectors	= 1;
	ibdev->dev.parent = &rdev->en_dev->pdev->dev;
	ibdev->local_dma_lkey = BNXT_QPLIB_RSVD_LKEY;

	/* User space */
	ibdev->uverbs_abi_ver = BNXT_RE_ABI_VERSION;
	ibdev->uverbs_cmd_mask =
			(1ull << IB_USER_VERBS_CMD_GET_CONTEXT)		|
			(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)	|
			(1ull << IB_USER_VERBS_CMD_QUERY_PORT)		|
			(1ull << IB_USER_VERBS_CMD_ALLOC_PD)		|
			(1ull << IB_USER_VERBS_CMD_DEALLOC_PD)		|
			(1ull << IB_USER_VERBS_CMD_REG_MR)		|
			(1ull << IB_USER_VERBS_CMD_REREG_MR)		|
			(1ull << IB_USER_VERBS_CMD_DEREG_MR)		|
			(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
			(1ull << IB_USER_VERBS_CMD_CREATE_CQ)		|
			(1ull << IB_USER_VERBS_CMD_RESIZE_CQ)		|
			(1ull << IB_USER_VERBS_CMD_DESTROY_CQ)		|
			(1ull << IB_USER_VERBS_CMD_CREATE_QP)		|
			(1ull << IB_USER_VERBS_CMD_MODIFY_QP)		|
			(1ull << IB_USER_VERBS_CMD_QUERY_QP)		|
			(1ull << IB_USER_VERBS_CMD_DESTROY_QP)		|
			(1ull << IB_USER_VERBS_CMD_CREATE_SRQ)		|
			(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)		|
			(1ull << IB_USER_VERBS_CMD_QUERY_SRQ)		|
			(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)		|
			(1ull << IB_USER_VERBS_CMD_CREATE_AH)		|
			(1ull << IB_USER_VERBS_CMD_MODIFY_AH)		|
			(1ull << IB_USER_VERBS_CMD_QUERY_AH)		|
			(1ull << IB_USER_VERBS_CMD_DESTROY_AH);
	/* POLL_CQ and REQ_NOTIFY_CQ are directly handled in libbnxt_re */

	/* Kernel verbs */
	ibdev->query_device		= bnxt_re_query_device;
	ibdev->modify_device		= bnxt_re_modify_device;

	ibdev->query_port		= bnxt_re_query_port;
	ibdev->get_port_immutable	= bnxt_re_get_port_immutable;
	ibdev->get_dev_fw_str		= bnxt_re_query_fw_str;
	ibdev->query_pkey		= bnxt_re_query_pkey;
	ibdev->query_gid		= bnxt_re_query_gid;
	ibdev->get_netdev		= bnxt_re_get_netdev;
	ibdev->add_gid			= bnxt_re_add_gid;
	ibdev->del_gid			= bnxt_re_del_gid;
	ibdev->get_link_layer		= bnxt_re_get_link_layer;

	ibdev->alloc_pd			= bnxt_re_alloc_pd;
	ibdev->dealloc_pd		= bnxt_re_dealloc_pd;

	ibdev->create_ah		= bnxt_re_create_ah;
	ibdev->modify_ah		= bnxt_re_modify_ah;
	ibdev->query_ah			= bnxt_re_query_ah;
	ibdev->destroy_ah		= bnxt_re_destroy_ah;

	ibdev->create_srq		= bnxt_re_create_srq;
	ibdev->modify_srq		= bnxt_re_modify_srq;
	ibdev->query_srq		= bnxt_re_query_srq;
	ibdev->destroy_srq		= bnxt_re_destroy_srq;
	ibdev->post_srq_recv		= bnxt_re_post_srq_recv;

	ibdev->create_qp		= bnxt_re_create_qp;
	ibdev->modify_qp		= bnxt_re_modify_qp;
	ibdev->query_qp			= bnxt_re_query_qp;
	ibdev->destroy_qp		= bnxt_re_destroy_qp;

	ibdev->post_send		= bnxt_re_post_send;
	ibdev->post_recv		= bnxt_re_post_recv;

	ibdev->create_cq		= bnxt_re_create_cq;
	ibdev->destroy_cq		= bnxt_re_destroy_cq;
	ibdev->poll_cq			= bnxt_re_poll_cq;
	ibdev->req_notify_cq		= bnxt_re_req_notify_cq;

	ibdev->get_dma_mr		= bnxt_re_get_dma_mr;
	ibdev->dereg_mr			= bnxt_re_dereg_mr;
	ibdev->alloc_mr			= bnxt_re_alloc_mr;
	ibdev->map_mr_sg		= bnxt_re_map_mr_sg;

	ibdev->reg_user_mr		= bnxt_re_reg_user_mr;
	ibdev->alloc_ucontext		= bnxt_re_alloc_ucontext;
	ibdev->dealloc_ucontext		= bnxt_re_dealloc_ucontext;
	ibdev->mmap			= bnxt_re_mmap;
	ibdev->get_hw_stats		= bnxt_re_ib_get_hw_stats;
	ibdev->alloc_hw_stats		= bnxt_re_ib_alloc_hw_stats;

	return ib_register_device(ibdev, NULL);
}
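/*
 * uverbs_cmd_mask advertises exactly the user verbs this driver
 * implements; anything not set there is rejected by the uverbs layer
 * before it can reach a bnxt_re handler. POLL_CQ and REQ_NOTIFY_CQ are
 * intentionally absent because libbnxt_re drives the CQ doorbells
 * directly from user space.
 */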
static ssize_t show_rev(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(device, ibdev.dev);

	return scnprintf(buf, PAGE_SIZE, "0x%x\n", rdev->en_dev->pdev->vendor);
}

static ssize_t show_hca(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(device, ibdev.dev);

	return scnprintf(buf, PAGE_SIZE, "%s\n", rdev->ibdev.node_desc);
}

static DEVICE_ATTR(hw_rev, 0444, show_rev, NULL);
static DEVICE_ATTR(hca_type, 0444, show_hca, NULL);

static struct device_attribute *bnxt_re_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_hca_type
};
static void bnxt_re_dev_remove(struct bnxt_re_dev *rdev)
{
	dev_put(rdev->netdev);
	rdev->netdev = NULL;

	mutex_lock(&bnxt_re_dev_lock);
	list_del_rcu(&rdev->list);
	mutex_unlock(&bnxt_re_dev_lock);

	synchronize_rcu();
	flush_workqueue(bnxt_re_wq);

	ib_dealloc_device(&rdev->ibdev);
}
static struct bnxt_re_dev *bnxt_re_dev_add(struct net_device *netdev,
					   struct bnxt_en_dev *en_dev)
{
	struct bnxt_re_dev *rdev;

	/* Allocate bnxt_re_dev instance here */
	rdev = (struct bnxt_re_dev *)ib_alloc_device(sizeof(*rdev));
	if (!rdev) {
		dev_err(NULL, "%s: bnxt_re_dev allocation failure!",
			ROCE_DRV_MODULE_NAME);
		return NULL;
	}
	/* Default values */
	rdev->netdev = netdev;
	dev_hold(rdev->netdev);
	rdev->en_dev = en_dev;
	rdev->id = rdev->en_dev->pdev->devfn;
	INIT_LIST_HEAD(&rdev->qp_list);
	mutex_init(&rdev->qp_lock);
	atomic_set(&rdev->qp_count, 0);
	atomic_set(&rdev->cq_count, 0);
	atomic_set(&rdev->srq_count, 0);
	atomic_set(&rdev->mr_count, 0);
	atomic_set(&rdev->mw_count, 0);
	rdev->cosq[0] = 0xFFFF;
	rdev->cosq[1] = 0xFFFF;

	mutex_lock(&bnxt_re_dev_lock);
	list_add_tail_rcu(&rdev->list, &bnxt_re_dev_list);
	mutex_unlock(&bnxt_re_dev_lock);
	return rdev;
}
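/*
 * The device list is mutated under bnxt_re_dev_lock but walked with
 * list_for_each_entry_rcu() in bnxt_re_from_netdev(); that is why the
 * add/del above use the _rcu list variants and why bnxt_re_dev_remove()
 * waits out a grace period before freeing the ib_device.
 */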
static int bnxt_re_handle_unaffi_async_event(struct creq_func_event
					     *unaffi_async)
{
	switch (unaffi_async->event) {
	case CREQ_FUNC_EVENT_EVENT_TX_WQE_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_TX_DATA_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_RX_WQE_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_RX_DATA_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CQ_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_TQM_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCQ_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCS_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCC_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCM_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_TIM_ERROR:
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
static int bnxt_re_handle_qp_async_event(struct creq_qp_event *qp_event,
					 struct bnxt_re_qp *qp)
{
	struct ib_event event;

	memset(&event, 0, sizeof(event));
	if (qp->qplib_qp.srq) {
		event.device = &qp->rdev->ibdev;
		event.element.qp = &qp->ib_qp;
		event.event = IB_EVENT_QP_LAST_WQE_REACHED;
	}

	if (event.device && qp->ib_qp.event_handler)
		qp->ib_qp.event_handler(&event, qp->ib_qp.qp_context);

	return 0;
}
static int bnxt_re_handle_affi_async_event(struct creq_qp_event *affi_async,
					   void *obj)
{
	int rc = 0;
	u8 event;

	if (!obj)
		return rc; /* QP was already dead, still return success */

	event = affi_async->event;
	if (event == CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION) {
		struct bnxt_qplib_qp *lib_qp = obj;
		struct bnxt_re_qp *qp = container_of(lib_qp, struct bnxt_re_qp,
						     qplib_qp);

		rc = bnxt_re_handle_qp_async_event(affi_async, qp);
	}
	return rc;
}
static int bnxt_re_aeq_handler(struct bnxt_qplib_rcfw *rcfw,
			       void *aeqe, void *obj)
{
	struct creq_qp_event *affi_async;
	struct creq_func_event *unaffi_async;
	u8 type;
	int rc;

	type = ((struct creq_base *)aeqe)->type;
	if (type == CREQ_BASE_TYPE_FUNC_EVENT) {
		unaffi_async = aeqe;
		rc = bnxt_re_handle_unaffi_async_event(unaffi_async);
	} else {
		affi_async = aeqe;
		rc = bnxt_re_handle_affi_async_event(affi_async, obj);
	}

	return rc;
}
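/*
 * CREQ async events come in two flavors: function-wide errors
 * (CREQ_BASE_TYPE_FUNC_EVENT, no owning object) and QP-affiliated
 * notifications, where @obj identifies the qplib QP the event refers
 * to (or is NULL if that QP is already gone).
 */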
static int bnxt_re_srqn_handler(struct bnxt_qplib_nq *nq,
				struct bnxt_qplib_srq *handle, u8 event)
{
	struct bnxt_re_srq *srq = container_of(handle, struct bnxt_re_srq,
					       qplib_srq);
	struct ib_event ib_event;
	int rc = 0;

	if (!srq) {
		dev_err(NULL, "%s: SRQ is NULL, SRQN not handled",
			ROCE_DRV_MODULE_NAME);
		rc = -EINVAL;
		goto done;
	}
	ib_event.device = &srq->rdev->ibdev;
	ib_event.element.srq = &srq->ib_srq;
	if (event == NQ_SRQ_EVENT_EVENT_SRQ_THRESHOLD_EVENT)
		ib_event.event = IB_EVENT_SRQ_LIMIT_REACHED;
	else
		ib_event.event = IB_EVENT_SRQ_ERR;

	if (srq->ib_srq.event_handler) {
		/* Lock event_handler? */
		(*srq->ib_srq.event_handler)(&ib_event,
					     srq->ib_srq.srq_context);
	}
done:
	return rc;
}
static int bnxt_re_cqn_handler(struct bnxt_qplib_nq *nq,
			       struct bnxt_qplib_cq *handle)
{
	struct bnxt_re_cq *cq = container_of(handle, struct bnxt_re_cq,
					     qplib_cq);

	if (!cq) {
		dev_err(NULL, "%s: CQ is NULL, CQN not handled",
			ROCE_DRV_MODULE_NAME);
		return -EINVAL;
	}
	if (cq->ib_cq.comp_handler) {
		/* Lock comp_handler? */
		(*cq->ib_cq.comp_handler)(&cq->ib_cq, cq->ib_cq.cq_context);
	}

	return 0;
}
static void bnxt_re_cleanup_res(struct bnxt_re_dev *rdev)
{
	int i;

	if (rdev->nq[0].hwq.max_elements) {
		for (i = 1; i < rdev->num_msix; i++)
			bnxt_qplib_disable_nq(&rdev->nq[i - 1]);
	}

	if (rdev->qplib_res.rcfw)
		bnxt_qplib_cleanup_res(&rdev->qplib_res);
}
static int bnxt_re_init_res(struct bnxt_re_dev *rdev)
{
	int rc = 0, i;

	bnxt_qplib_init_res(&rdev->qplib_res);

	for (i = 1; i < rdev->num_msix; i++) {
		rc = bnxt_qplib_enable_nq(rdev->en_dev->pdev, &rdev->nq[i - 1],
					  i - 1, rdev->msix_entries[i].vector,
					  rdev->msix_entries[i].db_offset,
					  &bnxt_re_cqn_handler,
					  &bnxt_re_srqn_handler);

		if (rc) {
			dev_err(rdev_to_dev(rdev),
				"Failed to enable NQ with rc = 0x%x", rc);
			goto fail;
		}
	}
	return 0;
fail:
	return rc;
}
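/*
 * MSI-X vector 0 (BNXT_RE_AEQ_IDX) belongs to the RCFW CREQ; that is
 * why the NQ loops here and in bnxt_re_cleanup_res() start at vector 1
 * and index the nq[] array with i - 1.
 */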
static void bnxt_re_free_nq_res(struct bnxt_re_dev *rdev, bool lock_wait)
{
	int i;

	for (i = 0; i < rdev->num_msix - 1; i++) {
		bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id, lock_wait);
		bnxt_qplib_free_nq(&rdev->nq[i]);
	}
}
static void bnxt_re_free_res(struct bnxt_re_dev *rdev, bool lock_wait)
{
	bnxt_re_free_nq_res(rdev, lock_wait);

	if (rdev->qplib_res.dpi_tbl.max) {
		bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
				       &rdev->qplib_res.dpi_tbl,
				       &rdev->dpi_privileged);
	}
	if (rdev->qplib_res.rcfw) {
		bnxt_qplib_free_res(&rdev->qplib_res);
		rdev->qplib_res.rcfw = NULL;
	}
}
static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
{
	int rc = 0, i;

	/* Configure and allocate resources for qplib */
	rdev->qplib_res.rcfw = &rdev->rcfw;
	rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr,
				     rdev->is_virtfn);
	if (rc)
		goto fail;

	rc = bnxt_qplib_alloc_res(&rdev->qplib_res, rdev->en_dev->pdev,
				  rdev->netdev, &rdev->dev_attr);
	if (rc)
		goto fail;

	rc = bnxt_qplib_alloc_dpi(&rdev->qplib_res.dpi_tbl,
				  &rdev->dpi_privileged,
				  rdev);
	if (rc)
		goto dealloc_res;

	for (i = 0; i < rdev->num_msix - 1; i++) {
		rdev->nq[i].hwq.max_elements = BNXT_RE_MAX_CQ_COUNT +
			BNXT_RE_MAX_SRQC_COUNT + 2;
		rc = bnxt_qplib_alloc_nq(rdev->en_dev->pdev, &rdev->nq[i]);
		if (rc) {
			dev_err(rdev_to_dev(rdev), "Alloc Failed NQ%d rc:%#x",
				i, rc);
			goto dealloc_dpi;
		}
		rc = bnxt_re_net_ring_alloc
			(rdev, rdev->nq[i].hwq.pbl[PBL_LVL_0].pg_map_arr,
			 rdev->nq[i].hwq.pbl[rdev->nq[i].hwq.level].pg_count,
			 HWRM_RING_ALLOC_CMPL,
			 BNXT_QPLIB_NQE_MAX_CNT - 1,
			 rdev->msix_entries[i + 1].ring_idx,
			 &rdev->nq[i].ring_id);
		if (rc) {
			dev_err(rdev_to_dev(rdev),
				"Failed to allocate NQ fw id with rc = 0x%x",
				rc);
			goto free_nq;
		}
	}
	return 0;
free_nq:
	for (i = 0; i < rdev->num_msix - 1; i++)
		bnxt_qplib_free_nq(&rdev->nq[i]);
dealloc_dpi:
	bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
			       &rdev->qplib_res.dpi_tbl,
			       &rdev->dpi_privileged);
dealloc_res:
	bnxt_qplib_free_res(&rdev->qplib_res);

fail:
	rdev->qplib_res.rcfw = NULL;
	return rc;
}
static void bnxt_re_dispatch_event(struct ib_device *ibdev, struct ib_qp *qp,
				   u8 port_num, enum ib_event_type event)
{
	struct ib_event ib_event;

	ib_event.device = ibdev;
	if (qp)
		ib_event.element.qp = qp;
	else
		ib_event.element.port_num = port_num;
	ib_event.event = event;
	ib_dispatch_event(&ib_event);
}
#define HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_IVLAN      0x02
static int bnxt_re_query_hwrm_pri2cos(struct bnxt_re_dev *rdev, u8 dir,
				      u64 *cid_map)
{
	struct hwrm_queue_pri2cos_qcfg_input req = {0};
	struct bnxt *bp = netdev_priv(rdev->netdev);
	struct hwrm_queue_pri2cos_qcfg_output resp;
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct bnxt_fw_msg fw_msg;
	u32 flags = 0;
	u8 *qcfgmap, *tmp_map;
	int rc = 0, i;

	if (!cid_map)
		return -EINVAL;

	memset(&fw_msg, 0, sizeof(fw_msg));
	bnxt_re_init_hwrm_hdr(rdev, (void *)&req,
			      HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
	flags |= (dir & 0x01);
	flags |= HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_IVLAN;
	req.flags = cpu_to_le32(flags);
	req.port_id = bp->pf.port_id;

	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (rc)
		return rc;

	if (resp.queue_cfg_info) {
		dev_warn(rdev_to_dev(rdev),
			 "Asymmetric cos queue configuration detected");
		dev_warn(rdev_to_dev(rdev),
			 " on device, QoS may not be fully functional\n");
	}
	qcfgmap = &resp.pri0_cos_queue_id;
	tmp_map = (u8 *)cid_map;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
		tmp_map[i] = qcfgmap[i];

	return rc;
}
static bool bnxt_re_is_qp1_or_shadow_qp(struct bnxt_re_dev *rdev,
					struct bnxt_re_qp *qp)
{
	return (qp->ib_qp.qp_type == IB_QPT_GSI) || (qp == rdev->qp1_sqp);
}
static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev)
{
	int mask = IB_QP_STATE;
	struct ib_qp_attr qp_attr;
	struct bnxt_re_qp *qp;

	qp_attr.qp_state = IB_QPS_ERR;
	mutex_lock(&rdev->qp_lock);
	list_for_each_entry(qp, &rdev->qp_list, list) {
		/* Modify the state of all QPs except QP1/Shadow QP */
		if (!bnxt_re_is_qp1_or_shadow_qp(rdev, qp)) {
			if (qp->qplib_qp.state !=
			    CMDQ_MODIFY_QP_NEW_STATE_RESET &&
			    qp->qplib_qp.state !=
			    CMDQ_MODIFY_QP_NEW_STATE_ERR) {
				bnxt_re_dispatch_event(&rdev->ibdev, &qp->ib_qp,
						       1, IB_EVENT_QP_FATAL);
				bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, mask,
						  NULL);
			}
		}
	}
	mutex_unlock(&rdev->qp_lock);
}
static int bnxt_re_update_gid(struct bnxt_re_dev *rdev)
{
	struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
	struct bnxt_qplib_gid gid;
	u16 gid_idx, index;
	int rc = 0;

	if (!test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags))
		return 0;

	if (!sgid_tbl) {
		dev_err(rdev_to_dev(rdev), "QPLIB: SGID table not allocated");
		return -EINVAL;
	}

	for (index = 0; index < sgid_tbl->active; index++) {
		gid_idx = sgid_tbl->hw_id[index];

		if (!memcmp(&sgid_tbl->tbl[index], &bnxt_qplib_gid_zero,
			    sizeof(bnxt_qplib_gid_zero)))
			continue;
		/* need to modify the VLAN enable setting of non VLAN GID only
		 * as setting is done for VLAN GID while adding GID
		 */
		if (sgid_tbl->vlan[index])
			continue;

		memcpy(&gid, &sgid_tbl->tbl[index], sizeof(gid));

		rc = bnxt_qplib_update_sgid(sgid_tbl, &gid, gid_idx,
					    rdev->qplib_res.netdev->dev_addr);
	}

	return rc;
}
static u32 bnxt_re_get_priority_mask(struct bnxt_re_dev *rdev)
{
	u32 prio_map = 0, tmp_map = 0;
	struct net_device *netdev;
	struct dcb_app app;

	netdev = rdev->netdev;

	memset(&app, 0, sizeof(app));
	app.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE;
	app.protocol = ETH_P_IBOE;
	tmp_map = dcb_ieee_getapp_mask(netdev, &app);
	prio_map = tmp_map;

	app.selector = IEEE_8021QAZ_APP_SEL_DGRAM;
	app.protocol = ROCE_V2_UDP_DPORT;
	tmp_map = dcb_ieee_getapp_mask(netdev, &app);
	prio_map |= tmp_map;

	return prio_map;
}
static void bnxt_re_parse_cid_map(u8 prio_map, u8 *cid_map, u16 *cosq)
{
	u16 prio;
	u8 id;

	for (prio = 0, id = 0; prio < 8; prio++) {
		if (prio_map & (1 << prio)) {
			cosq[id] = cid_map[prio];
			id++;
			if (id == 2) /* Max 2 tcs supported */
				break;
		}
	}
}
static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev)
{
	u8 prio_map = 0;
	u64 cid_map;
	int rc;

	/* Get priority for roce */
	prio_map = bnxt_re_get_priority_mask(rdev);

	if (prio_map == rdev->cur_prio_map)
		return 0;
	rdev->cur_prio_map = prio_map;
	/* Get cosq id for this priority */
	rc = bnxt_re_query_hwrm_pri2cos(rdev, 0, &cid_map);
	if (rc) {
		dev_warn(rdev_to_dev(rdev), "no cos for p_mask %x\n", prio_map);
		return rc;
	}
	/* Parse CoS IDs for app priority */
	bnxt_re_parse_cid_map(prio_map, (u8 *)&cid_map, rdev->cosq);

	rc = bnxt_qplib_map_tc2cos(&rdev->qplib_res, rdev->cosq);
	if (rc) {
		dev_warn(rdev_to_dev(rdev), "no tc for cos{%x, %x}\n",
			 rdev->cosq[0], rdev->cosq[1]);
		return rc;
	}

	/* Actual priorities are not programmed as they are already
	 * done by L2 driver; just enable or disable priority vlan tagging
	 */
	if ((prio_map == 0 && rdev->qplib_res.prio) ||
	    (prio_map != 0 && !rdev->qplib_res.prio)) {
		rdev->qplib_res.prio = prio_map ? true : false;

		bnxt_re_update_gid(rdev);
	}

	return 0;
}
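/*
 * QoS flow in short: read the RoCE priority mask from the DCB app
 * table (RoCE v1 ethertype and the RoCE v2 UDP port), ask firmware
 * which CoS queues those priorities map to, program at most two of
 * them via bnxt_qplib_map_tc2cos(), and toggle priority VLAN tagging
 * on the non-VLAN GIDs whenever the mask flips between zero and
 * non-zero.
 */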
static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev, bool lock_wait)
{
	int i, rc;

	if (test_and_clear_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags)) {
		for (i = 0; i < ARRAY_SIZE(bnxt_re_attributes); i++)
			device_remove_file(&rdev->ibdev.dev,
					   bnxt_re_attributes[i]);
		/* Cleanup ib dev */
		bnxt_re_unregister_ib(rdev);
	}
	if (test_and_clear_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags))
		cancel_delayed_work(&rdev->worker);

	bnxt_re_cleanup_res(rdev);
	bnxt_re_free_res(rdev, lock_wait);

	if (test_and_clear_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags)) {
		rc = bnxt_qplib_deinit_rcfw(&rdev->rcfw);
		if (rc)
			dev_warn(rdev_to_dev(rdev),
				 "Failed to deinitialize RCFW: %#x", rc);
		bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id,
					   lock_wait);
		bnxt_qplib_free_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx);
		bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
		bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id, lock_wait);
		bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
	}
	if (test_and_clear_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags)) {
		rc = bnxt_re_free_msix(rdev, lock_wait);
		if (rc)
			dev_warn(rdev_to_dev(rdev),
				 "Failed to free MSI-X vectors: %#x", rc);
	}
	if (test_and_clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags)) {
		rc = bnxt_re_unregister_netdev(rdev, lock_wait);
		if (rc)
			dev_warn(rdev_to_dev(rdev),
				 "Failed to unregister with netdev: %#x", rc);
	}
}
/* worker thread for polling periodic events. Now used for QoS programming*/
static void bnxt_re_worker(struct work_struct *work)
{
	struct bnxt_re_dev *rdev = container_of(work, struct bnxt_re_dev,
						worker.work);

	bnxt_re_setup_qos(rdev);
	schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
}
static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
{
	int i, j, rc;

	/* Registered a new RoCE device instance to netdev */
	rc = bnxt_re_register_netdev(rdev);
	if (rc) {
		pr_err("Failed to register with netdev: %#x\n", rc);
		return -EINVAL;
	}
	set_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);

	/* Check whether VF or PF */
	bnxt_re_get_sriov_func_type(rdev);

	rc = bnxt_re_request_msix(rdev);
	if (rc) {
		pr_err("Failed to get MSI-X vectors: %#x\n", rc);
		rc = -EINVAL;
		goto fail;
	}
	set_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags);

	/* Establish RCFW Communication Channel to initialize the context
	 * memory for the function and all child VFs
	 */
	rc = bnxt_qplib_alloc_rcfw_channel(rdev->en_dev->pdev, &rdev->rcfw,
					   BNXT_RE_MAX_QPC_COUNT);
	if (rc) {
		pr_err("Failed to allocate RCFW Channel: %#x\n", rc);
		goto fail;
	}
	rc = bnxt_re_net_ring_alloc
			(rdev, rdev->rcfw.creq.pbl[PBL_LVL_0].pg_map_arr,
			 rdev->rcfw.creq.pbl[rdev->rcfw.creq.level].pg_count,
			 HWRM_RING_ALLOC_CMPL, BNXT_QPLIB_CREQE_MAX_CNT - 1,
			 rdev->msix_entries[BNXT_RE_AEQ_IDX].ring_idx,
			 &rdev->rcfw.creq_ring_id);
	if (rc) {
		pr_err("Failed to allocate CREQ: %#x\n", rc);
		goto free_rcfw;
	}
	rc = bnxt_qplib_enable_rcfw_channel
			(rdev->en_dev->pdev, &rdev->rcfw,
			 rdev->msix_entries[BNXT_RE_AEQ_IDX].vector,
			 rdev->msix_entries[BNXT_RE_AEQ_IDX].db_offset,
			 rdev->is_virtfn, &bnxt_re_aeq_handler);
	if (rc) {
		pr_err("Failed to enable RCFW channel: %#x\n", rc);
		goto free_ring;
	}

	rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr,
				     rdev->is_virtfn);
	if (rc)
		goto disable_rcfw;
	if (!rdev->is_virtfn)
		bnxt_re_set_resource_limits(rdev);

	rc = bnxt_qplib_alloc_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx, 0);
	if (rc) {
		pr_err("Failed to allocate QPLIB context: %#x\n", rc);
		goto disable_rcfw;
	}
	rc = bnxt_re_net_stats_ctx_alloc(rdev,
					 rdev->qplib_ctx.stats.dma_map,
					 &rdev->qplib_ctx.stats.fw_id);
	if (rc) {
		pr_err("Failed to allocate stats context: %#x\n", rc);
		goto free_ctx;
	}

	rc = bnxt_qplib_init_rcfw(&rdev->rcfw, &rdev->qplib_ctx,
				  rdev->is_virtfn);
	if (rc) {
		pr_err("Failed to initialize RCFW: %#x\n", rc);
		goto free_sctx;
	}
	set_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags);

	/* Resources based on the 'new' device caps */
	rc = bnxt_re_alloc_res(rdev);
	if (rc) {
		pr_err("Failed to allocate resources: %#x\n", rc);
		goto fail;
	}
	rc = bnxt_re_init_res(rdev);
	if (rc) {
		pr_err("Failed to initialize resources: %#x\n", rc);
		goto fail;
	}

	if (!rdev->is_virtfn) {
		rc = bnxt_re_setup_qos(rdev);
		if (rc)
			pr_info("RoCE priority not yet configured\n");

		INIT_DELAYED_WORK(&rdev->worker, bnxt_re_worker);
		set_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags);
		schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
	}

	/* Register ib dev */
	rc = bnxt_re_register_ib(rdev);
	if (rc) {
		pr_err("Failed to register with IB: %#x\n", rc);
		goto fail;
	}
	dev_info(rdev_to_dev(rdev), "Device registered successfully");
	for (i = 0; i < ARRAY_SIZE(bnxt_re_attributes); i++) {
		rc = device_create_file(&rdev->ibdev.dev,
					bnxt_re_attributes[i]);
		if (rc) {
			dev_err(rdev_to_dev(rdev),
				"Failed to create IB sysfs: %#x", rc);
			/* Must clean up all created device files */
			for (j = 0; j < i; j++)
				device_remove_file(&rdev->ibdev.dev,
						   bnxt_re_attributes[j]);
			bnxt_re_unregister_ib(rdev);
			goto fail;
		}
	}
	set_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags);
	ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed,
			 &rdev->active_width);
	set_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags);
	bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_PORT_ACTIVE);
	bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_GID_CHANGE);

	return 0;
free_sctx:
	bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id, true);
free_ctx:
	bnxt_qplib_free_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx);
disable_rcfw:
	bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
free_ring:
	bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id, true);
free_rcfw:
	bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
fail:
	bnxt_re_ib_unreg(rdev, true);
	return rc;
}
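/*
 * Teardown symmetry: each stage of bnxt_re_ib_reg() either unwinds
 * through the labels above or records its completion in a
 * BNXT_RE_FLAG_* bit, and bnxt_re_ib_unreg() keys every cleanup action
 * off those same bits, so a partially initialized device tears down
 * safely.
 */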
static void bnxt_re_dev_unreg(struct bnxt_re_dev *rdev)
{
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct net_device *netdev = rdev->netdev;

	bnxt_re_dev_remove(rdev);

	if (netdev)
		bnxt_re_dev_unprobe(netdev, en_dev);
}
static int bnxt_re_dev_reg(struct bnxt_re_dev **rdev, struct net_device *netdev)
{
	struct bnxt_en_dev *en_dev;
	int rc = 0;

	if (!is_bnxt_re_dev(netdev))
		return -ENODEV;

	en_dev = bnxt_re_dev_probe(netdev);
	if (IS_ERR(en_dev)) {
		if (en_dev != ERR_PTR(-ENODEV))
			pr_err("%s: Failed to probe\n", ROCE_DRV_MODULE_NAME);
		rc = PTR_ERR(en_dev);
		goto exit;
	}
	*rdev = bnxt_re_dev_add(netdev, en_dev);
	if (!*rdev) {
		rc = -ENOMEM;
		bnxt_re_dev_unprobe(netdev, en_dev);
		goto exit;
	}
exit:
	return rc;
}
static void bnxt_re_remove_one(struct bnxt_re_dev *rdev)
{
	pci_dev_put(rdev->en_dev->pdev);
}
/* Handle all deferred netevents tasks */
static void bnxt_re_task(struct work_struct *work)
{
	struct bnxt_re_work *re_work;
	struct bnxt_re_dev *rdev;
	int rc = 0;

	re_work = container_of(work, struct bnxt_re_work, work);
	rdev = re_work->rdev;

	if (re_work->event != NETDEV_REGISTER &&
	    !test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags))
		return;

	switch (re_work->event) {
	case NETDEV_REGISTER:
		rc = bnxt_re_ib_reg(rdev);
		if (rc)
			dev_err(rdev_to_dev(rdev),
				"Failed to register with IB: %#x", rc);
		break;
	case NETDEV_UP:
		bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
				       IB_EVENT_PORT_ACTIVE);
		break;
	case NETDEV_DOWN:
		bnxt_re_dev_stop(rdev);
		break;
	case NETDEV_CHANGE:
		if (!netif_carrier_ok(rdev->netdev))
			bnxt_re_dev_stop(rdev);
		else if (netif_carrier_ok(rdev->netdev))
			bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
					       IB_EVENT_PORT_ACTIVE);
		ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed,
				 &rdev->active_width);
		break;
	default:
		break;
	}
	smp_mb__before_atomic();
	clear_bit(BNXT_RE_FLAG_TASK_IN_PROG, &rdev->flags);
	kfree(re_work);
}
static void bnxt_re_init_one(struct bnxt_re_dev *rdev)
{
	pci_dev_get(rdev->en_dev->pdev);
}
1454 * "Notifier chain callback can be invoked for the same chain from
1455 * different CPUs at the same time".
1457 * For cases when the netdev is already present, our call to the
1458 * register_netdevice_notifier() will actually get the rtnl_lock()
1459 * before sending NETDEV_REGISTER and (if up) NETDEV_UP
1462 * But for cases when the netdev is not already present, the notifier
1463 * chain is subjected to be invoked from different CPUs simultaneously.
1465 * This is protected by the netdev_mutex.
static int bnxt_re_netdev_event(struct notifier_block *notifier,
				unsigned long event, void *ptr)
{
	struct net_device *real_dev, *netdev = netdev_notifier_info_to_dev(ptr);
	struct bnxt_re_work *re_work;
	struct bnxt_re_dev *rdev;
	int rc = 0;
	bool sch_work = false;

	real_dev = rdma_vlan_dev_real_dev(netdev);
	if (!real_dev)
		real_dev = netdev;

	rdev = bnxt_re_from_netdev(real_dev);
	if (!rdev && event != NETDEV_REGISTER)
		goto exit;
	if (real_dev != netdev)
		goto exit;

	switch (event) {
	case NETDEV_REGISTER:
		if (rdev)
			break;
		rc = bnxt_re_dev_reg(&rdev, real_dev);
		if (rc == -ENODEV)
			break;
		if (rc) {
			pr_err("Failed to register with the device %s: %#x\n",
			       real_dev->name, rc);
			break;
		}
		bnxt_re_init_one(rdev);
		sch_work = true;
		break;

	case NETDEV_UNREGISTER:
		/* netdev notifier will call NETDEV_UNREGISTER again later since
		 * we are still holding the reference to the netdev
		 */
		if (test_bit(BNXT_RE_FLAG_TASK_IN_PROG, &rdev->flags))
			goto exit;
		bnxt_re_ib_unreg(rdev, false);
		bnxt_re_remove_one(rdev);
		bnxt_re_dev_unreg(rdev);
		break;

	default:
		sch_work = true;
		break;
	}
	if (sch_work) {
		/* Allocate for the deferred task */
		re_work = kzalloc(sizeof(*re_work), GFP_ATOMIC);
		if (re_work) {
			re_work->rdev = rdev;
			re_work->event = event;
			re_work->vlan_dev = (real_dev == netdev ?
					     NULL : netdev);
			INIT_WORK(&re_work->work, bnxt_re_task);
			set_bit(BNXT_RE_FLAG_TASK_IN_PROG, &rdev->flags);
			queue_work(bnxt_re_wq, &re_work->work);
		}
	}

exit:
	return NOTIFY_DONE;
}
static struct notifier_block bnxt_re_netdev_notifier = {
	.notifier_call = bnxt_re_netdev_event
};
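/*
 * The notifier callback itself stays lightweight: registration and
 * teardown are deferred to bnxt_re_wq via bnxt_re_task(), and the work
 * item is allocated with GFP_ATOMIC so the notifier path never blocks
 * in the allocator.
 */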
static int __init bnxt_re_mod_init(void)
{
	int rc = 0;

	pr_info("%s: %s", ROCE_DRV_MODULE_NAME, version);

	bnxt_re_wq = create_singlethread_workqueue("bnxt_re");
	if (!bnxt_re_wq)
		return -ENOMEM;

	INIT_LIST_HEAD(&bnxt_re_dev_list);

	rc = register_netdevice_notifier(&bnxt_re_netdev_notifier);
	if (rc) {
		pr_err("%s: Cannot register to netdevice_notifier",
		       ROCE_DRV_MODULE_NAME);
		goto err_netdev;
	}
	return 0;

err_netdev:
	destroy_workqueue(bnxt_re_wq);

	return rc;
}
static void __exit bnxt_re_mod_exit(void)
{
	struct bnxt_re_dev *rdev, *next;
	LIST_HEAD(to_be_deleted);

	mutex_lock(&bnxt_re_dev_lock);
	/* Free all adapter allocated resources */
	if (!list_empty(&bnxt_re_dev_list))
		list_splice_init(&bnxt_re_dev_list, &to_be_deleted);
	mutex_unlock(&bnxt_re_dev_lock);
	/*
	 * Cleanup the devices in reverse order so that the VF device
	 * cleanup is done before PF cleanup
	 */
	list_for_each_entry_safe_reverse(rdev, next, &to_be_deleted, list) {
		dev_info(rdev_to_dev(rdev), "Unregistering Device");
		bnxt_re_dev_stop(rdev);
		bnxt_re_ib_unreg(rdev, true);
		bnxt_re_remove_one(rdev);
		bnxt_re_dev_unreg(rdev);
	}
	unregister_netdevice_notifier(&bnxt_re_netdev_notifier);
	if (bnxt_re_wq)
		destroy_workqueue(bnxt_re_wq);
}
module_init(bnxt_re_mod_init);
module_exit(bnxt_re_mod_exit);