/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2016-2018 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <asm/byteorder.h>
#include <linux/bitmap.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_ulp.h"

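/* Register an upper-layer-protocol (ULP) driver, e.g. the RoCE driver,
 * with this bnxt_en device.  Must be called under RTNL.  For the RoCE
 * ULP a minimum number of statistics contexts is carved out of the
 * function's quota up front.
 */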
static int bnxt_register_dev(struct bnxt_en_dev *edev, int ulp_id,
			     struct bnxt_ulp_ops *ulp_ops, void *handle)
{
	struct net_device *dev = edev->net;
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_ulp *ulp;

	ASSERT_RTNL();
	if (ulp_id >= BNXT_MAX_ULP)
		return -EINVAL;

	ulp = &edev->ulp_tbl[ulp_id];
	if (rcu_access_pointer(ulp->ulp_ops)) {
		netdev_err(bp->dev, "ulp id %d already registered\n", ulp_id);
		return -EBUSY;
	}
	if (ulp_id == BNXT_ROCE_ULP) {
		unsigned int max_stat_ctxs;

		max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp);
		if (max_stat_ctxs <= BNXT_MIN_ROCE_STAT_CTXS ||
		    bp->num_stat_ctxs == max_stat_ctxs)
			return -ENOMEM;
		bnxt_set_max_func_stat_ctxs(bp, max_stat_ctxs -
					    BNXT_MIN_ROCE_STAT_CTXS);
	}

	atomic_set(&ulp->ref_count, 0);
	ulp->handle = handle;
	rcu_assign_pointer(ulp->ulp_ops, ulp_ops);

	if (ulp_id == BNXT_ROCE_ULP) {
		if (test_bit(BNXT_STATE_OPEN, &bp->state))
			bnxt_hwrm_vnic_cfg(bp, 0);
	}

	return 0;
}

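/* Undo bnxt_register_dev(): give back the reserved stat context, free
 * any ULP MSI-X vectors, drop the async event registration and wait
 * briefly for outstanding ulp_ops references to drain.
 */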
static int bnxt_unregister_dev(struct bnxt_en_dev *edev, int ulp_id)
{
	struct net_device *dev = edev->net;
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_ulp *ulp;
	int i = 0;

	ASSERT_RTNL();
	if (ulp_id >= BNXT_MAX_ULP)
		return -EINVAL;

	ulp = &edev->ulp_tbl[ulp_id];
	if (!rcu_access_pointer(ulp->ulp_ops)) {
		netdev_err(bp->dev, "ulp id %d not registered\n", ulp_id);
		return -EINVAL;
	}
	if (ulp_id == BNXT_ROCE_ULP) {
		unsigned int max_stat_ctxs;

		max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp);
		bnxt_set_max_func_stat_ctxs(bp, max_stat_ctxs + 1);
		if (ulp->msix_requested)
			edev->en_ops->bnxt_free_msix(edev, ulp_id);
	}
	if (ulp->max_async_event_id)
		bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);

	RCU_INIT_POINTER(ulp->ulp_ops, NULL);
	synchronize_rcu();
	ulp->max_async_event_id = 0;
	ulp->async_events_bmap = NULL;
	while (atomic_read(&ulp->ref_count) != 0 && i < 10) {
		msleep(100);
		i++;
	}
	return 0;
}

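/* Describe the MSI-X vectors assigned to the RoCE ULP: IRQ vector,
 * completion ring index and doorbell offset (0x80 bytes per ring).
 */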
static void bnxt_fill_msix_vecs(struct bnxt *bp, struct bnxt_msix_entry *ent)
{
	struct bnxt_en_dev *edev = bp->edev;
	int num_msix, idx, i;

	num_msix = edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested;
	idx = edev->ulp_tbl[BNXT_ROCE_ULP].msix_base;
	for (i = 0; i < num_msix; i++) {
		ent[i].vector = bp->irq_tbl[idx + i].vector;
		ent[i].ring_idx = idx + i;
		ent[i].db_offset = (idx + i) * 0x80;
	}
}

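/* Reserve up to num_msix MSI-X vectors for the RoCE ULP.  With the new
 * resource manager (BNXT_NEW_RM) the ULP vectors follow the L2
 * completion rings; otherwise they are carved from the top of the
 * function's completion ring range.  The NIC may be closed/reopened
 * (or rings re-reserved) to grow the IRQ table.  Returns the number of
 * vectors granted or a negative errno.
 */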
static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
			      struct bnxt_msix_entry *ent, int num_msix)
{
	struct net_device *dev = edev->net;
	struct bnxt *bp = netdev_priv(dev);
	int max_idx, max_cp_rings;
	int avail_msix, idx;
	int rc = 0;

	ASSERT_RTNL();
	if (ulp_id != BNXT_ROCE_ULP)
		return -EINVAL;

	if (!(bp->flags & BNXT_FLAG_USING_MSIX))
		return -ENODEV;

	if (edev->ulp_tbl[ulp_id].msix_requested)
		return -EAGAIN;

	max_cp_rings = bnxt_get_max_func_cp_rings(bp);
	avail_msix = bnxt_get_avail_msix(bp, num_msix);
	if (!avail_msix)
		return -ENOMEM;
	if (avail_msix > num_msix)
		avail_msix = num_msix;

	if (BNXT_NEW_RM(bp)) {
		idx = bp->cp_nr_rings;
	} else {
		max_idx = min_t(int, bp->total_irqs, max_cp_rings);
		idx = max_idx - avail_msix;
	}
	edev->ulp_tbl[ulp_id].msix_base = idx;
	edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
	if (bp->total_irqs < (idx + avail_msix)) {
		if (netif_running(dev)) {
			bnxt_close_nic(bp, true, false);
			rc = bnxt_open_nic(bp, true, false);
		} else {
			rc = bnxt_reserve_rings(bp);
		}
	}
	if (rc) {
		edev->ulp_tbl[ulp_id].msix_requested = 0;
		return -EAGAIN;
	}

	if (BNXT_NEW_RM(bp)) {
		struct bnxt_hw_resc *hw_resc = &bp->hw_resc;

		avail_msix = hw_resc->resv_cp_rings - bp->cp_nr_rings;
		edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
	}
	bnxt_fill_msix_vecs(bp, ent);
	edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED;
	return avail_msix;
}

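/* Return the ULP MSI-X vectors to the L2 driver.  A running interface
 * is closed and reopened so the rings can be redistributed.
 */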
static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id)
{
	struct net_device *dev = edev->net;
	struct bnxt *bp = netdev_priv(dev);

	ASSERT_RTNL();
	if (ulp_id != BNXT_ROCE_ULP)
		return -EINVAL;

	if (!(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
		return 0;

	edev->ulp_tbl[ulp_id].msix_requested = 0;
	edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED;
	if (netif_running(dev)) {
		bnxt_close_nic(bp, true, false);
		bnxt_open_nic(bp, true, false);
	}
	return 0;
}

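/* Helpers for the L2 driver: how many MSI-X vectors the RoCE ULP has
 * been granted and where its vector range starts.
 */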
int bnxt_get_ulp_msix_num(struct bnxt *bp)
{
	if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
		struct bnxt_en_dev *edev = bp->edev;

		return edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested;
	}
	return 0;
}

int bnxt_get_ulp_msix_base(struct bnxt *bp)
{
	if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
		struct bnxt_en_dev *edev = bp->edev;

		if (edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested)
			return edev->ulp_tbl[BNXT_ROCE_ULP].msix_base;
	}
	return 0;
}

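/* Forward a fully built HWRM request from the ULP to the firmware and
 * copy back at most resp_max_len bytes of the response, all under
 * hwrm_cmd_lock.
 */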
static int bnxt_send_msg(struct bnxt_en_dev *edev, int ulp_id,
			 struct bnxt_fw_msg *fw_msg)
{
	struct net_device *dev = edev->net;
	struct bnxt *bp = netdev_priv(dev);
	struct input *req;
	int rc;

	mutex_lock(&bp->hwrm_cmd_lock);
	req = fw_msg->msg;
	req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
	rc = _hwrm_send_message(bp, fw_msg->msg, fw_msg->msg_len,
				fw_msg->timeout);
	if (!rc) {
		struct output *resp = bp->hwrm_cmd_resp_addr;
		u32 len = le16_to_cpu(resp->resp_len);

		if (fw_msg->resp_max_len < len)
			len = fw_msg->resp_max_len;

		memcpy(fw_msg->resp, resp, len);
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

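/* Reference counting used to keep a ULP's ops/handle alive while a
 * callback is invoked outside the RCU read-side critical section (see
 * bnxt_ulp_sriov_cfg()).
 */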
static void bnxt_ulp_get(struct bnxt_ulp *ulp)
{
	atomic_inc(&ulp->ref_count);
}

static void bnxt_ulp_put(struct bnxt_ulp *ulp)
{
	atomic_dec(&ulp->ref_count);
}

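/* Ask every registered ULP to quiesce.  Runs under RTNL; ulp_ops is
 * read with rtnl_dereference().
 */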
void bnxt_ulp_stop(struct bnxt *bp)
{
	struct bnxt_en_dev *edev = bp->edev;
	struct bnxt_ulp_ops *ops;
	int i;

	if (!edev)
		return;

	for (i = 0; i < BNXT_MAX_ULP; i++) {
		struct bnxt_ulp *ulp = &edev->ulp_tbl[i];

		ops = rtnl_dereference(ulp->ulp_ops);
		if (!ops || !ops->ulp_stop)
			continue;
		ops->ulp_stop(ulp->handle);
	}
}

void bnxt_ulp_start(struct bnxt *bp)
{
	struct bnxt_en_dev *edev = bp->edev;
	struct bnxt_ulp_ops *ops;
	int i;

	if (!edev)
		return;

	for (i = 0; i < BNXT_MAX_ULP; i++) {
		struct bnxt_ulp *ulp = &edev->ulp_tbl[i];

		ops = rtnl_dereference(ulp->ulp_ops);
		if (!ops || !ops->ulp_start)
			continue;
		ops->ulp_start(ulp->handle);
	}
}

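/* Notify the ULPs of a change in the number of VFs.  The callback may
 * sleep, so the ops pointer is picked up under rcu_read_lock() and
 * pinned with a reference before the lock is dropped.
 */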
void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs)
{
	struct bnxt_en_dev *edev = bp->edev;
	struct bnxt_ulp_ops *ops;
	int i;

	if (!edev)
		return;

	for (i = 0; i < BNXT_MAX_ULP; i++) {
		struct bnxt_ulp *ulp = &edev->ulp_tbl[i];

		rcu_read_lock();
		ops = rcu_dereference(ulp->ulp_ops);
		if (!ops || !ops->ulp_sriov_config) {
			rcu_read_unlock();
			continue;
		}
		bnxt_ulp_get(ulp);
		rcu_read_unlock();
		ops->ulp_sriov_config(ulp->handle, num_vfs);
		bnxt_ulp_put(ulp);
	}
}

void bnxt_ulp_shutdown(struct bnxt *bp)
{
	struct bnxt_en_dev *edev = bp->edev;
	struct bnxt_ulp_ops *ops;
	int i;

	if (!edev)
		return;

	for (i = 0; i < BNXT_MAX_ULP; i++) {
		struct bnxt_ulp *ulp = &edev->ulp_tbl[i];

		ops = rtnl_dereference(ulp->ulp_ops);
		if (!ops || !ops->ulp_shutdown)
			continue;
		ops->ulp_shutdown(ulp->handle);
	}
}

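/* The L2 driver is about to release its MSI-X vectors; tell the RoCE
 * ULP to stop using the ones it was given.
 */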
void bnxt_ulp_irq_stop(struct bnxt *bp)
{
	struct bnxt_en_dev *edev = bp->edev;
	struct bnxt_ulp_ops *ops;

	if (!edev || !(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
		return;

	if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
		struct bnxt_ulp *ulp = &edev->ulp_tbl[BNXT_ROCE_ULP];

		if (!ulp->msix_requested)
			return;

		ops = rtnl_dereference(ulp->ulp_ops);
		if (!ops || !ops->ulp_irq_stop)
			return;
		ops->ulp_irq_stop(ulp->handle);
	}
}

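/* MSI-X vectors are available again (or reinitialization failed when
 * err is set): hand the RoCE ULP a refreshed vector table, or NULL on
 * error.
 */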
void bnxt_ulp_irq_restart(struct bnxt *bp, int err)
{
	struct bnxt_en_dev *edev = bp->edev;
	struct bnxt_ulp_ops *ops;

	if (!edev || !(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
		return;

	if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
		struct bnxt_ulp *ulp = &edev->ulp_tbl[BNXT_ROCE_ULP];
		struct bnxt_msix_entry *ent = NULL;

		if (!ulp->msix_requested)
			return;

		ops = rtnl_dereference(ulp->ulp_ops);
		if (!ops || !ops->ulp_irq_restart)
			return;

		if (!err) {
			ent = kcalloc(ulp->msix_requested, sizeof(*ent),
				      GFP_KERNEL);
			if (!ent)
				return;
			bnxt_fill_msix_vecs(bp, ent);
		}
		ops->ulp_irq_restart(ulp->handle, ent);
		kfree(ent);
	}
}

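/* Fan a firmware async event completion out to every ULP that has
 * registered interest in this event_id.
 */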
void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl)
{
	u16 event_id = le16_to_cpu(cmpl->event_id);
	struct bnxt_en_dev *edev = bp->edev;
	struct bnxt_ulp_ops *ops;
	int i;

	if (!edev)
		return;

	rcu_read_lock();
	for (i = 0; i < BNXT_MAX_ULP; i++) {
		struct bnxt_ulp *ulp = &edev->ulp_tbl[i];

		ops = rcu_dereference(ulp->ulp_ops);
		if (!ops || !ops->ulp_async_notifier)
			continue;
		if (!ulp->async_events_bmap ||
		    event_id > ulp->max_async_event_id)
			continue;

		/* Read max_async_event_id first before testing the bitmap. */
		smp_rmb();
		if (test_bit(event_id, ulp->async_events_bmap))
			ops->ulp_async_notifier(ulp->handle, cmpl);
	}
	rcu_read_unlock();
}

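/* Record which async events the ULP wants to see.  The bitmap pointer
 * is published before max_async_event_id (the smp_wmb() here pairs
 * with the smp_rmb() in bnxt_ulp_async_events()) so the reader never
 * tests a bitmap that is not yet valid.
 */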
static int bnxt_register_async_events(struct bnxt_en_dev *edev, int ulp_id,
				      unsigned long *events_bmap, u16 max_id)
{
	struct net_device *dev = edev->net;
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_ulp *ulp;

	if (ulp_id >= BNXT_MAX_ULP)
		return -EINVAL;

	ulp = &edev->ulp_tbl[ulp_id];
	ulp->async_events_bmap = events_bmap;
	/* Make sure bnxt_ulp_async_events() sees this order */
	smp_wmb();
	ulp->max_async_event_id = max_id;
	bnxt_hwrm_func_rgtr_async_events(bp, events_bmap, max_id + 1);
	return 0;
}

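/* The ops table handed to ULPs via bnxt_ulp_probe().  A minimal usage
 * sketch from a ULP's point of view (illustrative only, not the actual
 * bnxt_re code; "my_ulp_ops" and "my_handle" are placeholders):
 *
 *	struct bnxt_en_dev *edev = bnxt_ulp_probe(netdev);
 *
 *	if (!IS_ERR(edev))
 *		rc = edev->en_ops->bnxt_register_device(edev, BNXT_ROCE_ULP,
 *							&my_ulp_ops, my_handle);
 */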
static const struct bnxt_en_ops bnxt_en_ops_tbl = {
	.bnxt_register_device	= bnxt_register_dev,
	.bnxt_unregister_device	= bnxt_unregister_dev,
	.bnxt_request_msix	= bnxt_req_msix_vecs,
	.bnxt_free_msix		= bnxt_free_msix_vecs,
	.bnxt_send_fw_msg	= bnxt_send_msg,
	.bnxt_register_fw_async_events	= bnxt_register_async_events,
};

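/* Called by a ULP to look up (allocating on first use) the bnxt_en_dev
 * handle for a bnxt netdev.  The RoCE capability flags are copied from
 * bp->flags so the ULP can tell which RoCE versions the device
 * supports.
 */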
struct bnxt_en_dev *bnxt_ulp_probe(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_en_dev *edev;

	edev = bp->edev;
	if (!edev) {
		edev = kzalloc(sizeof(*edev), GFP_KERNEL);
		if (!edev)
			return ERR_PTR(-ENOMEM);
		edev->en_ops = &bnxt_en_ops_tbl;
		if (bp->flags & BNXT_FLAG_ROCEV1_CAP)
			edev->flags |= BNXT_EN_FLAG_ROCEV1_CAP;
		if (bp->flags & BNXT_FLAG_ROCEV2_CAP)
			edev->flags |= BNXT_EN_FLAG_ROCEV2_CAP;
		edev->net = dev;
		edev->pdev = bp->pdev;
		bp->edev = edev;
	}
	return bp->edev;
}