/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2019 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>

#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/mii.h>
#include <linux/mdio.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/rtc.h>
#include <linux/bpf.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/udp_tunnel.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/log2.h>
#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/cpu_rmap.h>
#include <linux/cpumask.h>
#include <net/pkt_cls.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <net/page_pool.h>

#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"
#include "bnxt_devlink.h"
#include "bnxt_debugfs.h"

#define BNXT_TX_TIMEOUT		(5 * HZ)

static const char version[] =
	"Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
MODULE_VERSION(DRV_MODULE_VERSION);

#define BNXT_RX_OFFSET		(NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET	NET_SKB_PAD
#define BNXT_RX_COPY_THRESH	256

#define BNXT_TX_PUSH_THRESH	164
/* indexed by enum above */
static const struct {
	char *name;
} board_info[] = {
	[BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
	[BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
	[BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
	[BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
	[BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
	[BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
	[BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
	[BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
	[BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
	[BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
	[BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
	[BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
	[BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
	[BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
	[BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
	[BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
	[BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
	[BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
	[BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
	[BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
	[BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
	[BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
	[BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
	[NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
	[NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
	[NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
};
static const struct pci_device_id bnxt_pci_tbl[] = {
	{ PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
	{ PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
	{ PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
	{ PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
	{ PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
	{ PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
	{ PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
	{ PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
	{ PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
	{ PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
	{ PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
	{ PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
	{ PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
	{ PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
	{ PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
	{ PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
	{ PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
	{ PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
	{ PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
	{ PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
	{ PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
	{ PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57508_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57502_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57508_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57502_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
	{ PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
#ifdef CONFIG_BNXT_SRIOV
	{ PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
	{ PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
#endif
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);
static const u16 bnxt_vf_req_snif[] = {
	HWRM_CFA_L2_FILTER_ALLOC,
};

static const u16 bnxt_async_events_arr[] = {
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
	ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
	ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
	ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
};

static struct workqueue_struct *bnxt_pf_wq;
static bool bnxt_vf_pciid(enum board_idx idx)
{
	return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
		idx == NETXTREME_S_VF || idx == NETXTREME_E_P5_VF);
}
#define DB_CP_REARM_FLAGS	(DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS		(DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
#define DB_CP_IRQ_DIS_FLAGS	(DB_KEY_CP | DB_IRQ_DIS)

#define BNXT_CP_DB_IRQ_DIS(db)						\
		writel(DB_CP_IRQ_DIS_FLAGS, db)

#define BNXT_DB_CQ(db, idx)						\
	writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_NQ_P5(db, idx)						\
	writeq((db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_CQ_ARM(db, idx)						\
	writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_NQ_ARM_P5(db, idx)					\
	writeq((db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx), (db)->doorbell)
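
/* P5 (57500-series) chips take 64-bit doorbell writes keyed by db_key64,
 * while older chips use the legacy 32-bit DB_KEY_CP format defined above.
 * The helpers below pick the right format based on BNXT_FLAG_CHIP_P5.
 */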
static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		BNXT_DB_NQ_P5(db, idx);
	else
		BNXT_DB_CQ(db, idx);
}

static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		BNXT_DB_NQ_ARM_P5(db, idx);
	else
		BNXT_DB_CQ_ARM(db, idx);
}

static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		writeq(db->db_key64 | DBR_TYPE_CQ_ARMALL | RING_CMP(idx),
		       db->doorbell);
	else
		BNXT_DB_CQ(db, idx);
}
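
/* TX BD length hint table.  The transmit path indexes this array by the
 * packet length in 512-byte units to give the hardware a size hint in the
 * buffer descriptor flags.
 */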
const u16 bnxt_lhint_arr[] = {
	TX_BD_FLAGS_LHINT_512_AND_SMALLER,
	TX_BD_FLAGS_LHINT_512_TO_1023,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
};
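
/* For TC switchdev (vf-rep) transmits, the destination hardware port is
 * carried in the skb metadata dst and returned here as the CFA action.
 */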
static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
{
	struct metadata_dst *md_dst = skb_metadata_dst(skb);

	if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
		return 0;

	return md_dst->u.port_info.port_id;
}
static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	struct tx_bd *txbd;
	struct tx_bd_ext *txbd1;
	struct netdev_queue *txq;
	int i;
	dma_addr_t mapping;
	unsigned int length, pad = 0;
	u32 len, free_size, vlan_tag_flags, cfa_action, flags;
	u16 prod, last_frag;
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_tx_ring_info *txr;
	struct bnxt_sw_tx_bd *tx_buf;

	i = skb_get_queue_mapping(skb);
	if (unlikely(i >= bp->tx_nr_rings)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	txq = netdev_get_tx_queue(dev, i);
	txr = &bp->tx_ring[bp->tx_ring_map[i]];
	prod = txr->tx_prod;

	free_size = bnxt_tx_avail(bp, txr);
	if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
		netif_tx_stop_queue(txq);
		return NETDEV_TX_BUSY;
	}

	length = skb->len;
	len = skb_headlen(skb);
	last_frag = skb_shinfo(skb)->nr_frags;

	txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

	txbd->tx_bd_opaque = prod;

	tx_buf = &txr->tx_buf_ring[prod];
	tx_buf->skb = skb;
	tx_buf->nr_frags = last_frag;

	vlan_tag_flags = 0;
	cfa_action = bnxt_xmit_get_cfa_action(skb);
	if (skb_vlan_tag_present(skb)) {
		vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
				 skb_vlan_tag_get(skb);
		/* Currently supports 8021Q, 8021AD vlan offloads
		 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated
		 */
		if (skb->vlan_proto == htons(ETH_P_8021Q))
			vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
	}

	if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
		struct tx_push_buffer *tx_push_buf = txr->tx_push;
		struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
		struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
		void __iomem *db = txr->tx_db.doorbell;
		void *pdata = tx_push_buf->data;
		u64 *end;
		int j, push_len;

		/* Set COAL_NOW to be ready quickly for the next push */
		tx_push->tx_bd_len_flags_type =
			cpu_to_le32((length << TX_BD_LEN_SHIFT) |
					TX_BD_TYPE_LONG_TX_BD |
					TX_BD_FLAGS_LHINT_512_AND_SMALLER |
					TX_BD_FLAGS_COAL_NOW |
					TX_BD_FLAGS_PACKET_END |
					(2 << TX_BD_FLAGS_BD_CNT_SHIFT));

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			tx_push1->tx_bd_hsize_lflags =
					cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		else
			tx_push1->tx_bd_hsize_lflags = 0;

		tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
		tx_push1->tx_bd_cfa_action =
			cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);

		end = pdata + length;
		end = PTR_ALIGN(end, 8) - 1;
		*end = 0;

		skb_copy_from_linear_data(skb, pdata, len);
		pdata += len;
		for (j = 0; j < last_frag; j++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
			void *fptr;

			fptr = skb_frag_address_safe(frag);
			if (!fptr)
				goto normal_tx;

			memcpy(pdata, fptr, skb_frag_size(frag));
			pdata += skb_frag_size(frag);
		}

		txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
		txbd->tx_bd_haddr = txr->data_mapping;
		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
		memcpy(txbd, tx_push1, sizeof(*txbd));
		prod = NEXT_TX(prod);
		tx_push->doorbell =
			cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
		txr->tx_prod = prod;

		tx_buf->is_push = 1;
		netdev_tx_sent_queue(txq, skb->len);
		wmb();	/* Sync is_push and byte queue before pushing data */

		push_len = (length + sizeof(*tx_push) + 7) / 8;
		if (push_len > 16) {
			__iowrite64_copy(db, tx_push_buf, 16);
			__iowrite32_copy(db + 4, tx_push_buf + 1,
					 (push_len - 16) << 1);
		} else {
			__iowrite64_copy(db, tx_push_buf, push_len);
		}

		goto tx_done;
	}

normal_tx:
	if (length < BNXT_MIN_PKT_SIZE) {
		pad = BNXT_MIN_PKT_SIZE - length;
		if (skb_pad(skb, pad)) {
			/* SKB already freed. */
			tx_buf->skb = NULL;
			return NETDEV_TX_OK;
		}
		length = BNXT_MIN_PKT_SIZE;
	}

	mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
		dev_kfree_skb_any(skb);
		tx_buf->skb = NULL;
		return NETDEV_TX_OK;
	}

	dma_unmap_addr_set(tx_buf, mapping, mapping);
	flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
		((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);

	txbd->tx_bd_haddr = cpu_to_le64(mapping);

	prod = NEXT_TX(prod);
	txbd1 = (struct tx_bd_ext *)
		&txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

	txbd1->tx_bd_hsize_lflags = 0;
	if (skb_is_gso(skb)) {
		u32 hdr_len;

		if (skb->encapsulation)
			hdr_len = skb_inner_network_offset(skb) +
				skb_inner_network_header_len(skb) +
				inner_tcp_hdrlen(skb);
		else
			hdr_len = skb_transport_offset(skb) +
				tcp_hdrlen(skb);

		txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO |
					TX_BD_FLAGS_T_IPID |
					(hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
		length = skb_shinfo(skb)->gso_size;
		txbd1->tx_bd_mss = cpu_to_le32(length);
		length += hdr_len;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txbd1->tx_bd_hsize_lflags =
			cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		txbd1->tx_bd_mss = 0;
	}

	length >>= 9;
	if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
		dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
				     skb->len);
		i = 0;
		goto tx_dma_error;
	}
	flags |= bnxt_lhint_arr[length];
	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);

	txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
	txbd1->tx_bd_cfa_action =
			cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

		len = skb_frag_size(frag);
		mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
					   DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
			goto tx_dma_error;

		tx_buf = &txr->tx_buf_ring[prod];
		dma_unmap_addr_set(tx_buf, mapping, mapping);

		txbd->tx_bd_haddr = cpu_to_le64(mapping);

		flags = len << TX_BD_LEN_SHIFT;
		txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
	}

	flags &= ~TX_BD_LEN;
	txbd->tx_bd_len_flags_type =
		cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
			    TX_BD_FLAGS_PACKET_END);

	netdev_tx_sent_queue(txq, skb->len);

	/* Sync BD data before updating doorbell */
	wmb();

	prod = NEXT_TX(prod);
	txr->tx_prod = prod;

	if (!netdev_xmit_more() || netif_xmit_stopped(txq))
		bnxt_db_write(bp, &txr->tx_db, prod);

tx_done:

	if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
		if (netdev_xmit_more() && !tx_buf->is_push)
			bnxt_db_write(bp, &txr->tx_db, prod);

		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in bnxt_tx_avail() below, because in
		 * bnxt_tx_int(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
			netif_tx_wake_queue(txq);
	}
	return NETDEV_TX_OK;

tx_dma_error:
	last_frag = i;

	/* start back at beginning and unmap skb */
	prod = txr->tx_prod;
	tx_buf = &txr->tx_buf_ring[prod];
	tx_buf->skb = NULL;
	dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			 skb_headlen(skb), PCI_DMA_TODEVICE);
	prod = NEXT_TX(prod);

	/* unmap remaining mapped pages */
	for (i = 0; i < last_frag; i++) {
		prod = NEXT_TX(prod);
		tx_buf = &txr->tx_buf_ring[prod];
		dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
			       PCI_DMA_TODEVICE);
	}

	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
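
/* Reclaim completed TX descriptors: unmap the buffers, free the skbs and,
 * if the queue was stopped and enough descriptors are now free, wake it.
 */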
static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
{
	struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
	struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
	u16 cons = txr->tx_cons;
	struct pci_dev *pdev = bp->pdev;
	int i;
	unsigned int tx_bytes = 0;

	for (i = 0; i < nr_pkts; i++) {
		struct bnxt_sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int j, last;

		tx_buf = &txr->tx_buf_ring[cons];
		cons = NEXT_TX(cons);
		skb = tx_buf->skb;
		tx_buf->skb = NULL;

		if (tx_buf->is_push) {
			tx_buf->is_push = 0;
			goto next_tx_int;
		}

		dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
				 skb_headlen(skb), PCI_DMA_TODEVICE);
		last = tx_buf->nr_frags;

		for (j = 0; j < last; j++) {
			cons = NEXT_TX(cons);
			tx_buf = &txr->tx_buf_ring[cons];
			dma_unmap_page(
				&pdev->dev,
				dma_unmap_addr(tx_buf, mapping),
				skb_frag_size(&skb_shinfo(skb)->frags[j]),
				PCI_DMA_TODEVICE);
		}

next_tx_int:
		cons = NEXT_TX(cons);

		tx_bytes += skb->len;
		dev_kfree_skb_any(skb);
	}

	netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
	txr->tx_cons = cons;

	/* Need to make the tx_cons update visible to bnxt_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnxt_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq)) &&
	    (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
		    txr->dev_state != BNXT_DEV_STATE_CLOSING)
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
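
/* Allocate an RX page from the per-ring page_pool and map it for DMA.
 * The returned mapping is advanced by rx_dma_offset to match the payload
 * offset used by the RX path.
 */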
static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
					 struct bnxt_rx_ring_info *rxr,
					 gfp_t gfp)
{
	struct device *dev = &bp->pdev->dev;
	struct page *page;

	page = page_pool_dev_alloc_pages(rxr->page_pool);
	if (!page)
		return NULL;

	*mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
				      DMA_ATTR_WEAK_ORDERING);
	if (dma_mapping_error(dev, *mapping)) {
		page_pool_recycle_direct(rxr->page_pool, page);
		return NULL;
	}
	*mapping += bp->rx_dma_offset;
	return page;
}
static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
				       gfp_t gfp)
{
	u8 *data;
	struct pci_dev *pdev = bp->pdev;

	data = kmalloc(bp->rx_buf_size, gfp);
	if (!data)
		return NULL;

	*mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
					bp->rx_buf_use_size, bp->rx_dir,
					DMA_ATTR_WEAK_ORDERING);

	if (dma_mapping_error(&pdev->dev, *mapping)) {
		kfree(data);
		data = NULL;
	}
	return data;
}
int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
		       u16 prod, gfp_t gfp)
{
	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
	dma_addr_t mapping;

	if (BNXT_RX_PAGE_MODE(bp)) {
		struct page *page =
			__bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);

		if (!page)
			return -ENOMEM;

		rx_buf->data = page;
		rx_buf->data_ptr = page_address(page) + bp->rx_offset;
	} else {
		u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp);

		if (!data)
			return -ENOMEM;

		rx_buf->data = data;
		rx_buf->data_ptr = data + bp->rx_offset;
	}
	rx_buf->mapping = mapping;

	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
	return 0;
}
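
/* Recycle an RX buffer that is not being passed up the stack: move it from
 * the consumer slot back to the current producer slot so it can be reused.
 */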
void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
{
	u16 prod = rxr->rx_prod;
	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	prod_rx_buf = &rxr->rx_buf_ring[prod];
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	prod_rx_buf->data = data;
	prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;

	prod_rx_buf->mapping = cons_rx_buf->mapping;

	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];

	prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
}
static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
	u16 next, max = rxr->rx_agg_bmap_size;

	next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
	if (next >= max)
		next = find_first_zero_bit(rxr->rx_agg_bmap, max);
	return next;
}
static inline int bnxt_alloc_rx_page(struct bnxt *bp,
				     struct bnxt_rx_ring_info *rxr,
				     u16 prod, gfp_t gfp)
{
	struct rx_bd *rxbd =
		&rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_agg_bd *rx_agg_buf;
	struct pci_dev *pdev = bp->pdev;
	struct page *page;
	dma_addr_t mapping;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	unsigned int offset = 0;

	if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
		page = rxr->rx_page;
		if (!page) {
			page = alloc_page(gfp);
			if (!page)
				return -ENOMEM;
			rxr->rx_page = page;
			rxr->rx_page_offset = 0;
		}
		offset = rxr->rx_page_offset;
		rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
		if (rxr->rx_page_offset == PAGE_SIZE)
			rxr->rx_page = NULL;
		else
			get_page(page);
	} else {
		page = alloc_page(gfp);
		if (!page)
			return -ENOMEM;
	}

	mapping = dma_map_page_attrs(&pdev->dev, page, offset,
				     BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE,
				     DMA_ATTR_WEAK_ORDERING);
	if (dma_mapping_error(&pdev->dev, mapping)) {
		__free_page(page);
		return -EIO;
	}

	if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
		sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

	__set_bit(sw_prod, rxr->rx_agg_bmap);
	rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
	rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);

	rx_agg_buf->page = page;
	rx_agg_buf->offset = offset;
	rx_agg_buf->mapping = mapping;
	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
	rxbd->rx_bd_opaque = sw_prod;
	return 0;
}
static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
				       struct bnxt_cp_ring_info *cpr,
				       u16 cp_cons, u16 curr)
{
	struct rx_agg_cmp *agg;

	cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr));
	agg = (struct rx_agg_cmp *)
		&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
	return agg;
}
static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp,
					      struct bnxt_rx_ring_info *rxr,
					      u16 agg_id, u16 curr)
{
	struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];

	return &tpa_info->agg_arr[curr];
}
static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
				   u16 start, u32 agg_bufs, bool tpa)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct bnxt *bp = bnapi->bp;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u16 prod = rxr->rx_agg_prod;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	bool p5_tpa = false;
	u32 i;

	if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
		p5_tpa = true;

	for (i = 0; i < agg_bufs; i++) {
		u16 cons;
		struct rx_agg_cmp *agg;
		struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
		struct rx_bd *prod_bd;
		struct page *page;

		if (p5_tpa)
			agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
		else
			agg = bnxt_get_agg(bp, cpr, idx, start + i);
		cons = agg->rx_agg_cmp_opaque;
		__clear_bit(cons, rxr->rx_agg_bmap);

		if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
			sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

		__set_bit(sw_prod, rxr->rx_agg_bmap);
		prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
		cons_rx_buf = &rxr->rx_agg_ring[cons];

		/* It is possible for sw_prod to be equal to cons, so
		 * set cons_rx_buf->page to NULL first.
		 */
		page = cons_rx_buf->page;
		cons_rx_buf->page = NULL;
		prod_rx_buf->page = page;
		prod_rx_buf->offset = cons_rx_buf->offset;

		prod_rx_buf->mapping = cons_rx_buf->mapping;

		prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
		prod_bd->rx_bd_opaque = sw_prod;

		prod = NEXT_RX_AGG(prod);
		sw_prod = NEXT_RX_AGG(sw_prod);
	}
	rxr->rx_agg_prod = prod;
	rxr->rx_sw_agg_prod = sw_prod;
}
static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
					struct bnxt_rx_ring_info *rxr,
					u16 cons, void *data, u8 *data_ptr,
					dma_addr_t dma_addr,
					unsigned int offset_and_len)
{
	unsigned int payload = offset_and_len >> 16;
	unsigned int len = offset_and_len & 0xffff;
	skb_frag_t *frag;
	struct page *page = data;
	u16 prod = rxr->rx_prod;
	struct sk_buff *skb;
	int off, err;

	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnxt_reuse_rx_data(rxr, cons, data);
		return NULL;
	}
	dma_addr -= bp->rx_dma_offset;
	dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
			     DMA_ATTR_WEAK_ORDERING);
	page_pool_release_page(rxr->page_pool, page);

	if (unlikely(!payload))
		payload = eth_get_headlen(bp->dev, data_ptr, len);

	skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
	if (!skb) {
		__free_page(page);
		return NULL;
	}

	off = (void *)data_ptr - page_address(page);
	skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
	memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
	       payload + NET_IP_ALIGN);

	frag = &skb_shinfo(skb)->frags[0];
	skb_frag_size_sub(frag, payload);
	skb_frag_off_add(frag, payload);
	skb->data_len -= payload;
	skb->tail += payload;

	return skb;
}
static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
				   struct bnxt_rx_ring_info *rxr, u16 cons,
				   void *data, u8 *data_ptr,
				   dma_addr_t dma_addr,
				   unsigned int offset_and_len)
{
	u16 prod = rxr->rx_prod;
	struct sk_buff *skb;
	int err;

	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnxt_reuse_rx_data(rxr, cons, data);
		return NULL;
	}

	skb = build_skb(data, 0);
	dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
			       bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
	if (!skb) {
		kfree(data);
		return NULL;
	}

	skb_reserve(skb, bp->rx_offset);
	skb_put(skb, offset_and_len & 0xffff);
	return skb;
}
static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
				     struct bnxt_cp_ring_info *cpr,
				     struct sk_buff *skb, u16 idx,
				     u32 agg_bufs, bool tpa)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u16 prod = rxr->rx_agg_prod;
	bool p5_tpa = false;
	u32 i;

	if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
		p5_tpa = true;

	for (i = 0; i < agg_bufs; i++) {
		u16 cons, frag_len;
		struct rx_agg_cmp *agg;
		struct bnxt_sw_rx_agg_bd *cons_rx_buf;
		struct page *page;
		dma_addr_t mapping;

		if (p5_tpa)
			agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
		else
			agg = bnxt_get_agg(bp, cpr, idx, i);
		cons = agg->rx_agg_cmp_opaque;
		frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
			    RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;

		cons_rx_buf = &rxr->rx_agg_ring[cons];
		skb_fill_page_desc(skb, i, cons_rx_buf->page,
				   cons_rx_buf->offset, frag_len);
		__clear_bit(cons, rxr->rx_agg_bmap);

		/* It is possible for bnxt_alloc_rx_page() to allocate
		 * a sw_prod index that equals the cons index, so we
		 * need to clear the cons entry now.
		 */
		mapping = cons_rx_buf->mapping;
		page = cons_rx_buf->page;
		cons_rx_buf->page = NULL;

		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
			struct skb_shared_info *shinfo;
			unsigned int nr_frags;

			shinfo = skb_shinfo(skb);
			nr_frags = --shinfo->nr_frags;
			__skb_frag_set_page(&shinfo->frags[nr_frags], NULL);

			dev_kfree_skb(skb);

			cons_rx_buf->page = page;

			/* Update prod since possibly some pages have been
			 * allocated already.
			 */
			rxr->rx_agg_prod = prod;
			bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
			return NULL;
		}

		dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
				     PCI_DMA_FROMDEVICE,
				     DMA_ATTR_WEAK_ORDERING);

		skb->data_len += frag_len;
		skb->len += frag_len;
		skb->truesize += PAGE_SIZE;

		prod = NEXT_RX_AGG(prod);
	}
	rxr->rx_agg_prod = prod;
	return skb;
}
static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			       u8 agg_bufs, u32 *raw_cons)
{
	u16 last;
	struct rx_agg_cmp *agg;

	*raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
	last = RING_CMP(*raw_cons);
	agg = (struct rx_agg_cmp *)
		&cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
	return RX_AGG_CMP_VALID(agg, *raw_cons);
}
static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
					    unsigned int len,
					    dma_addr_t mapping)
{
	struct bnxt *bp = bnapi->bp;
	struct pci_dev *pdev = bp->pdev;
	struct sk_buff *skb;

	skb = napi_alloc_skb(&bnapi->napi, len);
	if (!skb)
		return NULL;

	dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
				bp->rx_dir);

	memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
	       len + NET_IP_ALIGN);

	dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
				   bp->rx_dir);

	skb_put(skb, len);
	return skb;
}
static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			   u32 *raw_cons, void *cmp)
{
	struct rx_cmp *rxcmp = cmp;
	u32 tmp_raw_cons = *raw_cons;
	u8 cmp_type, agg_bufs = 0;

	cmp_type = RX_CMP_TYPE(rxcmp);

	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
		agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
			    RX_CMP_AGG_BUFS) >>
			   RX_CMP_AGG_BUFS_SHIFT;
	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
		struct rx_tpa_end_cmp *tpa_end = cmp;

		if (bp->flags & BNXT_FLAG_CHIP_P5)
			return 0;

		agg_bufs = TPA_END_AGG_BUFS(tpa_end);
	}

	if (agg_bufs) {
		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
			return -EBUSY;
	}
	*raw_cons = tmp_raw_cons;
	return 0;
}
static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
{
	if (BNXT_PF(bp))
		queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
	else
		schedule_delayed_work(&bp->fw_reset_task, delay);
}
static void bnxt_queue_sp_work(struct bnxt *bp)
{
	if (BNXT_PF(bp))
		queue_work(bnxt_pf_wq, &bp->sp_task);
	else
		schedule_work(&bp->sp_task);
}
static void bnxt_cancel_sp_work(struct bnxt *bp)
{
	if (BNXT_PF(bp))
		flush_workqueue(bnxt_pf_wq);
	else
		cancel_work_sync(&bp->sp_task);
}
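
/* Schedule a reset via the slow-path task and poison rx_next_cons so that
 * further completions on this ring are discarded until the reset runs.
 */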
static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
	if (!rxr->bnapi->in_reset) {
		rxr->bnapi->in_reset = true;
		set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
		bnxt_queue_sp_work(bp);
	}
	rxr->rx_next_cons = 0xffff;
}
static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
{
	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
	u16 idx = agg_id & MAX_TPA_P5_MASK;

	if (test_bit(idx, map->agg_idx_bmap))
		idx = find_first_zero_bit(map->agg_idx_bmap,
					  BNXT_AGG_IDX_BMAP_SIZE);
	__set_bit(idx, map->agg_idx_bmap);
	map->agg_id_tbl[agg_id] = idx;
	return idx;
}
static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;

	__clear_bit(idx, map->agg_idx_bmap);
}
static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
{
	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;

	return map->agg_id_tbl[agg_id];
}
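
/* TPA_START: stash the aggregation state (buffer, hash, metadata) in
 * rx_tpa[] and recycle the ring slots; the skb is built at TPA_END time.
 */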
static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
			   struct rx_tpa_start_cmp *tpa_start,
			   struct rx_tpa_start_cmp_ext *tpa_start1)
{
	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct bnxt_tpa_info *tpa_info;
	u16 cons, prod, agg_id;
	struct rx_bd *prod_bd;
	dma_addr_t mapping;

	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		agg_id = TPA_START_AGG_ID_P5(tpa_start);
		agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
	} else {
		agg_id = TPA_START_AGG_ID(tpa_start);
	}
	cons = tpa_start->rx_tpa_start_cmp_opaque;
	prod = rxr->rx_prod;
	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];
	tpa_info = &rxr->rx_tpa[agg_id];

	if (unlikely(cons != rxr->rx_next_cons ||
		     TPA_START_ERROR(tpa_start))) {
		netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
			    cons, rxr->rx_next_cons,
			    TPA_START_ERROR_CODE(tpa_start1));
		bnxt_sched_reset(bp, rxr);
		return;
	}
	/* Store cfa_code in tpa_info to use in tpa_end
	 * completion processing.
	 */
	tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
	prod_rx_buf->data = tpa_info->data;
	prod_rx_buf->data_ptr = tpa_info->data_ptr;

	mapping = tpa_info->mapping;
	prod_rx_buf->mapping = mapping;

	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];

	prod_bd->rx_bd_haddr = cpu_to_le64(mapping);

	tpa_info->data = cons_rx_buf->data;
	tpa_info->data_ptr = cons_rx_buf->data_ptr;
	cons_rx_buf->data = NULL;
	tpa_info->mapping = cons_rx_buf->mapping;

	tpa_info->len =
		le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
				RX_TPA_START_CMP_LEN_SHIFT;
	if (likely(TPA_START_HASH_VALID(tpa_start))) {
		u32 hash_type = TPA_START_HASH_TYPE(tpa_start);

		tpa_info->hash_type = PKT_HASH_TYPE_L4;
		tpa_info->gso_type = SKB_GSO_TCPV4;
		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
		if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1))
			tpa_info->gso_type = SKB_GSO_TCPV6;
		tpa_info->rss_hash =
			le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
	} else {
		tpa_info->hash_type = PKT_HASH_TYPE_NONE;
		tpa_info->gso_type = 0;
		if (netif_msg_rx_err(bp))
			netdev_warn(bp->dev, "TPA packet without valid hash\n");
	}
	tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
	tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
	tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
	tpa_info->agg_count = 0;

	rxr->rx_prod = NEXT_RX(prod);
	cons = NEXT_RX(cons);
	rxr->rx_next_cons = NEXT_RX(cons);
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
	rxr->rx_prod = NEXT_RX(rxr->rx_prod);
	cons_rx_buf->data = NULL;
}
static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
{
	if (agg_bufs)
		bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
}
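
/* For tunneled TPA packets, mark the skb GSO type as UDP tunnel; which
 * variant is used depends on the outer UDP header.
 */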
static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
{
	struct udphdr *uh = NULL;

	if (ip_proto == htons(ETH_P_IP)) {
		struct iphdr *iph = (struct iphdr *)skb->data;

		if (iph->protocol == IPPROTO_UDP)
			uh = (struct udphdr *)(iph + 1);
	} else {
		struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;

		if (iph->nexthdr == IPPROTO_UDP)
			uh = (struct udphdr *)(iph + 1);
	}
	if (uh) {
		if (uh->check)
			skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
		else
			skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
	}
}
static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
	struct tcphdr *th;
	int len, nw_off;
	u16 outer_ip_off, inner_ip_off, inner_mac_off;
	u32 hdr_info = tpa_info->hdr_info;
	bool loopback = false;

	inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
	inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
	outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);

	/* If the packet is an internal loopback packet, the offsets will
	 * have an extra 4 bytes.
	 */
	if (inner_mac_off == 4) {
		loopback = true;
	} else if (inner_mac_off > 4) {
		__be16 proto = *((__be16 *)(skb->data + inner_ip_off -
					    ETH_HLEN - 2));

		/* We only support inner iPv4/ipv6.  If we don't see the
		 * correct protocol ID, it must be a loopback packet where
		 * the offsets are off by 4.
		 */
		if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
			loopback = true;
	}
	if (loopback) {
		/* internal loopback packet, subtract all offsets by 4 */
		inner_ip_off -= 4;
		inner_mac_off -= 4;
		outer_ip_off -= 4;
	}

	nw_off = inner_ip_off - ETH_HLEN;
	skb_set_network_header(skb, nw_off);
	if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
		struct ipv6hdr *iph = ipv6_hdr(skb);

		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
	} else {
		struct iphdr *iph = ip_hdr(skb);

		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
	}

	if (inner_mac_off) { /* tunnel */
		__be16 proto = *((__be16 *)(skb->data + outer_ip_off -
					    ETH_HLEN - 2));

		bnxt_gro_tunnel(skb, proto);
	}
	return skb;
}
static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
	u16 outer_ip_off, inner_ip_off, inner_mac_off;
	u32 hdr_info = tpa_info->hdr_info;
	int iphdr_len, nw_off;

	inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
	inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
	outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);

	nw_off = inner_ip_off - ETH_HLEN;
	skb_set_network_header(skb, nw_off);
	iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
		     sizeof(struct ipv6hdr) : sizeof(struct iphdr);
	skb_set_transport_header(skb, nw_off + iphdr_len);

	if (inner_mac_off) { /* tunnel */
		__be16 proto = *((__be16 *)(skb->data + outer_ip_off -
					    ETH_HLEN - 2));

		bnxt_gro_tunnel(skb, proto);
	}
	return skb;
}

#define BNXT_IPV4_HDR_SIZE	(sizeof(struct iphdr) + sizeof(struct tcphdr))
#define BNXT_IPV6_HDR_SIZE	(sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
	struct tcphdr *th;
	int len, nw_off, tcp_opt_len = 0;

	if (tcp_ts)
		tcp_opt_len = 12;

	if (tpa_info->gso_type == SKB_GSO_TCPV4) {
		struct iphdr *iph;

		nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
			 ETH_HLEN;
		skb_set_network_header(skb, nw_off);
		iph = ip_hdr(skb);
		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
	} else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
		struct ipv6hdr *iph;

		nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
			 ETH_HLEN;
		skb_set_network_header(skb, nw_off);
		iph = ipv6_hdr(skb);
		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
	} else {
		dev_kfree_skb_any(skb);
		return NULL;
	}

	if (nw_off) /* tunnel */
		bnxt_gro_tunnel(skb, skb->protocol);
	return skb;
}
static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
					   struct bnxt_tpa_info *tpa_info,
					   struct rx_tpa_end_cmp *tpa_end,
					   struct rx_tpa_end_cmp_ext *tpa_end1,
					   struct sk_buff *skb)
{
	int payload_off;
	u16 segs;

	segs = TPA_END_TPA_SEGS(tpa_end);
	if (segs == 1)
		return skb;

	NAPI_GRO_CB(skb)->count = segs;
	skb_shinfo(skb)->gso_size =
		le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
	skb_shinfo(skb)->gso_type = tpa_info->gso_type;
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
	else
		payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
	skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
	if (likely(skb))
		tcp_gro_complete(skb);
	return skb;
}
/* Given the cfa_code of a received packet determine which
 * netdev (vf-rep or PF) the packet is destined to.
 */
static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
{
	struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);

	/* if vf-rep dev is NULL, the packet must belong to the PF */
	return dev ? dev : bp->dev;
}
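
/* TPA_END: the aggregated packet is complete.  Build the skb from the
 * saved TPA buffer plus any aggregation buffers, then hand it to GRO.
 */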
static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
					   struct bnxt_cp_ring_info *cpr,
					   u32 *raw_cons,
					   struct rx_tpa_end_cmp *tpa_end,
					   struct rx_tpa_end_cmp_ext *tpa_end1,
					   u8 *event)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u8 *data_ptr, agg_bufs;
	unsigned int len;
	struct bnxt_tpa_info *tpa_info;
	dma_addr_t mapping;
	struct sk_buff *skb;
	u16 idx = 0, agg_id;
	void *data;
	bool gro;

	if (unlikely(bnapi->in_reset)) {
		int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);

		if (rc < 0)
			return ERR_PTR(-EBUSY);
		return NULL;
	}

	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		agg_id = TPA_END_AGG_ID_P5(tpa_end);
		agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
		agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1);
		tpa_info = &rxr->rx_tpa[agg_id];
		if (unlikely(agg_bufs != tpa_info->agg_count)) {
			netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n",
				    agg_bufs, tpa_info->agg_count);
			agg_bufs = tpa_info->agg_count;
		}
		tpa_info->agg_count = 0;
		*event |= BNXT_AGG_EVENT;
		bnxt_free_agg_idx(rxr, agg_id);
		idx = agg_id;
		gro = !!(bp->flags & BNXT_FLAG_GRO);
	} else {
		agg_id = TPA_END_AGG_ID(tpa_end);
		agg_bufs = TPA_END_AGG_BUFS(tpa_end);
		tpa_info = &rxr->rx_tpa[agg_id];
		idx = RING_CMP(*raw_cons);
		if (agg_bufs) {
			if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
				return ERR_PTR(-EBUSY);

			*event |= BNXT_AGG_EVENT;
			idx = NEXT_CMP(idx);
		}
		gro = !!TPA_END_GRO(tpa_end);
	}
	data = tpa_info->data;
	data_ptr = tpa_info->data_ptr;
	prefetch(data_ptr);
	len = tpa_info->len;
	mapping = tpa_info->mapping;

	if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
		bnxt_abort_tpa(cpr, idx, agg_bufs);
		if (agg_bufs > MAX_SKB_FRAGS)
			netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
				    agg_bufs, (int)MAX_SKB_FRAGS);
		return NULL;
	}

	if (len <= bp->rx_copy_thresh) {
		skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
		if (!skb) {
			bnxt_abort_tpa(cpr, idx, agg_bufs);
			return NULL;
		}
	} else {
		u8 *new_data;
		dma_addr_t new_mapping;

		new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
		if (!new_data) {
			bnxt_abort_tpa(cpr, idx, agg_bufs);
			return NULL;
		}

		tpa_info->data = new_data;
		tpa_info->data_ptr = new_data + bp->rx_offset;
		tpa_info->mapping = new_mapping;

		skb = build_skb(data, 0);
		dma_unmap_single_attrs(&bp->pdev->dev, mapping,
				       bp->rx_buf_use_size, bp->rx_dir,
				       DMA_ATTR_WEAK_ORDERING);

		if (!skb) {
			kfree(data);
			bnxt_abort_tpa(cpr, idx, agg_bufs);
			return NULL;
		}
		skb_reserve(skb, bp->rx_offset);
		skb_put(skb, len);
	}

	if (agg_bufs) {
		skb = bnxt_rx_pages(bp, cpr, skb, idx, agg_bufs, true);
		if (!skb) {
			/* Page reuse already handled by bnxt_rx_pages(). */
			return NULL;
		}
	}

	skb->protocol =
		eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code));

	if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
		skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);

	if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
	    (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
		u16 vlan_proto = tpa_info->metadata >>
			RX_CMP_FLAGS2_METADATA_TPID_SFT;
		u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;

		__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
	}

	skb_checksum_none_assert(skb);
	if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->csum_level =
			(tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
	}

	if (gro)
		skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);

	return skb;
}
static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
			 struct rx_agg_cmp *rx_agg)
{
	u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
	struct bnxt_tpa_info *tpa_info;

	agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
	tpa_info = &rxr->rx_tpa[agg_id];
	BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS);
	tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
}
static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
			     struct sk_buff *skb)
{
	if (skb->dev != bp->dev) {
		/* this packet belongs to a vf-rep */
		bnxt_vf_rep_rx(bp, skb);
		return;
	}
	skb_record_rx_queue(skb, bnapi->index);
	napi_gro_receive(&bnapi->napi, skb);
}
/* returns the following:
 * 1       - 1 packet successfully received
 * 0       - successful TPA_START, packet not completed yet
 * -EBUSY  - completion ring does not have all the agg buffers yet
 * -ENOMEM - packet aborted due to out of memory
 * -EIO    - packet aborted due to hw error indicated in BD
 */
static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
		       u32 *raw_cons, u8 *event)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	struct net_device *dev = bp->dev;
	struct rx_cmp *rxcmp;
	struct rx_cmp_ext *rxcmp1;
	u32 tmp_raw_cons = *raw_cons;
	u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
	struct bnxt_sw_rx_bd *rx_buf;
	unsigned int len;
	u8 *data_ptr, agg_bufs, cmp_type;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	void *data;
	int rc = 0;
	u32 misc;

	rxcmp = (struct rx_cmp *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

	cmp_type = RX_CMP_TYPE(rxcmp);

	if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) {
		bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp);
		goto next_rx_no_prod_no_len;
	}

	tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
	cp_cons = RING_CMP(tmp_raw_cons);
	rxcmp1 = (struct rx_cmp_ext *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

	if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
		return -EBUSY;

	prod = rxr->rx_prod;

	if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
		bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
			       (struct rx_tpa_start_cmp_ext *)rxcmp1);

		*event |= BNXT_RX_EVENT;
		goto next_rx_no_prod_no_len;

	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
		skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
				   (struct rx_tpa_end_cmp *)rxcmp,
				   (struct rx_tpa_end_cmp_ext *)rxcmp1, event);

		if (IS_ERR(skb))
			return -EBUSY;

		rc = -ENOMEM;
		if (likely(skb)) {
			bnxt_deliver_skb(bp, bnapi, skb);
			rc = 1;
		}
		*event |= BNXT_RX_EVENT;
		goto next_rx_no_prod_no_len;
	}

	cons = rxcmp->rx_cmp_opaque;
	if (unlikely(cons != rxr->rx_next_cons)) {
		int rc1 = bnxt_discard_rx(bp, cpr, raw_cons, rxcmp);

		netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
			    cons, rxr->rx_next_cons);
		bnxt_sched_reset(bp, rxr);
		return rc1;
	}
	rx_buf = &rxr->rx_buf_ring[cons];
	data = rx_buf->data;
	data_ptr = rx_buf->data_ptr;
	prefetch(data_ptr);

	misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
	agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;

	if (agg_bufs) {
		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
			return -EBUSY;

		cp_cons = NEXT_CMP(cp_cons);
		*event |= BNXT_AGG_EVENT;
	}
	*event |= BNXT_RX_EVENT;

	rx_buf->data = NULL;
	if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
		u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);

		bnxt_reuse_rx_data(rxr, cons, data);
		if (agg_bufs)
			bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
					       false);

		rc = -EIO;
		if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
			bnapi->cp_ring.rx_buf_errors++;
			if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
				netdev_warn(bp->dev, "RX buffer error %x\n",
					    rx_err);
				bnxt_sched_reset(bp, rxr);
			}
		}
		goto next_rx_no_len;
	}

	len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
	dma_addr = rx_buf->mapping;

	if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
		rc = 1;
		goto next_rx;
	}

	if (len <= bp->rx_copy_thresh) {
		skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
		bnxt_reuse_rx_data(rxr, cons, data);
		if (!skb) {
			if (agg_bufs)
				bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
						       agg_bufs, false);
			rc = -ENOMEM;
			goto next_rx;
		}
	} else {
		u32 payload;

		if (rx_buf->data_ptr == data_ptr)
			payload = misc & RX_CMP_PAYLOAD_OFFSET;
		else
			payload = 0;
		skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
				      payload | len);
		if (!skb) {
			rc = -ENOMEM;
			goto next_rx;
		}
	}

	if (agg_bufs) {
		skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs, false);
		if (!skb) {
			rc = -ENOMEM;
			goto next_rx;
		}
	}

	if (RX_CMP_HASH_VALID(rxcmp)) {
		u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
		enum pkt_hash_types type = PKT_HASH_TYPE_L4;

		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
		if (hash_type != 1 && hash_type != 3)
			type = PKT_HASH_TYPE_L3;
		skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
	}

	cfa_code = RX_CMP_CFA_CODE(rxcmp1);
	skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code));

	if ((rxcmp1->rx_cmp_flags2 &
	     cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
	    (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
		u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
		u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
		u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;

		__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
	}

	skb_checksum_none_assert(skb);
	if (RX_CMP_L4_CS_OK(rxcmp1)) {
		if (dev->features & NETIF_F_RXCSUM) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			skb->csum_level = RX_CMP_ENCAP(rxcmp1);
		}
	} else {
		if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
			if (dev->features & NETIF_F_RXCSUM)
				bnapi->cp_ring.rx_l4_csum_errors++;
		}
	}

	bnxt_deliver_skb(bp, bnapi, skb);
	rc = 1;

next_rx:
	cpr->rx_packets += 1;
	cpr->rx_bytes += len;

next_rx_no_len:
	rxr->rx_prod = NEXT_RX(prod);
	rxr->rx_next_cons = NEXT_RX(cons);

next_rx_no_prod_no_len:
	*raw_cons = tmp_raw_cons;

	return rc;
}
/* In netpoll mode, if we are using a combined completion ring, we need to
 * discard the rx packets and recycle the buffers.
 */
static int bnxt_force_rx_discard(struct bnxt *bp,
				 struct bnxt_cp_ring_info *cpr,
				 u32 *raw_cons, u8 *event)
{
	u32 tmp_raw_cons = *raw_cons;
	struct rx_cmp_ext *rxcmp1;
	struct rx_cmp *rxcmp;
	u16 cp_cons;
	u8 cmp_type;

	cp_cons = RING_CMP(tmp_raw_cons);
	rxcmp = (struct rx_cmp *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

	tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
	cp_cons = RING_CMP(tmp_raw_cons);
	rxcmp1 = (struct rx_cmp_ext *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

	if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
		return -EBUSY;

	cmp_type = RX_CMP_TYPE(rxcmp);
	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
		rxcmp1->rx_cmp_cfa_code_errors_v2 |=
			cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
		struct rx_tpa_end_cmp_ext *tpa_end1;

		tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
		tpa_end1->rx_tpa_end_cmp_errors_v2 |=
			cpu_to_le32(RX_TPA_END_CMP_ERRORS);
	}
	return bnxt_rx_pkt(bp, cpr, raw_cons, event);
}
u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx)
{
	struct bnxt_fw_health *fw_health = bp->fw_health;
	u32 reg = fw_health->regs[reg_idx];
	u32 reg_type, reg_off, val = 0;

	reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
	reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
	switch (reg_type) {
	case BNXT_FW_HEALTH_REG_TYPE_CFG:
		pci_read_config_dword(bp->pdev, reg_off, &val);
		break;
	case BNXT_FW_HEALTH_REG_TYPE_GRC:
		reg_off = fw_health->mapped_regs[reg_idx];
		/* fall through */
	case BNXT_FW_HEALTH_REG_TYPE_BAR0:
		val = readl(bp->bar0 + reg_off);
		break;
	case BNXT_FW_HEALTH_REG_TYPE_BAR1:
		val = readl(bp->bar1 + reg_off);
		break;
	}
	if (reg_idx == BNXT_FW_RESET_INPROG_REG)
		val &= fw_health->fw_reset_inprog_reg_mask;
	return val;
}

#define BNXT_GET_EVENT_PORT(data)	\
	((data) &			\
	 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
1947 static int bnxt_async_event_process(struct bnxt
*bp
,
1948 struct hwrm_async_event_cmpl
*cmpl
)
1950 u16 event_id
= le16_to_cpu(cmpl
->event_id
);
	/* TODO CHIMP_FW: Define event id's for link change, error etc */
	switch (event_id) {
	case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
		u32 data1 = le32_to_cpu(cmpl->event_data1);
		struct bnxt_link_info *link_info = &bp->link_info;

		if (BNXT_VF(bp))
			goto async_event_process_exit;

		/* print unsupported speed warning in forced speed mode only */
		if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
		    (data1 & 0x20000)) {
			u16 fw_speed = link_info->force_link_speed;
			u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);

			if (speed != SPEED_UNKNOWN)
				netdev_warn(bp->dev, "Link speed %d no longer supported\n",
					    speed);
		}
		set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
	}
	/* fall through */
	case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
	case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE:
		set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event);
		/* fall through */
	case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
		set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
		break;
	case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
		set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
		break;
	case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
		u32 data1 = le32_to_cpu(cmpl->event_data1);
		u16 port_id = BNXT_GET_EVENT_PORT(data1);

		if (BNXT_VF(bp))
			break;

		if (bp->pf.port_id != port_id)
			break;

		set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
		break;
	}
	case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
		if (BNXT_PF(bp))
			goto async_event_process_exit;
		set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
		break;
	case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
		u32 data1 = le32_to_cpu(cmpl->event_data1);

		if (!bp->fw_health)
			goto async_event_process_exit;

		bp->fw_reset_timestamp = jiffies;
		bp->fw_reset_min_dsecs = cmpl->timestamp_lo;
		if (!bp->fw_reset_min_dsecs)
			bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS;
		bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi);
		if (!bp->fw_reset_max_dsecs)
			bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
		if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
			netdev_warn(bp->dev, "Firmware fatal reset event received\n");
			set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
		} else {
			netdev_warn(bp->dev, "Firmware non-fatal reset event received, max wait time %d msec\n",
				    bp->fw_reset_max_dsecs * 100);
		}
		set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
		break;
	}
	case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
		struct bnxt_fw_health *fw_health = bp->fw_health;
		u32 data1 = le32_to_cpu(cmpl->event_data1);

		if (!fw_health)
			goto async_event_process_exit;

		fw_health->enabled = EVENT_DATA1_RECOVERY_ENABLED(data1);
		fw_health->master = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
		if (!fw_health->enabled)
			break;

		if (netif_msg_drv(bp))
			netdev_info(bp->dev, "Error recovery info: error recovery[%d], master[%d], reset count[0x%x], health status: 0x%x\n",
				    fw_health->enabled, fw_health->master,
				    bnxt_fw_health_readl(bp,
							 BNXT_FW_RESET_CNT_REG),
				    bnxt_fw_health_readl(bp,
							 BNXT_FW_HEALTH_REG));
		fw_health->tmr_multiplier =
			DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
				     bp->current_interval * 10);
		fw_health->tmr_counter = fw_health->tmr_multiplier;
		fw_health->last_fw_heartbeat =
			bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
		fw_health->last_fw_reset_cnt =
			bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
		goto async_event_process_exit;
	}
	default:
		goto async_event_process_exit;
	}
	bnxt_queue_sp_work(bp);
async_event_process_exit:
	bnxt_ulp_async_events(bp, cmpl);
	return 0;
}
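
/* Dispatch a HWRM-related completion: DONE completions acknowledge an
 * outstanding firmware command, FWD_REQ completions queue forwarded VF
 * requests for the PF worker, and ASYNC_EVENT completions are handed to
 * bnxt_async_event_process() above.
 */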
static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
{
	u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
	struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
	struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
		(struct hwrm_fwd_req_cmpl *)txcmp;

	switch (cmpl_type) {
	case CMPL_BASE_TYPE_HWRM_DONE:
		seq_id = le16_to_cpu(h_cmpl->sequence_id);
		if (seq_id == bp->hwrm_intr_seq_id)
			bp->hwrm_intr_seq_id = (u16)~bp->hwrm_intr_seq_id;
		else
			netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
		break;

	case CMPL_BASE_TYPE_HWRM_FWD_REQ:
		vf_id = le16_to_cpu(fwd_req_cmpl->source_id);

		if ((vf_id < bp->pf.first_vf_id) ||
		    (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
			netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
				   vf_id);
			return -EINVAL;
		}

		set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
		set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
		bnxt_queue_sp_work(bp);
		break;

	case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
		bnxt_async_event_process(bp,
					 (struct hwrm_async_event_cmpl *)txcmp);
		break;

	default:
		break;
	}

	return 0;
}
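
/* MSI-X interrupt handler: each completion ring has its own vector, so the
 * handler only needs to prefetch the next completion descriptor and kick
 * NAPI for this ring's bnxt_napi instance.
 */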
static irqreturn_t bnxt_msix(int irq, void *dev_instance)
{
	struct bnxt_napi *bnapi = dev_instance;
	struct bnxt *bp = bnapi->bp;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	u32 cons = RING_CMP(cpr->cp_raw_cons);

	cpr->event_ctr++;
	prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
	napi_schedule(&bnapi->napi);
	return IRQ_HANDLED;
}
static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
	u32 raw_cons = cpr->cp_raw_cons;
	u16 cons = RING_CMP(raw_cons);
	struct tx_cmp *txcmp;

	txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];

	return TX_CMP_VALID(txcmp, raw_cons);
}
static irqreturn_t bnxt_inta(int irq, void *dev_instance)
{
	struct bnxt_napi *bnapi = dev_instance;
	struct bnxt *bp = bnapi->bp;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	u32 cons = RING_CMP(cpr->cp_raw_cons);
	u32 int_status;

	prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);

	if (!bnxt_has_work(bp, cpr)) {
		int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
		/* return if erroneous interrupt */
		if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
			return IRQ_NONE;
	}

	/* disable ring IRQ */
	BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);
	return IRQ_HANDLED;
}
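
/* Core completion-ring poll loop shared by the NAPI variants below: walks
 * valid completion descriptors, counts TX completions, hands RX/TPA
 * completions to bnxt_rx_pkt(), forwards HWRM completions, and records in
 * cpr->has_more_work whether the caller still has pending work.
 */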
static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			    int budget)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	u32 raw_cons = cpr->cp_raw_cons;
	u32 cons;
	int tx_pkts = 0;
	int rx_pkts = 0;
	u8 event = 0;
	struct tx_cmp *txcmp;

	cpr->has_more_work = 0;
	while (1) {
		int rc;

		cons = RING_CMP(raw_cons);
		txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];

		if (!TX_CMP_VALID(txcmp, raw_cons))
			break;

		/* The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();
		cpr->had_work_done = 1;
		if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
			tx_pkts++;
			/* return full budget so NAPI will complete. */
			if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
				rx_pkts = budget;
				raw_cons = NEXT_RAW_CMP(raw_cons);
				if (budget)
					cpr->has_more_work = 1;
				break;
			}
		} else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
			if (likely(budget))
				rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
			else
				rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
							   &event);
			if (likely(rc >= 0))
				rx_pkts += rc;
			/* Increment rx_pkts when rc is -ENOMEM to count towards
			 * the NAPI budget.  Otherwise, we may potentially loop
			 * here forever if we consistently cannot allocate
			 * buffers.
			 */
			else if (rc == -ENOMEM && budget)
				rx_pkts++;
			else if (rc == -EBUSY)	/* partial completion */
				break;
		} else if (unlikely((TX_CMP_TYPE(txcmp) ==
				     CMPL_BASE_TYPE_HWRM_DONE) ||
				    (TX_CMP_TYPE(txcmp) ==
				     CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
				    (TX_CMP_TYPE(txcmp) ==
				     CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
			bnxt_hwrm_handler(bp, txcmp);
		}
		raw_cons = NEXT_RAW_CMP(raw_cons);

		if (rx_pkts && rx_pkts == budget) {
			cpr->has_more_work = 1;
			break;
		}
	}

	if (event & BNXT_REDIRECT_EVENT)
		xdp_do_flush_map();

	if (event & BNXT_TX_EVENT) {
		struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
		u16 prod = txr->tx_prod;

		/* Sync BD data before updating doorbell */
		wmb();

		bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
	}

	cpr->cp_raw_cons = raw_cons;
	bnapi->tx_pkts += tx_pkts;
	bnapi->events |= event;
	return rx_pkts;
}
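
/* Post-poll doorbell writes: reap completed TX buffers via bnapi->tx_int()
 * and publish any new RX/aggregation producer indexes accumulated by
 * __bnxt_poll_work().
 */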
static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
{
	if (bnapi->tx_pkts) {
		bnapi->tx_int(bp, bnapi, bnapi->tx_pkts);
		bnapi->tx_pkts = 0;
	}

	if (bnapi->events & BNXT_RX_EVENT) {
		struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;

		if (bnapi->events & BNXT_AGG_EVENT)
			bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
		bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
	}
	bnapi->events = 0;
}
static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			  int budget)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	int rx_pkts;

	rx_pkts = __bnxt_poll_work(bp, cpr, budget);

	/* ACK completion ring before freeing tx ring and producing new
	 * buffers in rx/agg rings to prevent overflowing the completion
	 * ring.
	 */
	bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);

	__bnxt_poll_work_done(bp, bnapi);
	return rx_pkts;
}
static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
{
	struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
	struct bnxt *bp = bnapi->bp;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	struct tx_cmp *txcmp;
	struct rx_cmp_ext *rxcmp1;
	u32 cp_cons, tmp_raw_cons;
	u32 raw_cons = cpr->cp_raw_cons;
	u32 rx_pkts = 0;
	u8 event = 0;

	while (1) {
		int rc;

		cp_cons = RING_CMP(raw_cons);
		txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

		if (!TX_CMP_VALID(txcmp, raw_cons))
			break;

		if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
			tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
			cp_cons = RING_CMP(tmp_raw_cons);
			rxcmp1 = (struct rx_cmp_ext *)
			  &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

			if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
				break;

			/* force an error to recycle the buffer */
			rxcmp1->rx_cmp_cfa_code_errors_v2 |=
				cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);

			rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
			if (likely(rc == -EIO) && budget)
				rx_pkts++;
			else if (rc == -EBUSY)	/* partial completion */
				break;
		} else if (unlikely(TX_CMP_TYPE(txcmp) ==
				    CMPL_BASE_TYPE_HWRM_DONE)) {
			bnxt_hwrm_handler(bp, txcmp);
		} else {
			netdev_err(bp->dev,
				   "Invalid completion received on special ring\n");
		}
		raw_cons = NEXT_RAW_CMP(raw_cons);

		if (rx_pkts == budget)
			break;
	}

	cpr->cp_raw_cons = raw_cons;
	BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
	bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);

	if (event & BNXT_AGG_EVENT)
		bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);

	if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
		napi_complete_done(napi, rx_pkts);
		BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
	}
	return rx_pkts;
}
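
/* Standard NAPI poll handler for chips with a combined completion ring.
 * When dynamic interrupt moderation (DIM) is enabled, a sample is fed to
 * net_dim() after each poll so the coalescing parameters can adapt.
 */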
static int bnxt_poll(struct napi_struct *napi, int budget)
{
	struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
	struct bnxt *bp = bnapi->bp;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	int work_done = 0;

	while (1) {
		work_done += bnxt_poll_work(bp, cpr, budget - work_done);

		if (work_done >= budget) {
			if (!budget)
				BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
			break;
		}

		if (!bnxt_has_work(bp, cpr)) {
			if (napi_complete_done(napi, work_done))
				BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
			break;
		}
	}
	if (bp->flags & BNXT_FLAG_DIM) {
		struct dim_sample dim_sample = {};

		dim_update_sample(cpr->event_ctr,
				  cpr->rx_packets,
				  cpr->rx_bytes,
				  &dim_sample);
		net_dim(&cpr->dim, dim_sample);
	}
	return work_done;
}
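
/* On 57500-series (P5) chips the notification queue fans out to up to two
 * child completion rings (RX and TX); the two helpers below poll those
 * children and ring their completion doorbells.
 */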
static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
{
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	int i, work_done = 0;

	for (i = 0; i < 2; i++) {
		struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];

		if (cpr2) {
			work_done += __bnxt_poll_work(bp, cpr2,
						      budget - work_done);
			cpr->has_more_work |= cpr2->has_more_work;
		}
	}
	return work_done;
}
static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
				 u64 dbr_type, bool all)
{
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	int i;

	for (i = 0; i < 2; i++) {
		struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
		struct bnxt_db_info *db;

		if (cpr2 && (all || cpr2->had_work_done)) {
			db = &cpr2->cp_db;
			writeq(db->db_key64 | dbr_type |
			       RING_CMP(cpr2->cp_raw_cons), db->doorbell);
			cpr2->had_work_done = 0;
		}
	}
	__bnxt_poll_work_done(bp, bnapi);
}
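
/* NAPI poll handler for P5 chips: processes notification queue entries,
 * polls the referenced child completion rings, and re-arms the NQ only when
 * no more work is pending.
 */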
static int bnxt_poll_p5(struct napi_struct *napi, int budget)
{
	struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	u32 raw_cons = cpr->cp_raw_cons;
	struct bnxt *bp = bnapi->bp;
	struct nqe_cn *nqcmp;
	int work_done = 0;
	u32 cons;

	if (cpr->has_more_work) {
		cpr->has_more_work = 0;
		work_done = __bnxt_poll_cqs(bp, bnapi, budget);
		if (cpr->has_more_work) {
			__bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, false);
			return work_done;
		}
		__bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL, true);
		if (napi_complete_done(napi, work_done))
			BNXT_DB_NQ_ARM_P5(&cpr->cp_db, cpr->cp_raw_cons);
		return work_done;
	}
	while (1) {
		cons = RING_CMP(raw_cons);
		nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];

		if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
			__bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL,
					     false);
			cpr->cp_raw_cons = raw_cons;
			if (napi_complete_done(napi, work_done))
				BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
						  cpr->cp_raw_cons);
			return work_done;
		}

		/* The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();

		if (nqcmp->type == cpu_to_le16(NQ_CN_TYPE_CQ_NOTIFICATION)) {
			u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
			struct bnxt_cp_ring_info *cpr2;

			cpr2 = cpr->cp_ring_arr[idx];
			work_done += __bnxt_poll_work(bp, cpr2,
						      budget - work_done);
			cpr->has_more_work = cpr2->has_more_work;
		} else {
			bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
		}
		raw_cons = NEXT_RAW_CMP(raw_cons);
		if (cpr->has_more_work)
			break;
	}
	__bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, true);
	cpr->cp_raw_cons = raw_cons;
	return work_done;
}
2479 static void bnxt_free_tx_skbs(struct bnxt
*bp
)
2482 struct pci_dev
*pdev
= bp
->pdev
;
2487 max_idx
= bp
->tx_nr_pages
* TX_DESC_CNT
;
2488 for (i
= 0; i
< bp
->tx_nr_rings
; i
++) {
2489 struct bnxt_tx_ring_info
*txr
= &bp
->tx_ring
[i
];
2492 for (j
= 0; j
< max_idx
;) {
2493 struct bnxt_sw_tx_bd
*tx_buf
= &txr
->tx_buf_ring
[j
];
2494 struct sk_buff
*skb
;
2497 if (i
< bp
->tx_nr_rings_xdp
&&
2498 tx_buf
->action
== XDP_REDIRECT
) {
2499 dma_unmap_single(&pdev
->dev
,
2500 dma_unmap_addr(tx_buf
, mapping
),
2501 dma_unmap_len(tx_buf
, len
),
2503 xdp_return_frame(tx_buf
->xdpf
);
2505 tx_buf
->xdpf
= NULL
;
2518 if (tx_buf
->is_push
) {
2524 dma_unmap_single(&pdev
->dev
,
2525 dma_unmap_addr(tx_buf
, mapping
),
2529 last
= tx_buf
->nr_frags
;
2531 for (k
= 0; k
< last
; k
++, j
++) {
2532 int ring_idx
= j
& bp
->tx_ring_mask
;
2533 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[k
];
2535 tx_buf
= &txr
->tx_buf_ring
[ring_idx
];
2538 dma_unmap_addr(tx_buf
, mapping
),
2539 skb_frag_size(frag
), PCI_DMA_TODEVICE
);
2543 netdev_tx_reset_queue(netdev_get_tx_queue(bp
->dev
, i
));
2547 static void bnxt_free_rx_skbs(struct bnxt
*bp
)
2549 int i
, max_idx
, max_agg_idx
;
2550 struct pci_dev
*pdev
= bp
->pdev
;
2555 max_idx
= bp
->rx_nr_pages
* RX_DESC_CNT
;
2556 max_agg_idx
= bp
->rx_agg_nr_pages
* RX_DESC_CNT
;
2557 for (i
= 0; i
< bp
->rx_nr_rings
; i
++) {
2558 struct bnxt_rx_ring_info
*rxr
= &bp
->rx_ring
[i
];
2559 struct bnxt_tpa_idx_map
*map
;
2563 for (j
= 0; j
< bp
->max_tpa
; j
++) {
2564 struct bnxt_tpa_info
*tpa_info
=
2566 u8
*data
= tpa_info
->data
;
2571 dma_unmap_single_attrs(&pdev
->dev
,
2573 bp
->rx_buf_use_size
,
2575 DMA_ATTR_WEAK_ORDERING
);
2577 tpa_info
->data
= NULL
;
2583 for (j
= 0; j
< max_idx
; j
++) {
2584 struct bnxt_sw_rx_bd
*rx_buf
= &rxr
->rx_buf_ring
[j
];
2585 dma_addr_t mapping
= rx_buf
->mapping
;
2586 void *data
= rx_buf
->data
;
2591 rx_buf
->data
= NULL
;
2593 if (BNXT_RX_PAGE_MODE(bp
)) {
2594 mapping
-= bp
->rx_dma_offset
;
2595 dma_unmap_page_attrs(&pdev
->dev
, mapping
,
2596 PAGE_SIZE
, bp
->rx_dir
,
2597 DMA_ATTR_WEAK_ORDERING
);
2598 page_pool_recycle_direct(rxr
->page_pool
, data
);
2600 dma_unmap_single_attrs(&pdev
->dev
, mapping
,
2601 bp
->rx_buf_use_size
,
2603 DMA_ATTR_WEAK_ORDERING
);
2608 for (j
= 0; j
< max_agg_idx
; j
++) {
2609 struct bnxt_sw_rx_agg_bd
*rx_agg_buf
=
2610 &rxr
->rx_agg_ring
[j
];
2611 struct page
*page
= rx_agg_buf
->page
;
2616 dma_unmap_page_attrs(&pdev
->dev
, rx_agg_buf
->mapping
,
2619 DMA_ATTR_WEAK_ORDERING
);
2621 rx_agg_buf
->page
= NULL
;
2622 __clear_bit(j
, rxr
->rx_agg_bmap
);
2627 __free_page(rxr
->rx_page
);
2628 rxr
->rx_page
= NULL
;
2630 map
= rxr
->rx_tpa_idx_map
;
2632 memset(map
->agg_idx_bmap
, 0, sizeof(map
->agg_idx_bmap
));
2636 static void bnxt_free_skbs(struct bnxt
*bp
)
2638 bnxt_free_tx_skbs(bp
);
2639 bnxt_free_rx_skbs(bp
);
static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
{
	struct pci_dev *pdev = bp->pdev;
	int i;

	for (i = 0; i < rmem->nr_pages; i++) {
		if (!rmem->pg_arr[i])
			continue;

		dma_free_coherent(&pdev->dev, rmem->page_size,
				  rmem->pg_arr[i], rmem->dma_arr[i]);

		rmem->pg_arr[i] = NULL;
	}
	if (rmem->pg_tbl) {
		size_t pg_tbl_size = rmem->nr_pages * 8;

		if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
			pg_tbl_size = rmem->page_size;
		dma_free_coherent(&pdev->dev, pg_tbl_size,
				  rmem->pg_tbl, rmem->pg_tbl_map);
		rmem->pg_tbl = NULL;
	}
	if (rmem->vmem_size && *rmem->vmem) {
		vfree(*rmem->vmem);
		*rmem->vmem = NULL;
	}
}
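
/* Allocate the DMA pages described by a bnxt_ring_mem_info, including the
 * optional page table used when the ring spans multiple pages, and the
 * optional vmalloc'ed software ring (vmem).
 */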
static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
{
	struct pci_dev *pdev = bp->pdev;
	u64 valid_bit = 0;
	int i;

	if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
		valid_bit = PTU_PTE_VALID;
	if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
		size_t pg_tbl_size = rmem->nr_pages * 8;

		if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
			pg_tbl_size = rmem->page_size;
		rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
						  &rmem->pg_tbl_map,
						  GFP_KERNEL);
		if (!rmem->pg_tbl)
			return -ENOMEM;
	}

	for (i = 0; i < rmem->nr_pages; i++) {
		u64 extra_bits = valid_bit;

		rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
						     rmem->page_size,
						     &rmem->dma_arr[i],
						     GFP_KERNEL);
		if (!rmem->pg_arr[i])
			return -ENOMEM;

		if (rmem->init_val)
			memset(rmem->pg_arr[i], rmem->init_val,
			       rmem->page_size);
		if (rmem->nr_pages > 1 || rmem->depth > 0) {
			if (i == rmem->nr_pages - 2 &&
			    (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
				extra_bits |= PTU_PTE_NEXT_TO_LAST;
			else if (i == rmem->nr_pages - 1 &&
				 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
				extra_bits |= PTU_PTE_LAST;
			rmem->pg_tbl[i] =
				cpu_to_le64(rmem->dma_arr[i] | extra_bits);
		}
	}

	if (rmem->vmem_size) {
		*rmem->vmem = vzalloc(rmem->vmem_size);
		if (!(*rmem->vmem))
			return -ENOMEM;
	}
	return 0;
}
2724 static void bnxt_free_tpa_info(struct bnxt
*bp
)
2728 for (i
= 0; i
< bp
->rx_nr_rings
; i
++) {
2729 struct bnxt_rx_ring_info
*rxr
= &bp
->rx_ring
[i
];
2731 kfree(rxr
->rx_tpa_idx_map
);
2732 rxr
->rx_tpa_idx_map
= NULL
;
2734 kfree(rxr
->rx_tpa
[0].agg_arr
);
2735 rxr
->rx_tpa
[0].agg_arr
= NULL
;
2742 static int bnxt_alloc_tpa_info(struct bnxt
*bp
)
2744 int i
, j
, total_aggs
= 0;
2746 bp
->max_tpa
= MAX_TPA
;
2747 if (bp
->flags
& BNXT_FLAG_CHIP_P5
) {
2748 if (!bp
->max_tpa_v2
)
2750 bp
->max_tpa
= max_t(u16
, bp
->max_tpa_v2
, MAX_TPA_P5
);
2751 total_aggs
= bp
->max_tpa
* MAX_SKB_FRAGS
;
2754 for (i
= 0; i
< bp
->rx_nr_rings
; i
++) {
2755 struct bnxt_rx_ring_info
*rxr
= &bp
->rx_ring
[i
];
2756 struct rx_agg_cmp
*agg
;
2758 rxr
->rx_tpa
= kcalloc(bp
->max_tpa
, sizeof(struct bnxt_tpa_info
),
2763 if (!(bp
->flags
& BNXT_FLAG_CHIP_P5
))
2765 agg
= kcalloc(total_aggs
, sizeof(*agg
), GFP_KERNEL
);
2766 rxr
->rx_tpa
[0].agg_arr
= agg
;
2769 for (j
= 1; j
< bp
->max_tpa
; j
++)
2770 rxr
->rx_tpa
[j
].agg_arr
= agg
+ j
* MAX_SKB_FRAGS
;
2771 rxr
->rx_tpa_idx_map
= kzalloc(sizeof(*rxr
->rx_tpa_idx_map
),
2773 if (!rxr
->rx_tpa_idx_map
)
2779 static void bnxt_free_rx_rings(struct bnxt
*bp
)
2786 bnxt_free_tpa_info(bp
);
2787 for (i
= 0; i
< bp
->rx_nr_rings
; i
++) {
2788 struct bnxt_rx_ring_info
*rxr
= &bp
->rx_ring
[i
];
2789 struct bnxt_ring_struct
*ring
;
2792 bpf_prog_put(rxr
->xdp_prog
);
2794 if (xdp_rxq_info_is_reg(&rxr
->xdp_rxq
))
2795 xdp_rxq_info_unreg(&rxr
->xdp_rxq
);
2797 page_pool_destroy(rxr
->page_pool
);
2798 rxr
->page_pool
= NULL
;
2800 kfree(rxr
->rx_agg_bmap
);
2801 rxr
->rx_agg_bmap
= NULL
;
2803 ring
= &rxr
->rx_ring_struct
;
2804 bnxt_free_ring(bp
, &ring
->ring_mem
);
2806 ring
= &rxr
->rx_agg_ring_struct
;
2807 bnxt_free_ring(bp
, &ring
->ring_mem
);
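
/* Each RX ring gets its own page_pool, sized to the RX ring, so that
 * receive pages can be recycled instead of freed and reallocated.
 */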
static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
				   struct bnxt_rx_ring_info *rxr)
{
	struct page_pool_params pp = { 0 };

	pp.pool_size = bp->rx_ring_size;
	pp.nid = dev_to_node(&bp->pdev->dev);
	pp.dev = &bp->pdev->dev;
	pp.dma_dir = DMA_BIDIRECTIONAL;

	rxr->page_pool = page_pool_create(&pp);
	if (IS_ERR(rxr->page_pool)) {
		int err = PTR_ERR(rxr->page_pool);

		rxr->page_pool = NULL;
		return err;
	}
	return 0;
}
2831 static int bnxt_alloc_rx_rings(struct bnxt
*bp
)
2833 int i
, rc
= 0, agg_rings
= 0;
2838 if (bp
->flags
& BNXT_FLAG_AGG_RINGS
)
2841 for (i
= 0; i
< bp
->rx_nr_rings
; i
++) {
2842 struct bnxt_rx_ring_info
*rxr
= &bp
->rx_ring
[i
];
2843 struct bnxt_ring_struct
*ring
;
2845 ring
= &rxr
->rx_ring_struct
;
2847 rc
= bnxt_alloc_rx_page_pool(bp
, rxr
);
2851 rc
= xdp_rxq_info_reg(&rxr
->xdp_rxq
, bp
->dev
, i
);
2855 rc
= xdp_rxq_info_reg_mem_model(&rxr
->xdp_rxq
,
2859 xdp_rxq_info_unreg(&rxr
->xdp_rxq
);
2863 rc
= bnxt_alloc_ring(bp
, &ring
->ring_mem
);
2871 ring
= &rxr
->rx_agg_ring_struct
;
2872 rc
= bnxt_alloc_ring(bp
, &ring
->ring_mem
);
2877 rxr
->rx_agg_bmap_size
= bp
->rx_agg_ring_mask
+ 1;
2878 mem_size
= rxr
->rx_agg_bmap_size
/ 8;
2879 rxr
->rx_agg_bmap
= kzalloc(mem_size
, GFP_KERNEL
);
2880 if (!rxr
->rx_agg_bmap
)
2884 if (bp
->flags
& BNXT_FLAG_TPA
)
2885 rc
= bnxt_alloc_tpa_info(bp
);
2889 static void bnxt_free_tx_rings(struct bnxt
*bp
)
2892 struct pci_dev
*pdev
= bp
->pdev
;
2897 for (i
= 0; i
< bp
->tx_nr_rings
; i
++) {
2898 struct bnxt_tx_ring_info
*txr
= &bp
->tx_ring
[i
];
2899 struct bnxt_ring_struct
*ring
;
2902 dma_free_coherent(&pdev
->dev
, bp
->tx_push_size
,
2903 txr
->tx_push
, txr
->tx_push_mapping
);
2904 txr
->tx_push
= NULL
;
2907 ring
= &txr
->tx_ring_struct
;
2909 bnxt_free_ring(bp
, &ring
->ring_mem
);
2913 static int bnxt_alloc_tx_rings(struct bnxt
*bp
)
2916 struct pci_dev
*pdev
= bp
->pdev
;
2918 bp
->tx_push_size
= 0;
2919 if (bp
->tx_push_thresh
) {
2922 push_size
= L1_CACHE_ALIGN(sizeof(struct tx_push_bd
) +
2923 bp
->tx_push_thresh
);
2925 if (push_size
> 256) {
2927 bp
->tx_push_thresh
= 0;
2930 bp
->tx_push_size
= push_size
;
2933 for (i
= 0, j
= 0; i
< bp
->tx_nr_rings
; i
++) {
2934 struct bnxt_tx_ring_info
*txr
= &bp
->tx_ring
[i
];
2935 struct bnxt_ring_struct
*ring
;
2938 ring
= &txr
->tx_ring_struct
;
2940 rc
= bnxt_alloc_ring(bp
, &ring
->ring_mem
);
2944 ring
->grp_idx
= txr
->bnapi
->index
;
2945 if (bp
->tx_push_size
) {
2948 /* One pre-allocated DMA buffer to backup
2951 txr
->tx_push
= dma_alloc_coherent(&pdev
->dev
,
2953 &txr
->tx_push_mapping
,
2959 mapping
= txr
->tx_push_mapping
+
2960 sizeof(struct tx_push_bd
);
2961 txr
->data_mapping
= cpu_to_le64(mapping
);
2963 qidx
= bp
->tc_to_qidx
[j
];
2964 ring
->queue_id
= bp
->q_info
[qidx
].queue_id
;
2965 if (i
< bp
->tx_nr_rings_xdp
)
2967 if (i
% bp
->tx_nr_rings_per_tc
== (bp
->tx_nr_rings_per_tc
- 1))
2973 static void bnxt_free_cp_rings(struct bnxt
*bp
)
2980 for (i
= 0; i
< bp
->cp_nr_rings
; i
++) {
2981 struct bnxt_napi
*bnapi
= bp
->bnapi
[i
];
2982 struct bnxt_cp_ring_info
*cpr
;
2983 struct bnxt_ring_struct
*ring
;
2989 cpr
= &bnapi
->cp_ring
;
2990 ring
= &cpr
->cp_ring_struct
;
2992 bnxt_free_ring(bp
, &ring
->ring_mem
);
2994 for (j
= 0; j
< 2; j
++) {
2995 struct bnxt_cp_ring_info
*cpr2
= cpr
->cp_ring_arr
[j
];
2998 ring
= &cpr2
->cp_ring_struct
;
2999 bnxt_free_ring(bp
, &ring
->ring_mem
);
3001 cpr
->cp_ring_arr
[j
] = NULL
;
3007 static struct bnxt_cp_ring_info
*bnxt_alloc_cp_sub_ring(struct bnxt
*bp
)
3009 struct bnxt_ring_mem_info
*rmem
;
3010 struct bnxt_ring_struct
*ring
;
3011 struct bnxt_cp_ring_info
*cpr
;
3014 cpr
= kzalloc(sizeof(*cpr
), GFP_KERNEL
);
3018 ring
= &cpr
->cp_ring_struct
;
3019 rmem
= &ring
->ring_mem
;
3020 rmem
->nr_pages
= bp
->cp_nr_pages
;
3021 rmem
->page_size
= HW_CMPD_RING_SIZE
;
3022 rmem
->pg_arr
= (void **)cpr
->cp_desc_ring
;
3023 rmem
->dma_arr
= cpr
->cp_desc_mapping
;
3024 rmem
->flags
= BNXT_RMEM_RING_PTE_FLAG
;
3025 rc
= bnxt_alloc_ring(bp
, rmem
);
3027 bnxt_free_ring(bp
, rmem
);
3034 static int bnxt_alloc_cp_rings(struct bnxt
*bp
)
3036 bool sh
= !!(bp
->flags
& BNXT_FLAG_SHARED_RINGS
);
3037 int i
, rc
, ulp_base_vec
, ulp_msix
;
3039 ulp_msix
= bnxt_get_ulp_msix_num(bp
);
3040 ulp_base_vec
= bnxt_get_ulp_msix_base(bp
);
3041 for (i
= 0; i
< bp
->cp_nr_rings
; i
++) {
3042 struct bnxt_napi
*bnapi
= bp
->bnapi
[i
];
3043 struct bnxt_cp_ring_info
*cpr
;
3044 struct bnxt_ring_struct
*ring
;
3049 cpr
= &bnapi
->cp_ring
;
3051 ring
= &cpr
->cp_ring_struct
;
3053 rc
= bnxt_alloc_ring(bp
, &ring
->ring_mem
);
3057 if (ulp_msix
&& i
>= ulp_base_vec
)
3058 ring
->map_idx
= i
+ ulp_msix
;
3062 if (!(bp
->flags
& BNXT_FLAG_CHIP_P5
))
3065 if (i
< bp
->rx_nr_rings
) {
3066 struct bnxt_cp_ring_info
*cpr2
=
3067 bnxt_alloc_cp_sub_ring(bp
);
3069 cpr
->cp_ring_arr
[BNXT_RX_HDL
] = cpr2
;
3072 cpr2
->bnapi
= bnapi
;
3074 if ((sh
&& i
< bp
->tx_nr_rings
) ||
3075 (!sh
&& i
>= bp
->rx_nr_rings
)) {
3076 struct bnxt_cp_ring_info
*cpr2
=
3077 bnxt_alloc_cp_sub_ring(bp
);
3079 cpr
->cp_ring_arr
[BNXT_TX_HDL
] = cpr2
;
3082 cpr2
->bnapi
= bnapi
;
3088 static void bnxt_init_ring_struct(struct bnxt
*bp
)
3092 for (i
= 0; i
< bp
->cp_nr_rings
; i
++) {
3093 struct bnxt_napi
*bnapi
= bp
->bnapi
[i
];
3094 struct bnxt_ring_mem_info
*rmem
;
3095 struct bnxt_cp_ring_info
*cpr
;
3096 struct bnxt_rx_ring_info
*rxr
;
3097 struct bnxt_tx_ring_info
*txr
;
3098 struct bnxt_ring_struct
*ring
;
3103 cpr
= &bnapi
->cp_ring
;
3104 ring
= &cpr
->cp_ring_struct
;
3105 rmem
= &ring
->ring_mem
;
3106 rmem
->nr_pages
= bp
->cp_nr_pages
;
3107 rmem
->page_size
= HW_CMPD_RING_SIZE
;
3108 rmem
->pg_arr
= (void **)cpr
->cp_desc_ring
;
3109 rmem
->dma_arr
= cpr
->cp_desc_mapping
;
3110 rmem
->vmem_size
= 0;
3112 rxr
= bnapi
->rx_ring
;
3116 ring
= &rxr
->rx_ring_struct
;
3117 rmem
= &ring
->ring_mem
;
3118 rmem
->nr_pages
= bp
->rx_nr_pages
;
3119 rmem
->page_size
= HW_RXBD_RING_SIZE
;
3120 rmem
->pg_arr
= (void **)rxr
->rx_desc_ring
;
3121 rmem
->dma_arr
= rxr
->rx_desc_mapping
;
3122 rmem
->vmem_size
= SW_RXBD_RING_SIZE
* bp
->rx_nr_pages
;
3123 rmem
->vmem
= (void **)&rxr
->rx_buf_ring
;
3125 ring
= &rxr
->rx_agg_ring_struct
;
3126 rmem
= &ring
->ring_mem
;
3127 rmem
->nr_pages
= bp
->rx_agg_nr_pages
;
3128 rmem
->page_size
= HW_RXBD_RING_SIZE
;
3129 rmem
->pg_arr
= (void **)rxr
->rx_agg_desc_ring
;
3130 rmem
->dma_arr
= rxr
->rx_agg_desc_mapping
;
3131 rmem
->vmem_size
= SW_RXBD_AGG_RING_SIZE
* bp
->rx_agg_nr_pages
;
3132 rmem
->vmem
= (void **)&rxr
->rx_agg_ring
;
3135 txr
= bnapi
->tx_ring
;
3139 ring
= &txr
->tx_ring_struct
;
3140 rmem
= &ring
->ring_mem
;
3141 rmem
->nr_pages
= bp
->tx_nr_pages
;
3142 rmem
->page_size
= HW_RXBD_RING_SIZE
;
3143 rmem
->pg_arr
= (void **)txr
->tx_desc_ring
;
3144 rmem
->dma_arr
= txr
->tx_desc_mapping
;
3145 rmem
->vmem_size
= SW_TXBD_RING_SIZE
* bp
->tx_nr_pages
;
3146 rmem
->vmem
= (void **)&txr
->tx_buf_ring
;
3150 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct
*ring
, u32 type
)
3154 struct rx_bd
**rx_buf_ring
;
3156 rx_buf_ring
= (struct rx_bd
**)ring
->ring_mem
.pg_arr
;
3157 for (i
= 0, prod
= 0; i
< ring
->ring_mem
.nr_pages
; i
++) {
3161 rxbd
= rx_buf_ring
[i
];
3165 for (j
= 0; j
< RX_DESC_CNT
; j
++, rxbd
++, prod
++) {
3166 rxbd
->rx_bd_len_flags_type
= cpu_to_le32(type
);
3167 rxbd
->rx_bd_opaque
= prod
;
3172 static int bnxt_init_one_rx_ring(struct bnxt
*bp
, int ring_nr
)
3174 struct net_device
*dev
= bp
->dev
;
3175 struct bnxt_rx_ring_info
*rxr
;
3176 struct bnxt_ring_struct
*ring
;
3180 type
= (bp
->rx_buf_use_size
<< RX_BD_LEN_SHIFT
) |
3181 RX_BD_TYPE_RX_PACKET_BD
| RX_BD_FLAGS_EOP
;
3183 if (NET_IP_ALIGN
== 2)
3184 type
|= RX_BD_FLAGS_SOP
;
3186 rxr
= &bp
->rx_ring
[ring_nr
];
3187 ring
= &rxr
->rx_ring_struct
;
3188 bnxt_init_rxbd_pages(ring
, type
);
3190 if (BNXT_RX_PAGE_MODE(bp
) && bp
->xdp_prog
) {
3191 bpf_prog_add(bp
->xdp_prog
, 1);
3192 rxr
->xdp_prog
= bp
->xdp_prog
;
3194 prod
= rxr
->rx_prod
;
3195 for (i
= 0; i
< bp
->rx_ring_size
; i
++) {
3196 if (bnxt_alloc_rx_data(bp
, rxr
, prod
, GFP_KERNEL
) != 0) {
3197 netdev_warn(dev
, "init'ed rx ring %d with %d/%d skbs only\n",
3198 ring_nr
, i
, bp
->rx_ring_size
);
3201 prod
= NEXT_RX(prod
);
3203 rxr
->rx_prod
= prod
;
3204 ring
->fw_ring_id
= INVALID_HW_RING_ID
;
3206 ring
= &rxr
->rx_agg_ring_struct
;
3207 ring
->fw_ring_id
= INVALID_HW_RING_ID
;
3209 if (!(bp
->flags
& BNXT_FLAG_AGG_RINGS
))
3212 type
= ((u32
)BNXT_RX_PAGE_SIZE
<< RX_BD_LEN_SHIFT
) |
3213 RX_BD_TYPE_RX_AGG_BD
| RX_BD_FLAGS_SOP
;
3215 bnxt_init_rxbd_pages(ring
, type
);
3217 prod
= rxr
->rx_agg_prod
;
3218 for (i
= 0; i
< bp
->rx_agg_ring_size
; i
++) {
3219 if (bnxt_alloc_rx_page(bp
, rxr
, prod
, GFP_KERNEL
) != 0) {
3220 netdev_warn(dev
, "init'ed rx ring %d with %d/%d pages only\n",
3221 ring_nr
, i
, bp
->rx_ring_size
);
3224 prod
= NEXT_RX_AGG(prod
);
3226 rxr
->rx_agg_prod
= prod
;
3228 if (bp
->flags
& BNXT_FLAG_TPA
) {
3233 for (i
= 0; i
< bp
->max_tpa
; i
++) {
3234 data
= __bnxt_alloc_rx_data(bp
, &mapping
,
3239 rxr
->rx_tpa
[i
].data
= data
;
3240 rxr
->rx_tpa
[i
].data_ptr
= data
+ bp
->rx_offset
;
3241 rxr
->rx_tpa
[i
].mapping
= mapping
;
3244 netdev_err(bp
->dev
, "No resource allocated for LRO/GRO\n");
3252 static void bnxt_init_cp_rings(struct bnxt
*bp
)
3256 for (i
= 0; i
< bp
->cp_nr_rings
; i
++) {
3257 struct bnxt_cp_ring_info
*cpr
= &bp
->bnapi
[i
]->cp_ring
;
3258 struct bnxt_ring_struct
*ring
= &cpr
->cp_ring_struct
;
3260 ring
->fw_ring_id
= INVALID_HW_RING_ID
;
3261 cpr
->rx_ring_coal
.coal_ticks
= bp
->rx_coal
.coal_ticks
;
3262 cpr
->rx_ring_coal
.coal_bufs
= bp
->rx_coal
.coal_bufs
;
3263 for (j
= 0; j
< 2; j
++) {
3264 struct bnxt_cp_ring_info
*cpr2
= cpr
->cp_ring_arr
[j
];
3269 ring
= &cpr2
->cp_ring_struct
;
3270 ring
->fw_ring_id
= INVALID_HW_RING_ID
;
3271 cpr2
->rx_ring_coal
.coal_ticks
= bp
->rx_coal
.coal_ticks
;
3272 cpr2
->rx_ring_coal
.coal_bufs
= bp
->rx_coal
.coal_bufs
;
3277 static int bnxt_init_rx_rings(struct bnxt
*bp
)
3281 if (BNXT_RX_PAGE_MODE(bp
)) {
3282 bp
->rx_offset
= NET_IP_ALIGN
+ XDP_PACKET_HEADROOM
;
3283 bp
->rx_dma_offset
= XDP_PACKET_HEADROOM
;
3285 bp
->rx_offset
= BNXT_RX_OFFSET
;
3286 bp
->rx_dma_offset
= BNXT_RX_DMA_OFFSET
;
3289 for (i
= 0; i
< bp
->rx_nr_rings
; i
++) {
3290 rc
= bnxt_init_one_rx_ring(bp
, i
);
3298 static int bnxt_init_tx_rings(struct bnxt
*bp
)
3302 bp
->tx_wake_thresh
= max_t(int, bp
->tx_ring_size
/ 2,
3305 for (i
= 0; i
< bp
->tx_nr_rings
; i
++) {
3306 struct bnxt_tx_ring_info
*txr
= &bp
->tx_ring
[i
];
3307 struct bnxt_ring_struct
*ring
= &txr
->tx_ring_struct
;
3309 ring
->fw_ring_id
= INVALID_HW_RING_ID
;
3315 static void bnxt_free_ring_grps(struct bnxt
*bp
)
3317 kfree(bp
->grp_info
);
3318 bp
->grp_info
= NULL
;
3321 static int bnxt_init_ring_grps(struct bnxt
*bp
, bool irq_re_init
)
3326 bp
->grp_info
= kcalloc(bp
->cp_nr_rings
,
3327 sizeof(struct bnxt_ring_grp_info
),
3332 for (i
= 0; i
< bp
->cp_nr_rings
; i
++) {
3334 bp
->grp_info
[i
].fw_stats_ctx
= INVALID_HW_RING_ID
;
3335 bp
->grp_info
[i
].fw_grp_id
= INVALID_HW_RING_ID
;
3336 bp
->grp_info
[i
].rx_fw_ring_id
= INVALID_HW_RING_ID
;
3337 bp
->grp_info
[i
].agg_fw_ring_id
= INVALID_HW_RING_ID
;
3338 bp
->grp_info
[i
].cp_fw_ring_id
= INVALID_HW_RING_ID
;
3343 static void bnxt_free_vnics(struct bnxt
*bp
)
3345 kfree(bp
->vnic_info
);
3346 bp
->vnic_info
= NULL
;
3350 static int bnxt_alloc_vnics(struct bnxt
*bp
)
3354 #ifdef CONFIG_RFS_ACCEL
3355 if ((bp
->flags
& (BNXT_FLAG_RFS
| BNXT_FLAG_CHIP_P5
)) == BNXT_FLAG_RFS
)
3356 num_vnics
+= bp
->rx_nr_rings
;
3359 if (BNXT_CHIP_TYPE_NITRO_A0(bp
))
3362 bp
->vnic_info
= kcalloc(num_vnics
, sizeof(struct bnxt_vnic_info
),
3367 bp
->nr_vnics
= num_vnics
;
3371 static void bnxt_init_vnics(struct bnxt
*bp
)
3375 for (i
= 0; i
< bp
->nr_vnics
; i
++) {
3376 struct bnxt_vnic_info
*vnic
= &bp
->vnic_info
[i
];
3379 vnic
->fw_vnic_id
= INVALID_HW_RING_ID
;
3380 for (j
= 0; j
< BNXT_MAX_CTX_PER_VNIC
; j
++)
3381 vnic
->fw_rss_cos_lb_ctx
[j
] = INVALID_HW_RING_ID
;
3383 vnic
->fw_l2_ctx_id
= INVALID_HW_RING_ID
;
3385 if (bp
->vnic_info
[i
].rss_hash_key
) {
3387 prandom_bytes(vnic
->rss_hash_key
,
3390 memcpy(vnic
->rss_hash_key
,
3391 bp
->vnic_info
[0].rss_hash_key
,
3397 static int bnxt_calc_nr_ring_pages(u32 ring_size
, int desc_per_pg
)
3401 pages
= ring_size
/ desc_per_pg
;
3408 while (pages
& (pages
- 1))
3414 void bnxt_set_tpa_flags(struct bnxt
*bp
)
3416 bp
->flags
&= ~BNXT_FLAG_TPA
;
3417 if (bp
->flags
& BNXT_FLAG_NO_AGG_RINGS
)
3419 if (bp
->dev
->features
& NETIF_F_LRO
)
3420 bp
->flags
|= BNXT_FLAG_LRO
;
3421 else if (bp
->dev
->features
& NETIF_F_GRO_HW
)
3422 bp
->flags
|= BNXT_FLAG_GRO
;
3425 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
3428 void bnxt_set_ring_params(struct bnxt
*bp
)
3430 u32 ring_size
, rx_size
, rx_space
;
3431 u32 agg_factor
= 0, agg_ring_size
= 0;
3433 /* 8 for CRC and VLAN */
3434 rx_size
= SKB_DATA_ALIGN(bp
->dev
->mtu
+ ETH_HLEN
+ NET_IP_ALIGN
+ 8);
3436 rx_space
= rx_size
+ NET_SKB_PAD
+
3437 SKB_DATA_ALIGN(sizeof(struct skb_shared_info
));
3439 bp
->rx_copy_thresh
= BNXT_RX_COPY_THRESH
;
3440 ring_size
= bp
->rx_ring_size
;
3441 bp
->rx_agg_ring_size
= 0;
3442 bp
->rx_agg_nr_pages
= 0;
3444 if (bp
->flags
& BNXT_FLAG_TPA
)
3445 agg_factor
= min_t(u32
, 4, 65536 / BNXT_RX_PAGE_SIZE
);
3447 bp
->flags
&= ~BNXT_FLAG_JUMBO
;
3448 if (rx_space
> PAGE_SIZE
&& !(bp
->flags
& BNXT_FLAG_NO_AGG_RINGS
)) {
3451 bp
->flags
|= BNXT_FLAG_JUMBO
;
3452 jumbo_factor
= PAGE_ALIGN(bp
->dev
->mtu
- 40) >> PAGE_SHIFT
;
3453 if (jumbo_factor
> agg_factor
)
3454 agg_factor
= jumbo_factor
;
3456 agg_ring_size
= ring_size
* agg_factor
;
3458 if (agg_ring_size
) {
3459 bp
->rx_agg_nr_pages
= bnxt_calc_nr_ring_pages(agg_ring_size
,
3461 if (bp
->rx_agg_nr_pages
> MAX_RX_AGG_PAGES
) {
3462 u32 tmp
= agg_ring_size
;
3464 bp
->rx_agg_nr_pages
= MAX_RX_AGG_PAGES
;
3465 agg_ring_size
= MAX_RX_AGG_PAGES
* RX_DESC_CNT
- 1;
3466 netdev_warn(bp
->dev
, "rx agg ring size %d reduced to %d.\n",
3467 tmp
, agg_ring_size
);
3469 bp
->rx_agg_ring_size
= agg_ring_size
;
3470 bp
->rx_agg_ring_mask
= (bp
->rx_agg_nr_pages
* RX_DESC_CNT
) - 1;
3471 rx_size
= SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH
+ NET_IP_ALIGN
);
3472 rx_space
= rx_size
+ NET_SKB_PAD
+
3473 SKB_DATA_ALIGN(sizeof(struct skb_shared_info
));
3476 bp
->rx_buf_use_size
= rx_size
;
3477 bp
->rx_buf_size
= rx_space
;
3479 bp
->rx_nr_pages
= bnxt_calc_nr_ring_pages(ring_size
, RX_DESC_CNT
);
3480 bp
->rx_ring_mask
= (bp
->rx_nr_pages
* RX_DESC_CNT
) - 1;
3482 ring_size
= bp
->tx_ring_size
;
3483 bp
->tx_nr_pages
= bnxt_calc_nr_ring_pages(ring_size
, TX_DESC_CNT
);
3484 bp
->tx_ring_mask
= (bp
->tx_nr_pages
* TX_DESC_CNT
) - 1;
3486 ring_size
= bp
->rx_ring_size
* (2 + agg_factor
) + bp
->tx_ring_size
;
3487 bp
->cp_ring_size
= ring_size
;
3489 bp
->cp_nr_pages
= bnxt_calc_nr_ring_pages(ring_size
, CP_DESC_CNT
);
3490 if (bp
->cp_nr_pages
> MAX_CP_PAGES
) {
3491 bp
->cp_nr_pages
= MAX_CP_PAGES
;
3492 bp
->cp_ring_size
= MAX_CP_PAGES
* CP_DESC_CNT
- 1;
3493 netdev_warn(bp
->dev
, "completion ring size %d reduced to %d.\n",
3494 ring_size
, bp
->cp_ring_size
);
3496 bp
->cp_bit
= bp
->cp_nr_pages
* CP_DESC_CNT
;
3497 bp
->cp_ring_mask
= bp
->cp_bit
- 1;
3500 /* Changing allocation mode of RX rings.
3501 * TODO: Update when extending xdp_rxq_info to support allocation modes.
3503 int bnxt_set_rx_skb_mode(struct bnxt
*bp
, bool page_mode
)
3506 if (bp
->dev
->mtu
> BNXT_MAX_PAGE_MODE_MTU
)
3509 min_t(u16
, bp
->max_mtu
, BNXT_MAX_PAGE_MODE_MTU
);
3510 bp
->flags
&= ~BNXT_FLAG_AGG_RINGS
;
3511 bp
->flags
|= BNXT_FLAG_NO_AGG_RINGS
| BNXT_FLAG_RX_PAGE_MODE
;
3512 bp
->rx_dir
= DMA_BIDIRECTIONAL
;
3513 bp
->rx_skb_func
= bnxt_rx_page_skb
;
3514 /* Disable LRO or GRO_HW */
3515 netdev_update_features(bp
->dev
);
3517 bp
->dev
->max_mtu
= bp
->max_mtu
;
3518 bp
->flags
&= ~BNXT_FLAG_RX_PAGE_MODE
;
3519 bp
->rx_dir
= DMA_FROM_DEVICE
;
3520 bp
->rx_skb_func
= bnxt_rx_skb
;
3525 static void bnxt_free_vnic_attributes(struct bnxt
*bp
)
3528 struct bnxt_vnic_info
*vnic
;
3529 struct pci_dev
*pdev
= bp
->pdev
;
3534 for (i
= 0; i
< bp
->nr_vnics
; i
++) {
3535 vnic
= &bp
->vnic_info
[i
];
3537 kfree(vnic
->fw_grp_ids
);
3538 vnic
->fw_grp_ids
= NULL
;
3540 kfree(vnic
->uc_list
);
3541 vnic
->uc_list
= NULL
;
3543 if (vnic
->mc_list
) {
3544 dma_free_coherent(&pdev
->dev
, vnic
->mc_list_size
,
3545 vnic
->mc_list
, vnic
->mc_list_mapping
);
3546 vnic
->mc_list
= NULL
;
3549 if (vnic
->rss_table
) {
3550 dma_free_coherent(&pdev
->dev
, PAGE_SIZE
,
3552 vnic
->rss_table_dma_addr
);
3553 vnic
->rss_table
= NULL
;
3556 vnic
->rss_hash_key
= NULL
;
3561 static int bnxt_alloc_vnic_attributes(struct bnxt
*bp
)
3563 int i
, rc
= 0, size
;
3564 struct bnxt_vnic_info
*vnic
;
3565 struct pci_dev
*pdev
= bp
->pdev
;
3568 for (i
= 0; i
< bp
->nr_vnics
; i
++) {
3569 vnic
= &bp
->vnic_info
[i
];
3571 if (vnic
->flags
& BNXT_VNIC_UCAST_FLAG
) {
3572 int mem_size
= (BNXT_MAX_UC_ADDRS
- 1) * ETH_ALEN
;
3575 vnic
->uc_list
= kmalloc(mem_size
, GFP_KERNEL
);
3576 if (!vnic
->uc_list
) {
3583 if (vnic
->flags
& BNXT_VNIC_MCAST_FLAG
) {
3584 vnic
->mc_list_size
= BNXT_MAX_MC_ADDRS
* ETH_ALEN
;
3586 dma_alloc_coherent(&pdev
->dev
,
3588 &vnic
->mc_list_mapping
,
3590 if (!vnic
->mc_list
) {
3596 if (bp
->flags
& BNXT_FLAG_CHIP_P5
)
3597 goto vnic_skip_grps
;
3599 if (vnic
->flags
& BNXT_VNIC_RSS_FLAG
)
3600 max_rings
= bp
->rx_nr_rings
;
3604 vnic
->fw_grp_ids
= kcalloc(max_rings
, sizeof(u16
), GFP_KERNEL
);
3605 if (!vnic
->fw_grp_ids
) {
3610 if ((bp
->flags
& BNXT_FLAG_NEW_RSS_CAP
) &&
3611 !(vnic
->flags
& BNXT_VNIC_RSS_FLAG
))
3614 /* Allocate rss table and hash key */
3615 vnic
->rss_table
= dma_alloc_coherent(&pdev
->dev
, PAGE_SIZE
,
3616 &vnic
->rss_table_dma_addr
,
3618 if (!vnic
->rss_table
) {
3623 size
= L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE
* sizeof(u16
));
3625 vnic
->rss_hash_key
= ((void *)vnic
->rss_table
) + size
;
3626 vnic
->rss_hash_key_dma_addr
= vnic
->rss_table_dma_addr
+ size
;
3634 static void bnxt_free_hwrm_resources(struct bnxt
*bp
)
3636 struct pci_dev
*pdev
= bp
->pdev
;
3638 if (bp
->hwrm_cmd_resp_addr
) {
3639 dma_free_coherent(&pdev
->dev
, PAGE_SIZE
, bp
->hwrm_cmd_resp_addr
,
3640 bp
->hwrm_cmd_resp_dma_addr
);
3641 bp
->hwrm_cmd_resp_addr
= NULL
;
3644 if (bp
->hwrm_cmd_kong_resp_addr
) {
3645 dma_free_coherent(&pdev
->dev
, PAGE_SIZE
,
3646 bp
->hwrm_cmd_kong_resp_addr
,
3647 bp
->hwrm_cmd_kong_resp_dma_addr
);
3648 bp
->hwrm_cmd_kong_resp_addr
= NULL
;
3652 static int bnxt_alloc_kong_hwrm_resources(struct bnxt
*bp
)
3654 struct pci_dev
*pdev
= bp
->pdev
;
3656 if (bp
->hwrm_cmd_kong_resp_addr
)
3659 bp
->hwrm_cmd_kong_resp_addr
=
3660 dma_alloc_coherent(&pdev
->dev
, PAGE_SIZE
,
3661 &bp
->hwrm_cmd_kong_resp_dma_addr
,
3663 if (!bp
->hwrm_cmd_kong_resp_addr
)
3669 static int bnxt_alloc_hwrm_resources(struct bnxt
*bp
)
3671 struct pci_dev
*pdev
= bp
->pdev
;
3673 bp
->hwrm_cmd_resp_addr
= dma_alloc_coherent(&pdev
->dev
, PAGE_SIZE
,
3674 &bp
->hwrm_cmd_resp_dma_addr
,
3676 if (!bp
->hwrm_cmd_resp_addr
)
3682 static void bnxt_free_hwrm_short_cmd_req(struct bnxt
*bp
)
3684 if (bp
->hwrm_short_cmd_req_addr
) {
3685 struct pci_dev
*pdev
= bp
->pdev
;
3687 dma_free_coherent(&pdev
->dev
, bp
->hwrm_max_ext_req_len
,
3688 bp
->hwrm_short_cmd_req_addr
,
3689 bp
->hwrm_short_cmd_req_dma_addr
);
3690 bp
->hwrm_short_cmd_req_addr
= NULL
;
3694 static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt
*bp
)
3696 struct pci_dev
*pdev
= bp
->pdev
;
3698 if (bp
->hwrm_short_cmd_req_addr
)
3701 bp
->hwrm_short_cmd_req_addr
=
3702 dma_alloc_coherent(&pdev
->dev
, bp
->hwrm_max_ext_req_len
,
3703 &bp
->hwrm_short_cmd_req_dma_addr
,
3705 if (!bp
->hwrm_short_cmd_req_addr
)
3711 static void bnxt_free_port_stats(struct bnxt
*bp
)
3713 struct pci_dev
*pdev
= bp
->pdev
;
3715 bp
->flags
&= ~BNXT_FLAG_PORT_STATS
;
3716 bp
->flags
&= ~BNXT_FLAG_PORT_STATS_EXT
;
3718 if (bp
->hw_rx_port_stats
) {
3719 dma_free_coherent(&pdev
->dev
, bp
->hw_port_stats_size
,
3720 bp
->hw_rx_port_stats
,
3721 bp
->hw_rx_port_stats_map
);
3722 bp
->hw_rx_port_stats
= NULL
;
3725 if (bp
->hw_tx_port_stats_ext
) {
3726 dma_free_coherent(&pdev
->dev
, sizeof(struct tx_port_stats_ext
),
3727 bp
->hw_tx_port_stats_ext
,
3728 bp
->hw_tx_port_stats_ext_map
);
3729 bp
->hw_tx_port_stats_ext
= NULL
;
3732 if (bp
->hw_rx_port_stats_ext
) {
3733 dma_free_coherent(&pdev
->dev
, sizeof(struct rx_port_stats_ext
),
3734 bp
->hw_rx_port_stats_ext
,
3735 bp
->hw_rx_port_stats_ext_map
);
3736 bp
->hw_rx_port_stats_ext
= NULL
;
3739 if (bp
->hw_pcie_stats
) {
3740 dma_free_coherent(&pdev
->dev
, sizeof(struct pcie_ctx_hw_stats
),
3741 bp
->hw_pcie_stats
, bp
->hw_pcie_stats_map
);
3742 bp
->hw_pcie_stats
= NULL
;
3746 static void bnxt_free_ring_stats(struct bnxt
*bp
)
3748 struct pci_dev
*pdev
= bp
->pdev
;
3754 size
= bp
->hw_ring_stats_size
;
3756 for (i
= 0; i
< bp
->cp_nr_rings
; i
++) {
3757 struct bnxt_napi
*bnapi
= bp
->bnapi
[i
];
3758 struct bnxt_cp_ring_info
*cpr
= &bnapi
->cp_ring
;
3760 if (cpr
->hw_stats
) {
3761 dma_free_coherent(&pdev
->dev
, size
, cpr
->hw_stats
,
3763 cpr
->hw_stats
= NULL
;
3768 static int bnxt_alloc_stats(struct bnxt
*bp
)
3771 struct pci_dev
*pdev
= bp
->pdev
;
3773 size
= bp
->hw_ring_stats_size
;
3775 for (i
= 0; i
< bp
->cp_nr_rings
; i
++) {
3776 struct bnxt_napi
*bnapi
= bp
->bnapi
[i
];
3777 struct bnxt_cp_ring_info
*cpr
= &bnapi
->cp_ring
;
3779 cpr
->hw_stats
= dma_alloc_coherent(&pdev
->dev
, size
,
3785 cpr
->hw_stats_ctx_id
= INVALID_STATS_CTX_ID
;
3788 if (BNXT_VF(bp
) || bp
->chip_num
== CHIP_NUM_58700
)
3791 if (bp
->hw_rx_port_stats
)
3792 goto alloc_ext_stats
;
3794 bp
->hw_port_stats_size
= sizeof(struct rx_port_stats
) +
3795 sizeof(struct tx_port_stats
) + 1024;
3797 bp
->hw_rx_port_stats
=
3798 dma_alloc_coherent(&pdev
->dev
, bp
->hw_port_stats_size
,
3799 &bp
->hw_rx_port_stats_map
,
3801 if (!bp
->hw_rx_port_stats
)
3804 bp
->hw_tx_port_stats
= (void *)(bp
->hw_rx_port_stats
+ 1) + 512;
3805 bp
->hw_tx_port_stats_map
= bp
->hw_rx_port_stats_map
+
3806 sizeof(struct rx_port_stats
) + 512;
3807 bp
->flags
|= BNXT_FLAG_PORT_STATS
;
3810 /* Display extended statistics only if FW supports it */
3811 if (bp
->hwrm_spec_code
< 0x10804 || bp
->hwrm_spec_code
== 0x10900)
3812 if (!(bp
->fw_cap
& BNXT_FW_CAP_EXT_STATS_SUPPORTED
))
3815 if (bp
->hw_rx_port_stats_ext
)
3816 goto alloc_tx_ext_stats
;
3818 bp
->hw_rx_port_stats_ext
=
3819 dma_alloc_coherent(&pdev
->dev
, sizeof(struct rx_port_stats_ext
),
3820 &bp
->hw_rx_port_stats_ext_map
, GFP_KERNEL
);
3821 if (!bp
->hw_rx_port_stats_ext
)
3825 if (bp
->hw_tx_port_stats_ext
)
3826 goto alloc_pcie_stats
;
3828 if (bp
->hwrm_spec_code
>= 0x10902 ||
3829 (bp
->fw_cap
& BNXT_FW_CAP_EXT_STATS_SUPPORTED
)) {
3830 bp
->hw_tx_port_stats_ext
=
3831 dma_alloc_coherent(&pdev
->dev
,
3832 sizeof(struct tx_port_stats_ext
),
3833 &bp
->hw_tx_port_stats_ext_map
,
3836 bp
->flags
|= BNXT_FLAG_PORT_STATS_EXT
;
3839 if (bp
->hw_pcie_stats
||
3840 !(bp
->fw_cap
& BNXT_FW_CAP_PCIE_STATS_SUPPORTED
))
3844 dma_alloc_coherent(&pdev
->dev
, sizeof(struct pcie_ctx_hw_stats
),
3845 &bp
->hw_pcie_stats_map
, GFP_KERNEL
);
3846 if (!bp
->hw_pcie_stats
)
3849 bp
->flags
|= BNXT_FLAG_PCIE_STATS
;
3853 static void bnxt_clear_ring_indices(struct bnxt
*bp
)
3860 for (i
= 0; i
< bp
->cp_nr_rings
; i
++) {
3861 struct bnxt_napi
*bnapi
= bp
->bnapi
[i
];
3862 struct bnxt_cp_ring_info
*cpr
;
3863 struct bnxt_rx_ring_info
*rxr
;
3864 struct bnxt_tx_ring_info
*txr
;
3869 cpr
= &bnapi
->cp_ring
;
3870 cpr
->cp_raw_cons
= 0;
3872 txr
= bnapi
->tx_ring
;
3878 rxr
= bnapi
->rx_ring
;
3881 rxr
->rx_agg_prod
= 0;
3882 rxr
->rx_sw_agg_prod
= 0;
3883 rxr
->rx_next_cons
= 0;
3888 static void bnxt_free_ntp_fltrs(struct bnxt
*bp
, bool irq_reinit
)
3890 #ifdef CONFIG_RFS_ACCEL
3893 /* Under rtnl_lock and all our NAPIs have been disabled. It's
3894 * safe to delete the hash table.
3896 for (i
= 0; i
< BNXT_NTP_FLTR_HASH_SIZE
; i
++) {
3897 struct hlist_head
*head
;
3898 struct hlist_node
*tmp
;
3899 struct bnxt_ntuple_filter
*fltr
;
3901 head
= &bp
->ntp_fltr_hash_tbl
[i
];
3902 hlist_for_each_entry_safe(fltr
, tmp
, head
, hash
) {
3903 hlist_del(&fltr
->hash
);
3908 kfree(bp
->ntp_fltr_bmap
);
3909 bp
->ntp_fltr_bmap
= NULL
;
3911 bp
->ntp_fltr_count
= 0;
3915 static int bnxt_alloc_ntp_fltrs(struct bnxt
*bp
)
3917 #ifdef CONFIG_RFS_ACCEL
3920 if (!(bp
->flags
& BNXT_FLAG_RFS
))
3923 for (i
= 0; i
< BNXT_NTP_FLTR_HASH_SIZE
; i
++)
3924 INIT_HLIST_HEAD(&bp
->ntp_fltr_hash_tbl
[i
]);
3926 bp
->ntp_fltr_count
= 0;
3927 bp
->ntp_fltr_bmap
= kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR
),
3931 if (!bp
->ntp_fltr_bmap
)
3940 static void bnxt_free_mem(struct bnxt
*bp
, bool irq_re_init
)
3942 bnxt_free_vnic_attributes(bp
);
3943 bnxt_free_tx_rings(bp
);
3944 bnxt_free_rx_rings(bp
);
3945 bnxt_free_cp_rings(bp
);
3946 bnxt_free_ntp_fltrs(bp
, irq_re_init
);
3948 bnxt_free_ring_stats(bp
);
3949 bnxt_free_ring_grps(bp
);
3950 bnxt_free_vnics(bp
);
3951 kfree(bp
->tx_ring_map
);
3952 bp
->tx_ring_map
= NULL
;
3960 bnxt_clear_ring_indices(bp
);
3964 static int bnxt_alloc_mem(struct bnxt
*bp
, bool irq_re_init
)
3966 int i
, j
, rc
, size
, arr_size
;
3970 /* Allocate bnapi mem pointer array and mem block for
3973 arr_size
= L1_CACHE_ALIGN(sizeof(struct bnxt_napi
*) *
3975 size
= L1_CACHE_ALIGN(sizeof(struct bnxt_napi
));
3976 bnapi
= kzalloc(arr_size
+ size
* bp
->cp_nr_rings
, GFP_KERNEL
);
3982 for (i
= 0; i
< bp
->cp_nr_rings
; i
++, bnapi
+= size
) {
3983 bp
->bnapi
[i
] = bnapi
;
3984 bp
->bnapi
[i
]->index
= i
;
3985 bp
->bnapi
[i
]->bp
= bp
;
3986 if (bp
->flags
& BNXT_FLAG_CHIP_P5
) {
3987 struct bnxt_cp_ring_info
*cpr
=
3988 &bp
->bnapi
[i
]->cp_ring
;
3990 cpr
->cp_ring_struct
.ring_mem
.flags
=
3991 BNXT_RMEM_RING_PTE_FLAG
;
3995 bp
->rx_ring
= kcalloc(bp
->rx_nr_rings
,
3996 sizeof(struct bnxt_rx_ring_info
),
4001 for (i
= 0; i
< bp
->rx_nr_rings
; i
++) {
4002 struct bnxt_rx_ring_info
*rxr
= &bp
->rx_ring
[i
];
4004 if (bp
->flags
& BNXT_FLAG_CHIP_P5
) {
4005 rxr
->rx_ring_struct
.ring_mem
.flags
=
4006 BNXT_RMEM_RING_PTE_FLAG
;
4007 rxr
->rx_agg_ring_struct
.ring_mem
.flags
=
4008 BNXT_RMEM_RING_PTE_FLAG
;
4010 rxr
->bnapi
= bp
->bnapi
[i
];
4011 bp
->bnapi
[i
]->rx_ring
= &bp
->rx_ring
[i
];
4014 bp
->tx_ring
= kcalloc(bp
->tx_nr_rings
,
4015 sizeof(struct bnxt_tx_ring_info
),
4020 bp
->tx_ring_map
= kcalloc(bp
->tx_nr_rings
, sizeof(u16
),
4023 if (!bp
->tx_ring_map
)
4026 if (bp
->flags
& BNXT_FLAG_SHARED_RINGS
)
4029 j
= bp
->rx_nr_rings
;
4031 for (i
= 0; i
< bp
->tx_nr_rings
; i
++, j
++) {
4032 struct bnxt_tx_ring_info
*txr
= &bp
->tx_ring
[i
];
4034 if (bp
->flags
& BNXT_FLAG_CHIP_P5
)
4035 txr
->tx_ring_struct
.ring_mem
.flags
=
4036 BNXT_RMEM_RING_PTE_FLAG
;
4037 txr
->bnapi
= bp
->bnapi
[j
];
4038 bp
->bnapi
[j
]->tx_ring
= txr
;
4039 bp
->tx_ring_map
[i
] = bp
->tx_nr_rings_xdp
+ i
;
4040 if (i
>= bp
->tx_nr_rings_xdp
) {
4041 txr
->txq_index
= i
- bp
->tx_nr_rings_xdp
;
4042 bp
->bnapi
[j
]->tx_int
= bnxt_tx_int
;
4044 bp
->bnapi
[j
]->flags
|= BNXT_NAPI_FLAG_XDP
;
4045 bp
->bnapi
[j
]->tx_int
= bnxt_tx_int_xdp
;
4049 rc
= bnxt_alloc_stats(bp
);
4053 rc
= bnxt_alloc_ntp_fltrs(bp
);
4057 rc
= bnxt_alloc_vnics(bp
);
4062 bnxt_init_ring_struct(bp
);
4064 rc
= bnxt_alloc_rx_rings(bp
);
4068 rc
= bnxt_alloc_tx_rings(bp
);
4072 rc
= bnxt_alloc_cp_rings(bp
);
4076 bp
->vnic_info
[0].flags
|= BNXT_VNIC_RSS_FLAG
| BNXT_VNIC_MCAST_FLAG
|
4077 BNXT_VNIC_UCAST_FLAG
;
4078 rc
= bnxt_alloc_vnic_attributes(bp
);
4084 bnxt_free_mem(bp
, true);
4088 static void bnxt_disable_int(struct bnxt
*bp
)
4095 for (i
= 0; i
< bp
->cp_nr_rings
; i
++) {
4096 struct bnxt_napi
*bnapi
= bp
->bnapi
[i
];
4097 struct bnxt_cp_ring_info
*cpr
= &bnapi
->cp_ring
;
4098 struct bnxt_ring_struct
*ring
= &cpr
->cp_ring_struct
;
4100 if (ring
->fw_ring_id
!= INVALID_HW_RING_ID
)
4101 bnxt_db_nq(bp
, &cpr
->cp_db
, cpr
->cp_raw_cons
);
4105 static int bnxt_cp_num_to_irq_num(struct bnxt
*bp
, int n
)
4107 struct bnxt_napi
*bnapi
= bp
->bnapi
[n
];
4108 struct bnxt_cp_ring_info
*cpr
;
4110 cpr
= &bnapi
->cp_ring
;
4111 return cpr
->cp_ring_struct
.map_idx
;
4114 static void bnxt_disable_int_sync(struct bnxt
*bp
)
4118 atomic_inc(&bp
->intr_sem
);
4120 bnxt_disable_int(bp
);
4121 for (i
= 0; i
< bp
->cp_nr_rings
; i
++) {
4122 int map_idx
= bnxt_cp_num_to_irq_num(bp
, i
);
4124 synchronize_irq(bp
->irq_tbl
[map_idx
].vector
);
4128 static void bnxt_enable_int(struct bnxt
*bp
)
4132 atomic_set(&bp
->intr_sem
, 0);
4133 for (i
= 0; i
< bp
->cp_nr_rings
; i
++) {
4134 struct bnxt_napi
*bnapi
= bp
->bnapi
[i
];
4135 struct bnxt_cp_ring_info
*cpr
= &bnapi
->cp_ring
;
4137 bnxt_db_nq_arm(bp
, &cpr
->cp_db
, cpr
->cp_raw_cons
);
void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
			    u16 cmpl_ring, u16 target_id)
{
	struct input *req = request;

	req->req_type = cpu_to_le16(req_type);
	req->cmpl_ring = cpu_to_le16(cmpl_ring);
	req->target_id = cpu_to_le16(target_id);
	if (bnxt_kong_hwrm_message(bp, req))
		req->resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr);
	else
		req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
}
4155 static int bnxt_hwrm_to_stderr(u32 hwrm_err
)
4158 case HWRM_ERR_CODE_SUCCESS
:
4160 case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED
:
4162 case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR
:
4164 case HWRM_ERR_CODE_INVALID_PARAMS
:
4165 case HWRM_ERR_CODE_INVALID_FLAGS
:
4166 case HWRM_ERR_CODE_INVALID_ENABLES
:
4167 case HWRM_ERR_CODE_UNSUPPORTED_TLV
:
4168 case HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR
:
4170 case HWRM_ERR_CODE_NO_BUFFER
:
4172 case HWRM_ERR_CODE_HOT_RESET_PROGRESS
:
4174 case HWRM_ERR_CODE_CMD_NOT_SUPPORTED
:
4181 static int bnxt_hwrm_do_send_msg(struct bnxt
*bp
, void *msg
, u32 msg_len
,
4182 int timeout
, bool silent
)
4184 int i
, intr_process
, rc
, tmo_count
;
4185 struct input
*req
= msg
;
4189 u16 cp_ring_id
, len
= 0;
4190 struct hwrm_err_output
*resp
= bp
->hwrm_cmd_resp_addr
;
4191 u16 max_req_len
= BNXT_HWRM_MAX_REQ_LEN
;
4192 struct hwrm_short_input short_input
= {0};
4193 u32 doorbell_offset
= BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER
;
4194 u8
*resp_addr
= (u8
*)bp
->hwrm_cmd_resp_addr
;
4195 u32 bar_offset
= BNXT_GRCPF_REG_CHIMP_COMM
;
4196 u16 dst
= BNXT_HWRM_CHNL_CHIMP
;
4198 if (test_bit(BNXT_STATE_FW_FATAL_COND
, &bp
->state
))
4201 if (msg_len
> BNXT_HWRM_MAX_REQ_LEN
) {
4202 if (msg_len
> bp
->hwrm_max_ext_req_len
||
4203 !bp
->hwrm_short_cmd_req_addr
)
4207 if (bnxt_hwrm_kong_chnl(bp
, req
)) {
4208 dst
= BNXT_HWRM_CHNL_KONG
;
4209 bar_offset
= BNXT_GRCPF_REG_KONG_COMM
;
4210 doorbell_offset
= BNXT_GRCPF_REG_KONG_COMM_TRIGGER
;
4211 resp
= bp
->hwrm_cmd_kong_resp_addr
;
4212 resp_addr
= (u8
*)bp
->hwrm_cmd_kong_resp_addr
;
4215 memset(resp
, 0, PAGE_SIZE
);
4216 cp_ring_id
= le16_to_cpu(req
->cmpl_ring
);
4217 intr_process
= (cp_ring_id
== INVALID_HW_RING_ID
) ? 0 : 1;
4219 req
->seq_id
= cpu_to_le16(bnxt_get_hwrm_seq_id(bp
, dst
));
4220 /* currently supports only one outstanding message */
4222 bp
->hwrm_intr_seq_id
= le16_to_cpu(req
->seq_id
);
4224 if ((bp
->fw_cap
& BNXT_FW_CAP_SHORT_CMD
) ||
4225 msg_len
> BNXT_HWRM_MAX_REQ_LEN
) {
4226 void *short_cmd_req
= bp
->hwrm_short_cmd_req_addr
;
4229 /* Set boundary for maximum extended request length for short
4230 * cmd format. If passed up from device use the max supported
4231 * internal req length.
4233 max_msg_len
= bp
->hwrm_max_ext_req_len
;
4235 memcpy(short_cmd_req
, req
, msg_len
);
4236 if (msg_len
< max_msg_len
)
4237 memset(short_cmd_req
+ msg_len
, 0,
4238 max_msg_len
- msg_len
);
4240 short_input
.req_type
= req
->req_type
;
4241 short_input
.signature
=
4242 cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD
);
4243 short_input
.size
= cpu_to_le16(msg_len
);
4244 short_input
.req_addr
=
4245 cpu_to_le64(bp
->hwrm_short_cmd_req_dma_addr
);
4247 data
= (u32
*)&short_input
;
4248 msg_len
= sizeof(short_input
);
4250 /* Sync memory write before updating doorbell */
4253 max_req_len
= BNXT_HWRM_SHORT_REQ_LEN
;
4256 /* Write request msg to hwrm channel */
4257 __iowrite32_copy(bp
->bar0
+ bar_offset
, data
, msg_len
/ 4);
4259 for (i
= msg_len
; i
< max_req_len
; i
+= 4)
4260 writel(0, bp
->bar0
+ bar_offset
+ i
);
4262 /* Ring channel doorbell */
4263 writel(1, bp
->bar0
+ doorbell_offset
);
4265 if (!pci_is_enabled(bp
->pdev
))
4269 timeout
= DFLT_HWRM_CMD_TIMEOUT
;
4270 /* convert timeout to usec */
4274 /* Short timeout for the first few iterations:
4275 * number of loops = number of loops for short timeout +
4276 * number of loops for standard timeout.
4278 tmo_count
= HWRM_SHORT_TIMEOUT_COUNTER
;
4279 timeout
= timeout
- HWRM_SHORT_MIN_TIMEOUT
* HWRM_SHORT_TIMEOUT_COUNTER
;
4280 tmo_count
+= DIV_ROUND_UP(timeout
, HWRM_MIN_TIMEOUT
);
4281 resp_len
= (__le32
*)(resp_addr
+ HWRM_RESP_LEN_OFFSET
);
4284 u16 seq_id
= bp
->hwrm_intr_seq_id
;
4286 /* Wait until hwrm response cmpl interrupt is processed */
4287 while (bp
->hwrm_intr_seq_id
!= (u16
)~seq_id
&&
4289 /* Abort the wait for completion if the FW health
4292 if (test_bit(BNXT_STATE_FW_FATAL_COND
, &bp
->state
))
4294 /* on first few passes, just barely sleep */
4295 if (i
< HWRM_SHORT_TIMEOUT_COUNTER
)
4296 usleep_range(HWRM_SHORT_MIN_TIMEOUT
,
4297 HWRM_SHORT_MAX_TIMEOUT
);
4299 usleep_range(HWRM_MIN_TIMEOUT
,
4303 if (bp
->hwrm_intr_seq_id
!= (u16
)~seq_id
) {
4305 netdev_err(bp
->dev
, "Resp cmpl intr err msg: 0x%x\n",
4306 le16_to_cpu(req
->req_type
));
4309 len
= (le32_to_cpu(*resp_len
) & HWRM_RESP_LEN_MASK
) >>
4311 valid
= resp_addr
+ len
- 1;
4315 /* Check if response len is updated */
4316 for (i
= 0; i
< tmo_count
; i
++) {
4317 /* Abort the wait for completion if the FW health
4320 if (test_bit(BNXT_STATE_FW_FATAL_COND
, &bp
->state
))
4322 len
= (le32_to_cpu(*resp_len
) & HWRM_RESP_LEN_MASK
) >>
4326 /* on first few passes, just barely sleep */
4327 if (i
< HWRM_SHORT_TIMEOUT_COUNTER
)
4328 usleep_range(HWRM_SHORT_MIN_TIMEOUT
,
4329 HWRM_SHORT_MAX_TIMEOUT
);
4331 usleep_range(HWRM_MIN_TIMEOUT
,
4335 if (i
>= tmo_count
) {
4337 netdev_err(bp
->dev
, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
4338 HWRM_TOTAL_TIMEOUT(i
),
4339 le16_to_cpu(req
->req_type
),
4340 le16_to_cpu(req
->seq_id
), len
);
4344 /* Last byte of resp contains valid bit */
4345 valid
= resp_addr
+ len
- 1;
4346 for (j
= 0; j
< HWRM_VALID_BIT_DELAY_USEC
; j
++) {
4347 /* make sure we read from updated DMA memory */
4354 if (j
>= HWRM_VALID_BIT_DELAY_USEC
) {
4356 netdev_err(bp
->dev
, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
4357 HWRM_TOTAL_TIMEOUT(i
),
4358 le16_to_cpu(req
->req_type
),
4359 le16_to_cpu(req
->seq_id
), len
,
4365 /* Zero valid bit for compatibility. Valid bit in an older spec
4366 * may become a new field in a newer spec. We must make sure that
4367 * a new field not implemented by old spec will read zero.
4370 rc
= le16_to_cpu(resp
->error_code
);
4372 netdev_err(bp
->dev
, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
4373 le16_to_cpu(resp
->req_type
),
4374 le16_to_cpu(resp
->seq_id
), rc
);
4375 return bnxt_hwrm_to_stderr(rc
);
int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
{
	return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
}

int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
			      int timeout)
{
	return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
}

int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
{
	int rc;

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, msg, msg_len, timeout);
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
			     int timeout)
{
	int rc;

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
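
/* Register the driver with firmware: advertises OS type, driver version,
 * the async events we want forwarded and, on the PF, the VF commands we are
 * willing to forward.
 */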
4410 int bnxt_hwrm_func_drv_rgtr(struct bnxt
*bp
, unsigned long *bmap
, int bmap_size
,
4413 struct hwrm_func_drv_rgtr_output
*resp
= bp
->hwrm_cmd_resp_addr
;
4414 struct hwrm_func_drv_rgtr_input req
= {0};
4415 DECLARE_BITMAP(async_events_bmap
, 256);
4416 u32
*events
= (u32
*)async_events_bmap
;
4420 bnxt_hwrm_cmd_hdr_init(bp
, &req
, HWRM_FUNC_DRV_RGTR
, -1, -1);
4423 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE
|
4424 FUNC_DRV_RGTR_REQ_ENABLES_VER
|
4425 FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD
);
4427 req
.os_type
= cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX
);
4428 flags
= FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE
;
4429 if (bp
->fw_cap
& BNXT_FW_CAP_HOT_RESET
)
4430 flags
|= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT
;
4431 if (bp
->fw_cap
& BNXT_FW_CAP_ERROR_RECOVERY
)
4432 flags
|= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT
|
4433 FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT
;
4434 req
.flags
= cpu_to_le32(flags
);
4435 req
.ver_maj_8b
= DRV_VER_MAJ
;
4436 req
.ver_min_8b
= DRV_VER_MIN
;
4437 req
.ver_upd_8b
= DRV_VER_UPD
;
4438 req
.ver_maj
= cpu_to_le16(DRV_VER_MAJ
);
4439 req
.ver_min
= cpu_to_le16(DRV_VER_MIN
);
4440 req
.ver_upd
= cpu_to_le16(DRV_VER_UPD
);
4446 memset(data
, 0, sizeof(data
));
4447 for (i
= 0; i
< ARRAY_SIZE(bnxt_vf_req_snif
); i
++) {
4448 u16 cmd
= bnxt_vf_req_snif
[i
];
4449 unsigned int bit
, idx
;
4453 data
[idx
] |= 1 << bit
;
4456 for (i
= 0; i
< 8; i
++)
4457 req
.vf_req_fwd
[i
] = cpu_to_le32(data
[i
]);
4460 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD
);
4463 if (bp
->fw_cap
& BNXT_FW_CAP_OVS_64BIT_HANDLE
)
4464 req
.flags
|= cpu_to_le32(
4465 FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE
);
4467 memset(async_events_bmap
, 0, sizeof(async_events_bmap
));
4468 for (i
= 0; i
< ARRAY_SIZE(bnxt_async_events_arr
); i
++) {
4469 u16 event_id
= bnxt_async_events_arr
[i
];
4471 if (event_id
== ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY
&&
4472 !(bp
->fw_cap
& BNXT_FW_CAP_ERROR_RECOVERY
))
4474 __set_bit(bnxt_async_events_arr
[i
], async_events_bmap
);
4476 if (bmap
&& bmap_size
) {
4477 for (i
= 0; i
< bmap_size
; i
++) {
4478 if (test_bit(i
, bmap
))
4479 __set_bit(i
, async_events_bmap
);
4482 for (i
= 0; i
< 8; i
++)
4483 req
.async_event_fwd
[i
] |= cpu_to_le32(events
[i
]);
4487 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD
);
4489 mutex_lock(&bp
->hwrm_cmd_lock
);
4490 rc
= _hwrm_send_message(bp
, &req
, sizeof(req
), HWRM_CMD_TIMEOUT
);
4492 set_bit(BNXT_STATE_DRV_REGISTERED
, &bp
->state
);
4494 cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED
))
4495 bp
->fw_cap
|= BNXT_FW_CAP_IF_CHANGE
;
4497 mutex_unlock(&bp
->hwrm_cmd_lock
);
static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
{
	struct hwrm_func_drv_unrgtr_input req = {0};

	if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state))
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
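
/* UDP tunnel (VXLAN/GENEVE) destination port bookkeeping: the free/alloc
 * pair below keeps the firmware's tunnel port table in sync with the stack.
 */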
static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
{
	int rc;
	struct hwrm_tunnel_dst_port_free_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1);
	req.tunnel_type = tunnel_type;

	switch (tunnel_type) {
	case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
		req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id;
		break;
	case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
		req.tunnel_dst_port_id = bp->nge_fw_dst_port_id;
		break;
	default:
		break;
	}

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
			   rc);
	return rc;
}
static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
					   u8 tunnel_type)
{
	struct hwrm_tunnel_dst_port_alloc_input req = {0};
	struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1);

	req.tunnel_type = tunnel_type;
	req.tunnel_dst_port_val = port;

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc) {
		netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
			   rc);
		goto err_out;
	}

	switch (tunnel_type) {
	case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
		bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
		break;
	case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
		bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id;
		break;
	default:
		break;
	}

err_out:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
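
/* Program the receive filter mask for one VNIC: the multicast list (entry
 * count plus the DMA address of the table) and the rx_mask bits that select
 * broadcast/all-multicast/promiscuous behavior are all carried in a single
 * HWRM_CFA_L2_SET_RX_MASK request.
 */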
static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
{
	struct hwrm_cfa_l2_set_rx_mask_input req = {0};
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
	req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);

	req.num_mc_entries = cpu_to_le32(vnic->mc_list_count);
	req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
	req.mask = cpu_to_le32(vnic->rx_mask);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
#ifdef CONFIG_RFS_ACCEL
static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
					    struct bnxt_ntuple_filter *fltr)
{
	struct hwrm_cfa_ntuple_filter_free_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1);
	req.ntuple_filter_id = fltr->filter_id;
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
#define BNXT_NTP_FLTR_FLAGS					\
	(CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)

#define BNXT_NTP_TUNNEL_FLTR_FLAG				\
		CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
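
/* Build an aRFS ntuple filter from the flow_keys extracted by the stack.
 * The destination is either the RX ring index directly (firmware advertising
 * BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) or the VNIC wrapping that ring
 * (vnic_info[rxq + 1]).  All address and port masks are fully specified,
 * so the filter matches one exact 5-tuple.
 */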
static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
					     struct bnxt_ntuple_filter *fltr)
{
	struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
	struct hwrm_cfa_ntuple_filter_alloc_output *resp;
	struct flow_keys *keys = &fltr->fkeys;
	struct bnxt_vnic_info *vnic;
	u32 flags = 0;
	int rc = 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
	req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];

	if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
		flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
		req.dst_id = cpu_to_le16(fltr->rxq);
	} else {
		vnic = &bp->vnic_info[fltr->rxq + 1];
		req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
	}
	req.flags = cpu_to_le32(flags);
	req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);

	req.ethertype = htons(ETH_P_IP);
	memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
	req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
	req.ip_protocol = keys->basic.ip_proto;

	if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
		int i;

		req.ethertype = htons(ETH_P_IPV6);
		req.ip_addr_type =
			CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
		*(struct in6_addr *)&req.src_ipaddr[0] =
			keys->addrs.v6addrs.src;
		*(struct in6_addr *)&req.dst_ipaddr[0] =
			keys->addrs.v6addrs.dst;
		for (i = 0; i < 4; i++) {
			req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
			req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
		}
	} else {
		req.src_ipaddr[0] = keys->addrs.v4addrs.src;
		req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
		req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
		req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
	}
	if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
		req.enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
		req.tunnel_type =
			CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
	}

	req.src_port = keys->ports.src;
	req.src_port_mask = cpu_to_be16(0xffff);
	req.dst_port = keys->ports.dst;
	req.dst_port_mask = cpu_to_be16(0xffff);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		resp = bnxt_get_hwrm_resp_addr(bp, &req);
		fltr->filter_id = resp->ntuple_filter_id;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
#endif /* CONFIG_RFS_ACCEL */
static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
				     u8 *mac_addr)
{
	struct hwrm_cfa_l2_filter_alloc_input req = {0};
	struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
	req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
	if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
		req.flags |=
			cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
	req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
	req.enables =
		cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
			    CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
			    CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
	memcpy(req.l2_addr, mac_addr, ETH_ALEN);
	req.l2_addr_mask[0] = 0xff;
	req.l2_addr_mask[1] = 0xff;
	req.l2_addr_mask[2] = 0xff;
	req.l2_addr_mask[3] = 0xff;
	req.l2_addr_mask[4] = 0xff;
	req.l2_addr_mask[5] = 0xff;

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
			resp->l2_filter_id;
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
{
	u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
	int rc = 0;

	/* Any associated ntuple filters will also be cleared by firmware. */
	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < num_of_vnics; i++) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		for (j = 0; j < vnic->uc_filter_count; j++) {
			struct hwrm_cfa_l2_filter_free_input req = {0};

			bnxt_hwrm_cmd_hdr_init(bp, &req,
					       HWRM_CFA_L2_FILTER_FREE, -1, -1);

			req.l2_filter_id = vnic->fw_l2_filter_id[j];

			rc = _hwrm_send_message(bp, &req, sizeof(req),
						HWRM_CMD_TIMEOUT);
		}
		vnic->uc_filter_count = 0;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);

	return rc;
}
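
/* Configure hardware TPA (LRO / hardware GRO aggregation) for a VNIC.
 * max_agg_segs is expressed in log2 units and excludes the first packet.
 * As a rough example (assuming a 4 KB BNXT_RX_PAGE_SIZE and the common
 * MAX_SKB_FRAGS value of 17): a 1500-byte MTU gives mss = 1460,
 * n = 4096 / 1460 = 2, nsegs = (17 - 1) * 2 = 32, and ilog2(32) = 5 is
 * what gets sent to the firmware.  P5 chips instead use the fixed
 * MAX_TPA_SEGS_P5 and the queried bp->max_tpa limit.
 */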
static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
{
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
	u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
	struct hwrm_vnic_tpa_cfg_input req = {0};

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);

	if (tpa_flags) {
		u16 mss = bp->dev->mtu - 40;
		u32 nsegs, n, segs = 0, flags;

		flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
			VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
			VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
			VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
			VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
		if (tpa_flags & BNXT_FLAG_GRO)
			flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;

		req.flags = cpu_to_le32(flags);

		req.enables =
			cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
				    VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
				    VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);

		/* Number of segs are log2 units, and first packet is not
		 * included as part of this units.
		 */
		if (mss <= BNXT_RX_PAGE_SIZE) {
			n = BNXT_RX_PAGE_SIZE / mss;
			nsegs = (MAX_SKB_FRAGS - 1) * n;
		} else {
			n = mss / BNXT_RX_PAGE_SIZE;
			if (mss & (BNXT_RX_PAGE_SIZE - 1))
				n++;
			nsegs = (MAX_SKB_FRAGS - n) / n;
		}

		if (bp->flags & BNXT_FLAG_CHIP_P5) {
			segs = MAX_TPA_SEGS_P5;
			max_aggs = bp->max_tpa;
		} else {
			segs = ilog2(nsegs);
		}
		req.max_agg_segs = cpu_to_le16(segs);
		req.max_aggs = cpu_to_le16(max_aggs);

		req.min_agg_len = cpu_to_le32(512);
	}
	req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);

	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
{
	struct bnxt_ring_grp_info *grp_info;

	grp_info = &bp->grp_info[ring->grp_idx];
	return grp_info->cp_fw_ring_id;
}

static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		struct bnxt_napi *bnapi = rxr->bnapi;
		struct bnxt_cp_ring_info *cpr;

		cpr = bnapi->cp_ring.cp_ring_arr[BNXT_RX_HDL];
		return cpr->cp_ring_struct.fw_ring_id;
	} else {
		return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
	}
}

static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		struct bnxt_napi *bnapi = txr->bnapi;
		struct bnxt_cp_ring_info *cpr;

		cpr = bnapi->cp_ring.cp_ring_arr[BNXT_TX_HDL];
		return cpr->cp_ring_struct.fw_ring_id;
	} else {
		return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
	}
}
static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
{
	u32 i, j, max_rings;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
	struct hwrm_vnic_rss_cfg_input req = {0};

	if ((bp->flags & BNXT_FLAG_CHIP_P5) ||
	    vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
	if (set_rss) {
		req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
		req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
		if (vnic->flags & BNXT_VNIC_RSS_FLAG) {
			if (BNXT_CHIP_TYPE_NITRO_A0(bp))
				max_rings = bp->rx_nr_rings - 1;
			else
				max_rings = bp->rx_nr_rings;
		} else {
			max_rings = 1;
		}

		/* Fill the RSS indirection table with ring group ids */
		for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) {
			if (j == max_rings)
				j = 0;
			vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
		}

		req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
		req.hash_key_tbl_addr =
			cpu_to_le64(vnic->rss_hash_key_dma_addr);
	}
	req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
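
/* P5 (57500-series) RSS programming: the indirection table is written as
 * (rx ring id, completion ring id) pairs, 64 pairs per RSS context, so the
 * number of contexts is DIV_ROUND_UP(rx rings, 64) -- e.g. 100 RX rings
 * need two contexts.  When set_rss is false only the vnic_id is sent,
 * which effectively disables RSS for that VNIC.
 */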
static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
{
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
	u32 i, j, k, nr_ctxs, max_rings = bp->rx_nr_rings;
	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
	struct hwrm_vnic_rss_cfg_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
	req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
	if (!set_rss) {
		hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
		return 0;
	}
	req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
	req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
	req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
	req.hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
	nr_ctxs = DIV_ROUND_UP(bp->rx_nr_rings, 64);
	for (i = 0, k = 0; i < nr_ctxs; i++) {
		__le16 *ring_tbl = vnic->rss_table;
		int rc;

		req.ring_table_pair_index = i;
		req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
		for (j = 0; j < 64; j++) {
			u16 ring_id;

			ring_id = rxr->rx_ring_struct.fw_ring_id;
			*ring_tbl++ = cpu_to_le16(ring_id);
			ring_id = bnxt_cp_ring_for_rx(bp, rxr);
			*ring_tbl++ = cpu_to_le16(ring_id);
			rxr++;
			k++;
			if (k == max_rings) {
				k = 0;
				rxr = &bp->rx_ring[0];
			}
		}
		rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
		if (rc)
			return rc;
	}
	return 0;
}
static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
{
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
	struct hwrm_vnic_plcmodes_cfg_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
	req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
				VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
				VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
	req.enables =
		cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
			    VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
	/* thresholds not implemented in firmware yet */
	req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
	req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
	req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
					u16 ctx_idx)
{
	struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
	req.rss_cos_lb_ctx_id =
		cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);

	hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
}

static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
{
	int i, j;

	for (i = 0; i < bp->nr_vnics; i++) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
			if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
				bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
		}
	}
	bp->rsscos_nr_ctxs = 0;
}
static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
{
	int rc;
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
						bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1,
			       -1);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
			le16_to_cpu(resp->rss_cos_lb_ctx_id);
	mutex_unlock(&bp->hwrm_cmd_lock);

	return rc;
}
static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
{
	if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
		return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE;
	return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
}
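
/* Configure a VNIC.  On P5 chips only the default RX/completion ring ids
 * need to be programmed; on older chips the default ring group, the
 * RSS/COS rules and VLAN strip mode are selected here as well.  The MRU is
 * sized from the current MTU plus Ethernet/FCS/VLAN overhead.
 */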
int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
{
	unsigned int ring = 0, grp_idx;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
	struct hwrm_vnic_cfg_input req = {0};
	u16 def_vlan = 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);

	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];

		req.default_rx_ring_id =
			cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
		req.default_cmpl_ring_id =
			cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
		req.enables =
			cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
				    VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
		goto vnic_mru;
	}
	req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
	/* Only RSS support for now TBD: COS & LB */
	if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
		req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
		req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
					   VNIC_CFG_REQ_ENABLES_MRU);
	} else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
		req.rss_rule =
			cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
		req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
					   VNIC_CFG_REQ_ENABLES_MRU);
		req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
	} else {
		req.rss_rule = cpu_to_le16(0xffff);
	}

	if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
	    (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
		req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
		req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
	} else {
		req.cos_rule = cpu_to_le16(0xffff);
	}

	if (vnic->flags & BNXT_VNIC_RSS_FLAG)
		ring = 0;
	else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
		ring = vnic_id - 1;
	else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
		ring = bp->rx_nr_rings - 1;

	grp_idx = bp->rx_ring[ring].bnapi->index;
	req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
	req.lb_rule = cpu_to_le16(0xffff);
vnic_mru:
	req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN +
			      VLAN_HLEN);

	req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
#ifdef CONFIG_BNXT_SRIOV
	if (BNXT_VF(bp))
		def_vlan = bp->vf.vlan;
#endif
	if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
		req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
	if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
		req.flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));

	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
{
	int rc = 0;

	if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
		struct hwrm_vnic_free_input req = {0};

		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1);
		req.vnic_id =
			cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);

		rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
		bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
	}
	return rc;
}

static void bnxt_hwrm_vnic_free(struct bnxt *bp)
{
	u16 i;

	for (i = 0; i < bp->nr_vnics; i++)
		bnxt_hwrm_vnic_free_one(bp, i);
}
static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
				unsigned int start_rx_ring_idx,
				unsigned int nr_rings)
{
	int rc = 0;
	unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
	struct hwrm_vnic_alloc_input req = {0};
	struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];

	if (bp->flags & BNXT_FLAG_CHIP_P5)
		goto vnic_no_ring_grps;

	/* map ring groups to this vnic */
	for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
		grp_idx = bp->rx_ring[i].bnapi->index;
		if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
			netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
				   j, nr_rings);
			break;
		}
		vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
	}

vnic_no_ring_grps:
	for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
		vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
	if (vnic_id == 0)
		req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
{
	struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_vnic_qcaps_input req = {0};
	int rc;

	bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
	bp->flags &= ~(BNXT_FLAG_NEW_RSS_CAP | BNXT_FLAG_ROCE_MIRROR_CAP);
	if (bp->hwrm_spec_code < 0x10600)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		u32 flags = le32_to_cpu(resp->flags);

		if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
		    (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
			bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
		if (flags &
		    VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
			bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
		bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
		if (bp->max_tpa_v2)
			bp->hw_ring_stats_size =
				sizeof(struct ctx_hw_stats_ext);
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
{
	u16 i;
	int rc = 0;

	if (bp->flags & BNXT_FLAG_CHIP_P5)
		return 0;

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct hwrm_ring_grp_alloc_input req = {0};
		struct hwrm_ring_grp_alloc_output *resp =
					bp->hwrm_cmd_resp_addr;
		unsigned int grp_idx = bp->rx_ring[i].bnapi->index;

		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1);

		req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
		req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
		req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
		req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);

		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;

		bp->grp_info[grp_idx].fw_grp_id =
			le32_to_cpu(resp->ring_group_id);
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
static int bnxt_hwrm_ring_grp_free(struct bnxt *bp)
{
	u16 i;
	int rc = 0;
	struct hwrm_ring_grp_free_input req = {0};

	if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5))
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
			continue;
		req.ring_group_id =
			cpu_to_le32(bp->grp_info[i].fw_grp_id);

		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
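
/* Common helper that allocates one hardware ring of any type (TX, RX, RX
 * aggregation, L2 completion or NQ) via HWRM_RING_ALLOC.  Rings that span
 * more than one page pass a page table; single-page rings pass the DMA
 * address of the page directly.  The firmware-assigned ring id is stored
 * in ring->fw_ring_id on success.
 */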
static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
				    struct bnxt_ring_struct *ring,
				    u32 ring_type, u32 map_index)
{
	int rc = 0, err = 0;
	struct hwrm_ring_alloc_input req = {0};
	struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
	struct bnxt_ring_grp_info *grp_info;
	u16 ring_id;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);

	if (rmem->nr_pages > 1) {
		req.page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
		/* Page size is in log2 units */
		req.page_size = BNXT_PAGE_SHIFT;
		req.page_tbl_depth = 1;
	} else {
		req.page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]);
	}
	/* Association of ring index with doorbell index and MSIX number */
	req.logical_id = cpu_to_le16(map_index);

	switch (ring_type) {
	case HWRM_RING_ALLOC_TX: {
		struct bnxt_tx_ring_info *txr;

		txr = container_of(ring, struct bnxt_tx_ring_info,
				   tx_ring_struct);
		req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
		/* Association of transmit ring with completion ring */
		grp_info = &bp->grp_info[ring->grp_idx];
		req.cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
		req.length = cpu_to_le32(bp->tx_ring_mask + 1);
		req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
		req.queue_id = cpu_to_le16(ring->queue_id);
		break;
	}
	case HWRM_RING_ALLOC_RX:
		req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
		req.length = cpu_to_le32(bp->rx_ring_mask + 1);
		if (bp->flags & BNXT_FLAG_CHIP_P5) {
			u16 flags = 0;

			/* Association of rx ring with stats context */
			grp_info = &bp->grp_info[ring->grp_idx];
			req.rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
			req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
			req.enables |= cpu_to_le32(
				RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
			if (NET_IP_ALIGN == 2)
				flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
			req.flags = cpu_to_le16(flags);
		}
		break;
	case HWRM_RING_ALLOC_AGG:
		if (bp->flags & BNXT_FLAG_CHIP_P5) {
			req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
			/* Association of agg ring with rx ring */
			grp_info = &bp->grp_info[ring->grp_idx];
			req.rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
			req.rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
			req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
			req.enables |= cpu_to_le32(
				RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
				RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
		} else {
			req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
		}
		req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
		break;
	case HWRM_RING_ALLOC_CMPL:
		req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
		req.length = cpu_to_le32(bp->cp_ring_mask + 1);
		if (bp->flags & BNXT_FLAG_CHIP_P5) {
			/* Association of cp ring with nq */
			grp_info = &bp->grp_info[map_index];
			req.nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
			req.cq_handle = cpu_to_le64(ring->handle);
			req.enables |= cpu_to_le32(
				RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
		} else if (bp->flags & BNXT_FLAG_USING_MSIX) {
			req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
		}
		break;
	case HWRM_RING_ALLOC_NQ:
		req.ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
		req.length = cpu_to_le32(bp->cp_ring_mask + 1);
		if (bp->flags & BNXT_FLAG_USING_MSIX)
			req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
		break;
	default:
		netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
			   ring_type);
		return -1;
	}

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	err = le16_to_cpu(resp->error_code);
	ring_id = le16_to_cpu(resp->ring_id);
	mutex_unlock(&bp->hwrm_cmd_lock);

	if (rc || err) {
		netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
			   ring_type, rc, err);
		return -EIO;
	}
	ring->fw_ring_id = ring_id;
	return rc;
}
static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
{
	int rc;

	if (BNXT_PF(bp)) {
		struct hwrm_func_cfg_input req = {0};

		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
		req.fid = cpu_to_le16(0xffff);
		req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
		req.async_event_cr = cpu_to_le16(idx);
		rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	} else {
		struct hwrm_func_vf_cfg_input req = {0};

		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
		req.enables =
			cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
		req.async_event_cr = cpu_to_le16(idx);
		rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	}
	return rc;
}
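
/* Record the doorbell address and key for a newly allocated ring.  P5
 * chips use 64-bit doorbells at a fixed BAR offset with the ring type and
 * the firmware ring id (xid) encoded in the key; legacy chips use 32-bit
 * doorbells spaced 0x80 apart per MSI-X vector.
 */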
static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
			u32 map_idx, u32 xid)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		if (BNXT_PF(bp))
			db->doorbell = bp->bar1 + 0x10000;
		else
			db->doorbell = bp->bar1 + 0x4000;
		switch (ring_type) {
		case HWRM_RING_ALLOC_TX:
			db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
			break;
		case HWRM_RING_ALLOC_RX:
		case HWRM_RING_ALLOC_AGG:
			db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
			break;
		case HWRM_RING_ALLOC_CMPL:
			db->db_key64 = DBR_PATH_L2;
			break;
		case HWRM_RING_ALLOC_NQ:
			db->db_key64 = DBR_PATH_L2;
			break;
		}
		db->db_key64 |= (u64)xid << DBR_XID_SFT;
	} else {
		db->doorbell = bp->bar1 + map_idx * 0x80;
		switch (ring_type) {
		case HWRM_RING_ALLOC_TX:
			db->db_key32 = DB_KEY_TX;
			break;
		case HWRM_RING_ALLOC_RX:
		case HWRM_RING_ALLOC_AGG:
			db->db_key32 = DB_KEY_RX;
			break;
		case HWRM_RING_ALLOC_CMPL:
			db->db_key32 = DB_KEY_CP;
			break;
		}
	}
}
static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
{
	bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
	int i, rc = 0;
	u32 type;

	if (bp->flags & BNXT_FLAG_CHIP_P5)
		type = HWRM_RING_ALLOC_NQ;
	else
		type = HWRM_RING_ALLOC_CMPL;
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
		u32 map_idx = ring->map_idx;
		unsigned int vector;

		vector = bp->irq_tbl[map_idx].vector;
		disable_irq_nosync(vector);
		rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
		if (rc) {
			enable_irq(vector);
			goto err_out;
		}
		bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
		bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
		enable_irq(vector);
		bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;

		if (!i) {
			rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
			if (rc)
				netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
		}
	}

	type = HWRM_RING_ALLOC_TX;
	for (i = 0; i < bp->tx_nr_rings; i++) {
		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
		struct bnxt_ring_struct *ring;
		u32 map_idx;

		if (bp->flags & BNXT_FLAG_CHIP_P5) {
			struct bnxt_napi *bnapi = txr->bnapi;
			struct bnxt_cp_ring_info *cpr, *cpr2;
			u32 type2 = HWRM_RING_ALLOC_CMPL;

			cpr = &bnapi->cp_ring;
			cpr2 = cpr->cp_ring_arr[BNXT_TX_HDL];
			ring = &cpr2->cp_ring_struct;
			ring->handle = BNXT_TX_HDL;
			map_idx = bnapi->index;
			rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
			if (rc)
				goto err_out;
			bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
				    ring->fw_ring_id);
			bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
		}
		ring = &txr->tx_ring_struct;
		map_idx = i;
		rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
		if (rc)
			goto err_out;
		bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
	}

	type = HWRM_RING_ALLOC_RX;
	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
		struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
		struct bnxt_napi *bnapi = rxr->bnapi;
		u32 map_idx = bnapi->index;

		rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
		if (rc)
			goto err_out;
		bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
		/* If we have agg rings, post agg buffers first. */
		if (!agg_rings)
			bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
		bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
		if (bp->flags & BNXT_FLAG_CHIP_P5) {
			struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
			u32 type2 = HWRM_RING_ALLOC_CMPL;
			struct bnxt_cp_ring_info *cpr2;

			cpr2 = cpr->cp_ring_arr[BNXT_RX_HDL];
			ring = &cpr2->cp_ring_struct;
			ring->handle = BNXT_RX_HDL;
			rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
			if (rc)
				goto err_out;
			bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
				    ring->fw_ring_id);
			bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
		}
	}

	if (agg_rings) {
		type = HWRM_RING_ALLOC_AGG;
		for (i = 0; i < bp->rx_nr_rings; i++) {
			struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
			struct bnxt_ring_struct *ring =
						&rxr->rx_agg_ring_struct;
			u32 grp_idx = ring->grp_idx;
			u32 map_idx = grp_idx + bp->rx_nr_rings;

			rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
			if (rc)
				goto err_out;

			bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
				    ring->fw_ring_id);
			bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
			bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
			bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
		}
	}
err_out:
	return rc;
}
static int hwrm_ring_free_send_msg(struct bnxt *bp,
				   struct bnxt_ring_struct *ring,
				   u32 ring_type, int cmpl_ring_id)
{
	int rc;
	struct hwrm_ring_free_input req = {0};
	struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
	u16 error_code;

	if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1);
	req.ring_type = ring_type;
	req.ring_id = cpu_to_le16(ring->fw_ring_id);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	error_code = le16_to_cpu(resp->error_code);
	mutex_unlock(&bp->hwrm_cmd_lock);

	if (rc || error_code) {
		netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
			   ring_type, rc, error_code);
		return -EIO;
	}
	return 0;
}
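
/* Free all rings in the reverse order of allocation: TX, RX and RX
 * aggregation rings first (passing the completion ring id so in-flight
 * completions can drain when close_path is set), then interrupts are
 * disabled and the completion/NQ rings themselves are freed.
 */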
static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
{
	u32 type;
	int i;

	if (!bp->bnapi)
		return;

	for (i = 0; i < bp->tx_nr_rings; i++) {
		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
		struct bnxt_ring_struct *ring = &txr->tx_ring_struct;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);

			hwrm_ring_free_send_msg(bp, ring,
						RING_FREE_REQ_RING_TYPE_TX,
						close_path ? cmpl_ring_id :
						INVALID_HW_RING_ID);
			ring->fw_ring_id = INVALID_HW_RING_ID;
		}
	}

	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
		struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
		u32 grp_idx = rxr->bnapi->index;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);

			hwrm_ring_free_send_msg(bp, ring,
						RING_FREE_REQ_RING_TYPE_RX,
						close_path ? cmpl_ring_id :
						INVALID_HW_RING_ID);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			bp->grp_info[grp_idx].rx_fw_ring_id =
				INVALID_HW_RING_ID;
		}
	}

	if (bp->flags & BNXT_FLAG_CHIP_P5)
		type = RING_FREE_REQ_RING_TYPE_RX_AGG;
	else
		type = RING_FREE_REQ_RING_TYPE_RX;
	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
		struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
		u32 grp_idx = rxr->bnapi->index;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);

			hwrm_ring_free_send_msg(bp, ring, type,
						close_path ? cmpl_ring_id :
						INVALID_HW_RING_ID);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			bp->grp_info[grp_idx].agg_fw_ring_id =
				INVALID_HW_RING_ID;
		}
	}

	/* The completion rings are about to be freed. After that the
	 * IRQ doorbell will not work anymore. So we need to disable
	 * IRQ here.
	 */
	bnxt_disable_int_sync(bp);

	if (bp->flags & BNXT_FLAG_CHIP_P5)
		type = RING_FREE_REQ_RING_TYPE_NQ;
	else
		type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
		struct bnxt_ring_struct *ring;
		int j;

		for (j = 0; j < 2; j++) {
			struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];

			if (cpr2) {
				ring = &cpr2->cp_ring_struct;
				if (ring->fw_ring_id == INVALID_HW_RING_ID)
					continue;
				hwrm_ring_free_send_msg(bp, ring,
					RING_FREE_REQ_RING_TYPE_L2_CMPL,
					INVALID_HW_RING_ID);
				ring->fw_ring_id = INVALID_HW_RING_ID;
			}
		}
		ring = &cpr->cp_ring_struct;
		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			hwrm_ring_free_send_msg(bp, ring, type,
						INVALID_HW_RING_ID);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
		}
	}
}
static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
			   bool shared);

static int bnxt_hwrm_get_rings(struct bnxt *bp)
{
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	struct hwrm_func_qcfg_input req = {0};
	int rc;

	if (bp->hwrm_spec_code < 0x10601)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
	req.fid = cpu_to_le16(0xffff);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc) {
		mutex_unlock(&bp->hwrm_cmd_lock);
		return rc;
	}

	hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
	if (BNXT_NEW_RM(bp)) {
		u16 cp, stats;

		hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
		hw_resc->resv_hw_ring_grps =
			le32_to_cpu(resp->alloc_hw_ring_grps);
		hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
		cp = le16_to_cpu(resp->alloc_cmpl_rings);
		stats = le16_to_cpu(resp->alloc_stat_ctx);
		hw_resc->resv_irqs = cp;
		if (bp->flags & BNXT_FLAG_CHIP_P5) {
			int rx = hw_resc->resv_rx_rings;
			int tx = hw_resc->resv_tx_rings;

			if (bp->flags & BNXT_FLAG_AGG_RINGS)
				rx >>= 1;
			if (cp < (rx + tx)) {
				bnxt_trim_rings(bp, &rx, &tx, cp, false);
				if (bp->flags & BNXT_FLAG_AGG_RINGS)
					rx <<= 1;
				hw_resc->resv_rx_rings = rx;
				hw_resc->resv_tx_rings = tx;
			}
			hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
			hw_resc->resv_hw_ring_grps = rx;
		}
		hw_resc->resv_cp_rings = cp;
		hw_resc->resv_stat_ctxs = stats;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return 0;
}
/* Caller must hold bp->hwrm_cmd_lock */
int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
{
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_qcfg_input req = {0};
	int rc;

	if (bp->hwrm_spec_code < 0x10601)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
	req.fid = cpu_to_le16(fid);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		*tx_rings = le16_to_cpu(resp->alloc_tx_rings);

	return rc;
}
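
/* The ring-reservation helpers below fill FUNC_CFG (PF) or FUNC_VF_CFG (VF)
 * requests with the ring/VNIC/stat-context counts to reserve.  On P5 chips
 * the completion ring count is tx_rings + ring_grps (one per TX ring plus
 * one per RX ring group) and NUM_MSIX carries the NQ count, while legacy
 * chips reserve cp_rings completion rings and explicit hardware ring
 * groups.
 */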
static bool bnxt_rfs_supported(struct bnxt *bp);

static void
__bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
			     int tx_rings, int rx_rings, int ring_grps,
			     int cp_rings, int stats, int vnics)
{
	u32 enables = 0;

	bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_CFG, -1, -1);
	req->fid = cpu_to_le16(0xffff);
	enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
	req->num_tx_rings = cpu_to_le16(tx_rings);
	if (BNXT_NEW_RM(bp)) {
		enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
		enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
		if (bp->flags & BNXT_FLAG_CHIP_P5) {
			enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
			enables |= tx_rings + ring_grps ?
				   FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
			enables |= rx_rings ?
				FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
		} else {
			enables |= cp_rings ?
				   FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
			enables |= ring_grps ?
				   FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
				   FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
		}
		enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;

		req->num_rx_rings = cpu_to_le16(rx_rings);
		if (bp->flags & BNXT_FLAG_CHIP_P5) {
			req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
			req->num_msix = cpu_to_le16(cp_rings);
			req->num_rsscos_ctxs =
				cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
		} else {
			req->num_cmpl_rings = cpu_to_le16(cp_rings);
			req->num_hw_ring_grps = cpu_to_le16(ring_grps);
			req->num_rsscos_ctxs = cpu_to_le16(1);
			if (!(bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
			    bnxt_rfs_supported(bp))
				req->num_rsscos_ctxs =
					cpu_to_le16(ring_grps + 1);
		}
		req->num_stat_ctxs = cpu_to_le16(stats);
		req->num_vnics = cpu_to_le16(vnics);
	}
	req->enables = cpu_to_le32(enables);
}
static void
__bnxt_hwrm_reserve_vf_rings(struct bnxt *bp,
			     struct hwrm_func_vf_cfg_input *req, int tx_rings,
			     int rx_rings, int ring_grps, int cp_rings,
			     int stats, int vnics)
{
	u32 enables = 0;

	bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_VF_CFG, -1, -1);
	enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
	enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
			      FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
	enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		enables |= tx_rings + ring_grps ?
			   FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
	} else {
		enables |= cp_rings ?
			   FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
		enables |= ring_grps ?
			   FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
	}
	enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
	enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;

	req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
	req->num_tx_rings = cpu_to_le16(tx_rings);
	req->num_rx_rings = cpu_to_le16(rx_rings);
	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
		req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
	} else {
		req->num_cmpl_rings = cpu_to_le16(cp_rings);
		req->num_hw_ring_grps = cpu_to_le16(ring_grps);
		req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
	}
	req->num_stat_ctxs = cpu_to_le16(stats);
	req->num_vnics = cpu_to_le16(vnics);

	req->enables = cpu_to_le32(enables);
}
static int
bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
			   int ring_grps, int cp_rings, int stats, int vnics)
{
	struct hwrm_func_cfg_input req = {0};
	int rc;

	__bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
				     cp_rings, stats, vnics);
	if (!req.enables)
		return 0;

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		return rc;

	if (bp->hwrm_spec_code < 0x10601)
		bp->hw_resc.resv_tx_rings = tx_rings;

	rc = bnxt_hwrm_get_rings(bp);
	return rc;
}
static int
bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
			   int ring_grps, int cp_rings, int stats, int vnics)
{
	struct hwrm_func_vf_cfg_input req = {0};
	int rc;

	if (!BNXT_NEW_RM(bp)) {
		bp->hw_resc.resv_tx_rings = tx_rings;
		return 0;
	}

	__bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
				     cp_rings, stats, vnics);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		return rc;

	rc = bnxt_hwrm_get_rings(bp);
	return rc;
}
static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
				   int cp, int stat, int vnic)
{
	if (BNXT_PF(bp))
		return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat,
						  vnic);
	else
		return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat,
						  vnic);
}
int bnxt_nq_rings_in_use(struct bnxt *bp)
{
	int cp = bp->cp_nr_rings;
	int ulp_msix, ulp_base;

	ulp_msix = bnxt_get_ulp_msix_num(bp);
	if (ulp_msix) {
		ulp_base = bnxt_get_ulp_msix_base(bp);
		cp += ulp_msix;
		if ((ulp_base + ulp_msix) > cp)
			cp = ulp_base + ulp_msix;
	}
	return cp;
}
static int bnxt_cp_rings_in_use(struct bnxt *bp)
{
	int cp;

	if (!(bp->flags & BNXT_FLAG_CHIP_P5))
		return bnxt_nq_rings_in_use(bp);

	cp = bp->tx_nr_rings + bp->rx_nr_rings;
	return cp;
}
static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
{
	int ulp_stat = bnxt_get_ulp_stat_ctxs(bp);
	int cp = bp->cp_nr_rings;

	if (!ulp_stat)
		return cp;

	if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp))
		return bnxt_get_ulp_msix_base(bp) + ulp_stat;

	return cp + ulp_stat;
}
static bool bnxt_need_reserve_rings(struct bnxt *bp)
{
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	int cp = bnxt_cp_rings_in_use(bp);
	int nq = bnxt_nq_rings_in_use(bp);
	int rx = bp->rx_nr_rings, stat;
	int vnic = 1, grp = rx;

	if (bp->hwrm_spec_code < 0x10601)
		return false;

	if (hw_resc->resv_tx_rings != bp->tx_nr_rings)
		return true;

	if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
		vnic = rx + 1;
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		rx <<= 1;
	stat = bnxt_get_func_stat_ctxs(bp);
	if (BNXT_NEW_RM(bp) &&
	    (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
	     hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
	     (hw_resc->resv_hw_ring_grps != grp &&
	      !(bp->flags & BNXT_FLAG_CHIP_P5))))
		return true;
	if ((bp->flags & BNXT_FLAG_CHIP_P5) && BNXT_PF(bp) &&
	    hw_resc->resv_irqs != nq)
		return true;
	return false;
}
static int __bnxt_reserve_rings(struct bnxt *bp)
{
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	int cp = bnxt_nq_rings_in_use(bp);
	int tx = bp->tx_nr_rings;
	int rx = bp->rx_nr_rings;
	int grp, rx_rings, rc;
	int vnic = 1, stat;
	bool sh = false;

	if (!bnxt_need_reserve_rings(bp))
		return 0;

	if (bp->flags & BNXT_FLAG_SHARED_RINGS)
		sh = true;
	if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
		vnic = rx + 1;
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		rx <<= 1;
	grp = bp->rx_nr_rings;
	stat = bnxt_get_func_stat_ctxs(bp);

	rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic);
	if (rc)
		return rc;

	tx = hw_resc->resv_tx_rings;
	if (BNXT_NEW_RM(bp)) {
		rx = hw_resc->resv_rx_rings;
		cp = hw_resc->resv_irqs;
		grp = hw_resc->resv_hw_ring_grps;
		vnic = hw_resc->resv_vnics;
		stat = hw_resc->resv_stat_ctxs;
	}

	rx_rings = rx;
	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
		if (rx >= 2) {
			rx_rings = rx >> 1;
		} else {
			if (netif_running(bp->dev))
				return -ENOMEM;

			bp->flags &= ~BNXT_FLAG_AGG_RINGS;
			bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
			bp->dev->hw_features &= ~NETIF_F_LRO;
			bp->dev->features &= ~NETIF_F_LRO;
			bnxt_set_ring_params(bp);
		}
	}
	rx_rings = min_t(int, rx_rings, grp);
	cp = min_t(int, cp, bp->cp_nr_rings);
	if (stat > bnxt_get_ulp_stat_ctxs(bp))
		stat -= bnxt_get_ulp_stat_ctxs(bp);
	cp = min_t(int, cp, stat);
	rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		rx = rx_rings << 1;
	cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings;
	bp->tx_nr_rings = tx;
	bp->rx_nr_rings = rx_rings;
	bp->cp_nr_rings = cp;

	if (!tx || !rx || !cp || !grp || !vnic || !stat)
		return -ENOMEM;

	return rc;
}
static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
				    int ring_grps, int cp_rings, int stats,
				    int vnics)
{
	struct hwrm_func_vf_cfg_input req = {0};
	u32 flags;
	int rc;

	if (!BNXT_NEW_RM(bp))
		return 0;

	__bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
				     cp_rings, stats, vnics);
	flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
		FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
		FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
		FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
		FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
		FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
	if (!(bp->flags & BNXT_FLAG_CHIP_P5))
		flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;

	req.flags = cpu_to_le32(flags);
	rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	return rc;
}
static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
				    int ring_grps, int cp_rings, int stats,
				    int vnics)
{
	struct hwrm_func_cfg_input req = {0};
	u32 flags;
	int rc;

	__bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
				     cp_rings, stats, vnics);
	flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
	if (BNXT_NEW_RM(bp)) {
		flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
			 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
			 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
			 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
		if (bp->flags & BNXT_FLAG_CHIP_P5)
			flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
				 FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
		else
			flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
	}

	req.flags = cpu_to_le32(flags);
	rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	return rc;
}
static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
				 int ring_grps, int cp_rings, int stats,
				 int vnics)
{
	if (bp->hwrm_spec_code < 0x10801)
		return 0;

	if (BNXT_PF(bp))
		return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
						ring_grps, cp_rings, stats,
						vnics);

	return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
					cp_rings, stats, vnics);
}
static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
{
	struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
	struct hwrm_ring_aggint_qcaps_input req = {0};
	int rc;

	coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
	coal_cap->num_cmpl_dma_aggr_max = 63;
	coal_cap->num_cmpl_dma_aggr_during_int_max = 63;
	coal_cap->cmpl_aggr_dma_tmr_max = 65535;
	coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535;
	coal_cap->int_lat_tmr_min_max = 65535;
	coal_cap->int_lat_tmr_max_max = 65535;
	coal_cap->num_cmpl_aggr_int_max = 65535;
	coal_cap->timer_units = 80;

	if (bp->hwrm_spec_code < 0x10902)
		return;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_AGGINT_QCAPS, -1, -1);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
		coal_cap->nq_params = le32_to_cpu(resp->nq_params);
		coal_cap->num_cmpl_dma_aggr_max =
			le16_to_cpu(resp->num_cmpl_dma_aggr_max);
		coal_cap->num_cmpl_dma_aggr_during_int_max =
			le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
		coal_cap->cmpl_aggr_dma_tmr_max =
			le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
		coal_cap->cmpl_aggr_dma_tmr_during_int_max =
			le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
		coal_cap->int_lat_tmr_min_max =
			le16_to_cpu(resp->int_lat_tmr_min_max);
		coal_cap->int_lat_tmr_max_max =
			le16_to_cpu(resp->int_lat_tmr_max_max);
		coal_cap->num_cmpl_aggr_int_max =
			le16_to_cpu(resp->num_cmpl_aggr_int_max);
		coal_cap->timer_units = le16_to_cpu(resp->timer_units);
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
}
static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
{
	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;

	return usec * 1000 / coal_cap->timer_units;
}
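
/* Convert the ethtool-style coalescing settings into firmware parameters.
 * Timers are expressed in coal_cap->timer_units nanoseconds (80 ns unless
 * the firmware reports otherwise), so bnxt_usec_to_coal_tmr() maps e.g. a
 * 25 usec tick to 25000 / 80 = 312 units.  Buffer counts are clamped to
 * the limits reported by HWRM_RING_AGGINT_QCAPS.
 */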
static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
				      struct bnxt_coal *hw_coal,
	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
{
	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
	u32 cmpl_params = coal_cap->cmpl_params;
	u16 val, tmr, max, flags = 0;

	max = hw_coal->bufs_per_record * 128;
	if (hw_coal->budget)
		max = hw_coal->bufs_per_record * hw_coal->budget;
	max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);

	val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
	req->num_cmpl_aggr_int = cpu_to_le16(val);

	val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
	req->num_cmpl_dma_aggr = cpu_to_le16(val);

	val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
		      coal_cap->num_cmpl_dma_aggr_during_int_max);
	req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);

	tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
	tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
	req->int_lat_tmr_max = cpu_to_le16(tmr);

	/* min timer set to 1/2 of interrupt timer */
	if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
		val = tmr / 2;
		val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
		req->int_lat_tmr_min = cpu_to_le16(val);
		req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
	}

	/* buf timer set to 1/4 of interrupt timer */
	val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
	req->cmpl_aggr_dma_tmr = cpu_to_le16(val);

	if (cmpl_params &
	    RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
		tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
		val = clamp_t(u16, tmr, 1,
			      coal_cap->cmpl_aggr_dma_tmr_during_int_max);
		req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
		req->enables |=
			cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
	}

	if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
		flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
	if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
	    hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
		flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
	req->flags = cpu_to_le16(flags);
	req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
}
/* Caller holds bp->hwrm_cmd_lock */
static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
				   struct bnxt_coal *hw_coal)
{
	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
	u32 nq_params = coal_cap->nq_params;
	u16 tmr;

	if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
			       -1, -1);
	req.ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
	req.flags =
		cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);

	tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
	tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
	req.int_lat_tmr_min = cpu_to_le16(tmr);
	req.enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
	return _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
{
	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0};
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_coal coal;

	/* Tick values in micro seconds.
	 * 1 coal_buf x bufs_per_record = 1 completion record.
	 */
	memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));

	coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
	coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;

	if (!bnapi->rx_ring)
		return -ENODEV;

	bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
			       HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);

	bnxt_hwrm_set_coal_params(bp, &coal, &req_rx);

	req_rx.ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));

	return hwrm_send_message(bp, &req_rx, sizeof(req_rx),
				 HWRM_CMD_TIMEOUT);
}
int bnxt_hwrm_set_coal(struct bnxt *bp)
{
	int i, rc = 0;
	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
							   req_tx = {0}, *req;

	bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
			       HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
	bnxt_hwrm_cmd_hdr_init(bp, &req_tx,
			       HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);

	bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, &req_rx);
	bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, &req_tx);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_coal *hw_coal;
		u16 ring_id;

		req = &req_rx;
		if (!bnapi->rx_ring) {
			ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
			req = &req_tx;
		} else {
			ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
		}
		req->ring_id = cpu_to_le16(ring_id);

		rc = _hwrm_send_message(bp, req, sizeof(*req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;

		if (!(bp->flags & BNXT_FLAG_CHIP_P5))
			continue;

		if (bnapi->rx_ring && bnapi->tx_ring) {
			req = &req_tx;
			ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
			req->ring_id = cpu_to_le16(ring_id);
			rc = _hwrm_send_message(bp, req, sizeof(*req),
						HWRM_CMD_TIMEOUT);
			if (rc)
				break;
		}
		if (bnapi->rx_ring)
			hw_coal = &bp->rx_coal;
		else
			hw_coal = &bp->tx_coal;
		__bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
{
	int rc = 0, i;
	struct hwrm_stat_ctx_free_input req = {0};

	if (!bp->bnapi)
		return 0;

	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;

		if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
			req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);

			rc = _hwrm_send_message(bp, &req, sizeof(req),
						HWRM_CMD_TIMEOUT);

			cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
		}
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
{
	int rc = 0, i;
	struct hwrm_stat_ctx_alloc_input req = {0};
	struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);

	req.stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
	req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;

		req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map);

		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;

		cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);

		bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
{
	struct hwrm_func_qcfg_input req = {0};
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	u16 flags;
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
	req.fid = cpu_to_le16(0xffff);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		goto func_qcfg_exit;

#ifdef CONFIG_BNXT_SRIOV
	if (BNXT_VF(bp)) {
		struct bnxt_vf_info *vf = &bp->vf;

		vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
	} else {
		bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
	}
#endif
	flags = le16_to_cpu(resp->flags);
	if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
		     FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
		bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
		if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
			bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
	}
	if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
		bp->flags |= BNXT_FLAG_MULTI_HOST;

	switch (resp->port_partition_type) {
	case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
	case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
	case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
		bp->port_partition_type = resp->port_partition_type;
		break;
	}
	if (bp->hwrm_spec_code < 0x10707 ||
	    resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
		bp->br_mode = BRIDGE_MODE_VEB;
	else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
		bp->br_mode = BRIDGE_MODE_VEPA;
	else
		bp->br_mode = BRIDGE_MODE_UNDEF;

	bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
	if (!bp->max_mtu)
		bp->max_mtu = BNXT_MAX_MTU;

func_qcfg_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
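
/* Query how much context backing store the chip needs in host memory:
 * entry counts and entry sizes for the QP, SRQ, CQ, VNIC, stats and TQM
 * contexts.  The results are cached in bp->ctx and later used to size and
 * program the backing store pages.
 */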
static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
{
	struct hwrm_func_backing_store_qcaps_input req = {0};
	struct hwrm_func_backing_store_qcaps_output *resp =
		bp->hwrm_cmd_resp_addr;
	int rc;

	if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_QCAPS, -1, -1);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		struct bnxt_ctx_pg_info *ctx_pg;
		struct bnxt_ctx_mem_info *ctx;
		int i;

		ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
		if (!ctx) {
			rc = -ENOMEM;
			goto ctx_err;
		}
		ctx_pg = kzalloc(sizeof(*ctx_pg) * (bp->max_q + 1), GFP_KERNEL);
		if (!ctx_pg) {
			kfree(ctx);
			rc = -ENOMEM;
			goto ctx_err;
		}
		for (i = 0; i < bp->max_q + 1; i++, ctx_pg++)
			ctx->tqm_mem[i] = ctx_pg;

		bp->ctx = ctx;
		ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries);
		ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
		ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
		ctx->qp_entry_size = le16_to_cpu(resp->qp_entry_size);
		ctx->srq_max_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
		ctx->srq_max_entries = le32_to_cpu(resp->srq_max_entries);
		ctx->srq_entry_size = le16_to_cpu(resp->srq_entry_size);
		ctx->cq_max_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
		ctx->cq_max_entries = le32_to_cpu(resp->cq_max_entries);
		ctx->cq_entry_size = le16_to_cpu(resp->cq_entry_size);
		ctx->vnic_max_vnic_entries =
			le16_to_cpu(resp->vnic_max_vnic_entries);
		ctx->vnic_max_ring_table_entries =
			le16_to_cpu(resp->vnic_max_ring_table_entries);
		ctx->vnic_entry_size = le16_to_cpu(resp->vnic_entry_size);
		ctx->stat_max_entries = le32_to_cpu(resp->stat_max_entries);
		ctx->stat_entry_size = le16_to_cpu(resp->stat_entry_size);
		ctx->tqm_entry_size = le16_to_cpu(resp->tqm_entry_size);
		ctx->tqm_min_entries_per_ring =
			le32_to_cpu(resp->tqm_min_entries_per_ring);
		ctx->tqm_max_entries_per_ring =
			le32_to_cpu(resp->tqm_max_entries_per_ring);
		ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
		if (!ctx->tqm_entries_multiple)
			ctx->tqm_entries_multiple = 1;
		ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries);
		ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size);
		ctx->mrav_num_entries_units =
			le16_to_cpu(resp->mrav_num_entries_units);
		ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
		ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
		ctx->ctx_kind_initializer = resp->ctx_kind_initializer;
	} else {
		rc = 0;
	}
ctx_err:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
				  __le64 *pg_dir)
{
	u8 pg_size = 0;

	if (BNXT_PAGE_SHIFT == 13)
		pg_size = 1 << 4;
	else if (BNXT_PAGE_SIZE == 16)
		pg_size = 2 << 4;

	*pg_attr = pg_size;
	if (rmem->depth >= 1) {
		if (rmem->depth == 2)
			*pg_attr |= 2;
		else
			*pg_attr |= 1;
		*pg_dir = cpu_to_le64(rmem->pg_tbl_map);
	} else {
		*pg_dir = cpu_to_le64(rmem->dma_arr[0]);
	}
}

#define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES			\
	(FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP |		\
	 FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ |		\
	 FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ |		\
	 FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC |		\
	 FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
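/* Program the backing store configuration into the firmware.  For every
 * context type selected by the "enables" bitmap the request carries the
 * entry count, entry size and the page-size/level attribute plus page
 * directory filled in by bnxt_hwrm_set_pg_attr().  The nine TQM rings are
 * handled by the final loop, which walks the per-ring request fields in
 * parallel with the TQM_SP enable bit shifted left once per ring.
 */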
static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
{
	struct hwrm_func_backing_store_cfg_input req = {0};
	struct bnxt_ctx_mem_info *ctx = bp->ctx;
	struct bnxt_ctx_pg_info *ctx_pg;
	__le32 *num_entries;
	__le64 *pg_dir;
	u32 flags = 0;
	u8 *pg_attr;
	u32 ena;
	int i, rc;

	if (!ctx)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_CFG, -1, -1);
	req.enables = cpu_to_le32(enables);

	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
		ctx_pg = &ctx->qp_mem;
		req.qp_num_entries = cpu_to_le32(ctx_pg->entries);
		req.qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries);
		req.qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries);
		req.qp_entry_size = cpu_to_le16(ctx->qp_entry_size);
		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
				      &req.qpc_pg_size_qpc_lvl,
				      &req.qpc_page_dir);
	}
	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
		ctx_pg = &ctx->srq_mem;
		req.srq_num_entries = cpu_to_le32(ctx_pg->entries);
		req.srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries);
		req.srq_entry_size = cpu_to_le16(ctx->srq_entry_size);
		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
				      &req.srq_pg_size_srq_lvl,
				      &req.srq_page_dir);
	}
	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
		ctx_pg = &ctx->cq_mem;
		req.cq_num_entries = cpu_to_le32(ctx_pg->entries);
		req.cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries);
		req.cq_entry_size = cpu_to_le16(ctx->cq_entry_size);
		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, &req.cq_pg_size_cq_lvl,
				      &req.cq_page_dir);
	}
	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
		ctx_pg = &ctx->vnic_mem;
		req.vnic_num_vnic_entries =
			cpu_to_le16(ctx->vnic_max_vnic_entries);
		req.vnic_num_ring_table_entries =
			cpu_to_le16(ctx->vnic_max_ring_table_entries);
		req.vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size);
		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
				      &req.vnic_pg_size_vnic_lvl,
				      &req.vnic_page_dir);
	}
	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
		ctx_pg = &ctx->stat_mem;
		req.stat_num_entries = cpu_to_le32(ctx->stat_max_entries);
		req.stat_entry_size = cpu_to_le16(ctx->stat_entry_size);
		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
				      &req.stat_pg_size_stat_lvl,
				      &req.stat_page_dir);
	}
	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
		ctx_pg = &ctx->mrav_mem;
		req.mrav_num_entries = cpu_to_le32(ctx_pg->entries);
		if (ctx->mrav_num_entries_units)
			flags |=
			FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
		req.mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size);
		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
				      &req.mrav_pg_size_mrav_lvl,
				      &req.mrav_page_dir);
	}
	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
		ctx_pg = &ctx->tim_mem;
		req.tim_num_entries = cpu_to_le32(ctx_pg->entries);
		req.tim_entry_size = cpu_to_le16(ctx->tim_entry_size);
		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
				      &req.tim_pg_size_tim_lvl,
				      &req.tim_page_dir);
	}
	for (i = 0, num_entries = &req.tqm_sp_num_entries,
	     pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl,
	     pg_dir = &req.tqm_sp_page_dir,
	     ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
	     i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
		if (!(enables & ena))
			continue;

		req.tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size);
		ctx_pg = ctx->tqm_mem[i];
		*num_entries = cpu_to_le32(ctx_pg->entries);
		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
	}
	req.flags = cpu_to_le32(flags);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	return rc;
}
static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
				  struct bnxt_ctx_pg_info *ctx_pg)
{
	struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;

	rmem->page_size = BNXT_PAGE_SIZE;
	rmem->pg_arr = ctx_pg->ctx_pg_arr;
	rmem->dma_arr = ctx_pg->ctx_dma_arr;
	rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
	if (rmem->depth >= 1)
		rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
	return bnxt_alloc_ring(bp, rmem);
}
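/* Allocate the host pages backing one context type.  Small regions fit in
 * a single-level ring (up to MAX_CTX_PAGES pages); larger regions, or a
 * caller-requested depth > 1, use a two-level layout where ctx_pg_tbl[]
 * holds one bnxt_ctx_pg_info per page-table page and only the last table
 * may be partially filled (the "rem" pages).
 */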
static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
				  struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
				  u8 depth, bool use_init_val)
{
	struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
	int rc;

	if (!mem_size)
		return 0;

	ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
	if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
		ctx_pg->nr_pages = 0;
		return -EINVAL;
	}
	if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
		int nr_tbls, i;

		rmem->depth = 2;
		ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg),
					     GFP_KERNEL);
		if (!ctx_pg->ctx_pg_tbl)
			return -ENOMEM;
		nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
		rmem->nr_pages = nr_tbls;
		rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
		if (rc)
			return rc;
		for (i = 0; i < nr_tbls; i++) {
			struct bnxt_ctx_pg_info *pg_tbl;

			pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
			if (!pg_tbl)
				return -ENOMEM;
			ctx_pg->ctx_pg_tbl[i] = pg_tbl;
			rmem = &pg_tbl->ring_mem;
			rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
			rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
			rmem->depth = 1;
			rmem->nr_pages = MAX_CTX_PAGES;
			if (use_init_val)
				rmem->init_val = bp->ctx->ctx_kind_initializer;
			if (i == (nr_tbls - 1)) {
				int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;

				if (rem)
					rmem->nr_pages = rem;
			}
			rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
			if (rc)
				break;
		}
	} else {
		rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
		if (rmem->nr_pages > 1 || depth)
			rmem->depth = 1;
		if (use_init_val)
			rmem->init_val = bp->ctx->ctx_kind_initializer;
		rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
	}
	return rc;
}
static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
				  struct bnxt_ctx_pg_info *ctx_pg)
{
	struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;

	if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
	    ctx_pg->ctx_pg_tbl) {
		int i, nr_tbls = rmem->nr_pages;

		for (i = 0; i < nr_tbls; i++) {
			struct bnxt_ctx_pg_info *pg_tbl;
			struct bnxt_ring_mem_info *rmem2;

			pg_tbl = ctx_pg->ctx_pg_tbl[i];
			if (!pg_tbl)
				continue;
			rmem2 = &pg_tbl->ring_mem;
			bnxt_free_ring(bp, rmem2);
			ctx_pg->ctx_pg_arr[i] = NULL;
			kfree(pg_tbl);
			ctx_pg->ctx_pg_tbl[i] = NULL;
		}
		kfree(ctx_pg->ctx_pg_tbl);
		ctx_pg->ctx_pg_tbl = NULL;
	}
	bnxt_free_ring(bp, rmem);
	ctx_pg->nr_pages = 0;
}
static void bnxt_free_ctx_mem(struct bnxt *bp)
{
	struct bnxt_ctx_mem_info *ctx = bp->ctx;
	int i;

	if (!ctx)
		return;

	if (ctx->tqm_mem[0]) {
		for (i = 0; i < bp->max_q + 1; i++)
			bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]);
		kfree(ctx->tqm_mem[0]);
		ctx->tqm_mem[0] = NULL;
	}

	bnxt_free_ctx_pg_tbls(bp, &ctx->tim_mem);
	bnxt_free_ctx_pg_tbls(bp, &ctx->mrav_mem);
	bnxt_free_ctx_pg_tbls(bp, &ctx->stat_mem);
	bnxt_free_ctx_pg_tbls(bp, &ctx->vnic_mem);
	bnxt_free_ctx_pg_tbls(bp, &ctx->cq_mem);
	bnxt_free_ctx_pg_tbls(bp, &ctx->srq_mem);
	bnxt_free_ctx_pg_tbls(bp, &ctx->qp_mem);
	ctx->flags &= ~BNXT_CTX_FLAG_INITED;
}
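/* Size and allocate all context memory, then hand the layout to firmware.
 * Entry counts come from the qcaps response plus extra QP/SRQ headroom
 * when RoCE is enabled.  As a purely illustrative example (the real
 * numbers come from the firmware): a qp_entry_size of 256 bytes with 64K
 * entries needs 16 MB, i.e. 4096 4K pages, which is why the QP/SRQ/CQ
 * regions are allocated with a deeper page-table level than the rest.
 */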
static int bnxt_alloc_ctx_mem(struct bnxt *bp)
{
	struct bnxt_ctx_pg_info *ctx_pg;
	struct bnxt_ctx_mem_info *ctx;
	u32 mem_size, ena, entries;
	u32 num_mr, num_ah;
	u32 extra_srqs = 0;
	u32 extra_qps = 0;
	u8 pg_lvl = 1;
	int i, rc;

	rc = bnxt_hwrm_func_backing_store_qcaps(bp);
	if (rc) {
		netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
			   rc);
		return rc;
	}
	ctx = bp->ctx;
	if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
		return 0;

	if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
		pg_lvl = 2;
		extra_qps = 65536;
		extra_srqs = 8192;
	}

	ctx_pg = &ctx->qp_mem;
	ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries +
			  extra_qps;
	mem_size = ctx->qp_entry_size * ctx_pg->entries;
	rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true);
	if (rc)
		return rc;

	ctx_pg = &ctx->srq_mem;
	ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs;
	mem_size = ctx->srq_entry_size * ctx_pg->entries;
	rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true);
	if (rc)
		return rc;

	ctx_pg = &ctx->cq_mem;
	ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2;
	mem_size = ctx->cq_entry_size * ctx_pg->entries;
	rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true);
	if (rc)
		return rc;

	ctx_pg = &ctx->vnic_mem;
	ctx_pg->entries = ctx->vnic_max_vnic_entries +
			  ctx->vnic_max_ring_table_entries;
	mem_size = ctx->vnic_entry_size * ctx_pg->entries;
	rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, true);
	if (rc)
		return rc;

	ctx_pg = &ctx->stat_mem;
	ctx_pg->entries = ctx->stat_max_entries;
	mem_size = ctx->stat_entry_size * ctx_pg->entries;
	rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, true);
	if (rc)
		return rc;

	ena = 0;
	if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
		goto skip_rdma;

	ctx_pg = &ctx->mrav_mem;
	/* 128K extra is needed to accommodate static AH context
	 * allocation by f/w.
	 */
	num_mr = 1024 * 256;
	num_ah = 1024 * 128;
	ctx_pg->entries = num_mr + num_ah;
	mem_size = ctx->mrav_entry_size * ctx_pg->entries;
	rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, true);
	if (rc)
		return rc;
	ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
	if (ctx->mrav_num_entries_units)
		ctx_pg->entries =
			((num_mr / ctx->mrav_num_entries_units) << 16) |
			 (num_ah / ctx->mrav_num_entries_units);

	ctx_pg = &ctx->tim_mem;
	ctx_pg->entries = ctx->qp_mem.entries;
	mem_size = ctx->tim_entry_size * ctx_pg->entries;
	rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, false);
	if (rc)
		return rc;
	ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;

skip_rdma:
	entries = ctx->qp_max_l2_entries + extra_qps;
	entries = roundup(entries, ctx->tqm_entries_multiple);
	entries = clamp_t(u32, entries, ctx->tqm_min_entries_per_ring,
			  ctx->tqm_max_entries_per_ring);
	for (i = 0; i < bp->max_q + 1; i++) {
		ctx_pg = ctx->tqm_mem[i];
		ctx_pg->entries = entries;
		mem_size = ctx->tqm_entry_size * entries;
		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, false);
		if (rc)
			return rc;
		ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
	}
	ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
	rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
	if (rc) {
		netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
			   rc);
		return rc;
	}
	ctx->flags |= BNXT_CTX_FLAG_INITED;
	return 0;
}
int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
{
	struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_resource_qcaps_input req = {0};
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESOURCE_QCAPS, -1, -1);
	req.fid = cpu_to_le16(0xffff);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message_silent(bp, &req, sizeof(req),
				       HWRM_CMD_TIMEOUT);
	if (rc)
		goto hwrm_func_resc_qcaps_exit;

	hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
	if (!all)
		goto hwrm_func_resc_qcaps_exit;

	hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
	hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
	hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
	hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
	hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
	hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
	hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
	hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
	hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
	hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
	hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
	hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
	hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
	hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
	hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
	hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);

	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		u16 max_msix = le16_to_cpu(resp->max_msix);

		hw_resc->max_nqs = max_msix;
		hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
	}

	if (BNXT_PF(bp)) {
		struct bnxt_pf_info *pf = &bp->pf;

		pf->vf_resv_strategy =
			le16_to_cpu(resp->vf_reservation_strategy);
		if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
			pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
	}
hwrm_func_resc_qcaps_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
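/* Read the function capabilities: feature flags are folded into bp->flags
 * and bp->fw_cap, the maximum ring/VNIC/stat-context counts are cached in
 * bp->hw_resc, and the PF or VF specific fields (MAC address, VF ranges,
 * flow counts, WoL capability) are stored in bp->pf or bp->vf.
 */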
static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
	struct hwrm_func_qcaps_input req = {0};
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	u32 flags;
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
	req.fid = cpu_to_le16(0xffff);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		goto hwrm_func_qcaps_exit;

	flags = le32_to_cpu(resp->flags);
	if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
		bp->flags |= BNXT_FLAG_ROCEV1_CAP;
	if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
		bp->flags |= BNXT_FLAG_ROCEV2_CAP;
	if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
		bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
	if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE)
		bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
	if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED)
		bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
	if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE)
		bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
	if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD)
		bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;

	bp->tx_push_thresh = 0;
	if (flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED)
		bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;

	hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
	hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
	hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
	hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
	hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
	if (!hw_resc->max_hw_ring_grps)
		hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
	hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
	hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
	hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);

	if (BNXT_PF(bp)) {
		struct bnxt_pf_info *pf = &bp->pf;

		pf->fw_fid = le16_to_cpu(resp->fid);
		pf->port_id = le16_to_cpu(resp->port_id);
		memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
		pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
		pf->max_vfs = le16_to_cpu(resp->max_vfs);
		pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
		pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
		pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
		pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
		pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
		pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
		bp->flags &= ~BNXT_FLAG_WOL_CAP;
		if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
			bp->flags |= BNXT_FLAG_WOL_CAP;
	} else {
#ifdef CONFIG_BNXT_SRIOV
		struct bnxt_vf_info *vf = &bp->vf;

		vf->fw_fid = le16_to_cpu(resp->fid);
		memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
#endif
	}

hwrm_func_qcaps_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
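/* Top-level capability probe: query FUNC_QCAPS, then the queue/port
 * configuration, and on HWRM 1.8.3+ (0x10803) also size the context
 * memory and read the resource limits; BNXT_FW_CAP_NEW_RM is set only if
 * the resource-manager query succeeds.
 */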
static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
	int rc;

	rc = __bnxt_hwrm_func_qcaps(bp);
	if (rc)
		return rc;
	rc = bnxt_hwrm_queue_qportcfg(bp);
	if (rc) {
		netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
		return rc;
	}
	if (bp->hwrm_spec_code >= 0x10803) {
		rc = bnxt_alloc_ctx_mem(bp);
		if (rc)
			return rc;
		rc = bnxt_hwrm_func_resc_qcaps(bp, true);
		if (!rc)
			bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
	}
	return 0;
}
static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
{
	struct hwrm_cfa_adv_flow_mgnt_qcaps_input req = {0};
	struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp;
	u32 flags;
	int rc;

	if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW))
		return 0;

	resp = bp->hwrm_cmd_resp_addr;
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		goto hwrm_cfa_adv_qcaps_exit;

	flags = le32_to_cpu(resp->flags);
	if (flags &
	    CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED)
		bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2;

hwrm_cfa_adv_qcaps_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
static int bnxt_map_fw_health_regs(struct bnxt *bp)
{
	struct bnxt_fw_health *fw_health = bp->fw_health;
	u32 reg_base = 0xffffffff;
	int i;

	/* Only pre-map the monitoring GRC registers using window 3 */
	for (i = 0; i < 4; i++) {
		u32 reg = fw_health->regs[i];

		if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC)
			continue;
		if (reg_base == 0xffffffff)
			reg_base = reg & BNXT_GRC_BASE_MASK;
		if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
			return -ERANGE;
		fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_BASE +
					    (reg & BNXT_GRC_OFFSET_MASK);
	}
	if (reg_base == 0xffffffff)
		return 0;

	writel(reg_base, bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT +
			 BNXT_FW_HEALTH_WIN_MAP_OFF);
	return 0;
}
static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
{
	struct hwrm_error_recovery_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_fw_health *fw_health = bp->fw_health;
	struct hwrm_error_recovery_qcfg_input req = {0};
	int rc, i;

	if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_ERROR_RECOVERY_QCFG, -1, -1);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		goto err_recovery_out;
	fw_health->flags = le32_to_cpu(resp->flags);
	if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) &&
	    !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
		rc = -EINVAL;
		goto err_recovery_out;
	}
	fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq);
	fw_health->master_func_wait_dsecs =
		le32_to_cpu(resp->master_func_wait_period);
	fw_health->normal_func_wait_dsecs =
		le32_to_cpu(resp->normal_func_wait_period);
	fw_health->post_reset_wait_dsecs =
		le32_to_cpu(resp->master_func_wait_period_after_reset);
	fw_health->post_reset_max_wait_dsecs =
		le32_to_cpu(resp->max_bailout_time_after_reset);
	fw_health->regs[BNXT_FW_HEALTH_REG] =
		le32_to_cpu(resp->fw_health_status_reg);
	fw_health->regs[BNXT_FW_HEARTBEAT_REG] =
		le32_to_cpu(resp->fw_heartbeat_reg);
	fw_health->regs[BNXT_FW_RESET_CNT_REG] =
		le32_to_cpu(resp->fw_reset_cnt_reg);
	fw_health->regs[BNXT_FW_RESET_INPROG_REG] =
		le32_to_cpu(resp->reset_inprogress_reg);
	fw_health->fw_reset_inprog_reg_mask =
		le32_to_cpu(resp->reset_inprogress_reg_mask);
	fw_health->fw_reset_seq_cnt = resp->reg_array_cnt;
	if (fw_health->fw_reset_seq_cnt >= 16) {
		rc = -EINVAL;
		goto err_recovery_out;
	}
	for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) {
		fw_health->fw_reset_seq_regs[i] =
			le32_to_cpu(resp->reset_reg[i]);
		fw_health->fw_reset_seq_vals[i] =
			le32_to_cpu(resp->reset_reg_val[i]);
		fw_health->fw_reset_seq_delay_msec[i] =
			resp->delay_after_reset[i];
	}
err_recovery_out:
	mutex_unlock(&bp->hwrm_cmd_lock);
	if (!rc)
		rc = bnxt_map_fw_health_regs(bp);
	if (rc)
		bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
	return rc;
}
static int bnxt_hwrm_func_reset(struct bnxt *bp)
{
	struct hwrm_func_reset_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
	req.enables = 0;

	return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
}
static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
	struct hwrm_queue_qportcfg_input req = {0};
	struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
	u8 i, j, *qptr;
	bool no_rdma;
	int rc = 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		goto qportcfg_exit;

	if (!resp->max_configurable_queues) {
		rc = -EINVAL;
		goto qportcfg_exit;
	}
	bp->max_tc = resp->max_configurable_queues;
	bp->max_lltc = resp->max_configurable_lossless_queues;
	if (bp->max_tc > BNXT_MAX_QUEUE)
		bp->max_tc = BNXT_MAX_QUEUE;

	no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
	qptr = &resp->queue_id0;
	for (i = 0, j = 0; i < bp->max_tc; i++) {
		bp->q_info[j].queue_id = *qptr;
		bp->q_ids[i] = *qptr++;
		bp->q_info[j].queue_profile = *qptr++;
		bp->tc_to_qidx[j] = j;
		if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
		    (no_rdma && BNXT_PF(bp)))
			j++;
	}
	bp->max_q = bp->max_tc;
	bp->max_tc = max_t(u8, j, 1);

	if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
		bp->max_tc = 1;

	if (bp->max_lltc > bp->max_tc)
		bp->max_lltc = bp->max_tc;

qportcfg_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
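/* HWRM version negotiation.  The interface version is packed into
 * bp->hwrm_spec_code as (maj << 16) | (min << 8) | upd, so for example
 * interface 1.7.7 becomes 0x10707, which is the form used by the spec
 * checks scattered through this file.
 */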
static int __bnxt_hwrm_ver_get(struct bnxt *bp, bool silent)
{
	struct hwrm_ver_get_input req = {0};
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
	req.hwrm_intf_min = HWRM_VERSION_MINOR;
	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

	rc = bnxt_hwrm_do_send_msg(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT,
				   silent);
	return rc;
}
static int bnxt_hwrm_ver_get(struct bnxt *bp)
{
	struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
	u32 dev_caps_cfg;
	int rc;

	bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = __bnxt_hwrm_ver_get(bp, false);
	if (rc)
		goto hwrm_ver_get_exit;

	memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));

	bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
			     resp->hwrm_intf_min_8b << 8 |
			     resp->hwrm_intf_upd_8b;
	if (resp->hwrm_intf_maj_8b < 1) {
		netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
			    resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
			    resp->hwrm_intf_upd_8b);
		netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
	}
	snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d.%d",
		 resp->hwrm_fw_maj_8b, resp->hwrm_fw_min_8b,
		 resp->hwrm_fw_bld_8b, resp->hwrm_fw_rsvd_8b);

	if (strlen(resp->active_pkg_name)) {
		int fw_ver_len = strlen(bp->fw_ver_str);

		snprintf(bp->fw_ver_str + fw_ver_len,
			 FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
			 resp->active_pkg_name);
		bp->fw_cap |= BNXT_FW_CAP_PKG_VER;
	}

	bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
	if (!bp->hwrm_cmd_timeout)
		bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;

	if (resp->hwrm_intf_maj_8b >= 1) {
		bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
		bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
	}
	if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
		bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;

	bp->chip_num = le16_to_cpu(resp->chip_num);
	bp->chip_rev = resp->chip_rev;
	if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
	    !resp->chip_metal)
		bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;

	dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
	if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
	    (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
		bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;

	if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
		bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;

	if (dev_caps_cfg &
	    VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED)
		bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;

	if (dev_caps_cfg &
	    VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
		bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;

	if (dev_caps_cfg &
	    VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
		bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW;

hwrm_ver_get_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
int bnxt_hwrm_fw_set_time(struct bnxt *bp)
{
	struct hwrm_fw_set_time_input req = {0};
	struct tm tm;
	time64_t now = ktime_get_real_seconds();

	if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
	    bp->hwrm_spec_code < 0x10400)
		return -EOPNOTSUPP;

	time64_to_tm(now, 0, &tm);
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1);
	req.year = cpu_to_le16(1900 + tm.tm_year);
	req.month = 1 + tm.tm_mon;
	req.day = tm.tm_mday;
	req.hour = tm.tm_hour;
	req.minute = tm.tm_min;
	req.second = tm.tm_sec;
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
static int bnxt_hwrm_port_qstats(struct bnxt *bp)
{
	struct bnxt_pf_info *pf = &bp->pf;
	struct hwrm_port_qstats_input req = {0};
	int rc;

	if (!(bp->flags & BNXT_FLAG_PORT_STATS))
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1);
	req.port_id = cpu_to_le16(pf->port_id);
	req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_map);
	req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_map);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	return rc;
}
static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
{
	struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_queue_pri2cos_qcfg_input req2 = {0};
	struct hwrm_port_qstats_ext_input req = {0};
	struct bnxt_pf_info *pf = &bp->pf;
	u32 tx_stat_size;
	int rc;

	if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS_EXT, -1, -1);
	req.port_id = cpu_to_le16(pf->port_id);
	req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
	req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_ext_map);
	tx_stat_size = bp->hw_tx_port_stats_ext ?
		       sizeof(*bp->hw_tx_port_stats_ext) : 0;
	req.tx_stat_size = cpu_to_le16(tx_stat_size);
	req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_ext_map);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		bp->fw_rx_stats_ext_size = le16_to_cpu(resp->rx_stat_size) / 8;
		bp->fw_tx_stats_ext_size = tx_stat_size ?
			le16_to_cpu(resp->tx_stat_size) / 8 : 0;
	} else {
		bp->fw_rx_stats_ext_size = 0;
		bp->fw_tx_stats_ext_size = 0;
	}
	if (bp->fw_tx_stats_ext_size <=
	    offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
		mutex_unlock(&bp->hwrm_cmd_lock);
		bp->pri2cos_valid = 0;
		return rc;
	}

	bnxt_hwrm_cmd_hdr_init(bp, &req2, HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
	req2.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);

	rc = _hwrm_send_message(bp, &req2, sizeof(req2), HWRM_CMD_TIMEOUT);
	if (!rc) {
		struct hwrm_queue_pri2cos_qcfg_output *resp2;
		u8 *pri2cos;
		int i, j;

		resp2 = bp->hwrm_cmd_resp_addr;
		pri2cos = &resp2->pri0_cos_queue_id;
		for (i = 0; i < 8; i++) {
			u8 queue_id = pri2cos[i];

			for (j = 0; j < bp->max_q; j++) {
				if (bp->q_ids[j] == queue_id)
					bp->pri2cos[i] = j;
			}
		}
		bp->pri2cos_valid = 1;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
static int bnxt_hwrm_pcie_qstats(struct bnxt *bp)
{
	struct hwrm_pcie_qstats_input req = {0};

	if (!(bp->flags & BNXT_FLAG_PCIE_STATS))
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PCIE_QSTATS, -1, -1);
	req.pcie_stat_size = cpu_to_le16(sizeof(struct pcie_ctx_hw_stats));
	req.pcie_stat_host_addr = cpu_to_le64(bp->hw_pcie_stats_map);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
{
	if (bp->vxlan_port_cnt) {
		bnxt_hwrm_tunnel_dst_port_free(
			bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
	}
	bp->vxlan_port_cnt = 0;
	if (bp->nge_port_cnt) {
		bnxt_hwrm_tunnel_dst_port_free(
			bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
	}
	bp->nge_port_cnt = 0;
}
static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
{
	u32 tpa_flags = 0;
	int rc, i;

	if (set_tpa)
		tpa_flags = bp->flags & BNXT_FLAG_TPA;
	else if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
		return 0;
	for (i = 0; i < bp->nr_vnics; i++) {
		rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
		if (rc) {
			netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
				   i, rc);
			return rc;
		}
	}
	return 0;
}
static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
{
	int i;

	for (i = 0; i < bp->nr_vnics; i++)
		bnxt_hwrm_vnic_set_rss(bp, i, false);
}
static void bnxt_clear_vnic(struct bnxt *bp)
{
	if (!bp->vnic_info)
		return;

	bnxt_hwrm_clear_vnic_filter(bp);
	if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
		/* clear all RSS setting before free vnic ctx */
		bnxt_hwrm_clear_vnic_rss(bp);
		bnxt_hwrm_vnic_ctx_free(bp);
	}
	/* before free the vnic, undo the vnic tpa settings */
	if (bp->flags & BNXT_FLAG_TPA)
		bnxt_set_tpa(bp, false);
	bnxt_hwrm_vnic_free(bp);
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		bnxt_hwrm_vnic_ctx_free(bp);
}
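/* Teardown mirrors bring-up in reverse: clear VNIC filters, RSS and TPA
 * and free the VNICs first, then the rings and ring groups, and finally
 * (on a full re-init) the statistics contexts and tunnel ports.
 */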
static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
				    bool irq_re_init)
{
	bnxt_clear_vnic(bp);
	bnxt_hwrm_ring_free(bp, close_path);
	bnxt_hwrm_ring_grp_free(bp);
	if (irq_re_init) {
		bnxt_hwrm_stat_ctx_free(bp);
		bnxt_hwrm_free_tunnel_ports(bp);
	}
}
static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
{
	struct hwrm_func_cfg_input req = {0};
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(0xffff);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
	if (br_mode == BRIDGE_MODE_VEB)
		req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
	else if (br_mode == BRIDGE_MODE_VEPA)
		req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
	else
		return -EINVAL;
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	return rc;
}
static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
{
	struct hwrm_func_cfg_input req = {0};
	int rc;

	if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(0xffff);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
	req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
	if (size == 128)
		req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	return rc;
}
static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
{
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
	int rc;

	if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
		goto skip_rss_ctx;

	/* allocate context for vnic */
	rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
	if (rc) {
		netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
			   vnic_id, rc);
		goto vnic_setup_err;
	}
	bp->rsscos_nr_ctxs++;

	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
		rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
		if (rc) {
			netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
				   vnic_id, rc);
			goto vnic_setup_err;
		}
		bp->rsscos_nr_ctxs++;
	}

skip_rss_ctx:
	/* configure default vnic, ring grp */
	rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
	if (rc) {
		netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
			   vnic_id, rc);
		goto vnic_setup_err;
	}

	/* Enable RSS hashing on vnic */
	rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
	if (rc) {
		netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
			   vnic_id, rc);
		goto vnic_setup_err;
	}

	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
		rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
		if (rc) {
			netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
				   vnic_id, rc);
		}
	}

vnic_setup_err:
	return rc;
}
static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
{
	int rc, i, nr_ctxs;

	nr_ctxs = DIV_ROUND_UP(bp->rx_nr_rings, 64);
	for (i = 0; i < nr_ctxs; i++) {
		rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i);
		if (rc) {
			netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
				   vnic_id, i, rc);
			break;
		}
		bp->rsscos_nr_ctxs++;
	}
	if (i < nr_ctxs)
		return -ENOMEM;

	rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true);
	if (rc) {
		netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
			   vnic_id, rc);
		return rc;
	}
	rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
	if (rc) {
		netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
			   vnic_id, rc);
		return rc;
	}
	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
		rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
		if (rc) {
			netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
				   vnic_id, rc);
		}
	}
	return rc;
}
static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		return __bnxt_setup_vnic_p5(bp, vnic_id);
	else
		return __bnxt_setup_vnic(bp, vnic_id);
}
static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
{
#ifdef CONFIG_RFS_ACCEL
	int i, rc = 0;

	if (bp->flags & BNXT_FLAG_CHIP_P5)
		return 0;

	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_vnic_info *vnic;
		u16 vnic_id = i + 1;
		u16 ring_id = i;

		if (vnic_id >= bp->nr_vnics)
			break;

		vnic = &bp->vnic_info[vnic_id];
		vnic->flags |= BNXT_VNIC_RFS_FLAG;
		if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
			vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
		rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
		if (rc) {
			netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
				   vnic_id, rc);
			break;
		}
		rc = bnxt_setup_vnic(bp, vnic_id);
		if (rc)
			break;
	}
	return rc;
#else
	return 0;
#endif
}
/* Allow PF and VF with default VLAN to be in promiscuous mode */
static bool bnxt_promisc_ok(struct bnxt *bp)
{
#ifdef CONFIG_BNXT_SRIOV
	if (BNXT_VF(bp) && !bp->vf.vlan)
		return false;
#endif
	return true;
}
static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
{
	unsigned int rc = 0;

	rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
	if (rc) {
		netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
			   rc);
		return rc;
	}

	rc = bnxt_hwrm_vnic_cfg(bp, 1);
	if (rc) {
		netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
			   rc);
		return rc;
	}
	return rc;
}
static int bnxt_cfg_rx_mode(struct bnxt *);
static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
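/* Bring the chip to an operational state: allocate statistics contexts,
 * rings and ring groups, set up default VNIC 0 (plus RFS VNICs and TPA
 * when enabled), install the unicast MAC filter, build the RX mask from
 * the netdev flags and program coalescing.  Any failure unwinds through
 * bnxt_hwrm_resource_free().
 */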
static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
{
	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
	int rc = 0;
	unsigned int rx_nr_rings = bp->rx_nr_rings;

	if (irq_re_init) {
		rc = bnxt_hwrm_stat_ctx_alloc(bp);
		if (rc) {
			netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
				   rc);
			goto err_out;
		}
	}

	rc = bnxt_hwrm_ring_alloc(bp);
	if (rc) {
		netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
		goto err_out;
	}

	rc = bnxt_hwrm_ring_grp_alloc(bp);
	if (rc) {
		netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
		goto err_out;
	}

	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
		rx_nr_rings--;

	/* default vnic 0 */
	rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
	if (rc) {
		netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
		goto err_out;
	}

	rc = bnxt_setup_vnic(bp, 0);
	if (rc)
		goto err_out;

	if (bp->flags & BNXT_FLAG_RFS) {
		rc = bnxt_alloc_rfs_vnics(bp);
		if (rc)
			goto err_out;
	}

	if (bp->flags & BNXT_FLAG_TPA) {
		rc = bnxt_set_tpa(bp, true);
		if (rc)
			goto err_out;
	}

	if (BNXT_VF(bp))
		bnxt_update_vf_mac(bp);

	/* Filter for default vnic 0 */
	rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
	if (rc) {
		netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
		goto err_out;
	}
	vnic->uc_filter_count = 1;

	vnic->rx_mask = 0;
	if (bp->dev->flags & IFF_BROADCAST)
		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;

	if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;

	if (bp->dev->flags & IFF_ALLMULTI) {
		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
		vnic->mc_list_count = 0;
	} else {
		u32 mask = 0;

		bnxt_mc_list_updated(bp, &mask);
		vnic->rx_mask |= mask;
	}

	rc = bnxt_cfg_rx_mode(bp);
	if (rc)
		goto err_out;

	rc = bnxt_hwrm_set_coal(bp);
	if (rc)
		netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
			    rc);

	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
		rc = bnxt_setup_nitroa0_vnic(bp);
		if (rc)
			netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
				   rc);
	}

	bnxt_hwrm_func_qcfg(bp);
	netdev_update_features(bp->dev);

	return 0;

err_out:
	bnxt_hwrm_resource_free(bp, 0, true);

	return rc;
}
static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
{
	bnxt_hwrm_resource_free(bp, 1, irq_re_init);
	return 0;
}
static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
{
	bnxt_init_cp_rings(bp);
	bnxt_init_rx_rings(bp);
	bnxt_init_tx_rings(bp);
	bnxt_init_ring_grps(bp, irq_re_init);
	bnxt_init_vnics(bp);

	return bnxt_init_chip(bp, irq_re_init);
}
static int bnxt_set_real_num_queues(struct bnxt *bp)
{
	int rc;
	struct net_device *dev = bp->dev;

	rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
					  bp->tx_nr_rings_xdp);
	if (rc)
		return rc;

	rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
	if (rc)
		return rc;

#ifdef CONFIG_RFS_ACCEL
	if (bp->flags & BNXT_FLAG_RFS)
		dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
#endif

	return rc;
}
static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
			   bool shared)
{
	int _rx = *rx, _tx = *tx;

	if (shared) {
		*rx = min_t(int, _rx, max);
		*tx = min_t(int, _tx, max);
	} else {
		if (max < 2)
			return -ENOMEM;

		while (_rx + _tx > max) {
			if (_rx > _tx && _rx > 1)
				_rx--;
			else if (_tx > 1)
				_tx--;
		}
		*rx = _rx;
		*tx = _tx;
	}
	return 0;
}
static void bnxt_setup_msix(struct bnxt *bp)
{
	const int len = sizeof(bp->irq_tbl[0].name);
	struct net_device *dev = bp->dev;
	int tcs, i;

	tcs = netdev_get_num_tc(dev);
	if (tcs) {
		int i, off, count;

		for (i = 0; i < tcs; i++) {
			count = bp->tx_nr_rings_per_tc;
			off = i * count;
			netdev_set_tc_queue(dev, i, count, off);
		}
	}

	for (i = 0; i < bp->cp_nr_rings; i++) {
		int map_idx = bnxt_cp_num_to_irq_num(bp, i);
		char *attr;

		if (bp->flags & BNXT_FLAG_SHARED_RINGS)
			attr = "TxRx";
		else if (i < bp->rx_nr_rings)
			attr = "rx";
		else
			attr = "tx";

		snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
			 attr, i);
		bp->irq_tbl[map_idx].handler = bnxt_msix;
	}
}
static void bnxt_setup_inta(struct bnxt *bp)
{
	const int len = sizeof(bp->irq_tbl[0].name);

	if (netdev_get_num_tc(bp->dev))
		netdev_reset_tc(bp->dev);

	snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
		 0);
	bp->irq_tbl[0].handler = bnxt_inta;
}
static int bnxt_setup_int_mode(struct bnxt *bp)
{
	int rc;

	if (bp->flags & BNXT_FLAG_USING_MSIX)
		bnxt_setup_msix(bp);
	else
		bnxt_setup_inta(bp);

	rc = bnxt_set_real_num_queues(bp);
	return rc;
}
#ifdef CONFIG_RFS_ACCEL
static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
{
	return bp->hw_resc.max_rsscos_ctxs;
}

static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
{
	return bp->hw_resc.max_vnics;
}
#endif

unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
{
	return bp->hw_resc.max_stat_ctxs;
}

unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
{
	return bp->hw_resc.max_cp_rings;
}

static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
{
	unsigned int cp = bp->hw_resc.max_cp_rings;

	if (!(bp->flags & BNXT_FLAG_CHIP_P5))
		cp -= bnxt_get_ulp_msix_num(bp);

	return cp;
}

static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
{
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;

	if (bp->flags & BNXT_FLAG_CHIP_P5)
		return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs);

	return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
}

static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
{
	bp->hw_resc.max_irqs = max_irqs;
}

unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
{
	unsigned int cp;

	cp = bnxt_get_max_func_cp_rings_for_en(bp);
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		return cp - bp->rx_nr_rings - bp->tx_nr_rings;

	return cp - bp->cp_nr_rings;
}

unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
{
	return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
}
int bnxt_get_avail_msix(struct bnxt *bp, int num)
{
	int max_cp = bnxt_get_max_func_cp_rings(bp);
	int max_irq = bnxt_get_max_func_irqs(bp);
	int total_req = bp->cp_nr_rings + num;
	int max_idx, avail_msix;

	max_idx = bp->total_irqs;
	if (!(bp->flags & BNXT_FLAG_CHIP_P5))
		max_idx = min_t(int, bp->total_irqs, max_cp);
	avail_msix = max_idx - bp->cp_nr_rings;
	if (!BNXT_NEW_RM(bp) || avail_msix >= num)
		return avail_msix;

	if (max_irq < total_req) {
		num = max_irq - bp->cp_nr_rings;
		if (num <= 0)
			return 0;
	}
	return num;
}

static int bnxt_get_num_msix(struct bnxt *bp)
{
	if (!BNXT_NEW_RM(bp))
		return bnxt_get_max_func_irqs(bp);

	return bnxt_nq_rings_in_use(bp);
}
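/* MSI-X setup: request one vector per completion/notification queue
 * (capped by the function's IRQ limit), then trim the RX/TX ring counts
 * with bnxt_trim_rings() to fit whatever pci_enable_msix_range() actually
 * granted, minus the vectors reserved for the ULP (RDMA) driver.
 */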
static int bnxt_init_msix(struct bnxt *bp)
{
	int i, total_vecs, max, rc = 0, min = 1, ulp_msix;
	struct msix_entry *msix_ent;

	total_vecs = bnxt_get_num_msix(bp);
	max = bnxt_get_max_func_irqs(bp);
	if (total_vecs > max)
		total_vecs = max;

	if (!total_vecs)
		return 0;

	msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
	if (!msix_ent)
		return -ENOMEM;

	for (i = 0; i < total_vecs; i++) {
		msix_ent[i].entry = i;
		msix_ent[i].vector = 0;
	}

	if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
		min = 2;

	total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
	ulp_msix = bnxt_get_ulp_msix_num(bp);
	if (total_vecs < 0 || total_vecs < ulp_msix) {
		rc = -ENODEV;
		goto msix_setup_exit;
	}

	bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
	if (bp->irq_tbl) {
		for (i = 0; i < total_vecs; i++)
			bp->irq_tbl[i].vector = msix_ent[i].vector;

		bp->total_irqs = total_vecs;
		/* Trim rings based upon num of vectors allocated */
		rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
				     total_vecs - ulp_msix, min == 1);
		if (rc)
			goto msix_setup_exit;

		bp->cp_nr_rings = (min == 1) ?
				  max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
				  bp->tx_nr_rings + bp->rx_nr_rings;

	} else {
		rc = -ENOMEM;
		goto msix_setup_exit;
	}
	bp->flags |= BNXT_FLAG_USING_MSIX;
	kfree(msix_ent);
	return 0;

msix_setup_exit:
	netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
	kfree(bp->irq_tbl);
	bp->irq_tbl = NULL;
	pci_disable_msix(bp->pdev);
	kfree(msix_ent);
	return rc;
}
static int bnxt_init_inta(struct bnxt *bp)
{
	bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL);
	if (!bp->irq_tbl)
		return -ENOMEM;

	bp->total_irqs = 1;
	bp->rx_nr_rings = 1;
	bp->tx_nr_rings = 1;
	bp->cp_nr_rings = 1;
	bp->flags |= BNXT_FLAG_SHARED_RINGS;
	bp->irq_tbl[0].vector = bp->pdev->irq;
	return 0;
}
static int bnxt_init_int_mode(struct bnxt *bp)
{
	int rc = 0;

	if (bp->flags & BNXT_FLAG_MSIX_CAP)
		rc = bnxt_init_msix(bp);

	if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
		/* fallback to INTA */
		rc = bnxt_init_inta(bp);
	}
	return rc;
}

static void bnxt_clear_int_mode(struct bnxt *bp)
{
	if (bp->flags & BNXT_FLAG_USING_MSIX)
		pci_disable_msix(bp->pdev);

	kfree(bp->irq_tbl);
	bp->irq_tbl = NULL;
	bp->flags &= ~BNXT_FLAG_USING_MSIX;
}
int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
{
	int tcs = netdev_get_num_tc(bp->dev);
	bool irq_cleared = false;
	int rc;

	if (!bnxt_need_reserve_rings(bp))
		return 0;

	if (irq_re_init && BNXT_NEW_RM(bp) &&
	    bnxt_get_num_msix(bp) != bp->total_irqs) {
		bnxt_ulp_irq_stop(bp);
		bnxt_clear_int_mode(bp);
		irq_cleared = true;
	}
	rc = __bnxt_reserve_rings(bp);
	if (irq_cleared) {
		if (!rc)
			rc = bnxt_init_int_mode(bp);
		bnxt_ulp_irq_restart(bp, rc);
	}
	if (rc) {
		netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
		return rc;
	}
	if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
		netdev_err(bp->dev, "tx ring reservation failure\n");
		netdev_reset_tc(bp->dev);
		bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
		return -ENOMEM;
	}
	return 0;
}
static void bnxt_free_irq(struct bnxt *bp)
{
	struct bnxt_irq *irq;
	int i;

#ifdef CONFIG_RFS_ACCEL
	free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
	bp->dev->rx_cpu_rmap = NULL;
#endif
	if (!bp->irq_tbl || !bp->bnapi)
		return;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		int map_idx = bnxt_cp_num_to_irq_num(bp, i);

		irq = &bp->irq_tbl[map_idx];
		if (irq->requested) {
			if (irq->have_cpumask) {
				irq_set_affinity_hint(irq->vector, NULL);
				free_cpumask_var(irq->cpu_mask);
				irq->have_cpumask = 0;
			}
			free_irq(irq->vector, bp->bnapi[i]);
		}

		irq->requested = 0;
	}
}
static int bnxt_request_irq(struct bnxt *bp)
{
	int i, j, rc = 0;
	unsigned long flags = 0;
#ifdef CONFIG_RFS_ACCEL
	struct cpu_rmap *rmap;
#endif

	rc = bnxt_setup_int_mode(bp);
	if (rc) {
		netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
			   rc);
		return rc;
	}
#ifdef CONFIG_RFS_ACCEL
	rmap = bp->dev->rx_cpu_rmap;
#endif
	if (!(bp->flags & BNXT_FLAG_USING_MSIX))
		flags = IRQF_SHARED;

	for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
		int map_idx = bnxt_cp_num_to_irq_num(bp, i);
		struct bnxt_irq *irq = &bp->irq_tbl[map_idx];

#ifdef CONFIG_RFS_ACCEL
		if (rmap && bp->bnapi[i]->rx_ring) {
			rc = irq_cpu_rmap_add(rmap, irq->vector);
			if (rc)
				netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
					    j);
			j++;
		}
#endif
		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
				 bp->bnapi[i]);
		if (rc)
			break;

		irq->requested = 1;

		if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
			int numa_node = dev_to_node(&bp->pdev->dev);

			irq->have_cpumask = 1;
			cpumask_set_cpu(cpumask_local_spread(i, numa_node),
					irq->cpu_mask);
			rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
			if (rc) {
				netdev_warn(bp->dev,
					    "Set affinity failed, IRQ = %d\n",
					    irq->vector);
				break;
			}
		}
	}
	return rc;
}
static void bnxt_del_napi(struct bnxt *bp)
{
	int i;

	if (!bp->bnapi)
		return;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];

		napi_hash_del(&bnapi->napi);
		netif_napi_del(&bnapi->napi);
	}
	/* We called napi_hash_del() before netif_napi_del(), we need
	 * to respect an RCU grace period before freeing napi structures.
	 */
	synchronize_net();
}
static void bnxt_init_napi(struct bnxt *bp)
{
	int i;
	unsigned int cp_nr_rings = bp->cp_nr_rings;
	struct bnxt_napi *bnapi;

	if (bp->flags & BNXT_FLAG_USING_MSIX) {
		int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;

		if (bp->flags & BNXT_FLAG_CHIP_P5)
			poll_fn = bnxt_poll_p5;
		else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
			cp_nr_rings--;
		for (i = 0; i < cp_nr_rings; i++) {
			bnapi = bp->bnapi[i];
			netif_napi_add(bp->dev, &bnapi->napi, poll_fn, 64);
		}
		if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
			bnapi = bp->bnapi[cp_nr_rings];
			netif_napi_add(bp->dev, &bnapi->napi,
				       bnxt_poll_nitroa0, 64);
		}
	} else {
		bnapi = bp->bnapi[0];
		netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
	}
}
static void bnxt_disable_napi(struct bnxt *bp)
{
	int i;

	if (!bp->bnapi)
		return;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;

		if (bp->bnapi[i]->rx_ring)
			cancel_work_sync(&cpr->dim.work);

		napi_disable(&bp->bnapi[i]->napi);
	}
}
static void bnxt_enable_napi(struct bnxt *bp)
{
	int i;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;

		bp->bnapi[i]->in_reset = false;

		if (bp->bnapi[i]->rx_ring) {
			INIT_WORK(&cpr->dim.work, bnxt_dim_work);
			cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
		}
		napi_enable(&bp->bnapi[i]->napi);
	}
}
void bnxt_tx_disable(struct bnxt *bp)
{
	int i;
	struct bnxt_tx_ring_info *txr;

	if (bp->tx_ring) {
		for (i = 0; i < bp->tx_nr_rings; i++) {
			txr = &bp->tx_ring[i];
			txr->dev_state = BNXT_DEV_STATE_CLOSING;
		}
	}
	/* Stop all TX queues */
	netif_tx_disable(bp->dev);
	netif_carrier_off(bp->dev);
}
void bnxt_tx_enable(struct bnxt *bp)
{
	int i;
	struct bnxt_tx_ring_info *txr;

	for (i = 0; i < bp->tx_nr_rings; i++) {
		txr = &bp->tx_ring[i];
		txr->dev_state = 0;
	}
	netif_tx_wake_all_queues(bp->dev);
	if (bp->link_info.link_up)
		netif_carrier_on(bp->dev);
}
static void bnxt_report_link(struct bnxt *bp)
{
	if (bp->link_info.link_up) {
		const char *duplex;
		const char *flow_ctrl;
		u32 speed;
		u16 fec;

		netif_carrier_on(bp->dev);
		if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
			duplex = "full";
		else
			duplex = "half";
		if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
			flow_ctrl = "ON - receive & transmit";
		else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
			flow_ctrl = "ON - transmit";
		else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
			flow_ctrl = "ON - receive";
		else
			flow_ctrl = "none";
		speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
		netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s duplex, Flow control: %s\n",
			    speed, duplex, flow_ctrl);
		if (bp->flags & BNXT_FLAG_EEE_CAP)
			netdev_info(bp->dev, "EEE is %s\n",
				    bp->eee.eee_active ? "active" :
							 "not active");
		fec = bp->link_info.fec_cfg;
		if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
			netdev_info(bp->dev, "FEC autoneg %s encodings: %s\n",
				    (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
				    (fec & BNXT_FEC_ENC_BASE_R) ? "BaseR" :
				    (fec & BNXT_FEC_ENC_RS) ? "RS" : "None");
	} else {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");
	}
}
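/* Query PHY capabilities: EEE support (with the LPI timer bounds),
 * external and autoneg loopback test support, shared port configuration
 * and the auto-negotiable speed mask, which is cached in link_info for
 * the ethtool and link-update paths.
 */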
static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
{
	struct hwrm_port_phy_qcaps_input req = {0};
	struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_link_info *link_info = &bp->link_info;
	int rc = 0;

	bp->flags &= ~BNXT_FLAG_EEE_CAP;
	if (bp->test_info)
		bp->test_info->flags &= ~(BNXT_TEST_FL_EXT_LPBK |
					  BNXT_TEST_FL_AN_PHY_LPBK);
	if (bp->hwrm_spec_code < 0x10201)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		goto hwrm_phy_qcaps_exit;

	if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
		struct ethtool_eee *eee = &bp->eee;
		u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);

		bp->flags |= BNXT_FLAG_EEE_CAP;
		eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
		bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
				 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
		bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
				 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
	}
	if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED) {
		if (bp->test_info)
			bp->test_info->flags |= BNXT_TEST_FL_EXT_LPBK;
	}
	if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED) {
		if (bp->test_info)
			bp->test_info->flags |= BNXT_TEST_FL_AN_PHY_LPBK;
	}
	if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED) {
		if (BNXT_PF(bp))
			bp->fw_cap |= BNXT_FW_CAP_SHARED_PORT_CFG;
	}
	if (resp->supported_speeds_auto_mode)
		link_info->support_auto_speeds =
			le16_to_cpu(resp->supported_speeds_auto_mode);

	bp->port_count = resp->port_cnt;

hwrm_phy_qcaps_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
8471 static int bnxt_update_link(struct bnxt
*bp
, bool chng_link_state
)
8474 struct bnxt_link_info
*link_info
= &bp
->link_info
;
8475 struct hwrm_port_phy_qcfg_input req
= {0};
8476 struct hwrm_port_phy_qcfg_output
*resp
= bp
->hwrm_cmd_resp_addr
;
8477 u8 link_up
= link_info
->link_up
;
8480 bnxt_hwrm_cmd_hdr_init(bp
, &req
, HWRM_PORT_PHY_QCFG
, -1, -1);
8482 mutex_lock(&bp
->hwrm_cmd_lock
);
8483 rc
= _hwrm_send_message(bp
, &req
, sizeof(req
), HWRM_CMD_TIMEOUT
);
8485 mutex_unlock(&bp
->hwrm_cmd_lock
);
	memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
	link_info->phy_link_status = resp->link;
	link_info->duplex = resp->duplex_cfg;
	if (bp->hwrm_spec_code >= 0x10800)
		link_info->duplex = resp->duplex_state;
	link_info->pause = resp->pause;
	link_info->auto_mode = resp->auto_mode;
	link_info->auto_pause_setting = resp->auto_pause;
	link_info->lp_pause = resp->link_partner_adv_pause;
	link_info->force_pause_setting = resp->force_pause;
	link_info->duplex_setting = resp->duplex_cfg;
	if (link_info->phy_link_status == BNXT_LINK_LINK)
		link_info->link_speed = le16_to_cpu(resp->link_speed);
	else
		link_info->link_speed = 0;
	link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
	link_info->support_speeds = le16_to_cpu(resp->support_speeds);
	link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
	link_info->lp_auto_link_speeds =
		le16_to_cpu(resp->link_partner_adv_speeds);
	link_info->preemphasis = le32_to_cpu(resp->preemphasis);
	link_info->phy_ver[0] = resp->phy_maj;
	link_info->phy_ver[1] = resp->phy_min;
	link_info->phy_ver[2] = resp->phy_bld;
	link_info->media_type = resp->media_type;
	link_info->phy_type = resp->phy_type;
	link_info->transceiver = resp->xcvr_pkg_type;
	link_info->phy_addr = resp->eee_config_phy_addr &
			      PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
	link_info->module_status = resp->module_status;

	if (bp->flags & BNXT_FLAG_EEE_CAP) {
		struct ethtool_eee *eee = &bp->eee;
		u16 fw_speeds;

		eee->eee_active = 0;
		if (resp->eee_config_phy_addr &
		    PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
			eee->eee_active = 1;
			fw_speeds = le16_to_cpu(
				resp->link_partner_adv_eee_link_speed_mask);
			eee->lp_advertised =
				_bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
		}

		/* Pull initial EEE config */
		if (!chng_link_state) {
			if (resp->eee_config_phy_addr &
			    PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
				eee->eee_enabled = 1;

			fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
			eee->advertised =
				_bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);

			if (resp->eee_config_phy_addr &
			    PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
				__le32 tmr;

				eee->tx_lpi_enabled = 1;
				tmr = resp->xcvr_identifier_type_tx_lpi_timer;
				eee->tx_lpi_timer = le32_to_cpu(tmr) &
					PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
			}
		}
	}

	link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
	if (bp->hwrm_spec_code >= 0x10504)
		link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);

	/* TODO: need to add more logic to report VF link */
	if (chng_link_state) {
		if (link_info->phy_link_status == BNXT_LINK_LINK)
			link_info->link_up = 1;
		else
			link_info->link_up = 0;
		if (link_up != link_info->link_up)
			bnxt_report_link(bp);
	} else {
		/* always link down if not required to update link state */
		link_info->link_up = 0;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);

	if (!BNXT_PHY_CFG_ABLE(bp))
		return 0;

	diff = link_info->support_auto_speeds ^ link_info->advertising;
	if ((link_info->support_auto_speeds | diff) !=
	    link_info->support_auto_speeds) {
		/* An advertised speed is no longer supported, so we need to
		 * update the advertisement settings.  Caller holds RTNL
		 * so we can modify link settings.
		 */
		link_info->advertising = link_info->support_auto_speeds;
		if (link_info->autoneg & BNXT_AUTONEG_SPEED)
			bnxt_hwrm_set_link_setting(bp, true, false);
	}
	return 0;
}

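/* Refresh the PHY/module state from the latest PORT_PHY_QCFG response and
 * warn the user when firmware reports an unqualified, powered-down, or
 * TX-disabled SFP+ module.
 */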
static void bnxt_get_port_module_status(struct bnxt *bp)
{
	struct bnxt_link_info *link_info = &bp->link_info;
	struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
	u8 module_status;

	if (bnxt_update_link(bp, true))
		return;

	module_status = link_info->module_status;
	switch (module_status) {
	case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
	case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
	case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
		netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
			    bp->pf.port_id);
		if (bp->hwrm_spec_code >= 0x10201) {
			netdev_warn(bp->dev, "Module part number %s\n",
				    resp->phy_vendor_partnumber);
		}
		if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
			netdev_warn(bp->dev, "TX is disabled\n");
		if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
			netdev_warn(bp->dev, "SFP+ module is shutdown\n");
	}
}

static void
bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
{
	if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
		if (bp->hwrm_spec_code >= 0x10201)
			req->auto_pause =
				PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
			req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
			req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
		req->enables |=
			cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
	} else {
		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
			req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
			req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
		req->enables |=
			cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
		if (bp->hwrm_spec_code >= 0x10201) {
			req->auto_pause = req->force_pause;
			req->enables |= cpu_to_le32(
				PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
		}
	}
}

static void bnxt_hwrm_set_link_common(struct bnxt *bp,
				      struct hwrm_port_phy_cfg_input *req)
{
	u8 autoneg = bp->link_info.autoneg;
	u16 fw_link_speed = bp->link_info.req_link_speed;
	u16 advertising = bp->link_info.advertising;

	if (autoneg & BNXT_AUTONEG_SPEED) {
		req->auto_mode |=
			PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;

		req->enables |= cpu_to_le32(
			PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
		req->auto_link_speed_mask = cpu_to_le16(advertising);

		req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
		req->flags |=
			cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
	} else {
		req->force_link_speed = cpu_to_le16(fw_link_speed);
		req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
	}

	/* tell chimp that the setting takes effect immediately */
	req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
}

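/* Send a PORT_PHY_CFG request to apply the requested flow-control settings
 * and update the cached pause state on success.
 */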
int bnxt_hwrm_set_pause(struct bnxt *bp)
{
	struct hwrm_port_phy_cfg_input req = {0};
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
	bnxt_hwrm_set_pause_common(bp, &req);

	if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
	    bp->link_info.force_link_chng)
		bnxt_hwrm_set_link_common(bp, &req);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
		/* since changing of pause setting doesn't trigger any link
		 * change event, the driver needs to update the current pause
		 * result upon successful return of the phy_cfg command
		 */
		bp->link_info.pause =
		bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
		bp->link_info.auto_pause_setting = 0;
		if (!bp->link_info.force_link_chng)
			bnxt_report_link(bp);
	}
	bp->link_info.force_link_chng = false;
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static void bnxt_hwrm_set_eee(struct bnxt *bp,
			      struct hwrm_port_phy_cfg_input *req)
{
	struct ethtool_eee *eee = &bp->eee;

	if (eee->eee_enabled) {
		u16 eee_speeds;
		u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;

		if (eee->tx_lpi_enabled)
			flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
		else
			flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;

		req->flags |= cpu_to_le32(flags);
		eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
		req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
		req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
	} else {
		req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
	}
}

int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
{
	struct hwrm_port_phy_cfg_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
	if (set_pause)
		bnxt_hwrm_set_pause_common(bp, &req);

	bnxt_hwrm_set_link_common(bp, &req);

	if (set_eee)
		bnxt_hwrm_set_eee(bp, &req);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

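/* Force the link down via PORT_PHY_CFG; only done on a single-function PF
 * with no active VFs.
 */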
static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
{
	struct hwrm_port_phy_cfg_input req = {0};

	if (!BNXT_SINGLE_PF(bp))
		return 0;

	if (pci_num_vf(bp->pdev))
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
	req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

static int bnxt_fw_init_one(struct bnxt *bp);

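/* Notify firmware of an interface up/down transition via FUNC_DRV_IF_CHANGE
 * and re-initialize firmware state and resource reservations if the response
 * indicates that resources changed or a hot firmware reset completed.
 */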
static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
{
	struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_drv_if_change_input req = {0};
	bool resc_reinit = false, fw_reset = false;
	u32 flags = 0;
	int rc;

	if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_IF_CHANGE, -1, -1);
	if (up)
		req.flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		flags = le32_to_cpu(resp->flags);
	mutex_unlock(&bp->hwrm_cmd_lock);
	if (rc)
		return rc;

	if (!up)
		return 0;

	if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)
		resc_reinit = true;
	if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE)
		fw_reset = true;

	if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
		netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
		return -ENODEV;
	}
	if (resc_reinit || fw_reset) {
		if (fw_reset) {
			if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
				bnxt_ulp_stop(bp);
			bnxt_free_ctx_mem(bp);
			kfree(bp->ctx);
			bp->ctx = NULL;
			rc = bnxt_fw_init_one(bp);
			if (rc) {
				set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
				return rc;
			}
			bnxt_clear_int_mode(bp);
			rc = bnxt_init_int_mode(bp);
			if (rc) {
				netdev_err(bp->dev, "init int mode failed\n");
				return rc;
			}
			set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
		}
		if (BNXT_NEW_RM(bp)) {
			struct bnxt_hw_resc *hw_resc = &bp->hw_resc;

			rc = bnxt_hwrm_func_resc_qcaps(bp, true);
			hw_resc->resv_cp_rings = 0;
			hw_resc->resv_stat_ctxs = 0;
			hw_resc->resv_irqs = 0;
			hw_resc->resv_tx_rings = 0;
			hw_resc->resv_rx_rings = 0;
			hw_resc->resv_hw_ring_grps = 0;
			hw_resc->resv_vnics = 0;
			if (!fw_reset) {
				bp->tx_nr_rings = 0;
				bp->rx_nr_rings = 0;
			}
		}
	}
	return 0;
}

static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
{
	struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_port_led_qcaps_input req = {0};
	struct bnxt_pf_info *pf = &bp->pf;
	int rc;

	bp->num_leds = 0;
	if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1);
	req.port_id = cpu_to_le16(pf->port_id);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc) {
		mutex_unlock(&bp->hwrm_cmd_lock);
		return rc;
	}
	if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
		int i;

		bp->num_leds = resp->num_leds;
		memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
						 bp->num_leds);
		for (i = 0; i < bp->num_leds; i++) {
			struct bnxt_led_info *led = &bp->leds[i];
			__le16 caps = led->led_state_caps;

			if (!led->led_group_id ||
			    !BNXT_LED_ALT_BLINK_CAP(caps)) {
				bp->num_leds = 0;
				break;
			}
		}
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return 0;
}

int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
{
	struct hwrm_wol_filter_alloc_input req = {0};
	struct hwrm_wol_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_ALLOC, -1, -1);
	req.port_id = cpu_to_le16(bp->pf.port_id);
	req.wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
	req.enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
	memcpy(req.mac_address, bp->dev->dev_addr, ETH_ALEN);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		bp->wol_filter_id = resp->wol_filter_id;
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
{
	struct hwrm_wol_filter_free_input req = {0};
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1);
	req.port_id = cpu_to_le16(bp->pf.port_id);
	req.enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
	req.wol_filter_id = bp->wol_filter_id;
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	return rc;
}

static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
{
	struct hwrm_wol_filter_qcfg_input req = {0};
	struct hwrm_wol_filter_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	u16 next_handle = 0;
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_QCFG, -1, -1);
	req.port_id = cpu_to_le16(bp->pf.port_id);
	req.handle = cpu_to_le16(handle);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		next_handle = le16_to_cpu(resp->next_handle);
		if (next_handle != 0) {
			if (resp->wol_type ==
			    WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
				bp->wol = 1;
				bp->wol_filter_id = resp->wol_filter_id;
			}
		}
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return next_handle;
}

static void bnxt_get_wol_settings(struct bnxt *bp)
{
	u16 handle = 0;

	bp->wol = 0;
	if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
		return;

	do {
		handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
	} while (handle && handle != 0xffff);
}

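/* Optional hwmon support: expose the chip temperature (temp1_input, in
 * millidegrees Celsius) read via HWRM_TEMP_MONITOR_QUERY.
 */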
#ifdef CONFIG_BNXT_HWMON
static ssize_t bnxt_show_temp(struct device *dev,
			      struct device_attribute *devattr, char *buf)
{
	struct hwrm_temp_monitor_query_input req = {0};
	struct hwrm_temp_monitor_query_output *resp;
	struct bnxt *bp = dev_get_drvdata(dev);
	u32 temp = 0;

	resp = bp->hwrm_cmd_resp_addr;
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
	mutex_lock(&bp->hwrm_cmd_lock);
	if (!_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
		temp = resp->temp * 1000; /* display millidegree */
	mutex_unlock(&bp->hwrm_cmd_lock);

	return sprintf(buf, "%u\n", temp);
}
static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);

static struct attribute *bnxt_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	NULL
};
ATTRIBUTE_GROUPS(bnxt);

static void bnxt_hwmon_close(struct bnxt *bp)
{
	if (bp->hwmon_dev) {
		hwmon_device_unregister(bp->hwmon_dev);
		bp->hwmon_dev = NULL;
	}
}

static void bnxt_hwmon_open(struct bnxt *bp)
{
	struct pci_dev *pdev = bp->pdev;

	if (bp->hwmon_dev)
		return;

	bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
							  DRV_MODULE_NAME, bp,
							  bnxt_groups);
	if (IS_ERR(bp->hwmon_dev)) {
		bp->hwmon_dev = NULL;
		dev_warn(&pdev->dev, "Cannot register hwmon device\n");
	}
}
#else
static void bnxt_hwmon_close(struct bnxt *bp)
{
}

static void bnxt_hwmon_open(struct bnxt *bp)
{
}
#endif

static bool bnxt_eee_config_ok(struct bnxt *bp)
{
	struct ethtool_eee *eee = &bp->eee;
	struct bnxt_link_info *link_info = &bp->link_info;

	if (!(bp->flags & BNXT_FLAG_EEE_CAP))
		return true;

	if (eee->eee_enabled) {
		u32 advertising =
			_bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);

		if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
			eee->eee_enabled = 0;
			return false;
		}
		if (eee->advertised & ~advertising) {
			eee->advertised = advertising & eee->supported;
			return false;
		}
	}
	return true;
}

9026 static int bnxt_update_phy_setting(struct bnxt
*bp
)
9029 bool update_link
= false;
9030 bool update_pause
= false;
9031 bool update_eee
= false;
9032 struct bnxt_link_info
*link_info
= &bp
->link_info
;
9034 rc
= bnxt_update_link(bp
, true);
9036 netdev_err(bp
->dev
, "failed to update link (rc: %x)\n",
9040 if (!BNXT_SINGLE_PF(bp
))
9043 if ((link_info
->autoneg
& BNXT_AUTONEG_FLOW_CTRL
) &&
9044 (link_info
->auto_pause_setting
& BNXT_LINK_PAUSE_BOTH
) !=
9045 link_info
->req_flow_ctrl
)
9046 update_pause
= true;
9047 if (!(link_info
->autoneg
& BNXT_AUTONEG_FLOW_CTRL
) &&
9048 link_info
->force_pause_setting
!= link_info
->req_flow_ctrl
)
9049 update_pause
= true;
9050 if (!(link_info
->autoneg
& BNXT_AUTONEG_SPEED
)) {
9051 if (BNXT_AUTO_MODE(link_info
->auto_mode
))
9053 if (link_info
->req_link_speed
!= link_info
->force_link_speed
)
9055 if (link_info
->req_duplex
!= link_info
->duplex_setting
)
9058 if (link_info
->auto_mode
== BNXT_LINK_AUTO_NONE
)
9060 if (link_info
->advertising
!= link_info
->auto_link_speeds
)
9064 /* The last close may have shutdown the link, so need to call
9065 * PHY_CFG to bring it back up.
9067 if (!bp
->link_info
.link_up
)
9070 if (!bnxt_eee_config_ok(bp
))
9074 rc
= bnxt_hwrm_set_link_setting(bp
, update_pause
, update_eee
);
9075 else if (update_pause
)
9076 rc
= bnxt_hwrm_set_pause(bp
);
9078 netdev_err(bp
->dev
, "failed to update phy setting (rc: %x)\n",
/* Common routine to pre-map certain register blocks to different GRC windows.
 * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows
 * in a PF and 3 windows in a VF can be customized to map to different
 * register blocks.
 */
9091 static void bnxt_preset_reg_win(struct bnxt
*bp
)
9094 /* CAG registers map to GRC window #4 */
9095 writel(BNXT_CAG_REG_BASE
,
9096 bp
->bar0
+ BNXT_GRCPF_REG_WINDOW_BASE_OUT
+ 12);
9100 static int bnxt_init_dflt_ring_mode(struct bnxt
*bp
);
9102 static int __bnxt_open_nic(struct bnxt
*bp
, bool irq_re_init
, bool link_re_init
)
9106 bnxt_preset_reg_win(bp
);
9107 netif_carrier_off(bp
->dev
);
9109 /* Reserve rings now if none were reserved at driver probe. */
9110 rc
= bnxt_init_dflt_ring_mode(bp
);
9112 netdev_err(bp
->dev
, "Failed to reserve default rings at open\n");
9116 rc
= bnxt_reserve_rings(bp
, irq_re_init
);
9119 if ((bp
->flags
& BNXT_FLAG_RFS
) &&
9120 !(bp
->flags
& BNXT_FLAG_USING_MSIX
)) {
9121 /* disable RFS if falling back to INTA */
9122 bp
->dev
->hw_features
&= ~NETIF_F_NTUPLE
;
9123 bp
->flags
&= ~BNXT_FLAG_RFS
;
9126 rc
= bnxt_alloc_mem(bp
, irq_re_init
);
9128 netdev_err(bp
->dev
, "bnxt_alloc_mem err: %x\n", rc
);
9129 goto open_err_free_mem
;
9134 rc
= bnxt_request_irq(bp
);
9136 netdev_err(bp
->dev
, "bnxt_request_irq err: %x\n", rc
);
9141 bnxt_enable_napi(bp
);
9142 bnxt_debug_dev_init(bp
);
9144 rc
= bnxt_init_nic(bp
, irq_re_init
);
9146 netdev_err(bp
->dev
, "bnxt_init_nic err: %x\n", rc
);
9151 mutex_lock(&bp
->link_lock
);
9152 rc
= bnxt_update_phy_setting(bp
);
9153 mutex_unlock(&bp
->link_lock
);
9155 netdev_warn(bp
->dev
, "failed to update phy settings\n");
9156 if (BNXT_SINGLE_PF(bp
)) {
9157 bp
->link_info
.phy_retry
= true;
9158 bp
->link_info
.phy_retry_expires
=
9165 udp_tunnel_get_rx_info(bp
->dev
);
9167 set_bit(BNXT_STATE_OPEN
, &bp
->state
);
9168 bnxt_enable_int(bp
);
9169 /* Enable TX queues */
9171 mod_timer(&bp
->timer
, jiffies
+ bp
->current_interval
);
9172 /* Poll link status and check for SFP+ module status */
9173 bnxt_get_port_module_status(bp
);
9175 /* VF-reps may need to be re-opened after the PF is re-opened */
9177 bnxt_vf_reps_open(bp
);
9181 bnxt_debug_dev_exit(bp
);
9182 bnxt_disable_napi(bp
);
9190 bnxt_free_mem(bp
, true);
9194 /* rtnl_lock held */
9195 int bnxt_open_nic(struct bnxt
*bp
, bool irq_re_init
, bool link_re_init
)
9199 rc
= __bnxt_open_nic(bp
, irq_re_init
, link_re_init
);
9201 netdev_err(bp
->dev
, "nic open fail (rc: %x)\n", rc
);
/* rtnl_lock held, open the NIC half way by allocating all resources, but
 * NAPI, IRQ, and TX are not enabled.  This is mainly used for offline
 * self tests.
 */
9211 int bnxt_half_open_nic(struct bnxt
*bp
)
9215 rc
= bnxt_alloc_mem(bp
, false);
9217 netdev_err(bp
->dev
, "bnxt_alloc_mem err: %x\n", rc
);
9220 rc
= bnxt_init_nic(bp
, false);
9222 netdev_err(bp
->dev
, "bnxt_init_nic err: %x\n", rc
);
9229 bnxt_free_mem(bp
, false);
9234 /* rtnl_lock held, this call can only be made after a previous successful
9235 * call to bnxt_half_open_nic().
9237 void bnxt_half_close_nic(struct bnxt
*bp
)
9239 bnxt_hwrm_resource_free(bp
, false, false);
9241 bnxt_free_mem(bp
, false);
9244 static int bnxt_open(struct net_device
*dev
)
9246 struct bnxt
*bp
= netdev_priv(dev
);
9249 if (test_bit(BNXT_STATE_ABORT_ERR
, &bp
->state
)) {
9250 netdev_err(bp
->dev
, "A previous firmware reset did not complete, aborting\n");
9254 rc
= bnxt_hwrm_if_change(bp
, true);
9257 rc
= __bnxt_open_nic(bp
, true, true);
9259 bnxt_hwrm_if_change(bp
, false);
9261 if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET
, &bp
->state
)) {
9263 struct bnxt_pf_info
*pf
= &bp
->pf
;
9264 int n
= pf
->active_vfs
;
9267 bnxt_cfg_hw_sriov(bp
, &n
, true);
9269 if (!test_bit(BNXT_STATE_IN_FW_RESET
, &bp
->state
))
9270 bnxt_ulp_start(bp
, 0);
9272 bnxt_hwmon_open(bp
);
9278 static bool bnxt_drv_busy(struct bnxt
*bp
)
9280 return (test_bit(BNXT_STATE_IN_SP_TASK
, &bp
->state
) ||
9281 test_bit(BNXT_STATE_READ_STATS
, &bp
->state
));
9284 static void bnxt_get_ring_stats(struct bnxt
*bp
,
9285 struct rtnl_link_stats64
*stats
);
9287 static void __bnxt_close_nic(struct bnxt
*bp
, bool irq_re_init
,
9290 /* Close the VF-reps before closing PF */
9292 bnxt_vf_reps_close(bp
);
9294 /* Change device state to avoid TX queue wake up's */
9295 bnxt_tx_disable(bp
);
9297 clear_bit(BNXT_STATE_OPEN
, &bp
->state
);
9298 smp_mb__after_atomic();
9299 while (bnxt_drv_busy(bp
))
	/* Flush rings and disable interrupts */
9303 bnxt_shutdown_nic(bp
, irq_re_init
);
9305 /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
9307 bnxt_debug_dev_exit(bp
);
9308 bnxt_disable_napi(bp
);
9309 del_timer_sync(&bp
->timer
);
9310 if (test_bit(BNXT_STATE_IN_FW_RESET
, &bp
->state
) &&
9311 pci_is_enabled(bp
->pdev
))
9312 pci_disable_device(bp
->pdev
);
9316 /* Save ring stats before shutdown */
9318 bnxt_get_ring_stats(bp
, &bp
->net_stats_prev
);
9323 bnxt_free_mem(bp
, irq_re_init
);
9326 int bnxt_close_nic(struct bnxt
*bp
, bool irq_re_init
, bool link_re_init
)
9330 if (test_bit(BNXT_STATE_IN_FW_RESET
, &bp
->state
)) {
9331 /* If we get here, it means firmware reset is in progress
9332 * while we are trying to close. We can safely proceed with
9333 * the close because we are holding rtnl_lock(). Some firmware
9334 * messages may fail as we proceed to close. We set the
9335 * ABORT_ERR flag here so that the FW reset thread will later
9336 * abort when it gets the rtnl_lock() and sees the flag.
9338 netdev_warn(bp
->dev
, "FW reset in progress during close, FW reset will be aborted\n");
9339 set_bit(BNXT_STATE_ABORT_ERR
, &bp
->state
);
9342 #ifdef CONFIG_BNXT_SRIOV
9343 if (bp
->sriov_cfg
) {
9344 rc
= wait_event_interruptible_timeout(bp
->sriov_cfg_wait
,
9346 BNXT_SRIOV_CFG_WAIT_TMO
);
9348 netdev_warn(bp
->dev
, "timeout waiting for SRIOV config operation to complete!\n");
9351 __bnxt_close_nic(bp
, irq_re_init
, link_re_init
);
9355 static int bnxt_close(struct net_device
*dev
)
9357 struct bnxt
*bp
= netdev_priv(dev
);
9359 bnxt_hwmon_close(bp
);
9360 bnxt_close_nic(bp
, true, true);
9361 bnxt_hwrm_shutdown_link(bp
);
9362 bnxt_hwrm_if_change(bp
, false);
9366 static int bnxt_hwrm_port_phy_read(struct bnxt
*bp
, u16 phy_addr
, u16 reg
,
9369 struct hwrm_port_phy_mdio_read_output
*resp
= bp
->hwrm_cmd_resp_addr
;
9370 struct hwrm_port_phy_mdio_read_input req
= {0};
9373 if (bp
->hwrm_spec_code
< 0x10a00)
9376 bnxt_hwrm_cmd_hdr_init(bp
, &req
, HWRM_PORT_PHY_MDIO_READ
, -1, -1);
9377 req
.port_id
= cpu_to_le16(bp
->pf
.port_id
);
9378 req
.phy_addr
= phy_addr
;
9379 req
.reg_addr
= cpu_to_le16(reg
& 0x1f);
9380 if (mdio_phy_id_is_c45(phy_addr
)) {
9382 req
.phy_addr
= mdio_phy_id_prtad(phy_addr
);
9383 req
.dev_addr
= mdio_phy_id_devad(phy_addr
);
9384 req
.reg_addr
= cpu_to_le16(reg
);
9387 mutex_lock(&bp
->hwrm_cmd_lock
);
9388 rc
= _hwrm_send_message(bp
, &req
, sizeof(req
), HWRM_CMD_TIMEOUT
);
9390 *val
= le16_to_cpu(resp
->reg_data
);
9391 mutex_unlock(&bp
->hwrm_cmd_lock
);
9395 static int bnxt_hwrm_port_phy_write(struct bnxt
*bp
, u16 phy_addr
, u16 reg
,
9398 struct hwrm_port_phy_mdio_write_input req
= {0};
9400 if (bp
->hwrm_spec_code
< 0x10a00)
9403 bnxt_hwrm_cmd_hdr_init(bp
, &req
, HWRM_PORT_PHY_MDIO_WRITE
, -1, -1);
9404 req
.port_id
= cpu_to_le16(bp
->pf
.port_id
);
9405 req
.phy_addr
= phy_addr
;
9406 req
.reg_addr
= cpu_to_le16(reg
& 0x1f);
9407 if (mdio_phy_id_is_c45(phy_addr
)) {
9409 req
.phy_addr
= mdio_phy_id_prtad(phy_addr
);
9410 req
.dev_addr
= mdio_phy_id_devad(phy_addr
);
9411 req
.reg_addr
= cpu_to_le16(reg
);
9413 req
.reg_data
= cpu_to_le16(val
);
9415 return hwrm_send_message(bp
, &req
, sizeof(req
), HWRM_CMD_TIMEOUT
);
9418 /* rtnl_lock held */
9419 static int bnxt_ioctl(struct net_device
*dev
, struct ifreq
*ifr
, int cmd
)
9421 struct mii_ioctl_data
*mdio
= if_mii(ifr
);
9422 struct bnxt
*bp
= netdev_priv(dev
);
9427 mdio
->phy_id
= bp
->link_info
.phy_addr
;
9433 if (!netif_running(dev
))
9436 rc
= bnxt_hwrm_port_phy_read(bp
, mdio
->phy_id
, mdio
->reg_num
,
9438 mdio
->val_out
= mii_regval
;
9443 if (!netif_running(dev
))
9446 return bnxt_hwrm_port_phy_write(bp
, mdio
->phy_id
, mdio
->reg_num
,
9456 static void bnxt_get_ring_stats(struct bnxt
*bp
,
9457 struct rtnl_link_stats64
*stats
)
9462 for (i
= 0; i
< bp
->cp_nr_rings
; i
++) {
9463 struct bnxt_napi
*bnapi
= bp
->bnapi
[i
];
9464 struct bnxt_cp_ring_info
*cpr
= &bnapi
->cp_ring
;
9465 struct ctx_hw_stats
*hw_stats
= cpr
->hw_stats
;
9467 stats
->rx_packets
+= le64_to_cpu(hw_stats
->rx_ucast_pkts
);
9468 stats
->rx_packets
+= le64_to_cpu(hw_stats
->rx_mcast_pkts
);
9469 stats
->rx_packets
+= le64_to_cpu(hw_stats
->rx_bcast_pkts
);
9471 stats
->tx_packets
+= le64_to_cpu(hw_stats
->tx_ucast_pkts
);
9472 stats
->tx_packets
+= le64_to_cpu(hw_stats
->tx_mcast_pkts
);
9473 stats
->tx_packets
+= le64_to_cpu(hw_stats
->tx_bcast_pkts
);
9475 stats
->rx_bytes
+= le64_to_cpu(hw_stats
->rx_ucast_bytes
);
9476 stats
->rx_bytes
+= le64_to_cpu(hw_stats
->rx_mcast_bytes
);
9477 stats
->rx_bytes
+= le64_to_cpu(hw_stats
->rx_bcast_bytes
);
9479 stats
->tx_bytes
+= le64_to_cpu(hw_stats
->tx_ucast_bytes
);
9480 stats
->tx_bytes
+= le64_to_cpu(hw_stats
->tx_mcast_bytes
);
9481 stats
->tx_bytes
+= le64_to_cpu(hw_stats
->tx_bcast_bytes
);
9483 stats
->rx_missed_errors
+=
9484 le64_to_cpu(hw_stats
->rx_discard_pkts
);
9486 stats
->multicast
+= le64_to_cpu(hw_stats
->rx_mcast_pkts
);
9488 stats
->tx_dropped
+= le64_to_cpu(hw_stats
->tx_drop_pkts
);
9492 static void bnxt_add_prev_stats(struct bnxt
*bp
,
9493 struct rtnl_link_stats64
*stats
)
9495 struct rtnl_link_stats64
*prev_stats
= &bp
->net_stats_prev
;
9497 stats
->rx_packets
+= prev_stats
->rx_packets
;
9498 stats
->tx_packets
+= prev_stats
->tx_packets
;
9499 stats
->rx_bytes
+= prev_stats
->rx_bytes
;
9500 stats
->tx_bytes
+= prev_stats
->tx_bytes
;
9501 stats
->rx_missed_errors
+= prev_stats
->rx_missed_errors
;
9502 stats
->multicast
+= prev_stats
->multicast
;
9503 stats
->tx_dropped
+= prev_stats
->tx_dropped
;
9507 bnxt_get_stats64(struct net_device
*dev
, struct rtnl_link_stats64
*stats
)
9509 struct bnxt
*bp
= netdev_priv(dev
);
9511 set_bit(BNXT_STATE_READ_STATS
, &bp
->state
);
9512 /* Make sure bnxt_close_nic() sees that we are reading stats before
9513 * we check the BNXT_STATE_OPEN flag.
9515 smp_mb__after_atomic();
9516 if (!test_bit(BNXT_STATE_OPEN
, &bp
->state
)) {
9517 clear_bit(BNXT_STATE_READ_STATS
, &bp
->state
);
9518 *stats
= bp
->net_stats_prev
;
9522 bnxt_get_ring_stats(bp
, stats
);
9523 bnxt_add_prev_stats(bp
, stats
);
9525 if (bp
->flags
& BNXT_FLAG_PORT_STATS
) {
9526 struct rx_port_stats
*rx
= bp
->hw_rx_port_stats
;
9527 struct tx_port_stats
*tx
= bp
->hw_tx_port_stats
;
9529 stats
->rx_crc_errors
= le64_to_cpu(rx
->rx_fcs_err_frames
);
9530 stats
->rx_frame_errors
= le64_to_cpu(rx
->rx_align_err_frames
);
9531 stats
->rx_length_errors
= le64_to_cpu(rx
->rx_undrsz_frames
) +
9532 le64_to_cpu(rx
->rx_ovrsz_frames
) +
9533 le64_to_cpu(rx
->rx_runt_frames
);
9534 stats
->rx_errors
= le64_to_cpu(rx
->rx_false_carrier_frames
) +
9535 le64_to_cpu(rx
->rx_jbr_frames
);
9536 stats
->collisions
= le64_to_cpu(tx
->tx_total_collisions
);
9537 stats
->tx_fifo_errors
= le64_to_cpu(tx
->tx_fifo_underruns
);
9538 stats
->tx_errors
= le64_to_cpu(tx
->tx_err
);
9540 clear_bit(BNXT_STATE_READ_STATS
, &bp
->state
);
9543 static bool bnxt_mc_list_updated(struct bnxt
*bp
, u32
*rx_mask
)
9545 struct net_device
*dev
= bp
->dev
;
9546 struct bnxt_vnic_info
*vnic
= &bp
->vnic_info
[0];
9547 struct netdev_hw_addr
*ha
;
9550 bool update
= false;
9553 netdev_for_each_mc_addr(ha
, dev
) {
9554 if (mc_count
>= BNXT_MAX_MC_ADDRS
) {
9555 *rx_mask
|= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST
;
9556 vnic
->mc_list_count
= 0;
9560 if (!ether_addr_equal(haddr
, vnic
->mc_list
+ off
)) {
9561 memcpy(vnic
->mc_list
+ off
, haddr
, ETH_ALEN
);
9568 *rx_mask
|= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST
;
9570 if (mc_count
!= vnic
->mc_list_count
) {
9571 vnic
->mc_list_count
= mc_count
;
9577 static bool bnxt_uc_list_updated(struct bnxt
*bp
)
9579 struct net_device
*dev
= bp
->dev
;
9580 struct bnxt_vnic_info
*vnic
= &bp
->vnic_info
[0];
9581 struct netdev_hw_addr
*ha
;
9584 if (netdev_uc_count(dev
) != (vnic
->uc_filter_count
- 1))
9587 netdev_for_each_uc_addr(ha
, dev
) {
9588 if (!ether_addr_equal(ha
->addr
, vnic
->uc_list
+ off
))
9596 static void bnxt_set_rx_mode(struct net_device
*dev
)
9598 struct bnxt
*bp
= netdev_priv(dev
);
9599 struct bnxt_vnic_info
*vnic
;
9600 bool mc_update
= false;
9604 if (!test_bit(BNXT_STATE_OPEN
, &bp
->state
))
9607 vnic
= &bp
->vnic_info
[0];
9608 mask
= vnic
->rx_mask
;
9609 mask
&= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS
|
9610 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST
|
9611 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST
|
9612 CFA_L2_SET_RX_MASK_REQ_MASK_BCAST
);
9614 if ((dev
->flags
& IFF_PROMISC
) && bnxt_promisc_ok(bp
))
9615 mask
|= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS
;
9617 uc_update
= bnxt_uc_list_updated(bp
);
9619 if (dev
->flags
& IFF_BROADCAST
)
9620 mask
|= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST
;
9621 if (dev
->flags
& IFF_ALLMULTI
) {
9622 mask
|= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST
;
9623 vnic
->mc_list_count
= 0;
9625 mc_update
= bnxt_mc_list_updated(bp
, &mask
);
9628 if (mask
!= vnic
->rx_mask
|| uc_update
|| mc_update
) {
9629 vnic
->rx_mask
= mask
;
9631 set_bit(BNXT_RX_MASK_SP_EVENT
, &bp
->sp_event
);
9632 bnxt_queue_sp_work(bp
);
9636 static int bnxt_cfg_rx_mode(struct bnxt
*bp
)
9638 struct net_device
*dev
= bp
->dev
;
9639 struct bnxt_vnic_info
*vnic
= &bp
->vnic_info
[0];
9640 struct netdev_hw_addr
*ha
;
9644 netif_addr_lock_bh(dev
);
9645 uc_update
= bnxt_uc_list_updated(bp
);
9646 netif_addr_unlock_bh(dev
);
9651 mutex_lock(&bp
->hwrm_cmd_lock
);
9652 for (i
= 1; i
< vnic
->uc_filter_count
; i
++) {
9653 struct hwrm_cfa_l2_filter_free_input req
= {0};
9655 bnxt_hwrm_cmd_hdr_init(bp
, &req
, HWRM_CFA_L2_FILTER_FREE
, -1,
9658 req
.l2_filter_id
= vnic
->fw_l2_filter_id
[i
];
9660 rc
= _hwrm_send_message(bp
, &req
, sizeof(req
),
9663 mutex_unlock(&bp
->hwrm_cmd_lock
);
9665 vnic
->uc_filter_count
= 1;
9667 netif_addr_lock_bh(dev
);
9668 if (netdev_uc_count(dev
) > (BNXT_MAX_UC_ADDRS
- 1)) {
9669 vnic
->rx_mask
|= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS
;
9671 netdev_for_each_uc_addr(ha
, dev
) {
9672 memcpy(vnic
->uc_list
+ off
, ha
->addr
, ETH_ALEN
);
9674 vnic
->uc_filter_count
++;
9677 netif_addr_unlock_bh(dev
);
9679 for (i
= 1, off
= 0; i
< vnic
->uc_filter_count
; i
++, off
+= ETH_ALEN
) {
9680 rc
= bnxt_hwrm_set_vnic_filter(bp
, 0, i
, vnic
->uc_list
+ off
);
9682 netdev_err(bp
->dev
, "HWRM vnic filter failure rc: %x\n",
9684 vnic
->uc_filter_count
= i
;
9690 rc
= bnxt_hwrm_cfa_l2_set_rx_mask(bp
, 0);
9691 if (rc
&& vnic
->mc_list_count
) {
9692 netdev_info(bp
->dev
, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
9694 vnic
->rx_mask
|= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST
;
9695 vnic
->mc_list_count
= 0;
9696 rc
= bnxt_hwrm_cfa_l2_set_rx_mask(bp
, 0);
9699 netdev_err(bp
->dev
, "HWRM cfa l2 rx mask failure rc: %d\n",
9705 static bool bnxt_can_reserve_rings(struct bnxt
*bp
)
9707 #ifdef CONFIG_BNXT_SRIOV
9708 if (BNXT_NEW_RM(bp
) && BNXT_VF(bp
)) {
9709 struct bnxt_hw_resc
*hw_resc
= &bp
->hw_resc
;
9711 /* No minimum rings were provisioned by the PF. Don't
9712 * reserve rings by default when device is down.
9714 if (hw_resc
->min_tx_rings
|| hw_resc
->resv_tx_rings
)
9717 if (!netif_running(bp
->dev
))
/* If the chip and firmware support RFS */
9725 static bool bnxt_rfs_supported(struct bnxt
*bp
)
9727 if (bp
->flags
& BNXT_FLAG_CHIP_P5
) {
9728 if (bp
->fw_cap
& BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2
)
9732 if (BNXT_PF(bp
) && !BNXT_CHIP_TYPE_NITRO_A0(bp
))
9734 if (bp
->flags
& BNXT_FLAG_NEW_RSS_CAP
)
9739 /* If runtime conditions support RFS */
9740 static bool bnxt_rfs_capable(struct bnxt
*bp
)
9742 #ifdef CONFIG_RFS_ACCEL
9743 int vnics
, max_vnics
, max_rss_ctxs
;
9745 if (bp
->flags
& BNXT_FLAG_CHIP_P5
)
9746 return bnxt_rfs_supported(bp
);
9747 if (!(bp
->flags
& BNXT_FLAG_MSIX_CAP
) || !bnxt_can_reserve_rings(bp
))
9750 vnics
= 1 + bp
->rx_nr_rings
;
9751 max_vnics
= bnxt_get_max_func_vnics(bp
);
9752 max_rss_ctxs
= bnxt_get_max_func_rss_ctxs(bp
);
9754 /* RSS contexts not a limiting factor */
9755 if (bp
->flags
& BNXT_FLAG_NEW_RSS_CAP
)
9756 max_rss_ctxs
= max_vnics
;
9757 if (vnics
> max_vnics
|| vnics
> max_rss_ctxs
) {
9758 if (bp
->rx_nr_rings
> 1)
9759 netdev_warn(bp
->dev
,
9760 "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
9761 min(max_rss_ctxs
- 1, max_vnics
- 1));
9765 if (!BNXT_NEW_RM(bp
))
9768 if (vnics
== bp
->hw_resc
.resv_vnics
)
9771 bnxt_hwrm_reserve_rings(bp
, 0, 0, 0, 0, 0, vnics
);
9772 if (vnics
<= bp
->hw_resc
.resv_vnics
)
9775 netdev_warn(bp
->dev
, "Unable to reserve resources to support NTUPLE filters.\n");
9776 bnxt_hwrm_reserve_rings(bp
, 0, 0, 0, 0, 0, 1);
9783 static netdev_features_t
bnxt_fix_features(struct net_device
*dev
,
9784 netdev_features_t features
)
9786 struct bnxt
*bp
= netdev_priv(dev
);
9788 if ((features
& NETIF_F_NTUPLE
) && !bnxt_rfs_capable(bp
))
9789 features
&= ~NETIF_F_NTUPLE
;
9791 if (bp
->flags
& BNXT_FLAG_NO_AGG_RINGS
)
9792 features
&= ~(NETIF_F_LRO
| NETIF_F_GRO_HW
);
9794 if (!(features
& NETIF_F_GRO
))
9795 features
&= ~NETIF_F_GRO_HW
;
9797 if (features
& NETIF_F_GRO_HW
)
9798 features
&= ~NETIF_F_LRO
;
	/* Both CTAG and STAG VLAN acceleration on the RX side have to be
	 * turned on or off together.
	 */
9803 if ((features
& (NETIF_F_HW_VLAN_CTAG_RX
| NETIF_F_HW_VLAN_STAG_RX
)) !=
9804 (NETIF_F_HW_VLAN_CTAG_RX
| NETIF_F_HW_VLAN_STAG_RX
)) {
9805 if (dev
->features
& NETIF_F_HW_VLAN_CTAG_RX
)
9806 features
&= ~(NETIF_F_HW_VLAN_CTAG_RX
|
9807 NETIF_F_HW_VLAN_STAG_RX
);
9809 features
|= NETIF_F_HW_VLAN_CTAG_RX
|
9810 NETIF_F_HW_VLAN_STAG_RX
;
9812 #ifdef CONFIG_BNXT_SRIOV
9815 features
&= ~(NETIF_F_HW_VLAN_CTAG_RX
|
9816 NETIF_F_HW_VLAN_STAG_RX
);
9823 static int bnxt_set_features(struct net_device
*dev
, netdev_features_t features
)
9825 struct bnxt
*bp
= netdev_priv(dev
);
9826 u32 flags
= bp
->flags
;
9829 bool re_init
= false;
9830 bool update_tpa
= false;
9832 flags
&= ~BNXT_FLAG_ALL_CONFIG_FEATS
;
9833 if (features
& NETIF_F_GRO_HW
)
9834 flags
|= BNXT_FLAG_GRO
;
9835 else if (features
& NETIF_F_LRO
)
9836 flags
|= BNXT_FLAG_LRO
;
9838 if (bp
->flags
& BNXT_FLAG_NO_AGG_RINGS
)
9839 flags
&= ~BNXT_FLAG_TPA
;
9841 if (features
& NETIF_F_HW_VLAN_CTAG_RX
)
9842 flags
|= BNXT_FLAG_STRIP_VLAN
;
9844 if (features
& NETIF_F_NTUPLE
)
9845 flags
|= BNXT_FLAG_RFS
;
9847 changes
= flags
^ bp
->flags
;
9848 if (changes
& BNXT_FLAG_TPA
) {
9850 if ((bp
->flags
& BNXT_FLAG_TPA
) == 0 ||
9851 (flags
& BNXT_FLAG_TPA
) == 0 ||
9852 (bp
->flags
& BNXT_FLAG_CHIP_P5
))
9856 if (changes
& ~BNXT_FLAG_TPA
)
9859 if (flags
!= bp
->flags
) {
9860 u32 old_flags
= bp
->flags
;
9862 if (!test_bit(BNXT_STATE_OPEN
, &bp
->state
)) {
9865 bnxt_set_ring_params(bp
);
9870 bnxt_close_nic(bp
, false, false);
9873 bnxt_set_ring_params(bp
);
9875 return bnxt_open_nic(bp
, false, false);
9879 rc
= bnxt_set_tpa(bp
,
9880 (flags
& BNXT_FLAG_TPA
) ?
9883 bp
->flags
= old_flags
;
9889 static int bnxt_dbg_hwrm_ring_info_get(struct bnxt
*bp
, u8 ring_type
,
9890 u32 ring_id
, u32
*prod
, u32
*cons
)
9892 struct hwrm_dbg_ring_info_get_output
*resp
= bp
->hwrm_cmd_resp_addr
;
9893 struct hwrm_dbg_ring_info_get_input req
= {0};
9896 bnxt_hwrm_cmd_hdr_init(bp
, &req
, HWRM_DBG_RING_INFO_GET
, -1, -1);
9897 req
.ring_type
= ring_type
;
9898 req
.fw_ring_id
= cpu_to_le32(ring_id
);
9899 mutex_lock(&bp
->hwrm_cmd_lock
);
9900 rc
= _hwrm_send_message(bp
, &req
, sizeof(req
), HWRM_CMD_TIMEOUT
);
9902 *prod
= le32_to_cpu(resp
->producer_index
);
9903 *cons
= le32_to_cpu(resp
->consumer_index
);
9905 mutex_unlock(&bp
->hwrm_cmd_lock
);
9909 static void bnxt_dump_tx_sw_state(struct bnxt_napi
*bnapi
)
9911 struct bnxt_tx_ring_info
*txr
= bnapi
->tx_ring
;
9912 int i
= bnapi
->index
;
9917 netdev_info(bnapi
->bp
->dev
, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
9918 i
, txr
->tx_ring_struct
.fw_ring_id
, txr
->tx_prod
,
9922 static void bnxt_dump_rx_sw_state(struct bnxt_napi
*bnapi
)
9924 struct bnxt_rx_ring_info
*rxr
= bnapi
->rx_ring
;
9925 int i
= bnapi
->index
;
9930 netdev_info(bnapi
->bp
->dev
, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
9931 i
, rxr
->rx_ring_struct
.fw_ring_id
, rxr
->rx_prod
,
9932 rxr
->rx_agg_ring_struct
.fw_ring_id
, rxr
->rx_agg_prod
,
9933 rxr
->rx_sw_agg_prod
);
9936 static void bnxt_dump_cp_sw_state(struct bnxt_napi
*bnapi
)
9938 struct bnxt_cp_ring_info
*cpr
= &bnapi
->cp_ring
;
9939 int i
= bnapi
->index
;
9941 netdev_info(bnapi
->bp
->dev
, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
9942 i
, cpr
->cp_ring_struct
.fw_ring_id
, cpr
->cp_raw_cons
);
9945 static void bnxt_dbg_dump_states(struct bnxt
*bp
)
9948 struct bnxt_napi
*bnapi
;
9950 for (i
= 0; i
< bp
->cp_nr_rings
; i
++) {
9951 bnapi
= bp
->bnapi
[i
];
9952 if (netif_msg_drv(bp
)) {
9953 bnxt_dump_tx_sw_state(bnapi
);
9954 bnxt_dump_rx_sw_state(bnapi
);
9955 bnxt_dump_cp_sw_state(bnapi
);
9960 static void bnxt_reset_task(struct bnxt
*bp
, bool silent
)
9963 bnxt_dbg_dump_states(bp
);
9964 if (netif_running(bp
->dev
)) {
9968 bnxt_close_nic(bp
, false, false);
9969 bnxt_open_nic(bp
, false, false);
9972 bnxt_close_nic(bp
, true, false);
9973 rc
= bnxt_open_nic(bp
, true, false);
9974 bnxt_ulp_start(bp
, rc
);
9979 static void bnxt_tx_timeout(struct net_device
*dev
, unsigned int txqueue
)
9981 struct bnxt
*bp
= netdev_priv(dev
);
9983 netdev_err(bp
->dev
, "TX timeout detected, starting reset task!\n");
9984 set_bit(BNXT_RESET_TASK_SP_EVENT
, &bp
->sp_event
);
9985 bnxt_queue_sp_work(bp
);
9988 static void bnxt_fw_health_check(struct bnxt
*bp
)
9990 struct bnxt_fw_health
*fw_health
= bp
->fw_health
;
9993 if (!fw_health
->enabled
|| test_bit(BNXT_STATE_IN_FW_RESET
, &bp
->state
))
9996 if (fw_health
->tmr_counter
) {
9997 fw_health
->tmr_counter
--;
10001 val
= bnxt_fw_health_readl(bp
, BNXT_FW_HEARTBEAT_REG
);
10002 if (val
== fw_health
->last_fw_heartbeat
)
10005 fw_health
->last_fw_heartbeat
= val
;
10007 val
= bnxt_fw_health_readl(bp
, BNXT_FW_RESET_CNT_REG
);
10008 if (val
!= fw_health
->last_fw_reset_cnt
)
10011 fw_health
->tmr_counter
= fw_health
->tmr_multiplier
;
10015 set_bit(BNXT_FW_EXCEPTION_SP_EVENT
, &bp
->sp_event
);
10016 bnxt_queue_sp_work(bp
);
10019 static void bnxt_timer(struct timer_list
*t
)
10021 struct bnxt
*bp
= from_timer(bp
, t
, timer
);
10022 struct net_device
*dev
= bp
->dev
;
10024 if (!netif_running(dev
))
10027 if (atomic_read(&bp
->intr_sem
) != 0)
10028 goto bnxt_restart_timer
;
10030 if (bp
->fw_cap
& BNXT_FW_CAP_ERROR_RECOVERY
)
10031 bnxt_fw_health_check(bp
);
10033 if (bp
->link_info
.link_up
&& (bp
->flags
& BNXT_FLAG_PORT_STATS
) &&
10034 bp
->stats_coal_ticks
) {
10035 set_bit(BNXT_PERIODIC_STATS_SP_EVENT
, &bp
->sp_event
);
10036 bnxt_queue_sp_work(bp
);
10039 if (bnxt_tc_flower_enabled(bp
)) {
10040 set_bit(BNXT_FLOW_STATS_SP_EVENT
, &bp
->sp_event
);
10041 bnxt_queue_sp_work(bp
);
10044 #ifdef CONFIG_RFS_ACCEL
10045 if ((bp
->flags
& BNXT_FLAG_RFS
) && bp
->ntp_fltr_count
) {
10046 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT
, &bp
->sp_event
);
10047 bnxt_queue_sp_work(bp
);
10049 #endif /*CONFIG_RFS_ACCEL*/
10051 if (bp
->link_info
.phy_retry
) {
10052 if (time_after(jiffies
, bp
->link_info
.phy_retry_expires
)) {
10053 bp
->link_info
.phy_retry
= false;
10054 netdev_warn(bp
->dev
, "failed to update phy settings after maximum retries.\n");
10056 set_bit(BNXT_UPDATE_PHY_SP_EVENT
, &bp
->sp_event
);
10057 bnxt_queue_sp_work(bp
);
10061 if ((bp
->flags
& BNXT_FLAG_CHIP_P5
) && !bp
->chip_rev
&&
10062 netif_carrier_ok(dev
)) {
10063 set_bit(BNXT_RING_COAL_NOW_SP_EVENT
, &bp
->sp_event
);
10064 bnxt_queue_sp_work(bp
);
10066 bnxt_restart_timer
:
10067 mod_timer(&bp
->timer
, jiffies
+ bp
->current_interval
);
10070 static void bnxt_rtnl_lock_sp(struct bnxt
*bp
)
10072 /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
10073 * set. If the device is being closed, bnxt_close() may be holding
10074 * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we
10075 * must clear BNXT_STATE_IN_SP_TASK before holding rtnl().
10077 clear_bit(BNXT_STATE_IN_SP_TASK
, &bp
->state
);
10081 static void bnxt_rtnl_unlock_sp(struct bnxt
*bp
)
10083 set_bit(BNXT_STATE_IN_SP_TASK
, &bp
->state
);
10087 /* Only called from bnxt_sp_task() */
10088 static void bnxt_reset(struct bnxt
*bp
, bool silent
)
10090 bnxt_rtnl_lock_sp(bp
);
10091 if (test_bit(BNXT_STATE_OPEN
, &bp
->state
))
10092 bnxt_reset_task(bp
, silent
);
10093 bnxt_rtnl_unlock_sp(bp
);
10096 static void bnxt_fw_reset_close(struct bnxt
*bp
)
10099 __bnxt_close_nic(bp
, true, false);
10100 bnxt_clear_int_mode(bp
);
10101 bnxt_hwrm_func_drv_unrgtr(bp
);
10102 bnxt_free_ctx_mem(bp
);
10107 static bool is_bnxt_fw_ok(struct bnxt
*bp
)
10109 struct bnxt_fw_health
*fw_health
= bp
->fw_health
;
10110 bool no_heartbeat
= false, has_reset
= false;
10113 val
= bnxt_fw_health_readl(bp
, BNXT_FW_HEARTBEAT_REG
);
10114 if (val
== fw_health
->last_fw_heartbeat
)
10115 no_heartbeat
= true;
10117 val
= bnxt_fw_health_readl(bp
, BNXT_FW_RESET_CNT_REG
);
10118 if (val
!= fw_health
->last_fw_reset_cnt
)
10121 if (!no_heartbeat
&& has_reset
)
10127 /* rtnl_lock is acquired before calling this function */
10128 static void bnxt_force_fw_reset(struct bnxt
*bp
)
10130 struct bnxt_fw_health
*fw_health
= bp
->fw_health
;
10133 if (!test_bit(BNXT_STATE_OPEN
, &bp
->state
) ||
10134 test_bit(BNXT_STATE_IN_FW_RESET
, &bp
->state
))
10137 set_bit(BNXT_STATE_IN_FW_RESET
, &bp
->state
);
10138 bnxt_fw_reset_close(bp
);
10139 wait_dsecs
= fw_health
->master_func_wait_dsecs
;
10140 if (fw_health
->master
) {
10141 if (fw_health
->flags
& ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU
)
10143 bp
->fw_reset_state
= BNXT_FW_RESET_STATE_RESET_FW
;
10145 bp
->fw_reset_timestamp
= jiffies
+ wait_dsecs
* HZ
/ 10;
10146 wait_dsecs
= fw_health
->normal_func_wait_dsecs
;
10147 bp
->fw_reset_state
= BNXT_FW_RESET_STATE_ENABLE_DEV
;
10150 bp
->fw_reset_min_dsecs
= fw_health
->post_reset_wait_dsecs
;
10151 bp
->fw_reset_max_dsecs
= fw_health
->post_reset_max_wait_dsecs
;
10152 bnxt_queue_fw_reset_work(bp
, wait_dsecs
* HZ
/ 10);
10155 void bnxt_fw_exception(struct bnxt
*bp
)
10157 netdev_warn(bp
->dev
, "Detected firmware fatal condition, initiating reset\n");
10158 set_bit(BNXT_STATE_FW_FATAL_COND
, &bp
->state
);
10159 bnxt_rtnl_lock_sp(bp
);
10160 bnxt_force_fw_reset(bp
);
10161 bnxt_rtnl_unlock_sp(bp
);
10164 /* Returns the number of registered VFs, or 1 if VF configuration is pending, or
10167 static int bnxt_get_registered_vfs(struct bnxt
*bp
)
10169 #ifdef CONFIG_BNXT_SRIOV
10175 rc
= bnxt_hwrm_func_qcfg(bp
);
10177 netdev_err(bp
->dev
, "func_qcfg cmd failed, rc = %d\n", rc
);
10180 if (bp
->pf
.registered_vfs
)
10181 return bp
->pf
.registered_vfs
;
10188 void bnxt_fw_reset(struct bnxt
*bp
)
10190 bnxt_rtnl_lock_sp(bp
);
10191 if (test_bit(BNXT_STATE_OPEN
, &bp
->state
) &&
10192 !test_bit(BNXT_STATE_IN_FW_RESET
, &bp
->state
)) {
10195 set_bit(BNXT_STATE_IN_FW_RESET
, &bp
->state
);
10196 if (bp
->pf
.active_vfs
&&
10197 !test_bit(BNXT_STATE_FW_FATAL_COND
, &bp
->state
))
10198 n
= bnxt_get_registered_vfs(bp
);
10200 netdev_err(bp
->dev
, "Firmware reset aborted, rc = %d\n",
10202 clear_bit(BNXT_STATE_IN_FW_RESET
, &bp
->state
);
10203 dev_close(bp
->dev
);
10204 goto fw_reset_exit
;
10205 } else if (n
> 0) {
10206 u16 vf_tmo_dsecs
= n
* 10;
10208 if (bp
->fw_reset_max_dsecs
< vf_tmo_dsecs
)
10209 bp
->fw_reset_max_dsecs
= vf_tmo_dsecs
;
10210 bp
->fw_reset_state
=
10211 BNXT_FW_RESET_STATE_POLL_VF
;
10212 bnxt_queue_fw_reset_work(bp
, HZ
/ 10);
10213 goto fw_reset_exit
;
10215 bnxt_fw_reset_close(bp
);
10216 if (bp
->fw_cap
& BNXT_FW_CAP_ERR_RECOVER_RELOAD
) {
10217 bp
->fw_reset_state
= BNXT_FW_RESET_STATE_POLL_FW_DOWN
;
10220 bp
->fw_reset_state
= BNXT_FW_RESET_STATE_ENABLE_DEV
;
10221 tmo
= bp
->fw_reset_min_dsecs
* HZ
/ 10;
10223 bnxt_queue_fw_reset_work(bp
, tmo
);
10226 bnxt_rtnl_unlock_sp(bp
);
10229 static void bnxt_chk_missed_irq(struct bnxt
*bp
)
10233 if (!(bp
->flags
& BNXT_FLAG_CHIP_P5
))
10236 for (i
= 0; i
< bp
->cp_nr_rings
; i
++) {
10237 struct bnxt_napi
*bnapi
= bp
->bnapi
[i
];
10238 struct bnxt_cp_ring_info
*cpr
;
10245 cpr
= &bnapi
->cp_ring
;
10246 for (j
= 0; j
< 2; j
++) {
10247 struct bnxt_cp_ring_info
*cpr2
= cpr
->cp_ring_arr
[j
];
10250 if (!cpr2
|| cpr2
->has_more_work
||
10251 !bnxt_has_work(bp
, cpr2
))
10254 if (cpr2
->cp_raw_cons
!= cpr2
->last_cp_raw_cons
) {
10255 cpr2
->last_cp_raw_cons
= cpr2
->cp_raw_cons
;
10258 fw_ring_id
= cpr2
->cp_ring_struct
.fw_ring_id
;
10259 bnxt_dbg_hwrm_ring_info_get(bp
,
10260 DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL
,
10261 fw_ring_id
, &val
[0], &val
[1]);
10262 cpr
->missed_irqs
++;
10267 static void bnxt_cfg_ntp_filters(struct bnxt
*);
10269 static void bnxt_init_ethtool_link_settings(struct bnxt
*bp
)
10271 struct bnxt_link_info
*link_info
= &bp
->link_info
;
10273 if (BNXT_AUTO_MODE(link_info
->auto_mode
)) {
10274 link_info
->autoneg
= BNXT_AUTONEG_SPEED
;
10275 if (bp
->hwrm_spec_code
>= 0x10201) {
10276 if (link_info
->auto_pause_setting
&
10277 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE
)
10278 link_info
->autoneg
|= BNXT_AUTONEG_FLOW_CTRL
;
10280 link_info
->autoneg
|= BNXT_AUTONEG_FLOW_CTRL
;
10282 link_info
->advertising
= link_info
->auto_link_speeds
;
10284 link_info
->req_link_speed
= link_info
->force_link_speed
;
10285 link_info
->req_duplex
= link_info
->duplex_setting
;
10287 if (link_info
->autoneg
& BNXT_AUTONEG_FLOW_CTRL
)
10288 link_info
->req_flow_ctrl
=
10289 link_info
->auto_pause_setting
& BNXT_LINK_PAUSE_BOTH
;
10291 link_info
->req_flow_ctrl
= link_info
->force_pause_setting
;
10294 static void bnxt_sp_task(struct work_struct
*work
)
10296 struct bnxt
*bp
= container_of(work
, struct bnxt
, sp_task
);
10298 set_bit(BNXT_STATE_IN_SP_TASK
, &bp
->state
);
10299 smp_mb__after_atomic();
10300 if (!test_bit(BNXT_STATE_OPEN
, &bp
->state
)) {
10301 clear_bit(BNXT_STATE_IN_SP_TASK
, &bp
->state
);
10305 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT
, &bp
->sp_event
))
10306 bnxt_cfg_rx_mode(bp
);
10308 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT
, &bp
->sp_event
))
10309 bnxt_cfg_ntp_filters(bp
);
10310 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT
, &bp
->sp_event
))
10311 bnxt_hwrm_exec_fwd_req(bp
);
10312 if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT
, &bp
->sp_event
)) {
10313 bnxt_hwrm_tunnel_dst_port_alloc(
10314 bp
, bp
->vxlan_port
,
10315 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN
);
10317 if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT
, &bp
->sp_event
)) {
10318 bnxt_hwrm_tunnel_dst_port_free(
10319 bp
, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN
);
10321 if (test_and_clear_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT
, &bp
->sp_event
)) {
10322 bnxt_hwrm_tunnel_dst_port_alloc(
10324 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE
);
10326 if (test_and_clear_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT
, &bp
->sp_event
)) {
10327 bnxt_hwrm_tunnel_dst_port_free(
10328 bp
, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE
);
10330 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT
, &bp
->sp_event
)) {
10331 bnxt_hwrm_port_qstats(bp
);
10332 bnxt_hwrm_port_qstats_ext(bp
);
10333 bnxt_hwrm_pcie_qstats(bp
);
10336 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT
, &bp
->sp_event
)) {
10339 mutex_lock(&bp
->link_lock
);
10340 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT
,
10342 bnxt_hwrm_phy_qcaps(bp
);
10344 if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT
,
10346 bnxt_init_ethtool_link_settings(bp
);
10348 rc
= bnxt_update_link(bp
, true);
10349 mutex_unlock(&bp
->link_lock
);
10351 netdev_err(bp
->dev
, "SP task can't update link (rc: %x)\n",
10354 if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT
, &bp
->sp_event
)) {
10357 mutex_lock(&bp
->link_lock
);
10358 rc
= bnxt_update_phy_setting(bp
);
10359 mutex_unlock(&bp
->link_lock
);
10361 netdev_warn(bp
->dev
, "update phy settings retry failed\n");
10363 bp
->link_info
.phy_retry
= false;
10364 netdev_info(bp
->dev
, "update phy settings retry succeeded\n");
10367 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT
, &bp
->sp_event
)) {
10368 mutex_lock(&bp
->link_lock
);
10369 bnxt_get_port_module_status(bp
);
10370 mutex_unlock(&bp
->link_lock
);
10373 if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT
, &bp
->sp_event
))
10374 bnxt_tc_flow_stats_work(bp
);
10376 if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT
, &bp
->sp_event
))
10377 bnxt_chk_missed_irq(bp
);
10379 /* These functions below will clear BNXT_STATE_IN_SP_TASK. They
10380 * must be the last functions to be called before exiting.
10382 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT
, &bp
->sp_event
))
10383 bnxt_reset(bp
, false);
10385 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT
, &bp
->sp_event
))
10386 bnxt_reset(bp
, true);
10388 if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT
, &bp
->sp_event
))
10389 bnxt_devlink_health_report(bp
, BNXT_FW_RESET_NOTIFY_SP_EVENT
);
10391 if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT
, &bp
->sp_event
)) {
10392 if (!is_bnxt_fw_ok(bp
))
10393 bnxt_devlink_health_report(bp
,
10394 BNXT_FW_EXCEPTION_SP_EVENT
);
10397 smp_mb__before_atomic();
10398 clear_bit(BNXT_STATE_IN_SP_TASK
, &bp
->state
);
10401 /* Under rtnl_lock */
10402 int bnxt_check_rings(struct bnxt
*bp
, int tx
, int rx
, bool sh
, int tcs
,
10405 int max_rx
, max_tx
, tx_sets
= 1;
10406 int tx_rings_needed
, stats
;
10413 rc
= bnxt_get_max_rings(bp
, &max_rx
, &max_tx
, sh
);
10420 tx_rings_needed
= tx
* tx_sets
+ tx_xdp
;
10421 if (max_tx
< tx_rings_needed
)
10425 if ((bp
->flags
& (BNXT_FLAG_RFS
| BNXT_FLAG_CHIP_P5
)) == BNXT_FLAG_RFS
)
10428 if (bp
->flags
& BNXT_FLAG_AGG_RINGS
)
10430 cp
= sh
? max_t(int, tx_rings_needed
, rx
) : tx_rings_needed
+ rx
;
10432 if (BNXT_NEW_RM(bp
)) {
10433 cp
+= bnxt_get_ulp_msix_num(bp
);
10434 stats
+= bnxt_get_ulp_stat_ctxs(bp
);
10436 return bnxt_hwrm_check_rings(bp
, tx_rings_needed
, rx_rings
, rx
, cp
,
10440 static void bnxt_unmap_bars(struct bnxt
*bp
, struct pci_dev
*pdev
)
10443 pci_iounmap(pdev
, bp
->bar2
);
10448 pci_iounmap(pdev
, bp
->bar1
);
10453 pci_iounmap(pdev
, bp
->bar0
);
10458 static void bnxt_cleanup_pci(struct bnxt
*bp
)
10460 bnxt_unmap_bars(bp
, bp
->pdev
);
10461 pci_release_regions(bp
->pdev
);
10462 if (pci_is_enabled(bp
->pdev
))
10463 pci_disable_device(bp
->pdev
);
10466 static void bnxt_init_dflt_coal(struct bnxt
*bp
)
10468 struct bnxt_coal
*coal
;
	/* Tick values in microseconds.
	 * 1 coal_buf x bufs_per_record = 1 completion record.
	 */
10473 coal
= &bp
->rx_coal
;
10474 coal
->coal_ticks
= 10;
10475 coal
->coal_bufs
= 30;
10476 coal
->coal_ticks_irq
= 1;
10477 coal
->coal_bufs_irq
= 2;
10478 coal
->idle_thresh
= 50;
10479 coal
->bufs_per_record
= 2;
10480 coal
->budget
= 64; /* NAPI budget */
10482 coal
= &bp
->tx_coal
;
10483 coal
->coal_ticks
= 28;
10484 coal
->coal_bufs
= 30;
10485 coal
->coal_ticks_irq
= 2;
10486 coal
->coal_bufs_irq
= 2;
10487 coal
->bufs_per_record
= 1;
10489 bp
->stats_coal_ticks
= BNXT_DEF_STATS_COAL_TICKS
;
10492 static void bnxt_alloc_fw_health(struct bnxt
*bp
)
10497 if (!(bp
->fw_cap
& BNXT_FW_CAP_HOT_RESET
) &&
10498 !(bp
->fw_cap
& BNXT_FW_CAP_ERROR_RECOVERY
))
10501 bp
->fw_health
= kzalloc(sizeof(*bp
->fw_health
), GFP_KERNEL
);
10502 if (!bp
->fw_health
) {
10503 netdev_warn(bp
->dev
, "Failed to allocate fw_health\n");
10504 bp
->fw_cap
&= ~BNXT_FW_CAP_HOT_RESET
;
10505 bp
->fw_cap
&= ~BNXT_FW_CAP_ERROR_RECOVERY
;
10509 static int bnxt_fw_init_one_p1(struct bnxt
*bp
)
10514 rc
= bnxt_hwrm_ver_get(bp
);
10518 if (bp
->fw_cap
& BNXT_FW_CAP_KONG_MB_CHNL
) {
10519 rc
= bnxt_alloc_kong_hwrm_resources(bp
);
10521 bp
->fw_cap
&= ~BNXT_FW_CAP_KONG_MB_CHNL
;
10524 if ((bp
->fw_cap
& BNXT_FW_CAP_SHORT_CMD
) ||
10525 bp
->hwrm_max_ext_req_len
> BNXT_HWRM_MAX_REQ_LEN
) {
10526 rc
= bnxt_alloc_hwrm_short_cmd_req(bp
);
10530 rc
= bnxt_hwrm_func_reset(bp
);
10534 bnxt_hwrm_fw_set_time(bp
);
10538 static int bnxt_fw_init_one_p2(struct bnxt
*bp
)
10542 /* Get the MAX capabilities for this function */
10543 rc
= bnxt_hwrm_func_qcaps(bp
);
10545 netdev_err(bp
->dev
, "hwrm query capability failure rc: %x\n",
10550 rc
= bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp
);
10552 netdev_warn(bp
->dev
, "hwrm query adv flow mgnt failure rc: %d\n",
10555 bnxt_alloc_fw_health(bp
);
10556 rc
= bnxt_hwrm_error_recovery_qcfg(bp
);
10558 netdev_warn(bp
->dev
, "hwrm query error recovery failure rc: %d\n",
10561 rc
= bnxt_hwrm_func_drv_rgtr(bp
, NULL
, 0, false);
10565 bnxt_hwrm_func_qcfg(bp
);
10566 bnxt_hwrm_vnic_qcaps(bp
);
10567 bnxt_hwrm_port_led_qcaps(bp
);
10568 bnxt_ethtool_init(bp
);
10573 static void bnxt_set_dflt_rss_hash_type(struct bnxt
*bp
)
10575 bp
->flags
&= ~BNXT_FLAG_UDP_RSS_CAP
;
10576 bp
->rss_hash_cfg
= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4
|
10577 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4
|
10578 VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6
|
10579 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6
;
10580 if (BNXT_CHIP_P4_PLUS(bp
) && bp
->hwrm_spec_code
>= 0x10501) {
10581 bp
->flags
|= BNXT_FLAG_UDP_RSS_CAP
;
10582 bp
->rss_hash_cfg
|= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4
|
10583 VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6
;
10587 static void bnxt_set_dflt_rfs(struct bnxt
*bp
)
10589 struct net_device
*dev
= bp
->dev
;
10591 dev
->hw_features
&= ~NETIF_F_NTUPLE
;
10592 dev
->features
&= ~NETIF_F_NTUPLE
;
10593 bp
->flags
&= ~BNXT_FLAG_RFS
;
10594 if (bnxt_rfs_supported(bp
)) {
10595 dev
->hw_features
|= NETIF_F_NTUPLE
;
10596 if (bnxt_rfs_capable(bp
)) {
10597 bp
->flags
|= BNXT_FLAG_RFS
;
10598 dev
->features
|= NETIF_F_NTUPLE
;
10603 static void bnxt_fw_init_one_p3(struct bnxt
*bp
)
10605 struct pci_dev
*pdev
= bp
->pdev
;
10607 bnxt_set_dflt_rss_hash_type(bp
);
10608 bnxt_set_dflt_rfs(bp
);
10610 bnxt_get_wol_settings(bp
);
10611 if (bp
->flags
& BNXT_FLAG_WOL_CAP
)
10612 device_set_wakeup_enable(&pdev
->dev
, bp
->wol
);
10614 device_set_wakeup_capable(&pdev
->dev
, false);
10616 bnxt_hwrm_set_cache_line_size(bp
, cache_line_size());
10617 bnxt_hwrm_coal_params_qcaps(bp
);
10620 static int bnxt_fw_init_one(struct bnxt
*bp
)
10624 rc
= bnxt_fw_init_one_p1(bp
);
10626 netdev_err(bp
->dev
, "Firmware init phase 1 failed\n");
10629 rc
= bnxt_fw_init_one_p2(bp
);
10631 netdev_err(bp
->dev
, "Firmware init phase 2 failed\n");
10634 rc
= bnxt_approve_mac(bp
, bp
->dev
->dev_addr
, false);
10638 /* In case fw capabilities have changed, destroy the unneeded
10639 * reporters and create newly capable ones.
10641 bnxt_dl_fw_reporters_destroy(bp
, false);
10642 bnxt_dl_fw_reporters_create(bp
);
10643 bnxt_fw_init_one_p3(bp
);
10647 static void bnxt_fw_reset_writel(struct bnxt
*bp
, int reg_idx
)
10649 struct bnxt_fw_health
*fw_health
= bp
->fw_health
;
10650 u32 reg
= fw_health
->fw_reset_seq_regs
[reg_idx
];
10651 u32 val
= fw_health
->fw_reset_seq_vals
[reg_idx
];
10652 u32 reg_type
, reg_off
, delay_msecs
;
10654 delay_msecs
= fw_health
->fw_reset_seq_delay_msec
[reg_idx
];
10655 reg_type
= BNXT_FW_HEALTH_REG_TYPE(reg
);
10656 reg_off
= BNXT_FW_HEALTH_REG_OFF(reg
);
10657 switch (reg_type
) {
10658 case BNXT_FW_HEALTH_REG_TYPE_CFG
:
10659 pci_write_config_dword(bp
->pdev
, reg_off
, val
);
10661 case BNXT_FW_HEALTH_REG_TYPE_GRC
:
10662 writel(reg_off
& BNXT_GRC_BASE_MASK
,
10663 bp
->bar0
+ BNXT_GRCPF_REG_WINDOW_BASE_OUT
+ 4);
10664 reg_off
= (reg_off
& BNXT_GRC_OFFSET_MASK
) + 0x2000;
10666 case BNXT_FW_HEALTH_REG_TYPE_BAR0
:
10667 writel(val
, bp
->bar0
+ reg_off
);
10669 case BNXT_FW_HEALTH_REG_TYPE_BAR1
:
10670 writel(val
, bp
->bar1
+ reg_off
);
10674 pci_read_config_dword(bp
->pdev
, 0, &val
);
10675 msleep(delay_msecs
);
10679 static void bnxt_reset_all(struct bnxt
*bp
)
10681 struct bnxt_fw_health
*fw_health
= bp
->fw_health
;
10684 if (bp
->fw_cap
& BNXT_FW_CAP_ERR_RECOVER_RELOAD
) {
10685 #ifdef CONFIG_TEE_BNXT_FW
10686 rc
= tee_bnxt_fw_load();
10688 netdev_err(bp
->dev
, "Unable to reset FW rc=%d\n", rc
);
10689 bp
->fw_reset_timestamp
= jiffies
;
10694 if (fw_health
->flags
& ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST
) {
10695 for (i
= 0; i
< fw_health
->fw_reset_seq_cnt
; i
++)
10696 bnxt_fw_reset_writel(bp
, i
);
10697 } else if (fw_health
->flags
& ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU
) {
10698 struct hwrm_fw_reset_input req
= {0};
10700 bnxt_hwrm_cmd_hdr_init(bp
, &req
, HWRM_FW_RESET
, -1, -1);
10701 req
.resp_addr
= cpu_to_le64(bp
->hwrm_cmd_kong_resp_dma_addr
);
10702 req
.embedded_proc_type
= FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP
;
10703 req
.selfrst_status
= FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP
;
10704 req
.flags
= FW_RESET_REQ_FLAGS_RESET_GRACEFUL
;
10705 rc
= hwrm_send_message(bp
, &req
, sizeof(req
), HWRM_CMD_TIMEOUT
);
10707 netdev_warn(bp
->dev
, "Unable to reset FW rc=%d\n", rc
);
10709 bp
->fw_reset_timestamp
= jiffies
;
static void bnxt_fw_reset_task(struct work_struct *work)
{
	struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
	int rc;

	if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
		netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
		return;
	}

	switch (bp->fw_reset_state) {
	case BNXT_FW_RESET_STATE_POLL_VF: {
		int n = bnxt_get_registered_vfs(bp);
		int tmo;

		if (n < 0) {
			netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n",
				   n, jiffies_to_msecs(jiffies -
				   bp->fw_reset_timestamp));
			goto fw_reset_abort;
		} else if (n > 0) {
			if (time_after(jiffies, bp->fw_reset_timestamp +
				       (bp->fw_reset_max_dsecs * HZ / 10))) {
				clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
				bp->fw_reset_state = 0;
				netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
					   n);
				return;
			}
			bnxt_queue_fw_reset_work(bp, HZ / 10);
			return;
		}
		bp->fw_reset_timestamp = jiffies;
		bnxt_fw_reset_close(bp);
		if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
			bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
			tmo = HZ / 10;
		} else {
			bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
			tmo = bp->fw_reset_min_dsecs * HZ / 10;
		}
		bnxt_queue_fw_reset_work(bp, tmo);
		return;
	}
	case BNXT_FW_RESET_STATE_POLL_FW_DOWN: {
		u32 val;

		val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
		if (!(val & BNXT_FW_STATUS_SHUTDOWN) &&
		    !time_after(jiffies, bp->fw_reset_timestamp +
		    (bp->fw_reset_max_dsecs * HZ / 10))) {
			bnxt_queue_fw_reset_work(bp, HZ / 5);
			return;
		}

		if (!bp->fw_health->master) {
			u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs;

			bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
			bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
			return;
		}
		bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
	}
	/* fall through */
	case BNXT_FW_RESET_STATE_RESET_FW:
		bnxt_reset_all(bp);
		bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
		bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
		return;
	case BNXT_FW_RESET_STATE_ENABLE_DEV:
		if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
			u32 val;

			val = bnxt_fw_health_readl(bp,
						   BNXT_FW_RESET_INPROG_REG);
			if (val)
				netdev_warn(bp->dev, "FW reset inprog %x after min wait time.\n",
					    val);
		}
		clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
		if (pci_enable_device(bp->pdev)) {
			netdev_err(bp->dev, "Cannot re-enable PCI device\n");
			goto fw_reset_abort;
		}
		pci_set_master(bp->pdev);
		bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
		/* fall through */
	case BNXT_FW_RESET_STATE_POLL_FW:
		bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
		rc = __bnxt_hwrm_ver_get(bp, true);
		if (rc) {
			if (time_after(jiffies, bp->fw_reset_timestamp +
				       (bp->fw_reset_max_dsecs * HZ / 10))) {
				netdev_err(bp->dev, "Firmware reset aborted\n");
				goto fw_reset_abort;
			}
			bnxt_queue_fw_reset_work(bp, HZ / 5);
			return;
		}
		bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
		bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
		/* fall through */
	case BNXT_FW_RESET_STATE_OPENING:
		while (!rtnl_trylock()) {
			bnxt_queue_fw_reset_work(bp, HZ / 10);
			return;
		}
		rc = bnxt_open(bp->dev);
		if (rc) {
			netdev_err(bp->dev, "bnxt_open_nic() failed\n");
			clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
			dev_close(bp->dev);
		}

		bp->fw_reset_state = 0;
		/* Make sure fw_reset_state is 0 before clearing the flag */
		smp_mb__before_atomic();
		clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
		bnxt_ulp_start(bp, rc);
		bnxt_dl_health_recovery_done(bp);
		bnxt_dl_health_status_update(bp, true);
		rtnl_unlock();
		break;
	}
	return;

fw_reset_abort:
	clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
	if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF)
		bnxt_dl_health_status_update(bp, false);
	bp->fw_reset_state = 0;
	rtnl_lock();
	dev_close(bp->dev);
	rtnl_unlock();
}

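/* One-time PCI bring-up: enable the device, claim the BARs, set the DMA
 * mask, map BAR0/2/4 and initialize the locks, work items and timer that
 * the rest of the driver relies on.  Called from bnxt_init_one() before
 * any HWRM traffic is possible.
 */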
static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
{
	int rc;
	struct bnxt *bp = netdev_priv(dev);

	SET_NETDEV_DEV(dev, &pdev->dev);

	/* enable device (incl. PCI PM wakeup), and bus-mastering */
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		goto init_err;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev,
			"Cannot find PCI device base address, aborting\n");
		rc = -ENODEV;
		goto init_err_disable;
	}

	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (rc) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto init_err_disable;
	}

	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
		dev_err(&pdev->dev, "System does not support DMA, aborting\n");
		rc = -EIO;
		goto init_err_disable;
	}

	pci_set_master(pdev);

	bp->dev = dev;
	bp->pdev = pdev;

	bp->bar0 = pci_ioremap_bar(pdev, 0);
	if (!bp->bar0) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		rc = -ENOMEM;
		goto init_err_release;
	}

	bp->bar1 = pci_ioremap_bar(pdev, 2);
	if (!bp->bar1) {
		dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n");
		rc = -ENOMEM;
		goto init_err_release;
	}

	bp->bar2 = pci_ioremap_bar(pdev, 4);
	if (!bp->bar2) {
		dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
		rc = -ENOMEM;
		goto init_err_release;
	}

	pci_enable_pcie_error_reporting(pdev);

	INIT_WORK(&bp->sp_task, bnxt_sp_task);
	INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task);

	spin_lock_init(&bp->ntp_fltr_lock);
#if BITS_PER_LONG == 32
	spin_lock_init(&bp->db_lock);
#endif

	bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
	bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;

	bnxt_init_dflt_coal(bp);

	timer_setup(&bp->timer, bnxt_timer, 0);
	bp->current_interval = BNXT_TIMER_INTERVAL;

	clear_bit(BNXT_STATE_OPEN, &bp->state);
	return 0;

init_err_release:
	bnxt_unmap_bars(bp, pdev);
	pci_release_regions(pdev);

init_err_disable:
	pci_disable_device(pdev);

init_err:
	return rc;
}

/* rtnl_lock held */
static int bnxt_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(addr->sa_data, dev->dev_addr))
		return 0;

	rc = bnxt_approve_mac(bp, addr->sa_data, true);
	if (rc)
		return rc;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev)) {
		bnxt_close_nic(bp, false, false);
		rc = bnxt_open_nic(bp, false, false);
	}

	return rc;
}

/* rtnl_lock held */
static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnxt *bp = netdev_priv(dev);

	if (netif_running(dev))
		bnxt_close_nic(bp, false, false);

	dev->mtu = new_mtu;
	bnxt_set_ring_params(bp);

	if (netif_running(dev))
		return bnxt_open_nic(bp, false, false);

	return 0;
}

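/* mqprio handler: validate the requested TC count against bp->max_tc and
 * the available rings, then close and reopen the NIC with the TX rings
 * re-partitioned as tx_nr_rings_per_tc * tc.
 */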
int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
{
	struct bnxt *bp = netdev_priv(dev);
	bool sh = false;
	int rc;

	if (tc > bp->max_tc) {
		netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
			   tc, bp->max_tc);
		return -EINVAL;
	}

	if (netdev_get_num_tc(dev) == tc)
		return 0;

	if (bp->flags & BNXT_FLAG_SHARED_RINGS)
		sh = true;

	rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
			      sh, tc, bp->tx_nr_rings_xdp);
	if (rc)
		return rc;

	/* Needs to close the device and do hw resource re-allocations */
	if (netif_running(bp->dev))
		bnxt_close_nic(bp, true, false);

	if (tc) {
		bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
		netdev_set_num_tc(dev, tc);
	} else {
		bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
		netdev_reset_tc(dev);
	}
	bp->tx_nr_rings += bp->tx_nr_rings_xdp;
	bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
			       bp->tx_nr_rings + bp->rx_nr_rings;

	if (netif_running(bp->dev))
		return bnxt_open_nic(bp, true, false);

	return 0;
}

static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				  void *cb_priv)
{
	struct bnxt *bp = cb_priv;

	if (!bnxt_tc_flower_enabled(bp) ||
	    !tc_cls_can_offload_and_chain0(bp->dev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

LIST_HEAD(bnxt_block_cb_list);

static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
			 void *type_data)
{
	struct bnxt *bp = netdev_priv(dev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple(type_data,
						  &bnxt_block_cb_list,
						  bnxt_setup_tc_block_cb,
						  bp, bp, true);
	case TC_SETUP_QDISC_MQPRIO: {
		struct tc_mqprio_qopt *mqprio = type_data;

		mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;

		return bnxt_setup_mq_tc(dev, mqprio->num_tc);
	}
	default:
		return -EOPNOTSUPP;
	}
}

#ifdef CONFIG_RFS_ACCEL
static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
			    struct bnxt_ntuple_filter *f2)
{
	struct flow_keys *keys1 = &f1->fkeys;
	struct flow_keys *keys2 = &f2->fkeys;

	if (keys1->basic.n_proto != keys2->basic.n_proto ||
	    keys1->basic.ip_proto != keys2->basic.ip_proto)
		return false;

	if (keys1->basic.n_proto == htons(ETH_P_IP)) {
		if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src ||
		    keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst)
			return false;
	} else {
		if (memcmp(&keys1->addrs.v6addrs.src, &keys2->addrs.v6addrs.src,
			   sizeof(keys1->addrs.v6addrs.src)) ||
		    memcmp(&keys1->addrs.v6addrs.dst, &keys2->addrs.v6addrs.dst,
			   sizeof(keys1->addrs.v6addrs.dst)))
			return false;
	}

	if (keys1->ports.ports == keys2->ports.ports &&
	    keys1->control.flags == keys2->control.flags &&
	    ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
	    ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
		return true;

	return false;
}

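/* aRFS callback (.ndo_rx_flow_steer).  Dissect the skb into flow keys,
 * reject flows that cannot be offloaded (non-TCP/UDP, fragments, tunnels
 * on older firmware), then allocate a software filter ID from
 * ntp_fltr_bmap and hand the actual HWRM programming to the sp_task via
 * BNXT_RX_NTP_FLTR_SP_EVENT.
 */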
static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
			      u16 rxq_index, u32 flow_id)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_ntuple_filter *fltr, *new_fltr;
	struct flow_keys *fkeys;
	struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
	int rc = 0, idx, bit_id, l2_idx = 0;
	struct hlist_head *head;
	u32 flags;

	if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
		int off = 0, j;

		netif_addr_lock_bh(dev);
		for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
			if (ether_addr_equal(eth->h_dest,
					     vnic->uc_list + off)) {
				l2_idx = j + 1;
				break;
			}
		}
		netif_addr_unlock_bh(dev);
		if (!l2_idx)
			return -EINVAL;
	}
	new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
	if (!new_fltr)
		return -ENOMEM;

	fkeys = &new_fltr->fkeys;
	if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
		rc = -EPROTONOSUPPORT;
		goto err_free;
	}

	if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
	     fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
	    ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
	     (fkeys->basic.ip_proto != IPPROTO_UDP))) {
		rc = -EPROTONOSUPPORT;
		goto err_free;
	}
	if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
	    bp->hwrm_spec_code < 0x10601) {
		rc = -EPROTONOSUPPORT;
		goto err_free;
	}
	flags = fkeys->control.flags;
	if (((flags & FLOW_DIS_ENCAPSULATION) &&
	     bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) {
		rc = -EPROTONOSUPPORT;
		goto err_free;
	}

	memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
	memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);

	idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
	head = &bp->ntp_fltr_hash_tbl[idx];
	rcu_read_lock();
	hlist_for_each_entry_rcu(fltr, head, hash) {
		if (bnxt_fltr_match(fltr, new_fltr)) {
			rcu_read_unlock();
			rc = 0;
			goto err_free;
		}
	}
	rcu_read_unlock();

	spin_lock_bh(&bp->ntp_fltr_lock);
	bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
					 BNXT_NTP_FLTR_MAX_FLTR, 0);
	if (bit_id < 0) {
		spin_unlock_bh(&bp->ntp_fltr_lock);
		rc = -ENOMEM;
		goto err_free;
	}

	new_fltr->sw_id = (u16)bit_id;
	new_fltr->flow_id = flow_id;
	new_fltr->l2_fltr_idx = l2_idx;
	new_fltr->rxq = rxq_index;
	hlist_add_head_rcu(&new_fltr->hash, head);
	bp->ntp_fltr_count++;
	spin_unlock_bh(&bp->ntp_fltr_lock);

	set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
	bnxt_queue_sp_work(bp);

	return new_fltr->sw_id;

err_free:
	kfree(new_fltr);
	return rc;
}

static void bnxt_cfg_ntp_filters(struct bnxt *bp)
{
	int i;

	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
		struct hlist_head *head;
		struct hlist_node *tmp;
		struct bnxt_ntuple_filter *fltr;
		int rc;

		head = &bp->ntp_fltr_hash_tbl[i];
		hlist_for_each_entry_safe(fltr, tmp, head, hash) {
			bool del = false;

			if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
				if (rps_may_expire_flow(bp->dev, fltr->rxq,
							fltr->flow_id,
							fltr->sw_id)) {
					bnxt_hwrm_cfa_ntuple_filter_free(bp,
									 fltr);
					del = true;
				}
			} else {
				rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
								       fltr);
				if (rc)
					del = true;
				else
					set_bit(BNXT_FLTR_VALID, &fltr->state);
			}

			if (del) {
				spin_lock_bh(&bp->ntp_fltr_lock);
				hlist_del_rcu(&fltr->hash);
				bp->ntp_fltr_count--;
				spin_unlock_bh(&bp->ntp_fltr_lock);
				synchronize_rcu();
				clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
				kfree(fltr);
			}
		}
	}
	if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
		netdev_info(bp->dev, "Receive PF driver unload event!");
}

#else

static void bnxt_cfg_ntp_filters(struct bnxt *bp)
{
}

#endif /* CONFIG_RFS_ACCEL */

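/* The UDP tunnel add/del handlers only update the VXLAN/GENEVE port
 * book-keeping here and defer the actual firmware update to bnxt_sp_task()
 * through the corresponding sp_event bits.
 */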
static void bnxt_udp_tunnel_add(struct net_device *dev,
				struct udp_tunnel_info *ti)
{
	struct bnxt *bp = netdev_priv(dev);

	if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
		return;

	if (!netif_running(dev))
		return;

	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		if (bp->vxlan_port_cnt && bp->vxlan_port != ti->port)
			return;

		bp->vxlan_port_cnt++;
		if (bp->vxlan_port_cnt == 1) {
			bp->vxlan_port = ti->port;
			set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
			bnxt_queue_sp_work(bp);
		}
		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		if (bp->nge_port_cnt && bp->nge_port != ti->port)
			return;

		bp->nge_port_cnt++;
		if (bp->nge_port_cnt == 1) {
			bp->nge_port = ti->port;
			set_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event);
		}
		break;
	default:
		return;
	}

	bnxt_queue_sp_work(bp);
}

static void bnxt_udp_tunnel_del(struct net_device *dev,
				struct udp_tunnel_info *ti)
{
	struct bnxt *bp = netdev_priv(dev);

	if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
		return;

	if (!netif_running(dev))
		return;

	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		if (!bp->vxlan_port_cnt || bp->vxlan_port != ti->port)
			return;
		bp->vxlan_port_cnt--;

		if (bp->vxlan_port_cnt != 0)
			return;

		set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event);
		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		if (!bp->nge_port_cnt || bp->nge_port != ti->port)
			return;
		bp->nge_port_cnt--;

		if (bp->nge_port_cnt != 0)
			return;

		set_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event);
		break;
	default:
		return;
	}

	bnxt_queue_sp_work(bp);
}

static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
			       struct net_device *dev, u32 filter_mask,
			       int nlflags)
{
	struct bnxt *bp = netdev_priv(dev);

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
				       nlflags, filter_mask, NULL);
}

static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
			       u16 flags, struct netlink_ext_ack *extack)
{
	struct bnxt *bp = netdev_priv(dev);
	struct nlattr *attr, *br_spec;
	int rem, rc = 0;

	if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
		return -EOPNOTSUPP;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested(attr, br_spec, rem) {
		u16 mode;

		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		if (nla_len(attr) < sizeof(mode))
			return -EINVAL;

		mode = nla_get_u16(attr);
		if (mode == bp->br_mode)
			break;

		rc = bnxt_hwrm_set_br_mode(bp, mode);
		if (!rc)
			bp->br_mode = mode;
		break;
	}
	return rc;
}

int bnxt_get_port_parent_id(struct net_device *dev,
			    struct netdev_phys_item_id *ppid)
{
	struct bnxt *bp = netdev_priv(dev);

	if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return -EOPNOTSUPP;

	/* The PF and its VF-reps only support the switchdev framework */
	if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID))
		return -EOPNOTSUPP;

	ppid->id_len = sizeof(bp->dsn);
	memcpy(ppid->id, bp->dsn, ppid->id_len);

	return 0;
}

static struct devlink_port *bnxt_get_devlink_port(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);

	return &bp->dl_port;
}

static const struct net_device_ops bnxt_netdev_ops = {
	.ndo_open		= bnxt_open,
	.ndo_start_xmit		= bnxt_start_xmit,
	.ndo_stop		= bnxt_close,
	.ndo_get_stats64	= bnxt_get_stats64,
	.ndo_set_rx_mode	= bnxt_set_rx_mode,
	.ndo_do_ioctl		= bnxt_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnxt_change_mac_addr,
	.ndo_change_mtu		= bnxt_change_mtu,
	.ndo_fix_features	= bnxt_fix_features,
	.ndo_set_features	= bnxt_set_features,
	.ndo_tx_timeout		= bnxt_tx_timeout,
#ifdef CONFIG_BNXT_SRIOV
	.ndo_get_vf_config	= bnxt_get_vf_config,
	.ndo_set_vf_mac		= bnxt_set_vf_mac,
	.ndo_set_vf_vlan	= bnxt_set_vf_vlan,
	.ndo_set_vf_rate	= bnxt_set_vf_bw,
	.ndo_set_vf_link_state	= bnxt_set_vf_link_state,
	.ndo_set_vf_spoofchk	= bnxt_set_vf_spoofchk,
	.ndo_set_vf_trust	= bnxt_set_vf_trust,
#endif
	.ndo_setup_tc		= bnxt_setup_tc,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= bnxt_rx_flow_steer,
#endif
	.ndo_udp_tunnel_add	= bnxt_udp_tunnel_add,
	.ndo_udp_tunnel_del	= bnxt_udp_tunnel_del,
	.ndo_bpf		= bnxt_xdp,
	.ndo_xdp_xmit		= bnxt_xdp_xmit,
	.ndo_bridge_getlink	= bnxt_bridge_getlink,
	.ndo_bridge_setlink	= bnxt_bridge_setlink,
	.ndo_get_devlink_port	= bnxt_get_devlink_port,
};

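/* Device teardown: roughly the reverse of bnxt_init_one().  SR-IOV is
 * disabled first so no VFs are left behind, then the netdev and devlink
 * objects are unregistered before the HWRM resources, context memory and
 * BAR mappings are released.
 */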
static void bnxt_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(dev);

	if (BNXT_PF(bp))
		bnxt_sriov_disable(bp);

	bnxt_dl_fw_reporters_destroy(bp, true);
	pci_disable_pcie_error_reporting(pdev);
	unregister_netdev(dev);
	bnxt_dl_unregister(bp);
	bnxt_shutdown_tc(bp);
	bnxt_cancel_sp_work(bp);
	bp->sp_event = 0;

	bnxt_clear_int_mode(bp);
	bnxt_hwrm_func_drv_unrgtr(bp);
	bnxt_free_hwrm_resources(bp);
	bnxt_free_hwrm_short_cmd_req(bp);
	bnxt_ethtool_free(bp);
	bnxt_dcb_free(bp);
	kfree(bp->edev);
	bp->edev = NULL;
	kfree(bp->fw_health);
	bp->fw_health = NULL;
	bnxt_cleanup_pci(bp);
	bnxt_free_ctx_mem(bp);
	kfree(bp->ctx);
	bp->ctx = NULL;
	bnxt_free_port_stats(bp);
	free_netdev(dev);
}

static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
{
	int rc = 0;
	struct bnxt_link_info *link_info = &bp->link_info;

	rc = bnxt_hwrm_phy_qcaps(bp);
	if (rc) {
		netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
			   rc);
		return rc;
	}
	if (!fw_dflt)
		return 0;

	rc = bnxt_update_link(bp, false);
	if (rc) {
		netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
			   rc);
		return rc;
	}

	/* Older firmware does not have supported_auto_speeds, so assume
	 * that all supported speeds can be autonegotiated.
	 */
	if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
		link_info->support_auto_speeds = link_info->support_speeds;

	bnxt_init_ethtool_link_settings(bp);
	return 0;
}

static int bnxt_get_max_irq(struct pci_dev *pdev)
{
	u16 ctrl;

	if (!pdev->msix_cap)
		return 1;

	pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
	return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
}

static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
				int *max_cp)
{
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	int max_ring_grps = 0, max_irq;

	*max_tx = hw_resc->max_tx_rings;
	*max_rx = hw_resc->max_rx_rings;
	*max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
	max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
			bnxt_get_ulp_msix_num(bp),
			hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp));
	if (!(bp->flags & BNXT_FLAG_CHIP_P5))
		*max_cp = min_t(int, *max_cp, max_irq);
	max_ring_grps = hw_resc->max_hw_ring_grps;
	if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
		*max_cp -= 1;
		*max_rx -= 2;
	}
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		*max_rx >>= 1;
	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
		/* On P5 chips, max_cp output param should be available NQs */
		*max_cp = max_irq;
	}
	*max_rx = min_t(int, *max_rx, max_ring_grps);
}

int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
{
	int rx, tx, cp;

	_bnxt_get_max_rings(bp, &rx, &tx, &cp);
	*max_rx = rx;
	*max_tx = tx;
	if (!rx || !tx || !cp)
		return -ENOMEM;

	return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
}

static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
			       bool shared)
{
	int rc;

	rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
	if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
		/* Not enough rings, try disabling agg rings. */
		bp->flags &= ~BNXT_FLAG_AGG_RINGS;
		rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
		if (rc) {
			/* set BNXT_FLAG_AGG_RINGS back for consistency */
			bp->flags |= BNXT_FLAG_AGG_RINGS;
			return rc;
		}
		bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
		bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
		bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
		bnxt_set_ring_params(bp);
	}

	if (bp->flags & BNXT_FLAG_ROCE_CAP) {
		int max_cp, max_stat, max_irq;

		/* Reserve minimum resources for RoCE */
		max_cp = bnxt_get_max_func_cp_rings(bp);
		max_stat = bnxt_get_max_func_stat_ctxs(bp);
		max_irq = bnxt_get_max_func_irqs(bp);
		if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
		    max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
		    max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
			return 0;

		max_cp -= BNXT_MIN_ROCE_CP_RINGS;
		max_irq -= BNXT_MIN_ROCE_CP_RINGS;
		max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
		max_cp = min_t(int, max_cp, max_irq);
		max_cp = min_t(int, max_cp, max_stat);
		rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
		if (rc)
			rc = 0;
	}
	return rc;
}

/* In initial default shared ring setting, each shared ring must have a
 * RX/TX ring pair.
 */
static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
{
	bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
	bp->rx_nr_rings = bp->cp_nr_rings;
	bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
	bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
}

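/* Pick the initial ring counts: one ring under kdump, otherwise the
 * default RSS queue count scaled down on multi-port cards, then reserve
 * the rings with firmware and trim again if the reservation came back
 * smaller than requested.
 */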
static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
{
	int dflt_rings, max_rx_rings, max_tx_rings, rc;

	if (!bnxt_can_reserve_rings(bp))
		return 0;

	if (sh)
		bp->flags |= BNXT_FLAG_SHARED_RINGS;
	dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues();
	/* Reduce default rings on multi-port cards so that total default
	 * rings do not exceed CPU count.
	 */
	if (bp->port_count > 1) {
		int max_rings =
			max_t(int, num_online_cpus() / bp->port_count, 1);

		dflt_rings = min_t(int, dflt_rings, max_rings);
	}
	rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
	if (rc)
		return rc;
	bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
	bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
	if (sh)
		bnxt_trim_dflt_sh_rings(bp);
	else
		bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
	bp->tx_nr_rings = bp->tx_nr_rings_per_tc;

	rc = __bnxt_reserve_rings(bp);
	if (rc)
		netdev_warn(bp->dev, "Unable to reserve tx rings\n");
	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
	if (sh)
		bnxt_trim_dflt_sh_rings(bp);

	/* Rings may have been trimmed, re-reserve the trimmed rings. */
	if (bnxt_need_reserve_rings(bp)) {
		rc = __bnxt_reserve_rings(bp);
		if (rc)
			netdev_warn(bp->dev, "2nd rings reservation failed.\n");
		bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
	}
	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
		bp->rx_nr_rings++;
		bp->cp_nr_rings++;
	}
	return rc;
}

static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
{
	int rc;

	if (bp->tx_nr_rings)
		return 0;

	bnxt_ulp_irq_stop(bp);
	bnxt_clear_int_mode(bp);
	rc = bnxt_set_dflt_rings(bp, true);
	if (rc) {
		netdev_err(bp->dev, "Not enough rings available.\n");
		goto init_dflt_ring_err;
	}
	rc = bnxt_init_int_mode(bp);
	if (rc)
		goto init_dflt_ring_err;

	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
	if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) {
		bp->flags |= BNXT_FLAG_RFS;
		bp->dev->features |= NETIF_F_NTUPLE;
	}
init_dflt_ring_err:
	bnxt_ulp_irq_restart(bp, rc);
	return rc;
}

int bnxt_restore_pf_fw_resources(struct bnxt *bp)
{
	int rc;

	ASSERT_RTNL();
	bnxt_hwrm_func_qcaps(bp);

	if (netif_running(bp->dev))
		__bnxt_close_nic(bp, true, false);

	bnxt_ulp_irq_stop(bp);
	bnxt_clear_int_mode(bp);
	rc = bnxt_init_int_mode(bp);
	bnxt_ulp_irq_restart(bp, rc);

	if (netif_running(bp->dev)) {
		if (rc)
			dev_close(bp->dev);
		else
			rc = bnxt_open_nic(bp, true, false);
	}

	return rc;
}

static int bnxt_init_mac_addr(struct bnxt *bp)
{
	int rc = 0;

	if (BNXT_PF(bp)) {
		memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
	} else {
#ifdef CONFIG_BNXT_SRIOV
		struct bnxt_vf_info *vf = &bp->vf;
		bool strict_approval = true;

		if (is_valid_ether_addr(vf->mac_addr)) {
			/* overwrite netdev dev_addr with admin VF MAC */
			memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
			/* Older PF driver or firmware may not approve this
			 * correctly.
			 */
			strict_approval = false;
		} else {
			eth_hw_addr_random(bp->dev);
		}
		rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
#endif
	}
	return rc;
}

static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
{
	struct pci_dev *pdev = bp->pdev;
	int pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
	u32 dw;

	if (!pos) {
		netdev_info(bp->dev, "Unable to read adapter's DSN\n");
		return -EOPNOTSUPP;
	}

	/* DSN (two dw) is at an offset of 4 from the cap pos */
	pos += 4;
	pci_read_config_dword(pdev, pos, &dw);
	put_unaligned_le32(dw, &dsn[0]);
	pci_read_config_dword(pdev, pos + 4, &dw);
	put_unaligned_le32(dw, &dsn[4]);
	bp->flags |= BNXT_FLAG_DSN_VALID;
	return 0;
}

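/* PCI probe entry point.  Ordering matters: BARs and HWRM channels must be
 * set up (bnxt_init_board(), bnxt_alloc_hwrm_resources()) before the
 * two-phase firmware init, and default rings plus interrupt mode must be
 * settled before register_netdev() makes the device visible.
 */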
static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed;
	struct net_device *dev;
	struct bnxt *bp;
	int rc, max_irqs;

	if (pci_is_bridge(pdev))
		return -ENODEV;

	if (version_printed++ == 0)
		pr_info("%s", version);

	max_irqs = bnxt_get_max_irq(pdev);
	dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
	if (!dev)
		return -ENOMEM;

	bp = netdev_priv(dev);
	bnxt_set_max_func_irqs(bp, max_irqs);

	if (bnxt_vf_pciid(ent->driver_data))
		bp->flags |= BNXT_FLAG_VF;

	if (pdev->msix_cap)
		bp->flags |= BNXT_FLAG_MSIX_CAP;

	rc = bnxt_init_board(pdev, dev);
	if (rc < 0)
		goto init_err_free;

	dev->netdev_ops = &bnxt_netdev_ops;
	dev->watchdog_timeo = BNXT_TX_TIMEOUT;
	dev->ethtool_ops = &bnxt_ethtool_ops;
	pci_set_drvdata(pdev, dev);

	rc = bnxt_alloc_hwrm_resources(bp);
	if (rc)
		goto init_err_pci_clean;

	mutex_init(&bp->hwrm_cmd_lock);
	mutex_init(&bp->link_lock);

	rc = bnxt_fw_init_one_p1(bp);
	if (rc)
		goto init_err_pci_clean;

	if (BNXT_CHIP_P5(bp))
		bp->flags |= BNXT_FLAG_CHIP_P5;

	rc = bnxt_fw_init_one_p2(bp);
	if (rc)
		goto init_err_pci_clean;

	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
			   NETIF_F_TSO | NETIF_F_TSO6 |
			   NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
			   NETIF_F_GSO_IPXIP4 |
			   NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
			   NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
			   NETIF_F_RXCSUM | NETIF_F_GRO;

	if (BNXT_SUPPORTS_TPA(bp))
		dev->hw_features |= NETIF_F_LRO;

	dev->hw_enc_features =
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
			NETIF_F_TSO | NETIF_F_TSO6 |
			NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
			NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
			NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
	dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
				    NETIF_F_GSO_GRE_CSUM;
	dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
			    NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
	if (BNXT_SUPPORTS_TPA(bp))
		dev->hw_features |= NETIF_F_GRO_HW;
	dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
	if (dev->features & NETIF_F_GRO_HW)
		dev->features &= ~NETIF_F_LRO;
	dev->priv_flags |= IFF_UNICAST_FLT;

#ifdef CONFIG_BNXT_SRIOV
	init_waitqueue_head(&bp->sriov_cfg_wait);
	mutex_init(&bp->sriov_lock);
#endif
	if (BNXT_SUPPORTS_TPA(bp)) {
		bp->gro_func = bnxt_gro_func_5730x;
		if (BNXT_CHIP_P4(bp))
			bp->gro_func = bnxt_gro_func_5731x;
		else if (BNXT_CHIP_P5(bp))
			bp->gro_func = bnxt_gro_func_5750x;
	}
	if (!BNXT_CHIP_P4_PLUS(bp))
		bp->flags |= BNXT_FLAG_DOUBLE_DB;

	bp->ulp_probe = bnxt_ulp_probe;

	rc = bnxt_init_mac_addr(bp);
	if (rc) {
		dev_err(&pdev->dev, "Unable to initialize mac address.\n");
		rc = -EADDRNOTAVAIL;
		goto init_err_pci_clean;
	}

	if (BNXT_PF(bp)) {
		/* Read the adapter's DSN to use as the eswitch switch_id */
		rc = bnxt_pcie_dsn_get(bp, bp->dsn);
	}

	/* MTU range: 60 - FW defined max */
	dev->min_mtu = ETH_ZLEN;
	dev->max_mtu = bp->max_mtu;

	rc = bnxt_probe_phy(bp, true);
	if (rc)
		goto init_err_pci_clean;

	bnxt_set_rx_skb_mode(bp, false);
	bnxt_set_tpa_flags(bp);
	bnxt_set_ring_params(bp);
	rc = bnxt_set_dflt_rings(bp, true);
	if (rc) {
		netdev_err(bp->dev, "Not enough rings available.\n");
		rc = -ENOMEM;
		goto init_err_pci_clean;
	}

	bnxt_fw_init_one_p3(bp);

	if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
		bp->flags |= BNXT_FLAG_STRIP_VLAN;

	rc = bnxt_init_int_mode(bp);
	if (rc)
		goto init_err_pci_clean;

	/* No TC has been set yet and rings may have been trimmed due to
	 * limited MSIX, so we re-initialize the TX rings per TC.
	 */
	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;

	if (BNXT_PF(bp)) {
		if (!bnxt_pf_wq) {
			bnxt_pf_wq =
				create_singlethread_workqueue("bnxt_pf_wq");
			if (!bnxt_pf_wq) {
				dev_err(&pdev->dev, "Unable to create workqueue.\n");
				goto init_err_pci_clean;
			}
		}
		bnxt_init_tc(bp);
	}

	bnxt_dl_register(bp);

	rc = register_netdev(dev);
	if (rc)
		goto init_err_cleanup;

	if (BNXT_PF(bp))
		devlink_port_type_eth_set(&bp->dl_port, bp->dev);
	bnxt_dl_fw_reporters_create(bp);

	netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
		    board_info[ent->driver_data].name,
		    (long)pci_resource_start(pdev, 0), dev->dev_addr);
	pcie_print_link_status(pdev);

	return 0;

init_err_cleanup:
	bnxt_dl_unregister(bp);
	bnxt_shutdown_tc(bp);
	bnxt_clear_int_mode(bp);

init_err_pci_clean:
	bnxt_hwrm_func_drv_unrgtr(bp);
	bnxt_free_hwrm_short_cmd_req(bp);
	bnxt_free_hwrm_resources(bp);
	bnxt_free_ctx_mem(bp);
	kfree(bp->ctx);
	bp->ctx = NULL;
	kfree(bp->fw_health);
	bp->fw_health = NULL;
	bnxt_cleanup_pci(bp);

init_err_free:
	free_netdev(dev);
	return rc;
}

static void bnxt_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp;

	if (!dev)
		return;

	rtnl_lock();
	bp = netdev_priv(dev);
	if (!bp)
		goto shutdown_exit;

	if (netif_running(dev))
		dev_close(dev);

	bnxt_ulp_shutdown(bp);

	if (system_state == SYSTEM_POWER_OFF) {
		bnxt_clear_int_mode(bp);
		pci_disable_device(pdev);
		pci_wake_from_d3(pdev, bp->wol);
		pci_set_power_state(pdev, PCI_D3hot);
	}

shutdown_exit:
	rtnl_unlock();
}

#ifdef CONFIG_PM_SLEEP
static int bnxt_suspend(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	rtnl_lock();
	bnxt_ulp_stop(bp);
	if (netif_running(dev)) {
		netif_device_detach(dev);
		rc = bnxt_close(dev);
	}
	bnxt_hwrm_func_drv_unrgtr(bp);
	pci_disable_device(bp->pdev);
	bnxt_free_ctx_mem(bp);
	kfree(bp->ctx);
	bp->ctx = NULL;
	rtnl_unlock();
	return rc;
}

static int bnxt_resume(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	rtnl_lock();
	rc = pci_enable_device(bp->pdev);
	if (rc) {
		netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
			   rc);
		goto resume_exit;
	}
	pci_set_master(bp->pdev);
	if (bnxt_hwrm_ver_get(bp)) {
		rc = -ENODEV;
		goto resume_exit;
	}
	rc = bnxt_hwrm_func_reset(bp);
	if (rc) {
		rc = -EBUSY;
		goto resume_exit;
	}

	if (bnxt_hwrm_queue_qportcfg(bp)) {
		rc = -ENODEV;
		goto resume_exit;
	}

	if (bp->hwrm_spec_code >= 0x10803) {
		if (bnxt_alloc_ctx_mem(bp)) {
			rc = -ENODEV;
			goto resume_exit;
		}
	}
	if (BNXT_NEW_RM(bp))
		bnxt_hwrm_func_resc_qcaps(bp, false);

	if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
		rc = -ENODEV;
		goto resume_exit;
	}

	bnxt_get_wol_settings(bp);
	if (netif_running(dev)) {
		rc = bnxt_open(dev);
		if (!rc)
			netif_device_attach(dev);
	}

resume_exit:
	bnxt_ulp_start(bp, rc);
	rtnl_unlock();
	return rc;
}

static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
#define BNXT_PM_OPS (&bnxt_pm_ops)

#else

#define BNXT_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */

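/* PCI AER recovery: the PCI core calls .error_detected first, then
 * .slot_reset after the link has been reset, and finally .resume when
 * traffic may flow again.  The three callbacks below, wired up through
 * bnxt_err_handler, implement that sequence for this driver.
 */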
/**
 * bnxt_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(netdev);

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();
	netif_device_detach(netdev);

	bnxt_ulp_stop(bp);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(netdev))
		bnxt_close(netdev);

	pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnxt_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(netdev);
	int err = 0;
	pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;

	netdev_info(bp->dev, "PCI Slot Reset\n");

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
	} else {
		pci_set_master(pdev);

		err = bnxt_hwrm_func_reset(bp);
		if (!err && netif_running(netdev))
			err = bnxt_open(netdev);

		if (!err)
			result = PCI_ERS_RESULT_RECOVERED;
		bnxt_ulp_start(bp, err);
	}

	if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev))
		dev_close(netdev);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnxt_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void bnxt_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	rtnl_lock();

	netif_device_attach(netdev);

	rtnl_unlock();
}

static const struct pci_error_handlers bnxt_err_handler = {
	.error_detected	= bnxt_io_error_detected,
	.slot_reset	= bnxt_io_slot_reset,
	.resume		= bnxt_io_resume
};

static struct pci_driver bnxt_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnxt_pci_tbl,
	.probe		= bnxt_init_one,
	.remove		= bnxt_remove_one,
	.shutdown	= bnxt_shutdown,
	.driver.pm	= BNXT_PM_OPS,
	.err_handler	= &bnxt_err_handler,
#if defined(CONFIG_BNXT_SRIOV)
	.sriov_configure = bnxt_sriov_configure,
#endif
};

static int __init bnxt_init(void)
{
	bnxt_debug_init();
	return pci_register_driver(&bnxt_pci_driver);
}

static void __exit bnxt_exit(void)
{
	pci_unregister_driver(&bnxt_pci_driver);
	if (bnxt_pf_wq)
		destroy_workqueue(bnxt_pf_wq);
	bnxt_debug_exit();
}

module_init(bnxt_init);
module_exit(bnxt_exit);