/*-
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2001
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*
 * Broadcom BCM57xx(x)/BCM590x NetXtreme and NetLink family Ethernet driver
 *
 * The Broadcom BCM5700 is based on technology originally developed by
 * Alteon Networks as part of the Tigon I and Tigon II Gigabit Ethernet
 * MAC chips.  The BCM5700, sometimes referred to as the Tigon III, has
 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
 * SSRAM.  The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
 * frames, highly configurable RX filtering, and 16 RX and TX queues
 * (which, along with RX filter rules, can be used for QOS applications).
 * Other features, such as TCP segmentation, may be available as part
 * of value-added firmware updates.  Unlike the Tigon I and Tigon II,
 * firmware images can be stored in hardware and need not be compiled
 * into the driver.
 *
 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
 *
 * The BCM5701 is a single-chip solution incorporating both the BCM5700
 * MAC and a BCM5401 10/100/1000 PHY.  Unlike the BCM5700, the BCM5701
 * does not support external SSRAM.
 *
 * Broadcom also produces a variation of the BCM5700 under the "Altima"
 * brand name, which is functionally similar but lacks PCI-X support.
 *
 * Without external SSRAM, you can only have at most 4 TX rings,
 * and the use of the mini RX ring is disabled.  This seems to imply
 * that these features are simply not available on the BCM5701.  As a
 * result, this driver does not implement any support for the mini RX
 * ring.
 */
#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <net/bpf.h>

#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/brgphyreg.h>

#ifdef __sparc64__
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/openfirm.h>
#include <machine/ofw_machdep.h>
#include <machine/ver.h>
#endif

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/bge/if_bgereg.h>
#define	BGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP)
#define	ETHER_MIN_NOPAD		(ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */
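
/*
 * Illustrative sketch (not part of the driver, never compiled): the MAC
 * appends the 4-byte CRC itself, so the host must hand it frames of at
 * least ETHER_MIN_NOPAD (60) bytes to reach ETHER_MIN_LEN (64) on the
 * wire.  Assuming a single-mbuf frame with trailing room, a transmit
 * path using this constant would zero-pad roughly as follows.
 */
#if 0
	if (m->m_pkthdr.len < ETHER_MIN_NOPAD &&
	    M_TRAILINGSPACE(m) >= ETHER_MIN_NOPAD - m->m_pkthdr.len) {
		int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len;

		/* Zero the pad bytes and grow the mbuf to cover them. */
		bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
		m->m_len += padlen;
		m->m_pkthdr.len += padlen;
	}
#endif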
MODULE_DEPEND(bge, pci, 1, 1, 1);
MODULE_DEPEND(bge, ether, 1, 1, 1);
MODULE_DEPEND(bge, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"
/*
 * Various supported device vendors/types and their names.  Note: the
 * spec seems to indicate that the hardware still has Alteon's vendor
 * ID burned into it, though it will always be overridden by the vendor
 * ID in the EEPROM.  Just to be safe, we cover all possibilities.
 */
static const struct bge_type {
	uint16_t	bge_vid;
	uint16_t	bge_did;
} bge_devs[] = {
	{ ALTEON_VENDORID,	ALTEON_DEVICEID_BCM5700 },
	{ ALTEON_VENDORID,	ALTEON_DEVICEID_BCM5701 },

	{ ALTIMA_VENDORID,	ALTIMA_DEVICE_AC1000 },
	{ ALTIMA_VENDORID,	ALTIMA_DEVICE_AC1002 },
	{ ALTIMA_VENDORID,	ALTIMA_DEVICE_AC9100 },

	{ APPLE_VENDORID,	APPLE_DEVICE_BCM5701 },

	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5700 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5701 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5702 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5702_ALT },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5702X },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5703 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5703_ALT },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5703X },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5704C },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5704S },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5704S_ALT },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5705 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5705F },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5705K },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5705M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5705M_ALT },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5714C },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5714S },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5715 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5715S },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5717 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5718 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5719 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5720 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5721 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5722 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5723 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5725 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5727 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5750 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5750M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5751 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5751F },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5751M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5752 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5752M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5753 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5753F },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5753M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5754 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5754M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5755 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5755M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5756 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5761 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5761E },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5761S },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5761SE },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5762 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5764 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5780 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5780S },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5781 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5782 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5784 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5785F },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5785G },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5786 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5787 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5787F },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5787M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5788 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5789 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5901 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5901A2 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5903M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5906 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5906M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM57760 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM57761 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM57762 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM57764 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM57765 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM57766 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM57767 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM57780 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM57781 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM57782 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM57785 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM57786 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM57787 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM57788 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM57790 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM57791 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM57795 },

	{ SK_VENDORID,		SK_DEVICEID_ALTIMA },

	{ TC_VENDORID,		TC_DEVICEID_3C996 },

	{ FJTSU_VENDORID,	FJTSU_DEVICEID_PW008GE4 },
	{ FJTSU_VENDORID,	FJTSU_DEVICEID_PW008GE5 },
	{ FJTSU_VENDORID,	FJTSU_DEVICEID_PP250450 },

	{ 0, 0 }
};
static const struct bge_vendor {
	uint16_t	v_id;
	const char	*v_name;
} bge_vendors[] = {
	{ ALTEON_VENDORID,	"Alteon" },
	{ ALTIMA_VENDORID,	"Altima" },
	{ APPLE_VENDORID,	"Apple" },
	{ BCOM_VENDORID,	"Broadcom" },
	{ SK_VENDORID,		"SysKonnect" },
	{ TC_VENDORID,		"3Com" },
	{ FJTSU_VENDORID,	"Fujitsu" },

	{ 0, NULL }
};
static const struct bge_revision {
	uint32_t	br_chipid;
	const char	*br_name;
} bge_revisions[] = {
	{ BGE_CHIPID_BCM5700_A0,	"BCM5700 A0" },
	{ BGE_CHIPID_BCM5700_A1,	"BCM5700 A1" },
	{ BGE_CHIPID_BCM5700_B0,	"BCM5700 B0" },
	{ BGE_CHIPID_BCM5700_B1,	"BCM5700 B1" },
	{ BGE_CHIPID_BCM5700_B2,	"BCM5700 B2" },
	{ BGE_CHIPID_BCM5700_B3,	"BCM5700 B3" },
	{ BGE_CHIPID_BCM5700_ALTIMA,	"BCM5700 Altima" },
	{ BGE_CHIPID_BCM5700_C0,	"BCM5700 C0" },
	{ BGE_CHIPID_BCM5701_A0,	"BCM5701 A0" },
	{ BGE_CHIPID_BCM5701_B0,	"BCM5701 B0" },
	{ BGE_CHIPID_BCM5701_B2,	"BCM5701 B2" },
	{ BGE_CHIPID_BCM5701_B5,	"BCM5701 B5" },
	{ BGE_CHIPID_BCM5703_A0,	"BCM5703 A0" },
	{ BGE_CHIPID_BCM5703_A1,	"BCM5703 A1" },
	{ BGE_CHIPID_BCM5703_A2,	"BCM5703 A2" },
	{ BGE_CHIPID_BCM5703_A3,	"BCM5703 A3" },
	{ BGE_CHIPID_BCM5703_B0,	"BCM5703 B0" },
	{ BGE_CHIPID_BCM5704_A0,	"BCM5704 A0" },
	{ BGE_CHIPID_BCM5704_A1,	"BCM5704 A1" },
	{ BGE_CHIPID_BCM5704_A2,	"BCM5704 A2" },
	{ BGE_CHIPID_BCM5704_A3,	"BCM5704 A3" },
	{ BGE_CHIPID_BCM5704_B0,	"BCM5704 B0" },
	{ BGE_CHIPID_BCM5705_A0,	"BCM5705 A0" },
	{ BGE_CHIPID_BCM5705_A1,	"BCM5705 A1" },
	{ BGE_CHIPID_BCM5705_A2,	"BCM5705 A2" },
	{ BGE_CHIPID_BCM5705_A3,	"BCM5705 A3" },
	{ BGE_CHIPID_BCM5750_A0,	"BCM5750 A0" },
	{ BGE_CHIPID_BCM5750_A1,	"BCM5750 A1" },
	{ BGE_CHIPID_BCM5750_A3,	"BCM5750 A3" },
	{ BGE_CHIPID_BCM5750_B0,	"BCM5750 B0" },
	{ BGE_CHIPID_BCM5750_B1,	"BCM5750 B1" },
	{ BGE_CHIPID_BCM5750_C0,	"BCM5750 C0" },
	{ BGE_CHIPID_BCM5750_C1,	"BCM5750 C1" },
	{ BGE_CHIPID_BCM5750_C2,	"BCM5750 C2" },
	{ BGE_CHIPID_BCM5714_A0,	"BCM5714 A0" },
	{ BGE_CHIPID_BCM5752_A0,	"BCM5752 A0" },
	{ BGE_CHIPID_BCM5752_A1,	"BCM5752 A1" },
	{ BGE_CHIPID_BCM5752_A2,	"BCM5752 A2" },
	{ BGE_CHIPID_BCM5714_B0,	"BCM5714 B0" },
	{ BGE_CHIPID_BCM5714_B3,	"BCM5714 B3" },
	{ BGE_CHIPID_BCM5715_A0,	"BCM5715 A0" },
	{ BGE_CHIPID_BCM5715_A1,	"BCM5715 A1" },
	{ BGE_CHIPID_BCM5715_A3,	"BCM5715 A3" },
	{ BGE_CHIPID_BCM5717_A0,	"BCM5717 A0" },
	{ BGE_CHIPID_BCM5717_B0,	"BCM5717 B0" },
	{ BGE_CHIPID_BCM5719_A0,	"BCM5719 A0" },
	{ BGE_CHIPID_BCM5720_A0,	"BCM5720 A0" },
	{ BGE_CHIPID_BCM5755_A0,	"BCM5755 A0" },
	{ BGE_CHIPID_BCM5755_A1,	"BCM5755 A1" },
	{ BGE_CHIPID_BCM5755_A2,	"BCM5755 A2" },
	{ BGE_CHIPID_BCM5722_A0,	"BCM5722 A0" },
	{ BGE_CHIPID_BCM5761_A0,	"BCM5761 A0" },
	{ BGE_CHIPID_BCM5761_A1,	"BCM5761 A1" },
	{ BGE_CHIPID_BCM5762_A0,	"BCM5762 A0" },
	{ BGE_CHIPID_BCM5784_A0,	"BCM5784 A0" },
	{ BGE_CHIPID_BCM5784_A1,	"BCM5784 A1" },
	/* 5754 and 5787 share the same ASIC ID */
	{ BGE_CHIPID_BCM5787_A0,	"BCM5754/5787 A0" },
	{ BGE_CHIPID_BCM5787_A1,	"BCM5754/5787 A1" },
	{ BGE_CHIPID_BCM5787_A2,	"BCM5754/5787 A2" },
	{ BGE_CHIPID_BCM5906_A1,	"BCM5906 A1" },
	{ BGE_CHIPID_BCM5906_A2,	"BCM5906 A2" },
	{ BGE_CHIPID_BCM57765_A0,	"BCM57765 A0" },
	{ BGE_CHIPID_BCM57765_B0,	"BCM57765 B0" },
	{ BGE_CHIPID_BCM57780_A0,	"BCM57780 A0" },
	{ BGE_CHIPID_BCM57780_A1,	"BCM57780 A1" },

	{ 0, NULL }
};
/*
 * Some defaults for major revisions, so that newer steppings
 * that we don't know about have a shot at working.
 */
static const struct bge_revision bge_majorrevs[] = {
	{ BGE_ASICREV_BCM5700,		"unknown BCM5700" },
	{ BGE_ASICREV_BCM5701,		"unknown BCM5701" },
	{ BGE_ASICREV_BCM5703,		"unknown BCM5703" },
	{ BGE_ASICREV_BCM5704,		"unknown BCM5704" },
	{ BGE_ASICREV_BCM5705,		"unknown BCM5705" },
	{ BGE_ASICREV_BCM5750,		"unknown BCM5750" },
	{ BGE_ASICREV_BCM5714_A0,	"unknown BCM5714" },
	{ BGE_ASICREV_BCM5752,		"unknown BCM5752" },
	{ BGE_ASICREV_BCM5780,		"unknown BCM5780" },
	{ BGE_ASICREV_BCM5714,		"unknown BCM5714" },
	{ BGE_ASICREV_BCM5755,		"unknown BCM5755" },
	{ BGE_ASICREV_BCM5761,		"unknown BCM5761" },
	{ BGE_ASICREV_BCM5784,		"unknown BCM5784" },
	{ BGE_ASICREV_BCM5785,		"unknown BCM5785" },
	/* 5754 and 5787 share the same ASIC ID */
	{ BGE_ASICREV_BCM5787,		"unknown BCM5754/5787" },
	{ BGE_ASICREV_BCM5906,		"unknown BCM5906" },
	{ BGE_ASICREV_BCM57765,		"unknown BCM57765" },
	{ BGE_ASICREV_BCM57766,		"unknown BCM57766" },
	{ BGE_ASICREV_BCM57780,		"unknown BCM57780" },
	{ BGE_ASICREV_BCM5717,		"unknown BCM5717" },
	{ BGE_ASICREV_BCM5719,		"unknown BCM5719" },
	{ BGE_ASICREV_BCM5720,		"unknown BCM5720" },
	{ BGE_ASICREV_BCM5762,		"unknown BCM5762" },

	{ 0, NULL }
};
#define	BGE_IS_JUMBO_CAPABLE(sc)	((sc)->bge_flags & BGE_FLAG_JUMBO)
#define	BGE_IS_5700_FAMILY(sc)		((sc)->bge_flags & BGE_FLAG_5700_FAMILY)
#define	BGE_IS_5705_PLUS(sc)		((sc)->bge_flags & BGE_FLAG_5705_PLUS)
#define	BGE_IS_5714_FAMILY(sc)		((sc)->bge_flags & BGE_FLAG_5714_FAMILY)
#define	BGE_IS_575X_PLUS(sc)		((sc)->bge_flags & BGE_FLAG_575X_PLUS)
#define	BGE_IS_5755_PLUS(sc)		((sc)->bge_flags & BGE_FLAG_5755_PLUS)
#define	BGE_IS_5717_PLUS(sc)		((sc)->bge_flags & BGE_FLAG_5717_PLUS)
#define	BGE_IS_57765_PLUS(sc)		((sc)->bge_flags & BGE_FLAG_57765_PLUS)
static uint32_t bge_chipid(device_t);
static const struct bge_vendor * bge_lookup_vendor(uint16_t);
static const struct bge_revision * bge_lookup_rev(uint32_t);

typedef int	(*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]);

static int bge_probe(device_t);
static int bge_attach(device_t);
static int bge_detach(device_t);
static int bge_suspend(device_t);
static int bge_resume(device_t);
static void bge_release_resources(struct bge_softc *);
static void bge_dma_map_addr(void *, bus_dma_segment_t *, int, int);
static int bge_dma_alloc(struct bge_softc *);
static void bge_dma_free(struct bge_softc *);
static int bge_dma_ring_alloc(struct bge_softc *, bus_size_t, bus_size_t,
    bus_dma_tag_t *, uint8_t **, bus_dmamap_t *, bus_addr_t *, const char *);

static void bge_devinfo(struct bge_softc *);
static int bge_mbox_reorder(struct bge_softc *);

static int bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[]);
static int bge_get_eaddr_mem(struct bge_softc *, uint8_t[]);
static int bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]);
static int bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]);
static int bge_get_eaddr(struct bge_softc *, uint8_t[]);

static void bge_txeof(struct bge_softc *, uint16_t);
static void bge_rxcsum(struct bge_softc *, struct bge_rx_bd *, struct mbuf *);
static int bge_rxeof(struct bge_softc *, uint16_t, int);

static void bge_asf_driver_up (struct bge_softc *);
static void bge_tick(void *);
static void bge_stats_clear_regs(struct bge_softc *);
static void bge_stats_update(struct bge_softc *);
static void bge_stats_update_regs(struct bge_softc *);
static struct mbuf *bge_check_short_dma(struct mbuf *);
static struct mbuf *bge_setup_tso(struct bge_softc *, struct mbuf *,
    uint16_t *, uint16_t *);
static int bge_encap(struct bge_softc *, struct mbuf **, uint32_t *);

static void bge_intr(void *);
static int bge_msi_intr(void *);
static void bge_intr_task(void *, int);
static void bge_start_locked(struct ifnet *);
static void bge_start(struct ifnet *);
static int bge_ioctl(struct ifnet *, u_long, caddr_t);
static void bge_init_locked(struct bge_softc *);
static void bge_init(void *);
static void bge_stop_block(struct bge_softc *, bus_size_t, uint32_t);
static void bge_stop(struct bge_softc *);
static void bge_watchdog(struct bge_softc *);
static int bge_shutdown(device_t);
static int bge_ifmedia_upd_locked(struct ifnet *);
static int bge_ifmedia_upd(struct ifnet *);
static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static uint8_t bge_nvram_getbyte(struct bge_softc *, int, uint8_t *);
static int bge_read_nvram(struct bge_softc *, caddr_t, int, int);

static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *);
static int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);

static void bge_setpromisc(struct bge_softc *);
static void bge_setmulti(struct bge_softc *);
static void bge_setvlan(struct bge_softc *);

static __inline void bge_rxreuse_std(struct bge_softc *, int);
static __inline void bge_rxreuse_jumbo(struct bge_softc *, int);
static int bge_newbuf_std(struct bge_softc *, int);
static int bge_newbuf_jumbo(struct bge_softc *, int);
static int bge_init_rx_ring_std(struct bge_softc *);
static void bge_free_rx_ring_std(struct bge_softc *);
static int bge_init_rx_ring_jumbo(struct bge_softc *);
static void bge_free_rx_ring_jumbo(struct bge_softc *);
static void bge_free_tx_ring(struct bge_softc *);
static int bge_init_tx_ring(struct bge_softc *);

static int bge_chipinit(struct bge_softc *);
static int bge_blockinit(struct bge_softc *);
static uint32_t bge_dma_swap_options(struct bge_softc *);

static int bge_has_eaddr(struct bge_softc *);
static uint32_t bge_readmem_ind(struct bge_softc *, int);
static void bge_writemem_ind(struct bge_softc *, int, int);
static void bge_writembx(struct bge_softc *, int, int);
#ifdef notdef
static uint32_t bge_readreg_ind(struct bge_softc *, int);
#endif
static void bge_writemem_direct(struct bge_softc *, int, int);
static void bge_writereg_ind(struct bge_softc *, int, int);

static int bge_miibus_readreg(device_t, int, int);
static int bge_miibus_writereg(device_t, int, int, int);
static void bge_miibus_statchg(device_t);
#ifdef DEVICE_POLLING
static int bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
#endif

#define	BGE_RESET_SHUTDOWN	0
#define	BGE_RESET_START		1
#define	BGE_RESET_SUSPEND	2
static void bge_sig_post_reset(struct bge_softc *, int);
static void bge_sig_legacy(struct bge_softc *, int);
static void bge_sig_pre_reset(struct bge_softc *, int);
static void bge_stop_fw(struct bge_softc *);
static int bge_reset(struct bge_softc *);
static void bge_link_upd(struct bge_softc *);

static void bge_ape_lock_init(struct bge_softc *);
static void bge_ape_read_fw_ver(struct bge_softc *);
static int bge_ape_lock(struct bge_softc *, int);
static void bge_ape_unlock(struct bge_softc *, int);
static void bge_ape_send_event(struct bge_softc *, uint32_t);
static void bge_ape_driver_state_change(struct bge_softc *, int);

/*
 * The BGE_REGISTER_DEBUG option is only for low-level debugging.  It may
 * leak information to untrusted users.  It is also known to cause alignment
 * traps on certain architectures.
 */
#ifdef BGE_REGISTER_DEBUG
static int bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
static int bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS);
static int bge_sysctl_ape_read(SYSCTL_HANDLER_ARGS);
static int bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS);
#endif
static void bge_add_sysctls(struct bge_softc *);
static void bge_add_sysctl_stats_regs(struct bge_softc *,
    struct sysctl_ctx_list *, struct sysctl_oid_list *);
static void bge_add_sysctl_stats(struct bge_softc *, struct sysctl_ctx_list *,
    struct sysctl_oid_list *);
static int bge_sysctl_stats(SYSCTL_HANDLER_ARGS);
static device_method_t bge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		bge_probe),
	DEVMETHOD(device_attach,	bge_attach),
	DEVMETHOD(device_detach,	bge_detach),
	DEVMETHOD(device_shutdown,	bge_shutdown),
	DEVMETHOD(device_suspend,	bge_suspend),
	DEVMETHOD(device_resume,	bge_resume),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	bge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	bge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	bge_miibus_statchg),

	DEVMETHOD_END
};

static driver_t bge_driver = {
	"bge",
	bge_methods,
	sizeof(struct bge_softc)
};

static devclass_t bge_devclass;

DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0);
DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
static int bge_allow_asf = 1;

TUNABLE_INT("hw.bge.allow_asf", &bge_allow_asf);

static SYSCTL_NODE(_hw, OID_AUTO, bge, CTLFLAG_RD, 0, "BGE driver parameters");
SYSCTL_INT(_hw_bge, OID_AUTO, allow_asf, CTLFLAG_RD, &bge_allow_asf, 0,
    "Allow ASF mode if available");
#define	SPARC64_BLADE_1500_MODEL	"SUNW,Sun-Blade-1500"
#define	SPARC64_BLADE_1500_PATH_BGE	"/pci@1f,700000/network@2"
#define	SPARC64_BLADE_2500_MODEL	"SUNW,Sun-Blade-2500"
#define	SPARC64_BLADE_2500_PATH_BGE	"/pci@1c,600000/network@3"
#define	SPARC64_OFW_SUBVENDOR		"subsystem-vendor-id"
static int
bge_has_eaddr(struct bge_softc *sc)
{
#ifdef __sparc64__
	char buf[sizeof(SPARC64_BLADE_1500_PATH_BGE)];
	device_t dev;
	uint32_t subvendor;

	dev = sc->bge_dev;

	/*
	 * The on-board BGEs found in sun4u machines aren't fitted with
	 * an EEPROM which means that we have to obtain the MAC address
	 * via OFW and that some tests will always fail.  We distinguish
	 * such BGEs by the subvendor ID, which also has to be obtained
	 * from OFW instead of the PCI configuration space as the latter
	 * indicates Broadcom as the subvendor of the netboot interface.
	 * For early Blade 1500 and 2500 we even have to check the OFW
	 * device path as the subvendor ID always defaults to Broadcom
	 * there.
	 */
	if (OF_getprop(ofw_bus_get_node(dev), SPARC64_OFW_SUBVENDOR,
	    &subvendor, sizeof(subvendor)) == sizeof(subvendor) &&
	    (subvendor == FJTSU_VENDORID || subvendor == SUN_VENDORID))
		return (0);
	memset(buf, 0, sizeof(buf));
	if (OF_package_to_path(ofw_bus_get_node(dev), buf, sizeof(buf)) > 0) {
		if (strcmp(sparc64_model, SPARC64_BLADE_1500_MODEL) == 0 &&
		    strcmp(buf, SPARC64_BLADE_1500_PATH_BGE) == 0)
			return (0);
		if (strcmp(sparc64_model, SPARC64_BLADE_2500_MODEL) == 0 &&
		    strcmp(buf, SPARC64_BLADE_2500_PATH_BGE) == 0)
			return (0);
	}
#endif
	return (1);
}
static uint32_t
bge_readmem_ind(struct bge_softc *sc, int off)
{
	device_t dev;
	uint32_t val;

	if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
	    off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
		return (0);

	dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
	val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4);
	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
	return (val);
}
static void
bge_writemem_ind(struct bge_softc *sc, int off, int val)
{
	device_t dev;

	if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
	    off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
		return;

	dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
	pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
}
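
/*
 * Note (sketch): the two helpers above implement the usual
 * indirect-window pattern -- point BGE_PCI_MEMWIN_BASEADDR at an offset
 * in NIC-internal RAM, move 32 bits through BGE_PCI_MEMWIN_DATA, then
 * park the window at 0 again.  For example, the ASF handshake code
 * later in this file writes the firmware mailbox this way:
 *
 *	bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC);
 */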
#ifdef notdef
static uint32_t
bge_readreg_ind(struct bge_softc *sc, int off)
{
	device_t dev;

	dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
	return (pci_read_config(dev, BGE_PCI_REG_DATA, 4));
}
#endif
static void
bge_writereg_ind(struct bge_softc *sc, int off, int val)
{
	device_t dev;

	dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
	pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
}
static void
bge_writemem_direct(struct bge_softc *sc, int off, int val)
{
	CSR_WRITE_4(sc, off, val);
}
static void
bge_writembx(struct bge_softc *sc, int off, int val)
{
	if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
		off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;

	CSR_WRITE_4(sc, off, val);
	if ((sc->bge_flags & BGE_FLAG_MBOX_REORDER) != 0)
		CSR_READ_4(sc, off);
}
/*
 * Clear all stale locks and select the lock for this driver instance.
 */
static void
bge_ape_lock_init(struct bge_softc *sc)
{
	uint32_t bit, regbase;
	int i;

	if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
		regbase = BGE_APE_LOCK_GRANT;
	else
		regbase = BGE_APE_PER_LOCK_GRANT;

	/* Clear any stale locks. */
	for (i = BGE_APE_LOCK_PHY0; i <= BGE_APE_LOCK_GPIO; i++) {
		switch (i) {
		case BGE_APE_LOCK_PHY0:
		case BGE_APE_LOCK_PHY1:
		case BGE_APE_LOCK_PHY2:
		case BGE_APE_LOCK_PHY3:
			bit = BGE_APE_LOCK_GRANT_DRIVER0;
			break;
		default:
			if (sc->bge_func_addr == 0)
				bit = BGE_APE_LOCK_GRANT_DRIVER0;
			else
				bit = (1 << sc->bge_func_addr);
		}
		APE_WRITE_4(sc, regbase + 4 * i, bit);
	}

	/* Select the PHY lock based on the device's function number. */
	switch (sc->bge_func_addr) {
	case 0:
		sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY0;
		break;
	case 1:
		sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY1;
		break;
	case 2:
		sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY2;
		break;
	case 3:
		sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY3;
		break;
	default:
		device_printf(sc->bge_dev,
		    "PHY lock not supported on this function\n");
	}
}
/*
 * Check for APE firmware, set flags, and print version info.
 */
static void
bge_ape_read_fw_ver(struct bge_softc *sc)
{
	char *fwtype;
	uint32_t apedata, features;

	/* Check for a valid APE signature in shared memory. */
	apedata = APE_READ_4(sc, BGE_APE_SEG_SIG);
	if (apedata != BGE_APE_SEG_SIG_MAGIC) {
		sc->bge_mfw_flags &= ~ BGE_MFW_ON_APE;
		return;
	}

	/* Check if APE firmware is running. */
	apedata = APE_READ_4(sc, BGE_APE_FW_STATUS);
	if ((apedata & BGE_APE_FW_STATUS_READY) == 0) {
		device_printf(sc->bge_dev, "APE signature found "
		    "but FW status not ready! 0x%08x\n", apedata);
		return;
	}

	sc->bge_mfw_flags |= BGE_MFW_ON_APE;

	/* Fetch the APE firmware type and version. */
	apedata = APE_READ_4(sc, BGE_APE_FW_VERSION);
	features = APE_READ_4(sc, BGE_APE_FW_FEATURES);
	if ((features & BGE_APE_FW_FEATURE_NCSI) != 0) {
		sc->bge_mfw_flags |= BGE_MFW_TYPE_NCSI;
		fwtype = "NCSI";
	} else if ((features & BGE_APE_FW_FEATURE_DASH) != 0) {
		sc->bge_mfw_flags |= BGE_MFW_TYPE_DASH;
		fwtype = "DASH";
	} else
		fwtype = "UNKN";

	/* Print the APE firmware version. */
	device_printf(sc->bge_dev, "APE FW version: %s v%d.%d.%d.%d\n",
	    fwtype,
	    (apedata & BGE_APE_FW_VERSION_MAJMSK) >> BGE_APE_FW_VERSION_MAJSFT,
	    (apedata & BGE_APE_FW_VERSION_MINMSK) >> BGE_APE_FW_VERSION_MINSFT,
	    (apedata & BGE_APE_FW_VERSION_REVMSK) >> BGE_APE_FW_VERSION_REVSFT,
	    (apedata & BGE_APE_FW_VERSION_BLDMSK));
}
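
/*
 * Example (sketch): assuming the usual one-byte fields behind the
 * BGE_APE_FW_VERSION_* masks in if_bgereg.h, apedata == 0x01020304
 * would be reported by the device_printf() above as "v1.2.3.4" -- one
 * byte per version component.
 */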
static int
bge_ape_lock(struct bge_softc *sc, int locknum)
{
	uint32_t bit, gnt, req, status;
	int i, off;

	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
		return (0);

	/* Lock request/grant registers have different bases. */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5761) {
		req = BGE_APE_LOCK_REQ;
		gnt = BGE_APE_LOCK_GRANT;
	} else {
		req = BGE_APE_PER_LOCK_REQ;
		gnt = BGE_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	switch (locknum) {
	case BGE_APE_LOCK_GPIO:
		/* Lock required when using GPIO. */
		if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
			return (0);
		if (sc->bge_func_addr == 0)
			bit = BGE_APE_LOCK_REQ_DRIVER0;
		else
			bit = (1 << sc->bge_func_addr);
		break;
	case BGE_APE_LOCK_GRC:
		/* Lock required to reset the device. */
		if (sc->bge_func_addr == 0)
			bit = BGE_APE_LOCK_REQ_DRIVER0;
		else
			bit = (1 << sc->bge_func_addr);
		break;
	case BGE_APE_LOCK_MEM:
		/* Lock required when accessing certain APE memory. */
		if (sc->bge_func_addr == 0)
			bit = BGE_APE_LOCK_REQ_DRIVER0;
		else
			bit = (1 << sc->bge_func_addr);
		break;
	case BGE_APE_LOCK_PHY0:
	case BGE_APE_LOCK_PHY1:
	case BGE_APE_LOCK_PHY2:
	case BGE_APE_LOCK_PHY3:
		/* Lock required when accessing PHYs. */
		bit = BGE_APE_LOCK_REQ_DRIVER0;
		break;
	default:
		return (EINVAL);
	}

	/* Request a lock. */
	APE_WRITE_4(sc, req + off, bit);

	/* Wait up to 1 second to acquire lock. */
	for (i = 0; i < 20000; i++) {
		status = APE_READ_4(sc, gnt + off);
		if (status == bit)
			break;
		DELAY(50);
	}

	/* Handle any errors. */
	if (status != bit) {
		device_printf(sc->bge_dev, "APE lock %d request failed! "
		    "request = 0x%04x[0x%04x], status = 0x%04x[0x%04x]\n",
		    locknum, req + off, bit & 0xFFFF, gnt + off,
		    status & 0xFFFF);
		/* Revoke the lock request. */
		APE_WRITE_4(sc, gnt + off, bit);
		return (EBUSY);
	}

	return (0);
}
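
/*
 * Usage sketch: bge_ape_lock() and bge_ape_unlock() must bracket any
 * access that the APE firmware may also be making.  The PHY accessors
 * later in this file follow this pattern:
 *
 *	if (bge_ape_lock(sc, sc->bge_phy_ape_lock) != 0)
 *		return (0);
 *	... touch the PHY ...
 *	bge_ape_unlock(sc, sc->bge_phy_ape_lock);
 */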
static void
bge_ape_unlock(struct bge_softc *sc, int locknum)
{
	uint32_t bit, gnt;
	int off;

	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
		return;

	if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
		gnt = BGE_APE_LOCK_GRANT;
	else
		gnt = BGE_APE_PER_LOCK_GRANT;

	off = 4 * locknum;

	switch (locknum) {
	case BGE_APE_LOCK_GPIO:
		if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
			return;
		if (sc->bge_func_addr == 0)
			bit = BGE_APE_LOCK_GRANT_DRIVER0;
		else
			bit = (1 << sc->bge_func_addr);
		break;
	case BGE_APE_LOCK_GRC:
		if (sc->bge_func_addr == 0)
			bit = BGE_APE_LOCK_GRANT_DRIVER0;
		else
			bit = (1 << sc->bge_func_addr);
		break;
	case BGE_APE_LOCK_MEM:
		if (sc->bge_func_addr == 0)
			bit = BGE_APE_LOCK_GRANT_DRIVER0;
		else
			bit = (1 << sc->bge_func_addr);
		break;
	case BGE_APE_LOCK_PHY0:
	case BGE_APE_LOCK_PHY1:
	case BGE_APE_LOCK_PHY2:
	case BGE_APE_LOCK_PHY3:
		bit = BGE_APE_LOCK_GRANT_DRIVER0;
		break;
	default:
		return;
	}

	APE_WRITE_4(sc, gnt + off, bit);
}
/*
 * Send an event to the APE firmware.
 */
static void
bge_ape_send_event(struct bge_softc *sc, uint32_t event)
{
	uint32_t apedata;
	int i;

	/* NCSI does not support APE events. */
	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
		return;

	/* Wait up to 1ms for APE to service previous event. */
	for (i = 10; i > 0; i--) {
		if (bge_ape_lock(sc, BGE_APE_LOCK_MEM) != 0)
			break;
		apedata = APE_READ_4(sc, BGE_APE_EVENT_STATUS);
		if ((apedata & BGE_APE_EVENT_STATUS_EVENT_PENDING) == 0) {
			APE_WRITE_4(sc, BGE_APE_EVENT_STATUS, event |
			    BGE_APE_EVENT_STATUS_EVENT_PENDING);
			bge_ape_unlock(sc, BGE_APE_LOCK_MEM);
			APE_WRITE_4(sc, BGE_APE_EVENT, BGE_APE_EVENT_1);
			break;
		}
		bge_ape_unlock(sc, BGE_APE_LOCK_MEM);
		DELAY(100);
	}

	if (i == 0)
		device_printf(sc->bge_dev, "APE event 0x%08x send timed out\n",
		    event);
}
static void
bge_ape_driver_state_change(struct bge_softc *sc, int kind)
{
	uint32_t apedata, event;

	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
		return;

	switch (kind) {
	case BGE_RESET_START:
		/* If this is the first load, clear the load counter. */
		apedata = APE_READ_4(sc, BGE_APE_HOST_SEG_SIG);
		if (apedata != BGE_APE_HOST_SEG_SIG_MAGIC)
			APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, 0);
		else {
			apedata = APE_READ_4(sc, BGE_APE_HOST_INIT_COUNT);
			APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, ++apedata);
		}
		APE_WRITE_4(sc, BGE_APE_HOST_SEG_SIG,
		    BGE_APE_HOST_SEG_SIG_MAGIC);
		APE_WRITE_4(sc, BGE_APE_HOST_SEG_LEN,
		    BGE_APE_HOST_SEG_LEN_MAGIC);

		/* Add some version info if bge(4) supports it. */
		APE_WRITE_4(sc, BGE_APE_HOST_DRIVER_ID,
		    BGE_APE_HOST_DRIVER_ID_MAGIC(1, 0));
		APE_WRITE_4(sc, BGE_APE_HOST_BEHAVIOR,
		    BGE_APE_HOST_BEHAV_NO_PHYLOCK);
		APE_WRITE_4(sc, BGE_APE_HOST_HEARTBEAT_INT_MS,
		    BGE_APE_HOST_HEARTBEAT_INT_DISABLE);
		APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE,
		    BGE_APE_HOST_DRVR_STATE_START);
		event = BGE_APE_EVENT_STATUS_STATE_START;
		break;
	case BGE_RESET_SHUTDOWN:
		APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE,
		    BGE_APE_HOST_DRVR_STATE_UNLOAD);
		event = BGE_APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case BGE_RESET_SUSPEND:
		event = BGE_APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	bge_ape_send_event(sc, event | BGE_APE_EVENT_STATUS_DRIVER_EVNT |
	    BGE_APE_EVENT_STATUS_STATE_CHNGE);
}
/*
 * Map a single buffer address.
 */
static void
bge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct bge_dmamap_arg *ctx;

	if (error)
		return;

	KASSERT(nseg == 1, ("%s: %d segments returned!", __func__, nseg));

	ctx = arg;
	ctx->bge_busaddr = segs->ds_addr;
}
static uint8_t
bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
{
	uint32_t access, byte = 0;
	int i;

	/* Lock. */
	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
	for (i = 0; i < 8000; i++) {
		if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
			break;
		DELAY(20);
	}
	if (i == 8000)
		return (1);

	/* Enable access. */
	access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);

	CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
	CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
			DELAY(10);
			break;
		}
	}

	if (i == BGE_TIMEOUT * 10) {
		if_printf(sc->bge_ifp, "nvram read timed out\n");
		return (1);
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);

	*dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;

	/* Disable access. */
	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);

	/* Unlock. */
	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
	CSR_READ_4(sc, BGE_NVRAM_SWARB);

	return (0);
}
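
/*
 * Worked example (sketch): the NVRAM interface returns the 32-bit word
 * holding the four bytes at (addr & ~3).  After bswap32(), byte
 * (addr % 4) sits at bit position (addr % 4) * 8.  For addr = 6 and a
 * register value of 0xAABBCCDD, bswap32() yields 0xDDCCBBAA and
 * (0xDDCCBBAA >> 16) & 0xFF == 0xCC is what lands in *dest.
 */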
/*
 * Read a sequence of bytes from NVRAM.
 */
static int
bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt)
{
	int err = 0, i;
	uint8_t byte = 0;

	if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
		return (1);

	for (i = 0; i < cnt; i++) {
		err = bge_nvram_getbyte(sc, off + i, &byte);
		if (err)
			break;
		*(dest + i) = byte;
	}

	return (err ? 1 : 0);
}
/*
 * Read a byte of data stored in the EEPROM at address 'addr.' The
 * BCM570x supports both the traditional bitbang interface and an
 * auto access interface for reading the EEPROM.  We use the auto
 * access method.
 */
static uint8_t
bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
{
	int i;
	uint32_t byte = 0;

	/*
	 * Enable use of auto EEPROM access so we can avoid
	 * having to use the bitbang method.
	 */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);

	/* Reset the EEPROM, load the clock period. */
	CSR_WRITE_4(sc, BGE_EE_ADDR,
	    BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
	DELAY(20);

	/* Issue the read EEPROM command. */
	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);

	/* Wait for completion */
	for(i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
			break;
	}

	if (i == BGE_TIMEOUT * 10) {
		device_printf(sc->bge_dev, "EEPROM read timed out\n");
		return (1);
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_EE_DATA);

	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;

	return (0);
}
/*
 * Read a sequence of bytes from the EEPROM.
 */
static int
bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt)
{
	int error = 0, i;
	uint8_t byte = 0;

	for (i = 0; i < cnt; i++) {
		error = bge_eeprom_getbyte(sc, off + i, &byte);
		if (error)
			break;
		*(dest + i) = byte;
	}

	return (error ? 1 : 0);
}
static int
bge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct bge_softc *sc;
	uint32_t val;
	int i;

	sc = device_get_softc(dev);

	if (bge_ape_lock(sc, sc->bge_phy_ape_lock) != 0)
		return (0);

	/* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
	if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
		CSR_WRITE_4(sc, BGE_MI_MODE,
		    sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL);
		DELAY(80);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
	    BGE_MIPHY(phy) | BGE_MIREG(reg));

	/* Poll for the PHY register access to complete. */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		DELAY(10);
		val = CSR_READ_4(sc, BGE_MI_COMM);
		if ((val & BGE_MICOMM_BUSY) == 0) {
			DELAY(5);
			val = CSR_READ_4(sc, BGE_MI_COMM);
			break;
		}
	}

	if (i == BGE_TIMEOUT) {
		device_printf(sc->bge_dev,
		    "PHY read timed out (phy %d, reg %d, val 0x%08x)\n",
		    phy, reg, val);
		val = 0;
	}

	/* Restore the autopoll bit if necessary. */
	if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
		CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
		DELAY(80);
	}

	bge_ape_unlock(sc, sc->bge_phy_ape_lock);

	if (val & BGE_MICOMM_READFAIL)
		return (0);

	return (val & 0xFFFF);
}
static int
bge_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct bge_softc *sc;
	int i;

	sc = device_get_softc(dev);

	if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
	    (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL))
		return (0);

	if (bge_ape_lock(sc, sc->bge_phy_ape_lock) != 0)
		return (0);

	/* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
	if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
		CSR_WRITE_4(sc, BGE_MI_MODE,
		    sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL);
		DELAY(80);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
	    BGE_MIPHY(phy) | BGE_MIREG(reg) | val);

	for (i = 0; i < BGE_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
			DELAY(5);
			CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */
			break;
		}
	}

	/* Restore the autopoll bit if necessary. */
	if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
		CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
		DELAY(80);
	}

	bge_ape_unlock(sc, sc->bge_phy_ape_lock);

	if (i == BGE_TIMEOUT)
		device_printf(sc->bge_dev,
		    "PHY write timed out (phy %d, reg %d, val 0x%04x)\n",
		    phy, reg, val);

	return (0);
}
static void
bge_miibus_statchg(device_t dev)
{
	struct bge_softc *sc;
	struct mii_data *mii;
	uint32_t mac_mode, rx_mode, tx_mode;

	sc = device_get_softc(dev);
	if ((sc->bge_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;
	mii = device_get_softc(sc->bge_miibus);

	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->bge_link = 1;
			break;
		case IFM_1000_T:
		case IFM_1000_SX:
		case IFM_2500_SX:
			if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
				sc->bge_link = 1;
			else
				sc->bge_link = 0;
			break;
		default:
			sc->bge_link = 0;
			break;
		}
	} else
		sc->bge_link = 0;
	if (sc->bge_link == 0)
		return;

	/*
	 * APE firmware touches these registers to keep the MAC
	 * connected to the outside world.  Try to keep the
	 * accesses atomic.
	 */

	/* Set the port mode (MII/GMII) to match the link speed. */
	mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) &
	    ~(BGE_MACMODE_PORTMODE | BGE_MACMODE_HALF_DUPLEX);
	tx_mode = CSR_READ_4(sc, BGE_TX_MODE);
	rx_mode = CSR_READ_4(sc, BGE_RX_MODE);

	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
		mac_mode |= BGE_PORTMODE_GMII;
	else
		mac_mode |= BGE_PORTMODE_MII;

	/* Set MAC flow control behavior to match link flow control settings. */
	tx_mode &= ~BGE_TXMODE_FLOWCTL_ENABLE;
	rx_mode &= ~BGE_RXMODE_FLOWCTL_ENABLE;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			tx_mode |= BGE_TXMODE_FLOWCTL_ENABLE;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			rx_mode |= BGE_RXMODE_FLOWCTL_ENABLE;
	} else
		mac_mode |= BGE_MACMODE_HALF_DUPLEX;

	CSR_WRITE_4(sc, BGE_MAC_MODE, mac_mode);
	DELAY(40);
	CSR_WRITE_4(sc, BGE_TX_MODE, tx_mode);
	CSR_WRITE_4(sc, BGE_RX_MODE, rx_mode);
}
/*
 * Initialize a standard receive ring descriptor.
 */
static int
bge_newbuf_std(struct bge_softc *sc, int i)
{
	struct mbuf *m;
	struct bge_rx_bd *r;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int error, nsegs;

	if (sc->bge_flags & BGE_FLAG_JUMBO_STD &&
	    (sc->bge_ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
	    ETHER_VLAN_ENCAP_LEN > (MCLBYTES - ETHER_ALIGN))) {
		m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
		if (m == NULL)
			return (ENOBUFS);
		m->m_len = m->m_pkthdr.len = MJUM9BYTES;
	} else {
		m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL)
			return (ENOBUFS);
		m->m_len = m->m_pkthdr.len = MCLBYTES;
	}
	if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
		m_adj(m, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_rx_mtag,
	    sc->bge_cdata.bge_rx_std_sparemap, m, segs, &nsegs, 0);
	if (error != 0) {
		m_freem(m);
		return (error);
	}
	if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
		bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
		    sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
		    sc->bge_cdata.bge_rx_std_dmamap[i]);
	}
	map = sc->bge_cdata.bge_rx_std_dmamap[i];
	sc->bge_cdata.bge_rx_std_dmamap[i] = sc->bge_cdata.bge_rx_std_sparemap;
	sc->bge_cdata.bge_rx_std_sparemap = map;
	sc->bge_cdata.bge_rx_std_chain[i] = m;
	sc->bge_cdata.bge_rx_std_seglen[i] = segs[0].ds_len;
	r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std];
	r->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
	r->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
	r->bge_flags = BGE_RXBDFLAG_END;
	r->bge_len = segs[0].ds_len;
	r->bge_idx = i;

	bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
	    sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_PREREAD);

	return (0);
}
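
/*
 * Design note (sketch): the spare-map exchange above means a failed
 * bus_dmamap_load_mbuf_sg() never disturbs the ring -- the new mbuf is
 * always loaded into bge_rx_std_sparemap first, and only on success are
 * the spare map and the slot's map swapped.  The jumbo path below uses
 * the same pattern.
 */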
/*
 * Initialize a jumbo receive ring descriptor.  This allocates
 * a jumbo buffer from the pool managed internally by the driver.
 */
static int
bge_newbuf_jumbo(struct bge_softc *sc, int i)
{
	bus_dma_segment_t segs[BGE_NSEG_JUMBO];
	bus_dmamap_t map;
	struct bge_extrx_bd *r;
	struct mbuf *m;
	int error, nsegs;

	MGETHDR(m, M_NOWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	m_cljget(m, M_NOWAIT, MJUM9BYTES);
	if (!(m->m_flags & M_EXT)) {
		m_freem(m);
		return (ENOBUFS);
	}
	m->m_len = m->m_pkthdr.len = MJUM9BYTES;
	if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
		m_adj(m, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo,
	    sc->bge_cdata.bge_rx_jumbo_sparemap, m, segs, &nsegs, 0);
	if (error != 0) {
		m_freem(m);
		return (error);
	}

	if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
		bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
		    sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
		    sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
	}
	map = sc->bge_cdata.bge_rx_jumbo_dmamap[i];
	sc->bge_cdata.bge_rx_jumbo_dmamap[i] =
	    sc->bge_cdata.bge_rx_jumbo_sparemap;
	sc->bge_cdata.bge_rx_jumbo_sparemap = map;
	sc->bge_cdata.bge_rx_jumbo_chain[i] = m;
	sc->bge_cdata.bge_rx_jumbo_seglen[i][0] = 0;
	sc->bge_cdata.bge_rx_jumbo_seglen[i][1] = 0;
	sc->bge_cdata.bge_rx_jumbo_seglen[i][2] = 0;
	sc->bge_cdata.bge_rx_jumbo_seglen[i][3] = 0;

	/*
	 * Fill in the extended RX buffer descriptor.
	 */
	r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo];
	r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
	r->bge_idx = i;
	r->bge_len3 = r->bge_len2 = r->bge_len1 = 0;
	switch (nsegs) {
	case 4:
		r->bge_addr3.bge_addr_lo = BGE_ADDR_LO(segs[3].ds_addr);
		r->bge_addr3.bge_addr_hi = BGE_ADDR_HI(segs[3].ds_addr);
		r->bge_len3 = segs[3].ds_len;
		sc->bge_cdata.bge_rx_jumbo_seglen[i][3] = segs[3].ds_len;
		/* FALLTHROUGH */
	case 3:
		r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr);
		r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr);
		r->bge_len2 = segs[2].ds_len;
		sc->bge_cdata.bge_rx_jumbo_seglen[i][2] = segs[2].ds_len;
		/* FALLTHROUGH */
	case 2:
		r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr);
		r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr);
		r->bge_len1 = segs[1].ds_len;
		sc->bge_cdata.bge_rx_jumbo_seglen[i][1] = segs[1].ds_len;
		/* FALLTHROUGH */
	case 1:
		r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
		r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
		r->bge_len0 = segs[0].ds_len;
		sc->bge_cdata.bge_rx_jumbo_seglen[i][0] = segs[0].ds_len;
		break;
	default:
		panic("%s: %d segments\n", __func__, nsegs);
	}

	bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
	    sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_PREREAD);

	return (0);
}
static int
bge_init_rx_ring_std(struct bge_softc *sc)
{
	int error, i;

	bzero(sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);
	sc->bge_std = 0;
	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		if ((error = bge_newbuf_std(sc, i)) != 0)
			return (error);
		BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
	}

	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
	    sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);

	sc->bge_std = 0;
	bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, BGE_STD_RX_RING_CNT - 1);

	return (0);
}
static void
bge_free_rx_ring_std(struct bge_softc *sc)
{
	int i;

	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
			bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
			    sc->bge_cdata.bge_rx_std_dmamap[i],
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
			    sc->bge_cdata.bge_rx_std_dmamap[i]);
			m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
		}
		bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i],
		    sizeof(struct bge_rx_bd));
	}
}
static int
bge_init_rx_ring_jumbo(struct bge_softc *sc)
{
	struct bge_rcb *rcb;
	int error, i;

	bzero(sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ);
	sc->bge_jumbo = 0;
	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if ((error = bge_newbuf_jumbo(sc, i)) != 0)
			return (error);
		BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
	}

	bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
	    sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);

	sc->bge_jumbo = 0;

	/* Enable the jumbo receive producer ring. */
	rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
	rcb->bge_maxlen_flags =
	    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_USE_EXT_RX_BD);
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);

	bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, BGE_JUMBO_RX_RING_CNT - 1);

	return (0);
}
static void
bge_free_rx_ring_jumbo(struct bge_softc *sc)
{
	int i;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
			bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
			    sc->bge_cdata.bge_rx_jumbo_dmamap[i],
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
			    sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
			m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
			sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
		}
		bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i],
		    sizeof(struct bge_extrx_bd));
	}
}
static void
bge_free_tx_ring(struct bge_softc *sc)
{
	int i;

	if (sc->bge_ldata.bge_tx_ring == NULL)
		return;

	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
			bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag,
			    sc->bge_cdata.bge_tx_dmamap[i],
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
			    sc->bge_cdata.bge_tx_dmamap[i]);
			m_freem(sc->bge_cdata.bge_tx_chain[i]);
			sc->bge_cdata.bge_tx_chain[i] = NULL;
		}
		bzero((char *)&sc->bge_ldata.bge_tx_ring[i],
		    sizeof(struct bge_tx_bd));
	}
}
static int
bge_init_tx_ring(struct bge_softc *sc)
{
	sc->bge_txcnt = 0;
	sc->bge_tx_saved_considx = 0;

	bzero(sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ);
	bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
	    sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE);

	/* Initialize transmit producer index for host-memory send ring. */
	sc->bge_tx_prodidx = 0;
	bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);

	/* 5700 b2 errata */
	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
		bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);

	/* NIC-memory send ring not used; initialize to zero. */
	bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
	/* 5700 b2 errata */
	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
		bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);

	return (0);
}
static void
bge_setpromisc(struct bge_softc *sc)
{
	struct ifnet *ifp;

	BGE_LOCK_ASSERT(sc);

	ifp = sc->bge_ifp;

	/* Enable or disable promiscuous mode as needed. */
	if (ifp->if_flags & IFF_PROMISC)
		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
	else
		BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
}
static void
bge_setmulti(struct bge_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t hashes[4] = { 0, 0, 0, 0 };
	int h, i;

	BGE_LOCK_ASSERT(sc);

	ifp = sc->bge_ifp;

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		for (i = 0; i < 4; i++)
			CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
		return;
	}

	/* First, zot all the existing filters. */
	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);

	/* Now program new ones. */
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
		hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
	}
	if_maddr_runlock(ifp);

	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
}
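
/*
 * Worked example (sketch): the filter is a 128-bit table spread over
 * the four 32-bit BGE_MAR registers.  ether_crc32_le() over the 6-byte
 * address is masked to 7 bits; for h == 0x4b, (h & 0x60) >> 5 == 2
 * selects hashes[2] and 1 << (h & 0x1f) sets bit 0x0b in that word, so
 * a multicast frame hashing to 0x4b will be accepted.
 */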
static void
bge_setvlan(struct bge_softc *sc)
{
	struct ifnet *ifp;

	BGE_LOCK_ASSERT(sc);

	ifp = sc->bge_ifp;

	/* Enable or disable VLAN tag stripping as needed. */
	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
		BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
	else
		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
}
static void
bge_sig_pre_reset(struct bge_softc *sc, int type)
{
	/*
	 * Some chips don't like this so only do this if ASF is enabled
	 */
	if (sc->bge_asf_mode)
		bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC);

	if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
		switch (type) {
		case BGE_RESET_START:
			bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
			    BGE_FW_DRV_STATE_START);
			break;
		case BGE_RESET_SHUTDOWN:
			bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
			    BGE_FW_DRV_STATE_UNLOAD);
			break;
		case BGE_RESET_SUSPEND:
			bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
			    BGE_FW_DRV_STATE_SUSPEND);
			break;
		}
	}

	if (type == BGE_RESET_START || type == BGE_RESET_SUSPEND)
		bge_ape_driver_state_change(sc, type);
}
static void
bge_sig_post_reset(struct bge_softc *sc, int type)
{
	if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
		switch (type) {
		case BGE_RESET_START:
			bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
			    BGE_FW_DRV_STATE_START_DONE);
			/* START DONE */
			break;
		case BGE_RESET_SHUTDOWN:
			bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
			    BGE_FW_DRV_STATE_UNLOAD_DONE);
			break;
		}
	}

	if (type == BGE_RESET_SHUTDOWN)
		bge_ape_driver_state_change(sc, type);
}
static void
bge_sig_legacy(struct bge_softc *sc, int type)
{
	if (sc->bge_asf_mode) {
		switch (type) {
		case BGE_RESET_START:
			bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
			    BGE_FW_DRV_STATE_START);
			break;
		case BGE_RESET_SHUTDOWN:
			bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
			    BGE_FW_DRV_STATE_UNLOAD);
			break;
		}
	}
}
static void
bge_stop_fw(struct bge_softc *sc)
{
	int i;

	if (sc->bge_asf_mode) {
		bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB, BGE_FW_CMD_PAUSE);
		CSR_WRITE_4(sc, BGE_RX_CPU_EVENT,
		    CSR_READ_4(sc, BGE_RX_CPU_EVENT) | BGE_RX_CPU_DRV_EVENT);

		for (i = 0; i < 100; i++ ) {
			if (!(CSR_READ_4(sc, BGE_RX_CPU_EVENT) &
			    BGE_RX_CPU_DRV_EVENT))
				break;
			DELAY(10);
		}
	}
}
static uint32_t
bge_dma_swap_options(struct bge_softc *sc)
{
	uint32_t dma_options;

	dma_options = BGE_MODECTL_WORDSWAP_NONFRAME |
	    BGE_MODECTL_BYTESWAP_DATA | BGE_MODECTL_WORDSWAP_DATA;
#if BYTE_ORDER == BIG_ENDIAN
	dma_options |= BGE_MODECTL_BYTESWAP_NONFRAME;
#endif
	return (dma_options);
}
1777 * Do endian, PCI and DMA initialization.
1780 bge_chipinit(struct bge_softc
*sc
)
1782 uint32_t dma_rw_ctl
, misc_ctl
, mode_ctl
;
1786 /* Set endianness before we access any non-PCI registers. */
1787 misc_ctl
= BGE_INIT
;
1788 if (sc
->bge_flags
& BGE_FLAG_TAGGED_STATUS
)
1789 misc_ctl
|= BGE_PCIMISCCTL_TAGGED_STATUS
;
1790 pci_write_config(sc
->bge_dev
, BGE_PCI_MISC_CTL
, misc_ctl
, 4);
1793 * Clear the MAC statistics block in the NIC's
1796 for (i
= BGE_STATS_BLOCK
;
1797 i
< BGE_STATS_BLOCK_END
+ 1; i
+= sizeof(uint32_t))
1798 BGE_MEMWIN_WRITE(sc
, i
, 0);
1800 for (i
= BGE_STATUS_BLOCK
;
1801 i
< BGE_STATUS_BLOCK_END
+ 1; i
+= sizeof(uint32_t))
1802 BGE_MEMWIN_WRITE(sc
, i
, 0);
1804 if (sc
->bge_chiprev
== BGE_CHIPREV_5704_BX
) {
1806 * Fix data corruption caused by non-qword write with WB.
1807 * Fix master abort in PCI mode.
1808 * Fix PCI latency timer.
1810 val
= pci_read_config(sc
->bge_dev
, BGE_PCI_MSI_DATA
+ 2, 2);
1811 val
|= (1 << 10) | (1 << 12) | (1 << 13);
1812 pci_write_config(sc
->bge_dev
, BGE_PCI_MSI_DATA
+ 2, val
, 2);
1815 if (sc
->bge_asicrev
== BGE_ASICREV_BCM57765
||
1816 sc
->bge_asicrev
== BGE_ASICREV_BCM57766
) {
1818 * For the 57766 and non Ax versions of 57765, bootcode
1819 * needs to setup the PCIE Fast Training Sequence (FTS)
1820 * value to prevent transmit hangs.
1822 if (sc
->bge_chiprev
!= BGE_CHIPREV_57765_AX
) {
1823 CSR_WRITE_4(sc
, BGE_CPMU_PADRNG_CTL
,
1824 CSR_READ_4(sc
, BGE_CPMU_PADRNG_CTL
) |
1825 BGE_CPMU_PADRNG_CTL_RDIV2
);
1830 * Set up the PCI DMA control register.
1832 dma_rw_ctl
= BGE_PCIDMARWCTL_RD_CMD_SHIFT(6) |
1833 BGE_PCIDMARWCTL_WR_CMD_SHIFT(7);
1834 if (sc
->bge_flags
& BGE_FLAG_PCIE
) {
1835 if (sc
->bge_mps
>= 256)
1836 dma_rw_ctl
|= BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);
1838 dma_rw_ctl
|= BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1839 } else if (sc
->bge_flags
& BGE_FLAG_PCIX
) {
1840 if (BGE_IS_5714_FAMILY(sc
)) {
1841 /* 256 bytes for read and write. */
1842 dma_rw_ctl
|= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) |
1843 BGE_PCIDMARWCTL_WR_WAT_SHIFT(2);
1844 dma_rw_ctl
|= (sc
->bge_asicrev
== BGE_ASICREV_BCM5780
) ?
1845 BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL
:
1846 BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL
;
1847 } else if (sc
->bge_asicrev
== BGE_ASICREV_BCM5703
) {
1849 * In the BCM5703, the DMA read watermark should
1850 * be set to less than or equal to the maximum
1851 * memory read byte count of the PCI-X command
1854 dma_rw_ctl
|= BGE_PCIDMARWCTL_RD_WAT_SHIFT(4) |
1855 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1856 } else if (sc
->bge_asicrev
== BGE_ASICREV_BCM5704
) {
1857 /* 1536 bytes for read, 384 bytes for write. */
1858 dma_rw_ctl
|= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1859 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1861 /* 384 bytes for read and write. */
1862 dma_rw_ctl
|= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) |
1863 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) |
1866 if (sc
->bge_asicrev
== BGE_ASICREV_BCM5703
||
1867 sc
->bge_asicrev
== BGE_ASICREV_BCM5704
) {
1870 /* Set ONE_DMA_AT_ONCE for hardware workaround. */
1871 tmp
= CSR_READ_4(sc
, BGE_PCI_CLKCTL
) & 0x1F;
1872 if (tmp
== 6 || tmp
== 7)
1874 BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL
;
1876 /* Set PCI-X DMA write workaround. */
1877 dma_rw_ctl
|= BGE_PCIDMARWCTL_ASRT_ALL_BE
;
1880 /* Conventional PCI bus: 256 bytes for read and write. */
1881 dma_rw_ctl
|= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1882 BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);
1884 if (sc
->bge_asicrev
!= BGE_ASICREV_BCM5705
&&
1885 sc
->bge_asicrev
!= BGE_ASICREV_BCM5750
)
1888 if (sc
->bge_asicrev
== BGE_ASICREV_BCM5700
||
1889 sc
->bge_asicrev
== BGE_ASICREV_BCM5701
)
1890 dma_rw_ctl
|= BGE_PCIDMARWCTL_USE_MRM
|
1891 BGE_PCIDMARWCTL_ASRT_ALL_BE
;
1892 if (sc
->bge_asicrev
== BGE_ASICREV_BCM5703
||
1893 sc
->bge_asicrev
== BGE_ASICREV_BCM5704
)
1894 dma_rw_ctl
&= ~BGE_PCIDMARWCTL_MINDMA
;
1895 if (BGE_IS_5717_PLUS(sc
)) {
1896 dma_rw_ctl
&= ~BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT
;
1897 if (sc
->bge_chipid
== BGE_CHIPID_BCM57765_A0
)
1898 dma_rw_ctl
&= ~BGE_PCIDMARWCTL_CRDRDR_RDMA_MRRS_MSK
;
1900 * Enable HW workaround for controllers that misinterpret
1901 * a status tag update and leave interrupts permanently
1904 if (!BGE_IS_57765_PLUS(sc
) &&
1905 sc
->bge_asicrev
!= BGE_ASICREV_BCM5717
&&
1906 sc
->bge_asicrev
!= BGE_ASICREV_BCM5762
)
1907 dma_rw_ctl
|= BGE_PCIDMARWCTL_TAGGED_STATUS_WA
;
1909 pci_write_config(sc
->bge_dev
, BGE_PCI_DMA_RW_CTL
, dma_rw_ctl
, 4);
	/*
	 * Set up general mode register.
	 */
	mode_ctl = bge_dma_swap_options(sc);
	if (sc->bge_asicrev == BGE_ASICREV_BCM5720 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5762) {
		/* Retain Host-2-BMC settings written by APE firmware. */
		mode_ctl |= CSR_READ_4(sc, BGE_MODE_CTL) &
		    (BGE_MODECTL_BYTESWAP_B2HRX_DATA |
		    BGE_MODECTL_WORDSWAP_B2HRX_DATA |
		    BGE_MODECTL_B2HRX_ENABLE | BGE_MODECTL_HTX2B_ENABLE);
	}
	mode_ctl |= BGE_MODECTL_MAC_ATTN_INTR | BGE_MODECTL_HOST_SEND_BDS |
	    BGE_MODECTL_TX_NO_PHDR_CSUM;

	/*
	 * BCM5701 B5 has a bug causing data corruption when using
	 * 64-bit DMA reads, which can be terminated early and then
	 * completed later as 32-bit accesses, in combination with
	 * certain bridges.
	 */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
	    sc->bge_chipid == BGE_CHIPID_BCM5701_B5)
		mode_ctl |= BGE_MODECTL_FORCE_PCI32;

	/*
	 * Tell the firmware the driver is running.
	 */
	if (sc->bge_asf_mode & ASF_STACKUP)
		mode_ctl |= BGE_MODECTL_STACKUP;

	CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);

	/*
	 * Disable memory write invalidate.  Apparently it is not supported
	 * properly by these devices.  Also ensure that INTx isn't disabled,
	 * as these chips need it even when using MSI.
	 */
	PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD,
	    PCIM_CMD_INTxDIS | PCIM_CMD_MWIEN, 4);

	/* Set the timer prescaler (always 66 MHz). */
	CSR_WRITE_4(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);

	/* XXX: The Linux tg3 driver does this at the start of brgphy_reset. */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
		DELAY(40);	/* XXX */

		/* Put PHY into ready state */
		BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
		CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
		DELAY(40);
	}

	return (0);
}
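
/*
 * bge_blockinit() brings the remaining functional blocks of the chip
 * online after the low-level setup done in bge_chipinit(): the buffer
 * manager, the producer/return/send rings, host coalescing, and the
 * DMA engines and state machines.
 */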
static int
bge_blockinit(struct bge_softc *sc)
{
	struct bge_rcb *rcb;
	bus_size_t vrcb;
	bge_hostaddr taddr;
	uint32_t dmactl, rdmareg, val;
	int i, limit;
	/*
	 * Initialize the memory window pointer register so that
	 * we can access the first 32K of internal NIC RAM. This will
	 * allow us to set up the TX send ring RCBs and the RX return
	 * ring RCBs, plus other things which live in NIC memory.
	 */
	CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);

	/* Note: the BCM5704 has a smaller mbuf space than other chips. */

	if (!(BGE_IS_5705_PLUS(sc))) {
		/* Configure mbuf memory pool */
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1);
		if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
		else
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);

		/* Configure DMA resource pool */
		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
		    BGE_DMA_DESCRIPTORS);
		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
	}
	/* Configure mbuf pool watermarks */
	if (BGE_IS_5717_PLUS(sc)) {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
		if (sc->bge_ifp->if_mtu > ETHERMTU) {
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x7e);
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xea);
		} else {
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a);
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0);
		}
	} else if (!BGE_IS_5705_PLUS(sc)) {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
	} else if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
	} else {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
	}

	/* Configure DMA resource watermarks */
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
	/* Enable buffer manager */
	val = BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN;
	/*
	 * Change the arbitration algorithm of TXMBUF read request to
	 * round-robin instead of priority based for BCM5719.  When
	 * TXFIFO is almost empty, RDMA will hold its request until
	 * TXFIFO is not almost empty.
	 */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5719)
		val |= BGE_BMANMODE_NO_TX_UNDERRUN;
	CSR_WRITE_4(sc, BGE_BMAN_MODE, val);

	/* Poll for buffer manager start indication */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
			break;
	}

	if (i == BGE_TIMEOUT) {
		device_printf(sc->bge_dev, "buffer manager failed to start\n");
		return (ENXIO);
	}

	/* Enable flow-through queues */
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);

	/* Wait until queue initialization is complete */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
			break;
	}

	if (i == BGE_TIMEOUT) {
		device_printf(sc->bge_dev, "flow-through queue init failed\n");
		return (ENXIO);
	}
	/*
	 * Summary of rings supported by the controller:
	 *
	 * Standard Receive Producer Ring
	 * - This ring is used to feed receive buffers for "standard"
	 *   sized frames (typically 1536 bytes) to the controller.
	 *
	 * Jumbo Receive Producer Ring
	 * - This ring is used to feed receive buffers for jumbo sized
	 *   frames (i.e. anything bigger than the "standard" frames)
	 *   to the controller.
	 *
	 * Mini Receive Producer Ring
	 * - This ring is used to feed receive buffers for "mini"
	 *   sized frames to the controller.
	 * - This feature required external memory for the controller
	 *   but was never used in a production system.  Should always
	 *   be disabled.
	 *
	 * Receive Return Ring
	 * - After the controller has placed an incoming frame into a
	 *   receive buffer that buffer is moved into a receive return
	 *   ring.  The driver is then responsible for passing the
	 *   buffer up to the stack.  Many versions of the controller
	 *   support multiple RR rings.
	 *
	 * Send Ring
	 * - This ring is used for outgoing frames.  Many versions of
	 *   the controller support multiple send rings.
	 */
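
	/*
	 * The code below programs the rings in roughly the order listed
	 * above: the standard producer ring first, then the jumbo
	 * producer ring (on jumbo-capable chips), the mini ring (always
	 * disabled), the send rings and finally the receive return rings.
	 */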
	/* Initialize the standard receive producer ring control block. */
	rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
	rcb->bge_hostaddr.bge_addr_lo =
	    BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
	rcb->bge_hostaddr.bge_addr_hi =
	    BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
	    sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
	if (BGE_IS_5717_PLUS(sc)) {
		/*
		 * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32)
		 * Bits 15-2 : Maximum RX frame size
		 * Bit 1     : 1 = Ring Disabled, 0 = Ring Enabled
		 * Bit 0     : Reserved
		 */
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(512, BGE_MAX_FRAMELEN << 2);
	} else if (BGE_IS_5705_PLUS(sc)) {
		/*
		 * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32)
		 * Bits 15-2 : Reserved (should be 0)
		 * Bit 1     : 1 = Ring Disabled, 0 = Ring Enabled
		 * Bit 0     : Reserved
		 */
		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
	} else {
		/*
		 * Ring size is always XXX entries
		 * Bits 31-16: Maximum RX frame size
		 * Bits 15-2 : Reserved (should be 0)
		 * Bit 1     : 1 = Ring Disabled, 0 = Ring Enabled
		 * Bit 0     : Reserved
		 */
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
	}
	if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5720)
		rcb->bge_nicaddr = BGE_STD_RX_RINGS_5717;
	else
		rcb->bge_nicaddr = BGE_STD_RX_RINGS;
	/* Write the standard receive producer ring control block. */
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);

	/* Reset the standard receive producer ring producer index. */
	bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
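
	/*
	 * Note: the producer index mailbox is how the driver tells the
	 * chip how far it has filled a producer ring; writing 0 here
	 * clears any stale index before the ring is repopulated.
	 */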
	/*
	 * Initialize the jumbo RX producer ring control
	 * block.  We set the 'ring disabled' bit in the
	 * flags field until we're actually ready to start
	 * using this ring (i.e. once we set the MTU
	 * high enough to require it).
	 */
	if (BGE_IS_JUMBO_CAPABLE(sc)) {
		rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
		/* Get the jumbo receive producer ring RCB parameters. */
		rcb->bge_hostaddr.bge_addr_lo =
		    BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
		rcb->bge_hostaddr.bge_addr_hi =
		    BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
		    sc->bge_cdata.bge_rx_jumbo_ring_map,
		    BUS_DMASYNC_PREREAD);
		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
		    BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED);
		if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
		    sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
		    sc->bge_asicrev == BGE_ASICREV_BCM5720)
			rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS_5717;
		else
			rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
		    rcb->bge_hostaddr.bge_addr_hi);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
		    rcb->bge_hostaddr.bge_addr_lo);
		/* Program the jumbo receive producer ring RCB parameters. */
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
		/* Reset the jumbo receive producer ring producer index. */
		bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
	}

	/* Disable the mini receive producer ring RCB. */
	if (BGE_IS_5700_FAMILY(sc)) {
		rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
		CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);
		/* Reset the mini receive producer ring producer index. */
		bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
	}
	/* Choose de-pipeline mode for BCM5906 A0, A1 and A2. */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
		if (sc->bge_chipid == BGE_CHIPID_BCM5906_A0 ||
		    sc->bge_chipid == BGE_CHIPID_BCM5906_A1 ||
		    sc->bge_chipid == BGE_CHIPID_BCM5906_A2)
			CSR_WRITE_4(sc, BGE_ISO_PKT_TX,
			    (CSR_READ_4(sc, BGE_ISO_PKT_TX) & ~3) | 2);
	}

	/*
	 * The BD ring replenish thresholds control how often the
	 * hardware fetches new BD's from the producer rings in host
	 * memory.  Setting the value too low on a busy system can
	 * starve the hardware and reduce the throughput.
	 *
	 * Set the BD ring replenish thresholds. The recommended
	 * values are 1/8th the number of descriptors allocated to
	 * each ring.
	 * XXX The 5754 requires a lower threshold, so it might be a
	 * requirement of all 575x family chips.  The Linux driver sets
	 * the lower threshold for all 5705 family chips as well, but there
	 * are reports that it might not need to be so strict.
	 *
	 * XXX Linux does some extra fiddling here for the 5906 parts as
	 * well.
	 */
	if (BGE_IS_5705_PLUS(sc))
		val = 8;
	else
		val = BGE_STD_RX_RING_CNT / 8;
	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
	if (BGE_IS_JUMBO_CAPABLE(sc))
		CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH,
		    BGE_JUMBO_RX_RING_CNT/8);
	if (BGE_IS_5717_PLUS(sc)) {
		CSR_WRITE_4(sc, BGE_STD_REPLENISH_LWM, 32);
		CSR_WRITE_4(sc, BGE_JMB_REPLENISH_LWM, 16);
	}
	/*
	 * Disable all send rings by setting the 'ring disabled' bit
	 * in the flags field of all the TX send ring control blocks,
	 * located in NIC memory.
	 */
	if (!BGE_IS_5705_PLUS(sc))
		/* 5700 to 5704 had 16 send rings. */
		limit = BGE_TX_RINGS_EXTSSRAM_MAX;
	else if (BGE_IS_57765_PLUS(sc) ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5762)
		limit = 2;
	else if (BGE_IS_5717_PLUS(sc))
		limit = 4;
	else
		limit = 1;
	vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
	for (i = 0; i < limit; i++) {
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
		RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
		vrcb += sizeof(struct bge_rcb);
	}

	/* Configure send ring RCB 0 (we use only the first ring) */
	vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
	BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
	if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5720)
		RCB_WRITE_4(sc, vrcb, bge_nicaddr, BGE_SEND_RING_5717);
	else
		RCB_WRITE_4(sc, vrcb, bge_nicaddr,
		    BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
	RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
	    BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
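
	/*
	 * Note: the send and receive return ring RCBs live in NIC-internal
	 * memory and are reached through the memory window via
	 * RCB_WRITE_4(), while the standard and jumbo producer ring RCBs
	 * above are programmed through dedicated CSRs.
	 */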
	/*
	 * Disable all receive return rings by setting the
	 * 'ring disabled' bit in the flags field of all the receive
	 * return ring control blocks, located in NIC memory.
	 */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5720) {
		/* Should be 17, use 16 until we get an SRAM map. */
		limit = 16;
	} else if (!BGE_IS_5705_PLUS(sc))
		limit = BGE_RX_RINGS_MAX;
	else if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5762 ||
	    BGE_IS_57765_PLUS(sc))
		limit = 4;
	else
		limit = 1;
	/* Disable all receive return rings. */
	vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
	for (i = 0; i < limit; i++) {
		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
		    BGE_RCB_FLAG_RING_DISABLED);
		RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
		bge_writembx(sc, BGE_MBX_RX_CONS0_LO +
		    (i * (sizeof(uint64_t))), 0);
		vrcb += sizeof(struct bge_rcb);
	}

	/*
	 * Set up receive return ring 0.  Note that the NIC address
	 * for RX return rings is 0x0.  The return rings live entirely
	 * within the host, so the nicaddr field in the RCB isn't used.
	 */
	vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
	BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
	RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
	RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
	    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
	/* Set random backoff seed for TX */
	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
	    (IF_LLADDR(sc->bge_ifp)[0] + IF_LLADDR(sc->bge_ifp)[1] +
	    IF_LLADDR(sc->bge_ifp)[2] + IF_LLADDR(sc->bge_ifp)[3] +
	    IF_LLADDR(sc->bge_ifp)[4] + IF_LLADDR(sc->bge_ifp)[5]) &
	    BGE_TX_BACKOFF_SEED_MASK);

	/* Set inter-packet gap */
	val = 0x2620;
	if (sc->bge_asicrev == BGE_ASICREV_BCM5720 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5762)
		val |= CSR_READ_4(sc, BGE_TX_LENGTHS) &
		    (BGE_TXLEN_JMB_FRM_LEN_MSK | BGE_TXLEN_CNT_DN_VAL_MSK);
	CSR_WRITE_4(sc, BGE_TX_LENGTHS, val);

	/*
	 * Specify which ring to use for packets that don't match
	 * any RX rules.
	 */
	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);

	/*
	 * Configure number of RX lists. One interrupt distribution
	 * list, sixteen active lists, one bad frames class.
	 */
	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);

	/* Initialize RX list placement stats mask. */
	CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);

	/* Disable host coalescing until we get it set up */
	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);

	/* Poll to make sure it's shut down. */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
			break;
	}

	if (i == BGE_TIMEOUT) {
		device_printf(sc->bge_dev,
		    "host coalescing engine failed to idle\n");
		return (ENXIO);
	}
	/* Set up host coalescing defaults */
	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
	if (!(BGE_IS_5705_PLUS(sc))) {
		CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
		CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
	}
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);

	/* Set up address of statistics block */
	if (!(BGE_IS_5705_PLUS(sc))) {
		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
		    BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
		    BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
		CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
		CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
		CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
	}

	/* Set up address of status block */
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
	    BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
	    BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));

	/* Set up status block size. */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
	    sc->bge_chipid != BGE_CHIPID_BCM5700_C0) {
		val = BGE_STATBLKSZ_FULL;
		bzero(sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);
	} else {
		val = BGE_STATBLKSZ_32BYTE;
		bzero(sc->bge_ldata.bge_status_block, 32);
	}
	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
	    sc->bge_cdata.bge_status_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Turn on host coalescing state machine */
	CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE);
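
	/*
	 * In broad terms, the coalescing engine raises an interrupt when
	 * either limit programmed above is reached: the *_COAL_TICKS
	 * values bound latency, while the *_MAX_COAL_BDS values bound how
	 * many buffer descriptors may be batched per interrupt.
	 */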
	/* Turn on RX BD completion state machine and enable attentions */
	CSR_WRITE_4(sc, BGE_RBDC_MODE,
	    BGE_RBDCMODE_ENABLE | BGE_RBDCMODE_ATTN);

	/* Turn on RX list placement state machine */
	CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);

	/* Turn on RX list selector state machine. */
	if (!(BGE_IS_5705_PLUS(sc)))
		CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);

	/* Turn on DMA, clear stats. */
	val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB |
	    BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR |
	    BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB |
	    BGE_MACMODE_FRMHDR_DMA_ENB;

	if (sc->bge_flags & BGE_FLAG_TBI)
		val |= BGE_PORTMODE_TBI;
	else if (sc->bge_flags & BGE_FLAG_MII_SERDES)
		val |= BGE_PORTMODE_GMII;
	else
		val |= BGE_PORTMODE_MII;

	/* Allow APE to send/receive frames. */
	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0)
		val |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN;

	CSR_WRITE_4(sc, BGE_MAC_MODE, val);
	DELAY(40);
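
	/*
	 * Note: exactly one port mode was folded into the MAC mode above:
	 * TBI for fiber cards, GMII for SERDES parts driven through MII,
	 * and plain MII otherwise, based on the media flags probed in
	 * bge_attach().
	 */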
	/* Set misc. local control, enable interrupts on attentions */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);

#ifdef notdef
	/* Assert GPIO pins for PHY reset */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0 |
	    BGE_MLC_MISCIO_OUT1 | BGE_MLC_MISCIO_OUT2);
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0 |
	    BGE_MLC_MISCIO_OUTEN1 | BGE_MLC_MISCIO_OUTEN2);
#endif
	/* Turn on DMA completion state machine */
	if (!(BGE_IS_5705_PLUS(sc)))
		CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);

	val = BGE_WDMAMODE_ENABLE | BGE_WDMAMODE_ALL_ATTNS;

	/* Enable host coalescing bug fix. */
	if (BGE_IS_5755_PLUS(sc))
		val |= BGE_WDMAMODE_STATUS_TAG_FIX;

	/* Request larger DMA burst size to get better performance. */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5785)
		val |= BGE_WDMAMODE_BURST_ALL_DATA;

	/* Turn on write DMA state machine */
	CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
	DELAY(40);

	/* Turn on read DMA state machine */
	val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;

	if (sc->bge_asicrev == BGE_ASICREV_BCM5717)
		val |= BGE_RDMAMODE_MULT_DMA_RD_DIS;

	if (sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM57780)
		val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
		    BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
		    BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
	if (sc->bge_flags & BGE_FLAG_PCIE)
		val |= BGE_RDMAMODE_FIFO_LONG_BURST;
	if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) {
		val |= BGE_RDMAMODE_TSO4_ENABLE;
		if (sc->bge_flags & BGE_FLAG_TSO3 ||
		    sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
		    sc->bge_asicrev == BGE_ASICREV_BCM57780)
			val |= BGE_RDMAMODE_TSO6_ENABLE;
	}

	if (sc->bge_asicrev == BGE_ASICREV_BCM5720 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5762) {
		val |= CSR_READ_4(sc, BGE_RDMA_MODE) &
		    BGE_RDMAMODE_H2BNC_VLAN_DET;
		/*
		 * Allow multiple outstanding read requests from
		 * non-LSO read DMA engine.
		 */
		val &= ~BGE_RDMAMODE_MULT_DMA_RD_DIS;
	}

	if (sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM57780 ||
	    BGE_IS_5717_PLUS(sc) || BGE_IS_57765_PLUS(sc)) {
		if (sc->bge_asicrev == BGE_ASICREV_BCM5762)
			rdmareg = BGE_RDMA_RSRVCTRL_REG2;
		else
			rdmareg = BGE_RDMA_RSRVCTRL;
		dmactl = CSR_READ_4(sc, rdmareg);
		/*
		 * Adjust tx margin to prevent TX data corruption and
		 * fix internal FIFO overflow.
		 */
		if (sc->bge_chipid == BGE_CHIPID_BCM5719_A0 ||
		    sc->bge_asicrev == BGE_ASICREV_BCM5762) {
			dmactl &= ~(BGE_RDMA_RSRVCTRL_FIFO_LWM_MASK |
			    BGE_RDMA_RSRVCTRL_FIFO_HWM_MASK |
			    BGE_RDMA_RSRVCTRL_TXMRGN_MASK);
			dmactl |= BGE_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
			    BGE_RDMA_RSRVCTRL_FIFO_HWM_1_5K |
			    BGE_RDMA_RSRVCTRL_TXMRGN_320B;
		}
		/*
		 * Enable fix for read DMA FIFO overruns.
		 * The fix is to limit the number of RX BDs
		 * the hardware would fetch at a time.
		 */
		CSR_WRITE_4(sc, rdmareg, dmactl |
		    BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
	}

	if (sc->bge_asicrev == BGE_ASICREV_BCM5719) {
		CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
		    CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K |
		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
	} else if (sc->bge_asicrev == BGE_ASICREV_BCM5720) {
		/*
		 * Allow 4KB burst length reads for non-LSO frames.
		 * Enable 512B burst length reads for buffer descriptors.
		 */
		CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
		    CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_512 |
		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
	} else if (sc->bge_asicrev == BGE_ASICREV_BCM5762) {
		CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL_REG2,
		    CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL_REG2) |
		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K |
		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
	}

	CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
	DELAY(40);
	if (sc->bge_flags & BGE_FLAG_RDMA_BUG) {
		for (i = 0; i < BGE_NUM_RDMA_CHANNELS / 2; i++) {
			val = CSR_READ_4(sc, BGE_RDMA_LENGTH + i * 4);
			if ((val & 0xFFFF) > BGE_FRAMELEN)
				break;
			if (((val >> 16) & 0xFFFF) > BGE_FRAMELEN)
				break;
		}
		if (i != BGE_NUM_RDMA_CHANNELS / 2) {
			val = CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL);
			if (sc->bge_asicrev == BGE_ASICREV_BCM5719)
				val |= BGE_RDMA_TX_LENGTH_WA_5719;
			else
				val |= BGE_RDMA_TX_LENGTH_WA_5720;
			CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, val);
		}
	}

	/* Turn on RX data completion state machine */
	CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);

	/* Turn on RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);

	/* Turn on RX data and RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);

	/* Turn on Mbuf cluster free state machine */
	if (!(BGE_IS_5705_PLUS(sc)))
		CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);

	/* Turn on send BD completion state machine */
	CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

	/* Turn on send data completion state machine */
	val = BGE_SDCMODE_ENABLE;
	if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
		val |= BGE_SDCMODE_CDELAY;
	CSR_WRITE_4(sc, BGE_SDC_MODE, val);

	/* Turn on send data initiator state machine */
	if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3))
		CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE |
		    BGE_SDIMODE_HW_LSO_PRE_DMA);
	else
		CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);

	/* Turn on send BD initiator state machine */
	CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);

	/* Turn on send BD selector state machine */
	CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);

	CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
	    BGE_SDISTATSCTL_ENABLE | BGE_SDISTATSCTL_FASTER);
	/* ack/clear link change events */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
	    BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
	    BGE_MACSTAT_LINK_CHANGED);
	CSR_WRITE_4(sc, BGE_MI_STS, 0);

	/*
	 * Enable attention when the link has changed state for
	 * devices that use auto polling.
	 */
	if (sc->bge_flags & BGE_FLAG_TBI) {
		CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
	} else {
		if (sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) {
			CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
			DELAY(80);
		}
		if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
		    sc->bge_chipid != BGE_CHIPID_BCM5700_B2)
			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
			    BGE_EVTENB_MI_INTERRUPT);
	}

	/*
	 * Clear any pending link state attention.
	 * Otherwise some link state change events may be lost until attention
	 * is cleared by bge_intr() -> bge_link_upd() sequence.
	 * It's not necessary on newer BCM chips - perhaps enabling link
	 * state change attentions implies clearing pending attention.
	 */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
	    BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
	    BGE_MACSTAT_LINK_CHANGED);

	/* Enable link state change attentions. */
	BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);

	return (0);
}
static const struct bge_revision *
bge_lookup_rev(uint32_t chipid)
{
	const struct bge_revision *br;

	for (br = bge_revisions; br->br_name != NULL; br++) {
		if (br->br_chipid == chipid)
			return (br);
	}

	for (br = bge_majorrevs; br->br_name != NULL; br++) {
		if (br->br_chipid == BGE_ASICREV(chipid))
			return (br);
	}

	return (NULL);
}
static const struct bge_vendor *
bge_lookup_vendor(uint16_t vid)
{
	const struct bge_vendor *v;

	for (v = bge_vendors; v->v_name != NULL; v++)
		if (v->v_vid == vid)
			return (v);

	return (NULL);
}
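
/*
 * Determine the chip ID.  Newer chips report BGE_ASICREV_USE_PRODID_REG
 * in BGE_PCI_MISC_CTL and keep the real revision in a separate product
 * ID config register whose location varies by device family.
 */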
static uint32_t
bge_chipid(device_t dev)
{
	uint32_t id;

	id = pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
	    BGE_PCIMISCCTL_ASICREV_SHIFT;
	if (BGE_ASICREV(id) == BGE_ASICREV_USE_PRODID_REG) {
		/*
		 * Find the ASIC revision.  Different chips use different
		 * registers.
		 */
		switch (pci_get_device(dev)) {
		case BCOM_DEVICEID_BCM5717:
		case BCOM_DEVICEID_BCM5718:
		case BCOM_DEVICEID_BCM5719:
		case BCOM_DEVICEID_BCM5720:
		case BCOM_DEVICEID_BCM5725:
		case BCOM_DEVICEID_BCM5727:
		case BCOM_DEVICEID_BCM5762:
		case BCOM_DEVICEID_BCM57764:
		case BCOM_DEVICEID_BCM57767:
		case BCOM_DEVICEID_BCM57787:
			id = pci_read_config(dev,
			    BGE_PCI_GEN2_PRODID_ASICREV, 4);
			break;
		case BCOM_DEVICEID_BCM57761:
		case BCOM_DEVICEID_BCM57762:
		case BCOM_DEVICEID_BCM57765:
		case BCOM_DEVICEID_BCM57766:
		case BCOM_DEVICEID_BCM57781:
		case BCOM_DEVICEID_BCM57782:
		case BCOM_DEVICEID_BCM57785:
		case BCOM_DEVICEID_BCM57786:
		case BCOM_DEVICEID_BCM57791:
		case BCOM_DEVICEID_BCM57795:
			id = pci_read_config(dev,
			    BGE_PCI_GEN15_PRODID_ASICREV, 4);
			break;
		default:
			id = pci_read_config(dev, BGE_PCI_PRODID_ASICREV, 4);
			break;
		}
	}
	return (id);
}
/*
 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
 * against our list and return its name if we find a match.
 *
 * Note that since the Broadcom controller contains VPD support, we
 * try to get the device name string from the controller itself instead
 * of the compiled-in string. It guarantees we'll always announce the
 * right product name. We fall back to the compiled-in string when
 * VPD is unavailable or corrupt.
 */
static int
bge_probe(device_t dev)
{
	char model[64];
	char buf[96];
	const struct bge_revision *br;
	const char *pname;
	struct bge_softc *sc;
	const struct bge_type *t = bge_devs;
	const struct bge_vendor *v;
	uint32_t id;
	uint16_t did, vid;

	sc = device_get_softc(dev);
	sc->bge_dev = dev;
	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);
	while(t->bge_vid != 0) {
		if ((vid == t->bge_vid) && (did == t->bge_did)) {
			id = bge_chipid(dev);
			br = bge_lookup_rev(id);
			if (bge_has_eaddr(sc) &&
			    pci_get_vpd_ident(dev, &pname) == 0)
				snprintf(model, sizeof(model), "%s", pname);
			else {
				v = bge_lookup_vendor(vid);
				snprintf(model, sizeof(model), "%s %s",
				    v != NULL ? v->v_name : "Unknown",
				    br != NULL ? br->br_name :
				    "NetXtreme/NetLink Ethernet Controller");
			}
			snprintf(buf, sizeof(buf), "%s, %sASIC rev. %#08x",
			    model, br != NULL ? "" : "unknown ", id);
			device_set_desc_copy(dev, buf);
			return (BUS_PROBE_DEFAULT);
		}
		t++;
	}

	return (ENXIO);
}
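
/*
 * bge_dma_free() releases everything bge_dma_alloc() set up, roughly in
 * reverse order: the per-buffer DMA maps first, then the mbuf tags, then
 * each ring's map/memory/tag triple, and the parent tags last.
 */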
static void
bge_dma_free(struct bge_softc *sc)
{
	int i;

	/* Destroy DMA maps for RX buffers. */
	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_std_dmamap[i])
			bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
			    sc->bge_cdata.bge_rx_std_dmamap[i]);
	}
	if (sc->bge_cdata.bge_rx_std_sparemap)
		bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
		    sc->bge_cdata.bge_rx_std_sparemap);

	/* Destroy DMA maps for jumbo RX buffers. */
	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_jumbo_dmamap[i])
			bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
			    sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
	}
	if (sc->bge_cdata.bge_rx_jumbo_sparemap)
		bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
		    sc->bge_cdata.bge_rx_jumbo_sparemap);

	/* Destroy DMA maps for TX buffers. */
	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_tx_dmamap[i])
			bus_dmamap_destroy(sc->bge_cdata.bge_tx_mtag,
			    sc->bge_cdata.bge_tx_dmamap[i]);
	}

	if (sc->bge_cdata.bge_rx_mtag)
		bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag);
	if (sc->bge_cdata.bge_mtag_jumbo)
		bus_dma_tag_destroy(sc->bge_cdata.bge_mtag_jumbo);
	if (sc->bge_cdata.bge_tx_mtag)
		bus_dma_tag_destroy(sc->bge_cdata.bge_tx_mtag);

	/* Destroy standard RX ring. */
	if (sc->bge_cdata.bge_rx_std_ring_map)
		bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag,
		    sc->bge_cdata.bge_rx_std_ring_map);
	if (sc->bge_cdata.bge_rx_std_ring_map && sc->bge_ldata.bge_rx_std_ring)
		bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag,
		    sc->bge_ldata.bge_rx_std_ring,
		    sc->bge_cdata.bge_rx_std_ring_map);

	if (sc->bge_cdata.bge_rx_std_ring_tag)
		bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag);

	/* Destroy jumbo RX ring. */
	if (sc->bge_cdata.bge_rx_jumbo_ring_map)
		bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag,
		    sc->bge_cdata.bge_rx_jumbo_ring_map);

	if (sc->bge_cdata.bge_rx_jumbo_ring_map &&
	    sc->bge_ldata.bge_rx_jumbo_ring)
		bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
		    sc->bge_ldata.bge_rx_jumbo_ring,
		    sc->bge_cdata.bge_rx_jumbo_ring_map);

	if (sc->bge_cdata.bge_rx_jumbo_ring_tag)
		bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag);

	/* Destroy RX return ring. */
	if (sc->bge_cdata.bge_rx_return_ring_map)
		bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag,
		    sc->bge_cdata.bge_rx_return_ring_map);

	if (sc->bge_cdata.bge_rx_return_ring_map &&
	    sc->bge_ldata.bge_rx_return_ring)
		bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag,
		    sc->bge_ldata.bge_rx_return_ring,
		    sc->bge_cdata.bge_rx_return_ring_map);

	if (sc->bge_cdata.bge_rx_return_ring_tag)
		bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag);

	/* Destroy TX ring. */
	if (sc->bge_cdata.bge_tx_ring_map)
		bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag,
		    sc->bge_cdata.bge_tx_ring_map);

	if (sc->bge_cdata.bge_tx_ring_map && sc->bge_ldata.bge_tx_ring)
		bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag,
		    sc->bge_ldata.bge_tx_ring,
		    sc->bge_cdata.bge_tx_ring_map);

	if (sc->bge_cdata.bge_tx_ring_tag)
		bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag);

	/* Destroy status block. */
	if (sc->bge_cdata.bge_status_map)
		bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
		    sc->bge_cdata.bge_status_map);

	if (sc->bge_cdata.bge_status_map && sc->bge_ldata.bge_status_block)
		bus_dmamem_free(sc->bge_cdata.bge_status_tag,
		    sc->bge_ldata.bge_status_block,
		    sc->bge_cdata.bge_status_map);

	if (sc->bge_cdata.bge_status_tag)
		bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag);

	/* Destroy statistics block. */
	if (sc->bge_cdata.bge_stats_map)
		bus_dmamap_unload(sc->bge_cdata.bge_stats_tag,
		    sc->bge_cdata.bge_stats_map);

	if (sc->bge_cdata.bge_stats_map && sc->bge_ldata.bge_stats)
		bus_dmamem_free(sc->bge_cdata.bge_stats_tag,
		    sc->bge_ldata.bge_stats,
		    sc->bge_cdata.bge_stats_map);

	if (sc->bge_cdata.bge_stats_tag)
		bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag);

	if (sc->bge_cdata.bge_buffer_tag)
		bus_dma_tag_destroy(sc->bge_cdata.bge_buffer_tag);

	/* Destroy the parent tag. */
	if (sc->bge_cdata.bge_parent_tag)
		bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
}
static int
bge_dma_ring_alloc(struct bge_softc *sc, bus_size_t alignment,
    bus_size_t maxsize, bus_dma_tag_t *tag, uint8_t **ring, bus_dmamap_t *map,
    bus_addr_t *paddr, const char *msg)
{
	struct bge_dmamap_arg ctx;
	bus_addr_t lowaddr;
	bus_size_t ring_end;
	int error;

	lowaddr = BUS_SPACE_MAXADDR;
again:
	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
	    alignment, 0, lowaddr, BUS_SPACE_MAXADDR, NULL,
	    NULL, maxsize, 1, maxsize, 0, NULL, NULL, tag);
	if (error != 0) {
		device_printf(sc->bge_dev,
		    "could not create %s dma tag\n", msg);
		return (ENOMEM);
	}
	/* Allocate DMA'able memory for ring. */
	error = bus_dmamem_alloc(*tag, (void **)ring,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, map);
	if (error != 0) {
		device_printf(sc->bge_dev,
		    "could not allocate DMA'able memory for %s\n", msg);
		return (ENOMEM);
	}
	/* Load the address of the ring. */
	ctx.bge_busaddr = 0;
	error = bus_dmamap_load(*tag, *map, *ring, maxsize, bge_dma_map_addr,
	    &ctx, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->bge_dev,
		    "could not load DMA'able memory for %s\n", msg);
		return (ENOMEM);
	}
	*paddr = ctx.bge_busaddr;
	ring_end = *paddr + maxsize;
	if ((sc->bge_flags & BGE_FLAG_4G_BNDRY_BUG) != 0 &&
	    BGE_ADDR_HI(*paddr) != BGE_ADDR_HI(ring_end)) {
		/*
		 * 4GB boundary crossed.  Limit maximum allowable DMA
		 * address space to 32bit and try again.
		 */
		bus_dmamap_unload(*tag, *map);
		bus_dmamem_free(*tag, *ring, *map);
		bus_dma_tag_destroy(*tag);
		if (bootverbose)
			device_printf(sc->bge_dev, "4GB boundary crossed, "
			    "limit DMA address space to 32bit for %s\n", msg);
		*ring = NULL;
		*tag = NULL;
		*map = NULL;
		lowaddr = BUS_SPACE_MAXADDR_32BIT;
		goto again;
	}
	return (0);
}
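
/*
 * Note: on controllers flagged with BGE_FLAG_4G_BNDRY_BUG the helper
 * above transparently retries the allocation with a 32bit address limit
 * whenever the ring happens to straddle a 4GB boundary.
 */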
static int
bge_dma_alloc(struct bge_softc *sc)
{
	bus_addr_t lowaddr;
	bus_size_t boundary, sbsz, rxmaxsegsz, txsegsz, txmaxsegsz;
	int i, error;

	lowaddr = BUS_SPACE_MAXADDR;
	if ((sc->bge_flags & BGE_FLAG_40BIT_BUG) != 0)
		lowaddr = BGE_DMA_MAXADDR;
	/*
	 * Allocate the parent bus DMA tag appropriate for PCI.
	 */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),
	    1, 0, lowaddr, BUS_SPACE_MAXADDR, NULL,
	    NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
	    0, NULL, NULL, &sc->bge_cdata.bge_parent_tag);
	if (error != 0) {
		device_printf(sc->bge_dev,
		    "could not allocate parent dma tag\n");
		return (ENOMEM);
	}

	/* Create tag for standard RX ring. */
	error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_STD_RX_RING_SZ,
	    &sc->bge_cdata.bge_rx_std_ring_tag,
	    (uint8_t **)&sc->bge_ldata.bge_rx_std_ring,
	    &sc->bge_cdata.bge_rx_std_ring_map,
	    &sc->bge_ldata.bge_rx_std_ring_paddr, "RX ring");
	if (error)
		return (error);

	/* Create tag for RX return ring. */
	error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_RX_RTN_RING_SZ(sc),
	    &sc->bge_cdata.bge_rx_return_ring_tag,
	    (uint8_t **)&sc->bge_ldata.bge_rx_return_ring,
	    &sc->bge_cdata.bge_rx_return_ring_map,
	    &sc->bge_ldata.bge_rx_return_ring_paddr, "RX return ring");
	if (error)
		return (error);

	/* Create tag for TX ring. */
	error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_TX_RING_SZ,
	    &sc->bge_cdata.bge_tx_ring_tag,
	    (uint8_t **)&sc->bge_ldata.bge_tx_ring,
	    &sc->bge_cdata.bge_tx_ring_map,
	    &sc->bge_ldata.bge_tx_ring_paddr, "TX ring");
	if (error)
		return (error);

	/*
	 * Create tag for status block.
	 * Because we only use single Tx/Rx/Rx return ring, use
	 * minimum status block size except for BCM5700 AX/BX, which
	 * seems to want to see the full status block size regardless
	 * of the configured number of rings.
	 */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
	    sc->bge_chipid != BGE_CHIPID_BCM5700_C0)
		sbsz = BGE_STATUS_BLK_SZ;
	else
		sbsz = 32;
	error = bge_dma_ring_alloc(sc, PAGE_SIZE, sbsz,
	    &sc->bge_cdata.bge_status_tag,
	    (uint8_t **)&sc->bge_ldata.bge_status_block,
	    &sc->bge_cdata.bge_status_map,
	    &sc->bge_ldata.bge_status_block_paddr, "status block");
	if (error)
		return (error);

	/* Create tag for statistics block. */
	error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_STATS_SZ,
	    &sc->bge_cdata.bge_stats_tag,
	    (uint8_t **)&sc->bge_ldata.bge_stats,
	    &sc->bge_cdata.bge_stats_map,
	    &sc->bge_ldata.bge_stats_paddr, "statistics block");
	if (error)
		return (error);

	/* Create tag for jumbo RX ring. */
	if (BGE_IS_JUMBO_CAPABLE(sc)) {
		error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_JUMBO_RX_RING_SZ,
		    &sc->bge_cdata.bge_rx_jumbo_ring_tag,
		    (uint8_t **)&sc->bge_ldata.bge_rx_jumbo_ring,
		    &sc->bge_cdata.bge_rx_jumbo_ring_map,
		    &sc->bge_ldata.bge_rx_jumbo_ring_paddr, "jumbo RX ring");
		if (error)
			return (error);
	}

	/* Create parent tag for buffers. */
	boundary = 0;
	if ((sc->bge_flags & BGE_FLAG_4G_BNDRY_BUG) != 0) {
		boundary = BGE_DMA_BNDRY;
		/*
		 * XXX
		 * A watchdog timeout issue was observed on the BCM5704 when
		 * it lives behind a PCI-X bridge (e.g. the AMD 8131 PCI-X
		 * bridge).  Both limiting the DMA address space to 32bits
		 * and flushing the mailbox write seem to address the issue.
		 */
		if (sc->bge_pcixcap != 0)
			lowaddr = BUS_SPACE_MAXADDR_32BIT;
	}
	error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),
	    1, boundary, lowaddr, BUS_SPACE_MAXADDR, NULL,
	    NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
	    0, NULL, NULL, &sc->bge_cdata.bge_buffer_tag);
	if (error != 0) {
		device_printf(sc->bge_dev,
		    "could not allocate buffer dma tag\n");
		return (ENOMEM);
	}
	/* Create tag for Tx mbufs. */
	if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) {
		txsegsz = BGE_TSOSEG_SZ;
		txmaxsegsz = 65535 + sizeof(struct ether_vlan_header);
	} else {
		txsegsz = MCLBYTES;
		txmaxsegsz = MCLBYTES * BGE_NSEG_NEW;
	}
	error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1,
	    0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    txmaxsegsz, BGE_NSEG_NEW, txsegsz, 0, NULL, NULL,
	    &sc->bge_cdata.bge_tx_mtag);
	if (error) {
		device_printf(sc->bge_dev, "could not allocate TX dma tag\n");
		return (ENOMEM);
	}

	/* Create tag for Rx mbufs. */
	if (sc->bge_flags & BGE_FLAG_JUMBO_STD)
		rxmaxsegsz = MJUM9BYTES;
	else
		rxmaxsegsz = MCLBYTES;
	error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, rxmaxsegsz, 1,
	    rxmaxsegsz, 0, NULL, NULL, &sc->bge_cdata.bge_rx_mtag);
	if (error) {
		device_printf(sc->bge_dev, "could not allocate RX dma tag\n");
		return (ENOMEM);
	}

	/* Create DMA maps for RX buffers. */
	error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0,
	    &sc->bge_cdata.bge_rx_std_sparemap);
	if (error) {
		device_printf(sc->bge_dev,
		    "can't create spare DMA map for RX\n");
		return (ENOMEM);
	}
	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0,
		    &sc->bge_cdata.bge_rx_std_dmamap[i]);
		if (error) {
			device_printf(sc->bge_dev,
			    "can't create DMA map for RX\n");
			return (ENOMEM);
		}
	}

	/* Create DMA maps for TX buffers. */
	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		error = bus_dmamap_create(sc->bge_cdata.bge_tx_mtag, 0,
		    &sc->bge_cdata.bge_tx_dmamap[i]);
		if (error) {
			device_printf(sc->bge_dev,
			    "can't create DMA map for TX\n");
			return (ENOMEM);
		}
	}

	/* Create tags for jumbo RX buffers. */
	if (BGE_IS_JUMBO_CAPABLE(sc)) {
		error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag,
		    1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
		    NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE,
		    0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo);
		if (error) {
			device_printf(sc->bge_dev,
			    "could not allocate jumbo dma tag\n");
			return (ENOMEM);
		}
		/* Create DMA maps for jumbo RX buffers. */
		error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
		    0, &sc->bge_cdata.bge_rx_jumbo_sparemap);
		if (error) {
			device_printf(sc->bge_dev,
			    "can't create spare DMA map for jumbo RX\n");
			return (ENOMEM);
		}
		for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
			error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
			    0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
			if (error) {
				device_printf(sc->bge_dev,
				    "can't create DMA map for jumbo RX\n");
				return (ENOMEM);
			}
		}
	}

	return (0);
}
/*
 * Return true if this device has more than one port.
 */
static int
bge_has_multiple_ports(struct bge_softc *sc)
{
	device_t dev = sc->bge_dev;
	u_int b, d, f, fscan, s;

	d = pci_get_domain(dev);
	b = pci_get_bus(dev);
	s = pci_get_slot(dev);
	f = pci_get_function(dev);
	for (fscan = 0; fscan <= PCI_FUNCMAX; fscan++)
		if (fscan != f && pci_find_dbsf(d, b, s, fscan) != NULL)
			return (1);
	return (0);
}
/*
 * Return true if MSI can be used with this device.
 */
static int
bge_can_use_msi(struct bge_softc *sc)
{
	int can_use_msi = 0;

	/*
	 * Temporary workaround: the INTx disable happens in MSI enable
	 * through setup intr in our case, which undoes the re-enabling
	 * done by the driver.
	 */

	if (sc->bge_msi == 0)
		return (0);

	/* Disable MSI for polling(4). */
#ifdef DEVICE_POLLING
	return (0);
#endif

	switch (sc->bge_asicrev) {
	case BGE_ASICREV_BCM5714_A0:
	case BGE_ASICREV_BCM5714:
		/*
		 * Apparently, MSI doesn't work when these chips are
		 * configured in single-port mode.
		 */
		if (bge_has_multiple_ports(sc))
			can_use_msi = 1;
		break;
	case BGE_ASICREV_BCM5750:
		if (sc->bge_chiprev != BGE_CHIPREV_5750_AX &&
		    sc->bge_chiprev != BGE_CHIPREV_5750_BX)
			can_use_msi = 1;
		break;
	default:
		if (BGE_IS_575X_PLUS(sc))
			can_use_msi = 1;
	}
	return (can_use_msi);
}
static int
bge_mbox_reorder(struct bge_softc *sc)
{
	/* Lists of PCI bridges that are known to reorder mailbox writes. */
	static const struct mbox_reorder {
		const uint16_t vendor;
		const uint16_t device;
		const char *desc;
	} mbox_reorder_lists[] = {
		{ 0x1022, 0x7450, "AMD-8131 PCI-X Bridge" },
	};
	devclass_t pci, pcib;
	device_t bus, dev;
	int i;

	pci = devclass_find("pci");
	pcib = devclass_find("pcib");
	dev = sc->bge_dev;
	bus = device_get_parent(dev);
	for (;;) {
		dev = device_get_parent(bus);
		bus = device_get_parent(dev);
		if (device_get_devclass(dev) != pcib)
			break;
		for (i = 0; i < nitems(mbox_reorder_lists); i++) {
			if (pci_get_vendor(dev) ==
			    mbox_reorder_lists[i].vendor &&
			    pci_get_device(dev) ==
			    mbox_reorder_lists[i].device) {
				device_printf(sc->bge_dev,
				    "enabling MBOX workaround for %s\n",
				    mbox_reorder_lists[i].desc);
				return (1);
			}
		}
		if (device_get_devclass(bus) != pci)
			break;
	}
	return (0);
}
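
/*
 * Note: the loop above walks up the newbus hierarchy, comparing each
 * PCI-X bridge on the path to the host against the reorder list and
 * requesting the mailbox workaround on a match.
 */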
static void
bge_devinfo(struct bge_softc *sc)
{
	uint32_t cfg, clk;

	device_printf(sc->bge_dev,
	    "CHIP ID 0x%08x; ASIC REV 0x%02x; CHIP REV 0x%02x; ",
	    sc->bge_chipid, sc->bge_asicrev, sc->bge_chiprev);
	if (sc->bge_flags & BGE_FLAG_PCIE)
		printf("PCI-E\n");
	else if (sc->bge_flags & BGE_FLAG_PCIX) {
		printf("PCI-X ");
		cfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID_MASK;
		if (cfg == BGE_MISCCFG_BOARD_ID_5704CIOBE)
			clk = 133;
		else {
			clk = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1F;
			switch (clk) {
			case 0:
				clk = 33;
				break;
			case 2:
				clk = 50;
				break;
			case 4:
				clk = 66;
				break;
			case 6:
				clk = 100;
				break;
			case 7:
				clk = 133;
				break;
			}
		}
		printf("%u MHz\n", clk);
	} else {
		if (sc->bge_pcixcap != 0)
			printf("PCI on PCI-X ");
		else
			printf("PCI ");
		cfg = pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4);
		if (cfg & BGE_PCISTATE_PCI_BUSSPEED)
			clk = 66;
		else
			clk = 33;
		if (cfg & BGE_PCISTATE_32BIT_BUS)
			printf("%u MHz; 32bit\n", clk);
		else
			printf("%u MHz; 64bit\n", clk);
	}
}
static int
bge_attach(device_t dev)
{
	struct ifnet *ifp;
	struct bge_softc *sc;
	uint32_t hwcfg = 0, misccfg, pcistate;
	u_char eaddr[ETHER_ADDR_LEN];
	int capmask, error, reg, rid, trys;

	sc = device_get_softc(dev);
	sc->bge_dev = dev;

	BGE_LOCK_INIT(sc, device_get_nameunit(dev));
	TASK_INIT(&sc->bge_intr_task, 0, bge_intr_task, sc);
	callout_init_mtx(&sc->bge_stat_ch, &sc->bge_mtx, 0);

	pci_enable_busmaster(dev);

	/*
	 * Allocate control/status registers.
	 */
	rid = PCIR_BAR(0);
	sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->bge_res == NULL) {
		device_printf(sc->bge_dev, "couldn't map BAR0 memory\n");
		error = ENXIO;
		goto fail;
	}

	/* Save various chip information. */
	sc->bge_func_addr = pci_get_function(dev);
	sc->bge_chipid = bge_chipid(dev);
	sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
	sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);

	/* Set default PHY address. */
	sc->bge_phy_addr = 1;

	/*
	 * PHY address mapping for various devices.
	 *
	 *          | F0 Cu | F0 Sr | F1 Cu | F1 Sr |
	 * ---------+-------+-------+-------+-------+
	 * BCM57XX  |   1   |   X   |   X   |   X   |
	 * BCM5704  |   1   |   X   |   1   |   X   |
	 * BCM5717  |   1   |   8   |   2   |   9   |
	 * BCM5719  |   1   |   8   |   2   |   9   |
	 * BCM5720  |   1   |   8   |   2   |   9   |
	 *
	 *          | F2 Cu | F2 Sr | F3 Cu | F3 Sr |
	 * ---------+-------+-------+-------+-------+
	 * BCM57XX  |   X   |   X   |   X   |   X   |
	 * BCM5704  |   X   |   X   |   X   |   X   |
	 * BCM5717  |   X   |   X   |   X   |   X   |
	 * BCM5719  |   3   |   10  |   4   |   11  |
	 * BCM5720  |   X   |   X   |   X   |   X   |
	 *
	 * Other addresses may respond but they are not
	 * IEEE compliant PHYs and should be ignored.
	 */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5720) {
		if (sc->bge_chipid != BGE_CHIPID_BCM5717_A0) {
			if (CSR_READ_4(sc, BGE_SGDIG_STS) &
			    BGE_SGDIGSTS_IS_SERDES)
				sc->bge_phy_addr = sc->bge_func_addr + 8;
			else
				sc->bge_phy_addr = sc->bge_func_addr + 1;
		} else {
			if (CSR_READ_4(sc, BGE_CPMU_PHY_STRAP) &
			    BGE_CPMU_PHY_STRAP_IS_SERDES)
				sc->bge_phy_addr = sc->bge_func_addr + 8;
			else
				sc->bge_phy_addr = sc->bge_func_addr + 1;
		}
	}
+ 1;
3415 if (bge_has_eaddr(sc
))
3416 sc
->bge_flags
|= BGE_FLAG_EADDR
;
3418 /* Save chipset family. */
3419 switch (sc
->bge_asicrev
) {
3420 case BGE_ASICREV_BCM5762
:
3421 case BGE_ASICREV_BCM57765
:
3422 case BGE_ASICREV_BCM57766
:
3423 sc
->bge_flags
|= BGE_FLAG_57765_PLUS
;
3425 case BGE_ASICREV_BCM5717
:
3426 case BGE_ASICREV_BCM5719
:
3427 case BGE_ASICREV_BCM5720
:
3428 sc
->bge_flags
|= BGE_FLAG_5717_PLUS
| BGE_FLAG_5755_PLUS
|
3429 BGE_FLAG_575X_PLUS
| BGE_FLAG_5705_PLUS
| BGE_FLAG_JUMBO
|
3430 BGE_FLAG_JUMBO_FRAME
;
3431 if (sc
->bge_asicrev
== BGE_ASICREV_BCM5719
||
3432 sc
->bge_asicrev
== BGE_ASICREV_BCM5720
) {
3434 * Enable work around for DMA engine miscalculation
3435 * of TXMBUF available space.
3437 sc
->bge_flags
|= BGE_FLAG_RDMA_BUG
;
3438 if (sc
->bge_asicrev
== BGE_ASICREV_BCM5719
&&
3439 sc
->bge_chipid
== BGE_CHIPID_BCM5719_A0
) {
3440 /* Jumbo frame on BCM5719 A0 does not work. */
3441 sc
->bge_flags
&= ~BGE_FLAG_JUMBO
;
3445 case BGE_ASICREV_BCM5755
:
3446 case BGE_ASICREV_BCM5761
:
3447 case BGE_ASICREV_BCM5784
:
3448 case BGE_ASICREV_BCM5785
:
3449 case BGE_ASICREV_BCM5787
:
3450 case BGE_ASICREV_BCM57780
:
3451 sc
->bge_flags
|= BGE_FLAG_5755_PLUS
| BGE_FLAG_575X_PLUS
|
3454 case BGE_ASICREV_BCM5700
:
3455 case BGE_ASICREV_BCM5701
:
3456 case BGE_ASICREV_BCM5703
:
3457 case BGE_ASICREV_BCM5704
:
3458 sc
->bge_flags
|= BGE_FLAG_5700_FAMILY
| BGE_FLAG_JUMBO
;
3460 case BGE_ASICREV_BCM5714_A0
:
3461 case BGE_ASICREV_BCM5780
:
3462 case BGE_ASICREV_BCM5714
:
3463 sc
->bge_flags
|= BGE_FLAG_5714_FAMILY
| BGE_FLAG_JUMBO_STD
;
3465 case BGE_ASICREV_BCM5750
:
3466 case BGE_ASICREV_BCM5752
:
3467 case BGE_ASICREV_BCM5906
:
3468 sc
->bge_flags
|= BGE_FLAG_575X_PLUS
;
3470 case BGE_ASICREV_BCM5705
:
3471 sc
->bge_flags
|= BGE_FLAG_5705_PLUS
;
3475 /* Identify chips with APE processor. */
3476 switch (sc
->bge_asicrev
) {
3477 case BGE_ASICREV_BCM5717
:
3478 case BGE_ASICREV_BCM5719
:
3479 case BGE_ASICREV_BCM5720
:
3480 case BGE_ASICREV_BCM5761
:
3481 case BGE_ASICREV_BCM5762
:
3482 sc
->bge_flags
|= BGE_FLAG_APE
;
3486 /* Chips with APE need BAR2 access for APE registers/memory. */
3487 if ((sc
->bge_flags
& BGE_FLAG_APE
) != 0) {
3489 sc
->bge_res2
= bus_alloc_resource_any(dev
, SYS_RES_MEMORY
, &rid
,
3491 if (sc
->bge_res2
== NULL
) {
3492 device_printf (sc
->bge_dev
,
3493 "couldn't map BAR2 memory\n");
3498 /* Enable APE register/memory access by host driver. */
3499 pcistate
= pci_read_config(dev
, BGE_PCI_PCISTATE
, 4);
3500 pcistate
|= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR
|
3501 BGE_PCISTATE_ALLOW_APE_SHMEM_WR
|
3502 BGE_PCISTATE_ALLOW_APE_PSPACE_WR
;
3503 pci_write_config(dev
, BGE_PCI_PCISTATE
, pcistate
, 4);
3505 bge_ape_lock_init(sc
);
3506 bge_ape_read_fw_ver(sc
);
3509 /* Add SYSCTLs, requires the chipset family to be set. */
3510 bge_add_sysctls(sc
);
3512 /* Identify the chips that use an CPMU. */
3513 if (BGE_IS_5717_PLUS(sc
) ||
3514 sc
->bge_asicrev
== BGE_ASICREV_BCM5784
||
3515 sc
->bge_asicrev
== BGE_ASICREV_BCM5761
||
3516 sc
->bge_asicrev
== BGE_ASICREV_BCM5785
||
3517 sc
->bge_asicrev
== BGE_ASICREV_BCM57780
)
3518 sc
->bge_flags
|= BGE_FLAG_CPMU_PRESENT
;
3519 if ((sc
->bge_flags
& BGE_FLAG_CPMU_PRESENT
) != 0)
3520 sc
->bge_mi_mode
= BGE_MIMODE_500KHZ_CONST
;
3522 sc
->bge_mi_mode
= BGE_MIMODE_BASE
;
3523 /* Enable auto polling for BCM570[0-5]. */
3524 if (BGE_IS_5700_FAMILY(sc
) || sc
->bge_asicrev
== BGE_ASICREV_BCM5705
)
3525 sc
->bge_mi_mode
|= BGE_MIMODE_AUTOPOLL
;
3528 * All Broadcom controllers have 4GB boundary DMA bug.
3529 * Whenever an address crosses a multiple of the 4GB boundary
3530 * (including 4GB, 8Gb, 12Gb, etc.) and makes the transition
3531 * from 0xX_FFFF_FFFF to 0x(X+1)_0000_0000 an internal DMA
3532 * state machine will lockup and cause the device to hang.
3534 sc
->bge_flags
|= BGE_FLAG_4G_BNDRY_BUG
;
3536 /* BCM5755 or higher and BCM5906 have short DMA bug. */
3537 if (BGE_IS_5755_PLUS(sc
) || sc
->bge_asicrev
== BGE_ASICREV_BCM5906
)
3538 sc
->bge_flags
|= BGE_FLAG_SHORT_DMA_BUG
;
3541 * BCM5719 cannot handle DMA requests for DMA segments that
3542 * have larger than 4KB in size. However the maximum DMA
3543 * segment size created in DMA tag is 4KB for TSO, so we
3544 * wouldn't encounter the issue here.
3546 if (sc
->bge_asicrev
== BGE_ASICREV_BCM5719
)
3547 sc
->bge_flags
|= BGE_FLAG_4K_RDMA_BUG
;
3549 misccfg
= CSR_READ_4(sc
, BGE_MISC_CFG
) & BGE_MISCCFG_BOARD_ID_MASK
;
3550 if (sc
->bge_asicrev
== BGE_ASICREV_BCM5705
) {
3551 if (misccfg
== BGE_MISCCFG_BOARD_ID_5788
||
3552 misccfg
== BGE_MISCCFG_BOARD_ID_5788M
)
3553 sc
->bge_flags
|= BGE_FLAG_5788
;
3556 capmask
= BMSR_DEFCAPMASK
;
3557 if ((sc
->bge_asicrev
== BGE_ASICREV_BCM5703
&&
3558 (misccfg
== 0x4000 || misccfg
== 0x8000)) ||
3559 (sc
->bge_asicrev
== BGE_ASICREV_BCM5705
&&
3560 pci_get_vendor(dev
) == BCOM_VENDORID
&&
3561 (pci_get_device(dev
) == BCOM_DEVICEID_BCM5901
||
3562 pci_get_device(dev
) == BCOM_DEVICEID_BCM5901A2
||
3563 pci_get_device(dev
) == BCOM_DEVICEID_BCM5705F
)) ||
3564 (pci_get_vendor(dev
) == BCOM_VENDORID
&&
3565 (pci_get_device(dev
) == BCOM_DEVICEID_BCM5751F
||
3566 pci_get_device(dev
) == BCOM_DEVICEID_BCM5753F
||
3567 pci_get_device(dev
) == BCOM_DEVICEID_BCM5787F
)) ||
3568 pci_get_device(dev
) == BCOM_DEVICEID_BCM57790
||
3569 pci_get_device(dev
) == BCOM_DEVICEID_BCM57791
||
3570 pci_get_device(dev
) == BCOM_DEVICEID_BCM57795
||
3571 sc
->bge_asicrev
== BGE_ASICREV_BCM5906
) {
3572 /* These chips are 10/100 only. */
3573 capmask
&= ~BMSR_EXTSTAT
;
3574 sc
->bge_phy_flags
|= BGE_PHY_NO_WIRESPEED
;
3578 * Some controllers seem to require a special firmware to use
3579 * TSO. But the firmware is not available to FreeBSD and Linux
3580 * claims that the TSO performed by the firmware is slower than
3581 * hardware based TSO. Moreover the firmware based TSO has one
3582 * known bug which can't handle TSO if Ethernet header + IP/TCP
3583 * header is greater than 80 bytes. A workaround for the TSO
3584 * bug exist but it seems it's too expensive than not using
3585 * TSO at all. Some hardwares also have the TSO bug so limit
3586 * the TSO to the controllers that are not affected TSO issues
3587 * (e.g. 5755 or higher).
3589 if (BGE_IS_5717_PLUS(sc
)) {
3590 /* BCM5717 requires different TSO configuration. */
3591 sc
->bge_flags
|= BGE_FLAG_TSO3
;
3592 if (sc
->bge_asicrev
== BGE_ASICREV_BCM5719
&&
3593 sc
->bge_chipid
== BGE_CHIPID_BCM5719_A0
) {
3594 /* TSO on BCM5719 A0 does not work. */
3595 sc
->bge_flags
&= ~BGE_FLAG_TSO3
;
3597 } else if (BGE_IS_5755_PLUS(sc
)) {
3599 * BCM5754 and BCM5787 shares the same ASIC id so
3600 * explicit device id check is required.
3601 * Due to unknown reason TSO does not work on BCM5755M.
3603 if (pci_get_device(dev
) != BCOM_DEVICEID_BCM5754
&&
3604 pci_get_device(dev
) != BCOM_DEVICEID_BCM5754M
&&
3605 pci_get_device(dev
) != BCOM_DEVICEID_BCM5755M
)
3606 sc
->bge_flags
|= BGE_FLAG_TSO
;
3610 * Check if this is a PCI-X or PCI Express device.
3612 if (pci_find_cap(dev
, PCIY_EXPRESS
, ®
) == 0) {
3614 * Found a PCI Express capabilities register, this
3615 * must be a PCI Express device.
3617 sc
->bge_flags
|= BGE_FLAG_PCIE
;
3618 sc
->bge_expcap
= reg
;
3619 /* Extract supported maximum payload size. */
3620 sc
->bge_mps
= pci_read_config(dev
, sc
->bge_expcap
+
3621 PCIER_DEVICE_CAP
, 2);
3622 sc
->bge_mps
= 128 << (sc
->bge_mps
& PCIEM_CAP_MAX_PAYLOAD
);
3623 if (sc
->bge_asicrev
== BGE_ASICREV_BCM5719
||
3624 sc
->bge_asicrev
== BGE_ASICREV_BCM5720
)
3625 sc
->bge_expmrq
= 2048;
3627 sc
->bge_expmrq
= 4096;
3628 pci_set_max_read_req(dev
, sc
->bge_expmrq
);
3631 * Check if the device is in PCI-X Mode.
3632 * (This bit is not valid on PCI Express controllers.)
3634 if (pci_find_cap(dev
, PCIY_PCIX
, ®
) == 0)
3635 sc
->bge_pcixcap
= reg
;
3636 if ((pci_read_config(dev
, BGE_PCI_PCISTATE
, 4) &
3637 BGE_PCISTATE_PCI_BUSMODE
) == 0)
3638 sc
->bge_flags
|= BGE_FLAG_PCIX
;
3642 * The 40bit DMA bug applies to the 5714/5715 controllers and is
3643 * not actually a MAC controller bug but an issue with the embedded
3644 * PCIe to PCI-X bridge in the device. Use 40bit DMA workaround.
3646 if (BGE_IS_5714_FAMILY(sc
) && (sc
->bge_flags
& BGE_FLAG_PCIX
))
3647 sc
->bge_flags
|= BGE_FLAG_40BIT_BUG
;
3649 * Some PCI-X bridges are known to trigger write reordering to
3650 * the mailbox registers. Typical phenomena is watchdog timeouts
3651 * caused by out-of-order TX completions. Enable workaround for
3652 * PCI-X devices that live behind these bridges.
3653 * Note, PCI-X controllers can run in PCI mode so we can't use
3654 * BGE_FLAG_PCIX flag to detect PCI-X controllers.
3656 if (sc
->bge_pcixcap
!= 0 && bge_mbox_reorder(sc
) != 0)
3657 sc
->bge_flags
|= BGE_FLAG_MBOX_REORDER
;
3659 * Allocate the interrupt, using MSI if possible. These devices
3660 * support 8 MSI messages, but only the first one is used in
3664 if (pci_find_cap(sc
->bge_dev
, PCIY_MSI
, ®
) == 0) {
3665 sc
->bge_msicap
= reg
;
3667 if (bge_can_use_msi(sc
) && pci_alloc_msi(dev
, ®
) == 0) {
3669 sc
->bge_flags
|= BGE_FLAG_MSI
;
3674 * All controllers except BCM5700 supports tagged status but
3675 * we use tagged status only for MSI case on BCM5717. Otherwise
3676 * MSI on BCM5717 does not work.
3678 #ifndef DEVICE_POLLING
3679 if (sc
->bge_flags
& BGE_FLAG_MSI
&& BGE_IS_5717_PLUS(sc
))
3680 sc
->bge_flags
|= BGE_FLAG_TAGGED_STATUS
;
3683 sc
->bge_irq
= bus_alloc_resource_any(dev
, SYS_RES_IRQ
, &rid
,
3684 RF_ACTIVE
| (rid
!= 0 ? 0 : RF_SHAREABLE
));
3686 if (sc
->bge_irq
== NULL
) {
3687 device_printf(sc
->bge_dev
, "couldn't map interrupt\n");
3694 sc
->bge_asf_mode
= 0;
3695 /* No ASF if APE present. */
3696 if ((sc
->bge_flags
& BGE_FLAG_APE
) == 0) {
3697 if (bge_allow_asf
&& (bge_readmem_ind(sc
, BGE_SRAM_DATA_SIG
) ==
3698 BGE_SRAM_DATA_SIG_MAGIC
)) {
3699 if (bge_readmem_ind(sc
, BGE_SRAM_DATA_CFG
) &
3701 sc
->bge_asf_mode
|= ASF_ENABLE
;
3702 sc
->bge_asf_mode
|= ASF_STACKUP
;
3703 if (BGE_IS_575X_PLUS(sc
))
3704 sc
->bge_asf_mode
|= ASF_NEW_HANDSHAKE
;
3710 bge_sig_pre_reset(sc
, BGE_RESET_SHUTDOWN
);
3711 if (bge_reset(sc
)) {
3712 device_printf(sc
->bge_dev
, "chip reset failed\n");
3717 bge_sig_legacy(sc
, BGE_RESET_SHUTDOWN
);
3718 bge_sig_post_reset(sc
, BGE_RESET_SHUTDOWN
);
3720 if (bge_chipinit(sc
)) {
3721 device_printf(sc
->bge_dev
, "chip initialization failed\n");
3726 error
= bge_get_eaddr(sc
, eaddr
);
3728 device_printf(sc
->bge_dev
,
3729 "failed to read station address\n");
3734 /* 5705 limits RX return ring to 512 entries. */
3735 if (BGE_IS_5717_PLUS(sc
))
3736 sc
->bge_return_ring_cnt
= BGE_RETURN_RING_CNT
;
3737 else if (BGE_IS_5705_PLUS(sc
))
3738 sc
->bge_return_ring_cnt
= BGE_RETURN_RING_CNT_5705
;
3740 sc
->bge_return_ring_cnt
= BGE_RETURN_RING_CNT
;
3742 if (bge_dma_alloc(sc
)) {
3743 device_printf(sc
->bge_dev
,
3744 "failed to allocate DMA resources\n");
	/* Set default tuneable values. */
	sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
	sc->bge_rx_coal_ticks = 150;
	sc->bge_tx_coal_ticks = 150;
	sc->bge_rx_max_coal_bds = 10;
	sc->bge_tx_max_coal_bds = 10;

	/* Initialize checksum features to use. */
	sc->bge_csum_features = BGE_CSUM_FEATURES;
	if (sc->bge_forced_udpcsum != 0)
		sc->bge_csum_features |= CSUM_UDP;
	/* Set up ifnet structure */
	ifp = sc->bge_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(sc->bge_dev, "failed to if_alloc()\n");
		error = ENXIO;
		goto fail;
	}
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = bge_ioctl;
	ifp->if_start = bge_start;
	ifp->if_init = bge_init;
	ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);
	ifp->if_hwassist = sc->bge_csum_features;
	ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING |
	    IFCAP_VLAN_MTU;
	if ((sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) != 0) {
		ifp->if_hwassist |= CSUM_TSO;
		ifp->if_capabilities |= IFCAP_TSO4 | IFCAP_VLAN_HWTSO;
	}
#ifdef IFCAP_VLAN_HWCSUM
	ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
#endif
	ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	/*
	 * 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) {
		ifp->if_capabilities &= ~IFCAP_HWCSUM;
		ifp->if_capenable &= ~IFCAP_HWCSUM;
		ifp->if_hwassist = 0;
	}
	/*
	 * Figure out what sort of media we have by checking the
	 * hardware config word in the first 32k of NIC internal memory,
	 * or fall back to examining the EEPROM if necessary.
	 * Note: on some BCM5700 cards, this value appears to be unset.
	 * If that's the case, we have to rely on identifying the NIC
	 * by its PCI subsystem ID, as we do below for the SysKonnect
	 * SK-9D41.
	 */
	if (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) == BGE_SRAM_DATA_SIG_MAGIC)
		hwcfg = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG);
	else if ((sc->bge_flags & BGE_FLAG_EADDR) &&
	    (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
		if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
		    sizeof(hwcfg))) {
			device_printf(sc->bge_dev, "failed to read EEPROM\n");
			error = ENXIO;
			goto fail;
		}
		hwcfg = ntohl(hwcfg);
	}
	/* The SysKonnect SK-9D41 is a 1000baseSX card. */
	if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) ==
	    SK_SUBSYSID_9D41 || (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) {
		if (BGE_IS_5705_PLUS(sc)) {
			sc->bge_flags |= BGE_FLAG_MII_SERDES;
			sc->bge_phy_flags |= BGE_PHY_NO_WIRESPEED;
		} else
			sc->bge_flags |= BGE_FLAG_TBI;
	}
	/* Set various PHY bug flags. */
	if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
	    sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
		sc->bge_phy_flags |= BGE_PHY_CRC_BUG;
	if (sc->bge_chiprev == BGE_CHIPREV_5703_AX ||
	    sc->bge_chiprev == BGE_CHIPREV_5704_AX)
		sc->bge_phy_flags |= BGE_PHY_ADC_BUG;
	if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
		sc->bge_phy_flags |= BGE_PHY_5704_A0_BUG;
	if (pci_get_subvendor(dev) == DELL_VENDORID)
		sc->bge_phy_flags |= BGE_PHY_NO_3LED;
	if ((BGE_IS_5705_PLUS(sc)) &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5906 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5785 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM57780 &&
	    !BGE_IS_5717_PLUS(sc)) {
		if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
		    sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
		    sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
		    sc->bge_asicrev == BGE_ASICREV_BCM5787) {
			if (pci_get_device(dev) != BCOM_DEVICEID_BCM5722 &&
			    pci_get_device(dev) != BCOM_DEVICEID_BCM5756)
				sc->bge_phy_flags |= BGE_PHY_JITTER_BUG;
			if (pci_get_device(dev) == BCOM_DEVICEID_BCM5755M)
				sc->bge_phy_flags |= BGE_PHY_ADJUST_TRIM;
		} else
			sc->bge_phy_flags |= BGE_PHY_BER_BUG;
	}

	/*
	 * Don't enable Ethernet@WireSpeed for the 5700 or the
	 * 5705 A0 and A1 chips.
	 */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
	    (sc->bge_asicrev == BGE_ASICREV_BCM5705 &&
	    (sc->bge_chipid != BGE_CHIPID_BCM5705_A0 &&
	    sc->bge_chipid != BGE_CHIPID_BCM5705_A1)))
		sc->bge_phy_flags |= BGE_PHY_NO_WIRESPEED;
	if (sc->bge_flags & BGE_FLAG_TBI) {
		ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
		    bge_ifmedia_sts);
		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX, 0, NULL);
		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX | IFM_FDX,
		    0, NULL);
		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
		ifmedia_set(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO);
		sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
	} else {
		/*
		 * Do transceiver setup and tell the firmware the
		 * driver is down so we can try to get access to the
		 * PHY probe if ASF is running. Retry a couple of times
		 * if we get a conflict with the ASF firmware accessing
		 * the PHY.
		 */
		trys = 0;
		BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
again:
		bge_asf_driver_up(sc);

		error = mii_attach(dev, &sc->bge_miibus, ifp, bge_ifmedia_upd,
		    bge_ifmedia_sts, capmask, sc->bge_phy_addr, MII_OFFSET_ANY,
		    MIIF_DOPAUSE);
		if (error != 0) {
			if (trys++ < 4) {
				device_printf(sc->bge_dev, "Try again\n");
				bge_miibus_writereg(sc->bge_dev,
				    sc->bge_phy_addr, MII_BMCR, BMCR_RESET);
				goto again;
			}
			device_printf(sc->bge_dev, "attaching PHYs failed\n");
			goto fail;
		}
	}
	/*
	 * Now tell the firmware we are going up after probing the PHY.
	 */
	if (sc->bge_asf_mode & ASF_STACKUP)
		BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);

	/*
	 * When using the BCM5701 in PCI-X mode, data corruption has
	 * been observed in the first few bytes of some received packets.
	 * Aligning the packet buffer in memory eliminates the corruption.
	 * Unfortunately, this misaligns the packet payloads. On platforms
	 * which do not support unaligned accesses, we will realign the
	 * payloads by copying the received packets.
	 */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
	    sc->bge_flags & BGE_FLAG_PCIX)
		sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG;

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr);

	/* Tell upper layer we support long frames. */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	if (BGE_IS_5755_PLUS(sc) && sc->bge_flags & BGE_FLAG_MSI) {
		/* Take advantage of single-shot MSI. */
		CSR_WRITE_4(sc, BGE_MSI_MODE, CSR_READ_4(sc, BGE_MSI_MODE) &
		    ~BGE_MSIMODE_ONE_SHOT_DISABLE);
		sc->bge_tq = taskqueue_create_fast("bge_taskq", M_WAITOK,
		    taskqueue_thread_enqueue, &sc->bge_tq);
		if (sc->bge_tq == NULL) {
			device_printf(dev, "could not create taskqueue.\n");
			ether_ifdetach(ifp);
			error = ENOMEM;
			goto fail;
		}
		error = taskqueue_start_threads(&sc->bge_tq, 1, PI_NET,
		    "%s taskq", device_get_nameunit(sc->bge_dev));
		if (error != 0) {
			device_printf(dev, "could not start threads.\n");
			ether_ifdetach(ifp);
			goto fail;
		}
		error = bus_setup_intr(dev, sc->bge_irq,
		    INTR_TYPE_NET | INTR_MPSAFE, bge_msi_intr, NULL, sc,
		    &sc->bge_intrhand);
	} else
		error = bus_setup_intr(dev, sc->bge_irq,
		    INTR_TYPE_NET | INTR_MPSAFE, NULL, bge_intr, sc,
		    &sc->bge_intrhand);

	if (error) {
		ether_ifdetach(ifp);
		device_printf(sc->bge_dev, "couldn't set up irq\n");
	}

fail:
	if (error)
		bge_detach(dev);

	return (error);
}
static int
bge_detach(device_t dev)
{
	struct bge_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	ifp = sc->bge_ifp;

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	if (device_is_attached(dev)) {
		ether_ifdetach(ifp);
		BGE_LOCK(sc);
		bge_stop(sc);
		BGE_UNLOCK(sc);
		callout_drain(&sc->bge_stat_ch);
	}

	if (sc->bge_tq)
		taskqueue_drain(sc->bge_tq, &sc->bge_intr_task);

	if (sc->bge_flags & BGE_FLAG_TBI)
		ifmedia_removeall(&sc->bge_ifmedia);
	else if (sc->bge_miibus != NULL) {
		bus_generic_detach(dev);
		device_delete_child(dev, sc->bge_miibus);
	}

	bge_release_resources(sc);

	return (0);
}
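/*
 * Release the taskqueue, interrupt handler, IRQ and memory BARs, the
 * ifnet, and finally the softc mutex. Safe to call with a partially
 * initialized softc since every resource is checked against NULL first.
 */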
static void
bge_release_resources(struct bge_softc *sc)
{
	device_t dev;

	dev = sc->bge_dev;

	if (sc->bge_tq != NULL)
		taskqueue_free(sc->bge_tq);

	if (sc->bge_intrhand != NULL)
		bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);

	if (sc->bge_irq != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ,
		    rman_get_rid(sc->bge_irq), sc->bge_irq);
		pci_release_msi(dev);
	}

	if (sc->bge_res != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->bge_res), sc->bge_res);

	if (sc->bge_res2 != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->bge_res2), sc->bge_res2);

	if (sc->bge_ifp != NULL)
		if_free(sc->bge_ifp);

	bge_dma_free(sc);

	if (mtx_initialized(&sc->bge_mtx))	/* XXX */
		BGE_LOCK_DESTROY(sc);
}
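/*
 * Reset sequence, roughly: save critical PCI config state, post the
 * BGE_SRAM_FW_MB_MAGIC handshake word to SRAM, issue the core-clocks
 * reset, restore PCI state, then poll until the firmware writes back
 * the one's complement of the magic number (or, on the BCM5906, until
 * the VCPU reports that initialization is done).
 */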
static int
bge_reset(struct bge_softc *sc)
{
	device_t dev;
	uint32_t cachesize, command, mac_mode, mac_mode_mask, reset, val;
	void (*write_op)(struct bge_softc *, int, int);
	uint16_t devctl;
	int i;

	dev = sc->bge_dev;

	mac_mode_mask = BGE_MACMODE_HALF_DUPLEX | BGE_MACMODE_PORTMODE;
	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0)
		mac_mode_mask |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN;
	mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) & mac_mode_mask;

	if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) &&
	    (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
		if (sc->bge_flags & BGE_FLAG_PCIE)
			write_op = bge_writemem_direct;
		else
			write_op = bge_writemem_ind;
	} else
		write_op = bge_writereg_ind;

	if (sc->bge_asicrev != BGE_ASICREV_BCM5700 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5701) {
		CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
		for (i = 0; i < 8000; i++) {
			if (CSR_READ_4(sc, BGE_NVRAM_SWARB) &
			    BGE_NVRAMSWARB_GNT1)
				break;
			DELAY(20);
		}
		if (i == 8000)
			device_printf(dev, "NVRAM lock timed out!\n");
	}
	/* Take APE lock when performing reset. */
	bge_ape_lock(sc, BGE_APE_LOCK_GRC);

	/* Save some important PCI state. */
	cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
	command = pci_read_config(dev, BGE_PCI_CMD, 4);

	pci_write_config(dev, BGE_PCI_MISC_CTL,
	    BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
	    BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);

	/* Disable fastboot on controllers that support it. */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5752 ||
	    BGE_IS_5755_PLUS(sc)) {
		if (bootverbose)
			device_printf(dev, "Disabling fastboot\n");
		CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);
	}

	/*
	 * Write the magic number to SRAM at offset 0xB50.
	 * When firmware finishes its initialization it will
	 * write ~BGE_SRAM_FW_MB_MAGIC to the same location.
	 */
	bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC);

	reset = BGE_MISCCFG_RESET_CORE_CLOCKS | BGE_32BITTIME_66MHZ;

	/* XXX: Broadcom Linux driver. */
	if (sc->bge_flags & BGE_FLAG_PCIE) {
		if (sc->bge_asicrev != BGE_ASICREV_BCM5785 &&
		    (sc->bge_flags & BGE_FLAG_5717_PLUS) == 0) {
			if (CSR_READ_4(sc, 0x7E2C) == 0x60)	/* PCIE 1.0 */
				CSR_WRITE_4(sc, 0x7E2C, 0x20);
		}
		if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
			/* Prevent PCIE link training during global reset */
			CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29);
			reset |= 1 << 29;
		}
	}

	if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
		val = CSR_READ_4(sc, BGE_VCPU_STATUS);
		CSR_WRITE_4(sc, BGE_VCPU_STATUS,
		    val | BGE_VCPU_STATUS_DRV_RESET);
		val = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
		CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
		    val & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
	}

	/*
	 * Set GPHY Power Down Override to leave GPHY
	 * powered up in D0 uninitialized.
	 */
	if (BGE_IS_5705_PLUS(sc) &&
	    (sc->bge_flags & BGE_FLAG_CPMU_PRESENT) == 0)
		reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE;

	/* Issue global reset */
	write_op(sc, BGE_MISC_CFG, reset);

	if (sc->bge_flags & BGE_FLAG_PCIE)
		DELAY(100 * 1000);
	else
		DELAY(1000);

	/* XXX: Broadcom Linux driver. */
	if (sc->bge_flags & BGE_FLAG_PCIE) {
		if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
			DELAY(500000);	/* wait for link training to complete */
			val = pci_read_config(dev, 0xC4, 4);
			pci_write_config(dev, 0xC4, val | (1 << 15), 4);
		}
		devctl = pci_read_config(dev,
		    sc->bge_expcap + PCIER_DEVICE_CTL, 2);
		/* Clear enable no snoop and disable relaxed ordering. */
		devctl &= ~(PCIEM_CTL_RELAXED_ORD_ENABLE |
		    PCIEM_CTL_NOSNOOP_ENABLE);
		pci_write_config(dev, sc->bge_expcap + PCIER_DEVICE_CTL,
		    devctl, 2);
		pci_set_max_read_req(dev, sc->bge_expmrq);
		/* Clear error status. */
		pci_write_config(dev, sc->bge_expcap + PCIER_DEVICE_STA,
		    PCIEM_STA_CORRECTABLE_ERROR |
		    PCIEM_STA_NON_FATAL_ERROR | PCIEM_STA_FATAL_ERROR |
		    PCIEM_STA_UNSUPPORTED_REQ, 2);
	}

	/* Reset some of the PCI state that got zapped by reset. */
	pci_write_config(dev, BGE_PCI_MISC_CTL,
	    BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
	    BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
	val = BGE_PCISTATE_ROM_ENABLE | BGE_PCISTATE_ROM_RETRY_ENABLE;
	if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0 &&
	    (sc->bge_flags & BGE_FLAG_PCIX) != 0)
		val |= BGE_PCISTATE_RETRY_SAME_DMA;
	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0)
		val |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR |
		    BGE_PCISTATE_ALLOW_APE_SHMEM_WR |
		    BGE_PCISTATE_ALLOW_APE_PSPACE_WR;
	pci_write_config(dev, BGE_PCI_PCISTATE, val, 4);
	pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
	pci_write_config(dev, BGE_PCI_CMD, command, 4);
	/*
	 * Disable PCI-X relaxed ordering to ensure the status block
	 * update comes before packet buffer DMA. Otherwise the driver
	 * may read a stale status block.
	 */
	if (sc->bge_flags & BGE_FLAG_PCIX) {
		devctl = pci_read_config(dev,
		    sc->bge_pcixcap + PCIXR_COMMAND, 2);
		devctl &= ~PCIXM_COMMAND_ERO;
		if (sc->bge_asicrev == BGE_ASICREV_BCM5703) {
			devctl &= ~PCIXM_COMMAND_MAX_READ;
			devctl |= PCIXM_COMMAND_MAX_READ_2048;
		} else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
			devctl &= ~(PCIXM_COMMAND_MAX_SPLITS |
			    PCIXM_COMMAND_MAX_READ);
			devctl |= PCIXM_COMMAND_MAX_READ_2048;
		}
		pci_write_config(dev, sc->bge_pcixcap + PCIXR_COMMAND,
		    devctl, 2);
	}
	/* Re-enable MSI, if necessary, and enable the memory arbiter. */
	if (BGE_IS_5714_FAMILY(sc)) {
		/* This chip disables MSI on reset. */
		if (sc->bge_flags & BGE_FLAG_MSI) {
			val = pci_read_config(dev,
			    sc->bge_msicap + PCIR_MSI_CTRL, 2);
			pci_write_config(dev,
			    sc->bge_msicap + PCIR_MSI_CTRL,
			    val | PCIM_MSICTRL_MSI_ENABLE, 2);
			val = CSR_READ_4(sc, BGE_MSI_MODE);
			CSR_WRITE_4(sc, BGE_MSI_MODE,
			    val | BGE_MSIMODE_ENABLE);
		}
		val = CSR_READ_4(sc, BGE_MARB_MODE);
		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
	} else
		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);

	/* Fix up byte swapping. */
	CSR_WRITE_4(sc, BGE_MODE_CTL, bge_dma_swap_options(sc));

	val = CSR_READ_4(sc, BGE_MAC_MODE);
	val = (val & ~mac_mode_mask) | mac_mode;
	CSR_WRITE_4(sc, BGE_MAC_MODE, val);
	DELAY(40);

	bge_ape_unlock(sc, BGE_APE_LOCK_GRC);

	if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
		for (i = 0; i < BGE_TIMEOUT; i++) {
			val = CSR_READ_4(sc, BGE_VCPU_STATUS);
			if (val & BGE_VCPU_STATUS_INIT_DONE)
				break;
			DELAY(100);
		}
		if (i == BGE_TIMEOUT) {
			device_printf(dev, "reset timed out\n");
			return (1);
		}
	} else {
		/*
		 * Poll until we see the 1's complement of the magic number.
		 * This indicates that the firmware initialization is complete.
		 * We expect this to fail if no chip containing the Ethernet
		 * address is fitted though.
		 */
		for (i = 0; i < BGE_TIMEOUT; i++) {
			DELAY(10);
			val = bge_readmem_ind(sc, BGE_SRAM_FW_MB);
			if (val == ~BGE_SRAM_FW_MB_MAGIC)
				break;
		}

		if ((sc->bge_flags & BGE_FLAG_EADDR) && i == BGE_TIMEOUT)
			device_printf(dev,
			    "firmware handshake timed out, found 0x%08x\n",
			    val);
		/* BCM57765 A0 needs additional time before accessing. */
		if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0)
			DELAY(10 * 1000);	/* XXX */
	}

	/*
	 * The 5704 in TBI mode apparently needs some special
	 * adjustment to ensure the SERDES drive level is set
	 * to 1.2V.
	 */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5704 &&
	    sc->bge_flags & BGE_FLAG_TBI) {
		val = CSR_READ_4(sc, BGE_SERDES_CFG);
		val = (val & ~0xFFF) | 0x880;
		CSR_WRITE_4(sc, BGE_SERDES_CFG, val);
	}

	/* XXX: Broadcom Linux driver. */
	if (sc->bge_flags & BGE_FLAG_PCIE &&
	    !BGE_IS_5717_PLUS(sc) &&
	    sc->bge_chipid != BGE_CHIPID_BCM5750_A0 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5785) {
		/* Enable Data FIFO protection. */
		val = CSR_READ_4(sc, 0x7C00);
		CSR_WRITE_4(sc, 0x7C00, val | (1 << 25));
	}

	if (sc->bge_asicrev == BGE_ASICREV_BCM5720)
		BGE_CLRBIT(sc, BGE_CPMU_CLCK_ORIDE,
		    CPMU_CLCK_ORIDE_MAC_ORIDE_EN);

	return (0);
}
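/*
 * Requeue a receive buffer that could not be passed up the stack (RX
 * error or mbuf allocation failure): rewrite the buffer descriptor in
 * place so the still-loaded DMA buffer is handed back to the chip.
 */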
static __inline void
bge_rxreuse_std(struct bge_softc *sc, int i)
{
	struct bge_rx_bd *r;

	r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std];
	r->bge_flags = BGE_RXBDFLAG_END;
	r->bge_len = sc->bge_cdata.bge_rx_std_seglen[i];
	r->bge_idx = i;
	BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
}
static __inline void
bge_rxreuse_jumbo(struct bge_softc *sc, int i)
{
	struct bge_extrx_bd *r;

	r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo];
	r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
	r->bge_len0 = sc->bge_cdata.bge_rx_jumbo_seglen[i][0];
	r->bge_len1 = sc->bge_cdata.bge_rx_jumbo_seglen[i][1];
	r->bge_len2 = sc->bge_cdata.bge_rx_jumbo_seglen[i][2];
	r->bge_len3 = sc->bge_cdata.bge_rx_jumbo_seglen[i][3];
	r->bge_idx = i;
	BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
}
/*
 * Frame reception handling. This is called if there's a frame
 * on the receive return list.
 *
 * Note: we have to be able to handle two possibilities here:
 * 1) the frame is from the jumbo receive ring
 * 2) the frame is from the standard receive ring
 */
static int
bge_rxeof(struct bge_softc *sc, uint16_t rx_prod, int holdlck)
{
	struct ifnet *ifp;
	int rx_npkts = 0, stdcnt = 0, jumbocnt = 0;
	uint16_t rx_cons;

	rx_cons = sc->bge_rx_saved_considx;

	/* Nothing to do. */
	if (rx_cons == rx_prod)
		return (rx_npkts);

	ifp = sc->bge_ifp;

	bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
	    sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
	    sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTWRITE);
	if (BGE_IS_JUMBO_CAPABLE(sc) &&
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN >
	    (MCLBYTES - ETHER_ALIGN))
		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
		    sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_POSTWRITE);

	while (rx_cons != rx_prod) {
		struct bge_rx_bd *cur_rx;
		uint16_t rxidx;
		struct mbuf *m = NULL;
		uint16_t vlan_tag = 0;
		int have_tag = 0;

#ifdef DEVICE_POLLING
		if (ifp->if_capenable & IFCAP_POLLING) {
			if (sc->rxcycles <= 0)
				break;
			sc->rxcycles--;
		}
#endif

		cur_rx = &sc->bge_ldata.bge_rx_return_ring[rx_cons];

		rxidx = cur_rx->bge_idx;
		BGE_INC(rx_cons, sc->bge_return_ring_cnt);

		if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING &&
		    cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
			have_tag = 1;
			vlan_tag = cur_rx->bge_vlan_tag;
		}

		if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
			jumbocnt++;
			m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
				bge_rxreuse_jumbo(sc, rxidx);
				continue;
			}
			if (bge_newbuf_jumbo(sc, rxidx) != 0) {
				bge_rxreuse_jumbo(sc, rxidx);
				ifp->if_iqdrops++;
				continue;
			}
			BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
		} else {
			stdcnt++;
			m = sc->bge_cdata.bge_rx_std_chain[rxidx];
			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
				bge_rxreuse_std(sc, rxidx);
				continue;
			}
			if (bge_newbuf_std(sc, rxidx) != 0) {
				bge_rxreuse_std(sc, rxidx);
				ifp->if_iqdrops++;
				continue;
			}
			BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
		}

		ifp->if_ipackets++;
#ifndef __NO_STRICT_ALIGNMENT
		/*
		 * For architectures with strict alignment we must make sure
		 * the payload is aligned.
		 */
		if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) {
			bcopy(m->m_data, m->m_data + ETHER_ALIGN,
			    cur_rx->bge_len);
			m->m_data += ETHER_ALIGN;
		}
#endif
		m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
		m->m_pkthdr.rcvif = ifp;

		if (ifp->if_capenable & IFCAP_RXCSUM)
			bge_rxcsum(sc, cur_rx, m);

		/*
		 * If we received a packet with a vlan tag,
		 * attach that information to the packet.
		 */
		if (have_tag) {
			m->m_pkthdr.ether_vtag = vlan_tag;
			m->m_flags |= M_VLANTAG;
		}

		if (holdlck != 0) {
			BGE_UNLOCK(sc);
			(*ifp->if_input)(ifp, m);
			BGE_LOCK(sc);
		} else
			(*ifp->if_input)(ifp, m);
		rx_npkts++;

		if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
			return (rx_npkts);
	}

	bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
	    sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREREAD);
	if (stdcnt > 0)
		bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
		    sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);

	if (jumbocnt > 0)
		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
		    sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);

	sc->bge_rx_saved_considx = rx_cons;
	bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
	if (stdcnt)
		bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, (sc->bge_std +
		    BGE_STD_RX_RING_CNT - 1) % BGE_STD_RX_RING_CNT);
	if (jumbocnt)
		bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, (sc->bge_jumbo +
		    BGE_JUMBO_RX_RING_CNT - 1) % BGE_JUMBO_RX_RING_CNT);
#ifdef notyet
	/*
	 * This register wraps very quickly under heavy packet drops.
	 * If you need correct statistics, you can enable this check.
	 */
	if (BGE_IS_5705_PLUS(sc))
		ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
#endif
	return (rx_npkts);
}
static void
bge_rxcsum(struct bge_softc *sc, struct bge_rx_bd *cur_rx, struct mbuf *m)
{

	if (BGE_IS_5717_PLUS(sc)) {
		if ((cur_rx->bge_flags & BGE_RXBDFLAG_IPV6) == 0) {
			if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
				if ((cur_rx->bge_error_flag &
				    BGE_RXERRFLAG_IP_CSUM_NOK) == 0)
					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
			}
			if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
				m->m_pkthdr.csum_data =
				    cur_rx->bge_tcp_udp_csum;
				m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
				    CSUM_PSEUDO_HDR;
			}
		}
	} else {
		if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
			if ((cur_rx->bge_ip_csum ^ 0xFFFF) == 0)
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
		}
		if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
		    m->m_pkthdr.len >= ETHER_MIN_NOPAD) {
			m->m_pkthdr.csum_data =
			    cur_rx->bge_tcp_udp_csum;
			m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
			    CSUM_PSEUDO_HDR;
		}
	}
}
static void
bge_txeof(struct bge_softc *sc, uint16_t tx_cons)
{
	struct bge_tx_bd *cur_tx;
	struct ifnet *ifp;

	BGE_LOCK_ASSERT(sc);

	/* Nothing to do. */
	if (sc->bge_tx_saved_considx == tx_cons)
		return;

	ifp = sc->bge_ifp;

	bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
	    sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_POSTWRITE);
	/*
	 * Go through our tx ring and free mbufs for those
	 * frames that have been sent.
	 */
	while (sc->bge_tx_saved_considx != tx_cons) {
		uint32_t idx;

		idx = sc->bge_tx_saved_considx;
		cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
		if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
			ifp->if_opackets++;
		if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
			bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag,
			    sc->bge_cdata.bge_tx_dmamap[idx],
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
			    sc->bge_cdata.bge_tx_dmamap[idx]);
			m_freem(sc->bge_cdata.bge_tx_chain[idx]);
			sc->bge_cdata.bge_tx_chain[idx] = NULL;
		}
		sc->bge_txcnt--;
		BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
	}

	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	if (sc->bge_txcnt == 0)
		sc->bge_timer = 0;
}
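/*
 * DEVICE_POLLING entry point. Mirrors the interrupt path: snapshot and
 * clear the status block, note link events, then run RX completion with
 * the polling budget (sc->rxcycles) and TX completion.
 */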
#ifdef DEVICE_POLLING
static int
bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct bge_softc *sc = ifp->if_softc;
	uint16_t rx_prod, tx_cons;
	uint32_t statusword;
	int rx_npkts = 0;

	BGE_LOCK(sc);
	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		BGE_UNLOCK(sc);
		return (rx_npkts);
	}

	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
	    sc->bge_cdata.bge_status_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	/* Fetch updates from the status block. */
	rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
	tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;

	statusword = sc->bge_ldata.bge_status_block->bge_status;
	/* Clear the status so the next pass only sees the changes. */
	sc->bge_ldata.bge_status_block->bge_status = 0;

	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
	    sc->bge_cdata.bge_status_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Note link event. It will be processed by POLL_AND_CHECK_STATUS. */
	if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED)
		sc->bge_link_evt++;

	if (cmd == POLL_AND_CHECK_STATUS)
		if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
		    sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
		    sc->bge_link_evt || (sc->bge_flags & BGE_FLAG_TBI))
			bge_link_upd(sc);

	sc->rxcycles = count;
	rx_npkts = bge_rxeof(sc, rx_prod, 1);
	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		BGE_UNLOCK(sc);
		return (rx_npkts);
	}
	bge_txeof(sc, tx_cons);
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		bge_start_locked(ifp);

	BGE_UNLOCK(sc);
	return (rx_npkts);
}
#endif /* DEVICE_POLLING */
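/*
 * MSI interrupt filter. Runs in primary interrupt context and only
 * enqueues bge_intr_task(); with single-shot MSI the controller has
 * already suppressed further interrupts until it is re-armed.
 */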
static int
bge_msi_intr(void *arg)
{
	struct bge_softc *sc;

	sc = (struct bge_softc *)arg;
	/*
	 * This interrupt is not shared and the controller has already
	 * disabled further interrupts.
	 */
	taskqueue_enqueue(sc->bge_tq, &sc->bge_intr_task);
	return (FILTER_HANDLED);
}
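/*
 * Taskqueue handler backing bge_msi_intr(). Re-arms the controller by
 * writing the status tag back to the IRQ mailbox, then processes RX and
 * TX completions outside primary interrupt context.
 */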
static void
bge_intr_task(void *arg, int pending)
{
	struct bge_softc *sc;
	struct ifnet *ifp;
	uint32_t status, status_tag;
	uint16_t rx_prod, tx_cons;

	sc = (struct bge_softc *)arg;
	ifp = sc->bge_ifp;

	BGE_LOCK(sc);
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		BGE_UNLOCK(sc);
		return;
	}

	/* Get updated status block. */
	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
	    sc->bge_cdata.bge_status_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* Save producer/consumer indices. */
	rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
	tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
	status = sc->bge_ldata.bge_status_block->bge_status;
	status_tag = sc->bge_ldata.bge_status_block->bge_status_tag << 24;
	/* Dirty the status flag. */
	sc->bge_ldata.bge_status_block->bge_status = 0;
	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
	    sc->bge_cdata.bge_status_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	if ((sc->bge_flags & BGE_FLAG_TAGGED_STATUS) == 0)
		status_tag = 0;

	if ((status & BGE_STATFLAG_LINKSTATE_CHANGED) != 0)
		bge_link_upd(sc);

	/* Let controller work. */
	bge_writembx(sc, BGE_MBX_IRQ0_LO, status_tag);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
	    sc->bge_rx_saved_considx != rx_prod) {
		/* Check RX return ring producer/consumer. */
		BGE_UNLOCK(sc);
		bge_rxeof(sc, rx_prod, 0);
		BGE_LOCK(sc);
	}
	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/* Check TX ring producer/consumer. */
		bge_txeof(sc, tx_cons);
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			bge_start_locked(ifp);
	}
	BGE_UNLOCK(sc);
}
static void
bge_intr(void *xsc)
{
	struct bge_softc *sc;
	struct ifnet *ifp;
	uint32_t statusword;
	uint16_t rx_prod, tx_cons;

	sc = xsc;

	BGE_LOCK(sc);

	ifp = sc->bge_ifp;

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING) {
		BGE_UNLOCK(sc);
		return;
	}
#endif

	/*
	 * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO. Don't
	 * disable interrupts by writing nonzero like we used to, since with
	 * our current organization this just gives complications and
	 * pessimizations for re-enabling interrupts. We used to have races
	 * instead of the necessary complications. Disabling interrupts
	 * would just reduce the chance of a status update while we are
	 * running (by switching to the interrupt-mode coalescence
	 * parameters), but this chance is already very low so it is more
	 * efficient to get another interrupt than prevent it.
	 *
	 * We do the ack first to ensure another interrupt if there is a
	 * status update after the ack. We don't check for the status
	 * changing later because it is more efficient to get another
	 * interrupt than prevent it, not quite as above (not checking is
	 * a smaller optimization than not toggling the interrupt enable,
	 * since checking doesn't involve PCI accesses and toggling requires
	 * the status check). So toggling would probably be a pessimization
	 * even with MSI. It would only be needed for using a task queue.
	 */
	bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);

	/*
	 * Do the mandatory PCI flush as well as get the link status.
	 */
	statusword = CSR_READ_4(sc, BGE_MAC_STS) & BGE_MACSTAT_LINK_CHANGED;

	/* Make sure the descriptor ring indexes are coherent. */
	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
	    sc->bge_cdata.bge_status_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
	tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
	sc->bge_ldata.bge_status_block->bge_status = 0;
	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
	    sc->bge_cdata.bge_status_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
	    sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
	    statusword || sc->bge_link_evt)
		bge_link_upd(sc);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/* Check RX return ring producer/consumer. */
		bge_rxeof(sc, rx_prod, 1);
	}

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/* Check TX ring producer/consumer. */
		bge_txeof(sc, tx_cons);
	}

	if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
	    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		bge_start_locked(ifp);

	BGE_UNLOCK(sc);
}
static void
bge_asf_driver_up(struct bge_softc *sc)
{
	if (sc->bge_asf_mode & ASF_STACKUP) {
		/* Send ASF heartbeat approx. every 2s */
		if (sc->bge_asf_count)
			sc->bge_asf_count--;
		else {
			sc->bge_asf_count = 2;
			bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB,
			    BGE_FW_CMD_DRV_ALIVE);
			bge_writemem_ind(sc, BGE_SRAM_FW_CMD_LEN_MB, 4);
			bge_writemem_ind(sc, BGE_SRAM_FW_CMD_DATA_MB,
			    BGE_FW_HB_TIMEOUT_SEC);
			CSR_WRITE_4(sc, BGE_RX_CPU_EVENT,
			    CSR_READ_4(sc, BGE_RX_CPU_EVENT) |
			    BGE_RX_CPU_DRV_EVENT);
		}
	}
}
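/*
 * Once-per-second housekeeping callout: update statistics, tick the PHY
 * when the link is down, register a link event for TBI cards, send the
 * ASF heartbeat and run the TX watchdog.
 */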
static void
bge_tick(void *xsc)
{
	struct bge_softc *sc = xsc;
	struct mii_data *mii = NULL;

	BGE_LOCK_ASSERT(sc);

	/* Synchronize with possible callout reset/stop. */
	if (callout_pending(&sc->bge_stat_ch) ||
	    !callout_active(&sc->bge_stat_ch))
		return;

	if (BGE_IS_5705_PLUS(sc))
		bge_stats_update_regs(sc);
	else
		bge_stats_update(sc);

	/* XXX Add APE heartbeat check here? */

	if ((sc->bge_flags & BGE_FLAG_TBI) == 0) {
		mii = device_get_softc(sc->bge_miibus);
		/*
		 * Do not touch the PHY if we have link up. This could break
		 * IPMI/ASF mode or produce extra input errors
		 * (extra errors were reported for bcm5701 & bcm5704).
		 */
		if (!sc->bge_link)
			mii_tick(mii);
	} else {
		/*
		 * Since in TBI mode auto-polling can't be used we should poll
		 * link status manually. Here we register a pending link event
		 * and trigger an interrupt.
		 */
#ifdef DEVICE_POLLING
		/* In polling mode we poll link state in bge_poll(). */
		if (!(sc->bge_ifp->if_capenable & IFCAP_POLLING))
#endif
		{
			sc->bge_link_evt++;
			if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
			    sc->bge_flags & BGE_FLAG_5788)
				BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
			else
				BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
		}
	}

	bge_asf_driver_up(sc);
	bge_watchdog(sc);

	callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
}
static void
bge_stats_update_regs(struct bge_softc *sc)
{
	struct ifnet *ifp;
	struct bge_mac_stats *stats;
	uint32_t val;

	ifp = sc->bge_ifp;
	stats = &sc->bge_mac_stats;

	stats->ifHCOutOctets +=
	    CSR_READ_4(sc, BGE_TX_MAC_STATS_OCTETS);
	stats->etherStatsCollisions +=
	    CSR_READ_4(sc, BGE_TX_MAC_STATS_COLLS);
	stats->outXonSent +=
	    CSR_READ_4(sc, BGE_TX_MAC_STATS_XON_SENT);
	stats->outXoffSent +=
	    CSR_READ_4(sc, BGE_TX_MAC_STATS_XOFF_SENT);
	stats->dot3StatsInternalMacTransmitErrors +=
	    CSR_READ_4(sc, BGE_TX_MAC_STATS_ERRORS);
	stats->dot3StatsSingleCollisionFrames +=
	    CSR_READ_4(sc, BGE_TX_MAC_STATS_SINGLE_COLL);
	stats->dot3StatsMultipleCollisionFrames +=
	    CSR_READ_4(sc, BGE_TX_MAC_STATS_MULTI_COLL);
	stats->dot3StatsDeferredTransmissions +=
	    CSR_READ_4(sc, BGE_TX_MAC_STATS_DEFERRED);
	stats->dot3StatsExcessiveCollisions +=
	    CSR_READ_4(sc, BGE_TX_MAC_STATS_EXCESS_COLL);
	stats->dot3StatsLateCollisions +=
	    CSR_READ_4(sc, BGE_TX_MAC_STATS_LATE_COLL);
	stats->ifHCOutUcastPkts +=
	    CSR_READ_4(sc, BGE_TX_MAC_STATS_UCAST);
	stats->ifHCOutMulticastPkts +=
	    CSR_READ_4(sc, BGE_TX_MAC_STATS_MCAST);
	stats->ifHCOutBroadcastPkts +=
	    CSR_READ_4(sc, BGE_TX_MAC_STATS_BCAST);

	stats->ifHCInOctets +=
	    CSR_READ_4(sc, BGE_RX_MAC_STATS_OCTESTS);
	stats->etherStatsFragments +=
	    CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAGMENTS);
	stats->ifHCInUcastPkts +=
	    CSR_READ_4(sc, BGE_RX_MAC_STATS_UCAST);
	stats->ifHCInMulticastPkts +=
	    CSR_READ_4(sc, BGE_RX_MAC_STATS_MCAST);
	stats->ifHCInBroadcastPkts +=
	    CSR_READ_4(sc, BGE_RX_MAC_STATS_BCAST);
	stats->dot3StatsFCSErrors +=
	    CSR_READ_4(sc, BGE_RX_MAC_STATS_FCS_ERRORS);
	stats->dot3StatsAlignmentErrors +=
	    CSR_READ_4(sc, BGE_RX_MAC_STATS_ALGIN_ERRORS);
	stats->xonPauseFramesReceived +=
	    CSR_READ_4(sc, BGE_RX_MAC_STATS_XON_RCVD);
	stats->xoffPauseFramesReceived +=
	    CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_RCVD);
	stats->macControlFramesReceived +=
	    CSR_READ_4(sc, BGE_RX_MAC_STATS_CTRL_RCVD);
	stats->xoffStateEntered +=
	    CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_ENTERED);
	stats->dot3StatsFramesTooLong +=
	    CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAME_TOO_LONG);
	stats->etherStatsJabbers +=
	    CSR_READ_4(sc, BGE_RX_MAC_STATS_JABBERS);
	stats->etherStatsUndersizePkts +=
	    CSR_READ_4(sc, BGE_RX_MAC_STATS_UNDERSIZE);

	stats->FramesDroppedDueToFilters +=
	    CSR_READ_4(sc, BGE_RXLP_LOCSTAT_FILTDROP);
	stats->DmaWriteQueueFull +=
	    CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_WRQ_FULL);
	stats->DmaWriteHighPriQueueFull +=
	    CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL);
	stats->NoMoreRxBDs +=
	    CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
	/*
	 * Unlike other controllers, the BGE_RXLP_LOCSTAT_IFIN_DROPS
	 * counter of the BCM5717, BCM5718, BCM5719 A0 and BCM5720 A0
	 * includes unwanted multicast frames. This comes from a
	 * silicon bug, and the known workaround to get a rough (not
	 * exact) counter is to enable interrupt on MBUF low water
	 * attention. This can be accomplished by setting the
	 * BGE_HCCMODE_ATTN bit of BGE_HCC_MODE, the
	 * BGE_BMANMODE_LOMBUF_ATTN bit of BGE_BMAN_MODE and the
	 * BGE_MODECTL_FLOWCTL_ATTN_INTR bit of BGE_MODE_CTL.
	 * However, that change would generate more interrupts, and
	 * multiple frames could still be lost during
	 * BGE_MODECTL_FLOWCTL_ATTN_INTR interrupt handling.
	 * Given that the workaround still would not yield a correct
	 * counter, it does not seem worth implementing. So skip
	 * reading the counter on controllers that have the bug.
	 */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5717 &&
	    sc->bge_chipid != BGE_CHIPID_BCM5719_A0 &&
	    sc->bge_chipid != BGE_CHIPID_BCM5720_A0)
		stats->InputDiscards +=
		    CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
	stats->InputErrors +=
	    CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
	stats->RecvThresholdHit +=
	    CSR_READ_4(sc, BGE_RXLP_LOCSTAT_RXTHRESH_HIT);

	ifp->if_collisions = (u_long)stats->etherStatsCollisions;
	ifp->if_ierrors = (u_long)(stats->NoMoreRxBDs + stats->InputDiscards +
	    stats->InputErrors);

	if (sc->bge_flags & BGE_FLAG_RDMA_BUG) {
		/*
		 * If the controller transmitted more than
		 * BGE_NUM_RDMA_CHANNELS frames, it's safe to disable the
		 * workaround for the DMA engine's miscalculation of
		 * TXMBUF space.
		 */
		if (stats->ifHCOutUcastPkts + stats->ifHCOutMulticastPkts +
		    stats->ifHCOutBroadcastPkts > BGE_NUM_RDMA_CHANNELS) {
			val = CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL);
			if (sc->bge_asicrev == BGE_ASICREV_BCM5719)
				val &= ~BGE_RDMA_TX_LENGTH_WA_5719;
			else
				val &= ~BGE_RDMA_TX_LENGTH_WA_5720;
			CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, val);
			sc->bge_flags &= ~BGE_FLAG_RDMA_BUG;
		}
	}
}
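/*
 * The MAC statistics registers read below are clear-on-read; reading
 * and discarding each value zeroes the hardware counters.
 */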
static void
bge_stats_clear_regs(struct bge_softc *sc)
{

	CSR_READ_4(sc, BGE_TX_MAC_STATS_OCTETS);
	CSR_READ_4(sc, BGE_TX_MAC_STATS_COLLS);
	CSR_READ_4(sc, BGE_TX_MAC_STATS_XON_SENT);
	CSR_READ_4(sc, BGE_TX_MAC_STATS_XOFF_SENT);
	CSR_READ_4(sc, BGE_TX_MAC_STATS_ERRORS);
	CSR_READ_4(sc, BGE_TX_MAC_STATS_SINGLE_COLL);
	CSR_READ_4(sc, BGE_TX_MAC_STATS_MULTI_COLL);
	CSR_READ_4(sc, BGE_TX_MAC_STATS_DEFERRED);
	CSR_READ_4(sc, BGE_TX_MAC_STATS_EXCESS_COLL);
	CSR_READ_4(sc, BGE_TX_MAC_STATS_LATE_COLL);
	CSR_READ_4(sc, BGE_TX_MAC_STATS_UCAST);
	CSR_READ_4(sc, BGE_TX_MAC_STATS_MCAST);
	CSR_READ_4(sc, BGE_TX_MAC_STATS_BCAST);

	CSR_READ_4(sc, BGE_RX_MAC_STATS_OCTESTS);
	CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAGMENTS);
	CSR_READ_4(sc, BGE_RX_MAC_STATS_UCAST);
	CSR_READ_4(sc, BGE_RX_MAC_STATS_MCAST);
	CSR_READ_4(sc, BGE_RX_MAC_STATS_BCAST);
	CSR_READ_4(sc, BGE_RX_MAC_STATS_FCS_ERRORS);
	CSR_READ_4(sc, BGE_RX_MAC_STATS_ALGIN_ERRORS);
	CSR_READ_4(sc, BGE_RX_MAC_STATS_XON_RCVD);
	CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_RCVD);
	CSR_READ_4(sc, BGE_RX_MAC_STATS_CTRL_RCVD);
	CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_ENTERED);
	CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAME_TOO_LONG);
	CSR_READ_4(sc, BGE_RX_MAC_STATS_JABBERS);
	CSR_READ_4(sc, BGE_RX_MAC_STATS_UNDERSIZE);

	CSR_READ_4(sc, BGE_RXLP_LOCSTAT_FILTDROP);
	CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_WRQ_FULL);
	CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL);
	CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
	CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
	CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
	CSR_READ_4(sc, BGE_RXLP_LOCSTAT_RXTHRESH_HIT);
}
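/*
 * Legacy statistics path for pre-5705 controllers: the chip maintains a
 * struct bge_stats in NIC internal memory, which we read through the
 * PCI memory window via READ_STAT() and difference against saved values.
 */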
static void
bge_stats_update(struct bge_softc *sc)
{
	struct ifnet *ifp;
	bus_size_t stats;
	uint32_t cnt;	/* current register value */

	ifp = sc->bge_ifp;

	stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;

#define	READ_STAT(sc, stats, stat) \
	CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))

	cnt = READ_STAT(sc, stats, txstats.etherStatsCollisions.bge_addr_lo);
	ifp->if_collisions += (uint32_t)(cnt - sc->bge_tx_collisions);
	sc->bge_tx_collisions = cnt;

	cnt = READ_STAT(sc, stats, nicNoMoreRxBDs.bge_addr_lo);
	ifp->if_ierrors += (uint32_t)(cnt - sc->bge_rx_nobds);
	sc->bge_rx_nobds = cnt;
	cnt = READ_STAT(sc, stats, ifInErrors.bge_addr_lo);
	ifp->if_ierrors += (uint32_t)(cnt - sc->bge_rx_inerrs);
	sc->bge_rx_inerrs = cnt;
	cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo);
	ifp->if_ierrors += (uint32_t)(cnt - sc->bge_rx_discards);
	sc->bge_rx_discards = cnt;

	cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo);
	ifp->if_oerrors += (uint32_t)(cnt - sc->bge_tx_discards);
	sc->bge_tx_discards = cnt;

#undef	READ_STAT
}
/*
 * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
 * but when such padded frames employ the bge IP/TCP checksum offload,
 * the hardware checksum assist gives incorrect results (possibly
 * from incorporating its own padding into the UDP/TCP checksum; who knows).
 * If we pad such runts with zeros, the onboard checksum comes out correct.
 */
static __inline int
bge_cksum_pad(struct mbuf *m)
{
	int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len;
	struct mbuf *last;

	/* If there's only the packet-header and we can pad there, use it. */
	if (m->m_pkthdr.len == m->m_len && M_WRITABLE(m) &&
	    M_TRAILINGSPACE(m) >= padlen) {
		last = m;
	} else {
		/*
		 * Walk packet chain to find last mbuf. We will either
		 * pad there, or append a new mbuf and pad it.
		 */
		for (last = m; last->m_next != NULL; last = last->m_next);
		if (!(M_WRITABLE(last) && M_TRAILINGSPACE(last) >= padlen)) {
			/* Allocate new empty mbuf, pad it. Compact later. */
			struct mbuf *n;

			MGET(n, M_NOWAIT, MT_DATA);
			if (n == NULL)
				return (ENOBUFS);
			n->m_len = 0;
			last->m_next = n;
			last = n;
		}
	}

	/* Now zero the pad area, to avoid the bge cksum-assist bug. */
	memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
	last->m_len += padlen;
	m->m_pkthdr.len += padlen;

	return (0);
}
static struct mbuf *
bge_check_short_dma(struct mbuf *m)
{
	struct mbuf *n;
	int found;

	/*
	 * If the device receives two back-to-back send BDs with less than
	 * or equal to 8 total bytes then the device may hang. The two
	 * back-to-back send BDs must be in the same frame for this failure
	 * to occur. Scan mbuf chains and see whether two back-to-back
	 * send BDs are there. If this is the case, allocate a new mbuf
	 * and copy the frame to work around the silicon bug.
	 */
	for (n = m, found = 0; n != NULL; n = n->m_next) {
		if (n->m_len < 8) {
			found++;
			if (found > 1)
				break;
			continue;
		}
		found = 0;
	}

	if (found > 1) {
		n = m_defrag(m, M_NOWAIT);
		if (n == NULL)
			m_freem(m);
	} else
		n = m;
	return (n);
}
static struct mbuf *
bge_setup_tso(struct bge_softc *sc, struct mbuf *m, uint16_t *mss,
    uint16_t *flags)
{
	struct ip *ip;
	struct tcphdr *tcp;
	struct mbuf *n;
	uint16_t hlen;
	int poff;

	if (M_WRITABLE(m) == 0) {
		/* Get a writable copy. */
		n = m_dup(m, M_NOWAIT);
		m_freem(m);
		if (n == NULL)
			return (NULL);
		m = n;
	}
	m = m_pullup(m, sizeof(struct ether_header) + sizeof(struct ip));
	if (m == NULL)
		return (NULL);
	ip = (struct ip *)(mtod(m, char *) + sizeof(struct ether_header));
	poff = sizeof(struct ether_header) + (ip->ip_hl << 2);
	m = m_pullup(m, poff + sizeof(struct tcphdr));
	if (m == NULL)
		return (NULL);
	tcp = (struct tcphdr *)(mtod(m, char *) + poff);
	m = m_pullup(m, poff + (tcp->th_off << 2));
	if (m == NULL)
		return (NULL);
	/*
	 * It seems the controller doesn't modify the IP length and TCP
	 * pseudo checksum. These checksums computed by the upper stack
	 * should be 0.
	 */
	*mss = m->m_pkthdr.tso_segsz;
	ip = (struct ip *)(mtod(m, char *) + sizeof(struct ether_header));
	ip->ip_sum = 0;
	ip->ip_len = htons(*mss + (ip->ip_hl << 2) + (tcp->th_off << 2));
	/* Clear pseudo checksum computed by TCP stack. */
	tcp = (struct tcphdr *)(mtod(m, char *) + poff);
	tcp->th_sum = 0;
	/*
	 * Broadcom controllers use a different descriptor format for
	 * TSO depending on ASIC revision. Due to the TSO-capable firmware
	 * license issue and the lower performance of firmware based TSO,
	 * we only support hardware based TSO.
	 */
	/* Calculate header length, incl. TCP/IP options, in 32 bit units. */
	hlen = ((ip->ip_hl << 2) + (tcp->th_off << 2)) >> 2;
	if (sc->bge_flags & BGE_FLAG_TSO3) {
		/*
		 * For BCM5717 and newer controllers, hardware based TSO
		 * uses the 14 lower bits of the bge_mss field to store the
		 * MSS and the upper 2 bits to store the lowest 2 bits of
		 * the IP/TCP header length. The upper 6 bits of the header
		 * length are stored in the bge_flags[14:10,4] field. Jumbo
		 * frames are supported.
		 */
		*mss |= ((hlen & 0x3) << 14);
		*flags |= ((hlen & 0xF8) << 7) | ((hlen & 0x4) << 2);
	} else {
		/*
		 * For BCM5755 and newer controllers, hardware based TSO uses
		 * the lower 11 bits to store the MSS and the upper 5 bits to
		 * store the IP/TCP header length. Jumbo frames are not
		 * supported.
		 */
		*mss |= (hlen << 11);
	}
	return (m);
}
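/*
 * Worked example of the TSO3 encoding in bge_setup_tso() above
 * (illustrative only): a 20-byte IP header plus a 32-byte TCP header
 * gives hlen = 52 >> 2 = 13 (0xD). The low two bits (0x1) land in
 * bge_mss[15:14] via ((hlen & 0x3) << 14); the remaining bits are
 * folded into bge_flags as ((hlen & 0xF8) << 7) | ((hlen & 0x4) << 2),
 * i.e. bits 10 and 4 for this header length.
 */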
/*
 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
 * pointers to descriptors.
 */
static int
bge_encap(struct bge_softc *sc, struct mbuf **m_head, uint32_t *txidx)
{
	bus_dma_segment_t segs[BGE_NSEG_NEW];
	bus_dmamap_t map;
	struct bge_tx_bd *d;
	struct mbuf *m = *m_head;
	uint32_t idx = *txidx;
	uint16_t csum_flags, mss, vlan_tag;
	int nsegs, i, error;

	csum_flags = 0;
	mss = 0;
	vlan_tag = 0;
	if ((sc->bge_flags & BGE_FLAG_SHORT_DMA_BUG) != 0 &&
	    m->m_next != NULL) {
		*m_head = bge_check_short_dma(m);
		if (*m_head == NULL)
			return (ENOBUFS);
		m = *m_head;
	}
	if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
		*m_head = m = bge_setup_tso(sc, m, &mss, &csum_flags);
		if (*m_head == NULL)
			return (ENOBUFS);
		csum_flags |= BGE_TXBDFLAG_CPU_PRE_DMA |
		    BGE_TXBDFLAG_CPU_POST_DMA;
	} else if ((m->m_pkthdr.csum_flags & sc->bge_csum_features) != 0) {
		if (m->m_pkthdr.csum_flags & CSUM_IP)
			csum_flags |= BGE_TXBDFLAG_IP_CSUM;
		if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) {
			csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
			if (m->m_pkthdr.len < ETHER_MIN_NOPAD &&
			    (error = bge_cksum_pad(m)) != 0) {
				m_freem(m);
				*m_head = NULL;
				return (error);
			}
		}
	}

	if ((m->m_pkthdr.csum_flags & CSUM_TSO) == 0) {
		if (sc->bge_flags & BGE_FLAG_JUMBO_FRAME &&
		    m->m_pkthdr.len > ETHER_MAX_LEN)
			csum_flags |= BGE_TXBDFLAG_JUMBO_FRAME;
		if (sc->bge_forced_collapse > 0 &&
		    (sc->bge_flags & BGE_FLAG_PCIE) != 0 && m->m_next != NULL) {
			/*
			 * Forcibly collapse mbuf chains to overcome a
			 * hardware limitation which supports only a single
			 * outstanding DMA read operation.
			 */
			if (sc->bge_forced_collapse == 1)
				m = m_defrag(m, M_NOWAIT);
			else
				m = m_collapse(m, M_NOWAIT,
				    sc->bge_forced_collapse);
			if (m == NULL)
				m = *m_head;
			*m_head = m;
		}
	}

	map = sc->bge_cdata.bge_tx_dmamap[idx];
	error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map, m, segs,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		m = m_collapse(m, M_NOWAIT, BGE_NSEG_NEW);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map,
		    m, segs, &nsegs, BUS_DMA_NOWAIT);
		if (error) {
			m_freem(m);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);

	/* Check if we have enough free send BDs. */
	if (sc->bge_txcnt + nsegs >= BGE_TX_RING_CNT) {
		bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, map);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, map, BUS_DMASYNC_PREWRITE);

	if (m->m_flags & M_VLANTAG) {
		csum_flags |= BGE_TXBDFLAG_VLAN_TAG;
		vlan_tag = m->m_pkthdr.ether_vtag;
	}

	if (sc->bge_asicrev == BGE_ASICREV_BCM5762 &&
	    (m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
		/*
		 * The 5725 family of devices corrupts TSO packets when TSO
		 * DMA buffers cross into regions which are within MSS bytes
		 * of a 4GB boundary. If we encounter the condition, drop the
		 * packet.
		 */
		for (i = 0; ; i++) {
			d = &sc->bge_ldata.bge_tx_ring[idx];
			d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
			d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
			d->bge_len = segs[i].ds_len;
			if (d->bge_addr.bge_addr_lo + segs[i].ds_len + mss <
			    d->bge_addr.bge_addr_lo)
				break;
			d->bge_flags = csum_flags;
			d->bge_vlan_tag = vlan_tag;
			d->bge_mss = mss;
			if (i == nsegs - 1)
				break;
			BGE_INC(idx, BGE_TX_RING_CNT);
		}
		if (i != nsegs - 1) {
			bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, map);
			m_freem(*m_head);
			*m_head = NULL;
			return (EIO);
		}
	} else {
		for (i = 0; ; i++) {
			d = &sc->bge_ldata.bge_tx_ring[idx];
			d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
			d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
			d->bge_len = segs[i].ds_len;
			d->bge_flags = csum_flags;
			d->bge_vlan_tag = vlan_tag;
			d->bge_mss = mss;
			if (i == nsegs - 1)
				break;
			BGE_INC(idx, BGE_TX_RING_CNT);
		}
	}

	/* Mark the last segment as end of packet... */
	d->bge_flags |= BGE_TXBDFLAG_END;

	/*
	 * Ensure that the map for this transmission
	 * is placed at the array index of the last descriptor
	 * in this chain.
	 */
	sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
	sc->bge_cdata.bge_tx_dmamap[idx] = map;
	sc->bge_cdata.bge_tx_chain[idx] = m;
	sc->bge_txcnt += nsegs;

	BGE_INC(idx, BGE_TX_RING_CNT);
	*txidx = idx;

	return (0);
}
/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit descriptors.
 */
static void
bge_start_locked(struct ifnet *ifp)
{
	struct bge_softc *sc;
	struct mbuf *m_head;
	uint32_t prodidx;
	int count;

	sc = ifp->if_softc;
	BGE_LOCK_ASSERT(sc);

	if (!sc->bge_link ||
	    (ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	prodidx = sc->bge_tx_prodidx;

	for (count = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd);) {
		if (sc->bge_txcnt > BGE_TX_RING_CNT - 16) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (bge_encap(sc, &m_head, &prodidx)) {
			if (m_head == NULL)
				break;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		++count;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to the listener.
		 */
#ifdef ETHER_BPF_MTAP
		ETHER_BPF_MTAP(ifp, m_head);
#else
		BPF_MTAP(ifp, m_head);
#endif
	}

	if (count > 0) {
		bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
		    sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE);
		/* Transmit. */
		bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
		/* 5700 b2 errata */
		if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
			bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);

		sc->bge_tx_prodidx = prodidx;

		/*
		 * Set a timeout in case the chip goes out to lunch.
		 */
		sc->bge_timer = BGE_TX_TIMEOUT;
	}
}
/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit descriptors.
 */
static void
bge_start(struct ifnet *ifp)
{
	struct bge_softc *sc;

	sc = ifp->if_softc;
	BGE_LOCK(sc);
	bge_start_locked(ifp);
	BGE_UNLOCK(sc);
}
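/*
 * Bring the interface up with the driver lock held: reset and
 * reinitialize the chip, program the MAC address and RX filters,
 * populate the RX rings, enable the TX/RX MACs and unmask interrupts
 * (unless polling), then start the periodic tick.
 */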
static void
bge_init_locked(struct bge_softc *sc)
{
	struct ifnet *ifp;
	uint16_t *m;
	uint32_t mode;

	BGE_LOCK_ASSERT(sc);

	ifp = sc->bge_ifp;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return;

	/* Cancel pending I/O and flush buffers. */
	bge_stop(sc);

	bge_stop_fw(sc);
	bge_sig_pre_reset(sc, BGE_RESET_START);
	bge_reset(sc);
	bge_sig_legacy(sc, BGE_RESET_START);
	bge_sig_post_reset(sc, BGE_RESET_START);

	bge_chipinit(sc);

	/*
	 * Init the various state machines, ring
	 * control blocks and firmware.
	 */
	if (bge_blockinit(sc)) {
		device_printf(sc->bge_dev, "initialization failure\n");
		return;
	}

	/* Specify MTU. */
	CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
	    ETHER_HDR_LEN + ETHER_CRC_LEN +
	    (ifp->if_capenable & IFCAP_VLAN_MTU ? ETHER_VLAN_ENCAP_LEN : 0));

	/* Load our MAC address. */
	m = (uint16_t *)IF_LLADDR(sc->bge_ifp);
	CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
	CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));

	/* Program promiscuous mode. */
	bge_setpromisc(sc);

	/* Program multicast filter. */
	bge_setmulti(sc);

	/* Program VLAN tag stripping. */
	bge_setvlan(sc);

	/* Override UDP checksum offloading. */
	if (sc->bge_forced_udpcsum == 0)
		sc->bge_csum_features &= ~CSUM_UDP;
	else
		sc->bge_csum_features |= CSUM_UDP;
	if (ifp->if_capabilities & IFCAP_TXCSUM &&
	    ifp->if_capenable & IFCAP_TXCSUM) {
		ifp->if_hwassist &= ~(BGE_CSUM_FEATURES | CSUM_UDP);
		ifp->if_hwassist |= sc->bge_csum_features;
	}

	/* Init RX ring. */
	if (bge_init_rx_ring_std(sc) != 0) {
		device_printf(sc->bge_dev, "no memory for std Rx buffers.\n");
		bge_stop(sc);
		return;
	}

	/*
	 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
	 * memory to ensure that the chip has in fact read the first
	 * entry of the ring.
	 */
	if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
		uint32_t v, i;
		for (i = 0; i < 10; i++) {
			DELAY(20);
			v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
			if (v == (MCLBYTES - ETHER_ALIGN))
				break;
		}
		if (i == 10)
			device_printf(sc->bge_dev,
			    "5705 A0 chip failed to load RX ring\n");
	}

	/* Init jumbo RX ring. */
	if (BGE_IS_JUMBO_CAPABLE(sc) &&
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN >
	    (MCLBYTES - ETHER_ALIGN)) {
		if (bge_init_rx_ring_jumbo(sc) != 0) {
			device_printf(sc->bge_dev,
			    "no memory for jumbo Rx buffers.\n");
			bge_stop(sc);
			return;
		}
	}

	/* Init our RX return ring index. */
	sc->bge_rx_saved_considx = 0;

	/* Init our RX/TX stat counters. */
	sc->bge_rx_discards = sc->bge_tx_discards = sc->bge_tx_collisions = 0;

	/* Init TX ring. */
	bge_init_tx_ring(sc);

	/* Enable TX MAC state machine lockup fix. */
	mode = CSR_READ_4(sc, BGE_TX_MODE);
	if (BGE_IS_5755_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5906)
		mode |= BGE_TXMODE_MBUF_LOCKUP_FIX;
	if (sc->bge_asicrev == BGE_ASICREV_BCM5720 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5762) {
		mode &= ~(BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
		mode |= CSR_READ_4(sc, BGE_TX_MODE) &
		    (BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
	}
	/* Turn on transmitter. */
	CSR_WRITE_4(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE);
	DELAY(100);

	/* Turn on receiver. */
	mode = CSR_READ_4(sc, BGE_RX_MODE);
	if (BGE_IS_5755_PLUS(sc))
		mode |= BGE_RXMODE_IPV6_ENABLE;
	CSR_WRITE_4(sc, BGE_RX_MODE, mode | BGE_RXMODE_ENABLE);
	DELAY(10);

	/*
	 * Set the number of good frames to receive after RX MBUF
	 * Low Watermark has been reached. After the RX MAC receives
	 * this number of frames, it will drop subsequent incoming
	 * frames until the MBUF High Watermark is reached.
	 */
	if (BGE_IS_57765_PLUS(sc))
		CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 1);
	else
		CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);

	/* Clear MAC statistics. */
	if (BGE_IS_5705_PLUS(sc))
		bge_stats_clear_regs(sc);

	/* Tell firmware we're alive. */
	BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);

#ifdef DEVICE_POLLING
	/* Disable interrupts if we are polling. */
	if (ifp->if_capenable & IFCAP_POLLING) {
		BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
		    BGE_PCIMISCCTL_MASK_PCI_INTR);
		bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
	} else
#endif
	{
		/* Enable host interrupts. */
		BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
		BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
		bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
	}

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	bge_ifmedia_upd_locked(ifp);

	callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
}
static void
bge_init(void *xsc)
{
	struct bge_softc *sc = xsc;

	BGE_LOCK(sc);
	bge_init_locked(sc);
	BGE_UNLOCK(sc);
}
/*
 * Set media options.
 */
static int
bge_ifmedia_upd(struct ifnet *ifp)
{
	struct bge_softc *sc = ifp->if_softc;
	int res;

	BGE_LOCK(sc);
	res = bge_ifmedia_upd_locked(ifp);
	BGE_UNLOCK(sc);

	return (res);
}
static int
bge_ifmedia_upd_locked(struct ifnet *ifp)
{
	struct bge_softc *sc = ifp->if_softc;
	struct mii_data *mii;
	struct mii_softc *miisc;
	struct ifmedia *ifm;

	BGE_LOCK_ASSERT(sc);

	ifm = &sc->bge_ifmedia;

	/* If this is a 1000baseX NIC, enable the TBI port. */
	if (sc->bge_flags & BGE_FLAG_TBI) {
		if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
			return (EINVAL);
		switch(IFM_SUBTYPE(ifm->ifm_media)) {
		case IFM_AUTO:
			/*
			 * The BCM5704 ASIC appears to have a special
			 * mechanism for programming the autoneg
			 * advertisement registers in TBI mode.
			 */
			if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
				uint32_t sgdig;
				sgdig = CSR_READ_4(sc, BGE_SGDIG_STS);
				if (sgdig & BGE_SGDIGSTS_DONE) {
					CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
					sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
					sgdig |= BGE_SGDIGCFG_AUTO |
					    BGE_SGDIGCFG_PAUSE_CAP |
					    BGE_SGDIGCFG_ASYM_PAUSE;
					CSR_WRITE_4(sc, BGE_SGDIG_CFG,
					    sgdig | BGE_SGDIGCFG_SEND);
					DELAY(5);
					CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
				}
			}
			break;
		case IFM_1000_SX:
			if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
				BGE_CLRBIT(sc, BGE_MAC_MODE,
				    BGE_MACMODE_HALF_DUPLEX);
			} else {
				BGE_SETBIT(sc, BGE_MAC_MODE,
				    BGE_MACMODE_HALF_DUPLEX);
			}
			DELAY(40);
			break;
		default:
			return (EINVAL);
		}
		return (0);
	}

	sc->bge_link_evt++;
	mii = device_get_softc(sc->bge_miibus);
	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
		PHY_RESET(miisc);
	mii_mediachg(mii);

	/*
	 * Force an interrupt so that we will call bge_link_upd
	 * if needed and clear any pending link state attention.
	 * Without this we are not getting any further interrupts
	 * for link state changes and thus will not UP the link and
	 * not be able to send in bge_start_locked. The only
	 * way to get things working was to receive a packet and
	 * get an RX intr.
	 * bge_tick should help for fiber cards and we might not
	 * need to do this here if BGE_FLAG_TBI is set but as
	 * we poll for fiber anyway it should not harm.
	 */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
	    sc->bge_flags & BGE_FLAG_5788)
		BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
	else
		BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);

	return (0);
}
/*
 * Report current media status.
 */
static void
bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct bge_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	BGE_LOCK(sc);

	if ((ifp->if_flags & IFF_UP) == 0) {
		BGE_UNLOCK(sc);
		return;
	}
	if (sc->bge_flags & BGE_FLAG_TBI) {
		ifmr->ifm_status = IFM_AVALID;
		ifmr->ifm_active = IFM_ETHER;
		if (CSR_READ_4(sc, BGE_MAC_STS) &
		    BGE_MACSTAT_TBI_PCS_SYNCHED)
			ifmr->ifm_status |= IFM_ACTIVE;
		else {
			ifmr->ifm_active |= IFM_NONE;
			BGE_UNLOCK(sc);
			return;
		}
		ifmr->ifm_active |= IFM_1000_SX;
		if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
			ifmr->ifm_active |= IFM_HDX;
		else
			ifmr->ifm_active |= IFM_FDX;
		BGE_UNLOCK(sc);
		return;
	}

	mii = device_get_softc(sc->bge_miibus);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

	BGE_UNLOCK(sc);
}
static int
bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct bge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct mii_data *mii;
	int flags, mask, error = 0;

	switch (command) {
	case SIOCSIFMTU:
		if (BGE_IS_JUMBO_CAPABLE(sc) ||
		    (sc->bge_flags & BGE_FLAG_JUMBO_STD)) {
			if (ifr->ifr_mtu < ETHERMIN ||
			    ifr->ifr_mtu > BGE_JUMBO_MTU) {
				error = EINVAL;
				break;
			}
		} else if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU) {
			error = EINVAL;
			break;
		}
		BGE_LOCK(sc);
		if (ifp->if_mtu != ifr->ifr_mtu) {
			ifp->if_mtu = ifr->ifr_mtu;
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
				bge_init_locked(sc);
			}
		}
		BGE_UNLOCK(sc);
		break;
	case SIOCSIFFLAGS:
		BGE_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the state of the PROMISC flag changed,
			 * then just use the 'set promisc mode' command
			 * instead of reinitializing the entire NIC. Doing
			 * a full re-init means reloading the firmware and
			 * waiting for it to start up, which may take a
			 * second or two.  Similarly for ALLMULTI.
			 */
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				flags = ifp->if_flags ^ sc->bge_if_flags;
				if (flags & IFF_PROMISC)
					bge_setpromisc(sc);
				if (flags & IFF_ALLMULTI)
					bge_setmulti(sc);
			} else
				bge_init_locked(sc);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				bge_stop(sc);
		}
		sc->bge_if_flags = ifp->if_flags;
		BGE_UNLOCK(sc);
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			BGE_LOCK(sc);
			bge_setmulti(sc);
			BGE_UNLOCK(sc);
			error = 0;
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		if (sc->bge_flags & BGE_FLAG_TBI) {
			error = ifmedia_ioctl(ifp, ifr,
			    &sc->bge_ifmedia, command);
		} else {
			mii = device_get_softc(sc->bge_miibus);
			error = ifmedia_ioctl(ifp, ifr,
			    &mii->mii_media, command);
		}
		break;
	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(bge_poll, ifp);
				if (error)
					return (error);
				BGE_LOCK(sc);
				BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
				    BGE_PCIMISCCTL_MASK_PCI_INTR);
				bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
				ifp->if_capenable |= IFCAP_POLLING;
				BGE_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupt even in error case */
				BGE_LOCK(sc);
				BGE_CLRBIT(sc, BGE_PCI_MISC_CTL,
				    BGE_PCIMISCCTL_MASK_PCI_INTR);
				bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
				ifp->if_capenable &= ~IFCAP_POLLING;
				BGE_UNLOCK(sc);
			}
		}
#endif
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
				ifp->if_hwassist |= sc->bge_csum_features;
			else
				ifp->if_hwassist &= ~sc->bge_csum_features;
		}

		if ((mask & IFCAP_RXCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_RXCSUM) != 0)
			ifp->if_capenable ^= IFCAP_RXCSUM;

		if ((mask & IFCAP_TSO4) != 0 &&
		    (ifp->if_capabilities & IFCAP_TSO4) != 0) {
			ifp->if_capenable ^= IFCAP_TSO4;
			if ((ifp->if_capenable & IFCAP_TSO4) != 0)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}

		if (mask & IFCAP_VLAN_MTU) {
			ifp->if_capenable ^= IFCAP_VLAN_MTU;
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			bge_init(sc);
		}

		if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
		    (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
		    (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
				ifp->if_capenable &= ~IFCAP_VLAN_HWTSO;
			BGE_LOCK(sc);
			bge_setvlan(sc);
			BGE_UNLOCK(sc);
		}
#ifdef VLAN_CAPABILITIES
		VLAN_CAPABILITIES(ifp);
#endif
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
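/*
 * The SIOCSIFFLAGS fast path above means that, for example,
 *
 *	# ifconfig bge0 promisc
 *
 * only issues the 'set promisc mode' command rather than a full
 * firmware reload, so packet capture can attach without a
 * multi-second link bounce.
 */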
static void
bge_watchdog(struct bge_softc *sc)
{
	struct ifnet *ifp;
	uint32_t status;

	BGE_LOCK_ASSERT(sc);

	if (sc->bge_timer == 0 || --sc->bge_timer)
		return;

	/* If pause frames are active then don't reset the hardware. */
	if ((CSR_READ_4(sc, BGE_RX_MODE) & BGE_RXMODE_FLOWCTL_ENABLE) != 0) {
		status = CSR_READ_4(sc, BGE_RX_STS);
		if ((status & BGE_RXSTAT_REMOTE_XOFFED) != 0) {
			/*
			 * If link partner has us in XOFF state then wait for
			 * the condition to clear.
			 */
			CSR_WRITE_4(sc, BGE_RX_STS, status);
			sc->bge_timer = BGE_TX_TIMEOUT;
			return;
		} else if ((status & BGE_RXSTAT_RCVD_XOFF) != 0 &&
		    (status & BGE_RXSTAT_RCVD_XON) != 0) {
			/*
			 * If link partner has us in XOFF state then wait for
			 * the condition to clear.
			 */
			CSR_WRITE_4(sc, BGE_RX_STS, status);
			sc->bge_timer = BGE_TX_TIMEOUT;
			return;
		}
		/*
		 * Any other condition is unexpected and the controller
		 * should be reset.
		 */
	}

	ifp = sc->bge_ifp;
	if_printf(ifp, "watchdog timeout -- resetting\n");

	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	bge_init_locked(sc);
}
static void
bge_stop_block(struct bge_softc *sc, bus_size_t reg, uint32_t bit)
{
	int i;

	BGE_CLRBIT(sc, reg, bit);

	for (i = 0; i < BGE_TIMEOUT; i++) {
		if ((CSR_READ_4(sc, reg) & bit) == 0)
			return;
		DELAY(100);
	}
}
/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
bge_stop(struct bge_softc *sc)
{
	struct ifnet *ifp;

	BGE_LOCK_ASSERT(sc);

	ifp = sc->bge_ifp;

	callout_stop(&sc->bge_stat_ch);

	/* Disable host interrupts. */
	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
	bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);

	/*
	 * Tell firmware we're shutting down.
	 */
	bge_stop_fw(sc);
	bge_sig_pre_reset(sc, BGE_RESET_SHUTDOWN);

	/*
	 * Disable all of the receiver blocks.
	 */
	bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
	bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
	bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
	if (BGE_IS_5700_FAMILY(sc))
		bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
	bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
	bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
	bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);

	/*
	 * Disable all of the transmit blocks.
	 */
	bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
	bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
	bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
	bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
	bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
	if (BGE_IS_5700_FAMILY(sc))
		bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
	bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

	/*
	 * Shut down all of the memory managers and related
	 * state machines.
	 */
	bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
	bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
	if (BGE_IS_5700_FAMILY(sc))
		bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);

	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
	if (!(BGE_IS_5705_PLUS(sc))) {
		BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
		BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
	}
	/* Update MAC statistics. */
	if (BGE_IS_5705_PLUS(sc))
		bge_stats_update_regs(sc);

	bge_reset(sc);
	bge_sig_legacy(sc, BGE_RESET_SHUTDOWN);
	bge_sig_post_reset(sc, BGE_RESET_SHUTDOWN);

	/*
	 * Keep the ASF firmware running if up.
	 */
	if (sc->bge_asf_mode & ASF_STACKUP)
		BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
	else
		BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);

	/* Free the RX lists. */
	bge_free_rx_ring_std(sc);

	/* Free jumbo RX list. */
	if (BGE_IS_JUMBO_CAPABLE(sc))
		bge_free_rx_ring_jumbo(sc);

	/* Free TX buffers. */
	bge_free_tx_ring(sc);

	sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;

	/* Clear MAC's link state (PHY may still have link UP). */
	if (bootverbose && sc->bge_link)
		if_printf(sc->bge_ifp, "link DOWN\n");
	sc->bge_link = 0;

	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
}
/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static int
bge_shutdown(device_t dev)
{
	struct bge_softc *sc;

	sc = device_get_softc(dev);
	BGE_LOCK(sc);
	bge_stop(sc);
	BGE_UNLOCK(sc);

	return (0);
}
static int
bge_suspend(device_t dev)
{
	struct bge_softc *sc;

	sc = device_get_softc(dev);
	BGE_LOCK(sc);
	bge_stop(sc);
	BGE_UNLOCK(sc);

	return (0);
}
static int
bge_resume(device_t dev)
{
	struct bge_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	BGE_LOCK(sc);
	ifp = sc->bge_ifp;
	if (ifp->if_flags & IFF_UP) {
		bge_init_locked(sc);
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			bge_start_locked(ifp);
	}
	BGE_UNLOCK(sc);

	return (0);
}
static void
bge_link_upd(struct bge_softc *sc)
{
	struct mii_data *mii;
	uint32_t link, status;

	BGE_LOCK_ASSERT(sc);

	/* Clear 'pending link event' flag. */
	sc->bge_link_evt = 0;

	/*
	 * Process link state changes.
	 * Grrr. The link status word in the status block does
	 * not work correctly on the BCM5700 rev AX and BX chips,
	 * according to all available information. Hence, we have
	 * to enable MII interrupts in order to properly obtain
	 * async link changes. Unfortunately, this also means that
	 * we have to read the MAC status register to detect link
	 * changes, thereby adding an additional register access to
	 * the interrupt handler.
	 *
	 * XXX: perhaps the link state detection procedure used for
	 * BGE_CHIPID_BCM5700_B2 can be used for other BCM5700 revisions.
	 */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
	    sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
		status = CSR_READ_4(sc, BGE_MAC_STS);
		if (status & BGE_MACSTAT_MI_INTERRUPT) {
			mii = device_get_softc(sc->bge_miibus);
			mii_pollstat(mii);
			if (!sc->bge_link &&
			    mii->mii_media_status & IFM_ACTIVE &&
			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
				sc->bge_link++;
				if (bootverbose)
					if_printf(sc->bge_ifp, "link UP\n");
			} else if (sc->bge_link &&
			    (!(mii->mii_media_status & IFM_ACTIVE) ||
			    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
				sc->bge_link = 0;
				if (bootverbose)
					if_printf(sc->bge_ifp, "link DOWN\n");
			}

			/* Clear the interrupt. */
			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
			    BGE_EVTENB_MI_INTERRUPT);
			bge_miibus_readreg(sc->bge_dev, sc->bge_phy_addr,
			    BRGPHY_MII_ISR);
			bge_miibus_writereg(sc->bge_dev, sc->bge_phy_addr,
			    BRGPHY_MII_IMR, BRGPHY_INTRS);
		}
		return;
	}

	if (sc->bge_flags & BGE_FLAG_TBI) {
		status = CSR_READ_4(sc, BGE_MAC_STS);
		if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
			if (!sc->bge_link) {
				sc->bge_link++;
				if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
					BGE_CLRBIT(sc, BGE_MAC_MODE,
					    BGE_MACMODE_TBI_SEND_CFGS);
				}
				CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
				if (bootverbose)
					if_printf(sc->bge_ifp, "link UP\n");
				if_link_state_change(sc->bge_ifp,
				    LINK_STATE_UP);
			}
		} else if (sc->bge_link) {
			sc->bge_link = 0;
			if (bootverbose)
				if_printf(sc->bge_ifp, "link DOWN\n");
			if_link_state_change(sc->bge_ifp, LINK_STATE_DOWN);
		}
	} else if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
		/*
		 * Some broken BCM chips have BGE_STATFLAG_LINKSTATE_CHANGED bit
		 * in status word always set. Work around this bug by reading
		 * the PHY link status directly.
		 */
		link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ? 1 : 0;

		if (link != sc->bge_link ||
		    sc->bge_asicrev == BGE_ASICREV_BCM5700) {
			mii = device_get_softc(sc->bge_miibus);
			mii_pollstat(mii);
			if (!sc->bge_link &&
			    mii->mii_media_status & IFM_ACTIVE &&
			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
				sc->bge_link++;
				if (bootverbose)
					if_printf(sc->bge_ifp, "link UP\n");
			} else if (sc->bge_link &&
			    (!(mii->mii_media_status & IFM_ACTIVE) ||
			    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
				sc->bge_link = 0;
				if (bootverbose)
					if_printf(sc->bge_ifp, "link DOWN\n");
			}
		}
	} else {
		/*
		 * For controllers that call mii_tick, we have to poll
		 * link status.
		 */
		mii = device_get_softc(sc->bge_miibus);
		mii_pollstat(mii);
		bge_miibus_statchg(sc->bge_dev);
	}

	/* Disable MAC attention when link is up. */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
	    BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
	    BGE_MACSTAT_LINK_CHANGED);
}
static void
bge_add_sysctls(struct bge_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *children;
	char tn[32];
	int unit;

	ctx = device_get_sysctl_ctx(sc->bge_dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bge_dev));

#ifdef BGE_REGISTER_DEBUG
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "debug_info",
	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_debug_info, "I",
	    "Debug Information");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reg_read",
	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_reg_read, "I",
	    "MAC Register Read");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ape_read",
	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_ape_read, "I",
	    "APE Register Read");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mem_read",
	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_mem_read, "I",
	    "Memory Read");
#endif

	unit = device_get_unit(sc->bge_dev);
	/*
	 * A common design characteristic for many Broadcom client controllers
	 * is that they only support a single outstanding DMA read operation
	 * on the PCIe bus. This means that it will take twice as long to fetch
	 * a TX frame that is split into header and payload buffers as it does
	 * to fetch a single, contiguous TX frame (2 reads vs. 1 read). For
	 * these controllers, coalescing buffers to reduce the number of memory
	 * reads is an effective way to get maximum performance (about
	 * 940 Mbps). Without collapsing TX buffers the maximum TCP bulk
	 * transfer performance is about 850 Mbps. However, forcing the
	 * coalescing of mbufs consumes a lot of CPU cycles, so leave it off
	 * by default.
	 */
	sc->bge_forced_collapse = 0;
	snprintf(tn, sizeof(tn), "dev.bge.%d.forced_collapse", unit);
	TUNABLE_INT_FETCH(tn, &sc->bge_forced_collapse);
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_collapse",
	    CTLFLAG_RW, &sc->bge_forced_collapse, 0,
	    "Number of fragmented TX buffers of a frame allowed before "
	    "forced collapsing");
	sc->bge_msi = 1;
	snprintf(tn, sizeof(tn), "dev.bge.%d.msi", unit);
	TUNABLE_INT_FETCH(tn, &sc->bge_msi);
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "msi",
	    CTLFLAG_RD, &sc->bge_msi, 0, "Enable MSI");
	/*
	 * It seems all Broadcom controllers have a bug that can generate UDP
	 * datagrams with checksum value 0 when TX UDP checksum offloading is
	 * enabled. Generating UDP checksum value 0 is an RFC 768 violation.
	 * Even though the probability of generating such UDP datagrams is
	 * low, I don't want to see FreeBSD boxes inject such datagrams into
	 * the network, so disable UDP checksum offloading by default. Users
	 * can still override this behavior by setting a sysctl variable,
	 * dev.bge.0.forced_udpcsum.
	 */
	sc->bge_forced_udpcsum = 0;
	snprintf(tn, sizeof(tn), "dev.bge.%d.forced_udpcsum", unit);
	TUNABLE_INT_FETCH(tn, &sc->bge_forced_udpcsum);
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_udpcsum",
	    CTLFLAG_RW, &sc->bge_forced_udpcsum, 0,
	    "Enable UDP checksum offloading even if controller can "
	    "generate UDP checksum value 0");
	if (BGE_IS_5705_PLUS(sc))
		bge_add_sysctl_stats_regs(sc, ctx, children);
	else
		bge_add_sysctl_stats(sc, ctx, children);
}
#define BGE_SYSCTL_STAT(sc, ctx, desc, parent, node, oid) \
	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, oid, CTLTYPE_UINT|CTLFLAG_RD, \
	    sc, offsetof(struct bge_stats, node), bge_sysctl_stats, "IU", \
	    desc)
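/*
 * For illustration, a call such as
 *
 *	BGE_SYSCTL_STAT(sc, ctx, "Input Errors", children, ifInErrors,
 *	    "InputErrors");
 *
 * expands to a SYSCTL_ADD_PROC() whose arg2 is
 * offsetof(struct bge_stats, ifInErrors), which is how
 * bge_sysctl_stats() locates the counter inside the NIC's statistics
 * block at read time.
 */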
static void
bge_add_sysctl_stats(struct bge_softc *sc, struct sysctl_ctx_list *ctx,
    struct sysctl_oid_list *parent)
{
	struct sysctl_oid *tree;
	struct sysctl_oid_list *children, *schildren;

	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", CTLFLAG_RD,
	    NULL, "BGE Statistics");
	schildren = children = SYSCTL_CHILDREN(tree);
	BGE_SYSCTL_STAT(sc, ctx, "Frames Dropped Due To Filters",
	    children, COSFramesDroppedDueToFilters,
	    "FramesDroppedDueToFilters");
	BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write Queue Full",
	    children, nicDmaWriteQueueFull, "DmaWriteQueueFull");
	BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write High Priority Queue Full",
	    children, nicDmaWriteHighPriQueueFull, "DmaWriteHighPriQueueFull");
	BGE_SYSCTL_STAT(sc, ctx, "NIC No More RX Buffer Descriptors",
	    children, nicNoMoreRxBDs, "NoMoreRxBDs");
	BGE_SYSCTL_STAT(sc, ctx, "Discarded Input Frames",
	    children, ifInDiscards, "InputDiscards");
	BGE_SYSCTL_STAT(sc, ctx, "Input Errors",
	    children, ifInErrors, "InputErrors");
	BGE_SYSCTL_STAT(sc, ctx, "NIC Recv Threshold Hit",
	    children, nicRecvThresholdHit, "RecvThresholdHit");
	BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read Queue Full",
	    children, nicDmaReadQueueFull, "DmaReadQueueFull");
	BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read High Priority Queue Full",
	    children, nicDmaReadHighPriQueueFull, "DmaReadHighPriQueueFull");
	BGE_SYSCTL_STAT(sc, ctx, "NIC Send Data Complete Queue Full",
	    children, nicSendDataCompQueueFull, "SendDataCompQueueFull");
	BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Set Send Producer Index",
	    children, nicRingSetSendProdIndex, "RingSetSendProdIndex");
	BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Status Update",
	    children, nicRingStatusUpdate, "RingStatusUpdate");
	BGE_SYSCTL_STAT(sc, ctx, "NIC Interrupts",
	    children, nicInterrupts, "Interrupts");
	BGE_SYSCTL_STAT(sc, ctx, "NIC Avoided Interrupts",
	    children, nicAvoidedInterrupts, "AvoidedInterrupts");
	BGE_SYSCTL_STAT(sc, ctx, "NIC Send Threshold Hit",
	    children, nicSendThresholdHit, "SendThresholdHit");

	tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "rx", CTLFLAG_RD,
	    NULL, "BGE RX Statistics");
	children = SYSCTL_CHILDREN(tree);
	BGE_SYSCTL_STAT(sc, ctx, "Inbound Octets",
	    children, rxstats.ifHCInOctets, "ifHCInOctets");
	BGE_SYSCTL_STAT(sc, ctx, "Fragments",
	    children, rxstats.etherStatsFragments, "Fragments");
	BGE_SYSCTL_STAT(sc, ctx, "Inbound Unicast Packets",
	    children, rxstats.ifHCInUcastPkts, "UnicastPkts");
	BGE_SYSCTL_STAT(sc, ctx, "Inbound Multicast Packets",
	    children, rxstats.ifHCInMulticastPkts, "MulticastPkts");
	BGE_SYSCTL_STAT(sc, ctx, "FCS Errors",
	    children, rxstats.dot3StatsFCSErrors, "FCSErrors");
	BGE_SYSCTL_STAT(sc, ctx, "Alignment Errors",
	    children, rxstats.dot3StatsAlignmentErrors, "AlignmentErrors");
	BGE_SYSCTL_STAT(sc, ctx, "XON Pause Frames Received",
	    children, rxstats.xonPauseFramesReceived, "xonPauseFramesReceived");
	BGE_SYSCTL_STAT(sc, ctx, "XOFF Pause Frames Received",
	    children, rxstats.xoffPauseFramesReceived,
	    "xoffPauseFramesReceived");
	BGE_SYSCTL_STAT(sc, ctx, "MAC Control Frames Received",
	    children, rxstats.macControlFramesReceived,
	    "ControlFramesReceived");
	BGE_SYSCTL_STAT(sc, ctx, "XOFF State Entered",
	    children, rxstats.xoffStateEntered, "xoffStateEntered");
	BGE_SYSCTL_STAT(sc, ctx, "Frames Too Long",
	    children, rxstats.dot3StatsFramesTooLong, "FramesTooLong");
	BGE_SYSCTL_STAT(sc, ctx, "Jabbers",
	    children, rxstats.etherStatsJabbers, "Jabbers");
	BGE_SYSCTL_STAT(sc, ctx, "Undersized Packets",
	    children, rxstats.etherStatsUndersizePkts, "UndersizePkts");
	BGE_SYSCTL_STAT(sc, ctx, "Inbound Range Length Errors",
	    children, rxstats.inRangeLengthError, "inRangeLengthError");
	BGE_SYSCTL_STAT(sc, ctx, "Outbound Range Length Errors",
	    children, rxstats.outRangeLengthError, "outRangeLengthError");

	tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "tx", CTLFLAG_RD,
	    NULL, "BGE TX Statistics");
	children = SYSCTL_CHILDREN(tree);
	BGE_SYSCTL_STAT(sc, ctx, "Outbound Octets",
	    children, txstats.ifHCOutOctets, "ifHCOutOctets");
	BGE_SYSCTL_STAT(sc, ctx, "TX Collisions",
	    children, txstats.etherStatsCollisions, "Collisions");
	BGE_SYSCTL_STAT(sc, ctx, "XON Sent",
	    children, txstats.outXonSent, "XonSent");
	BGE_SYSCTL_STAT(sc, ctx, "XOFF Sent",
	    children, txstats.outXoffSent, "XoffSent");
	BGE_SYSCTL_STAT(sc, ctx, "Flow Control Done",
	    children, txstats.flowControlDone, "flowControlDone");
	BGE_SYSCTL_STAT(sc, ctx, "Internal MAC TX errors",
	    children, txstats.dot3StatsInternalMacTransmitErrors,
	    "InternalMacTransmitErrors");
	BGE_SYSCTL_STAT(sc, ctx, "Single Collision Frames",
	    children, txstats.dot3StatsSingleCollisionFrames,
	    "SingleCollisionFrames");
	BGE_SYSCTL_STAT(sc, ctx, "Multiple Collision Frames",
	    children, txstats.dot3StatsMultipleCollisionFrames,
	    "MultipleCollisionFrames");
	BGE_SYSCTL_STAT(sc, ctx, "Deferred Transmissions",
	    children, txstats.dot3StatsDeferredTransmissions,
	    "DeferredTransmissions");
	BGE_SYSCTL_STAT(sc, ctx, "Excessive Collisions",
	    children, txstats.dot3StatsExcessiveCollisions,
	    "ExcessiveCollisions");
	BGE_SYSCTL_STAT(sc, ctx, "Late Collisions",
	    children, txstats.dot3StatsLateCollisions,
	    "LateCollisions");
	BGE_SYSCTL_STAT(sc, ctx, "Outbound Unicast Packets",
	    children, txstats.ifHCOutUcastPkts, "UnicastPkts");
	BGE_SYSCTL_STAT(sc, ctx, "Outbound Multicast Packets",
	    children, txstats.ifHCOutMulticastPkts, "MulticastPkts");
	BGE_SYSCTL_STAT(sc, ctx, "Outbound Broadcast Packets",
	    children, txstats.ifHCOutBroadcastPkts, "BroadcastPkts");
	BGE_SYSCTL_STAT(sc, ctx, "Carrier Sense Errors",
	    children, txstats.dot3StatsCarrierSenseErrors,
	    "CarrierSenseErrors");
	BGE_SYSCTL_STAT(sc, ctx, "Outbound Discards",
	    children, txstats.ifOutDiscards, "Discards");
	BGE_SYSCTL_STAT(sc, ctx, "Outbound Errors",
	    children, txstats.ifOutErrors, "Errors");
}

#undef BGE_SYSCTL_STAT
#define	BGE_SYSCTL_STAT_ADD64(c, h, n, p, d) \
	SYSCTL_ADD_UQUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
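/*
 * Unlike BGE_SYSCTL_STAT above, this variant needs no handler: it
 * publishes the address of a 64-bit counter kept in the softc's
 * bge_mac_stats, e.g.
 *
 *	BGE_SYSCTL_STAT_ADD64(ctx, child, "InputErrors",
 *	    &stats->InputErrors, "Input Errors");
 *
 * exports the counter that is refreshed from the chip registers
 * (see bge_stats_update_regs()).
 */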
static void
bge_add_sysctl_stats_regs(struct bge_softc *sc, struct sysctl_ctx_list *ctx,
    struct sysctl_oid_list *parent)
{
	struct sysctl_oid *tree;
	struct sysctl_oid_list *child, *schild;
	struct bge_mac_stats *stats;

	stats = &sc->bge_mac_stats;
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", CTLFLAG_RD,
	    NULL, "BGE Statistics");
	schild = child = SYSCTL_CHILDREN(tree);
	BGE_SYSCTL_STAT_ADD64(ctx, child, "FramesDroppedDueToFilters",
	    &stats->FramesDroppedDueToFilters, "Frames Dropped Due to Filters");
	BGE_SYSCTL_STAT_ADD64(ctx, child, "DmaWriteQueueFull",
	    &stats->DmaWriteQueueFull, "NIC DMA Write Queue Full");
	BGE_SYSCTL_STAT_ADD64(ctx, child, "DmaWriteHighPriQueueFull",
	    &stats->DmaWriteHighPriQueueFull,
	    "NIC DMA Write High Priority Queue Full");
	BGE_SYSCTL_STAT_ADD64(ctx, child, "NoMoreRxBDs",
	    &stats->NoMoreRxBDs, "NIC No More RX Buffer Descriptors");
	BGE_SYSCTL_STAT_ADD64(ctx, child, "InputDiscards",
	    &stats->InputDiscards, "Discarded Input Frames");
	BGE_SYSCTL_STAT_ADD64(ctx, child, "InputErrors",
	    &stats->InputErrors, "Input Errors");
	BGE_SYSCTL_STAT_ADD64(ctx, child, "RecvThresholdHit",
	    &stats->RecvThresholdHit, "NIC Recv Threshold Hit");

	tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "rx", CTLFLAG_RD,
	    NULL, "BGE RX Statistics");
	child = SYSCTL_CHILDREN(tree);
	BGE_SYSCTL_STAT_ADD64(ctx, child, "ifHCInOctets",
	    &stats->ifHCInOctets, "Inbound Octets");
	BGE_SYSCTL_STAT_ADD64(ctx, child, "Fragments",
	    &stats->etherStatsFragments, "Fragments");
	BGE_SYSCTL_STAT_ADD64(ctx, child, "UnicastPkts",
	    &stats->ifHCInUcastPkts, "Inbound Unicast Packets");
	BGE_SYSCTL_STAT_ADD64(ctx, child, "MulticastPkts",
	    &stats->ifHCInMulticastPkts, "Inbound Multicast Packets");
	BGE_SYSCTL_STAT_ADD64(ctx, child, "BroadcastPkts",
	    &stats->ifHCInBroadcastPkts, "Inbound Broadcast Packets");
	BGE_SYSCTL_STAT_ADD64(ctx, child, "FCSErrors",
	    &stats->dot3StatsFCSErrors, "FCS Errors");
	BGE_SYSCTL_STAT_ADD64(ctx, child, "AlignmentErrors",
	    &stats->dot3StatsAlignmentErrors, "Alignment Errors");
	BGE_SYSCTL_STAT_ADD64(ctx, child, "xonPauseFramesReceived",
	    &stats->xonPauseFramesReceived, "XON Pause Frames Received");
	BGE_SYSCTL_STAT_ADD64(ctx, child, "xoffPauseFramesReceived",
	    &stats->xoffPauseFramesReceived, "XOFF Pause Frames Received");
	BGE_SYSCTL_STAT_ADD64(ctx, child, "ControlFramesReceived",
	    &stats->macControlFramesReceived, "MAC Control Frames Received");
	BGE_SYSCTL_STAT_ADD64(ctx, child, "xoffStateEntered",
	    &stats->xoffStateEntered, "XOFF State Entered");
	BGE_SYSCTL_STAT_ADD64(ctx, child, "FramesTooLong",
	    &stats->dot3StatsFramesTooLong, "Frames Too Long");
	BGE_SYSCTL_STAT_ADD64(ctx, child, "Jabbers",
	    &stats->etherStatsJabbers, "Jabbers");
	BGE_SYSCTL_STAT_ADD64(ctx, child, "UndersizePkts",
	    &stats->etherStatsUndersizePkts, "Undersized Packets");

	tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "tx", CTLFLAG_RD,
	    NULL, "BGE TX Statistics");
	child = SYSCTL_CHILDREN(tree);
	BGE_SYSCTL_STAT_ADD64(ctx, child, "ifHCOutOctets",
	    &stats->ifHCOutOctets, "Outbound Octets");
	BGE_SYSCTL_STAT_ADD64(ctx, child, "Collisions",
	    &stats->etherStatsCollisions, "TX Collisions");
	BGE_SYSCTL_STAT_ADD64(ctx, child, "XonSent",
	    &stats->outXonSent, "XON Sent");
	BGE_SYSCTL_STAT_ADD64(ctx, child, "XoffSent",
	    &stats->outXoffSent, "XOFF Sent");
	BGE_SYSCTL_STAT_ADD64(ctx, child, "InternalMacTransmitErrors",
	    &stats->dot3StatsInternalMacTransmitErrors,
	    "Internal MAC TX Errors");
	BGE_SYSCTL_STAT_ADD64(ctx, child, "SingleCollisionFrames",
	    &stats->dot3StatsSingleCollisionFrames, "Single Collision Frames");
	BGE_SYSCTL_STAT_ADD64(ctx, child, "MultipleCollisionFrames",
	    &stats->dot3StatsMultipleCollisionFrames,
	    "Multiple Collision Frames");
	BGE_SYSCTL_STAT_ADD64(ctx, child, "DeferredTransmissions",
	    &stats->dot3StatsDeferredTransmissions, "Deferred Transmissions");
	BGE_SYSCTL_STAT_ADD64(ctx, child, "ExcessiveCollisions",
	    &stats->dot3StatsExcessiveCollisions, "Excessive Collisions");
	BGE_SYSCTL_STAT_ADD64(ctx, child, "LateCollisions",
	    &stats->dot3StatsLateCollisions, "Late Collisions");
	BGE_SYSCTL_STAT_ADD64(ctx, child, "UnicastPkts",
	    &stats->ifHCOutUcastPkts, "Outbound Unicast Packets");
	BGE_SYSCTL_STAT_ADD64(ctx, child, "MulticastPkts",
	    &stats->ifHCOutMulticastPkts, "Outbound Multicast Packets");
	BGE_SYSCTL_STAT_ADD64(ctx, child, "BroadcastPkts",
	    &stats->ifHCOutBroadcastPkts, "Outbound Broadcast Packets");
}

#undef BGE_SYSCTL_STAT_ADD64
static int
bge_sysctl_stats(SYSCTL_HANDLER_ARGS)
{
	struct bge_softc *sc;
	uint32_t result;
	int offset;

	sc = (struct bge_softc *)arg1;
	offset = arg2;
	result = CSR_READ_4(sc, BGE_MEMWIN_START + BGE_STATS_BLOCK + offset +
	    offsetof(bge_hostaddr, bge_addr_lo));
	return (sysctl_handle_int(oidp, &result, 0, req));
}
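/*
 * From userland the handler above surfaces, for example (OID names as
 * registered in bge_add_sysctl_stats(), unit 0 assumed):
 *
 *	$ sysctl dev.bge.0.stats.rx.ifHCInOctets
 *	$ sysctl dev.bge.0.stats.tx.Collisions
 */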
#ifdef BGE_REGISTER_DEBUG
static int
bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
{
	struct bge_softc *sc;
	uint16_t *sbdata;
	int error, result, sbsz;
	int i, j;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);
	if (error || (req->newptr == NULL))
		return (error);

	if (result == 1) {
		sc = (struct bge_softc *)arg1;

		if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
		    sc->bge_chipid != BGE_CHIPID_BCM5700_C0)
			sbsz = BGE_STATUS_BLK_SZ;
		else
			sbsz = 32;
		sbdata = (uint16_t *)sc->bge_ldata.bge_status_block;
		printf("Status Block:\n");
		BGE_LOCK(sc);
		bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
		    sc->bge_cdata.bge_status_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		for (i = 0x0; i < sbsz / sizeof(uint16_t); ) {
			printf("%06x:", i);
			for (j = 0; j < 8; j++)
				printf(" %04x", sbdata[i++]);
			printf("\n");
		}

		printf("Registers:\n");
		for (i = 0x800; i < 0xA00; ) {
			printf("%06x:", i);
			for (j = 0; j < 8; j++) {
				printf(" %08x", CSR_READ_4(sc, i));
				i += 4;
			}
			printf("\n");
		}
		BGE_UNLOCK(sc);

		printf("Hardware Flags:\n");
		if (BGE_IS_5717_PLUS(sc))
			printf(" - 5717 Plus\n");
		if (BGE_IS_5755_PLUS(sc))
			printf(" - 5755 Plus\n");
		if (BGE_IS_575X_PLUS(sc))
			printf(" - 575X Plus\n");
		if (BGE_IS_5705_PLUS(sc))
			printf(" - 5705 Plus\n");
		if (BGE_IS_5714_FAMILY(sc))
			printf(" - 5714 Family\n");
		if (BGE_IS_5700_FAMILY(sc))
			printf(" - 5700 Family\n");
		if (sc->bge_flags & BGE_FLAG_JUMBO)
			printf(" - Supports Jumbo Frames\n");
		if (sc->bge_flags & BGE_FLAG_PCIX)
			printf(" - PCI-X Bus\n");
		if (sc->bge_flags & BGE_FLAG_PCIE)
			printf(" - PCI Express Bus\n");
		if (sc->bge_phy_flags & BGE_PHY_NO_3LED)
			printf(" - No 3 LEDs\n");
		if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG)
			printf(" - RX Alignment Bug\n");
	}

	return (error);
}
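/*
 * With BGE_REGISTER_DEBUG compiled in, the dump above is triggered by
 * writing 1 to the OID, for example:
 *
 *	# sysctl dev.bge.0.debug_info=1
 *
 * The output goes to the console/message buffer via printf(9).
 */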
static int
bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS)
{
	struct bge_softc *sc;
	int error, result;
	uint32_t val;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);
	if (error || (req->newptr == NULL))
		return (error);

	if (result < 0x8000) {
		sc = (struct bge_softc *)arg1;
		val = CSR_READ_4(sc, result);
		printf("reg 0x%06X = 0x%08X\n", result, val);
	}

	return (error);
}
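/*
 * Example: reading the MAC mode register (offset 0x400 in standard
 * register space; only offsets below 0x8000 are accepted):
 *
 *	# sysctl dev.bge.0.reg_read=0x400
 *	reg 0x000400 = 0x...
 */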
static int
bge_sysctl_ape_read(SYSCTL_HANDLER_ARGS)
{
	struct bge_softc *sc;
	int error, result;
	uint32_t val;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);
	if (error || (req->newptr == NULL))
		return (error);

	if (result < 0x8000) {
		sc = (struct bge_softc *)arg1;
		val = APE_READ_4(sc, result);
		printf("reg 0x%06X = 0x%08X\n", result, val);
	}

	return (error);
}
static int
bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS)
{
	struct bge_softc *sc;
	int error, result;
	uint32_t val;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);
	if (error || (req->newptr == NULL))
		return (error);

	if (result < 0x8000) {
		sc = (struct bge_softc *)arg1;
		val = bge_readmem_ind(sc, result);
		printf("mem 0x%06X = 0x%08X\n", result, val);
	}

	return (error);
}
#endif
static int
bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[])
{
#ifdef __sparc64__
	if (sc->bge_flags & BGE_FLAG_EADDR)
		return (1);

	OF_getetheraddr(sc->bge_dev, ether_addr);
	return (0);
#else
	return (1);
#endif
}
static int
bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[])
{
	uint32_t mac_addr;

	mac_addr = bge_readmem_ind(sc, BGE_SRAM_MAC_ADDR_HIGH_MB);
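	/*
	 * The upper 16 bits hold a validity signature, 0x484b (ASCII
	 * "HK"), which the bootcode apparently writes when it has stored
	 * a usable MAC address in SRAM.
	 */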
	if ((mac_addr >> 16) == 0x484b) {
		ether_addr[0] = (uint8_t)(mac_addr >> 8);
		ether_addr[1] = (uint8_t)mac_addr;
		mac_addr = bge_readmem_ind(sc, BGE_SRAM_MAC_ADDR_LOW_MB);
		ether_addr[2] = (uint8_t)(mac_addr >> 24);
		ether_addr[3] = (uint8_t)(mac_addr >> 16);
		ether_addr[4] = (uint8_t)(mac_addr >> 8);
		ether_addr[5] = (uint8_t)mac_addr;
		return (0);
	}
	return (1);
}
static int
bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[])
{
	int mac_offset = BGE_EE_MAC_OFFSET;

	if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
		mac_offset = BGE_EE_MAC_OFFSET_5906;

	return (bge_read_nvram(sc, ether_addr, mac_offset + 2,
	    ETHER_ADDR_LEN));
}
static int
bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[])
{
	if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
		return (1);

	return (bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
	    ETHER_ADDR_LEN));
}
static int
bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[])
{
	static const bge_eaddr_fcn_t bge_eaddr_funcs[] = {
		/* NOTE: Order is critical */
		bge_get_eaddr_fw,
		bge_get_eaddr_mem,
		bge_get_eaddr_nvram,
		bge_get_eaddr_eeprom,
		NULL
	};
	const bge_eaddr_fcn_t *func;

	for (func = bge_eaddr_funcs; *func != NULL; ++func) {
		if ((*func)(sc, eaddr) == 0)
			break;
	}
	return (*func == NULL ? ENXIO : 0);
}
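/*
 * The return convention above is worth spelling out: each
 * bge_eaddr_fcn_t returns 0 once it has filled in eaddr[] and nonzero
 * to pass the request on to the next, less-preferred source (firmware,
 * then SRAM, then NVRAM, then EEPROM), which is why the table order is
 * critical.
 */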