/*	$NetBSD: if_wm.c,v 1.187 2010/01/05 10:02:01 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.187 2010/01/05 10:02:01 msaitoh Exp $");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/rnd.h>

#include <uvm/uvm_extern.h>		/* for PAGE_SIZE */

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>
#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))

#define	WM_MAXTXDMA		round_page(IP_MAXPACKET) /* for TSO */
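
/*
 * Both ring sizes above are powers of two, so the WM_NEXTTX()/WM_NEXTTXS()
 * macros can wrap an index with a cheap bitwise AND instead of a modulo;
 * e.g. with WM_NTXDESC(sc) == 256 the mask is 0xff and
 * (255 + 1) & 0xff == 0.
 */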
/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82544];
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])
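
/*
 * Because both rings live in one structure mapped by a single DMA
 * segment, WM_CDTXOFF()/WM_CDRXOFF() can locate any descriptor as a
 * plain offsetof() from the start of the clump, and the same offsets
 * feed the partial bus_dmamap_sync() calls further below.
 */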
/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};
#define	WM_LINKUP_TIMEOUT	50

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;

	wm_chip_type sc_type;		/* chip type */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcix_offset;		/* PCIX capability register offset */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;		/* Rx buffer alignment "scoot" */

	void *sc_ih;			/* interrupt cookie */

	int sc_ee_addrbits;		/* EEPROM address bits */

	struct mii_data sc_mii;		/* MII/media information */

	callout_t sc_tick_ch;		/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
#define	sc_txdescs	sc_control_data->wcd_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs
#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */
	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int sc_txfifo_size;		/* Tx FIFO size */
	int sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int sc_txfifo_stall;		/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anegticks;		/* autonegotiation ticks */
	int sc_tbi_ticks;		/* tbi ticks */
	int sc_tbi_nrxcfg;		/* count of ICR_RXCFG */
	int sc_tbi_lastnrxcfg;		/* count of ICR_RXCFG (on last tick) */

	int sc_mchash_type;		/* multicast filter offset */

	rndsource_element_t rnd_source;	/* random source */

	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
};
#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
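
/*
 * sc_rxtailp always points at the m_next field of the last mbuf in the
 * receive chain (or at sc_rxhead when the chain is empty), so
 * WM_RXCHAIN_LINK() appends in constant time without walking the chain.
 */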
#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif
#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

#define	ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)
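
/*
 * The _LO/_HI macros above split a descriptor ring base address into
 * the two 32-bit halves the chip's paired base-address registers take;
 * on ports where bus_addr_t is 32 bits the high half is constant zero.
 */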
#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
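
/*
 * Example of the wrap-around case above: with WM_NTXDESC(sc) == 256,
 * syncing 8 descriptors starting at index 252 first syncs indices
 * 252-255 (256 - 252 = 4 descriptors), then wraps to index 0 and syncs
 * the remaining 4.
 */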
#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)
#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
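
/*
 * With align_tweak == 2 the 14-byte Ethernet header starts 2 bytes into
 * the buffer, so the IP header that follows begins at offset 16, a
 * 4-byte boundary, exactly as the note above describes.
 */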
static void	wm_start(struct ifnet *);
static void	wm_watchdog(struct ifnet *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
static int	wm_init(struct ifnet *);
static void	wm_stop(struct ifnet *, int);

static void	wm_reset(struct wm_softc *);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
static int	wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *);
static int	wm_validate_eeprom_checksum(struct wm_softc *);
static void	wm_tick(void *);

static void	wm_set_filter(struct wm_softc *);

static int	wm_intr(void *);
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr(struct wm_softc *, uint32_t);

static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);

static void	wm_gmii_reset(struct wm_softc *);

static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);

static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);

static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);

static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);

static void	wm_gmii_statchg(device_t);

static void	wm_gmii_mediainit(struct wm_softc *);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);

static void	wm_set_spiaddrsize(struct wm_softc *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_is_onboard_nvm_eeprom(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);

static int	wm_read_eeprom_ich8(struct wm_softc *, int, int, uint16_t *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t,
		    uint32_t, uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);

static void	wm_82547_txfifo_stall(void *);
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
static int	wm_check_reset_block(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static int	wm_check_for_link(struct wm_softc *);

CFATTACH_DECL_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, NULL, NULL);
/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01
#define	WMP_F_1000T		0x02
#define	WMP_F_SERDES		0x04
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82567LM_3,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82567LF_3,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "i82801J (LF) LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ 0,			0,
	  NULL,
	  0,			0 },
};
#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */
#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}
static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{

	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}
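
/*
 * On ports where bus_addr_t is 32 bits wide, the sizeof test above is
 * compile-time false and wa_high is simply cleared; descriptor
 * addresses are little-endian on the wire, hence the htole32().
 */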
static void
wm_set_spiaddrsize(struct wm_softc *sc)
{
	uint32_t reg;

	sc->sc_flags |= WM_F_EEPROM_SPI;
	reg = CSR_READ(sc, WMREG_EECD);
	sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
}
static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return (wmp);
	}
	return (NULL);
}
static int
wm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return (1);

	return (0);
}
static void
wm_attach(device_t parent, device_t self, void *aux)
{
	struct wm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	prop_dictionary_t dict;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	size_t cdata_size;
	const char *intrstr = NULL;
	const char *eetype, *xname;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_dma_segment_t seg;
	int memh_valid;
	int i, rseg, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin, io3;
	pcireg_t preg, memtype;
	uint32_t reg;

	sc->sc_dev = self;
	callout_init(&sc->sc_tick_ch, 0);

	wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}
	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	aprint_naive(": Ethernet controller\n");
	aprint_normal(": %s, rev. %d\n", wmp->wmp_name, preg);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (preg < 2) {
			aprint_error_dev(sc->sc_dev,
			    "i82542 must be at least rev. 2\n");
			return;
		}
		if (preg < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	/* Set device properties (mactype) */
	dict = device_properties(sc->sc_dev);
	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
		break;
	default:
		memh_valid = 0;
		break;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else {
		aprint_error_dev(sc->sc_dev,
		    "unable to map device registers\n");
		return;
	}
	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			if (pci_mapreg_type(pa->pa_pc, pa->pa_tag, i) ==
			    PCI_MAPREG_TYPE_IO)
				break;
		}
		if (i == PCI_MAPREG_END)
			aprint_error_dev(sc->sc_dev,
			    "WARNING: unable to find I/O BAR\n");
		else {
			/*
			 * The i8254x doesn't apparently respond when the
			 * I/O BAR is 0, which looks somewhat like it's not
			 * been configured.
			 */
			preg = pci_conf_read(pc, pa->pa_tag, i);
			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: I/O BAR at zero.\n");
			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
			    0, &sc->sc_iot, &sc->sc_ioh,
			    NULL, NULL) == 0) {
				sc->sc_flags |= WM_F_IOH_VALID;
			} else {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: unable to map I/O space\n");
			}
		}
	}
	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
	    NULL)) && error != EOPNOTSUPP) {
		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
		return;
	}
	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
		/*
		 * CSA (Communication Streaming Architecture) is about as
		 * fast as a 32-bit 66MHz PCI bus.
		 */
		sc->sc_flags |= WM_F_CSA;
		sc->sc_bus_speed = 66;
		aprint_verbose_dev(sc->sc_dev,
		    "Communication Streaming Architecture\n");
		if (sc->sc_type == WM_T_82547) {
			callout_init(&sc->sc_txfifo_ch, 0);
			callout_setfunc(&sc->sc_txfifo_ch,
			    wm_82547_txfifo_stall, sc);
			aprint_verbose_dev(sc->sc_dev,
			    "using 82547 Tx FIFO stall work-around\n");
		}
	} else if (sc->sc_type >= WM_T_82571) {
		sc->sc_flags |= WM_F_PCIE;
		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
		    && (sc->sc_type != WM_T_ICH10))
			sc->sc_flags |= WM_F_EEPROM_SEMAPHORE;
		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
	} else {
		reg = CSR_READ(sc, WMREG_STATUS);
		if (reg & STATUS_BUS64)
			sc->sc_flags |= WM_F_BUS64;
		if ((reg & STATUS_PCIX_MODE) != 0) {
			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;

			sc->sc_flags |= WM_F_PCIX;
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
			    PCI_CAP_PCIX,
			    &sc->sc_pcix_offset, NULL) == 0)
				aprint_error_dev(sc->sc_dev,
				    "unable to find PCIX capability\n");
			else if (sc->sc_type != WM_T_82545_3 &&
			    sc->sc_type != WM_T_82546_3) {
				/*
				 * Work around a problem caused by the BIOS
				 * setting the max memory read byte count
				 * incorrectly.
				 */
				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcix_offset + PCI_PCIX_CMD);
				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcix_offset + PCI_PCIX_STATUS);

				bytecnt =
				    (pcix_cmd & PCI_PCIX_CMD_BYTECNT_MASK) >>
				    PCI_PCIX_CMD_BYTECNT_SHIFT;
				maxb =
				    (pcix_sts & PCI_PCIX_STATUS_MAXB_MASK) >>
				    PCI_PCIX_STATUS_MAXB_SHIFT;
				if (bytecnt > maxb) {
					aprint_verbose_dev(sc->sc_dev,
					    "resetting PCI-X MMRBC: %d -> %d\n",
					    512 << bytecnt, 512 << maxb);
					pcix_cmd = (pcix_cmd &
					    ~PCI_PCIX_CMD_BYTECNT_MASK) |
					    (maxb << PCI_PCIX_CMD_BYTECNT_SHIFT);
					pci_conf_write(pa->pa_pc, pa->pa_tag,
					    sc->sc_pcix_offset + PCI_PCIX_CMD,
					    pcix_cmd);
				}
			}
		}
		/*
		 * The quad port adapter is special; it has a PCIX-PCIX
		 * bridge on the board, and can run the secondary bus at
		 * a higher speed.
		 */
		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
			    : 66;
		} else if (sc->sc_flags & WM_F_PCIX) {
			switch (reg & STATUS_PCIXSPD_MASK) {
			case STATUS_PCIXSPD_50_66:
				sc->sc_bus_speed = 66;
				break;
			case STATUS_PCIXSPD_66_100:
				sc->sc_bus_speed = 100;
				break;
			case STATUS_PCIXSPD_100_133:
				sc->sc_bus_speed = 133;
				break;
			default:
				aprint_error_dev(sc->sc_dev,
				    "unknown PCIXSPD %d; assuming 66MHz\n",
				    reg & STATUS_PCIXSPD_MASK);
				sc->sc_bus_speed = 66;
				break;
			}
		} else
			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
	}
	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 *
	 * NOTE: All Tx descriptors must be in the same 4G segment of
	 * memory.  So must Rx descriptors.  We simplify by allocating
	 * both sets within the same 4G segment.
	 */
	WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
	    WM_NTXDESC_82542 : WM_NTXDESC_82544;
	cdata_size = sc->sc_type < WM_T_82544 ?
	    sizeof(struct wm_control_data_82542) :
	    sizeof(struct wm_control_data_82544);
	if ((error = bus_dmamem_alloc(sc->sc_dmat, cdata_size, PAGE_SIZE,
	    (bus_size_t) 0x100000000ULL,
	    &seg, 1, &rseg, 0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate control data, error = %d\n",
		    error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, cdata_size,
	    (void **)&sc->sc_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to map control data, error = %d\n", error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, cdata_size, 1, cdata_size,
	    0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create control data DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, cdata_size, NULL,
	    0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}
	/*
	 * Create the transmit buffer DMA maps.
	 */
	WM_TXQUEUELEN(sc) =
	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
		    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create Tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0,
		    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create Rx DMA map %d error = %d\n",
			    i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}
	/* clear interesting stat counters */
	CSR_READ(sc, WMREG_COLC);
	CSR_READ(sc, WMREG_RXERRC);

	/*
	 * Reset the chip to a known state.
	 */
	wm_reset(sc);

	switch (sc->sc_type) {
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
	case WM_T_80003:
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
		if (wm_check_mng_mode(sc) != 0)
			wm_get_hw_control(sc);
		break;
	default:
		break;
	}
	/*
	 * Get some information about the EEPROM.
	 */
	switch (sc->sc_type) {
	case WM_T_82542_2_0:
	case WM_T_82542_2_1:
	case WM_T_82543:
	case WM_T_82544:
		/* Microwire */
		sc->sc_ee_addrbits = 6;
		break;
	case WM_T_82540:
	case WM_T_82545:
	case WM_T_82545_3:
	case WM_T_82546:
	case WM_T_82546_3:
		/* Microwire */
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_SIZE)
			sc->sc_ee_addrbits = 8;
		else
			sc->sc_ee_addrbits = 6;
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
		break;
	case WM_T_82541:
	case WM_T_82541_2:
	case WM_T_82547:
	case WM_T_82547_2:
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_TYPE) {
			/* SPI */
			wm_set_spiaddrsize(sc);
		} else
			/* Microwire */
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
		break;
	case WM_T_82571:
	case WM_T_82572:
		/* SPI */
		wm_set_spiaddrsize(sc);
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
		break;
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
		if (wm_is_onboard_nvm_eeprom(sc) == 0)
			sc->sc_flags |= WM_F_EEPROM_FLASH;
		else {
			/* SPI */
			wm_set_spiaddrsize(sc);
		}
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
		break;
	case WM_T_80003:
		/* SPI */
		wm_set_spiaddrsize(sc);
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC;
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
		/* Check whether EEPROM is present or not */
		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
			/* Not found */
			aprint_error_dev(sc->sc_dev,
			    "EEPROM PRESENT bit isn't set\n");
			sc->sc_flags |= WM_F_EEPROM_INVALID;
		}
		/* FLASH */
		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_SWFWHW_SYNC;
		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
		    &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
			aprint_error_dev(sc->sc_dev,
			    "can't map FLASH registers\n");
			return;
		}
		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
		    ICH_FLASH_SECTOR_SIZE;
		sc->sc_ich8_flash_bank_size =
		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
		sc->sc_ich8_flash_bank_size -=
		    (reg & ICH_GFPREG_BASE_MASK);
		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
		break;
	default:
		break;
	}
	/*
	 * Defer printing the EEPROM type until after verifying the checksum.
	 * This allows the EEPROM type to be printed correctly in the case
	 * that no EEPROM is attached.
	 */

	/*
	 * Validate the EEPROM checksum.  If the checksum fails, flag
	 * this for later, so we can fail future reads from the EEPROM.
	 */
	if (wm_validate_eeprom_checksum(sc)) {
		/*
		 * Read twice again because some PCI-e parts fail the
		 * first check due to the link being in sleep state.
		 */
		if (wm_validate_eeprom_checksum(sc))
			sc->sc_flags |= WM_F_EEPROM_INVALID;
	}

	/* Set device properties (macflags) */
	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);

	if (sc->sc_flags & WM_F_EEPROM_INVALID)
		aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
	else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
		aprint_verbose_dev(sc->sc_dev, "FLASH\n");
	} else {
		if (sc->sc_flags & WM_F_EEPROM_SPI)
			eetype = "SPI";
		else
			eetype = "MicroWire";
		aprint_verbose_dev(sc->sc_dev,
		    "%u word (%d address bits) %s EEPROM\n",
		    1U << sc->sc_ee_addrbits,
		    sc->sc_ee_addrbits, eetype);
	}
	/*
	 * Read the Ethernet address from the EEPROM, if not first found
	 * in device properties.
	 */
	ea = prop_dictionary_get(dict, "mac-addr");
	if (ea != NULL) {
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
		    sizeof(myea) / sizeof(myea[0]), myea)) {
			aprint_error_dev(sc->sc_dev,
			    "unable to read Ethernet address\n");
			return;
		}
		enaddr[0] = myea[0] & 0xff;
		enaddr[1] = myea[0] >> 8;
		enaddr[2] = myea[1] & 0xff;
		enaddr[3] = myea[1] >> 8;
		enaddr[4] = myea[2] & 0xff;
		enaddr[5] = myea[2] >> 8;
	}
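
	/*
	 * The EEPROM stores the address as three little-endian 16-bit
	 * words, so each word is split into its low and high bytes
	 * above; e.g. a first word of 0x2200 yields enaddr[0] == 0x00
	 * and enaddr[1] == 0x22.
	 */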
	/*
	 * Toggle the LSB of the MAC address on the second port
	 * of the dual port controller.
	 */
	if (sc->sc_type == WM_T_82546 || sc->sc_type == WM_T_82546_3
	    || sc->sc_type == WM_T_82571 || sc->sc_type == WM_T_80003) {
		if ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1)
			enaddr[5] ^= 1;
	}

	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(enaddr));
	/*
	 * Read the config info from the EEPROM, and set up various
	 * bits in the control registers based on their contents.
	 */
	pn = prop_dictionary_get(dict, "i82543-cfg1");
	if (pn != NULL) {
		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
		cfg1 = (uint16_t) prop_number_integer_value(pn);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
			return;
		}
	}

	pn = prop_dictionary_get(dict, "i82543-cfg2");
	if (pn != NULL) {
		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
		cfg2 = (uint16_t) prop_number_integer_value(pn);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
			return;
		}
	}

	if (sc->sc_type >= WM_T_82544) {
		pn = prop_dictionary_get(dict, "i82543-swdpin");
		if (pn != NULL) {
			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
			swdpin = (uint16_t) prop_number_integer_value(pn);
		} else {
			if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
				aprint_error_dev(sc->sc_dev,
				    "unable to read SWDPIN\n");
				return;
			}
		}
	}

	if (cfg1 & EEPROM_CFG1_ILOS)
		sc->sc_ctrl |= CTRL_ILOS;
	if (sc->sc_type >= WM_T_82544) {
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
		    CTRL_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl |=
		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
	}

#if 0
	if (sc->sc_type >= WM_T_82544) {
		if (cfg1 & EEPROM_CFG1_IPS0)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
		if (cfg1 & EEPROM_CFG1_IPS1)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPIO_SHIFT;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl_ext |=
		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_EXT_SWDPIO_SHIFT;
	}
#endif

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif
	/*
	 * Set up some register offsets that are different between
	 * the i82542 and the i82543 and later chips.
	 */
	if (sc->sc_type < WM_T_82543) {
		sc->sc_rdt_reg = WMREG_OLD_RDT0;
		sc->sc_tdt_reg = WMREG_OLD_TDT;
	} else {
		sc->sc_rdt_reg = WMREG_RDT;
		sc->sc_tdt_reg = WMREG_TDT;
	}
	/*
	 * Determine if we're TBI or GMII mode, and initialize the
	 * media structures accordingly.
	 */
	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_82573
	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
		/* STATUS_TBIMODE reserved/reused, can't rely on it */
		wm_gmii_mediainit(sc);
	} else if (sc->sc_type < WM_T_82543 ||
	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
		if (wmp->wmp_flags & WMP_F_1000T)
			aprint_error_dev(sc->sc_dev,
			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
		wm_tbi_mediainit(sc);
	} else {
		if (wmp->wmp_flags & WMP_F_1000X)
			aprint_error_dev(sc->sc_dev,
			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
		wm_gmii_mediainit(sc);
	}

	ifp = &sc->sc_ethercom.ec_if;
	xname = device_xname(sc->sc_dev);
	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = wm_ioctl;
	ifp->if_start = wm_start;
	ifp->if_watchdog = wm_watchdog;
	ifp->if_init = wm_init;
	ifp->if_stop = wm_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
	IFQ_SET_READY(&ifp->if_snd);
	/* Check for jumbo frame */
	switch (sc->sc_type) {
	case WM_T_82573:
		/* XXX limited to 9234 if ASPM is disabled */
		wm_read_eeprom(sc, EEPROM_INIT_3GIO_3, 1, &io3);
		if ((io3 & EEPROM_3GIO_3_ASPM_MASK) != 0)
			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
		break;
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82574:
	case WM_T_80003:
	case WM_T_ICH9:
	case WM_T_ICH10:
		/* XXX limited to 9234 */
		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
		break;
	case WM_T_82542_2_0:
	case WM_T_82542_2_1:
	case WM_T_82583:
	case WM_T_ICH8:
		/* No support for jumbo frame */
		break;
	default:
		/* ETHER_MAX_LEN_JUMBO */
		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
		break;
	}
	/*
	 * If we're a i82543 or greater, we can support VLANs.
	 */
	if (sc->sc_type >= WM_T_82543)
		sc->sc_ethercom.ec_capabilities |=
		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;

	/*
	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
	 * on i82543 and later.
	 */
	if (sc->sc_type >= WM_T_82543) {
		ifp->if_capabilities |=
		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
		    IFCAP_CSUM_TCPv6_Tx |
		    IFCAP_CSUM_UDPv6_Tx;
	}

	/*
	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
	 *
	 *	82541GI (8086:1076) ... no
	 *	82572EI (8086:10b9) ... yes
	 */
	if (sc->sc_type >= WM_T_82571) {
		ifp->if_capabilities |=
		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
	}

	/*
	 * If we're a i82544 or greater (except i82547), we can do
	 * TCP segmentation offload.
	 */
	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
		ifp->if_capabilities |= IFCAP_TSOv4;
	}

	if (sc->sc_type >= WM_T_82571) {
		ifp->if_capabilities |= IFCAP_TSOv6;
	}
	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET, 0);
#ifdef WM_EVENT_COUNTERS
	/* Attach event counters. */
	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
	    NULL, xname, "txsstall");
	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
	    NULL, xname, "txdstall");
	evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
	    NULL, xname, "txfifo_stall");
	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
	    NULL, xname, "txdw");
	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
	    NULL, xname, "txqe");
	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
	    NULL, xname, "rxintr");
	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
	    NULL, xname, "linkintr");

	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
	    NULL, xname, "rxipsum");
	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
	    NULL, xname, "rxtusum");
	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
	    NULL, xname, "txipsum");
	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
	    NULL, xname, "txtusum");
	evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
	    NULL, xname, "txtusum6");

	evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
	    NULL, xname, "txtso");
	evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
	    NULL, xname, "txtso6");
	evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
	    NULL, xname, "txtsopain");

	for (i = 0; i < WM_NTXSEGS; i++) {
		sprintf(wm_txseg_evcnt_names[i], "txseg%d", i);
		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
		    NULL, xname, wm_txseg_evcnt_names[i]);
	}

	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
	    NULL, xname, "txdrop");

	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
	    NULL, xname, "tu");

	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
	    NULL, xname, "tx_xoff");
	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
	    NULL, xname, "tx_xon");
	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
	    NULL, xname, "rx_xoff");
	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
	    NULL, xname, "rx_xon");
	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
	    NULL, xname, "rx_macctl");
#endif /* WM_EVENT_COUNTERS */
	if (pmf_device_register(self, NULL, NULL))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");

	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < WM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
 fail_3:
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_1:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
	    cdata_size);
 fail_0:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
	return;
}
/*
 * wm_tx_offload:
 *
 *	Set up TCP/IP checksumming parameters for the
 *	specified packet.
 */
static int
wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
    uint8_t *fieldsp)
{
	struct mbuf *m0 = txs->txs_mbuf;
	struct livengood_tcpip_ctxdesc *t;
	uint32_t ipcs, tucs, cmd, cmdlen, seg;
	uint32_t ipcse;
	struct ether_header *eh;
	int offset, iphl;
	uint8_t fields = 0;

	/*
	 * XXX It would be nice if the mbuf pkthdr had offset
	 * fields for the protocol headers.
	 */

	eh = mtod(m0, struct ether_header *);
	switch (htons(eh->ether_type)) {
	case ETHERTYPE_IP:
	case ETHERTYPE_IPV6:
		offset = ETHER_HDR_LEN;
		break;

	case ETHERTYPE_VLAN:
		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		break;

	default:
		/*
		 * Don't support this protocol or encapsulation.
		 */
		*fieldsp = 0;
		*cmdp = 0;
		return (0);
	}

	if ((m0->m_pkthdr.csum_flags &
	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
	} else {
		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
	}
	ipcse = offset + iphl - 1;

	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
	seg = 0;

	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
		int hlen = offset + iphl;
		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;

		if (__predict_false(m0->m_len <
				    (hlen + sizeof(struct tcphdr)))) {
			/*
			 * TCP/IP headers are not in the first mbuf; we need
			 * to do this the slow and painful way.  Let's just
			 * hope this doesn't happen very often.
			 */
			struct tcphdr th;

			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);

			m_copydata(m0, hlen, sizeof(th), &th);
			if (v4) {
				struct ip ip;

				m_copydata(m0, offset, sizeof(ip), &ip);
				ip.ip_len = 0;
				m_copyback(m0,
				    offset + offsetof(struct ip, ip_len),
				    sizeof(ip.ip_len), &ip.ip_len);
				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
			} else {
				struct ip6_hdr ip6;

				m_copydata(m0, offset, sizeof(ip6), &ip6);
				ip6.ip6_plen = 0;
				m_copyback(m0,
				    offset + offsetof(struct ip6_hdr, ip6_plen),
				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
			}
			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
			    sizeof(th.th_sum), &th.th_sum);

			hlen += th.th_off << 2;
		} else {
			/*
			 * TCP/IP headers are in the first mbuf; we can do
			 * this the easy way.
			 */
			struct tcphdr *th;

			if (v4) {
				struct ip *ip =
				    (void *)(mtod(m0, char *) + offset);
				th = (void *)(mtod(m0, char *) + hlen);

				ip->ip_len = 0;
				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
			} else {
				struct ip6_hdr *ip6 =
				    (void *)(mtod(m0, char *) + offset);
				th = (void *)(mtod(m0, char *) + hlen);

				ip6->ip6_plen = 0;
				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
			}

			hlen += th->th_off << 2;
		}

		if (v4) {
			WM_EVCNT_INCR(&sc->sc_ev_txtso);
			cmdlen |= WTX_TCPIP_CMD_IP;
		} else {
			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
			ipcse = 0;
		}
		cmd |= WTX_TCPIP_CMD_TSE;
		cmdlen |= WTX_TCPIP_CMD_TSE |
		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
	}
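	/*
	 * Example (illustrative, not from the original source): for a TSO
	 * TCP/IPv4 packet with no IP or TCP options, hlen works out to
	 * 14 (Ethernet) + 20 (IP) + 20 (TCP) = 54 bytes.  The hardware
	 * replicates those 54 header bytes in front of each MSS-sized
	 * chunk of payload when it segments the packet.
	 */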
	/*
	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
	 * offload feature, if we load the context descriptor, we
	 * MUST provide valid values for IPCSS and TUCSS fields.
	 */

	ipcs = WTX_TCPIP_IPCSS(offset) |
	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
	    WTX_TCPIP_IPCSE(ipcse);
	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
		fields |= WTX_IXSM;
	}

	if (m0->m_pkthdr.csum_flags &
	    (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
		fields |= WTX_TXSM;
		tucs = WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset +
		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
	} else if ((m0->m_pkthdr.csum_flags &
	    (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
		fields |= WTX_TXSM;
		tucs = WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset +
		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
	} else {
		/* Just initialize it to a valid TCP context. */
		tucs = WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
	}

	/* Fill in the context descriptor. */
	t = (struct livengood_tcpip_ctxdesc *)
	    &sc->sc_txdescs[sc->sc_txnext];
	t->tcpip_ipcs = htole32(ipcs);
	t->tcpip_tucs = htole32(tucs);
	t->tcpip_cmdlen = htole32(cmdlen);
	t->tcpip_seg = htole32(seg);
	WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);

	sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
	txs->txs_ndesc++;

	*cmdp = cmd;
	*fieldsp = fields;

	return (0);
}
static void
wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
{
	struct mbuf *m;
	int i;

	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
		    m->m_data, m->m_len, m->m_flags);
	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
	    i, i == 1 ? "" : "s");
}
/*
 * wm_82547_txfifo_stall:
 *
 *	Callout used to wait for the 82547 Tx FIFO to drain,
 *	reset the FIFO pointers, and restart packet transmission.
 */
static void
wm_82547_txfifo_stall(void *arg)
{
	struct wm_softc *sc = arg;

	if (sc->sc_txfifo_stall) {
		if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
			/*
			 * Packets have drained.  Stop transmitter, reset
			 * FIFO pointers, restart transmitter, and kick
			 * the packet queue.
			 */
			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
			CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
			CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
			CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
			CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
			CSR_WRITE(sc, WMREG_TCTL, tctl);
			CSR_WRITE_FLUSH(sc);

			sc->sc_txfifo_head = 0;
			sc->sc_txfifo_stall = 0;
			wm_start(&sc->sc_ethercom.ec_if);
		} else {
			/*
			 * Still waiting for packets to drain; try again in
			 * another tick.
			 */
			callout_schedule(&sc->sc_txfifo_ch, 1);
		}
	}
}
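/*
 * Note on the drain check in wm_82547_txfifo_stall(): it compares the
 * descriptor ring head/tail (TDH/TDT) and the internal Tx data FIFO
 * head/tail pointers (TDFH/TDFT, TDFHS/TDFTS).  When each pair matches,
 * no packet data remains queued, so it is safe to rewind the FIFO
 * pointers to sc_txfifo_addr.
 */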
/*
 * wm_82547_txfifo_bugchk:
 *
 *	Check for bug condition in the 82547 Tx FIFO.  We need to
 *	prevent enqueueing a packet that would wrap around the end
 *	of the Tx FIFO ring buffer, otherwise the chip will croak.
 *
 *	We do this by checking the amount of space before the end
 *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
 *	the Tx FIFO, wait for all remaining packets to drain, reset
 *	the internal FIFO pointers to the beginning, and restart
 *	transmission on the interface.
 */
#define	WM_FIFO_HDR		0x10
#define	WM_82547_PAD_LEN	0x3e0
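/*
 * Example of the accounting below: a 1514-byte frame consumes
 * roundup(1514 + WM_FIFO_HDR, WM_FIFO_HDR) = 1536 bytes of Tx FIFO
 * space -- the frame plus a 16-byte FIFO header, rounded up to a
 * 16-byte boundary.
 */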
static int
wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
{
	int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);

	/* Just return if already stalled. */
	if (sc->sc_txfifo_stall)
		return (1);

	if (sc->sc_mii.mii_media_active & IFM_FDX) {
		/* Stall only occurs in half-duplex mode. */
		goto send_packet;
	}

	if (len >= WM_82547_PAD_LEN + space) {
		sc->sc_txfifo_stall = 1;
		callout_schedule(&sc->sc_txfifo_ch, 1);
		return (1);
	}

 send_packet:
	sc->sc_txfifo_head += len;
	if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
		sc->sc_txfifo_head -= sc->sc_txfifo_size;

	return (0);
}
/*
 * wm_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
static void
wm_start(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct mbuf *m0;
	struct m_tag *mtag;
	struct wm_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
	bus_addr_t curaddr;
	bus_size_t seglen, curlen;
	uint32_t cksumcmd;
	uint8_t cksumfields;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors.
	 */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: have packet to transmit: %p\n",
		    device_xname(sc->sc_dev), m0));

		/* Get a work queue entry. */
		if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
			wm_txintr(sc);
			if (sc->sc_txsfree == 0) {
				DPRINTF(WM_DEBUG_TX,
				    ("%s: TX: no free job descriptors\n",
					device_xname(sc->sc_dev)));
				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
				break;
			}
		}

		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		use_tso = (m0->m_pkthdr.csum_flags &
		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;

		/*
		 * So says the Linux driver:
		 * The controller does a simple calculation to make sure
		 * there is enough room in the FIFO before initiating the
		 * DMA for each buffer.  The calc is:
		 *	4 = ceil(buffer len / MSS)
		 * To make sure we don't overrun the FIFO, adjust the max
		 * buffer len if the MSS drops.
		 */
		dmamap->dm_maxsegsz =
		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
		    ? m0->m_pkthdr.segsz << 2
		    : WTX_MAX_LEN;
		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  For the too-many-segments
		 * case, we simply report an error and drop the packet,
		 * since we can't sanely copy a jumbo packet to a single
		 * buffer.
		 */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
				log(LOG_ERR, "%s: Tx packet consumes too many "
				    "DMA segments, dropping...\n",
				    device_xname(sc->sc_dev));
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				wm_dump_mbuf_chain(sc, m0);
				m_freem(m0);
				continue;
			}
			/*
			 * Short on resources, just stop for now.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: dmamap load failed: %d\n",
			    device_xname(sc->sc_dev), error));
			break;
		}

		segs_needed = dmamap->dm_nsegs;
		if (use_tso) {
			/* For sentinel descriptor; see below. */
			segs_needed++;
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.  Note, we always reserve one descriptor
		 * at the end of the ring due to the semantics of the
		 * TDT register, plus one more in the event we need
		 * to load offload context.
		 */
		if (segs_needed > sc->sc_txfree - 2) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: need %d (%d) descriptors, have %d\n",
			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
			    segs_needed, sc->sc_txfree - 1));
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
			break;
		}

		/*
		 * Check for 82547 Tx FIFO bug.  We need to do this
		 * once we know we can transmit the packet, since we
		 * do some internal FIFO space accounting here.
		 */
		if (sc->sc_type == WM_T_82547 &&
		    wm_82547_txfifo_bugchk(sc, m0)) {
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: 82547 Tx FIFO bug detected\n",
			    device_xname(sc->sc_dev)));
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: packet has %d (%d) DMA segments\n",
		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));

		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);

		/*
		 * Store a pointer to the packet so that we can free it
		 * later.
		 *
		 * Initially, we consider the number of descriptors the
		 * packet uses the number of DMA segments.  This may be
		 * incremented by 1 if we do checksum offload (a descriptor
		 * is used to set the checksum context).
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_ndesc = segs_needed;

		/* Set up offload parameters for this packet. */
		if (m0->m_pkthdr.csum_flags &
		    (M_CSUM_TSOv4|M_CSUM_TSOv6|
		    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
		    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
			if (wm_tx_offload(sc, txs, &cksumcmd,
			    &cksumfields) != 0) {
				/* Error message already displayed. */
				bus_dmamap_unload(sc->sc_dmat, dmamap);
				continue;
			}
		} else {
			cksumcmd = 0;
			cksumfields = 0;
		}

		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Initialize the transmit descriptor.
		 */
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs; seg++) {
			for (seglen = dmamap->dm_segs[seg].ds_len,
			     curaddr = dmamap->dm_segs[seg].ds_addr;
			     seglen != 0;
			     curaddr += curlen, seglen -= curlen,
			     nexttx = WM_NEXTTX(sc, nexttx)) {
				curlen = seglen;

				/*
				 * So says the Linux driver:
				 * Work around for premature descriptor
				 * write-backs in TSO mode.  Append a
				 * 4-byte sentinel descriptor.
				 */
				if (use_tso &&
				    seg == dmamap->dm_nsegs - 1 &&
				    curlen > 8)
					curlen -= 4;

				wm_set_dma_addr(
				    &sc->sc_txdescs[nexttx].wtx_addr,
				    curaddr);
				sc->sc_txdescs[nexttx].wtx_cmdlen =
				    htole32(cksumcmd | curlen);
				sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
				    0;
				sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
				    cksumfields;
				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
				lasttx = nexttx;

				DPRINTF(WM_DEBUG_TX,
				    ("%s: TX: desc %d: low 0x%08lx, "
				    "len 0x%04x\n",
				    device_xname(sc->sc_dev), nexttx,
				    curaddr & 0xffffffffUL, (unsigned)curlen));
			}
		}

		KASSERT(lasttx != -1);

		/*
		 * Set up the command byte on the last descriptor of
		 * the packet.  If we're in the interrupt delay window,
		 * delay the interrupt.
		 */
		sc->sc_txdescs[lasttx].wtx_cmdlen |=
		    htole32(WTX_CMD_EOP | WTX_CMD_RS);

		/*
		 * If VLANs are enabled and the packet has a VLAN tag, set
		 * up the descriptor to encapsulate the packet for us.
		 *
		 * This is only valid on the last descriptor of the packet.
		 */
		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
			sc->sc_txdescs[lasttx].wtx_cmdlen |=
			    htole32(WTX_CMD_VLE);
			sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
		}

		txs->txs_lastdesc = lasttx;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
		    device_xname(sc->sc_dev),
		    lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));

		/* Sync the descriptors we're using. */
		WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Give the packet to the chip. */
		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: finished transmitting packet, job %d\n",
		    device_xname(sc->sc_dev), sc->sc_txsnext));

		/* Advance the tx pointer. */
		sc->sc_txfree -= txs->txs_ndesc;
		sc->sc_txnext = nexttx;

		sc->sc_txsfree--;
		sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);

#if NBPFILTER > 0
		/* Pass the packet to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
	}

	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
		/* No more slots; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txfree != ofree) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
/*
 * wm_watchdog:		[ifnet interface function]
 *
 *	Watchdog timer handler.
 */
static void
wm_watchdog(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;

	/*
	 * Since we're using delayed interrupts, sweep up
	 * before we report an error.
	 */
	wm_txintr(sc);

	if (sc->sc_txfree != WM_NTXDESC(sc)) {
		log(LOG_ERR,
		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
		    device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
		    sc->sc_txnext);
		ifp->if_oerrors++;

		/* Reset the interface. */
		(void) wm_init(ifp);
	}

	/* Try to get more packets going. */
	wm_start(ifp);
}
/*
 * wm_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 */
static int
wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct sockaddr_dl *sdl;
	int s, error, diff;

	s = splnet();

	switch (cmd) {
	case SIOCSIFFLAGS:
		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
			break;
		if (ifp->if_flags & IFF_UP) {
			diff = (ifp->if_flags ^ sc->sc_if_flags)
			    & (IFF_PROMISC | IFF_ALLMULTI);
			if ((diff & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
				/*
				 * If the difference between last flag and
				 * new flag is only IFF_PROMISC or
				 * IFF_ALLMULTI, set multicast filter only
				 * (don't reset to prevent link down).
				 */
				wm_set_filter(sc);
			} else {
				/*
				 * Reset the interface to pick up changes in
				 * any other flags that affect the hardware
				 * state.
				 */
				wm_init(ifp);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				wm_stop(ifp, 1);
		}
		sc->sc_if_flags = ifp->if_flags;
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		/* Flow control requires full-duplex mode. */
		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
		    (ifr->ifr_media & IFM_FDX) == 0)
			ifr->ifr_media &= ~IFM_ETH_FMASK;
		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
				/* We can do both TXPAUSE and RXPAUSE. */
				ifr->ifr_media |=
				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
			}
			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
		}
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;
	case SIOCINITIFADDR:
		if (ifa->ifa_addr->sa_family == AF_LINK) {
			sdl = satosdl(ifp->if_dl->ifa_addr);
			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
			    LLADDR(satosdl(ifa->ifa_addr)),
			    ifp->if_addrlen);
			/* unicast address is first multicast entry */
			wm_set_filter(sc);
			error = 0;
			break;
		}
		/* Fall through for rest */
	default:
		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
			break;

		error = 0;

		if (cmd == SIOCSIFCAP)
			error = (*ifp->if_init)(ifp);
		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			wm_set_filter(sc);
		}
		break;
	}

	/* Try to get more packets going. */
	wm_start(ifp);

	splx(s);
	return (error);
}
/*
 * wm_intr:
 *
 *	Interrupt service routine.
 */
static int
wm_intr(void *arg)
{
	struct wm_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t icr;
	int handled = 0;

	while (1 /* CONSTCOND */) {
		icr = CSR_READ(sc, WMREG_ICR);
		if ((icr & sc->sc_icr) == 0)
			break;
#if NRND > 0
		if (RND_ENABLED(&sc->rnd_source))
			rnd_add_uint32(&sc->rnd_source, icr);
#endif
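		/*
		 * Note: on these chips a read of ICR clears the cause bits
		 * that were set, which is why the loop simply re-reads ICR
		 * until no interrupt cause we care about remains asserted.
		 */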
		handled = 1;

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: got Rx intr 0x%08x\n",
			    device_xname(sc->sc_dev),
			    icr & (ICR_RXDMT0|ICR_RXT0)));
			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
		}
#endif
		wm_rxintr(sc);

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & ICR_TXDW) {
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: got TXDW interrupt\n",
			    device_xname(sc->sc_dev)));
			WM_EVCNT_INCR(&sc->sc_ev_txdw);
		}
#endif
		wm_txintr(sc);

		if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
			wm_linkintr(sc, icr);
		}

		if (icr & ICR_RXO) {
#if defined(WM_DEBUG)
			log(LOG_WARNING, "%s: Receive overrun\n",
			    device_xname(sc->sc_dev));
#endif /* defined(WM_DEBUG) */
		}
	}

	if (handled) {
		/* Try to get more packets going. */
		wm_start(ifp);
	}

	return (handled);
}
/*
 * wm_txintr:
 *
 *	Helper; handle transmit interrupts.
 */
static void
wm_txintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_txsoft *txs;
	uint8_t status;
	int i;

	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through the Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
	     i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
		txs = &sc->sc_txsoft[i];

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));

		WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		status =
		    sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
		if ((status & WTX_ST_DD) == 0) {
			WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
			    BUS_DMASYNC_PREREAD);
			break;
		}

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: job %d done: descs %d..%d\n",
		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
		    txs->txs_lastdesc));

		/*
		 * XXX We should probably be using the statistics
		 * XXX registers, but I don't know if they exist
		 * XXX on chips before the i82544.
		 */

#ifdef WM_EVENT_COUNTERS
		if (status & WTX_ST_TU)
			WM_EVCNT_INCR(&sc->sc_ev_tu);
#endif /* WM_EVENT_COUNTERS */

		if (status & (WTX_ST_EC|WTX_ST_LC)) {
			ifp->if_oerrors++;
			if (status & WTX_ST_LC)
				log(LOG_WARNING, "%s: late collision\n",
				    device_xname(sc->sc_dev));
			else if (status & WTX_ST_EC) {
				ifp->if_collisions += 16;
				log(LOG_WARNING, "%s: excessive collisions\n",
				    device_xname(sc->sc_dev));
			}
		}

		sc->sc_txfree += txs->txs_ndesc;
		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;
	}

	/* Update the dirty transmit buffer pointer. */
	sc->sc_txsdirty = i;
	DPRINTF(WM_DEBUG_TX,
	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer.
	 */
	if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
		ifp->if_timer = 0;
}
/*
 * wm_rxintr:
 *
 *	Helper; handle receive interrupts.
 */
static void
wm_rxintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_rxsoft *rxs;
	struct mbuf *m;
	int i, len;
	uint8_t status, errors;
	uint16_t vlantag;

	for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: checking descriptor %d\n",
		    device_xname(sc->sc_dev), i));

		WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		status = sc->sc_rxdescs[i].wrx_status;
		errors = sc->sc_rxdescs[i].wrx_errors;
		len = le16toh(sc->sc_rxdescs[i].wrx_len);
		vlantag = sc->sc_rxdescs[i].wrx_special;

		if ((status & WRX_ST_DD) == 0) {
			/*
			 * We have processed all of the receive descriptors.
			 */
			WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
			break;
		}

		if (__predict_false(sc->sc_rxdiscard)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: discarding contents of descriptor %d\n",
			    device_xname(sc->sc_dev), i));
			WM_INIT_RXDESC(sc, i);
			if (status & WRX_ST_EOP) {
				/* Reset our state. */
				DPRINTF(WM_DEBUG_RX,
				    ("%s: RX: resetting rxdiscard -> 0\n",
				    device_xname(sc->sc_dev)));
				sc->sc_rxdiscard = 0;
			}
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		m = rxs->rxs_mbuf;

		/*
		 * Add a new receive buffer to the ring, unless of
		 * course the length is zero.  Treat the latter as a
		 * failed mapping.
		 */
		if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
			/*
			 * Failed, throw away what we've done so
			 * far, and discard the rest of the packet.
			 */
			ifp->if_ierrors++;
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			WM_INIT_RXDESC(sc, i);
			if ((status & WRX_ST_EOP) == 0)
				sc->sc_rxdiscard = 1;
			if (sc->sc_rxhead != NULL)
				m_freem(sc->sc_rxhead);
			WM_RXCHAIN_RESET(sc);
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: Rx buffer allocation failed, "
			    "dropping packet%s\n", device_xname(sc->sc_dev),
			    sc->sc_rxdiscard ? " (discard)" : ""));
			continue;
		}

		m->m_len = len;
		sc->sc_rxlen += len;
		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: buffer at %p len %d\n",
		    device_xname(sc->sc_dev), m->m_data, len));

		/*
		 * If this is not the end of the packet, keep
		 * looking.
		 */
		if ((status & WRX_ST_EOP) == 0) {
			WM_RXCHAIN_LINK(sc, m);
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: not yet EOP, rxlen -> %d\n",
			    device_xname(sc->sc_dev), sc->sc_rxlen));
			continue;
		}

		/*
		 * Okay, we have the entire packet now.  The chip is
		 * configured to include the FCS (not all chips can
		 * be configured to strip it), so we need to trim it.
		 * May need to adjust length of previous mbuf in the
		 * chain if the current mbuf is too short.
		 */
		if (m->m_len < ETHER_CRC_LEN) {
			sc->sc_rxtail->m_len -= (ETHER_CRC_LEN - m->m_len);
			m->m_len = 0;
		} else {
			m->m_len -= ETHER_CRC_LEN;
		}
		len = sc->sc_rxlen - ETHER_CRC_LEN;

		WM_RXCHAIN_LINK(sc, m);

		*sc->sc_rxtailp = NULL;
		m = sc->sc_rxhead;

		WM_RXCHAIN_RESET(sc);

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: have entire packet, len -> %d\n",
		    device_xname(sc->sc_dev), len));

		/*
		 * If an error occurred, update stats and drop the packet.
		 */
		if (errors &
		    (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
			ifp->if_ierrors++;
			if (errors & WRX_ER_SE)
				log(LOG_WARNING, "%s: symbol error\n",
				    device_xname(sc->sc_dev));
			else if (errors & WRX_ER_SEQ)
				log(LOG_WARNING, "%s: receive sequence error\n",
				    device_xname(sc->sc_dev));
			else if (errors & WRX_ER_CE)
				log(LOG_WARNING, "%s: CRC error\n",
				    device_xname(sc->sc_dev));
			m_freem(m);
			continue;
		}

		/*
		 * No errors.  Receive the packet.
		 */
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = len;

		/*
		 * If VLANs are enabled, VLAN packets have been unwrapped
		 * for us.  Associate the tag with the packet.
		 */
		if ((status & WRX_ST_VP) != 0) {
			VLAN_INPUT_TAG(ifp, m,
			    le16toh(vlantag),
			    continue);
		}

		/*
		 * Set up checksum info for this packet.
		 */
		if ((status & WRX_ST_IXSM) == 0) {
			if (status & WRX_ST_IPCS) {
				WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
				if (errors & WRX_ER_IPE)
					m->m_pkthdr.csum_flags |=
					    M_CSUM_IPv4_BAD;
			}
			if (status & WRX_ST_TCPCS) {
				/*
				 * Note: we don't know if this was TCP or UDP,
				 * so we just set both bits, and expect the
				 * upper layers to deal.
				 */
				WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
				m->m_pkthdr.csum_flags |=
				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
				if (errors & WRX_ER_TCPE)
					m->m_pkthdr.csum_flags |=
					    M_CSUM_TCP_UDP_BAD;
			}
		}

		ifp->if_ipackets++;

#if NBPFILTER > 0
		/* Pass this up to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;

	DPRINTF(WM_DEBUG_RX,
	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
}
/*
 * wm_linkintr:
 *
 *	Helper; handle link interrupts.
 */
static void
wm_linkintr(struct wm_softc *sc, uint32_t icr)
{
	uint32_t status;

	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
	    __func__));

	/*
	 * If we get a link status interrupt on a 1000BASE-T
	 * device, just fall into the normal MII tick path.
	 */
	if (sc->sc_flags & WM_F_HAS_MII) {
		if (icr & ICR_LSC) {
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: LSC -> mii_tick\n",
			    device_xname(sc->sc_dev)));
			mii_tick(&sc->sc_mii);
			if (sc->sc_type == WM_T_82543) {
				int miistatus, active;

				/*
				 * With 82543, we need to force speed and
				 * duplex on the MAC equal to what the PHY
				 * speed and duplex configuration is.
				 */
				miistatus = sc->sc_mii.mii_media_status;

				if (miistatus & IFM_ACTIVE) {
					active = sc->sc_mii.mii_media_active;
					sc->sc_ctrl &= ~(CTRL_SPEED_MASK
					    | CTRL_FD);
					switch (IFM_SUBTYPE(active)) {
					case IFM_10_T:
						sc->sc_ctrl |= CTRL_SPEED_10;
						break;
					case IFM_100_TX:
						sc->sc_ctrl |= CTRL_SPEED_100;
						break;
					case IFM_1000_T:
						sc->sc_ctrl |= CTRL_SPEED_1000;
						break;
					default:
						/*
						 * Should not enter here.
						 */
						printf("unknown media (%x)\n",
						    active);
						break;
					}
					if (active & IFM_FDX)
						sc->sc_ctrl |= CTRL_FD;
					CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
				}
			}
		} else if (icr & ICR_RXSEQ) {
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK Receive sequence error\n",
			    device_xname(sc->sc_dev)));
		}
		return;
	}

	status = CSR_READ(sc, WMREG_STATUS);
	if (icr & ICR_LSC) {
		if (status & STATUS_LU) {
			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
			    device_xname(sc->sc_dev),
			    (status & STATUS_FD) ? "FDX" : "HDX"));
			/*
			 * NOTE: CTRL will update TFCE and RFCE automatically,
			 * so we should update sc->sc_ctrl
			 */
			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
			sc->sc_fcrtl &= ~FCRTL_XONE;
			if (status & STATUS_FD)
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
			else
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
			if (sc->sc_ctrl & CTRL_TFCE)
				sc->sc_fcrtl |= FCRTL_XONE;
			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
			    WMREG_OLD_FCRTL : WMREG_FCRTL,
			    sc->sc_fcrtl);
			sc->sc_tbi_linkup = 1;
		} else {
			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
			    device_xname(sc->sc_dev)));
			sc->sc_tbi_linkup = 0;
		}
		wm_tbi_set_linkled(sc);
	} else if (icr & ICR_RXCFG) {
		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
		    device_xname(sc->sc_dev)));
		sc->sc_tbi_nrxcfg++;
		wm_check_for_link(sc);
	} else if (icr & ICR_RXSEQ) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: Receive sequence error\n",
		    device_xname(sc->sc_dev)));
	}
}
/*
 * wm_tick:
 *
 *	One second timer, used to check link status, sweep up
 *	completed transmit jobs, etc.
 */
static void
wm_tick(void *arg)
{
	struct wm_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	if (sc->sc_type >= WM_T_82542_2_1) {
		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
	}

	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);

	if (sc->sc_flags & WM_F_HAS_MII)
		mii_tick(&sc->sc_mii);
	else
		wm_tbi_check_link(sc);

	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
}
/*
 * wm_reset:
 *
 *	Reset the i82542 chip.
 */
static void
wm_reset(struct wm_softc *sc)
{
	uint32_t reg, func, mask;
	int i;

	/*
	 * Allocate on-chip memory according to the MTU size.
	 * The Packet Buffer Allocation register must be written
	 * before the chip is reset.
	 */
	switch (sc->sc_type) {
	case WM_T_82547:
	case WM_T_82547_2:
		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
		    PBA_22K : PBA_30K;
		sc->sc_txfifo_head = 0;
		sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
		sc->sc_txfifo_size =
		    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
		sc->sc_txfifo_stall = 0;
		break;
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_80003:
		sc->sc_pba = PBA_32K;
		break;
	case WM_T_82573:
		sc->sc_pba = PBA_12K;
		break;
	case WM_T_82574:
	case WM_T_82583:
		sc->sc_pba = PBA_20K;
		break;
	case WM_T_ICH8:
		sc->sc_pba = PBA_8K;
		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
		break;
	case WM_T_ICH9:
	case WM_T_ICH10:
		sc->sc_pba = PBA_10K;
		break;
	default:
		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
		    PBA_40K : PBA_48K;
		break;
	}
	CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);

	/* Prevent the PCI-E bus from sticking */
	if (sc->sc_flags & WM_F_PCIE) {
		int timeout = 800;

		sc->sc_ctrl |= CTRL_GIO_M_DIS;
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

		while (timeout--) {
			if ((CSR_READ(sc, WMREG_STATUS)
			    & STATUS_GIO_M_ENA) == 0)
				break;
			delay(100);
		}
	}

	/* clear interrupt */
	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);

	/* Stop the transmit and receive processes. */
	CSR_WRITE(sc, WMREG_RCTL, 0);
	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);

	/* set_tbi_sbp_82543() */

	delay(10*1000);

	/* Must acquire the MDIO ownership before MAC reset */
	switch (sc->sc_type) {
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
		i = 0;
		reg = CSR_READ(sc, WMREG_EXTCNFCTR)
		    | EXTCNFCTR_MDIO_SW_OWNERSHIP;
		do {
			CSR_WRITE(sc, WMREG_EXTCNFCTR,
			    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
			reg = CSR_READ(sc, WMREG_EXTCNFCTR);
			if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
				break;
			reg |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
			delay(2*1000);
			i++;
		} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
		break;
	default:
		break;
	}

	/*
	 * 82541 Errata 29? & 82547 Errata 28?
	 * See also the description about PHY_RST bit in CTRL register
	 * in 8254x_GBe_SDM.pdf.
	 */
	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
		CSR_WRITE(sc, WMREG_CTRL,
		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
		delay(5000);
	}

	switch (sc->sc_type) {
	case WM_T_82544:	/* XXX check whether WM_F_IOH_VALID is set */
	case WM_T_82541:
	case WM_T_82541_2:
	case WM_T_82547:
	case WM_T_82547_2:
		/*
		 * On some chipsets, a reset through a memory-mapped write
		 * cycle can cause the chip to reset before completing the
		 * write cycle.  This causes major headache that can be
		 * avoided by issuing the reset via indirect register writes
		 * through I/O space.
		 *
		 * So, if we successfully mapped the I/O BAR at attach time,
		 * use that.  Otherwise, try our luck with a memory-mapped
		 * reset.
		 */
		if (sc->sc_flags & WM_F_IOH_VALID)
			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
		else
			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
		break;
	case WM_T_82545_3:
	case WM_T_82546_3:
		/* Use the shadow control register on these chips. */
		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
		break;
	case WM_T_80003:
		func = (CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1;
		mask = func ? SWFW_PHY1_SM : SWFW_PHY0_SM;
		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
		wm_get_swfw_semaphore(sc, mask);
		CSR_WRITE(sc, WMREG_CTRL, reg);
		wm_put_swfw_semaphore(sc, mask);
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
		if (wm_check_reset_block(sc) == 0)
			reg |= CTRL_PHY_RESET;
		wm_get_swfwhw_semaphore(sc);
		CSR_WRITE(sc, WMREG_CTRL, reg);
		delay(20*1000);
		wm_put_swfwhw_semaphore(sc);
		break;
	case WM_T_82542_2_0:
	case WM_T_82542_2_1:
	default:
		/* Everything else can safely use the documented method. */
		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
		break;
	}

	wm_get_cfg_done(sc);

	/* reload EEPROM */
	switch (sc->sc_type) {
	case WM_T_82542_2_0:
	case WM_T_82542_2_1:
	case WM_T_82543:
	case WM_T_82544:
		delay(10);
		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
		delay(2000);
		break;
	case WM_T_82540:
	case WM_T_82545:
	case WM_T_82546:
		delay(5*1000);
		/* XXX Disable HW ARPs on ASF enabled adapters */
		break;
	case WM_T_82541:
	case WM_T_82541_2:
	case WM_T_82547:
	case WM_T_82547_2:
		delay(20000);
		/* XXX Disable HW ARPs on ASF enabled adapters */
		break;
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
			delay(10);
			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
		}
		/* check EECD_EE_AUTORD */
		wm_get_auto_rd_done(sc);
		/*
		 * Phy configuration from NVM just starts after EECD_AUTO_RD
		 * is set.
		 */
		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
		    || (sc->sc_type == WM_T_82583))
			delay(25*1000);
		break;
	case WM_T_80003:
	case WM_T_ICH8:
	case WM_T_ICH9:
		/* check EECD_EE_AUTORD */
		wm_get_auto_rd_done(sc);
		break;
	case WM_T_ICH10:	/* & PCH */
		wm_lan_init_done(sc);
		break;
	default:
		panic("%s: unknown type\n", __func__);
	}

	/* reload sc_ctrl */
	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);

	/* Wait for the reset bit to clear. */
	for (i = 0; i < 1000; i++) {
		if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0)
			break;
		delay(20);
	}

	if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
		log(LOG_ERR, "%s: reset failed to complete\n",
		    device_xname(sc->sc_dev));
}
/*
 * wm_init:		[ifnet interface function]
 *
 *	Initialize the interface.  Must be called at splnet().
 */
static int
wm_init(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct wm_rxsoft *rxs;
	int i, error = 0, val = 0;
	uint32_t reg;

	/*
	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGMENT is set.
	 * There is a small but measurable benefit to avoiding the adjusment
	 * of the descriptor so that the headers are aligned, for normal mtu,
	 * on such platforms.  One possibility is that the DMA itself is
	 * slightly more efficient if the front of the entire packet (instead
	 * of the front of the headers) is aligned.
	 *
	 * Note we must always set align_tweak to 0 if we are using
	 * jumbo frames.
	 */
#ifdef __NO_STRICT_ALIGNMENT
	sc->sc_align_tweak = 0;
#else
	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
		sc->sc_align_tweak = 0;
	else
		sc->sc_align_tweak = 2;
#endif /* __NO_STRICT_ALIGNMENT */
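	/*
	 * E.g. for a standard 1500-byte MTU with 2048-byte clusters:
	 * 1500 + 14 + 4 = 1518 <= 2046, so align_tweak is 2, and the
	 * 2-byte tweak plus the 14-byte Ethernet header leaves the IP
	 * header 4-byte aligned on strict-alignment platforms.
	 */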
	/* Cancel any pending I/O. */
	wm_stop(ifp, 0);

	/* update statistics before reset */
	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);

	/* Reset the chip to a known state. */
	wm_reset(sc);

	switch (sc->sc_type) {
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
	case WM_T_80003:
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
		if (wm_check_mng_mode(sc) != 0)
			wm_get_hw_control(sc);
		break;
	default:
		break;
	}

	/* Initialize the transmit descriptor ring. */
	memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
	WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	sc->sc_txfree = WM_NTXDESC(sc);
	sc->sc_txnext = 0;

	if (sc->sc_type < WM_T_82543) {
		CSR_WRITE(sc, WMREG_OLD_TBDAH, WM_CDTXADDR_HI(sc, 0));
		CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR_LO(sc, 0));
		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
	} else {
		CSR_WRITE(sc, WMREG_TBDAH, WM_CDTXADDR_HI(sc, 0));
		CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR_LO(sc, 0));
		CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
		CSR_WRITE(sc, WMREG_TDH, 0);
		CSR_WRITE(sc, WMREG_TDT, 0);
		CSR_WRITE(sc, WMREG_TIDV, 375);		/* ITR / 4 */
		CSR_WRITE(sc, WMREG_TADV, 375);		/* should be same */

		CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
		    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
		CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
		    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
	}
	CSR_WRITE(sc, WMREG_TQSA_LO, 0);
	CSR_WRITE(sc, WMREG_TQSA_HI, 0);

	/* Initialize the transmit job descriptors. */
	for (i = 0; i < WM_TXQUEUELEN(sc); i++)
		sc->sc_txsoft[i].txs_mbuf = NULL;
	sc->sc_txsfree = WM_TXQUEUELEN(sc);
	sc->sc_txsnext = 0;
	sc->sc_txsdirty = 0;

	/*
	 * Initialize the receive descriptor and receive job
	 * descriptor rings.
	 */
	if (sc->sc_type < WM_T_82543) {
		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
		CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);

		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
	} else {
		CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
		CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
		CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
		CSR_WRITE(sc, WMREG_RDH, 0);
		CSR_WRITE(sc, WMREG_RDT, 0);
		CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD);	/* ITR/4 */
		CSR_WRITE(sc, WMREG_RADV, 375);		/* MUST be same */
	}
	for (i = 0; i < WM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf == NULL) {
			if ((error = wm_add_rxbuf(sc, i)) != 0) {
				log(LOG_ERR, "%s: unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    device_xname(sc->sc_dev), i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				wm_rxdrain(sc);
				goto out;
			}
		} else {
			WM_INIT_RXDESC(sc, i);
		}
	}
	sc->sc_rxptr = 0;
	sc->sc_rxdiscard = 0;
	WM_RXCHAIN_RESET(sc);

	/*
	 * Clear out the VLAN table -- we don't use it (yet).
	 */
	CSR_WRITE(sc, WMREG_VET, 0);
	for (i = 0; i < WM_VLAN_TABSIZE; i++)
		CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);

	/*
	 * Set up flow-control parameters.
	 *
	 * XXX Values could probably stand some tuning.
	 */
	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
	    && (sc->sc_type != WM_T_ICH10)) {
		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
	}

	sc->sc_fcrtl = FCRTL_DFLT;
	if (sc->sc_type < WM_T_82543) {
		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
	} else {
		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
	}

	if (sc->sc_type == WM_T_80003)
		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
	else
		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);

	/* Deal with VLAN enables. */
	if (VLAN_ATTACHED(&sc->sc_ethercom))
		sc->sc_ctrl |= CTRL_VME;
	else
		sc->sc_ctrl &= ~CTRL_VME;

	/* Write the control registers. */
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

	if (sc->sc_flags & WM_F_HAS_MII) {
		switch (sc->sc_type) {
		case WM_T_80003:
		case WM_T_ICH8:
		case WM_T_ICH9:
		case WM_T_ICH10:
			/*
			 * Set the mac to wait the maximum time between each
			 * iteration and increase the max iterations when
			 * polling the phy; this fixes erroneous timeouts at
			 * 10Mbps.
			 */
			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
			    0xFFFF);
			val = wm_kmrn_readreg(sc,
			    KUMCTRLSTA_OFFSET_INB_PARAM);
			val |= 0x3F;
			wm_kmrn_writereg(sc,
			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
			break;
		default:
			break;
		}

		if (sc->sc_type == WM_T_80003) {
			val = CSR_READ(sc, WMREG_CTRL_EXT);
			val &= ~CTRL_EXT_LINK_MODE_MASK;
			CSR_WRITE(sc, WMREG_CTRL_EXT, val);

			/* Bypass RX and TX FIFO's */
			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS |
			    KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);

			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
		}
	}

	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);

	/*
	 * Set up checksum offload parameters.
	 */
	reg = CSR_READ(sc, WMREG_RXCSUM);
	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
		reg |= RXCSUM_IPOFL;
	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
	CSR_WRITE(sc, WMREG_RXCSUM, reg);

	/* Reset TBI's RXCFG count */
	sc->sc_tbi_nrxcfg = sc->sc_tbi_lastnrxcfg = 0;

	/*
	 * Set up the interrupt registers.
	 */
	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
	    ICR_RXO | ICR_RXT0;
	if ((sc->sc_flags & WM_F_HAS_MII) == 0)
		sc->sc_icr |= ICR_RXCFG;
	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);

	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
	    || (sc->sc_type == WM_T_ICH10)) {
		reg = CSR_READ(sc, WMREG_KABGTXD);
		reg |= KABGTXD_BGSQLBIAS;
		CSR_WRITE(sc, WMREG_KABGTXD, reg);
	}

	/* Set up the inter-packet gap. */
	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);

	if (sc->sc_type >= WM_T_82543) {
		/*
		 * Set up the interrupt throttling register (units of 256ns)
		 * Note that a footnote in Intel's documentation says this
		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
		 * or 10Mbit mode.  Empirically, it appears to be the case
		 * that that is also true for the 1024ns units of the other
		 * interrupt-related timer registers -- so, really, we ought
		 * to divide this value by 4 when the link speed is low.
		 *
		 * XXX implement this division at link speed change!
		 */

		/*
		 * For N interrupts/sec, set this value to:
		 * 1000000000 / (N * 256).  Note that we set the
		 * absolute and packet timer values to this value
		 * divided by 4 to get "simple timer" behavior.
		 */

		sc->sc_itr = 1500;		/* 2604 ints/sec */
		CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
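		/* Check: 1000000000 / (1500 * 256) ~= 2604, the figure above. */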
	}

	/* Set the VLAN ethernetype. */
	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);

	/*
	 * Set up the transmit control register; we start out with
	 * a collision distance suitable for FDX, but update it when
	 * we resolve the media type.
	 */
	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
	    | TCTL_CT(TX_COLLISION_THRESHOLD)
	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
	if (sc->sc_type >= WM_T_82571)
		sc->sc_tctl |= TCTL_MULR;
	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);

	if (sc->sc_type == WM_T_80003) {
		reg = CSR_READ(sc, WMREG_TCTL_EXT);
		reg &= ~TCTL_EXT_GCEX_MASK;
		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
	}

	/* Set the media. */
	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
		goto out;

	/*
	 * Set up the receive control register; we actually program
	 * the register when we set the receive filter.  Use multicast
	 * address offset type 0.
	 *
	 * Only the i82544 has the ability to strip the incoming
	 * CRC, so we don't enable that feature.
	 */
	sc->sc_mchash_type = 0;
	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
	    | RCTL_MO(sc->sc_mchash_type);

	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
	    && (ifp->if_mtu > ETHERMTU))
		sc->sc_rctl |= RCTL_LPE;

	if (MCLBYTES == 2048) {
		sc->sc_rctl |= RCTL_2k;
	} else {
		if (sc->sc_type >= WM_T_82543) {
			switch (MCLBYTES) {
			case 4096:
				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
				break;
			case 8192:
				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
				break;
			case 16384:
				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
				break;
			default:
				panic("wm_init: MCLBYTES %d unsupported",
				    MCLBYTES);
				break;
			}
		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
	}

	/* Set the receive filter. */
	wm_set_filter(sc);

	/* Start the one second link check clock. */
	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);

	/* ...all done! */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

 out:
	if (error)
		log(LOG_ERR, "%s: interface not running\n",
		    device_xname(sc->sc_dev));
	return (error);
}
/*
 * wm_rxdrain:
 *
 *	Drain the receive queue.
 */
static void
wm_rxdrain(struct wm_softc *sc)
{
	struct wm_rxsoft *rxs;
	int i;

	for (i = 0; i < WM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
			m_freem(rxs->rxs_mbuf);
			rxs->rxs_mbuf = NULL;
		}
	}
}
/*
 * wm_stop:		[ifnet interface function]
 *
 *	Stop transmission on the interface.
 */
static void
wm_stop(struct ifnet *ifp, int disable)
{
	struct wm_softc *sc = ifp->if_softc;
	struct wm_txsoft *txs;
	int i;

	/* Stop the one second clock. */
	callout_stop(&sc->sc_tick_ch);

	/* Stop the 82547 Tx FIFO stall check timer. */
	if (sc->sc_type == WM_T_82547)
		callout_stop(&sc->sc_txfifo_ch);

	if (sc->sc_flags & WM_F_HAS_MII) {
		/* Down the MII. */
		mii_down(&sc->sc_mii);
	} else {
		/* Should we clear PHY's status properly? */
	}

	/* Stop the transmit and receive processes. */
	CSR_WRITE(sc, WMREG_TCTL, 0);
	CSR_WRITE(sc, WMREG_RCTL, 0);

	/*
	 * Clear the interrupt mask to ensure the device cannot assert its
	 * interrupt line.
	 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
	 * any currently pending or shared interrupt.
	 */
	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
	sc->sc_icr = 0;

	/* Release any queued transmit buffers. */
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		txs = &sc->sc_txsoft[i];
		if (txs->txs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}
	}

	/* Mark the interface as down and cancel the watchdog timer. */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	if (disable)
		wm_rxdrain(sc);
}
static void
wm_get_auto_rd_done(struct wm_softc *sc)
{
	int i;

	/* wait for eeprom to reload */
	switch (sc->sc_type) {
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
	case WM_T_80003:
	case WM_T_ICH8:
	case WM_T_ICH9:
		for (i = 0; i < 10; i++) {
			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
				break;
			delay(1000);
		}
		if (i == 10) {
			log(LOG_ERR, "%s: auto read from eeprom failed to "
			    "complete\n", device_xname(sc->sc_dev));
		}
		break;
	default:
		break;
	}
}
static void
wm_lan_init_done(struct wm_softc *sc)
{
	uint32_t reg = 0;
	int i;

	/* wait for eeprom to reload */
	switch (sc->sc_type) {
	case WM_T_ICH10:	/* & PCH */
		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
			reg = CSR_READ(sc, WMREG_STATUS);
			if ((reg & STATUS_LAN_INIT_DONE) != 0)
				break;
			delay(100);
		}
		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
			log(LOG_ERR, "%s: %s: lan_init_done failed to "
			    "complete\n", device_xname(sc->sc_dev), __func__);
		}
		break;
	default:
		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
		    __func__);
		break;
	}

	reg &= ~STATUS_LAN_INIT_DONE;
	CSR_WRITE(sc, WMREG_STATUS, reg);
}
static void
wm_get_cfg_done(struct wm_softc *sc)
{
	int func = 0;
	uint32_t mask;
	int i;

	/* wait for eeprom to reload */
	switch (sc->sc_type) {
	case WM_T_82542_2_0:
	case WM_T_82542_2_1:
		/* null */
		break;
	case WM_T_80003:
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
		if (sc->sc_type == WM_T_80003)
			func = (CSR_READ(sc, WMREG_STATUS)
			    >> STATUS_FUNCID_SHIFT) & 1;
		else
			func = 0; /* XXX Is it true for 82571? */
		mask = (func == 1) ? EEMNGCTL_CFGDONE_1 : EEMNGCTL_CFGDONE_0;
		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
				break;
			delay(1000);
		}
		if (i >= WM_PHY_CFG_TIMEOUT) {
			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
			    device_xname(sc->sc_dev), __func__));
		}
		break;
	default:
		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
		    __func__);
		break;
	}
}
/*
 * wm_acquire_eeprom:
 *
 *	Perform the EEPROM handshake required on some chips.
 */
static int
wm_acquire_eeprom(struct wm_softc *sc)
{
	uint32_t reg;
	int x;
	int ret = 0;

	/* always success */
	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
		return (0);

	if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
		ret = wm_get_swfwhw_semaphore(sc);
	} else if (sc->sc_flags & WM_F_SWFW_SYNC) {
		/* this will also do wm_get_swsm_semaphore() if needed */
		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
	} else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
		ret = wm_get_swsm_semaphore(sc);
	}

	if (ret) {
		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
		    __func__);
		return (1);
	}

	if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
		reg = CSR_READ(sc, WMREG_EECD);

		/* Request EEPROM access. */
		reg |= EECD_EE_REQ;
		CSR_WRITE(sc, WMREG_EECD, reg);

		/* ..and wait for it to be granted. */
		for (x = 0; x < 1000; x++) {
			reg = CSR_READ(sc, WMREG_EECD);
			if (reg & EECD_EE_GNT)
				break;
			delay(5);
		}
		if ((reg & EECD_EE_GNT) == 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not acquire EEPROM GNT\n");
			reg &= ~EECD_EE_REQ;
			CSR_WRITE(sc, WMREG_EECD, reg);
			if (sc->sc_flags & WM_F_SWFWHW_SYNC)
				wm_put_swfwhw_semaphore(sc);
			if (sc->sc_flags & WM_F_SWFW_SYNC)
				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
			else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
				wm_put_swsm_semaphore(sc);
			return (1);
		}
	}

	return (0);
}
/*
 * wm_release_eeprom:
 *
 *	Release the EEPROM mutex.
 */
static void
wm_release_eeprom(struct wm_softc *sc)
{
	uint32_t reg;

	/* always success */
	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
		return;

	if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
		reg = CSR_READ(sc, WMREG_EECD);
		reg &= ~EECD_EE_REQ;
		CSR_WRITE(sc, WMREG_EECD, reg);
	}

	if (sc->sc_flags & WM_F_SWFWHW_SYNC)
		wm_put_swfwhw_semaphore(sc);
	if (sc->sc_flags & WM_F_SWFW_SYNC)
		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
	else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
		wm_put_swsm_semaphore(sc);
}
/*
 * wm_eeprom_sendbits:
 *
 *	Send a series of bits to the EEPROM.
 */
static void
wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
{
	uint32_t reg;
	int x;

	reg = CSR_READ(sc, WMREG_EECD);

	for (x = nbits; x > 0; x--) {
		if (bits & (1U << (x - 1)))
			reg |= EECD_DI;
		else
			reg &= ~EECD_DI;
		CSR_WRITE(sc, WMREG_EECD, reg);
		delay(2);
		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
		delay(2);
		CSR_WRITE(sc, WMREG_EECD, reg);
		delay(2);
	}
}
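/*
 * Note: wm_eeprom_sendbits() and wm_eeprom_recvbits() bit-bang the
 * EEPROM serial interface through the EECD register: data is placed on
 * (or read back from) the DI/DO bits while the SK bit is toggled to
 * clock each bit, most-significant bit first.
 */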
/*
 * wm_eeprom_recvbits:
 *
 *	Receive a series of bits from the EEPROM.
 */
static void
wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
{
	uint32_t reg, val;
	int x;

	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;

	val = 0;
	for (x = nbits; x > 0; x--) {
		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
		delay(2);
		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
			val |= (1U << (x - 1));
		CSR_WRITE(sc, WMREG_EECD, reg);
		delay(2);
	}
	*valp = val;
}
/*
 * wm_read_eeprom_uwire:
 *
 *	Read a word from the EEPROM using the MicroWire protocol.
 */
static int
wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
{
	uint32_t reg, val;
	int i;

	for (i = 0; i < wordcnt; i++) {
		/* Clear SK and DI. */
		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
		CSR_WRITE(sc, WMREG_EECD, reg);

		/* Set CHIP SELECT. */
		reg |= EECD_CS;
		CSR_WRITE(sc, WMREG_EECD, reg);
		delay(2);

		/* Shift in the READ command. */
		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);

		/* Shift in address. */
		wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);

		/* Shift out the data. */
		wm_eeprom_recvbits(sc, &val, 16);
		data[i] = val & 0xffff;

		/* Clear CHIP SELECT. */
		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
		CSR_WRITE(sc, WMREG_EECD, reg);
		delay(2);
	}

	return (0);
}
/*
 * wm_spi_eeprom_ready:
 *
 *	Wait for a SPI EEPROM to be ready for commands.
 */
static int
wm_spi_eeprom_ready(struct wm_softc *sc)
{
	uint32_t val;
	int usec;

	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
		wm_eeprom_recvbits(sc, &val, 8);
		if ((val & SPI_SR_RDY) == 0)
			break;
	}
	if (usec >= SPI_MAX_RETRIES) {
		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
		return (1);
	}
	return (0);
}
/*
 * wm_read_eeprom_spi:
 *
 *	Read a word from the EEPROM using the SPI protocol.
 */
static int
wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
{
	uint32_t reg, val;
	int i;
	uint8_t opc;

	/* Clear SK and CS. */
	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
	CSR_WRITE(sc, WMREG_EECD, reg);
	delay(2);

	if (wm_spi_eeprom_ready(sc))
		return (1);

	/* Toggle CS to flush commands. */
	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
	delay(2);
	CSR_WRITE(sc, WMREG_EECD, reg);
	delay(2);

	opc = SPI_OPC_READ;
	if (sc->sc_ee_addrbits == 8 && word >= 128)
		opc |= SPI_OPC_A8;

	wm_eeprom_sendbits(sc, opc, 8);
	wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);

	for (i = 0; i < wordcnt; i++) {
		wm_eeprom_recvbits(sc, &val, 16);
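		/*
		 * The SPI part shifts each 16-bit word out MSB-first, so
		 * swap the two bytes into the byte order the rest of the
		 * driver expects.
		 */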
		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
	}

	/* Raise CS and clear SK. */
	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
	CSR_WRITE(sc, WMREG_EECD, reg);
	delay(2);

	return (0);
}
#define EEPROM_CHECKSUM		0xBABA
#define EEPROM_SIZE		0x0040

/*
 * wm_validate_eeprom_checksum
 *
 * The checksum is defined as the sum of the first 64 (16 bit) words.
 */
static int
wm_validate_eeprom_checksum(struct wm_softc *sc)
{
	uint16_t checksum;
	uint16_t eeprom_data;
	int i;

	checksum = 0;

	for (i = 0; i < EEPROM_SIZE; i++) {
		if (wm_read_eeprom(sc, i, 1, &eeprom_data))
			return (1);
		checksum += eeprom_data;
	}

	if (checksum != (uint16_t) EEPROM_CHECKSUM)
		return (1);

	return (0);
}
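/*
 * In other words, the 64 words (including the stored checksum word
 * itself) must sum to 0xBABA modulo 2^16 for the EEPROM contents to be
 * considered valid.
 */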
/*
 * wm_read_eeprom:
 *
 *	Read data from the serial EEPROM.
 */
static int
wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
{
	int rv;

	if (sc->sc_flags & WM_F_EEPROM_INVALID)
		return (1);

	if (wm_acquire_eeprom(sc))
		return (1);

	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
	    || (sc->sc_type == WM_T_ICH10))
		rv = wm_read_eeprom_ich8(sc, word, wordcnt, data);
	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
		rv = wm_read_eeprom_eerd(sc, word, wordcnt, data);
	else if (sc->sc_flags & WM_F_EEPROM_SPI)
		rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
	else
		rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);

	wm_release_eeprom(sc);
	return (rv);
}
static int
wm_read_eeprom_eerd(struct wm_softc *sc, int offset, int wordcnt,
    uint16_t *data)
{
	int i, eerd = 0;
	int error = 0;

	for (i = 0; i < wordcnt; i++) {
		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;

		CSR_WRITE(sc, WMREG_EERD, eerd);
		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
		if (error != 0)
			break;

		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
	}

	return (error);
}
static int
wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
{
	uint32_t attempts = 100000;
	uint32_t i, reg = 0;
	int done = 1;

	for (i = 0; i < attempts; i++) {
		reg = CSR_READ(sc, rw);

		if (reg & EERD_DONE) {
			done = 0;
			break;
		}
		delay(5);
	}

	return (done);
}
/*
 * wm_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 */
static int
wm_add_rxbuf(struct wm_softc *sc, int idx)
{
	struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (rxs->rxs_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);

	rxs->rxs_mbuf = m;

	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		/* XXX XXX XXX */
		aprint_error_dev(sc->sc_dev,
		    "unable to load rx DMA map %d, error = %d\n",
		    idx, error);
		panic("wm_add_rxbuf");
	}

	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	WM_INIT_RXDESC(sc, idx);

	return (0);
}
/*
 * wm_set_ral:
 *
 *	Set an entry in the receive address list.
 */
static void
wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
{
	uint32_t ral_lo, ral_hi;

	if (enaddr != NULL) {
		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
		    (enaddr[3] << 24);
		ral_hi = enaddr[4] | (enaddr[5] << 8);
		ral_hi |= RAL_AV;
	} else {
		ral_lo = 0;
		ral_hi = 0;
	}

	if (sc->sc_type >= WM_T_82544) {
		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
		    ral_lo);
		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
		    ral_hi);
	} else {
		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
	}
}
/*
 * wm_mchash:
 *
 *	Compute the hash of the multicast address for the 4096-bit
 *	multicast filter.
 */
static uint32_t
wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
{
	static const int lo_shift[4] = { 4, 3, 2, 0 };
	static const int hi_shift[4] = { 4, 5, 6, 8 };
	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
	uint32_t hash;

	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
	    || (sc->sc_type == WM_T_ICH10)) {
		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
		return (hash & 0x3ff);
	}
	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);

	return (hash & 0xfff);
}
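/*
 * E.g. with the default filter type 0 on non-ICH chips, the hash is
 * (enaddr[4] >> 4) | (enaddr[5] << 4), masked to 12 bits -- the upper
 * nibble of address byte 4 and all of byte 5 together select one of
 * the 4096 filter bits.
 */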
/*
 * wm_set_filter:
 *
 *	Set up the receive filter.
 */
static void
wm_set_filter(struct wm_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t mta_reg;
	uint32_t hash, reg, bit;
	int i, size;

	if (sc->sc_type >= WM_T_82544)
		mta_reg = WMREG_CORDOVA_MTA;
	else
		mta_reg = WMREG_MTA;

	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);

	if (ifp->if_flags & IFF_BROADCAST)
		sc->sc_rctl |= RCTL_BAM;
	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_rctl |= RCTL_UPE;
		goto allmulti;
	}

	/*
	 * Set the station address in the first RAL slot, and
	 * clear the remaining slots.
	 */
	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
	    || (sc->sc_type == WM_T_ICH10))
		size = WM_ICH8_RAL_TABSIZE;
	else
		size = WM_RAL_TABSIZE;
	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
	for (i = 1; i < size; i++)
		wm_set_ral(sc, NULL, i);

	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
	    || (sc->sc_type == WM_T_ICH10))
		size = WM_ICH8_MC_TABSIZE;
	else
		size = WM_MC_TABSIZE;
	/* Clear out the multicast table. */
	for (i = 0; i < size; i++)
		CSR_WRITE(sc, mta_reg + (i << 2), 0);

	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			goto allmulti;
		}

		hash = wm_mchash(sc, enm->enm_addrlo);

		reg = (hash >> 5);
		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
		    || (sc->sc_type == WM_T_ICH10))
			reg &= 0x1f;
		else
			reg &= 0x7f;
		bit = hash & 0x1f;

		hash = CSR_READ(sc, mta_reg + (reg << 2));
		hash |= 1U << bit;

		/* XXX Hardware bug?? */
		if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
		} else
			CSR_WRITE(sc, mta_reg + (reg << 2), hash);

		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	goto setit;

 allmulti:
	ifp->if_flags |= IFF_ALLMULTI;
	sc->sc_rctl |= RCTL_MPE;

 setit:
	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
}
/*
 * wm_tbi_mediainit:
 *
 *	Initialize media for use on 1000BASE-X devices.
 */
static void
wm_tbi_mediainit(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	const char *sep = "";

	if (sc->sc_type < WM_T_82543)
		sc->sc_tipg = TIPG_WM_DFLT;
	else
		sc->sc_tipg = TIPG_LG_DFLT;

	sc->sc_tbi_anegticks = 5;

	/* Initialize our media structures */
	sc->sc_mii.mii_ifp = ifp;

	sc->sc_ethercom.ec_mii = &sc->sc_mii;
	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
	    wm_tbi_mediastatus);

	/*
	 * SWD Pins:
	 *
	 *	0 = Link LED (output)
	 *	1 = Loss Of Signal (input)
	 */
	sc->sc_ctrl |= CTRL_SWDPIO(0);
	sc->sc_ctrl &= ~CTRL_SWDPIO(1);

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

#define	ADD(ss, mm, dd)							\
do {									\
	aprint_normal("%s%s", sep, ss);					\
	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL);	\
	sep = ", ";							\
} while (/*CONSTCOND*/0)

	aprint_normal_dev(sc->sc_dev, "");
	ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
	ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
	ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
	aprint_normal("\n");

#undef ADD

	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
}
/*
 * wm_tbi_mediastatus:	[ifmedia interface function]
 *
 *	Get the current interface media status on a 1000BASE-X device.
 */
static void
wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct wm_softc *sc = ifp->if_softc;
	uint32_t ctrl, status;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	status = CSR_READ(sc, WMREG_STATUS);
	if ((status & STATUS_LU) == 0) {
		ifmr->ifm_active |= IFM_NONE;
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	ifmr->ifm_active |= IFM_1000_SX;
	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
		ifmr->ifm_active |= IFM_FDX;
	ctrl = CSR_READ(sc, WMREG_CTRL);
	if (ctrl & CTRL_RFCE)
		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
	if (ctrl & CTRL_TFCE)
		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
}
/*
 * wm_tbi_mediachange:	[ifmedia interface function]
 *
 *	Set hardware to newly-selected media on a 1000BASE-X device.
 */
static int
wm_tbi_mediachange(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
	uint32_t status;
	int i;

	sc->sc_txcw = 0;
	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
	    (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
		sc->sc_txcw |= TXCW_ANE;
	} else {
		/*
		 * If autonegotiation is turned off, force link up and turn on
		 * full duplex
		 */
		sc->sc_txcw &= ~TXCW_ANE;
		sc->sc_ctrl |= CTRL_SLU | CTRL_FD;
		sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		delay(1000);
	}

	DPRINTF(WM_DEBUG_LINK, ("%s: sc_txcw = 0x%x after autoneg check\n",
	    device_xname(sc->sc_dev), sc->sc_txcw));
	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
	delay(10000);

	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
	DPRINTF(WM_DEBUG_LINK, ("%s: i = 0x%x\n", device_xname(sc->sc_dev), i));

	/*
	 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the
	 * optics detect a signal, 0 if they don't.
	 */
	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
		/* Have signal; wait for the link to come up. */

		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
			/*
			 * Reset the link, and let autonegotiation do its thing
			 */
			sc->sc_ctrl |= CTRL_LRST;
			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
			delay(1000);
			sc->sc_ctrl &= ~CTRL_LRST;
			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
			delay(1000);
		}

		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
			delay(10000);
			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
				break;
		}

		DPRINTF(WM_DEBUG_LINK, ("%s: i = %d after waiting for link\n",
		    device_xname(sc->sc_dev), i));

		status = CSR_READ(sc, WMREG_STATUS);
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
		    device_xname(sc->sc_dev), status, STATUS_LU));
		if (status & STATUS_LU) {
			/* Link is up. */
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: set media -> link up %s\n",
			    device_xname(sc->sc_dev),
			    (status & STATUS_FD) ? "FDX" : "HDX"));

			/*
			 * NOTE: CTRL will update TFCE and RFCE automatically,
			 * so we should update sc->sc_ctrl
			 */
			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
			sc->sc_fcrtl &= ~FCRTL_XONE;
			if (status & STATUS_FD)
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
			else
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
				sc->sc_fcrtl |= FCRTL_XONE;
			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
			    WMREG_OLD_FCRTL : WMREG_FCRTL,
			    sc->sc_fcrtl);
			sc->sc_tbi_linkup = 1;
		} else {
			if (i == WM_LINKUP_TIMEOUT)
				wm_check_for_link(sc);
			/* Link is down. */
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: set media -> link down\n",
			    device_xname(sc->sc_dev)));
			sc->sc_tbi_linkup = 0;
		}
	} else {
		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
		    device_xname(sc->sc_dev)));
		sc->sc_tbi_linkup = 0;
	}

	wm_tbi_set_linkled(sc);

	return 0;
}
/*
 * wm_tbi_set_linkled:
 *
 *	Update the link LED on 1000BASE-X devices.
 */
static void
wm_tbi_set_linkled(struct wm_softc *sc)
{

	if (sc->sc_tbi_linkup)
		sc->sc_ctrl |= CTRL_SWDPIN(0);
	else
		sc->sc_ctrl &= ~CTRL_SWDPIN(0);

	/* 82540 or newer devices are active low */
	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
}
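/*
 * For example (derived from the logic above): on an 82540 or newer part
 * with the link up, CTRL_SWDPIN(0) is first set and then inverted by the
 * XOR, so the pin is driven low and the active-low LED lights; on older
 * parts the same link-up state drives the pin high.
 */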
/*
 * wm_tbi_check_link:
 *
 *	Check the link on 1000BASE-X devices.
 */
static void
wm_tbi_check_link(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
	uint32_t rxcw, ctrl, status;

	status = CSR_READ(sc, WMREG_STATUS);

	rxcw = CSR_READ(sc, WMREG_RXCW);
	ctrl = CSR_READ(sc, WMREG_CTRL);

	/* set link status */
	if ((status & STATUS_LU) == 0) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: checklink -> down\n", device_xname(sc->sc_dev)));
		sc->sc_tbi_linkup = 0;
	} else if (sc->sc_tbi_linkup == 0) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: checklink -> up %s\n", device_xname(sc->sc_dev),
		    (status & STATUS_FD) ? "FDX" : "HDX"));
		sc->sc_tbi_linkup = 1;
	}

	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
	    && ((status & STATUS_LU) == 0)) {
		sc->sc_tbi_linkup = 0;
		if (sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg > 100) {
			/* RXCFG storm! */
			DPRINTF(WM_DEBUG_LINK, ("RXCFG storm! (%d)\n",
			    sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg));
			wm_init(ifp);
			ifp->if_start(ifp);
		} else if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
			/* If the timer expired, retry autonegotiation */
			if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
				DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
				sc->sc_tbi_ticks = 0;
				/*
				 * Reset the link, and let autonegotiation do
				 * its thing
				 */
				sc->sc_ctrl |= CTRL_LRST;
				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
				delay(1000);
				sc->sc_ctrl &= ~CTRL_LRST;
				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
				delay(1000);
				CSR_WRITE(sc, WMREG_TXCW,
				    sc->sc_txcw & ~TXCW_ANE);
				CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
			}
		}
	}

	wm_tbi_set_linkled(sc);
}
/*
 * wm_gmii_reset:
 *
 *	Reset the PHY.
 */
static void
wm_gmii_reset(struct wm_softc *sc)
{
	uint32_t reg;
	int rv = 0;
	int func = 0; /* XXX gcc */

	/* get phy semaphore */
	switch (sc->sc_type) {
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
		/* XXX should get sw semaphore, too */
		rv = wm_get_swsm_semaphore(sc);
		break;
	case WM_T_80003:
		func = (CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1;
		rv = wm_get_swfw_semaphore(sc,
		    func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
		rv = wm_get_swfwhw_semaphore(sc);
		break;
	default:
		/* nothing to do */
		rv = 0;
		break;
	}
	if (rv != 0) {
		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
		    __func__);
		return;
	}

	switch (sc->sc_type) {
	case WM_T_82542_2_0:
	case WM_T_82542_2_1:
		/* null */
		break;
	case WM_T_82543:
		/*
		 * With 82543, we need to force speed and duplex on the MAC
		 * equal to what the PHY speed and duplex configuration is.
		 * In addition, we need to perform a hardware reset on the PHY
		 * to take it out of reset.
		 */
		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

		/* The PHY reset pin is active-low. */
		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
		    CTRL_EXT_SWDPIN(4));
		reg |= CTRL_EXT_SWDPIO(4);

		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
		delay(10*1000);

		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
		delay(150);
#if 0
		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
#endif
		delay(20*1000);	/* XXX extra delay to get PHY ID? */
		break;
	case WM_T_82544:	/* reset 10000us */
	case WM_T_82540:
	case WM_T_82545:
	case WM_T_82545_3:
	case WM_T_82546:
	case WM_T_82546_3:
	case WM_T_82541:
	case WM_T_82541_2:
	case WM_T_82547:
	case WM_T_82547_2:
	case WM_T_82571:	/* reset 100us */
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82575:
	case WM_T_82583:
	case WM_T_80003:
		/* generic reset */
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
		delay((sc->sc_type >= WM_T_82571) ? 100 : 10*1000);
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		delay(150);

		if ((sc->sc_type == WM_T_82541)
		    || (sc->sc_type == WM_T_82541_2)
		    || (sc->sc_type == WM_T_82547)
		    || (sc->sc_type == WM_T_82547_2)) {
			/* workarounds for igp are done in igp_reset() */
			/* XXX add code to set LED after phy reset */
		}
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
		/* generic reset */
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
		delay(100);
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

		/* Allow time for h/w to get to a quiescent state after reset */
		delay(10*1000);

		/* XXX add code to set LED after phy reset */
		break;
	default:
		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
		    __func__);
		break;
	}

	/* release PHY semaphore */
	switch (sc->sc_type) {
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
		/* XXX should put sw semaphore, too */
		wm_put_swsm_semaphore(sc);
		break;
	case WM_T_80003:
		wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
		wm_put_swfwhw_semaphore(sc);
		break;
	default:
		/* nothing to do */
		break;
	}

	wm_get_cfg_done(sc);

	/* extra setup */
	switch (sc->sc_type) {
	case WM_T_82542_2_0:
	case WM_T_82542_2_1:
	case WM_T_82543:
	case WM_T_82544:
	case WM_T_82540:
	case WM_T_82545:
	case WM_T_82545_3:
	case WM_T_82546:
	case WM_T_82546_3:
	case WM_T_82541:
	case WM_T_82541_2:
	case WM_T_82547:
	case WM_T_82547_2:
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82575:
	case WM_T_82583:
	case WM_T_80003:
		/* null */
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
		/* XXX actively configure the LED after PHY reset */
		break;
	default:
		panic("%s: unknown type\n", __func__);
		break;
	}
}
/*
 * wm_gmii_mediainit:
 *
 *	Initialize media for use on 1000BASE-T devices.
 */
static void
wm_gmii_mediainit(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	sc->sc_flags |= WM_F_HAS_MII;

	if (sc->sc_type == WM_T_80003)
		sc->sc_tipg = TIPG_1000T_80003_DFLT;
	else
		sc->sc_tipg = TIPG_1000T_DFLT;

	/*
	 * Let the chip set speed/duplex on its own based on
	 * signals from the PHY.
	 * XXXbouyer - I'm not sure this is right for the 80003,
	 * the em driver only sets CTRL_SLU here - but it seems to work.
	 */
	sc->sc_ctrl |= CTRL_SLU;
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

	/* Initialize our media structures and probe the GMII. */
	sc->sc_mii.mii_ifp = ifp;

	if (sc->sc_type >= WM_T_80003) {
		sc->sc_mii.mii_readreg = wm_gmii_i80003_readreg;
		sc->sc_mii.mii_writereg = wm_gmii_i80003_writereg;
	} else if (sc->sc_type >= WM_T_82544) {
		sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
		sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
	} else {
		sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
		sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
	}
	sc->sc_mii.mii_statchg = wm_gmii_statchg;

	wm_gmii_reset(sc);

	sc->sc_ethercom.ec_mii = &sc->sc_mii;
	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
	    wm_gmii_mediastatus);

	mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, MIIF_DOPAUSE);

	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		/* if that failed, retry with the *_bm_* functions */
		sc->sc_mii.mii_readreg = wm_gmii_bm_readreg;
		sc->sc_mii.mii_writereg = wm_gmii_bm_writereg;

		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
		    MII_OFFSET_ANY, MIIF_DOPAUSE);
	}
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else {
		if (sc->sc_type >= WM_T_82574) {
			struct mii_softc *child;

			child = LIST_FIRST(&sc->sc_mii.mii_phys);
			/* fix up the read/write functions, as the e1000 driver does */
			if (device_is_a(child->mii_dev, "igphy")) {
				sc->sc_mii.mii_readreg = wm_gmii_i80003_readreg;
				sc->sc_mii.mii_writereg = wm_gmii_i80003_writereg;
			} else {
				sc->sc_mii.mii_readreg = wm_gmii_bm_readreg;
				sc->sc_mii.mii_writereg = wm_gmii_bm_writereg;
			}
		}

		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
	}
}
/*
 * wm_gmii_mediastatus:	[ifmedia interface function]
 *
 *	Get the current interface media status on a 1000BASE-T device.
 */
static void
wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct wm_softc *sc = ifp->if_softc;

	ether_mediastatus(ifp, ifmr);
	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK) |
	    sc->sc_flowflags;
}
/*
 * wm_gmii_mediachange:	[ifmedia interface function]
 *
 *	Set hardware to newly-selected media on a 1000BASE-T device.
 */
static int
wm_gmii_mediachange(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
	int rc;

	if ((ifp->if_flags & IFF_UP) == 0)
		return 0;

	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
	sc->sc_ctrl |= CTRL_SLU;
	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
	    || (sc->sc_type > WM_T_82543)) {
		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
	} else {
		sc->sc_ctrl &= ~CTRL_ASDE;
		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
		if (ife->ifm_media & IFM_FDX)
			sc->sc_ctrl |= CTRL_FD;
		switch (IFM_SUBTYPE(ife->ifm_media)) {
		case IFM_10_T:
			sc->sc_ctrl |= CTRL_SPEED_10;
			break;
		case IFM_100_TX:
			sc->sc_ctrl |= CTRL_SPEED_100;
			break;
		case IFM_1000_T:
			sc->sc_ctrl |= CTRL_SPEED_1000;
			break;
		default:
			panic("wm_gmii_mediachange: bad media 0x%x",
			    ife->ifm_media);
		}
	}
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
	if (sc->sc_type <= WM_T_82543)
		wm_gmii_reset(sc);

	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
		return 0;
	return rc;
}
#define	MDI_IO		CTRL_SWDPIN(2)
#define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
#define	MDI_CLK		CTRL_SWDPIN(3)

static void
i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
{
	uint32_t i, v;

	v = CSR_READ(sc, WMREG_CTRL);
	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
	v |= MDI_DIR | CTRL_SWDPIO(3);

	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
		if (data & i)
			v |= MDI_IO;
		else
			v &= ~MDI_IO;
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
		delay(10);
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
	}
}

static uint32_t
i82543_mii_recvbits(struct wm_softc *sc)
{
	uint32_t v, i, data = 0;

	v = CSR_READ(sc, WMREG_CTRL);
	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
	v |= CTRL_SWDPIO(3);

	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);

	for (i = 0; i < 16; i++) {
		data <<= 1;
		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
		delay(10);
		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
			data |= 1;
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
	}

	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);

	return (data);
}
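/*
 * The two helpers above bit-bang IEEE 802.3 clause 22 MDIO frames over the
 * software-definable pins.  A read, for instance (as composed by
 * wm_gmii_i82543_readreg() below), is a 32-bit preamble of all ones
 * followed by a 14-bit header: the start bits (MII_COMMAND_START), the
 * read opcode (MII_COMMAND_READ), the 5-bit PHY address and the 5-bit
 * register address; the 16 data bits are then clocked back in by
 * i82543_mii_recvbits().
 */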
/*
 * wm_gmii_i82543_readreg:	[mii interface function]
 *
 *	Read a PHY register on the GMII (i82543 version).
 */
static int
wm_gmii_i82543_readreg(device_t self, int phy, int reg)
{
	struct wm_softc *sc = device_private(self);
	int rv;

	i82543_mii_sendbits(sc, 0xffffffffU, 32);
	i82543_mii_sendbits(sc, reg | (phy << 5) |
	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
	rv = i82543_mii_recvbits(sc) & 0xffff;

	DPRINTF(WM_DEBUG_GMII,
	    ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
	    device_xname(sc->sc_dev), phy, reg, rv));

	return (rv);
}

/*
 * wm_gmii_i82543_writereg:	[mii interface function]
 *
 *	Write a PHY register on the GMII (i82543 version).
 */
static void
wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
{
	struct wm_softc *sc = device_private(self);

	i82543_mii_sendbits(sc, 0xffffffffU, 32);
	i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
	    (MII_COMMAND_START << 30), 32);
}
/*
 * wm_gmii_i82544_readreg:	[mii interface function]
 *
 *	Read a PHY register on the GMII.
 */
static int
wm_gmii_i82544_readreg(device_t self, int phy, int reg)
{
	struct wm_softc *sc = device_private(self);
	uint32_t mdic = 0;
	int i, rv;

	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
	    MDIC_REGADD(reg));

	for (i = 0; i < 320; i++) {
		mdic = CSR_READ(sc, WMREG_MDIC);
		if (mdic & MDIC_READY)
			break;
		delay(10);
	}

	if ((mdic & MDIC_READY) == 0) {
		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
		    device_xname(sc->sc_dev), phy, reg);
		rv = 0;
	} else if (mdic & MDIC_E) {
#if 0 /* This is normal if no PHY is present. */
		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
		    device_xname(sc->sc_dev), phy, reg);
#endif
		rv = 0;
	} else
		rv = MDIC_DATA(mdic);

	return (rv);
}

/*
 * wm_gmii_i82544_writereg:	[mii interface function]
 *
 *	Write a PHY register on the GMII.
 */
static void
wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
{
	struct wm_softc *sc = device_private(self);
	uint32_t mdic = 0;
	int i;

	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
	    MDIC_REGADD(reg) | MDIC_DATA(val));

	for (i = 0; i < 320; i++) {
		mdic = CSR_READ(sc, WMREG_MDIC);
		if (mdic & MDIC_READY)
			break;
		delay(10);
	}

	if ((mdic & MDIC_READY) == 0)
		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
		    device_xname(sc->sc_dev), phy, reg);
	else if (mdic & MDIC_E)
		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
		    device_xname(sc->sc_dev), phy, reg);
}
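/*
 * A minimal usage sketch of the MDIC-based accessors (not compiled;
 * MII_PHYIDR1/MII_PHYIDR2 are the standard identification registers from
 * dev/mii/mii.h, and "sc" is assumed to be the softc of an 82544-class
 * device).  The poll loops above hide the MDIC_READY handshake, so a
 * caller simply reads a register:
 */
#if 0
	int id1 = wm_gmii_i82544_readreg(sc->sc_dev, 1, MII_PHYIDR1);
	int id2 = wm_gmii_i82544_readreg(sc->sc_dev, 1, MII_PHYIDR2);
#endif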
/*
 * wm_gmii_i80003_readreg:	[mii interface function]
 *
 *	Read a PHY register on the kumeran.
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
 */
static int
wm_gmii_i80003_readreg(device_t self, int phy, int reg)
{
	struct wm_softc *sc = device_private(self);
	int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
	int rv;

	if (phy != 1) /* only one PHY on kumeran bus */
		return 0;

	if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
		    __func__);
		return 0;
	}

	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
		    reg >> GG82563_PAGE_SHIFT);
	} else {
		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
		    reg >> GG82563_PAGE_SHIFT);
	}
	/* Wait another 200us to work around a bug in the MDIC ready bit */
	delay(200);
	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
	delay(200);

	wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
	return (rv);
}

/*
 * wm_gmii_i80003_writereg:	[mii interface function]
 *
 *	Write a PHY register on the kumeran.
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
 */
static void
wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
{
	struct wm_softc *sc = device_private(self);
	int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);

	if (phy != 1) /* only one PHY on kumeran bus */
		return;

	if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
		    __func__);
		return;
	}

	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
		    reg >> GG82563_PAGE_SHIFT);
	} else {
		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
		    reg >> GG82563_PAGE_SHIFT);
	}
	/* Wait another 200us to work around a bug in the MDIC ready bit */
	delay(200);
	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
	delay(200);

	wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
}
/*
 * wm_gmii_bm_readreg:	[mii interface function]
 *
 *	Read a PHY register on the BM PHY.
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
 */
static int
wm_gmii_bm_readreg(device_t self, int phy, int reg)
{
	struct wm_softc *sc = device_private(self);
	int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
	int rv;

	if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
		    __func__);
		return 0;
	}

	if (reg > GG82563_MAX_REG_ADDRESS) {
		if (phy == 1)
			wm_gmii_i82544_writereg(self, phy, 0x1f,
			    reg);
		else
			wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
			    reg >> GG82563_PAGE_SHIFT);
	}

	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
	wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
	return (rv);
}

/*
 * wm_gmii_bm_writereg:	[mii interface function]
 *
 *	Write a PHY register on the BM PHY.
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
 */
static void
wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
{
	struct wm_softc *sc = device_private(self);
	int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);

	if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
		    __func__);
		return;
	}

	if (reg > GG82563_MAX_REG_ADDRESS) {
		if (phy == 1)
			wm_gmii_i82544_writereg(self, phy, 0x1f,
			    reg);
		else
			wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
			    reg >> GG82563_PAGE_SHIFT);
	}

	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
	wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
}
/*
 * wm_gmii_statchg:	[mii interface function]
 *
 *	Callback from MII layer when media changes.
 */
static void
wm_gmii_statchg(device_t self)
{
	struct wm_softc *sc = device_private(self);
	struct mii_data *mii = &sc->sc_mii;

	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
	sc->sc_fcrtl &= ~FCRTL_XONE;

	/*
	 * Get flow control negotiation result.
	 */
	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
		mii->mii_media_active &= ~IFM_ETH_FMASK;
	}

	if (sc->sc_flowflags & IFM_FLOW) {
		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
			sc->sc_ctrl |= CTRL_TFCE;
			sc->sc_fcrtl |= FCRTL_XONE;
		}
		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
			sc->sc_ctrl |= CTRL_RFCE;
	}

	if (sc->sc_mii.mii_media_active & IFM_FDX) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: statchg: FDX\n", device_xname(sc->sc_dev)));
		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
	} else {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: statchg: HDX\n", device_xname(sc->sc_dev)));
		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
	}

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
	    : WMREG_FCRTL, sc->sc_fcrtl);
	if (sc->sc_type == WM_T_80003) {
		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
		case IFM_1000_T:
			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
			sc->sc_tipg = TIPG_1000T_80003_DFLT;
			break;
		default:
			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
			sc->sc_tipg = TIPG_10_100_80003_DFLT;
			break;
		}
		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
	}
}
/*
 * wm_kmrn_readreg:
 *
 *	Read a kumeran register.
 */
static int
wm_kmrn_readreg(struct wm_softc *sc, int reg)
{
	int rv;

	if ((sc->sc_flags & WM_F_SWFW_SYNC) != 0) {
		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
			aprint_error_dev(sc->sc_dev,
			    "%s: failed to get semaphore\n", __func__);
			return 0;
		}
	} else if ((sc->sc_flags & WM_F_SWFWHW_SYNC) != 0) {
		if (wm_get_swfwhw_semaphore(sc)) {
			aprint_error_dev(sc->sc_dev,
			    "%s: failed to get semaphore\n", __func__);
			return 0;
		}
	}

	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
	    KUMCTRLSTA_REN);
	delay(2);

	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;

	if ((sc->sc_flags & WM_F_SWFW_SYNC) != 0)
		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
	else if ((sc->sc_flags & WM_F_SWFWHW_SYNC) != 0)
		wm_put_swfwhw_semaphore(sc);

	return (rv);
}

/*
 * wm_kmrn_writereg:
 *
 *	Write a kumeran register.
 */
static void
wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
{

	if ((sc->sc_flags & WM_F_SWFW_SYNC) != 0) {
		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
			aprint_error_dev(sc->sc_dev,
			    "%s: failed to get semaphore\n", __func__);
			return;
		}
	} else if ((sc->sc_flags & WM_F_SWFWHW_SYNC) != 0) {
		if (wm_get_swfwhw_semaphore(sc)) {
			aprint_error_dev(sc->sc_dev,
			    "%s: failed to get semaphore\n", __func__);
			return;
		}
	}

	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
	    (val & KUMCTRLSTA_MASK));

	if ((sc->sc_flags & WM_F_SWFW_SYNC) != 0)
		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
	else if ((sc->sc_flags & WM_F_SWFWHW_SYNC) != 0)
		wm_put_swfwhw_semaphore(sc);
}
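/*
 * Usage sketch (not compiled): a Kumeran register access goes through the
 * accessor pair above, which handles the SW/FW/HW semaphore protocol
 * internally, so a caller need not take any lock itself.  For example,
 * mirroring what wm_gmii_statchg() does:
 */
#if 0
	(void) wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_HD_CTRL);
	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
	    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
#endif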
static int
wm_is_onboard_nvm_eeprom(struct wm_softc *sc)
{
	uint32_t eecd = 0;

	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
	    || sc->sc_type == WM_T_82583) {
		eecd = CSR_READ(sc, WMREG_EECD);

		/* Isolate bits 15 & 16 */
		eecd = ((eecd >> 15) & 0x03);

		/* If both bits are set, device is Flash type */
		if (eecd == 0x03)
			return 0;
	}
	return 1;
}
static int
wm_get_swsm_semaphore(struct wm_softc *sc)
{
	int32_t timeout;
	uint32_t swsm;

	/* Get the FW semaphore. */
	timeout = 1000 + 1; /* XXX */
	while (timeout) {
		swsm = CSR_READ(sc, WMREG_SWSM);
		swsm |= SWSM_SWESMBI;
		CSR_WRITE(sc, WMREG_SWSM, swsm);
		/* if we managed to set the bit we got the semaphore. */
		swsm = CSR_READ(sc, WMREG_SWSM);
		if (swsm & SWSM_SWESMBI)
			break;

		delay(50);
		timeout--;
	}

	if (timeout == 0) {
		aprint_error_dev(sc->sc_dev, "could not acquire EEPROM GNT\n");
		/* Release semaphores */
		wm_put_swsm_semaphore(sc);
		return 1;
	}
	return 0;
}

static void
wm_put_swsm_semaphore(struct wm_softc *sc)
{
	uint32_t swsm;

	swsm = CSR_READ(sc, WMREG_SWSM);
	swsm &= ~(SWSM_SWESMBI);
	CSR_WRITE(sc, WMREG_SWSM, swsm);
}
static int
wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
{
	uint32_t swfw_sync;
	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
	int timeout;

	for (timeout = 0; timeout < 200; timeout++) {
		if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
			if (wm_get_swsm_semaphore(sc)) {
				aprint_error_dev(sc->sc_dev,
				    "%s: failed to get semaphore\n",
				    __func__);
				return 1;
			}
		}
		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
		if ((swfw_sync & (swmask | fwmask)) == 0) {
			swfw_sync |= swmask;
			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
			if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
				wm_put_swsm_semaphore(sc);
			return 0;
		}
		if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
			wm_put_swsm_semaphore(sc);
		delay(5000);
	}
	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
	    device_xname(sc->sc_dev), mask, swfw_sync);
	return 1;
}

static void
wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
{
	uint32_t swfw_sync;

	if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
		while (wm_get_swsm_semaphore(sc) != 0)
			continue;
	}
	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
	if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
		wm_put_swsm_semaphore(sc);
}
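/*
 * Illustrative acquire/release pattern (not compiled): every access to a
 * resource shared with the firmware must be bracketed this way, as the
 * PHY and Kumeran accessors above do.
 */
#if 0
	if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM) == 0) {
		/* ... touch the shared resource ... */
		wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
	}
#endif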
static int
wm_get_swfwhw_semaphore(struct wm_softc *sc)
{
	uint32_t ext_ctrl;
	int timeout;

	for (timeout = 0; timeout < 200; timeout++) {
		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
		ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);

		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
		if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
			return 0;
		delay(5000);
	}
	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
	    device_xname(sc->sc_dev), ext_ctrl);
	return 1;
}

static void
wm_put_swfwhw_semaphore(struct wm_softc *sc)
{
	uint32_t ext_ctrl;

	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
	ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
}
static int
wm_valid_nvm_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
{
	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
	uint8_t bank_high_byte;
	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);

	if (sc->sc_type != WM_T_ICH10) {
		/* Value of bit 22 corresponds to the flash bank we're on. */
		*bank = (CSR_READ(sc, WMREG_EECD) & EECD_SEC1VAL) ? 1 : 0;
	} else {
		wm_read_ich8_byte(sc, act_offset, &bank_high_byte);
		if ((bank_high_byte & 0xc0) == 0x80)
			*bank = 0;
		else {
			wm_read_ich8_byte(sc, act_offset + bank1_offset,
			    &bank_high_byte);
			if ((bank_high_byte & 0xc0) == 0x80)
				*bank = 1;
			else {
				aprint_error_dev(sc->sc_dev,
				    "EEPROM not present\n");
				return -1;
			}
		}
	}

	return 0;
}
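/*
 * The signature check above masks the high byte with 0xc0 and compares
 * against 0x80: a valid bank carries the two-bit signature 10b in its top
 * bits, so e.g. a byte of 0xbf (10111111b) is accepted while 0xff (erased
 * flash) is rejected.
 */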
/******************************************************************************
 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
 * register.
 *
 * sc - Struct containing variables accessed by shared code
 * offset - offset of word in the EEPROM to read
 * data - word read from the EEPROM
 * words - number of words to read
 *****************************************************************************/
static int
wm_read_eeprom_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
{
	int32_t error = 0;
	uint32_t flash_bank = 0;
	uint32_t act_offset = 0;
	uint32_t bank_offset = 0;
	uint16_t word = 0;
	uint16_t i = 0;

	/* We need to know which is the valid flash bank.  In the event
	 * that we didn't allocate eeprom_shadow_ram, we may not be
	 * managing flash_bank.  So it cannot be trusted and needs
	 * to be updated with each read.
	 */
	error = wm_valid_nvm_bank_detect_ich8lan(sc, &flash_bank);
	if (error) {
		aprint_error_dev(sc->sc_dev, "%s: failed to detect NVM bank\n",
		    __func__);
		return error;
	}

	/* Adjust offset appropriately if we're on bank 1 - adjust for word size */
	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);

	error = wm_get_swfwhw_semaphore(sc);
	if (error) {
		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
		    __func__);
		return error;
	}

	for (i = 0; i < words; i++) {
		/* The NVM part needs a byte offset, hence * 2 */
		act_offset = bank_offset + ((offset + i) * 2);
		error = wm_read_ich8_word(sc, act_offset, &word);
		if (error) {
			aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
			    __func__);
			break;
		}
		data[i] = word;
	}

	wm_put_swfwhw_semaphore(sc);
	return error;
}
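/*
 * Usage sketch (not compiled; assumes the station address occupies the
 * first three NVM words, as in the usual e1000 EEPROM layout): offsets
 * are in 16-bit words, as for the other EEPROM read paths.
 */
#if 0
	uint16_t myea[3];

	if (wm_read_eeprom_ich8(sc, 0, 3, myea) == 0) {
		/* myea[0..2] now hold the 48-bit station address */
	}
#endif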
/******************************************************************************
 * This function does initial flash setup so that a new read/write/erase cycle
 * can be started.
 *
 * sc - The pointer to the hw structure
 ****************************************************************************/
static int32_t
wm_ich8_cycle_init(struct wm_softc *sc)
{
	uint16_t hsfsts;
	int32_t error = 1;
	int32_t i = 0;

	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);

	/* Maybe check the Flash Des Valid bit in Hw status */
	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
		return error;
	}

	/* Clear FCERR in Hw status by writing 1 */
	/* Clear DAEL in Hw status by writing a 1 */
	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;

	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);

	/*
	 * Either we should have a hardware SPI cycle-in-progress bit to check
	 * against in order to start a new cycle, or the FDONE bit should be
	 * changed in the hardware so that it is 1 after hardware reset, which
	 * can then be used as an indication whether a cycle is in progress or
	 * has been completed.  We should also have some software semaphore
	 * mechanism to guard FDONE or the cycle-in-progress bit so that
	 * access to those bits by two threads can be sequentialized, or a way
	 * so that two threads don't start a cycle at the same time.
	 */
	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
		/* There is no cycle running at present, so we can start a cycle */
		/* Begin by setting Flash Cycle Done. */
		hsfsts |= HSFSTS_DONE;
		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
		error = 0;
	} else {
		/* otherwise poll for some time so the current cycle has a
		 * chance to end before giving up. */
		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
				error = 0;
				break;
			}
			delay(1);
		}
		if (error == 0) {
			/* Succeeded in waiting for the previous cycle to
			 * finish; now set the Flash Cycle Done. */
			hsfsts |= HSFSTS_DONE;
			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
		}
	}
	return error;
}
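/*
 * In outline, the init handshake above is (illustrative summary only):
 *
 *	read HSFSTS
 *	if the flash descriptor is invalid -> fail
 *	clear FCERR and DAEL by writing them back as ones
 *	if no cycle is in progress -> set FDONE, done
 *	else poll FLINPRO up to the timeout, then set FDONE
 */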
/******************************************************************************
 * This function starts a flash cycle and waits for its completion
 *
 * sc - The pointer to the hw structure
 ****************************************************************************/
static int32_t
wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
{
	uint16_t hsflctl;
	uint16_t hsfsts;
	int32_t error = 1;
	uint32_t i = 0;

	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
	hsflctl |= HSFCTL_GO;
	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);

	/* wait till FDONE bit is set to 1 */
	do {
		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
		if (hsfsts & HSFSTS_DONE)
			break;
		delay(1);
		i++;
	} while (i < timeout);
	if ((hsfsts & HSFSTS_DONE) == 1 && (hsfsts & HSFSTS_ERR) == 0) {
		error = 0;
	}
	return error;
}
/******************************************************************************
 * Reads a byte or word from the NVM using the ICH8 flash access registers.
 *
 * sc - The pointer to the hw structure
 * index - The index of the byte or word to read.
 * size - Size of data to read, 1=byte 2=word
 * data - Pointer to the word to store the value read.
 *****************************************************************************/
static int32_t
wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
    uint32_t size, uint16_t* data)
{
	uint16_t hsfsts;
	uint16_t hsflctl;
	uint32_t flash_linear_address;
	uint32_t flash_data = 0;
	int32_t error = 1;
	int32_t count = 0;

	if (size < 1 || size > 2 || data == 0x0 ||
	    index > ICH_FLASH_LINEAR_ADDR_MASK)
		return error;

	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
	    sc->sc_ich8_flash_base;

	do {
		delay(1);

		error = wm_ich8_cycle_init(sc);
		if (error)
			break;

		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
		hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT) & HSFCTL_BCOUNT_MASK;
		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);

		/* Write the last 24 bits of index into the Flash Linear
		 * address field in Flash Address */
		/* TODO: TBD maybe check the index against the size of flash */
		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);

		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);

		/* If FCERR is set to 1, clear it and try the whole sequence
		 * a few more times; otherwise read in (shift in) Flash Data0.
		 * The order is least significant byte first, msb to lsb. */
		if (error == 0) {
			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
			if (size == 1) {
				*data = (uint8_t)(flash_data & 0x000000FF);
			} else if (size == 2) {
				*data = (uint16_t)(flash_data & 0x0000FFFF);
			}
			break;
		} else {
			/* If we've gotten here, then things are probably
			 * completely hosed, but if the error condition is
			 * detected, it won't hurt to give it another
			 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
			 */
			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
			if (hsfsts & HSFSTS_ERR) {
				/* Repeat for some time before giving up. */
				continue;
			} else if ((hsfsts & HSFSTS_DONE) == 0) {
				break;
			}
		}
	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);

	return error;
}
/******************************************************************************
 * Reads a single byte from the NVM using the ICH8 flash access registers.
 *
 * sc - pointer to wm_hw structure
 * index - The index of the byte to read.
 * data - Pointer to a byte to store the value read.
 *****************************************************************************/
static int32_t
wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
{
	int32_t status;
	uint16_t word = 0;

	status = wm_read_ich8_data(sc, index, 1, &word);
	if (status == 0)
		*data = (uint8_t)word;

	return status;
}

/******************************************************************************
 * Reads a word from the NVM using the ICH8 flash access registers.
 *
 * sc - pointer to wm_hw structure
 * index - The starting byte index of the word to read.
 * data - Pointer to a word to store the value read.
 *****************************************************************************/
static int32_t
wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
{
	int32_t status;

	status = wm_read_ich8_data(sc, index, 2, data);
	return status;
}
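/*
 * Note that the index argument is a byte offset into the flash image:
 * word n of the NVM lives at index n * 2, which is why
 * wm_read_eeprom_ich8() multiplies its word offset by two before calling
 * wm_read_ich8_word().
 */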
static int
wm_check_mng_mode(struct wm_softc *sc)
{
	int rv;

	switch (sc->sc_type) {
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
		rv = wm_check_mng_mode_ich8lan(sc);
		break;
	case WM_T_82574:
	case WM_T_82583:
		rv = wm_check_mng_mode_82574(sc);
		break;
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_80003:
		rv = wm_check_mng_mode_generic(sc);
		break;
	default:
		/* nothing to do */
		rv = 0;
		break;
	}

	return rv;
}

static int
wm_check_mng_mode_ich8lan(struct wm_softc *sc)
{
	uint32_t fwsm;

	fwsm = CSR_READ(sc, WMREG_FWSM);

	if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
		return 1;

	return 0;
}

static int
wm_check_mng_mode_82574(struct wm_softc *sc)
{
	uint16_t data;

	wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &data);

	if ((data & EEPROM_CFG2_MNGM_MASK) != 0)
		return 1;

	return 0;
}

static int
wm_check_mng_mode_generic(struct wm_softc *sc)
{
	uint32_t fwsm;

	fwsm = CSR_READ(sc, WMREG_FWSM);

	if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
		return 1;

	return 0;
}
static int
wm_check_reset_block(struct wm_softc *sc)
{
	uint32_t reg;

	switch (sc->sc_type) {
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
		reg = CSR_READ(sc, WMREG_FWSM);
		if ((reg & FWSM_RSPCIPHY) != 0)
			return 0;
		else
			return -1;
		break;
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82575:
	case WM_T_82583:
	case WM_T_80003:
		reg = CSR_READ(sc, WMREG_MANC);
		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
			return -1;
		else
			return 0;
		break;
	default:
		/* no problem */
		break;
	}

	return 0;
}
static void
wm_get_hw_control(struct wm_softc *sc)
{
	uint32_t reg;

	switch (sc->sc_type) {
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
		/*
		 * FreeBSD's em driver has a function for the 82574 that
		 * checks the management mode, but it's not used. Why?
		 */
		reg = CSR_READ(sc, WMREG_SWSM);
		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
		break;
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_80003:
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
		break;
	default:
		break;
	}
}
/* XXX Currently TBI only */
static int
wm_check_for_link(struct wm_softc *sc)
{
	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
	uint32_t rxcw;
	uint32_t ctrl;
	uint32_t status;
	uint32_t sig;

	rxcw = CSR_READ(sc, WMREG_RXCW);
	ctrl = CSR_READ(sc, WMREG_CTRL);
	status = CSR_READ(sc, WMREG_STATUS);

	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;

	DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
	    device_xname(sc->sc_dev), __func__,
	    ((ctrl & CTRL_SWDPIN(1)) == sig),
	    ((status & STATUS_LU) != 0),
	    ((rxcw & RXCW_C) != 0)
	    ));

	/*
	 * SWDPIN   LU RXCW_C
	 *	0    0	  0
	 *	0    0	  1	(should not happen)
	 *	0    1	  0	(should not happen)
	 *	0    1	  1	(should not happen)
	 *	1    0	  0	Disable autonego and force linkup
	 *	1    0	  1	got /C/ but not linkup yet
	 *	1    1	  0	(linkup)
	 *	1    1	  1	If IFM_AUTO, back to autonego
	 */
	if (((ctrl & CTRL_SWDPIN(1)) == sig)
	    && ((status & STATUS_LU) == 0)
	    && ((rxcw & RXCW_C) == 0)) {
		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
		    device_xname(sc->sc_dev)));
		sc->sc_tbi_linkup = 0;
		/* Disable auto-negotiation in the TXCW register */
		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));

		/*
		 * Force link-up and also force full-duplex.
		 *
		 * NOTE: CTRL will update TFCE and RFCE automatically,
		 * so we should update sc->sc_ctrl
		 */
		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
	} else if (((status & STATUS_LU) != 0)
	    && ((rxcw & RXCW_C) != 0)
	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
		sc->sc_tbi_linkup = 1;
		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
		    device_xname(sc->sc_dev)));
		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
	    && ((rxcw & RXCW_C) != 0)) {
		DPRINTF(WM_DEBUG_LINK, ("/C/"));
	} else {
		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,