/*	$NetBSD: if_nfe.c,v 1.47 2009/11/26 15:17:09 njoly Exp $	*/
/*	$OpenBSD: if_nfe.c,v 1.77 2008/02/05 16:52:50 brad Exp $	*/

/*
 * Copyright (c) 2006, 2007 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_nfe.c,v 1.47 2009/11/26 15:17:09 njoly Exp $");

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/callout.h>
#include <sys/socket.h>
#include <sys/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>
#include <net/if_arp.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_inarp.h>

#include <net/bpf.h>
#include <net/if_types.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_nfereg.h>
#include <dev/pci/if_nfevar.h>
static int nfe_ifflags_cb(struct ethercom *);

int	nfe_match(device_t, cfdata_t, void *);
void	nfe_attach(device_t, device_t, void *);
void	nfe_power(int, void *);
void	nfe_miibus_statchg(device_t);
int	nfe_miibus_readreg(device_t, int, int);
void	nfe_miibus_writereg(device_t, int, int, int);
int	nfe_intr(void *);
int	nfe_ioctl(struct ifnet *, u_long, void *);
void	nfe_txdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_txdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_txdesc32_rsync(struct nfe_softc *, int, int, int);
void	nfe_txdesc64_rsync(struct nfe_softc *, int, int, int);
void	nfe_rxdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_rxdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_rxeof(struct nfe_softc *);
void	nfe_txeof(struct nfe_softc *);
int	nfe_encap(struct nfe_softc *, struct mbuf *);
void	nfe_start(struct ifnet *);
void	nfe_watchdog(struct ifnet *);
int	nfe_init(struct ifnet *);
void	nfe_stop(struct ifnet *, int);
struct	nfe_jbuf *nfe_jalloc(struct nfe_softc *, int);
void	nfe_jfree(struct mbuf *, void *, size_t, void *);
int	nfe_jpool_alloc(struct nfe_softc *);
void	nfe_jpool_free(struct nfe_softc *);
int	nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
int	nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_setmulti(struct nfe_softc *);
void	nfe_get_macaddr(struct nfe_softc *, uint8_t *);
void	nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
void	nfe_tick(void *);
void	nfe_poweron(device_t);
bool	nfe_resume(device_t, pmf_qual_t);
CFATTACH_DECL_NEW(nfe, sizeof(struct nfe_softc), nfe_match, nfe_attach,
    NULL, NULL);
/* #define NFE_NO_JUMBO */

#ifdef NFE_DEBUG
int nfedebug = 0;
#define DPRINTF(x)	do { if (nfedebug) printf x; } while (0)
#define DPRINTFN(n,x)	do { if (nfedebug >= (n)) printf x; } while (0)
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif
/* deal with naming differences */

#define PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 \
	PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN1
#define PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 \
	PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN2
#define PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 \
	PCI_PRODUCT_NVIDIA_NFORCE3_250_LAN

#define PCI_PRODUCT_NVIDIA_CK804_LAN1 \
	PCI_PRODUCT_NVIDIA_NFORCE4_LAN1
#define PCI_PRODUCT_NVIDIA_CK804_LAN2 \
	PCI_PRODUCT_NVIDIA_NFORCE4_LAN2

#define PCI_PRODUCT_NVIDIA_MCP51_LAN1 \
	PCI_PRODUCT_NVIDIA_NFORCE430_LAN1
#define PCI_PRODUCT_NVIDIA_MCP51_LAN2 \
	PCI_PRODUCT_NVIDIA_NFORCE430_LAN2
const struct nfe_product {
	pci_vendor_id_t vendor;
	pci_product_id_t product;
} nfe_devices[] = {
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4 }
};
int
nfe_match(device_t dev, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = aux;
	const struct nfe_product *np;
	int i;

	for (i = 0; i < __arraycount(nfe_devices); i++) {
		np = &nfe_devices[i];
		if (PCI_VENDOR(pa->pa_id) == np->vendor &&
		    PCI_PRODUCT(pa->pa_id) == np->product)
			return 1;
	}
	return 0;
}
void
nfe_attach(device_t parent, device_t self, void *aux)
{
	struct nfe_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	pcireg_t memtype;
	bus_size_t memsize;
	char devinfo[256];
	int mii_flags = 0;

	sc->sc_dev = self;
	pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo, sizeof(devinfo));
	aprint_normal(": %s (rev. 0x%02x)\n", devinfo, PCI_REVISION(pa->pa_class));

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, NFE_PCI_BA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		if (pci_mapreg_map(pa, NFE_PCI_BA, memtype, 0, &sc->sc_memt,
		    &sc->sc_memh, NULL, &memsize) == 0)
			break;
		/* FALLTHROUGH */
	default:
		aprint_error_dev(self, "could not map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		aprint_error_dev(self, "could not map interrupt\n");
	}

	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, nfe_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(self, "could not establish interrupt");
		aprint_error(" at %s", intrstr);
	}
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	sc->sc_dmat = pa->pa_dmat;

	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN4:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_CORRECT_MACADDR |
		    NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP77_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN4:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP79_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN4:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		mii_flags = MIIF_DOPAUSE;
		break;
	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_HW_VLAN | NFE_PWR_MGMT;
		break;
	}

	/* enable jumbo frames for adapters that support it */
	if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->sc_flags |= NFE_USE_JUMBO;

	/* Check for reversed ethernet address */
	if ((NFE_READ(sc, NFE_TX_UNK) & NFE_MAC_ADDR_INORDER) != 0)
		sc->sc_flags |= NFE_CORRECT_MACADDR;

	nfe_get_macaddr(sc, sc->sc_enaddr);
	aprint_normal_dev(self, "Ethernet address %s\n",
	    ether_sprintf(sc->sc_enaddr));

	/*
	 * Allocate Tx and Rx rings.
	 */
	if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) {
		aprint_error_dev(self, "could not allocate Tx ring\n");
	}

	mutex_init(&sc->rxq.mtx, MUTEX_DEFAULT, IPL_NET);

	if (nfe_alloc_rx_ring(sc, &sc->rxq) != 0) {
		aprint_error_dev(self, "could not allocate Rx ring\n");
		nfe_free_tx_ring(sc, &sc->txq);
	}

	ifp = &sc->sc_ethercom.ec_if;
	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
	ifp->if_stop = nfe_stop;
	ifp->if_watchdog = nfe_watchdog;
	ifp->if_init = nfe_init;
	ifp->if_baudrate = IF_Gbps(1);
	IFQ_SET_MAXLEN(&ifp->if_snd, NFE_IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);
	strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);

	if (sc->sc_flags & NFE_USE_JUMBO)
		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;

	if (sc->sc_flags & NFE_HW_VLAN)
		sc->sc_ethercom.ec_capabilities |=
		    ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_MTU;

	if (sc->sc_flags & NFE_HW_CSUM) {
		ifp->if_capabilities |=
		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;
	}

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = nfe_miibus_readreg;
	sc->sc_mii.mii_writereg = nfe_miibus_writereg;
	sc->sc_mii.mii_statchg = nfe_miibus_statchg;

	sc->sc_ethercom.ec_mii = &sc->sc_mii;
	ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange,
	    ether_mediastatus);

	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, mii_flags);

	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		aprint_error_dev(self, "no PHY found!\n");
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);

	ether_ifattach(ifp, sc->sc_enaddr);
	ether_set_ifflags_cb(&sc->sc_ethercom, nfe_ifflags_cb);

	callout_init(&sc->sc_tick_ch, 0);
	callout_setfunc(&sc->sc_tick_ch, nfe_tick, sc);

	if (pmf_device_register(self, NULL, nfe_resume))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");

	if (sc->sc_ih != NULL) {
		pci_intr_disestablish(pc, sc->sc_ih);
	}
	bus_space_unmap(sc->sc_memt, sc->sc_memh, memsize);
}
void
nfe_miibus_statchg(device_t dev)
{
	struct nfe_softc *sc = device_private(dev);
	struct mii_data *mii = &sc->sc_mii;
	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;

	phy = NFE_READ(sc, NFE_PHY_IFACE);
	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

	seed = NFE_READ(sc, NFE_RNDSEED);
	seed &= ~NFE_SEED_MASK;

	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
		phy |= NFE_PHY_HDX;	/* half-duplex */
		misc |= NFE_MISC1_HDX;
	}

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:	/* full-duplex only */
		link |= NFE_MEDIA_1000T;
		seed |= NFE_SEED_1000T;
		phy |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		link |= NFE_MEDIA_100TX;
		seed |= NFE_SEED_100TX;
		phy |= NFE_PHY_100TX;
		break;
	case IFM_10_T:
		link |= NFE_MEDIA_10T;
		seed |= NFE_SEED_10T;
		break;
	}

	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */

	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
	NFE_WRITE(sc, NFE_MISC1, misc);
	NFE_WRITE(sc, NFE_LINKSPEED, link);
}
int
nfe_miibus_readreg(device_t dev, int phy, int reg)
{
	struct nfe_softc *sc = device_private(dev);
	uint32_t val;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

	for (ntries = 0; ntries < 1000; ntries++) {
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == 1000) {
		DPRINTFN(2, ("%s: timeout waiting for PHY\n",
		    device_xname(sc->sc_dev)));
		return 0;
	}

	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(2, ("%s: could not read PHY\n",
		    device_xname(sc->sc_dev)));
		return 0;
	}

	val = NFE_READ(sc, NFE_PHY_DATA);
	if (val != 0xffffffff && val != 0)
		sc->mii_phyaddr = phy;

	DPRINTFN(2, ("%s: mii read phy %d reg 0x%x ret 0x%x\n",
	    device_xname(sc->sc_dev), phy, reg, val));

	return val;
}
void
nfe_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct nfe_softc *sc = device_private(dev);
	uint32_t ctl;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, val);
	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
	NFE_WRITE(sc, NFE_PHY_CTL, ctl);

	for (ntries = 0; ntries < 1000; ntries++) {
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}

	if (nfedebug >= 2 && ntries == 1000)
		printf("could not write to PHY\n");
}
int
nfe_intr(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t r;
	int handled = 0;

	if ((ifp->if_flags & IFF_UP) == 0)
		return 0;

	r = NFE_READ(sc, NFE_IRQ_STATUS);
	if ((r & NFE_IRQ_WANTED) == 0)
		return 0;

	NFE_WRITE(sc, NFE_IRQ_STATUS, r);

	DPRINTFN(5, ("nfe_intr: interrupt register %x\n", r));

	if ((r & (NFE_IRQ_RXERR|NFE_IRQ_RX_NOBUF|NFE_IRQ_RX)) != 0) {
		nfe_rxeof(sc);
		handled = 1;
	}
	if ((r & (NFE_IRQ_TXERR|NFE_IRQ_TXERR2|NFE_IRQ_TX_DONE)) != 0) {
		nfe_txeof(sc);
		handled = 1;
	}
	if ((r & NFE_IRQ_LINK) != 0) {
		NFE_READ(sc, NFE_PHY_STATUS);
		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
		DPRINTF(("%s: link state changed\n",
		    device_xname(sc->sc_dev)));
		handled = 1;
	}

	if (handled && !IF_IS_EMPTY(&ifp->if_snd))
		nfe_start(ifp);

	return handled;
}
static int
nfe_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct nfe_softc *sc = ifp->if_softc;
	int change = ifp->if_flags ^ sc->sc_if_flags;

	/*
	 * If only the PROMISC flag changes, then
	 * don't do a full re-init of the chip, just update
	 * the multicast filter.
	 */
	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
		return ENETRESET;
	else if ((change & IFF_PROMISC) != 0)
		nfe_setmulti(sc);

	return 0;
}
int
nfe_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int error = 0;

	switch (cmd) {
	case SIOCINITIFADDR:
		ifp->if_flags |= IFF_UP;
		nfe_init(ifp);
		switch (ifa->ifa_addr->sa_family) {
		case AF_INET:
			arp_ifinit(ifp, ifa);
			break;
		default:
			break;
		}
		break;
	default:
		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
			break;
		error = 0;
		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			error = nfe_init(ifp);
		else if (ifp->if_flags & IFF_RUNNING)
			nfe_setmulti(sc);
		break;
	}

	sc->sc_if_flags = ifp->if_flags;

	return error;
}
void
nfe_txdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (char *)desc32 - (char *)sc->txq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_txdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (char *)desc64 - (char *)sc->txq.desc64,
	    sizeof (struct nfe_desc64), ops);
}
void
nfe_txdesc32_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (char *)&sc->txq.desc32[start] - (char *)sc->txq.desc32,
		    (char *)&sc->txq.desc32[end] -
		    (char *)&sc->txq.desc32[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (char *)&sc->txq.desc32[start] - (char *)sc->txq.desc32,
	    (char *)&sc->txq.desc32[NFE_TX_RING_COUNT] -
	    (char *)&sc->txq.desc32[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (char *)&sc->txq.desc32[end] - (char *)sc->txq.desc32, ops);
}

void
nfe_txdesc64_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (char *)&sc->txq.desc64[start] - (char *)sc->txq.desc64,
		    (char *)&sc->txq.desc64[end] -
		    (char *)&sc->txq.desc64[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (char *)&sc->txq.desc64[start] - (char *)sc->txq.desc64,
	    (char *)&sc->txq.desc64[NFE_TX_RING_COUNT] -
	    (char *)&sc->txq.desc64[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (char *)&sc->txq.desc64[end] - (char *)sc->txq.desc64, ops);
}
void
nfe_rxdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (char *)desc32 - (char *)sc->rxq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_rxdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (char *)desc64 - (char *)sc->rxq.desc64,
	    sizeof (struct nfe_desc64), ops);
}
void
nfe_rxeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct nfe_jbuf *jbuf;
	struct mbuf *m, *mnew;
	bus_addr_t physaddr;
	uint16_t flags;
	int error, len, i;

	for (i = sc->rxq.cur;; i = NFE_RX_NEXTDESC(i)) {
		data = &sc->rxq.data[i];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[i];
			nfe_rxdesc64_sync(sc, desc64,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			flags = le16toh(desc64->flags);
			len = le16toh(desc64->length) & 0x3fff;
		} else {
			desc32 = &sc->rxq.desc32[i];
			nfe_rxdesc32_sync(sc, desc32,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			flags = le16toh(desc32->flags);
			len = le16toh(desc32->length) & 0x3fff;
		}

		if ((flags & NFE_RX_READY) != 0)
			break;

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if ((flags & NFE_RX_VALID_V1) == 0)
			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if ((flags & NFE_RX_VALID_V2) == 0)
			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {

		/*
		 * Try to allocate a new mbuf for this ring element and load
		 * it before processing the current mbuf. If the ring element
		 * cannot be loaded, drop the received packet and reuse the
		 * old mbuf. In the unlikely case that the old mbuf can't be
		 * reloaded either, explicitly panic.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);

		if (sc->sc_flags & NFE_USE_JUMBO) {
			physaddr =
			    sc->rxq.jbuf[sc->rxq.jbufmap[i]].physaddr;
			if ((jbuf = nfe_jalloc(sc, i)) == NULL) {
				if (len > MCLBYTES) {
				MCLGET(mnew, M_DONTWAIT);
				if ((mnew->m_flags & M_EXT) == 0) {

				(void)memcpy(mtod(mnew, void *),
				    mtod(data->m, const void *), len);

			MEXTADD(mnew, jbuf->buf, NFE_JBYTES, 0, nfe_jfree, sc);
			bus_dmamap_sync(sc->sc_dmat, sc->rxq.jmap,
			    mtod(data->m, char *) - (char *)sc->rxq.jpool,
			    NFE_JBYTES, BUS_DMASYNC_POSTREAD);

			physaddr = jbuf->physaddr;
		} else {
			MCLGET(mnew, M_DONTWAIT);
			if ((mnew->m_flags & M_EXT) == 0) {

			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);

			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(mnew, void *), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);

				/* try to reload the old mbuf */
				error = bus_dmamap_load(sc->sc_dmat, data->map,
				    mtod(data->m, void *), MCLBYTES, NULL,
				    BUS_DMA_READ | BUS_DMA_NOWAIT);
					/* very unlikely that it will fail.. */
					panic("%s: could not load old rx mbuf",
					    device_xname(sc->sc_dev));

			physaddr = data->map->dm_segs[0].ds_addr;
		}

		/*
		 * New mbuf successfully loaded, update Rx ring and continue
		 */
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;

		if ((sc->sc_flags & NFE_HW_CSUM) != 0) {
			/*
			 * no way to check M_CSUM_IPv4_BAD or non-IPv4 packets?
			 */
			if (flags & NFE_RX_IP_CSUMOK) {
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
				DPRINTFN(3, ("%s: ip4csum-rx ok\n",
				    device_xname(sc->sc_dev)));
			}
			/*
			 * no way to check M_CSUM_TCP_UDP_BAD or
			 * non-TCP/UDP packets?
			 */
			if (flags & NFE_RX_UDP_CSUMOK) {
				m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
				DPRINTFN(3, ("%s: udp4csum-rx ok\n",
				    device_xname(sc->sc_dev)));
			} else if (flags & NFE_RX_TCP_CSUMOK) {
				m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
				DPRINTFN(3, ("%s: tcp4csum-rx ok\n",
				    device_xname(sc->sc_dev)));
			}
		}

		bpf_mtap(ifp->if_bpf, m);

		(*ifp->if_input)(ifp, m);

		/* update mapping address in h/w descriptor */
		if (sc->sc_flags & NFE_40BIT_ADDR) {
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
		} else {
			desc32->physaddr = htole32(physaddr);
		}

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);

			nfe_rxdesc64_sync(sc, desc64,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		} else {
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);

			nfe_rxdesc32_sync(sc, desc32,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		}
	}

	/* update current RX pointer */
	sc->rxq.cur = i;
}
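
/*
 * Transmit completion: reclaim descriptors the chip has finished with,
 * unload the associated DMA maps and clear IFF_OACTIVE once at least one
 * Tx slot is free again.
 */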
void
nfe_txeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data = NULL;
	uint16_t flags;
	int i;

	for (i = sc->txq.next;
	    i = NFE_TX_NEXTDESC(i), sc->txq.queued--) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[i];
			nfe_txdesc64_sync(sc, desc64,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			flags = le16toh(desc64->flags);
		} else {
			desc32 = &sc->txq.desc32[i];
			nfe_txdesc32_sync(sc, desc32,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			flags = le16toh(desc32->flags);
		}

		if ((flags & NFE_TX_VALID) != 0)
			break;

		data = &sc->txq.data[i];

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if ((flags & NFE_TX_LASTFRAG_V1) == 0 &&
			if ((flags & NFE_TX_ERROR_V1) != 0) {
				snprintb(buf, sizeof(buf), NFE_V1_TXERR, flags);
				aprint_error_dev(sc->sc_dev, "tx v1 error %s\n",
				    buf);
		} else {
			if ((flags & NFE_TX_LASTFRAG_V2) == 0 &&
			if ((flags & NFE_TX_ERROR_V2) != 0) {
				snprintb(buf, sizeof(buf), NFE_V2_TXERR, flags);
				aprint_error_dev(sc->sc_dev, "tx v2 error %s\n",
				    buf);
		}

		if (data->m == NULL) {	/* should not get there */
			aprint_error_dev(sc->sc_dev,
			    "last fragment bit w/o associated mbuf!\n");

		/* last fragment of the mbuf chain transmitted */
		bus_dmamap_sync(sc->sc_dmat, data->active, 0,
		    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->active);
	}

	if (sc->txq.queued < NFE_TX_RING_COUNT) {
		/* at least one slot freed */
		ifp->if_flags &= ~IFF_OACTIVE;
	}

	if (sc->txq.queued == 0) {
		/* all queued packets are sent */
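
/*
 * Map an outgoing mbuf chain with bus_dmamap_load_mbuf() and translate each
 * DMA segment into one Tx descriptor.  The valid bit of the first descriptor
 * is set last, so the chip never sees a partially built chain.
 */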
int
nfe_encap(struct nfe_softc *sc, struct mbuf *m0)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data;
	struct m_tag *mtag;
	bus_dmamap_t map;
	uint16_t flags, csumflags;
	uint32_t vtag = 0;
	int error, i, first;

	first = sc->txq.cur;

	map = sc->txq.data[first].map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not map mbuf (error %d)\n",
		    error);
	}

	if (sc->txq.queued + map->dm_nsegs >= NFE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(sc->sc_dmat, map);
	}

	/* setup h/w VLAN tagging */
	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL)
		vtag = NFE_TX_VTAG | VLAN_TAG_VALUE(mtag);

	if ((sc->sc_flags & NFE_HW_CSUM) != 0) {
		if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4)
			csumflags |= NFE_TX_IP_CSUM;
		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_UDPv4))
			csumflags |= NFE_TX_TCP_UDP_CSUM;
	}

	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->txq.data[sc->txq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.cur];
#if defined(__LP64__)
			desc64->physaddr[0] =
			    htole32(map->dm_segs[i].ds_addr >> 32);
#endif
			desc64->physaddr[1] =
			    htole32(map->dm_segs[i].ds_addr & 0xffffffff);
			desc64->length = htole16(map->dm_segs[i].ds_len - 1);
			desc64->flags = htole16(flags);
		} else {
			desc32 = &sc->txq.desc32[sc->txq.cur];

			desc32->physaddr = htole32(map->dm_segs[i].ds_addr);
			desc32->length = htole16(map->dm_segs[i].ds_len - 1);
			desc32->flags = htole16(flags);
		}

		/*
		 * Setting of the valid bit in the first descriptor is
		 * deferred until the whole chain is fully setup.
		 */
		flags |= NFE_TX_VALID;

		sc->txq.cur = NFE_TX_NEXTDESC(sc->txq.cur);
	}

	/* the whole mbuf chain has been setup */
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		/* fix last descriptor */
		flags |= NFE_TX_LASTFRAG_V2;
		desc64->flags = htole16(flags);

		/* Checksum flags and vtag belong to the first fragment only. */
		sc->txq.desc64[first].vtag = htole32(vtag);
		sc->txq.desc64[first].flags |= htole16(csumflags);

		/* finally, set the valid bit in the first descriptor */
		sc->txq.desc64[first].flags |= htole16(NFE_TX_VALID);
	} else {
		/* fix last descriptor */
		if (sc->sc_flags & NFE_JUMBO_SUP)
			flags |= NFE_TX_LASTFRAG_V2;
		else
			flags |= NFE_TX_LASTFRAG_V1;
		desc32->flags = htole16(flags);

		/* Checksum flags belong to the first fragment only. */
		sc->txq.desc32[first].flags |= htole16(csumflags);

		/* finally, set the valid bit in the first descriptor */
		sc->txq.desc32[first].flags |= htole16(NFE_TX_VALID);
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}
void
nfe_start(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mbuf *m0;
	int old = sc->txq.queued;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	IFQ_POLL(&ifp->if_snd, m0);

	if (nfe_encap(sc, m0) != 0) {
		ifp->if_flags |= IFF_OACTIVE;
	}

	/* packet put in h/w queue, remove from s/w queue */
	IFQ_DEQUEUE(&ifp->if_snd, m0);

	if (ifp->if_bpf != NULL)
		bpf_mtap(ifp->if_bpf, m0);

	if (sc->txq.queued != old) {
		/* packets are queued */
		if (sc->sc_flags & NFE_40BIT_ADDR)
			nfe_txdesc64_rsync(sc, old, sc->txq.cur,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		else
			nfe_txdesc32_rsync(sc, old, sc->txq.cur,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);
	}

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
void
nfe_watchdog(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;

	aprint_error_dev(sc->sc_dev, "watchdog timeout\n");

	ifp->if_flags &= ~IFF_RUNNING;
	nfe_init(ifp);
}
int
nfe_init(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	uint32_t tmp;
	int rc = 0;

	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	NFE_WRITE(sc, NFE_TX_UNK, 0);
	NFE_WRITE(sc, NFE_STATUS, 0);

	sc->rxtxctl = NFE_RXTX_BIT2;
	if (sc->sc_flags & NFE_40BIT_ADDR)
		sc->rxtxctl |= NFE_RXTX_V3MAGIC;
	else if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->rxtxctl |= NFE_RXTX_V2MAGIC;
	if (sc->sc_flags & NFE_HW_CSUM)
		sc->rxtxctl |= NFE_RXTX_RXCSUM;

	/*
	 * Although the adapter is capable of stripping VLAN tags from received
	 * frames (NFE_RXTX_VTAG_STRIP), we do not enable this functionality on
	 * purpose. This will be done in software by our network stack.
	 */
	if (sc->sc_flags & NFE_HW_VLAN)
		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT;

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);

	if (sc->sc_flags & NFE_HW_VLAN)
		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* set MAC address */
	nfe_set_macaddr(sc, sc->sc_enaddr);

	/* tell MAC where rings are in memory */
	NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, sc->rxq.physaddr >> 32);
	NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, sc->rxq.physaddr & 0xffffffff);
	NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, sc->txq.physaddr >> 32);
	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, sc->txq.physaddr & 0xffffffff);

	NFE_WRITE(sc, NFE_RING_SIZE,
	    (NFE_RX_RING_COUNT - 1) << 16 |
	    (NFE_TX_RING_COUNT - 1));

	NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz);

	/* force MAC to wakeup */
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);

	NFE_WRITE(sc, NFE_IRQ_MASK, 0);
	nfe_intr(sc); /* XXX clear IRQ status registers */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);

	/* configure interrupts coalescing/mitigation */
	NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
	/* no interrupt mitigation: one interrupt per packet */
	NFE_WRITE(sc, NFE_IMTIMER, 970);

	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);

	/* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);

	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
	NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_ENABLE);

	sc->rxtxctl &= ~NFE_RXTX_BIT2;
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);

	if ((rc = ether_mediachange(ifp)) != 0)

	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);
	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	/* enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);

	callout_schedule(&sc->sc_tick_ch, hz);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return rc;
}
void
nfe_stop(struct ifnet *ifp, int disable)
{
	struct nfe_softc *sc = ifp->if_softc;

	callout_stop(&sc->sc_tick_ch);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	mii_down(&sc->sc_mii);

	NFE_WRITE(sc, NFE_TX_CTL, 0);
	NFE_WRITE(sc, NFE_RX_CTL, 0);

	/* disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

	/* reset Tx and Rx rings */
	nfe_reset_tx_ring(sc, &sc->txq);
	nfe_reset_rx_ring(sc, &sc->rxq);
}
int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct nfe_jbuf *jbuf;
	void **desc;
	bus_addr_t physaddr;
	int i, nsegs, error, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->cur = ring->next = 0;
	ring->bufsz = MCLBYTES;

	error = bus_dmamap_create(sc->sc_dmat, NFE_RX_RING_COUNT * descsize, 1,
	    NFE_RX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not create desc DMA map\n");
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_RX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate DMA memory\n");
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_RX_RING_COUNT * descsize, (void **)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not map desc DMA memory\n");
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_RX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not load desc DMA map\n");
	}

	memset(*desc, 0, NFE_RX_RING_COUNT * descsize);
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	if (sc->sc_flags & NFE_USE_JUMBO) {
		ring->bufsz = NFE_JBYTES;
		if ((error = nfe_jpool_alloc(sc)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate jumbo frames\n");
		}
	}

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &sc->rxq.data[i];

		MGETHDR(data->m, M_DONTWAIT, MT_DATA);
		if (data->m == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate rx mbuf\n");
		}

		if (sc->sc_flags & NFE_USE_JUMBO) {
			if ((jbuf = nfe_jalloc(sc, i)) == NULL) {
				aprint_error_dev(sc->sc_dev,
				    "could not allocate jumbo buffer\n");
			}
			MEXTADD(data->m, jbuf->buf, NFE_JBYTES, 0, nfe_jfree,
			    sc);

			physaddr = jbuf->physaddr;
		} else {
			error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
			    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->map);
			if (error != 0) {
				aprint_error_dev(sc->sc_dev,
				    "could not create DMA map\n");
			}

			MCLGET(data->m, M_DONTWAIT);
			if (!(data->m->m_flags & M_EXT)) {
				aprint_error_dev(sc->sc_dev,
				    "could not allocate mbuf cluster\n");
			}

			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(data->m, void *), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				aprint_error_dev(sc->sc_dev,
				    "could not load rx buf DMA map");
			}
			physaddr = data->map->dm_segs[0].ds_addr;
		}

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[i];
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);
		} else {
			desc32 = &sc->rxq.desc32[i];
			desc32->physaddr = htole32(physaddr);
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;

fail:	nfe_free_rx_ring(sc, ring);
	return error;
}
void
nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			ring->desc64[i].length = htole16(ring->bufsz);
			ring->desc64[i].flags = htole16(NFE_RX_READY);
		} else {
			ring->desc32[i].length = htole16(ring->bufsz);
			ring->desc32[i].flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->cur = ring->next = 0;
}
void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_rx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
	    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, ring->map);
	bus_dmamem_unmap(sc->sc_dmat, (void *)desc,
	    NFE_RX_RING_COUNT * descsize);
	bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			bus_dmamap_destroy(sc->sc_dmat, data->map);
		}
		if (data->m != NULL)
			m_freem(data->m);
	}
}
struct nfe_jbuf *
nfe_jalloc(struct nfe_softc *sc, int i)
{
	struct nfe_jbuf *jbuf;

	mutex_enter(&sc->rxq.mtx);
	jbuf = SLIST_FIRST(&sc->rxq.jfreelist);
	SLIST_REMOVE_HEAD(&sc->rxq.jfreelist, jnext);
	mutex_exit(&sc->rxq.mtx);

	sc->rxq.jbufmap[i] =
	    ((char *)jbuf->buf - (char *)sc->rxq.jpool) / NFE_JBYTES;

	return jbuf;
}
/*
 * This is called automatically by the network stack when the mbuf is freed.
 * Caution must be taken that the NIC might be reset by the time the mbuf is
 * freed.
 */
void
nfe_jfree(struct mbuf *m, void *buf, size_t size, void *arg)
{
	struct nfe_softc *sc = arg;
	struct nfe_jbuf *jbuf;
	int i;

	/* find the jbuf from the base pointer */
	i = ((char *)buf - (char *)sc->rxq.jpool) / NFE_JBYTES;
	if (i < 0 || i >= NFE_JPOOL_COUNT) {
		aprint_error_dev(sc->sc_dev,
		    "request to free a buffer (%p) not managed by us\n", buf);
	}
	jbuf = &sc->rxq.jbuf[i];

	/* ..and put it back in the free list */
	mutex_enter(&sc->rxq.mtx);
	SLIST_INSERT_HEAD(&sc->rxq.jfreelist, jbuf, jnext);
	mutex_exit(&sc->rxq.mtx);

	pool_cache_put(mb_cache, m);
}
int
nfe_jpool_alloc(struct nfe_softc *sc)
{
	struct nfe_rx_ring *ring = &sc->rxq;
	struct nfe_jbuf *jbuf;
	bus_addr_t physaddr;
	int i, nsegs, error;

	/*
	 * Allocate a big chunk of DMA'able memory.
	 */
	error = bus_dmamap_create(sc->sc_dmat, NFE_JPOOL_SIZE, 1,
	    NFE_JPOOL_SIZE, 0, BUS_DMA_NOWAIT, &ring->jmap);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not create jumbo DMA map\n");
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_JPOOL_SIZE, PAGE_SIZE, 0,
	    &ring->jseg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate jumbo DMA memory\n");
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->jseg, nsegs, NFE_JPOOL_SIZE,
	    &ring->jpool, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not map jumbo DMA memory\n");
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->jmap, ring->jpool,
	    NFE_JPOOL_SIZE, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not load jumbo DMA map\n");
	}

	/* ..and split it into 9KB chunks */
	SLIST_INIT(&ring->jfreelist);

	physaddr = ring->jmap->dm_segs[0].ds_addr;
	for (i = 0; i < NFE_JPOOL_COUNT; i++) {
		jbuf = &ring->jbuf[i];

		jbuf->physaddr = physaddr;

		SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);

		physaddr += NFE_JBYTES;
	}

	return 0;

fail:	nfe_jpool_free(sc);
	return error;
}
void
nfe_jpool_free(struct nfe_softc *sc)
{
	struct nfe_rx_ring *ring = &sc->rxq;

	if (ring->jmap != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->jmap, 0,
		    ring->jmap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->jmap);
		bus_dmamap_destroy(sc->sc_dmat, ring->jmap);
	}
	if (ring->jpool != NULL) {
		bus_dmamem_unmap(sc->sc_dmat, ring->jpool, NFE_JPOOL_SIZE);
		bus_dmamem_free(sc->sc_dmat, &ring->jseg, 1);
	}
}
int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	void **desc;
	int i, nsegs, error;
	int descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->cur = ring->next = 0;

	error = bus_dmamap_create(sc->sc_dmat, NFE_TX_RING_COUNT * descsize, 1,
	    NFE_TX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not create desc DMA map\n");
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_TX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate DMA memory\n");
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_TX_RING_COUNT * descsize, (void **)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not map desc DMA memory\n");
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_TX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not load desc DMA map\n");
	}

	memset(*desc, 0, NFE_TX_RING_COUNT * descsize);
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, NFE_JBYTES,
		    NFE_MAX_SCATTER, NFE_JBYTES, 0, BUS_DMA_NOWAIT,
		    &ring->data[i].map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create DMA map\n");
			ring->data[i].map = NULL;
		}
	}

	return 0;

fail:	nfe_free_tx_ring(sc, ring);
	return error;
}
void
nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	int i;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR)
			ring->desc64[i].flags = 0;
		else
			ring->desc32[i].flags = 0;

		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
			data->m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->cur = ring->next = 0;
}
void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
	    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, ring->map);
	bus_dmamem_unmap(sc->sc_dmat, (void *)desc,
	    NFE_TX_RING_COUNT * descsize);
	bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
		}
	}

	/* ..and now actually destroy the DMA mappings */
	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];
		if (data->map == NULL)
			continue;
		bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}
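
/*
 * Program the hardware multicast filter: the chip matches on an
 * (address, mask) pair, which is derived here from the multicast list, or
 * opened up completely for IFF_ALLMULTI/IFF_PROMISC.
 */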
void
nfe_setmulti(struct nfe_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &ec->ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	uint32_t filter = NFE_RXFILTER_MAGIC;
	int i;

	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		memset(addr, 0, ETHER_ADDR_LEN);
		memset(mask, 0, ETHER_ADDR_LEN);
	}

	memcpy(addr, etherbroadcastaddr, ETHER_ADDR_LEN);
	memcpy(mask, etherbroadcastaddr, ETHER_ADDR_LEN);

	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			ifp->if_flags |= IFF_ALLMULTI;
			memset(addr, 0, ETHER_ADDR_LEN);
			memset(mask, 0, ETHER_ADDR_LEN);
		}
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			addr[i] &= enm->enm_addrlo[i];
			mask[i] &= ~enm->enm_addrlo[i];
		}
		ETHER_NEXT_MULTI(step, enm);
	}
	for (i = 0; i < ETHER_ADDR_LEN; i++)

	addr[0] |= 0x01;	/* make sure multicast bit is set */

	NFE_WRITE(sc, NFE_MULTIADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	NFE_WRITE(sc, NFE_MULTIADDR_LO,
	    addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MULTIMASK_HI,
	    mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
	NFE_WRITE(sc, NFE_MULTIMASK_LO,
	    mask[5] << 8 | mask[4]);

	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}
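
/*
 * Two register layouts exist for the station address; NFE_CORRECT_MACADDR
 * (set per-chip or when the chip reports NFE_MAC_ADDR_INORDER) selects
 * between them, the second one being byte-reversed.
 */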
void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t tmp;

	if ((sc->sc_flags & NFE_CORRECT_MACADDR) != 0) {
		tmp = NFE_READ(sc, NFE_MACADDR_HI);
		addr[0] = (tmp & 0xff);
		addr[1] = (tmp >>  8) & 0xff;
		addr[2] = (tmp >> 16) & 0xff;
		addr[3] = (tmp >> 24) & 0xff;

		tmp = NFE_READ(sc, NFE_MACADDR_LO);
		addr[4] = (tmp & 0xff);
		addr[5] = (tmp >> 8) & 0xff;
	} else {
		tmp = NFE_READ(sc, NFE_MACADDR_LO);
		addr[0] = (tmp >> 8) & 0xff;
		addr[1] = (tmp & 0xff);

		tmp = NFE_READ(sc, NFE_MACADDR_HI);
		addr[2] = (tmp >> 24) & 0xff;
		addr[3] = (tmp >> 16) & 0xff;
		addr[4] = (tmp >>  8) & 0xff;
		addr[5] = (tmp & 0xff);
	}
}
void
nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
{
	NFE_WRITE(sc, NFE_MACADDR_LO,
	    addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
}
void
nfe_tick(void *arg)
{
	struct nfe_softc *sc = arg;

	mii_tick(&sc->sc_mii);

	callout_schedule(&sc->sc_tick_ch, hz);
}
void
nfe_poweron(device_t self)
{
	struct nfe_softc *sc = device_private(self);

	if ((sc->sc_flags & NFE_PWR_MGMT) != 0) {
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2);
		NFE_WRITE(sc, NFE_MAC_RESET, NFE_MAC_RESET_MAGIC);
		NFE_WRITE(sc, NFE_MAC_RESET, 0);
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT2);
		NFE_WRITE(sc, NFE_PWR2_CTL,
		    NFE_READ(sc, NFE_PWR2_CTL) & ~NFE_PWR2_WAKEUP_MASK);
	}
}
bool
nfe_resume(device_t dv, pmf_qual_t qual)