/*	$NetBSD: if_tl.c,v 1.92 2009/09/05 13:50:15 tsutsui Exp $	*/

/*
 * Copyright (c) 1997 Manuel Bouyer.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Texas Instruments ThunderLAN ethernet controller
 * ThunderLAN Programmer's Guide (TI Literature Number SPWU013A)
 * available from www.ti.com
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_tl.c,v 1.92 2009/09/05 13:50:15 tsutsui Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/proc.h>	/* only for declaration of wakeup() used by vm.h */
#include <sys/device.h>

#include <net/if.h>
#if defined(SIOCSIFMEDIA)
#include <net/if_media.h>
#endif
#include <net/if_types.h>
#include <net/if_dl.h>
#include <net/route.h>
#include <net/netisr.h>

#include <net/bpfdesc.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>

#if defined(__NetBSD__)
#include <net/if_ether.h>
#include <uvm/uvm_extern.h>
#include <netinet/if_inarp.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/i2c/i2cvar.h>
#include <dev/i2c/i2c_bitbang.h>
#include <dev/i2c/at24cxxvar.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/mii/tlphyvar.h>

#include <dev/pci/if_tlregs.h>
#include <dev/pci/if_tlvar.h>
#endif /* __NetBSD__ */
/* number of transmit/receive buffers */
static int tl_pci_match(device_t, cfdata_t, void *);
static void tl_pci_attach(device_t, device_t, void *);
static int tl_intr(void *);

static int tl_ifioctl(struct ifnet *, ioctl_cmd_t, void *);
static int tl_mediachange(struct ifnet *);
static void tl_ifwatchdog(struct ifnet *);
static bool tl_shutdown(device_t, int);

static void tl_ifstart(struct ifnet *);
static void tl_reset(tl_softc_t *);
static int  tl_init(struct ifnet *);
static void tl_stop(struct ifnet *, int);
static void tl_restart(void *);
static int  tl_add_RxBuff(tl_softc_t *, struct Rx_list *, struct mbuf *);
static void tl_read_stats(tl_softc_t *);
static void tl_ticks(void *);
static int tl_multicast_hash(uint8_t *);
static void tl_addr_filter(tl_softc_t *);

static uint32_t tl_intreg_read(tl_softc_t *, uint32_t);
static void tl_intreg_write(tl_softc_t *, uint32_t, uint32_t);
static uint8_t tl_intreg_read_byte(tl_softc_t *, uint32_t);
static void tl_intreg_write_byte(tl_softc_t *, uint32_t, uint8_t);

void tl_mii_sync(struct tl_softc *);
void tl_mii_sendbits(struct tl_softc *, uint32_t, int);
#if defined(TLDEBUG_RX)
static void ether_printheader(struct ether_header *);
#endif

int tl_mii_read(device_t, int, int);
void tl_mii_write(device_t, int, int, int);

void tl_statchg(device_t);
static int tl_i2c_acquire_bus(void *, int);
static void tl_i2c_release_bus(void *, int);
static int tl_i2c_send_start(void *, int);
static int tl_i2c_send_stop(void *, int);
static int tl_i2c_initiate_xfer(void *, i2c_addr_t, int);
static int tl_i2c_read_byte(void *, uint8_t *, int);
static int tl_i2c_write_byte(void *, uint8_t, int);

/* I2C bit-bang glue */
static void tl_i2cbb_set_bits(void *, uint32_t);
static void tl_i2cbb_set_dir(void *, uint32_t);
static uint32_t tl_i2cbb_read(void *);
static const struct i2c_bitbang_ops tl_i2cbb_ops = {
    tl_i2cbb_set_bits,
    tl_i2cbb_set_dir,
    tl_i2cbb_read,
    {
        TL_NETSIO_EDATA,	/* SDA */
        TL_NETSIO_ECLOCK,	/* SCL */
        TL_NETSIO_ETXEN,	/* SDA is output */
        0,			/* SDA is input */
    }
};
static inline void netsio_clr(tl_softc_t *, uint8_t);
static inline void netsio_set(tl_softc_t *, uint8_t);
static inline uint8_t netsio_read(tl_softc_t *, uint8_t);
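
/*
 * The netsio_* helpers below wrap byte-wide access to the chip's NetSio
 * register (TL_INT_NET + TL_INT_NetSio).  That register carries the
 * bit-banged MII management lines (MCLK/MDATA/MTXEN) as well as the
 * EEPROM I2C lines (ECLOCK/EDATA/ETXEN), so both the MII and the I2C
 * code paths funnel through these three one-liners.
 */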
static inline void
netsio_clr(tl_softc_t *sc, uint8_t bits)
{
    tl_intreg_write_byte(sc, TL_INT_NET + TL_INT_NetSio,
        tl_intreg_read_byte(sc, TL_INT_NET + TL_INT_NetSio) & (~bits));
}

static inline void
netsio_set(tl_softc_t *sc, uint8_t bits)
{
    tl_intreg_write_byte(sc, TL_INT_NET + TL_INT_NetSio,
        tl_intreg_read_byte(sc, TL_INT_NET + TL_INT_NetSio) | bits);
}

static inline uint8_t
netsio_read(tl_softc_t *sc, uint8_t bits)
{
    return tl_intreg_read_byte(sc, TL_INT_NET + TL_INT_NetSio) & bits;
}
CFATTACH_DECL_NEW(tl, sizeof(tl_softc_t),
    tl_pci_match, tl_pci_attach, NULL, NULL);
static const struct tl_product_desc tl_compaq_products[] = {
    { PCI_PRODUCT_COMPAQ_N100TX, TLPHY_MEDIA_NO_10_T,
      "Compaq Netelligent 10/100 TX" },
    { PCI_PRODUCT_COMPAQ_INT100TX, TLPHY_MEDIA_NO_10_T,
      "Integrated Compaq Netelligent 10/100 TX" },
    { PCI_PRODUCT_COMPAQ_N10T, TLPHY_MEDIA_10_5,
      "Compaq Netelligent 10 T" },
    { PCI_PRODUCT_COMPAQ_N10T2, TLPHY_MEDIA_10_2,
      "Compaq Netelligent 10 T/2 UTP/Coax" },
    { PCI_PRODUCT_COMPAQ_IntNF3P, TLPHY_MEDIA_10_2,
      "Compaq Integrated NetFlex 3/P" },
    { PCI_PRODUCT_COMPAQ_IntPL100TX, TLPHY_MEDIA_10_2|TLPHY_MEDIA_NO_10_T,
      "Compaq ProLiant Integrated Netelligent 10/100 TX" },
    { PCI_PRODUCT_COMPAQ_DPNet100TX, TLPHY_MEDIA_10_5|TLPHY_MEDIA_NO_10_T,
      "Compaq Dual Port Netelligent 10/100 TX" },
    { PCI_PRODUCT_COMPAQ_DP4000, TLPHY_MEDIA_10_5|TLPHY_MEDIA_NO_10_T,
      "Compaq Deskpro 4000 5233MMX" },
    { PCI_PRODUCT_COMPAQ_NF3P_BNC, TLPHY_MEDIA_10_2,
      "Compaq NetFlex 3/P w/ BNC" },
    { PCI_PRODUCT_COMPAQ_NF3P, TLPHY_MEDIA_10_5,
      "Compaq NetFlex 3/P" },
    { 0, 0, NULL },
};
static const struct tl_product_desc tl_ti_products[] = {
    /*
     * Built-in Ethernet on the TI TravelMate 5000
     * docking station; better product description?
     */
    { PCI_PRODUCT_TI_TLAN, 0,
      "Texas Instruments ThunderLAN" },
    { 0, 0, NULL },
};
struct tl_vendor_desc {
    uint32_t tv_vendor;
    const struct tl_product_desc *tv_products;
};

const struct tl_vendor_desc tl_vendors[] = {
    { PCI_VENDOR_COMPAQ, tl_compaq_products },
    { PCI_VENDOR_TI, tl_ti_products },
    { 0, NULL },
};
static const struct tl_product_desc *tl_lookup_product(uint32_t);
static const struct tl_product_desc *
tl_lookup_product(uint32_t id)
{
    const struct tl_product_desc *tp;
    const struct tl_vendor_desc *tv;

    for (tv = tl_vendors; tv->tv_products != NULL; tv++)
        if (PCI_VENDOR(id) == tv->tv_vendor)
            break;

    if ((tp = tv->tv_products) == NULL)
        return NULL;

    for (; tp->tp_desc != NULL; tp++)
        if (PCI_PRODUCT(id) == tp->tp_product)
            break;

    if (tp->tp_desc == NULL)
        return NULL;

    return tp;
}
static int
tl_pci_match(device_t parent, cfdata_t cf, void *aux)
{
    struct pci_attach_args *pa = (struct pci_attach_args *)aux;

    if (tl_lookup_product(pa->pa_id) != NULL)
        return 1;

    return 0;
}
static void
tl_pci_attach(device_t parent, device_t self, void *aux)
{
    tl_softc_t *sc = device_private(self);
    struct pci_attach_args * const pa = (struct pci_attach_args *)aux;
    const struct tl_product_desc *tp;
    struct ifnet * const ifp = &sc->tl_if;
    bus_space_tag_t iot, memt;
    bus_space_handle_t ioh, memh;
    pci_intr_handle_t intrhandle;
    const char *intrstr;
    int ioh_valid, memh_valid;
    int reg_io, reg_mem;
    pcireg_t reg10, reg14;
    pcireg_t csr;

    callout_init(&sc->tl_tick_ch, 0);
    callout_init(&sc->tl_restart_ch, 0);

    tp = tl_lookup_product(pa->pa_id);
    if (tp == NULL)
        panic("%s: impossible", __func__);
    /*
     * Map the card space. First we have to find the I/O and MEM
     * registers. I/O is supposed to be at 0x10, MEM at 0x14,
     * but some boards (Compaq Netflex 3/P PCI) seem to have it reversed.
     * The ThunderLAN manual is not consistent about this either (there
     * are both cases in code examples).
     */
    reg10 = pci_conf_read(pa->pa_pc, pa->pa_tag, 0x10);
    reg14 = pci_conf_read(pa->pa_pc, pa->pa_tag, 0x14);
    if (PCI_MAPREG_TYPE(reg10) == PCI_MAPREG_TYPE_IO)
        reg_io = 0x10;
    else if (PCI_MAPREG_TYPE(reg14) == PCI_MAPREG_TYPE_IO)
        reg_io = 0x14;
    else
        reg_io = 0;
    if (PCI_MAPREG_TYPE(reg10) == PCI_MAPREG_TYPE_MEM)
        reg_mem = 0x10;
    else if (PCI_MAPREG_TYPE(reg14) == PCI_MAPREG_TYPE_MEM)
        reg_mem = 0x14;
    else
        reg_mem = 0;

    ioh_valid = (pci_mapreg_map(pa, reg_io, PCI_MAPREG_TYPE_IO,
        0, &iot, &ioh, NULL, NULL) == 0);
    memh_valid = (pci_mapreg_map(pa, PCI_CBMA,
        PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT,
        0, &memt, &memh, NULL, NULL) == 0);

    if (ioh_valid) {
        sc->tl_bustag = iot;
        sc->tl_bushandle = ioh;
    } else if (memh_valid) {
        sc->tl_bustag = memt;
        sc->tl_bushandle = memh;
    } else {
        aprint_error_dev(self, "unable to map device registers\n");
        return;
    }
    sc->tl_dmatag = pa->pa_dmat;
    /* Enable the device. */
    csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
    pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
        csr | PCI_COMMAND_MASTER_ENABLE);

    aprint_normal_dev(self, "%s\n", tp->tp_desc);
    /* fill in the i2c tag */
    sc->sc_i2c.ic_cookie = sc;
    sc->sc_i2c.ic_acquire_bus = tl_i2c_acquire_bus;
    sc->sc_i2c.ic_release_bus = tl_i2c_release_bus;
    sc->sc_i2c.ic_send_start = tl_i2c_send_start;
    sc->sc_i2c.ic_send_stop = tl_i2c_send_stop;
    sc->sc_i2c.ic_initiate_xfer = tl_i2c_initiate_xfer;
    sc->sc_i2c.ic_read_byte = tl_i2c_read_byte;
    sc->sc_i2c.ic_write_byte = tl_i2c_write_byte;

    aprint_debug_dev(self, "default values of INTreg: 0x%x\n",
        tl_intreg_read(sc, TL_INT_Defaults));
    if (seeprom_bootstrap_read(&sc->sc_i2c, 0x50, 0x83, 256 /* 2kbit */,
        sc->tl_enaddr, ETHER_ADDR_LEN)) {
        aprint_error_dev(self, "error reading Ethernet address\n");
        return;
    }
    aprint_normal_dev(self, "Ethernet address %s\n",
        ether_sprintf(sc->tl_enaddr));
    /* Map and establish interrupts */
    if (pci_intr_map(pa, &intrhandle)) {
        aprint_error_dev(self, "couldn't map interrupt\n");
        return;
    }
    intrstr = pci_intr_string(pa->pa_pc, intrhandle);
    sc->tl_if.if_softc = sc;
    sc->tl_ih = pci_intr_establish(pa->pa_pc, intrhandle, IPL_NET,
        tl_intr, sc);
    if (sc->tl_ih == NULL) {
        aprint_error_dev(self, "couldn't establish interrupt");
        if (intrstr != NULL)
            aprint_error(" at %s", intrstr);
        aprint_error("\n");
        return;
    }
    aprint_normal_dev(self, "interrupting at %s\n", intrstr);
    /* init these pointers, so that tl_shutdown won't try to read them */
    sc->Rx_list = NULL;
    sc->Tx_list = NULL;
    /* allocate DMA-safe memory for control structs */
    if (bus_dmamem_alloc(sc->tl_dmatag, PAGE_SIZE, 0, PAGE_SIZE,
        &sc->ctrl_segs, 1, &sc->ctrl_nsegs, BUS_DMA_NOWAIT) != 0 ||
        bus_dmamem_map(sc->tl_dmatag, &sc->ctrl_segs,
        sc->ctrl_nsegs, PAGE_SIZE, (void **)&sc->ctrl,
        BUS_DMA_NOWAIT | BUS_DMA_COHERENT) != 0) {
        aprint_error_dev(self, "can't allocate DMA memory for lists\n");
        return;
    }
    /*
     * Initialize our media structures and probe the MII.
     *
     * Note that we don't care about the media instance. We
     * are expecting to have multiple PHYs on the 10/100 cards,
     * and on those cards we exclude the internal PHY from providing
     * 10baseT. By ignoring the instance, it allows us to not have
     * to specify it on the command line when switching media.
     */
    sc->tl_mii.mii_ifp = ifp;
    sc->tl_mii.mii_readreg = tl_mii_read;
    sc->tl_mii.mii_writereg = tl_mii_write;
    sc->tl_mii.mii_statchg = tl_statchg;
    sc->tl_ec.ec_mii = &sc->tl_mii;
    ifmedia_init(&sc->tl_mii.mii_media, IFM_IMASK, tl_mediachange,
        ether_mediastatus);
    mii_attach(self, &sc->tl_mii, 0xffffffff, MII_PHY_ANY,
        MII_OFFSET_ANY, 0);
    if (LIST_FIRST(&sc->tl_mii.mii_phys) == NULL) {
        ifmedia_add(&sc->tl_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
        ifmedia_set(&sc->tl_mii.mii_media, IFM_ETHER|IFM_NONE);
    } else
        ifmedia_set(&sc->tl_mii.mii_media, IFM_ETHER|IFM_AUTO);
    /*
     * We can support 802.1Q VLAN-sized frames.
     */
    sc->tl_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
    strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
    ifp->if_flags = IFF_BROADCAST|IFF_SIMPLEX|IFF_NOTRAILERS|IFF_MULTICAST;
    ifp->if_ioctl = tl_ifioctl;
    ifp->if_start = tl_ifstart;
    ifp->if_watchdog = tl_ifwatchdog;
    ifp->if_init = tl_init;
    ifp->if_stop = tl_stop;
    IFQ_SET_READY(&ifp->if_snd);
    if_attach(ifp);
    ether_ifattach(&(sc)->tl_if, (sc)->tl_enaddr);
    /*
     * Add shutdown hook so that DMA is disabled prior to reboot.
     * Not doing reboot before the driver initializes.
     */
    if (pmf_device_register1(self, NULL, NULL, tl_shutdown))
        pmf_class_network_register(self, ifp);
    else
        aprint_error_dev(self, "couldn't establish power handler\n");
    rnd_attach_source(&sc->rnd_source, device_xname(self),
        RND_TYPE_NET, 0);
}
static void
tl_reset(tl_softc_t *sc)
{
    int i;

    if (sc->tl_if.if_flags & IFF_RUNNING) {
        callout_stop(&sc->tl_tick_ch);
    }
    TL_HR_WRITE(sc, TL_HOST_CMD,
        TL_HR_READ(sc, TL_HOST_CMD) | HOST_CMD_Ad_Rst);
    /* Disable interrupts */
    TL_HR_WRITE(sc, TL_HOST_CMD, HOST_CMD_IntOff);
    /* setup aregs & hash */
    for (i = TL_INT_Areg0; i <= TL_INT_HASH2; i = i + 4)
        tl_intreg_write(sc, i, 0);
    printf("Areg & hash registers: \n");
    for (i = TL_INT_Areg0; i <= TL_INT_HASH2; i = i + 4)
        printf(" reg %x: %x\n", i, tl_intreg_read(sc, i));
    /* Setup NetConfig */
    tl_intreg_write(sc, TL_INT_NetConfig,
        TL_NETCONFIG_1F | TL_NETCONFIG_1chn | TL_NETCONFIG_PHY_EN);
    /* Bsize: accept default */
    /* TX commit in Acommit: accept default */
    /* Load Ld_tmr and Ld_thr */
    TL_HR_WRITE(sc, TL_HOST_CMD, 0x3 | HOST_CMD_LdTmr);
    TL_HR_WRITE(sc, TL_HOST_CMD, 0x0 | HOST_CMD_LdThr);
    netsio_set(sc, TL_NETSIO_NMRST);
    sc->tl_mii.mii_media_status &= ~IFM_ACTIVE;
}
static bool
tl_shutdown(device_t self, int howto)
{
    tl_softc_t *sc = device_private(self);
    struct ifnet *ifp = &sc->tl_if;

    tl_stop(ifp, 1);

    return true;
}
static void
tl_stop(struct ifnet *ifp, int disable)
{
    tl_softc_t *sc = ifp->if_softc;
    struct Tx_list *Tx;
    int i;

    if ((ifp->if_flags & IFF_RUNNING) == 0)
        return;
    /* disable interrupts */
    TL_HR_WRITE(sc, TL_HOST_CMD, HOST_CMD_IntOff);
    /* stop TX and RX channels */
    TL_HR_WRITE(sc, TL_HOST_CMD,
        HOST_CMD_STOP | HOST_CMD_RT | HOST_CMD_Nes);
    TL_HR_WRITE(sc, TL_HOST_CMD, HOST_CMD_STOP);

    /* stop statistics reading loop, read stats */
    callout_stop(&sc->tl_tick_ch);
    tl_read_stats(sc);

    mii_down(&sc->tl_mii);

    /* deallocate memory allocations */
    if (sc->Rx_list) {
        for (i = 0; i < TL_NBUF; i++) {
            if (sc->Rx_list[i].m) {
                bus_dmamap_unload(sc->tl_dmatag,
                    sc->Rx_list[i].m_dmamap);
                m_freem(sc->Rx_list[i].m);
            }
            bus_dmamap_destroy(sc->tl_dmatag,
                sc->Rx_list[i].m_dmamap);
            sc->Rx_list[i].m = NULL;
        }
        free(sc->Rx_list, M_DEVBUF);
        sc->Rx_list = NULL;
        bus_dmamap_unload(sc->tl_dmatag, sc->Rx_dmamap);
        bus_dmamap_destroy(sc->tl_dmatag, sc->Rx_dmamap);
        sc->hw_Rx_list = NULL;
        while ((Tx = sc->active_Tx) != NULL) {
            Tx->hw_list->stat = 0;
            bus_dmamap_unload(sc->tl_dmatag, Tx->m_dmamap);
            bus_dmamap_destroy(sc->tl_dmatag, Tx->m_dmamap);
            m_freem(Tx->m);
            sc->active_Tx = Tx->next;
            Tx->next = sc->Free_Tx;
            sc->Free_Tx = Tx;
        }
        free(sc->Tx_list, M_DEVBUF);
        sc->Tx_list = NULL;
        bus_dmamap_unload(sc->tl_dmatag, sc->Tx_dmamap);
        bus_dmamap_destroy(sc->tl_dmatag, sc->Tx_dmamap);
        sc->hw_Tx_list = NULL;
    }
    ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
    sc->tl_mii.mii_media_status &= ~IFM_ACTIVE;
}
static int
tl_init(struct ifnet *ifp)
{
    tl_softc_t *sc = ifp->if_softc;
    int i, error;
    bus_size_t boundary;
    prop_number_t prop_boundary;
    const char *errstring;
    char *nullbuf;

    /* cancel any pending IO */
    tl_stop(ifp, 1);
    tl_reset(sc);
    if ((sc->tl_if.if_flags & IFF_UP) == 0) {
        return 0;
    }
    /* Set various register to reasonable value */
    /* setup NetCmd in promisc mode if needed */
    i = (ifp->if_flags & IFF_PROMISC) ? TL_NETCOMMAND_CAF : 0;
    tl_intreg_write_byte(sc, TL_INT_NET + TL_INT_NetCmd,
        TL_NETCOMMAND_NRESET | TL_NETCOMMAND_NWRAP | i);
    /* Max receive size : MCLBYTES */
    tl_intreg_write_byte(sc, TL_INT_MISC + TL_MISC_MaxRxL, MCLBYTES & 0xff);
    tl_intreg_write_byte(sc, TL_INT_MISC + TL_MISC_MaxRxH,
        (MCLBYTES >> 8) & 0xff);

    for (i = 0; i < ETHER_ADDR_LEN; i++)
        tl_intreg_write_byte(sc, TL_INT_Areg0 + i, sc->tl_enaddr[i]);
    /* add multicast filters */
    tl_addr_filter(sc);
    printf("Wrote Mac addr, Areg & hash registers are now: \n");
    for (i = TL_INT_Areg0; i <= TL_INT_HASH2; i = i + 4)
        printf(" reg %x: %x\n", i, tl_intreg_read(sc, i));

    /* Pre-allocate receivers mbuf, make the lists */
    sc->Rx_list = malloc(sizeof(struct Rx_list) * TL_NBUF, M_DEVBUF,
        M_NOWAIT|M_ZERO);
    sc->Tx_list = malloc(sizeof(struct Tx_list) * TL_NBUF, M_DEVBUF,
        M_NOWAIT|M_ZERO);
    if (sc->Rx_list == NULL || sc->Tx_list == NULL) {
        errstring = "out of memory for lists";
        error = ENOMEM;
        goto bad;
    }

    /*
     * Some boards (Set Engineering GFE) do not permit DMA transfers
     * across page boundaries.
     */
    prop_boundary = prop_dictionary_get(device_properties(sc->sc_dev),
        "tl-dma-page-boundary");
    if (prop_boundary != NULL) {
        KASSERT(prop_object_type(prop_boundary) == PROP_TYPE_NUMBER);
        boundary = (bus_size_t)prop_number_integer_value(prop_boundary);
    } else
        boundary = 0;
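
    /*
     * Note: "tl-dma-page-boundary" is an optional device property; when
     * present, its value is passed below as the bus_dmamap_create()
     * boundary argument so that no DMA segment crosses such a boundary.
     * A boundary of 0 (the default above) places no restriction.
     */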
    error = bus_dmamap_create(sc->tl_dmatag,
        sizeof(struct tl_Rx_list) * TL_NBUF, 1,
        sizeof(struct tl_Rx_list) * TL_NBUF, 0, BUS_DMA_WAITOK,
        &sc->Rx_dmamap);
    if (error == 0)
        error = bus_dmamap_create(sc->tl_dmatag,
            sizeof(struct tl_Tx_list) * TL_NBUF, 1,
            sizeof(struct tl_Tx_list) * TL_NBUF, boundary,
            BUS_DMA_WAITOK, &sc->Tx_dmamap);
    if (error == 0)
        error = bus_dmamap_create(sc->tl_dmatag, ETHER_MIN_TX, 1,
            ETHER_MIN_TX, boundary, BUS_DMA_WAITOK,
            &sc->null_dmamap);
    if (error) {
        errstring = "can't allocate DMA maps for lists";
        goto bad;
    }
    memset(sc->ctrl, 0, PAGE_SIZE);
    sc->hw_Rx_list = (void *)sc->ctrl;
    sc->hw_Tx_list =
        (void *)(sc->ctrl + sizeof(struct tl_Rx_list) * TL_NBUF);
    nullbuf = sc->ctrl + sizeof(struct tl_Rx_list) * TL_NBUF +
        sizeof(struct tl_Tx_list) * TL_NBUF;
    error = bus_dmamap_load(sc->tl_dmatag, sc->Rx_dmamap,
        sc->hw_Rx_list, sizeof(struct tl_Rx_list) * TL_NBUF, NULL,
        BUS_DMA_WAITOK);
    if (error == 0)
        error = bus_dmamap_load(sc->tl_dmatag, sc->Tx_dmamap,
            sc->hw_Tx_list, sizeof(struct tl_Tx_list) * TL_NBUF, NULL,
            BUS_DMA_WAITOK);
    if (error == 0)
        error = bus_dmamap_load(sc->tl_dmatag, sc->null_dmamap,
            nullbuf, ETHER_MIN_TX, NULL, BUS_DMA_WAITOK);
    if (error) {
        errstring = "can't DMA map DMA memory for lists";
        goto bad;
    }
    for (i = 0; i < TL_NBUF; i++) {
        error = bus_dmamap_create(sc->tl_dmatag, MCLBYTES,
            1, MCLBYTES, boundary, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
            &sc->Rx_list[i].m_dmamap);
        if (error == 0)
            error = bus_dmamap_create(sc->tl_dmatag, MCLBYTES,
                TL_NSEG, MCLBYTES, boundary,
                BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
                &sc->Tx_list[i].m_dmamap);
        if (error) {
            errstring = "can't allocate DMA maps for mbufs";
            goto bad;
        }
        sc->Rx_list[i].hw_list = &sc->hw_Rx_list[i];
        sc->Rx_list[i].hw_listaddr = sc->Rx_dmamap->dm_segs[0].ds_addr
            + sizeof(struct tl_Rx_list) * i;
        sc->Tx_list[i].hw_list = &sc->hw_Tx_list[i];
        sc->Tx_list[i].hw_listaddr = sc->Tx_dmamap->dm_segs[0].ds_addr
            + sizeof(struct tl_Tx_list) * i;
        if (tl_add_RxBuff(sc, &sc->Rx_list[i], NULL) == 0) {
            errstring = "out of mbuf for receive list";
            error = ENOMEM;
            goto bad;
        }
        if (i > 0) { /* chain the list */
            sc->Rx_list[i - 1].next = &sc->Rx_list[i];
            sc->hw_Rx_list[i - 1].fwd =
                htole32(sc->Rx_list[i].hw_listaddr);
            sc->Tx_list[i - 1].next = &sc->Tx_list[i];
        }
    }
    sc->hw_Rx_list[TL_NBUF - 1].fwd = 0;
    sc->Rx_list[TL_NBUF - 1].next = NULL;
    sc->hw_Tx_list[TL_NBUF - 1].fwd = 0;
    sc->Tx_list[TL_NBUF - 1].next = NULL;

    sc->active_Rx = &sc->Rx_list[0];
    sc->last_Rx = &sc->Rx_list[TL_NBUF - 1];
    sc->active_Tx = sc->last_Tx = NULL;
    sc->Free_Tx = &sc->Tx_list[0];
    bus_dmamap_sync(sc->tl_dmatag, sc->Rx_dmamap, 0,
        sizeof(struct tl_Rx_list) * TL_NBUF,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    bus_dmamap_sync(sc->tl_dmatag, sc->Tx_dmamap, 0,
        sizeof(struct tl_Tx_list) * TL_NBUF,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    bus_dmamap_sync(sc->tl_dmatag, sc->null_dmamap, 0, ETHER_MIN_TX,
        BUS_DMASYNC_PREWRITE);

    if ((error = mii_mediachg(&sc->tl_mii)) == ENXIO)
        error = 0;
    else if (error != 0) {
        errstring = "could not set media";
        goto bad;
    }

    /* start ticks calls */
    callout_reset(&sc->tl_tick_ch, hz, tl_ticks, sc);
    /* write address of Rx list and enable interrupts */
    TL_HR_WRITE(sc, TL_HOST_CH_PARM, sc->Rx_list[0].hw_listaddr);
    TL_HR_WRITE(sc, TL_HOST_CMD,
        HOST_CMD_GO | HOST_CMD_RT | HOST_CMD_Nes | HOST_CMD_IntOn);
    sc->tl_if.if_flags |= IFF_RUNNING;
    sc->tl_if.if_flags &= ~IFF_OACTIVE;
    return 0;
bad:
    printf("%s: %s\n", device_xname(sc->sc_dev), errstring);
    return error;
}
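
/*
 * Internal (DIO) register access.  The ThunderLAN exposes its internal
 * registers through a small window in PCI space: the register address is
 * written to TL_HOST_INTR_DIOADR and the data is then transferred through
 * TL_HOST_DIO_DATA.  The byte-wide variants align the address down and use
 * the low address bits to select the byte lane within the 32-bit window.
 */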
static uint32_t
tl_intreg_read(tl_softc_t *sc, uint32_t reg)
{
    TL_HR_WRITE(sc, TL_HOST_INTR_DIOADR, reg & TL_HOST_DIOADR_MASK);
    return TL_HR_READ(sc, TL_HOST_DIO_DATA);
}

static uint8_t
tl_intreg_read_byte(tl_softc_t *sc, uint32_t reg)
{
    TL_HR_WRITE(sc, TL_HOST_INTR_DIOADR,
        (reg & (~0x07)) & TL_HOST_DIOADR_MASK);
    return TL_HR_READ_BYTE(sc, TL_HOST_DIO_DATA + (reg & 0x07));
}

static void
tl_intreg_write(tl_softc_t *sc, uint32_t reg, uint32_t val)
{
    TL_HR_WRITE(sc, TL_HOST_INTR_DIOADR, reg & TL_HOST_DIOADR_MASK);
    TL_HR_WRITE(sc, TL_HOST_DIO_DATA, val);
}

static void
tl_intreg_write_byte(tl_softc_t *sc, uint32_t reg, uint8_t val)
{
    TL_HR_WRITE(sc, TL_HOST_INTR_DIOADR,
        (reg & (~0x03)) & TL_HOST_DIOADR_MASK);
    TL_HR_WRITE_BYTE(sc, TL_HOST_DIO_DATA + (reg & 0x03), val);
}
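
/*
 * MII management interface, bit-banged through the MCLK/MDATA/MTXEN bits
 * of NetSio: tl_mii_sync() clocks out 32 idle cycles, tl_mii_sendbits()
 * shifts a value out MSB first, and tl_mii_read()/tl_mii_write() build the
 * standard MII frames (start, opcode, PHY and register numbers, turnaround,
 * 16 data bits) on top of them.
 */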
void
tl_mii_sync(struct tl_softc *sc)
{
    int i;

    netsio_clr(sc, TL_NETSIO_MTXEN);
    for (i = 0; i < 32; i++) {
        netsio_clr(sc, TL_NETSIO_MCLK);
        netsio_set(sc, TL_NETSIO_MCLK);
    }
}
void
tl_mii_sendbits(struct tl_softc *sc, uint32_t data, int nbits)
{
    int i;

    netsio_set(sc, TL_NETSIO_MTXEN);
    for (i = 1 << (nbits - 1); i; i = i >> 1) {
        netsio_clr(sc, TL_NETSIO_MCLK);
        netsio_read(sc, TL_NETSIO_MCLK);
        if (data & i)
            netsio_set(sc, TL_NETSIO_MDATA);
        else
            netsio_clr(sc, TL_NETSIO_MDATA);
        netsio_set(sc, TL_NETSIO_MCLK);
        netsio_read(sc, TL_NETSIO_MCLK);
    }
}
int
tl_mii_read(device_t self, int phy, int reg)
{
    struct tl_softc *sc = device_private(self);
    int val = 0, err, i;

    /*
     * Read the PHY register by manually driving the MII control lines.
     */
    tl_mii_sync(sc);
    tl_mii_sendbits(sc, MII_COMMAND_START, 2);
    tl_mii_sendbits(sc, MII_COMMAND_READ, 2);
    tl_mii_sendbits(sc, phy, 5);
    tl_mii_sendbits(sc, reg, 5);

    netsio_clr(sc, TL_NETSIO_MTXEN);
    netsio_clr(sc, TL_NETSIO_MCLK);
    netsio_set(sc, TL_NETSIO_MCLK);
    netsio_clr(sc, TL_NETSIO_MCLK);

    err = netsio_read(sc, TL_NETSIO_MDATA);
    netsio_set(sc, TL_NETSIO_MCLK);

    /* Even if an error occurs, must still clock out the cycle. */
    for (i = 0; i < 16; i++) {
        val <<= 1;
        netsio_clr(sc, TL_NETSIO_MCLK);
        if (err == 0 && netsio_read(sc, TL_NETSIO_MDATA))
            val |= 1;
        netsio_set(sc, TL_NETSIO_MCLK);
    }
    netsio_clr(sc, TL_NETSIO_MCLK);
    netsio_set(sc, TL_NETSIO_MCLK);

    return err ? 0 : val;
}
void
tl_mii_write(device_t self, int phy, int reg, int val)
{
    struct tl_softc *sc = device_private(self);

    /*
     * Write the PHY register by manually driving the MII control lines.
     */
    tl_mii_sync(sc);
    tl_mii_sendbits(sc, MII_COMMAND_START, 2);
    tl_mii_sendbits(sc, MII_COMMAND_WRITE, 2);
    tl_mii_sendbits(sc, phy, 5);
    tl_mii_sendbits(sc, reg, 5);
    tl_mii_sendbits(sc, MII_COMMAND_ACK, 2);
    tl_mii_sendbits(sc, val, 16);

    netsio_clr(sc, TL_NETSIO_MCLK);
    netsio_set(sc, TL_NETSIO_MCLK);
}
void
tl_statchg(device_t self)
{
    tl_softc_t *sc = device_private(self);
    uint32_t reg;

    printf("%s: media %x\n", __func__, sc->tl_mii.mii_media.ifm_media);

    /*
     * We must keep the ThunderLAN and the PHY in sync as
     * to the status of full-duplex!
     */
    reg = tl_intreg_read_byte(sc, TL_INT_NET + TL_INT_NetCmd);
    if (sc->tl_mii.mii_media_active & IFM_FDX)
        reg |= TL_NETCOMMAND_DUPLEX;
    else
        reg &= ~TL_NETCOMMAND_DUPLEX;
    tl_intreg_write_byte(sc, TL_INT_NET + TL_INT_NetCmd, reg);
}
/********** I2C glue **********/
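
/*
 * This glue adapts the generic i2c_bitbang helpers to the ThunderLAN's
 * NetSio bits (see tl_i2cbb_ops above); tl_pci_attach() uses the resulting
 * i2c tag with seeprom_bootstrap_read() to fetch the Ethernet address from
 * the on-board serial EEPROM.
 */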
static int
tl_i2c_acquire_bus(void *cookie, int flags)
{
    /* private bus */
    return 0;
}

static void
tl_i2c_release_bus(void *cookie, int flags)
{
    /* private bus */
}

static int
tl_i2c_send_start(void *cookie, int flags)
{
    return i2c_bitbang_send_start(cookie, flags, &tl_i2cbb_ops);
}

static int
tl_i2c_send_stop(void *cookie, int flags)
{
    return i2c_bitbang_send_stop(cookie, flags, &tl_i2cbb_ops);
}

static int
tl_i2c_initiate_xfer(void *cookie, i2c_addr_t addr, int flags)
{
    return i2c_bitbang_initiate_xfer(cookie, addr, flags, &tl_i2cbb_ops);
}

static int
tl_i2c_read_byte(void *cookie, uint8_t *valp, int flags)
{
    return i2c_bitbang_read_byte(cookie, valp, flags, &tl_i2cbb_ops);
}

static int
tl_i2c_write_byte(void *cookie, uint8_t val, int flags)
{
    return i2c_bitbang_write_byte(cookie, val, flags, &tl_i2cbb_ops);
}
/********** I2C bit-bang glue **********/
static void
tl_i2cbb_set_bits(void *cookie, uint32_t bits)
{
    struct tl_softc *sc = cookie;
    uint8_t reg;

    reg = tl_intreg_read_byte(sc, TL_INT_NET + TL_INT_NetSio);
    reg = (reg & ~(TL_NETSIO_EDATA|TL_NETSIO_ECLOCK)) | bits;
    tl_intreg_write_byte(sc, TL_INT_NET + TL_INT_NetSio, reg);
}

static void
tl_i2cbb_set_dir(void *cookie, uint32_t bits)
{
    struct tl_softc *sc = cookie;
    uint8_t reg;

    reg = tl_intreg_read_byte(sc, TL_INT_NET + TL_INT_NetSio);
    reg = (reg & ~TL_NETSIO_ETXEN) | bits;
    tl_intreg_write_byte(sc, TL_INT_NET + TL_INT_NetSio, reg);
}

static uint32_t
tl_i2cbb_read(void *cookie)
{
    return tl_intreg_read_byte(cookie, TL_INT_NET + TL_INT_NetSio);
}
/********** End of I2C stuff **********/
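
/*
 * Interrupt handler.  The interrupt type is read from the host register
 * window, interrupts are masked, and the handler then dispatches on the
 * type: Rx EOF/EOC (harvest completed receive lists and re-queue them),
 * Tx EOF/EOC (reclaim completed transmit lists), statistics overflow, and
 * adapter check (which schedules a restart through tl_restart).  The number
 * of processed frames is acknowledged back to the chip together with the
 * interrupt-on command.
 */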
static int
tl_intr(void *v)
{
    tl_softc_t *sc = v;
    struct ifnet *ifp = &sc->tl_if;
    struct Rx_list *Rx;
    struct Tx_list *Tx;
    struct mbuf *m;
    uint32_t int_type, int_reg;
    int ack = 0;
    int size;

    int_reg = TL_HR_READ(sc, TL_HOST_INTR_DIOADR);
    int_type = int_reg & TL_INTR_MASK;
#if defined(TLDEBUG_RX) || defined(TLDEBUG_TX)
    printf("%s: interrupt type %x, intr_reg %x\n", device_xname(sc->sc_dev),
        int_type, int_reg);
#endif
    /* disable interrupts */
    TL_HR_WRITE(sc, TL_HOST_CMD, HOST_CMD_IntOff);
    switch(int_type & TL_INTR_MASK) {
    case TL_INTR_RxEOF:
        bus_dmamap_sync(sc->tl_dmatag, sc->Rx_dmamap, 0,
            sizeof(struct tl_Rx_list) * TL_NBUF,
            BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
        while (le32toh(sc->active_Rx->hw_list->stat) &
            TL_RX_CSTAT_CPLT) {
            /* dequeue and requeue at end of list */
            ack++;
            Rx = sc->active_Rx;
            sc->active_Rx = Rx->next;
            bus_dmamap_sync(sc->tl_dmatag, Rx->m_dmamap, 0,
                Rx->m_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
            bus_dmamap_unload(sc->tl_dmatag, Rx->m_dmamap);
            m = Rx->m;
            size = le32toh(Rx->hw_list->stat) >> 16;
            printf("%s: RX list complete, Rx %p, size=%d\n",
                __func__, Rx, size);
            if (tl_add_RxBuff(sc, Rx, m) == 0) {
                /*
                 * No new mbuf, reuse the same. This means
                 * that this packet is lost.
                 */
                m = NULL;
#ifdef TL_PRIV_STATS
                sc->ierr_nombuf++;
#else
                printf("%s: out of mbuf, lost input packet\n",
                    device_xname(sc->sc_dev));
#endif
            }
            Rx->next = NULL;
            Rx->hw_list->fwd = 0;
            sc->last_Rx->hw_list->fwd = htole32(Rx->hw_listaddr);
            sc->last_Rx->next = Rx;
            sc->last_Rx = Rx;

            /* deliver packet */
            if (m != NULL) {
                if (size < sizeof(struct ether_header)) {
                    m_freem(m);
                    continue;
                }
                m->m_pkthdr.rcvif = ifp;
                m->m_pkthdr.len = m->m_len = size;
#ifdef TLDEBUG_RX
                {
                    struct ether_header *eh =
                        mtod(m, struct ether_header *);
                    printf("%s: Rx packet:\n", __func__);
                    ether_printheader(eh);
                }
#endif
#if NBPFILTER > 0
                if (ifp->if_bpf)
                    bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */
                (*ifp->if_input)(ifp, m);
            }
        }
        bus_dmamap_sync(sc->tl_dmatag, sc->Rx_dmamap, 0,
            sizeof(struct tl_Rx_list) * TL_NBUF,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
        printf("TL_INTR_RxEOF: ack %d\n", ack);
        if (ack == 0) {
            printf("%s: EOF intr without anything to read !\n",
                device_xname(sc->sc_dev));
            /* schedule reinit of the board */
            callout_reset(&sc->tl_restart_ch, 1, tl_restart, ifp);
            return 1;
        }
        break;
    case TL_INTR_RxEOC:
        ack++;
        bus_dmamap_sync(sc->tl_dmatag, sc->Rx_dmamap, 0,
            sizeof(struct tl_Rx_list) * TL_NBUF,
            BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
        printf("TL_INTR_RxEOC: ack %d\n", ack);
        if (le32toh(sc->active_Rx->hw_list->stat) & TL_RX_CSTAT_CPLT) {
            printf("%s: Rx EOC interrupt and active Tx list not "
                "cleared\n", device_xname(sc->sc_dev));
            return 0;
        } else {
            /*
             * write address of Rx list and send Rx GO command, ack
             * interrupt and enable interrupts in one command
             */
            TL_HR_WRITE(sc, TL_HOST_CH_PARM, sc->active_Rx->hw_listaddr);
            TL_HR_WRITE(sc, TL_HOST_CMD,
                HOST_CMD_GO | HOST_CMD_RT | HOST_CMD_Nes | ack | int_type |
                HOST_CMD_ACK | HOST_CMD_IntOn);
            return 1;
        }
    case TL_INTR_TxEOF:
    case TL_INTR_TxEOC:
        bus_dmamap_sync(sc->tl_dmatag, sc->Tx_dmamap, 0,
            sizeof(struct tl_Tx_list) * TL_NBUF,
            BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
        while ((Tx = sc->active_Tx) != NULL) {
            if ((le32toh(Tx->hw_list->stat) & TL_TX_CSTAT_CPLT) == 0)
                break;
            ack++;
            printf("TL_INTR_TxEOC: list 0x%x done\n",
                (int)Tx->hw_listaddr);
            Tx->hw_list->stat = 0;
            bus_dmamap_sync(sc->tl_dmatag, Tx->m_dmamap, 0,
                Tx->m_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
            bus_dmamap_unload(sc->tl_dmatag, Tx->m_dmamap);
            m_freem(Tx->m);
            sc->active_Tx = Tx->next;
            if (sc->active_Tx == NULL)
                sc->last_Tx = NULL;
            Tx->next = sc->Free_Tx;
            sc->Free_Tx = Tx;
        }
        bus_dmamap_sync(sc->tl_dmatag, sc->Tx_dmamap, 0,
            sizeof(struct tl_Tx_list) * TL_NBUF,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
        /* if this was an EOC, ACK immediatly */
        sc->tl_if.if_flags &= ~IFF_OACTIVE;
        if (int_type == TL_INTR_TxEOC) {
            printf("TL_INTR_TxEOC: ack %d (will be set to 1)\n",
                ack);
            TL_HR_WRITE(sc, TL_HOST_CMD, 1 | int_type |
                HOST_CMD_ACK | HOST_CMD_IntOn);
            if (sc->active_Tx != NULL) {
                /* needs a Tx go command */
                TL_HR_WRITE(sc, TL_HOST_CH_PARM,
                    sc->active_Tx->hw_listaddr);
                TL_HR_WRITE(sc, TL_HOST_CMD, HOST_CMD_GO);
            }
            sc->tl_if.if_timer = 0;
            if (IFQ_IS_EMPTY(&sc->tl_if.if_snd) == 0)
                tl_ifstart(&sc->tl_if);
            return 1;
        } else {
            printf("TL_INTR_TxEOF: ack %d\n", ack);
            sc->tl_if.if_timer = 0;
            if (IFQ_IS_EMPTY(&sc->tl_if.if_snd) == 0)
                tl_ifstart(&sc->tl_if);
        }
        break;
    case TL_INTR_Stat:
        ack++;
        printf("TL_INTR_Stat: ack %d\n", ack);
        tl_read_stats(sc);
        break;
    case TL_INTR_Adchk:
        ack++;
        if (int_reg & TL_INTVec_MASK) {
            /* adapter check conditions */
            printf("%s: check condition, intvect=0x%x, "
                "ch_param=0x%x\n", device_xname(sc->sc_dev),
                int_reg & TL_INTVec_MASK,
                TL_HR_READ(sc, TL_HOST_CH_PARM));
            /* schedule reinit of the board */
            callout_reset(&sc->tl_restart_ch, 1, tl_restart, ifp);
            return 1;
        } else {
            uint8_t netstat;
            /* Network status */
            netstat = tl_intreg_read_byte(sc, TL_INT_NET+TL_INT_NetSts);
            printf("%s: network status, NetSts=%x\n",
                device_xname(sc->sc_dev), netstat);
            /* Ack interrupts */
            tl_intreg_write_byte(sc, TL_INT_NET+TL_INT_NetSts, netstat);
        }
        break;
    default:
        printf("%s: unhandled interrupt code %x!\n",
            device_xname(sc->sc_dev), int_type);
        ack++;
    }

    if (ack) {
        /* Ack the interrupt and enable interrupts */
        TL_HR_WRITE(sc, TL_HOST_CMD, ack | int_type | HOST_CMD_ACK |
            HOST_CMD_IntOn);
        if (RND_ENABLED(&sc->rnd_source))
            rnd_add_uint32(&sc->rnd_source, int_reg);
        return 1;
    }
    /* ack = 0 ; interrupt was perhaps not our. Just enable interrupts */
    TL_HR_WRITE(sc, TL_HOST_CMD, HOST_CMD_IntOn);
    return 0;
}
static int
tl_ifioctl(struct ifnet *ifp, unsigned long cmd, void *data)
{
    struct tl_softc *sc = ifp->if_softc;
    int error;

    error = ether_ioctl(ifp, cmd, data);
    if (error == ENETRESET) {
        if (ifp->if_flags & IFF_RUNNING)
            tl_addr_filter(sc);
        error = 0;
    }
    return error;
}
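
/*
 * Transmit start routine: packets are pulled off the send queue, DMA-mapped
 * (with a bounce copy into a fresh mbuf if the chain needs more than
 * TL_NSEG segments), padded to ETHER_MIN_TX with the shared null buffer
 * segment when necessary, and then either handed to the chip with a GO
 * command or chained onto the tail of the active hardware Tx list.
 */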
static void
tl_ifstart(struct ifnet *ifp)
{
    tl_softc_t *sc = ifp->if_softc;
    struct mbuf *mb_head;
    struct Tx_list *Tx;
    int segment, size;
    int error;

    if ((sc->tl_if.if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
        return;
txloop:
    /* If we don't have more space ... */
    if (sc->Free_Tx == NULL) {
        printf("%s: No free TX list\n", __func__);
        sc->tl_if.if_flags |= IFF_OACTIVE;
        return;
    }
    /* Grab a paquet for output */
    IFQ_DEQUEUE(&ifp->if_snd, mb_head);
    if (mb_head == NULL) {
        printf("%s: nothing to send\n", __func__);
        return;
    }
    Tx = sc->Free_Tx;
    sc->Free_Tx = Tx->next;

    /*
     * Go through each of the mbufs in the chain and initialize
     * the transmit list descriptors with the physical address
     * and size of the mbuf.
     */
    memset(Tx->hw_list, 0, sizeof(struct tl_Tx_list));
    Tx->m = mb_head;
    size = mb_head->m_pkthdr.len;
    if ((error = bus_dmamap_load_mbuf(sc->tl_dmatag, Tx->m_dmamap, mb_head,
        BUS_DMA_NOWAIT)) || (size < ETHER_MIN_TX &&
        Tx->m_dmamap->dm_nsegs == TL_NSEG)) {
        struct mbuf *mn;
        /*
         * We ran out of segments, or we will. We have to recopy this
         * mbuf chain into a single, contiguous one.
         */
        if (error == 0)
            bus_dmamap_unload(sc->tl_dmatag, Tx->m_dmamap);
        printf("%s: need to copy mbuf\n", __func__);
#ifdef TL_PRIV_STATS
        sc->oerr_mcopy++;
#endif
        MGETHDR(mn, M_DONTWAIT, MT_DATA);
        if (mn == NULL) {
            m_freem(mb_head);
            goto bad;
        }
        if (mb_head->m_pkthdr.len > MHLEN) {
            MCLGET(mn, M_DONTWAIT);
            if ((mn->m_flags & M_EXT) == 0) {
                m_freem(mn);
                m_freem(mb_head);
                goto bad;
            }
        }
        m_copydata(mb_head, 0, mb_head->m_pkthdr.len,
            mtod(mn, void *));
        mn->m_pkthdr.len = mn->m_len = mb_head->m_pkthdr.len;
        m_freem(mb_head);
        mb_head = mn;
        Tx->m = mb_head;
        size = mb_head->m_pkthdr.len;
        if ((error = bus_dmamap_load_mbuf(sc->tl_dmatag, Tx->m_dmamap,
            mb_head, BUS_DMA_NOWAIT)) != 0) {
            /* already copyed, can't do much more */
            m_freem(mb_head);
            goto bad;
        }
    }
    for (segment = 0; segment < Tx->m_dmamap->dm_nsegs; segment++) {
        Tx->hw_list->seg[segment].data_addr =
            htole32(Tx->m_dmamap->dm_segs[segment].ds_addr);
        Tx->hw_list->seg[segment].data_count =
            htole32(Tx->m_dmamap->dm_segs[segment].ds_len);
    }
    bus_dmamap_sync(sc->tl_dmatag, Tx->m_dmamap, 0,
        Tx->m_dmamap->dm_mapsize,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    /*
     * We are at end of mbuf chain. check the size and
     * see if it needs to be extended
     */
    if (size < ETHER_MIN_TX) {
        if (segment >= TL_NSEG) {
            panic("%s: to much segmets (%d)", __func__, segment);
        }
        /*
         * add the nullbuf in the seg
         */
        Tx->hw_list->seg[segment].data_count =
            htole32(ETHER_MIN_TX - size);
        Tx->hw_list->seg[segment].data_addr =
            htole32(sc->null_dmamap->dm_segs[0].ds_addr);
        size = ETHER_MIN_TX;
        segment++;
    }
    /* The list is done, finish the list init */
    Tx->hw_list->seg[segment - 1].data_count |=
        htole32(TL_LAST_SEG);
    Tx->hw_list->stat = htole32((size << 16) | 0x3000);
    printf("%s: sending, Tx : stat = 0x%x\n", device_xname(sc->sc_dev),
        le32toh(Tx->hw_list->stat));
    for (segment = 0; segment < TL_NSEG; segment++) {
        printf(" seg %d addr 0x%x len 0x%x\n",
            segment,
            le32toh(Tx->hw_list->seg[segment].data_addr),
            le32toh(Tx->hw_list->seg[segment].data_count));
    }
    if (sc->active_Tx == NULL) {
        sc->active_Tx = sc->last_Tx = Tx;
        printf("%s: Tx GO, addr=0x%ux\n", device_xname(sc->sc_dev),
            (int)Tx->hw_listaddr);
        bus_dmamap_sync(sc->tl_dmatag, sc->Tx_dmamap, 0,
            sizeof(struct tl_Tx_list) * TL_NBUF,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
        TL_HR_WRITE(sc, TL_HOST_CH_PARM, Tx->hw_listaddr);
        TL_HR_WRITE(sc, TL_HOST_CMD, HOST_CMD_GO);
    } else {
        printf("%s: Tx addr=0x%ux queued\n", device_xname(sc->sc_dev),
            (int)Tx->hw_listaddr);
        sc->last_Tx->hw_list->fwd = htole32(Tx->hw_listaddr);
        bus_dmamap_sync(sc->tl_dmatag, sc->Tx_dmamap, 0,
            sizeof(struct tl_Tx_list) * TL_NBUF,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
        sc->last_Tx->next = Tx;
        sc->last_Tx = Tx;
        if (sc->last_Tx->hw_list->fwd & 0x7)
            printf("%s: physical addr 0x%x of list not properly "
                "aligned\n",
                device_xname(sc->sc_dev),
                sc->last_Rx->hw_list->fwd);
    }
    /* Pass packet to bpf if there is a listener */
    if (ifp->if_bpf)
        bpf_mtap(ifp->if_bpf, mb_head);
    /*
     * Set a 5 second timer just in case we don't hear from the card again.
     */
    ifp->if_timer = 5;
    goto txloop;
bad:
    printf("%s: Out of mbuf, Tx pkt lost\n", __func__);
    Tx->next = sc->Free_Tx;
    sc->Free_Tx = Tx;
}
static void
tl_ifwatchdog(struct ifnet *ifp)
{
    tl_softc_t *sc = ifp->if_softc;

    if ((ifp->if_flags & IFF_RUNNING) == 0)
        return;
    printf("%s: device timeout\n", device_xname(sc->sc_dev));
    ifp->if_oerrors++;
    tl_init(ifp);
}
static int
tl_mediachange(struct ifnet *ifp)
{

    if (ifp->if_flags & IFF_UP)
        tl_init(ifp);
    return 0;
}
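
/*
 * (Re)charge a receive list entry: allocate a cluster mbuf (falling back to
 * the caller's old mbuf when allocation fails), load it into the entry's
 * DMA map, and point the hardware segment 2 bytes into the buffer so the
 * IP header of the received frame ends up 32-bit aligned.
 */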
static int
tl_add_RxBuff(tl_softc_t *sc, struct Rx_list *Rx, struct mbuf *oldm)
{
    struct mbuf *m;
    int error;

    MGETHDR(m, M_DONTWAIT, MT_DATA);
    if (m != NULL) {
        MCLGET(m, M_DONTWAIT);
        if ((m->m_flags & M_EXT) == 0) {
            m_freem(m);
            if (oldm == NULL)
                return 0;
            m = oldm;
            m->m_data = m->m_ext.ext_buf;
        }
    } else {
        if (oldm == NULL)
            return 0;
        m = oldm;
        m->m_data = m->m_ext.ext_buf;
    }

    /* (re)init the Rx_list struct */
    Rx->m = m;
    if ((error = bus_dmamap_load(sc->tl_dmatag, Rx->m_dmamap,
        m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT)) != 0) {
        printf("%s: bus_dmamap_load() failed (error %d) for "
            "tl_add_RxBuff ", device_xname(sc->sc_dev), error);
        printf("size %d (%d)\n", m->m_pkthdr.len, MCLBYTES);
        m_freem(m);
        Rx->m = NULL;
        return 0;
    }
    bus_dmamap_sync(sc->tl_dmatag, Rx->m_dmamap, 0,
        Rx->m_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
    /*
     * Move the data pointer up so that the incoming data packet
     * will be 32-bit aligned.
     */
    m->m_data += 2;

    Rx->hw_list->stat =
        htole32(((Rx->m_dmamap->dm_segs[0].ds_len - 2) << 16) | 0x3000);
    Rx->hw_list->seg.data_count =
        htole32(Rx->m_dmamap->dm_segs[0].ds_len - 2);
    Rx->hw_list->seg.data_addr =
        htole32(Rx->m_dmamap->dm_segs[0].ds_addr + 2);
    return (m != oldm);
}
static void
tl_ticks(void *v)
{
    tl_softc_t *sc = v;

    tl_read_stats(sc);
    mii_tick(&sc->tl_mii);

    /* read statistics every seconds */
    callout_reset(&sc->tl_tick_ch, hz, tl_ticks, sc);
}
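
/*
 * Harvest the chip's statistics registers.  Each 32-bit register packs
 * several counters (e.g. good frames in the low 24 bits and an error count
 * in the top byte); the values are unpacked and accumulated into the ifnet
 * counters and, when TL_PRIV_STATS is defined, into per-driver totals.
 * Called once per second from tl_ticks() and when the interface is stopped.
 */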
static void
tl_read_stats(tl_softc_t *sc)
{
    uint32_t reg;
    int ierr_overr, ierr_code, ierr_crc;
    int oerr_underr, oerr_deferred, oerr_coll, oerr_multicoll;
    int oerr_exesscoll, oerr_latecoll, oerr_carrloss;
    struct ifnet *ifp = &sc->tl_if;

    reg = tl_intreg_read(sc, TL_INT_STATS_TX);
    ifp->if_opackets += reg & 0x00ffffff;
    oerr_underr = reg >> 24;

    reg = tl_intreg_read(sc, TL_INT_STATS_RX);
    ifp->if_ipackets += reg & 0x00ffffff;
    ierr_overr = reg >> 24;

    reg = tl_intreg_read(sc, TL_INT_STATS_FERR);
    ierr_crc = (reg & TL_FERR_CRC) >> 16;
    ierr_code = (reg & TL_FERR_CODE) >> 24;
    oerr_deferred = (reg & TL_FERR_DEF);

    reg = tl_intreg_read(sc, TL_INT_STATS_COLL);
    oerr_multicoll = (reg & TL_COL_MULTI);
    oerr_coll = (reg & TL_COL_SINGLE) >> 16;

    reg = tl_intreg_read(sc, TL_INT_LERR);
    oerr_exesscoll = (reg & TL_LERR_ECOLL);
    oerr_latecoll = (reg & TL_LERR_LCOLL) >> 8;
    oerr_carrloss = (reg & TL_LERR_CL) >> 16;

    ifp->if_oerrors += oerr_underr + oerr_exesscoll + oerr_latecoll +
        oerr_carrloss;
    ifp->if_collisions += oerr_coll + oerr_multicoll;
    ifp->if_ierrors += ierr_overr + ierr_code + ierr_crc;

    if (ierr_overr)
        printf("%s: receiver ring buffer overrun\n",
            device_xname(sc->sc_dev));
    if (oerr_underr)
        printf("%s: transmit buffer underrun\n",
            device_xname(sc->sc_dev));
#ifdef TL_PRIV_STATS
    sc->ierr_overr += ierr_overr;
    sc->ierr_code += ierr_code;
    sc->ierr_crc += ierr_crc;
    sc->oerr_underr += oerr_underr;
    sc->oerr_deferred += oerr_deferred;
    sc->oerr_coll += oerr_coll;
    sc->oerr_multicoll += oerr_multicoll;
    sc->oerr_exesscoll += oerr_exesscoll;
    sc->oerr_latecoll += oerr_latecoll;
    sc->oerr_carrloss += oerr_carrloss;
#endif
}
static void
tl_addr_filter(tl_softc_t *sc)
{
    struct ether_multistep step;
    struct ether_multi *enm;
    uint32_t hash[2] = {0, 0};
    int i;

    sc->tl_if.if_flags &= ~IFF_ALLMULTI;
    ETHER_FIRST_MULTI(step, &sc->tl_ec, enm);
    while (enm != NULL) {
        printf("%s: addrs %s %s\n", __func__,
            ether_sprintf(enm->enm_addrlo),
            ether_sprintf(enm->enm_addrhi));
        if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 6) == 0) {
            i = tl_multicast_hash(enm->enm_addrlo);
            hash[i / 32] |= 1 << (i % 32);
        } else {
            hash[0] = hash[1] = 0xffffffff;
            sc->tl_if.if_flags |= IFF_ALLMULTI;
            break;
        }
        ETHER_NEXT_MULTI(step, enm);
    }
    printf("%s: hash1 %x hash2 %x\n", __func__, hash[0], hash[1]);
    tl_intreg_write(sc, TL_INT_HASH1, hash[0]);
    tl_intreg_write(sc, TL_INT_HASH2, hash[1]);
}
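
/*
 * Compute the 6-bit multicast hash the ThunderLAN uses to index its 64-bit
 * hash table (HASH1/HASH2): bit n of the hash is the XOR of every sixth bit
 * of the 48-bit Ethernet address starting at bit n, as implemented by the
 * DA()/xor8() macros below.  tl_addr_filter() above sets the corresponding
 * bit for each multicast address, or floods the table for address ranges.
 */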
static int
tl_multicast_hash(uint8_t *a)
{
    int hash;

#define DA(addr,bit) (addr[5 - (bit / 8)] & (1 << (bit % 8)))
#define xor8(a,b,c,d,e,f,g,h) \
    (((a != 0) + (b != 0) + (c != 0) + (d != 0) + \
    (e != 0) + (f != 0) + (g != 0) + (h != 0)) & 1)

    hash = xor8(DA(a,0), DA(a, 6), DA(a,12), DA(a,18), DA(a,24), DA(a,30),
        DA(a,36), DA(a,42));
    hash |= xor8(DA(a,1), DA(a, 7), DA(a,13), DA(a,19), DA(a,25), DA(a,31),
        DA(a,37), DA(a,43)) << 1;
    hash |= xor8(DA(a,2), DA(a, 8), DA(a,14), DA(a,20), DA(a,26), DA(a,32),
        DA(a,38), DA(a,44)) << 2;
    hash |= xor8(DA(a,3), DA(a, 9), DA(a,15), DA(a,21), DA(a,27), DA(a,33),
        DA(a,39), DA(a,45)) << 3;
    hash |= xor8(DA(a,4), DA(a,10), DA(a,16), DA(a,22), DA(a,28), DA(a,34),
        DA(a,40), DA(a,46)) << 4;
    hash |= xor8(DA(a,5), DA(a,11), DA(a,17), DA(a,23), DA(a,29), DA(a,35),
        DA(a,41), DA(a,47)) << 5;

    return hash;
}
#if defined(TLDEBUG_RX)
static void
ether_printheader(struct ether_header *eh)
{
    uint8_t *c = (uint8_t *)eh;
    int i;

    for (i = 0; i < sizeof(struct ether_header); i++)
        printf("%02x ", (u_int)c[i]);
    printf("\n");
}
#endif