/* $NetBSD: if_ti.c,v 1.86 2009/09/27 12:52:59 tsutsui Exp $ */

/*
 * Copyright (c) 1997, 1998, 1999
 *      Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * FreeBSD Id: if_ti.c,v 1.15 1999/08/14 15:45:03 wpaul Exp
 */

/*
 * Alteon Networks Tigon PCI gigabit ethernet driver for FreeBSD.
 * Manuals, sample driver and firmware source kits are available
 * from http://www.alteon.com/support/openkits.
 *
 * Written by Bill Paul <wpaul@ctr.columbia.edu>
 * Electrical Engineering Department
 * Columbia University, New York City
 */

/*
 * The Alteon Networks Tigon chip contains an embedded R4000 CPU,
 * gigabit MAC, dual DMA channels and a PCI interface unit. NICs
 * using the Tigon may have anywhere from 512K to 2MB of SRAM. The
 * Tigon supports hardware IP, TCP and UDP checksumming, multicast
 * filtering and jumbo (9014 byte) frames. The hardware is largely
 * controlled by firmware, which must be loaded into the NIC during
 * initialization.
 *
 * The Tigon 2 contains 2 R4000 CPUs and requires a newer firmware
 * revision, which supports new features such as extended commands,
 * extended jumbo receive ring descriptors and a mini receive ring.
 *
 * Alteon Networks is to be commended for releasing such a vast amount
 * of development material for the Tigon NIC without requiring an NDA
 * (although they really should have done it a long time ago). With
 * any luck, the other vendors will finally wise up and follow Alteon's
 * stellar example.
 *
 * The firmware for the Tigon 1 and 2 NICs is compiled directly into
 * this driver by #including it as a C header file. This bloats the
 * driver somewhat, but it's the easiest method considering that the
 * driver code and firmware code need to be kept in sync. The source
 * for the firmware is not provided with the FreeBSD distribution since
 * compiling it requires a GNU toolchain targeted for mips-sgi-irix5.3.
 *
 * The following people deserve special thanks:
 * - Terry Murphy of 3Com, for providing a 3c985 Tigon 1 board
 *   for testing
 * - Raymond Lee of Netgear, for providing a pair of Netgear
 *   GA620 Tigon 2 boards for testing
 * - Ulf Zimmermann, for bringing the GA620 to my attention and
 *   convincing me to write this driver.
 * - Andrew Gallatin for providing FreeBSD/Alpha support.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_ti.c,v 1.86 2009/09/27 12:52:59 tsutsui Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/queue.h>
#include <sys/device.h>
#include <sys/reboot.h>

#include <uvm/uvm_extern.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_ether.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_inarp.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_tireg.h>

#include <dev/microcode/tigon/ti_fw.h>
#include <dev/microcode/tigon/ti_fw2.h>
/*
 * Various supported device vendors/types and their names.
 */

static const struct ti_type ti_devs[] = {
        { PCI_VENDOR_ALTEON,    PCI_PRODUCT_ALTEON_ACENIC,
          "Alteon AceNIC 1000BASE-SX Ethernet" },
        { PCI_VENDOR_ALTEON,    PCI_PRODUCT_ALTEON_ACENIC_COPPER,
          "Alteon AceNIC 1000BASE-T Ethernet" },
        { PCI_VENDOR_3COM,      PCI_PRODUCT_3COM_3C985,
          "3Com 3c985-SX Gigabit Ethernet" },
        { PCI_VENDOR_NETGEAR,   PCI_PRODUCT_NETGEAR_GA620,
          "Netgear GA620 1000BASE-SX Ethernet" },
        { PCI_VENDOR_NETGEAR,   PCI_PRODUCT_NETGEAR_GA620T,
          "Netgear GA620 1000BASE-T Ethernet" },
        { PCI_VENDOR_SGI,       PCI_PRODUCT_SGI_TIGON,
          "Silicon Graphics Gigabit Ethernet" },
        { 0, 0, NULL }
};
static const struct ti_type *ti_type_match(struct pci_attach_args *);
static int  ti_probe(device_t, cfdata_t, void *);
static void ti_attach(device_t, device_t, void *);
static bool ti_shutdown(device_t, int);
static void ti_txeof_tigon1(struct ti_softc *);
static void ti_txeof_tigon2(struct ti_softc *);
static void ti_rxeof(struct ti_softc *);

static void ti_stats_update(struct ti_softc *);
static int  ti_encap_tigon1(struct ti_softc *, struct mbuf *, u_int32_t *);
static int  ti_encap_tigon2(struct ti_softc *, struct mbuf *, u_int32_t *);

static int  ti_intr(void *);
static void ti_start(struct ifnet *);
static int  ti_ioctl(struct ifnet *, u_long, void *);
static void ti_init(void *);
static void ti_init2(struct ti_softc *);
static void ti_stop(struct ti_softc *);
static void ti_watchdog(struct ifnet *);
static int  ti_ifmedia_upd(struct ifnet *);
static void ti_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static u_int32_t ti_eeprom_putbyte(struct ti_softc *, int);
static u_int8_t  ti_eeprom_getbyte(struct ti_softc *, int, u_int8_t *);
static int  ti_read_eeprom(struct ti_softc *, void *, int, int);

static void ti_add_mcast(struct ti_softc *, struct ether_addr *);
static void ti_del_mcast(struct ti_softc *, struct ether_addr *);
static void ti_setmulti(struct ti_softc *);

static void ti_mem(struct ti_softc *, u_int32_t, u_int32_t, const void *);
static void ti_loadfw(struct ti_softc *);
static void ti_cmd(struct ti_softc *, struct ti_cmd_desc *);
static void ti_cmd_ext(struct ti_softc *, struct ti_cmd_desc *, void *, int);
static void ti_handle_events(struct ti_softc *);
static int  ti_alloc_jumbo_mem(struct ti_softc *);
static void *ti_jalloc(struct ti_softc *);
static void ti_jfree(struct mbuf *, void *, size_t, void *);
static int  ti_newbuf_std(struct ti_softc *, int, struct mbuf *, bus_dmamap_t);
static int  ti_newbuf_mini(struct ti_softc *, int, struct mbuf *, bus_dmamap_t);
static int  ti_newbuf_jumbo(struct ti_softc *, int, struct mbuf *);
static int  ti_init_rx_ring_std(struct ti_softc *);
static void ti_free_rx_ring_std(struct ti_softc *);
static int  ti_init_rx_ring_jumbo(struct ti_softc *);
static void ti_free_rx_ring_jumbo(struct ti_softc *);
static int  ti_init_rx_ring_mini(struct ti_softc *);
static void ti_free_rx_ring_mini(struct ti_softc *);
static void ti_free_tx_ring(struct ti_softc *);
static int  ti_init_tx_ring(struct ti_softc *);

static int  ti_64bitslot_war(struct ti_softc *);
static int  ti_chipinit(struct ti_softc *);
static int  ti_gibinit(struct ti_softc *);

static int  ti_ether_ioctl(struct ifnet *, u_long, void *);
CFATTACH_DECL(ti, sizeof(struct ti_softc),
    ti_probe, ti_attach, NULL, NULL);
/*
 * Send an instruction or address to the EEPROM, check for ACK.
 */
static u_int32_t
ti_eeprom_putbyte(struct ti_softc *sc, int byte)
{
        int i, ack = 0;

        /*
         * Make sure we're in TX mode.
         */
        TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_TXEN);

        /*
         * Feed in each bit and strobe the clock.
         */
        for (i = 0x80; i; i >>= 1) {
                if (byte & i)
                        TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_DOUT);
                else
                        TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_DOUT);
                TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);
                TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);
        }

        /*
         * Turn off TX mode.
         */
        TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_TXEN);

        /*
         * Check for ack.
         */
        TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);
        ack = CSR_READ_4(sc, TI_MISC_LOCAL_CTL) & TI_MLC_EE_DIN;
        TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);

        return (ack);
}
/*
 * Read a byte of data stored in the EEPROM at address 'addr.'
 * We have to send two address bytes since the EEPROM can hold
 * more than 256 bytes of data.
 */
static u_int8_t
ti_eeprom_getbyte(struct ti_softc *sc, int addr, u_int8_t *dest)
{
        int i;
        u_int8_t byte = 0;

        /*
         * Send write control code to EEPROM.
         */
        if (ti_eeprom_putbyte(sc, EEPROM_CTL_WRITE)) {
                printf("%s: failed to send write command, status: %x\n",
                    device_xname(&sc->sc_dev),
                    CSR_READ_4(sc, TI_MISC_LOCAL_CTL));
                return (1);
        }

        /*
         * Send first byte of address of byte we want to read.
         */
        if (ti_eeprom_putbyte(sc, (addr >> 8) & 0xFF)) {
                printf("%s: failed to send address, status: %x\n",
                    device_xname(&sc->sc_dev),
                    CSR_READ_4(sc, TI_MISC_LOCAL_CTL));
                return (1);
        }

        /*
         * Send second byte address of byte we want to read.
         */
        if (ti_eeprom_putbyte(sc, addr & 0xFF)) {
                printf("%s: failed to send address, status: %x\n",
                    device_xname(&sc->sc_dev),
                    CSR_READ_4(sc, TI_MISC_LOCAL_CTL));
                return (1);
        }

        /*
         * Send read control code to EEPROM.
         */
        if (ti_eeprom_putbyte(sc, EEPROM_CTL_READ)) {
                printf("%s: failed to send read command, status: %x\n",
                    device_xname(&sc->sc_dev),
                    CSR_READ_4(sc, TI_MISC_LOCAL_CTL));
                return (1);
        }

        /*
         * Start reading bits from EEPROM.
         */
        TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_TXEN);
        for (i = 0x80; i; i >>= 1) {
                TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);
                if (CSR_READ_4(sc, TI_MISC_LOCAL_CTL) & TI_MLC_EE_DIN)
                        byte |= i;
                TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);
        }

        /*
         * No ACK generated for read, so just return byte.
         */
        *dest = byte;

        return (0);
}
/*
 * Read a sequence of bytes from the EEPROM.
 */
static int
ti_read_eeprom(struct ti_softc *sc, void *destv, int off, int cnt)
{
        char *dest = destv;
        int err = 0, i;
        u_int8_t byte = 0;

        for (i = 0; i < cnt; i++) {
                err = ti_eeprom_getbyte(sc, off + i, &byte);
                if (err)
                        break;
                *(dest + i) = byte;
        }

        return (err ? 1 : 0);
}
/*
 * NIC memory access function. Can be used to either clear a section
 * of NIC local memory or (if xbuf is non-NULL) copy data into it.
 */
static void
ti_mem(struct ti_softc *sc, u_int32_t addr, u_int32_t len, const void *xbuf)
{
        int segptr, segsize, cnt;
        const void *ptr;

        segptr = addr;
        cnt = len;
        ptr = xbuf;

        while (cnt) {
                if (cnt < TI_WINLEN)
                        segsize = cnt;
                else
                        segsize = TI_WINLEN - (segptr % TI_WINLEN);
                CSR_WRITE_4(sc, TI_WINBASE, (segptr & ~(TI_WINLEN - 1)));
                if (xbuf == NULL) {
                        bus_space_set_region_4(sc->ti_btag, sc->ti_bhandle,
                            TI_WINDOW + (segptr & (TI_WINLEN - 1)), 0,
                            segsize / 4);
                } else {
#ifdef __BUS_SPACE_HAS_STREAM_METHODS
                        bus_space_write_region_stream_4(sc->ti_btag,
                            sc->ti_bhandle,
                            TI_WINDOW + (segptr & (TI_WINLEN - 1)),
                            (const u_int32_t *)ptr, segsize / 4);
#else
                        bus_space_write_region_4(sc->ti_btag, sc->ti_bhandle,
                            TI_WINDOW + (segptr & (TI_WINLEN - 1)),
                            (const u_int32_t *)ptr, segsize / 4);
#endif
                        ptr = (const char *)ptr + segsize;
                }
                segptr += segsize;
                cnt -= segsize;
        }
}
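
/*
 * NIC-local SRAM is reached through a sliding window: TI_WINBASE selects
 * which TI_WINLEN-sized chunk of SRAM is currently visible, and TI_WINDOW
 * is where that chunk appears in register space.  That is why each copy
 * above is capped at the distance to the next window boundary before the
 * window is moved along.
 */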
/*
 * Load firmware image into the NIC. Check that the firmware revision
 * is acceptable and see if we want the firmware for the Tigon 1 or
 * the Tigon 2.
 */
static void
ti_loadfw(struct ti_softc *sc)
{

        switch (sc->ti_hwrev) {
        case TI_HWREV_TIGON:
                if (tigonFwReleaseMajor != TI_FIRMWARE_MAJOR ||
                    tigonFwReleaseMinor != TI_FIRMWARE_MINOR ||
                    tigonFwReleaseFix != TI_FIRMWARE_FIX) {
                        printf("%s: firmware revision mismatch; want "
                            "%d.%d.%d, got %d.%d.%d\n",
                            device_xname(&sc->sc_dev),
                            TI_FIRMWARE_MAJOR, TI_FIRMWARE_MINOR,
                            TI_FIRMWARE_FIX, tigonFwReleaseMajor,
                            tigonFwReleaseMinor, tigonFwReleaseFix);
                        return;
                }
                ti_mem(sc, tigonFwTextAddr, tigonFwTextLen, tigonFwText);
                ti_mem(sc, tigonFwDataAddr, tigonFwDataLen, tigonFwData);
                ti_mem(sc, tigonFwRodataAddr, tigonFwRodataLen, tigonFwRodata);
                ti_mem(sc, tigonFwBssAddr, tigonFwBssLen, NULL);
                ti_mem(sc, tigonFwSbssAddr, tigonFwSbssLen, NULL);
                CSR_WRITE_4(sc, TI_CPU_PROGRAM_COUNTER, tigonFwStartAddr);
                break;
        case TI_HWREV_TIGON_II:
                if (tigon2FwReleaseMajor != TI_FIRMWARE_MAJOR ||
                    tigon2FwReleaseMinor != TI_FIRMWARE_MINOR ||
                    tigon2FwReleaseFix != TI_FIRMWARE_FIX) {
                        printf("%s: firmware revision mismatch; want "
                            "%d.%d.%d, got %d.%d.%d\n",
                            device_xname(&sc->sc_dev),
                            TI_FIRMWARE_MAJOR, TI_FIRMWARE_MINOR,
                            TI_FIRMWARE_FIX, tigon2FwReleaseMajor,
                            tigon2FwReleaseMinor, tigon2FwReleaseFix);
                        return;
                }
                ti_mem(sc, tigon2FwTextAddr, tigon2FwTextLen, tigon2FwText);
                ti_mem(sc, tigon2FwDataAddr, tigon2FwDataLen, tigon2FwData);
                ti_mem(sc, tigon2FwRodataAddr, tigon2FwRodataLen,
                    tigon2FwRodata);
                ti_mem(sc, tigon2FwBssAddr, tigon2FwBssLen, NULL);
                ti_mem(sc, tigon2FwSbssAddr, tigon2FwSbssLen, NULL);
                CSR_WRITE_4(sc, TI_CPU_PROGRAM_COUNTER, tigon2FwStartAddr);
                break;
        default:
                printf("%s: can't load firmware: unknown hardware rev\n",
                    device_xname(&sc->sc_dev));
                break;
        }
}
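
/*
 * The version check above compares the release constants compiled in from
 * ti_fw.h/ti_fw2.h against TI_FIRMWARE_MAJOR/MINOR/FIX (presumably kept in
 * if_tireg.h); on a mismatch the image is simply not copied in and the
 * NIC CPU is never started.
 */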
/*
 * Send the NIC a command via the command ring.
 */
static void
ti_cmd(struct ti_softc *sc, struct ti_cmd_desc *cmd)
{
        u_int32_t index;

        index = sc->ti_cmd_saved_prodidx;
        CSR_WRITE_4(sc, TI_GCR_CMDRING + (index * 4), *(u_int32_t *)(cmd));
        TI_INC(index, TI_CMD_RING_CNT);
        CSR_WRITE_4(sc, TI_MB_CMDPROD_IDX, index);
        sc->ti_cmd_saved_prodidx = index;
}
/*
 * Send the NIC an extended command. The 'len' parameter specifies the
 * number of command slots to include after the initial command.
 */
static void
ti_cmd_ext(struct ti_softc *sc, struct ti_cmd_desc *cmd, void *argv, int len)
{
        char *arg = argv;
        u_int32_t index;
        int i;

        index = sc->ti_cmd_saved_prodidx;
        CSR_WRITE_4(sc, TI_GCR_CMDRING + (index * 4), *(u_int32_t *)(cmd));
        TI_INC(index, TI_CMD_RING_CNT);
        for (i = 0; i < len; i++) {
                CSR_WRITE_4(sc, TI_GCR_CMDRING + (index * 4),
                    *(u_int32_t *)(&arg[i * 4]));
                TI_INC(index, TI_CMD_RING_CNT);
        }
        CSR_WRITE_4(sc, TI_MB_CMDPROD_IDX, index);
        sc->ti_cmd_saved_prodidx = index;
}
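
/*
 * Each command-ring slot is one 32-bit word, so 'len' counts 32-bit words
 * of argument data rather than bytes: the loop above steps through 'arg'
 * four bytes at a time, writing one slot per word.
 */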
/*
 * Handle events that have triggered interrupts.
 */
static void
ti_handle_events(struct ti_softc *sc)
{
        struct ti_event_desc *e;

        if (sc->ti_rdata->ti_event_ring == NULL)
                return;

        while (sc->ti_ev_saved_considx != sc->ti_ev_prodidx.ti_idx) {
                e = &sc->ti_rdata->ti_event_ring[sc->ti_ev_saved_considx];
                switch (TI_EVENT_EVENT(e)) {
                case TI_EV_LINKSTAT_CHANGED:
                        sc->ti_linkstat = TI_EVENT_CODE(e);
                        if (sc->ti_linkstat == TI_EV_CODE_LINK_UP)
                                printf("%s: 10/100 link up\n",
                                    device_xname(&sc->sc_dev));
                        else if (sc->ti_linkstat == TI_EV_CODE_GIG_LINK_UP)
                                printf("%s: gigabit link up\n",
                                    device_xname(&sc->sc_dev));
                        else if (sc->ti_linkstat == TI_EV_CODE_LINK_DOWN)
                                printf("%s: link down\n",
                                    device_xname(&sc->sc_dev));
                        break;
                case TI_EV_ERROR:
                        if (TI_EVENT_CODE(e) == TI_EV_CODE_ERR_INVAL_CMD)
                                printf("%s: invalid command\n",
                                    device_xname(&sc->sc_dev));
                        else if (TI_EVENT_CODE(e) == TI_EV_CODE_ERR_UNIMP_CMD)
                                printf("%s: unknown command\n",
                                    device_xname(&sc->sc_dev));
                        else if (TI_EVENT_CODE(e) == TI_EV_CODE_ERR_BADCFG)
                                printf("%s: bad config data\n",
                                    device_xname(&sc->sc_dev));
                        break;
                case TI_EV_FIRMWARE_UP:
                        ti_init2(sc);
                        break;
                case TI_EV_STATS_UPDATED:
                        ti_stats_update(sc);
                        break;
                case TI_EV_RESET_JUMBO_RING:
                case TI_EV_MCAST_UPDATED:
                        /* Nothing to do here. */
                        break;
                default:
                        printf("%s: unknown event: %d\n",
                            device_xname(&sc->sc_dev), TI_EVENT_EVENT(e));
                        break;
                }
                /* Advance the consumer index. */
                TI_INC(sc->ti_ev_saved_considx, TI_EVENT_RING_CNT);
                CSR_WRITE_4(sc, TI_GCR_EVENTCONS_IDX,
                    sc->ti_ev_saved_considx);
        }
}
/*
 * Memory management for the jumbo receive ring is a pain in the
 * butt. We need to allocate at least 9018 bytes of space per frame,
 * _and_ it has to be contiguous (unless you use the extended
 * jumbo descriptor format). Using malloc() all the time won't
 * work: malloc() allocates memory in powers of two, which means we
 * would end up wasting a considerable amount of space by allocating
 * 9K chunks. We don't have a jumbo mbuf cluster pool. Thus, we have
 * to do our own memory management.
 *
 * The driver needs to allocate a contiguous chunk of memory at boot
 * time. We then chop this up ourselves into 9K pieces and use them
 * as external mbuf storage.
 *
 * One issue here is how much memory to allocate. The jumbo ring has
 * 256 slots in it, but at 9K per slot that can consume over 2MB of
 * RAM. This is a bit much, especially considering we also need
 * RAM for the standard ring and mini ring (on the Tigon 2). To
 * save space, we only actually allocate enough memory for 64 slots
 * by default, which works out to between 500 and 600K. This can
 * be tuned by changing a #define in if_tireg.h.
 */
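/*
 * Rough arithmetic behind the figures above, assuming roughly 9K (TI_JLEN)
 * per slot: 256 slots x 9K is about 2.3MB, while 64 slots x 9K is 576K,
 * which matches the "between 500 and 600K" estimate.
 */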
568 ti_alloc_jumbo_mem(struct ti_softc
*sc
)
572 struct ti_jpool_entry
*entry
;
573 bus_dma_segment_t dmaseg
;
576 /* Grab a big chunk o' storage. */
577 if ((error
= bus_dmamem_alloc(sc
->sc_dmat
,
578 TI_JMEM
, PAGE_SIZE
, 0, &dmaseg
, 1, &dmanseg
,
579 BUS_DMA_NOWAIT
)) != 0) {
580 aprint_error_dev(&sc
->sc_dev
, "can't allocate jumbo buffer, error = %d\n",
585 if ((error
= bus_dmamem_map(sc
->sc_dmat
, &dmaseg
, dmanseg
,
586 TI_JMEM
, (void **)&sc
->ti_cdata
.ti_jumbo_buf
,
587 BUS_DMA_NOWAIT
|BUS_DMA_COHERENT
)) != 0) {
588 aprint_error_dev(&sc
->sc_dev
, "can't map jumbo buffer, error = %d\n",
593 if ((error
= bus_dmamap_create(sc
->sc_dmat
,
595 TI_JMEM
, 0, BUS_DMA_NOWAIT
,
596 &sc
->jumbo_dmamap
)) != 0) {
597 aprint_error_dev(&sc
->sc_dev
, "can't create jumbo buffer DMA map, error = %d\n",
602 if ((error
= bus_dmamap_load(sc
->sc_dmat
, sc
->jumbo_dmamap
,
603 sc
->ti_cdata
.ti_jumbo_buf
, TI_JMEM
, NULL
,
604 BUS_DMA_NOWAIT
)) != 0) {
605 aprint_error_dev(&sc
->sc_dev
, "can't load jumbo buffer DMA map, error = %d\n",
609 sc
->jumbo_dmaaddr
= sc
->jumbo_dmamap
->dm_segs
[0].ds_addr
;
611 SIMPLEQ_INIT(&sc
->ti_jfree_listhead
);
612 SIMPLEQ_INIT(&sc
->ti_jinuse_listhead
);
615 * Now divide it up into 9K pieces and save the addresses
618 ptr
= sc
->ti_cdata
.ti_jumbo_buf
;
619 for (i
= 0; i
< TI_JSLOTS
; i
++) {
620 sc
->ti_cdata
.ti_jslots
[i
] = ptr
;
622 entry
= malloc(sizeof(struct ti_jpool_entry
),
625 free(sc
->ti_cdata
.ti_jumbo_buf
, M_DEVBUF
);
626 sc
->ti_cdata
.ti_jumbo_buf
= NULL
;
627 printf("%s: no memory for jumbo "
628 "buffer queue!\n", device_xname(&sc
->sc_dev
));
632 SIMPLEQ_INSERT_HEAD(&sc
->ti_jfree_listhead
, entry
,
640 * Allocate a jumbo buffer.
643 ti_jalloc(struct ti_softc
*sc
)
645 struct ti_jpool_entry
*entry
;
647 entry
= SIMPLEQ_FIRST(&sc
->ti_jfree_listhead
);
650 printf("%s: no free jumbo buffers\n", device_xname(&sc
->sc_dev
));
654 SIMPLEQ_REMOVE_HEAD(&sc
->ti_jfree_listhead
, jpool_entries
);
655 SIMPLEQ_INSERT_HEAD(&sc
->ti_jinuse_listhead
, entry
, jpool_entries
);
657 return (sc
->ti_cdata
.ti_jslots
[entry
->slot
]);
661 * Release a jumbo buffer.
664 ti_jfree(struct mbuf
*m
, void *tbuf
, size_t size
, void *arg
)
668 struct ti_jpool_entry
*entry
;
670 /* Extract the softc struct pointer. */
671 sc
= (struct ti_softc
*)arg
;
674 panic("ti_jfree: didn't get softc pointer!");
676 /* calculate the slot this buffer belongs to */
679 - (char *)sc
->ti_cdata
.ti_jumbo_buf
) / TI_JLEN
;
681 if ((i
< 0) || (i
>= TI_JSLOTS
))
682 panic("ti_jfree: asked to free buffer that we don't manage!");
685 entry
= SIMPLEQ_FIRST(&sc
->ti_jinuse_listhead
);
687 panic("ti_jfree: buffer not in use!");
689 SIMPLEQ_REMOVE_HEAD(&sc
->ti_jinuse_listhead
, jpool_entries
);
690 SIMPLEQ_INSERT_HEAD(&sc
->ti_jfree_listhead
, entry
, jpool_entries
);
692 if (__predict_true(m
!= NULL
))
693 pool_cache_put(mb_cache
, m
);
* Initialize a standard receive ring descriptor.
702 ti_newbuf_std(struct ti_softc
*sc
, int i
, struct mbuf
*m
, bus_dmamap_t dmamap
)
704 struct mbuf
*m_new
= NULL
;
705 struct ti_rx_desc
*r
;
708 if (dmamap
== NULL
) {
711 if ((error
= bus_dmamap_create(sc
->sc_dmat
, MCLBYTES
, 1,
712 MCLBYTES
, 0, BUS_DMA_NOWAIT
,
714 aprint_error_dev(&sc
->sc_dev
, "can't create recv map, error = %d\n",
719 sc
->std_dmamap
[i
] = dmamap
;
722 MGETHDR(m_new
, M_DONTWAIT
, MT_DATA
);
724 aprint_error_dev(&sc
->sc_dev
, "mbuf allocation failed "
725 "-- packet dropped!\n");
729 MCLGET(m_new
, M_DONTWAIT
);
730 if (!(m_new
->m_flags
& M_EXT
)) {
731 aprint_error_dev(&sc
->sc_dev
, "cluster allocation failed "
732 "-- packet dropped!\n");
736 m_new
->m_len
= m_new
->m_pkthdr
.len
= MCLBYTES
;
737 m_adj(m_new
, ETHER_ALIGN
);
739 if ((error
= bus_dmamap_load(sc
->sc_dmat
, dmamap
,
740 mtod(m_new
, void *), m_new
->m_len
, NULL
,
741 BUS_DMA_READ
|BUS_DMA_NOWAIT
)) != 0) {
742 aprint_error_dev(&sc
->sc_dev
, "can't load recv map, error = %d\n",
748 m_new
->m_len
= m_new
->m_pkthdr
.len
= MCLBYTES
;
749 m_new
->m_data
= m_new
->m_ext
.ext_buf
;
750 m_adj(m_new
, ETHER_ALIGN
);
752 /* reuse the dmamap */
755 sc
->ti_cdata
.ti_rx_std_chain
[i
] = m_new
;
756 r
= &sc
->ti_rdata
->ti_rx_std_ring
[i
];
757 TI_HOSTADDR(r
->ti_addr
) = dmamap
->dm_segs
[0].ds_addr
;
758 r
->ti_type
= TI_BDTYPE_RECV_BD
;
760 if (sc
->ethercom
.ec_if
.if_capenable
& IFCAP_CSUM_IPv4_Rx
)
761 r
->ti_flags
|= TI_BDFLAG_IP_CKSUM
;
762 if (sc
->ethercom
.ec_if
.if_capenable
&
763 (IFCAP_CSUM_TCPv4_Rx
| IFCAP_CSUM_UDPv4_Rx
))
764 r
->ti_flags
|= TI_BDFLAG_TCP_UDP_CKSUM
;
765 r
->ti_len
= m_new
->m_len
; /* == ds_len */
* Initialize a mini receive ring descriptor. This only applies to the Tigon 2.
776 ti_newbuf_mini(struct ti_softc
*sc
, int i
, struct mbuf
*m
, bus_dmamap_t dmamap
)
778 struct mbuf
*m_new
= NULL
;
779 struct ti_rx_desc
*r
;
782 if (dmamap
== NULL
) {
785 if ((error
= bus_dmamap_create(sc
->sc_dmat
, MHLEN
, 1,
786 MHLEN
, 0, BUS_DMA_NOWAIT
,
788 aprint_error_dev(&sc
->sc_dev
, "can't create recv map, error = %d\n",
793 sc
->mini_dmamap
[i
] = dmamap
;
796 MGETHDR(m_new
, M_DONTWAIT
, MT_DATA
);
798 aprint_error_dev(&sc
->sc_dev
, "mbuf allocation failed "
799 "-- packet dropped!\n");
802 m_new
->m_len
= m_new
->m_pkthdr
.len
= MHLEN
;
803 m_adj(m_new
, ETHER_ALIGN
);
805 if ((error
= bus_dmamap_load(sc
->sc_dmat
, dmamap
,
806 mtod(m_new
, void *), m_new
->m_len
, NULL
,
807 BUS_DMA_READ
|BUS_DMA_NOWAIT
)) != 0) {
808 aprint_error_dev(&sc
->sc_dev
, "can't load recv map, error = %d\n",
814 m_new
->m_data
= m_new
->m_pktdat
;
815 m_new
->m_len
= m_new
->m_pkthdr
.len
= MHLEN
;
816 m_adj(m_new
, ETHER_ALIGN
);
818 /* reuse the dmamap */
821 r
= &sc
->ti_rdata
->ti_rx_mini_ring
[i
];
822 sc
->ti_cdata
.ti_rx_mini_chain
[i
] = m_new
;
823 TI_HOSTADDR(r
->ti_addr
) = dmamap
->dm_segs
[0].ds_addr
;
824 r
->ti_type
= TI_BDTYPE_RECV_BD
;
825 r
->ti_flags
= TI_BDFLAG_MINI_RING
;
826 if (sc
->ethercom
.ec_if
.if_capenable
& IFCAP_CSUM_IPv4_Rx
)
827 r
->ti_flags
|= TI_BDFLAG_IP_CKSUM
;
828 if (sc
->ethercom
.ec_if
.if_capenable
&
829 (IFCAP_CSUM_TCPv4_Rx
| IFCAP_CSUM_UDPv4_Rx
))
830 r
->ti_flags
|= TI_BDFLAG_TCP_UDP_CKSUM
;
831 r
->ti_len
= m_new
->m_len
; /* == ds_len */
838 * Initialize a jumbo receive ring descriptor. This allocates
839 * a jumbo buffer from the pool managed internally by the driver.
842 ti_newbuf_jumbo(struct ti_softc
*sc
, int i
, struct mbuf
*m
)
844 struct mbuf
*m_new
= NULL
;
845 struct ti_rx_desc
*r
;
850 /* Allocate the mbuf. */
851 MGETHDR(m_new
, M_DONTWAIT
, MT_DATA
);
853 aprint_error_dev(&sc
->sc_dev
, "mbuf allocation failed "
854 "-- packet dropped!\n");
858 /* Allocate the jumbo buffer */
859 tbuf
= ti_jalloc(sc
);
862 aprint_error_dev(&sc
->sc_dev
, "jumbo allocation failed "
863 "-- packet dropped!\n");
867 /* Attach the buffer to the mbuf. */
868 MEXTADD(m_new
, tbuf
, ETHER_MAX_LEN_JUMBO
,
869 M_DEVBUF
, ti_jfree
, sc
);
870 m_new
->m_flags
|= M_EXT_RW
;
871 m_new
->m_len
= m_new
->m_pkthdr
.len
= ETHER_MAX_LEN_JUMBO
;
874 m_new
->m_data
= m_new
->m_ext
.ext_buf
;
875 m_new
->m_ext
.ext_size
= ETHER_MAX_LEN_JUMBO
;
878 m_adj(m_new
, ETHER_ALIGN
);
879 /* Set up the descriptor. */
880 r
= &sc
->ti_rdata
->ti_rx_jumbo_ring
[i
];
881 sc
->ti_cdata
.ti_rx_jumbo_chain
[i
] = m_new
;
882 TI_HOSTADDR(r
->ti_addr
) = sc
->jumbo_dmaaddr
+
883 (mtod(m_new
, char *) - (char *)sc
->ti_cdata
.ti_jumbo_buf
);
884 r
->ti_type
= TI_BDTYPE_RECV_JUMBO_BD
;
885 r
->ti_flags
= TI_BDFLAG_JUMBO_RING
;
886 if (sc
->ethercom
.ec_if
.if_capenable
& IFCAP_CSUM_IPv4_Rx
)
887 r
->ti_flags
|= TI_BDFLAG_IP_CKSUM
;
888 if (sc
->ethercom
.ec_if
.if_capenable
&
889 (IFCAP_CSUM_TCPv4_Rx
| IFCAP_CSUM_UDPv4_Rx
))
890 r
->ti_flags
|= TI_BDFLAG_TCP_UDP_CKSUM
;
891 r
->ti_len
= m_new
->m_len
;
/*
 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
 * that's 1MB of memory, which is a lot. For now, we fill only the first
 * 256 ring entries and hope that our CPU is fast enough to keep up with
 * the NIC.
 */
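/*
 * (512 entries x 2K clusters = 1MB when fully populated; TI_SSLOTS,
 * presumably 256 here, is the number of entries actually filled below,
 * halving that footprint.)
 */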
904 ti_init_rx_ring_std(struct ti_softc
*sc
)
907 struct ti_cmd_desc cmd
;
909 for (i
= 0; i
< TI_SSLOTS
; i
++) {
910 if (ti_newbuf_std(sc
, i
, NULL
, 0) == ENOBUFS
)
914 TI_UPDATE_STDPROD(sc
, i
- 1);
921 ti_free_rx_ring_std(struct ti_softc
*sc
)
925 for (i
= 0; i
< TI_STD_RX_RING_CNT
; i
++) {
926 if (sc
->ti_cdata
.ti_rx_std_chain
[i
] != NULL
) {
927 m_freem(sc
->ti_cdata
.ti_rx_std_chain
[i
]);
928 sc
->ti_cdata
.ti_rx_std_chain
[i
] = NULL
;
930 /* if (sc->std_dmamap[i] == 0) panic() */
931 bus_dmamap_destroy(sc
->sc_dmat
, sc
->std_dmamap
[i
]);
932 sc
->std_dmamap
[i
] = 0;
934 memset((char *)&sc
->ti_rdata
->ti_rx_std_ring
[i
], 0,
935 sizeof(struct ti_rx_desc
));
942 ti_init_rx_ring_jumbo(struct ti_softc
*sc
)
945 struct ti_cmd_desc cmd
;
947 for (i
= 0; i
< TI_JUMBO_RX_RING_CNT
; i
++) {
948 if (ti_newbuf_jumbo(sc
, i
, NULL
) == ENOBUFS
)
952 TI_UPDATE_JUMBOPROD(sc
, i
- 1);
953 sc
->ti_jumbo
= i
- 1;
959 ti_free_rx_ring_jumbo(struct ti_softc
*sc
)
963 for (i
= 0; i
< TI_JUMBO_RX_RING_CNT
; i
++) {
964 if (sc
->ti_cdata
.ti_rx_jumbo_chain
[i
] != NULL
) {
965 m_freem(sc
->ti_cdata
.ti_rx_jumbo_chain
[i
]);
966 sc
->ti_cdata
.ti_rx_jumbo_chain
[i
] = NULL
;
968 memset((char *)&sc
->ti_rdata
->ti_rx_jumbo_ring
[i
], 0,
969 sizeof(struct ti_rx_desc
));
976 ti_init_rx_ring_mini(struct ti_softc
*sc
)
980 for (i
= 0; i
< TI_MSLOTS
; i
++) {
981 if (ti_newbuf_mini(sc
, i
, NULL
, 0) == ENOBUFS
)
985 TI_UPDATE_MINIPROD(sc
, i
- 1);
992 ti_free_rx_ring_mini(struct ti_softc
*sc
)
996 for (i
= 0; i
< TI_MINI_RX_RING_CNT
; i
++) {
997 if (sc
->ti_cdata
.ti_rx_mini_chain
[i
] != NULL
) {
998 m_freem(sc
->ti_cdata
.ti_rx_mini_chain
[i
]);
999 sc
->ti_cdata
.ti_rx_mini_chain
[i
] = NULL
;
1001 /* if (sc->mini_dmamap[i] == 0) panic() */
1002 bus_dmamap_destroy(sc
->sc_dmat
, sc
->mini_dmamap
[i
]);
1003 sc
->mini_dmamap
[i
] = 0;
1005 memset((char *)&sc
->ti_rdata
->ti_rx_mini_ring
[i
], 0,
1006 sizeof(struct ti_rx_desc
));
1013 ti_free_tx_ring(struct ti_softc
*sc
)
1016 struct txdmamap_pool_entry
*dma
;
1018 if (sc
->ti_rdata
->ti_tx_ring
== NULL
)
1021 for (i
= 0; i
< TI_TX_RING_CNT
; i
++) {
1022 if (sc
->ti_cdata
.ti_tx_chain
[i
] != NULL
) {
1023 m_freem(sc
->ti_cdata
.ti_tx_chain
[i
]);
1024 sc
->ti_cdata
.ti_tx_chain
[i
] = NULL
;
1026 /* if (sc->txdma[i] == 0) panic() */
1027 SIMPLEQ_INSERT_HEAD(&sc
->txdma_list
, sc
->txdma
[i
],
1031 memset((char *)&sc
->ti_rdata
->ti_tx_ring
[i
], 0,
1032 sizeof(struct ti_tx_desc
));
1035 while ((dma
= SIMPLEQ_FIRST(&sc
->txdma_list
))) {
1036 SIMPLEQ_REMOVE_HEAD(&sc
->txdma_list
, link
);
1037 bus_dmamap_destroy(sc
->sc_dmat
, dma
->dmamap
);
1038 free(dma
, M_DEVBUF
);
1045 ti_init_tx_ring(struct ti_softc
*sc
)
1048 bus_dmamap_t dmamap
;
1049 struct txdmamap_pool_entry
*dma
;
1052 sc
->ti_tx_saved_considx
= 0;
1053 CSR_WRITE_4(sc
, TI_MB_SENDPROD_IDX
, 0);
1055 SIMPLEQ_INIT(&sc
->txdma_list
);
1056 for (i
= 0; i
< TI_RSLOTS
; i
++) {
1057 /* I've seen mbufs with 30 fragments. */
1058 if ((error
= bus_dmamap_create(sc
->sc_dmat
, ETHER_MAX_LEN_JUMBO
,
1059 40, ETHER_MAX_LEN_JUMBO
, 0,
1060 BUS_DMA_NOWAIT
, &dmamap
)) != 0) {
1061 aprint_error_dev(&sc
->sc_dev
, "can't create tx map, error = %d\n",
1065 dma
= malloc(sizeof(*dma
), M_DEVBUF
, M_NOWAIT
);
1067 aprint_error_dev(&sc
->sc_dev
, "can't alloc txdmamap_pool_entry\n");
1068 bus_dmamap_destroy(sc
->sc_dmat
, dmamap
);
1071 dma
->dmamap
= dmamap
;
1072 SIMPLEQ_INSERT_HEAD(&sc
->txdma_list
, dma
, link
);
1079 * The Tigon 2 firmware has a new way to add/delete multicast addresses,
1080 * but we have to support the old way too so that Tigon 1 cards will
1084 ti_add_mcast(struct ti_softc
*sc
, struct ether_addr
*addr
)
1086 struct ti_cmd_desc cmd
;
1088 u_int32_t ext
[2] = {0, 0};
1090 m
= (u_int16_t
*)&addr
->ether_addr_octet
[0]; /* XXX */
1092 switch (sc
->ti_hwrev
) {
1093 case TI_HWREV_TIGON
:
1094 CSR_WRITE_4(sc
, TI_GCR_MAR0
, htons(m
[0]));
1095 CSR_WRITE_4(sc
, TI_GCR_MAR1
, (htons(m
[1]) << 16) | htons(m
[2]));
1096 TI_DO_CMD(TI_CMD_ADD_MCAST_ADDR
, 0, 0);
1098 case TI_HWREV_TIGON_II
:
1099 ext
[0] = htons(m
[0]);
1100 ext
[1] = (htons(m
[1]) << 16) | htons(m
[2]);
1101 TI_DO_CMD_EXT(TI_CMD_EXT_ADD_MCAST
, 0, 0, (void *)&ext
, 2);
1104 printf("%s: unknown hwrev\n", device_xname(&sc
->sc_dev
));
1112 ti_del_mcast(struct ti_softc
*sc
, struct ether_addr
*addr
)
1114 struct ti_cmd_desc cmd
;
1116 u_int32_t ext
[2] = {0, 0};
1118 m
= (u_int16_t
*)&addr
->ether_addr_octet
[0]; /* XXX */
1120 switch (sc
->ti_hwrev
) {
1121 case TI_HWREV_TIGON
:
1122 CSR_WRITE_4(sc
, TI_GCR_MAR0
, htons(m
[0]));
1123 CSR_WRITE_4(sc
, TI_GCR_MAR1
, (htons(m
[1]) << 16) | htons(m
[2]));
1124 TI_DO_CMD(TI_CMD_DEL_MCAST_ADDR
, 0, 0);
1126 case TI_HWREV_TIGON_II
:
1127 ext
[0] = htons(m
[0]);
1128 ext
[1] = (htons(m
[1]) << 16) | htons(m
[2]);
1129 TI_DO_CMD_EXT(TI_CMD_EXT_DEL_MCAST
, 0, 0, (void *)&ext
, 2);
1132 printf("%s: unknown hwrev\n", device_xname(&sc
->sc_dev
));
/*
 * Configure the Tigon's multicast address filter.
 *
 * The actual multicast table management is a bit of a pain, thanks to
 * slight brain damage on the part of both Alteon and us. With our
 * multicast code, we are only alerted when the multicast address table
 * changes and at that point we only have the current list of addresses:
 * we only know the current state, not the previous state, so we don't
 * actually know what addresses were removed or added. The firmware has
 * state, but we can't get our grubby mitts on it, and there is no 'delete
 * all multicast addresses' command. Hence, we have to maintain our own
 * state so we know what addresses have been programmed into the NIC at
 * any given time.
 */
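/*
 * In outline, ti_setmulti() below masks the NIC interrupt mailbox, deletes
 * every address recorded on ti_mc_listhead from the firmware filter,
 * rebuilds that list from the current ether_multi chain (adding each entry
 * with ti_add_mcast()), and then unmasks interrupts; a range of addresses
 * or an allocation failure falls back to ALLMULTI instead.
 */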
1154 ti_setmulti(struct ti_softc
*sc
)
1157 struct ti_cmd_desc cmd
;
1158 struct ti_mc_entry
*mc
;
1160 struct ether_multi
*enm
;
1161 struct ether_multistep step
;
1163 ifp
= &sc
->ethercom
.ec_if
;
1165 /* Disable interrupts. */
1166 intrs
= CSR_READ_4(sc
, TI_MB_HOSTINTR
);
1167 CSR_WRITE_4(sc
, TI_MB_HOSTINTR
, 1);
1169 /* First, zot all the existing filters. */
1170 while ((mc
= SIMPLEQ_FIRST(&sc
->ti_mc_listhead
)) != NULL
) {
1171 ti_del_mcast(sc
, &mc
->mc_addr
);
1172 SIMPLEQ_REMOVE_HEAD(&sc
->ti_mc_listhead
, mc_entries
);
1177 * Remember all multicast addresses so that we can delete them
1178 * later. Punt if there is a range of addresses or memory shortage.
1180 ETHER_FIRST_MULTI(step
, &sc
->ethercom
, enm
);
1181 while (enm
!= NULL
) {
1182 if (memcmp(enm
->enm_addrlo
, enm
->enm_addrhi
,
1183 ETHER_ADDR_LEN
) != 0)
1185 if ((mc
= malloc(sizeof(struct ti_mc_entry
), M_DEVBUF
,
1188 memcpy(&mc
->mc_addr
, enm
->enm_addrlo
, ETHER_ADDR_LEN
);
1189 SIMPLEQ_INSERT_HEAD(&sc
->ti_mc_listhead
, mc
, mc_entries
);
1190 ETHER_NEXT_MULTI(step
, enm
);
1193 /* Accept only programmed multicast addresses */
1194 ifp
->if_flags
&= ~IFF_ALLMULTI
;
1195 TI_DO_CMD(TI_CMD_SET_ALLMULTI
, TI_CMD_CODE_ALLMULTI_DIS
, 0);
1197 /* Now program new ones. */
1198 SIMPLEQ_FOREACH(mc
, &sc
->ti_mc_listhead
, mc_entries
)
1199 ti_add_mcast(sc
, &mc
->mc_addr
);
1201 /* Re-enable interrupts. */
1202 CSR_WRITE_4(sc
, TI_MB_HOSTINTR
, intrs
);
1207 /* No need to keep individual multicast addresses */
1208 while ((mc
= SIMPLEQ_FIRST(&sc
->ti_mc_listhead
)) != NULL
) {
1209 SIMPLEQ_REMOVE_HEAD(&sc
->ti_mc_listhead
, mc_entries
);
1213 /* Accept all multicast addresses */
1214 ifp
->if_flags
|= IFF_ALLMULTI
;
1215 TI_DO_CMD(TI_CMD_SET_ALLMULTI
, TI_CMD_CODE_ALLMULTI_ENB
, 0);
1217 /* Re-enable interrupts. */
1218 CSR_WRITE_4(sc
, TI_MB_HOSTINTR
, intrs
);
1222 * Check to see if the BIOS has configured us for a 64 bit slot when
1223 * we aren't actually in one. If we detect this condition, we can work
1224 * around it on the Tigon 2 by setting a bit in the PCI state register,
1225 * but for the Tigon 1 we must give up and abort the interface attach.
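/*
 * The check below writes a test pattern to one scratch location and reads
 * it back through the neighbouring word; if the pattern shows up there,
 * the upper half of the 64-bit data path is apparently being mirrored,
 * i.e. the board is really in a 32-bit slot despite what the BIOS
 * programmed into the PCI state register.
 */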
1228 ti_64bitslot_war(struct ti_softc
*sc
)
1230 if (!(CSR_READ_4(sc
, TI_PCI_STATE
) & TI_PCISTATE_32BIT_BUS
)) {
1231 CSR_WRITE_4(sc
, 0x600, 0);
1232 CSR_WRITE_4(sc
, 0x604, 0);
1233 CSR_WRITE_4(sc
, 0x600, 0x5555AAAA);
1234 if (CSR_READ_4(sc
, 0x604) == 0x5555AAAA) {
1235 if (sc
->ti_hwrev
== TI_HWREV_TIGON
)
1238 TI_SETBIT(sc
, TI_PCI_STATE
,
1239 TI_PCISTATE_32BIT_BUS
);
1249 * Do endian, PCI and DMA initialization. Also check the on-board ROM
1250 * self-test results.
1253 ti_chipinit(struct ti_softc
*sc
)
1255 u_int32_t cacheline
;
1256 u_int32_t pci_writemax
= 0;
1259 /* Initialize link to down state. */
1260 sc
->ti_linkstat
= TI_EV_CODE_LINK_DOWN
;
1262 /* Set endianness before we access any non-PCI registers. */
1263 #if BYTE_ORDER == BIG_ENDIAN
1264 CSR_WRITE_4(sc
, TI_MISC_HOST_CTL
,
1265 TI_MHC_BIGENDIAN_INIT
| (TI_MHC_BIGENDIAN_INIT
<< 24));
1267 CSR_WRITE_4(sc
, TI_MISC_HOST_CTL
,
1268 TI_MHC_LITTLEENDIAN_INIT
| (TI_MHC_LITTLEENDIAN_INIT
<< 24));
1271 /* Check the ROM failed bit to see if self-tests passed. */
1272 if (CSR_READ_4(sc
, TI_CPU_STATE
) & TI_CPUSTATE_ROMFAIL
) {
1273 printf("%s: board self-diagnostics failed!\n",
1274 device_xname(&sc
->sc_dev
));
1279 TI_SETBIT(sc
, TI_CPU_STATE
, TI_CPUSTATE_HALT
);
1281 /* Figure out the hardware revision. */
1282 rev
= CSR_READ_4(sc
, TI_MISC_HOST_CTL
) & TI_MHC_CHIP_REV_MASK
;
1284 case TI_REV_TIGON_I
:
1285 sc
->ti_hwrev
= TI_HWREV_TIGON
;
1287 case TI_REV_TIGON_II
:
1288 sc
->ti_hwrev
= TI_HWREV_TIGON_II
;
1291 printf("%s: unsupported chip revision 0x%x\n",
1292 device_xname(&sc
->sc_dev
), rev
);
1296 /* Do special setup for Tigon 2. */
1297 if (sc
->ti_hwrev
== TI_HWREV_TIGON_II
) {
1298 TI_SETBIT(sc
, TI_CPU_CTL_B
, TI_CPUSTATE_HALT
);
1299 TI_SETBIT(sc
, TI_MISC_LOCAL_CTL
, TI_MLC_SRAM_BANK_256K
);
1300 TI_SETBIT(sc
, TI_MISC_CONF
, TI_MCR_SRAM_SYNCHRONOUS
);
1303 /* Set up the PCI state register. */
1304 CSR_WRITE_4(sc
, TI_PCI_STATE
, TI_PCI_READ_CMD
|TI_PCI_WRITE_CMD
);
1305 if (sc
->ti_hwrev
== TI_HWREV_TIGON_II
) {
1306 TI_SETBIT(sc
, TI_PCI_STATE
, TI_PCISTATE_USE_MEM_RD_MULT
);
1309 /* Clear the read/write max DMA parameters. */
1310 TI_CLRBIT(sc
, TI_PCI_STATE
, (TI_PCISTATE_WRITE_MAXDMA
|
1311 TI_PCISTATE_READ_MAXDMA
));
1313 /* Get cache line size. */
1314 cacheline
= PCI_CACHELINE(CSR_READ_4(sc
, PCI_BHLC_REG
));
* If the system has enabled the PCI memory write
1318 * and invalidate command in the command register, set
1319 * the write max parameter accordingly. This is necessary
1320 * to use MWI with the Tigon 2.
1322 if (CSR_READ_4(sc
, PCI_COMMAND_STATUS_REG
)
1323 & PCI_COMMAND_INVALIDATE_ENABLE
) {
1324 switch (cacheline
) {
1333 /* Disable PCI memory write and invalidate. */
1335 printf("%s: cache line size %d not "
1336 "supported; disabling PCI MWI\n",
1337 device_xname(&sc
->sc_dev
), cacheline
);
1338 CSR_WRITE_4(sc
, PCI_COMMAND_STATUS_REG
,
1339 CSR_READ_4(sc
, PCI_COMMAND_STATUS_REG
)
1340 & ~PCI_COMMAND_INVALIDATE_ENABLE
);
1345 #ifdef __brokenalpha__
1347 * From the Alteon sample driver:
* Must ensure that we do not cross an 8K (bytes) boundary
1349 * for DMA reads. Our highest limit is 1K bytes. This is a
1350 * restriction on some ALPHA platforms with early revision
1351 * 21174 PCI chipsets, such as the AlphaPC 164lx
1353 TI_SETBIT(sc
, TI_PCI_STATE
, pci_writemax
|TI_PCI_READMAX_1024
);
1355 TI_SETBIT(sc
, TI_PCI_STATE
, pci_writemax
);
1358 /* This sets the min dma param all the way up (0xff). */
1359 TI_SETBIT(sc
, TI_PCI_STATE
, TI_PCISTATE_MINDMA
);
1361 /* Configure DMA variables. */
1362 #if BYTE_ORDER == BIG_ENDIAN
1363 CSR_WRITE_4(sc
, TI_GCR_OPMODE
, TI_OPMODE_BYTESWAP_BD
|
1364 TI_OPMODE_BYTESWAP_DATA
| TI_OPMODE_WORDSWAP_BD
|
1365 TI_OPMODE_WARN_ENB
| TI_OPMODE_FATAL_ENB
|
1366 TI_OPMODE_DONT_FRAG_JUMBO
);
1368 CSR_WRITE_4(sc
, TI_GCR_OPMODE
, TI_OPMODE_BYTESWAP_DATA
|
1369 TI_OPMODE_WORDSWAP_BD
|TI_OPMODE_DONT_FRAG_JUMBO
|
1370 TI_OPMODE_WARN_ENB
|TI_OPMODE_FATAL_ENB
);
1374 * Only allow 1 DMA channel to be active at a time.
1375 * I don't think this is a good idea, but without it
1376 * the firmware racks up lots of nicDmaReadRingFull
1378 * Incompatible with hardware assisted checksums.
1380 if ((sc
->ethercom
.ec_if
.if_capenable
&
1381 (IFCAP_CSUM_TCPv4_Tx
| IFCAP_CSUM_TCPv4_Rx
|
1382 IFCAP_CSUM_UDPv4_Tx
| IFCAP_CSUM_UDPv4_Rx
|
1383 IFCAP_CSUM_IPv4_Tx
| IFCAP_CSUM_IPv4_Rx
)) == 0)
1384 TI_SETBIT(sc
, TI_GCR_OPMODE
, TI_OPMODE_1_DMA_ACTIVE
);
1386 /* Recommended settings from Tigon manual. */
1387 CSR_WRITE_4(sc
, TI_GCR_DMA_WRITECFG
, TI_DMA_STATE_THRESH_8W
);
1388 CSR_WRITE_4(sc
, TI_GCR_DMA_READCFG
, TI_DMA_STATE_THRESH_8W
);
1390 if (ti_64bitslot_war(sc
)) {
1391 printf("%s: bios thinks we're in a 64 bit slot, "
1392 "but we aren't", device_xname(&sc
->sc_dev
));
1400 * Initialize the general information block and firmware, and
1401 * start the CPU(s) running.
1404 ti_gibinit(struct ti_softc
*sc
)
1410 ifp
= &sc
->ethercom
.ec_if
;
1412 /* Disable interrupts for now. */
1413 CSR_WRITE_4(sc
, TI_MB_HOSTINTR
, 1);
1415 /* Tell the chip where to find the general information block. */
1416 CSR_WRITE_4(sc
, TI_GCR_GENINFO_HI
, 0);
1417 CSR_WRITE_4(sc
, TI_GCR_GENINFO_LO
, TI_CDGIBADDR(sc
));
1419 /* Load the firmware into SRAM. */
1422 /* Set up the contents of the general info and ring control blocks. */
1424 /* Set up the event ring and producer pointer. */
1425 rcb
= &sc
->ti_rdata
->ti_info
.ti_ev_rcb
;
1427 TI_HOSTADDR(rcb
->ti_hostaddr
) = TI_CDEVENTADDR(sc
, 0);
1429 TI_HOSTADDR(sc
->ti_rdata
->ti_info
.ti_ev_prodidx_ptr
) =
1430 TI_CDEVPRODADDR(sc
);
1432 sc
->ti_ev_prodidx
.ti_idx
= 0;
1433 CSR_WRITE_4(sc
, TI_GCR_EVENTCONS_IDX
, 0);
1434 sc
->ti_ev_saved_considx
= 0;
1436 /* Set up the command ring and producer mailbox. */
1437 rcb
= &sc
->ti_rdata
->ti_info
.ti_cmd_rcb
;
1439 TI_HOSTADDR(rcb
->ti_hostaddr
) = TI_GCR_NIC_ADDR(TI_GCR_CMDRING
);
1441 rcb
->ti_max_len
= 0;
1442 for (i
= 0; i
< TI_CMD_RING_CNT
; i
++) {
1443 CSR_WRITE_4(sc
, TI_GCR_CMDRING
+ (i
* 4), 0);
1445 CSR_WRITE_4(sc
, TI_GCR_CMDCONS_IDX
, 0);
1446 CSR_WRITE_4(sc
, TI_MB_CMDPROD_IDX
, 0);
1447 sc
->ti_cmd_saved_prodidx
= 0;
1450 * Assign the address of the stats refresh buffer.
1451 * We re-use the current stats buffer for this to
1454 TI_HOSTADDR(sc
->ti_rdata
->ti_info
.ti_refresh_stats_ptr
) =
1457 /* Set up the standard receive ring. */
1458 rcb
= &sc
->ti_rdata
->ti_info
.ti_std_rx_rcb
;
1459 TI_HOSTADDR(rcb
->ti_hostaddr
) = TI_CDRXSTDADDR(sc
, 0);
1460 rcb
->ti_max_len
= ETHER_MAX_LEN
;
1462 if (ifp
->if_capenable
& IFCAP_CSUM_IPv4_Rx
)
1463 rcb
->ti_flags
|= TI_RCB_FLAG_IP_CKSUM
;
1464 if (ifp
->if_capenable
& (IFCAP_CSUM_TCPv4_Rx
|IFCAP_CSUM_UDPv4_Rx
))
1465 rcb
->ti_flags
|= TI_RCB_FLAG_TCP_UDP_CKSUM
;
1466 if (VLAN_ATTACHED(&sc
->ethercom
))
1467 rcb
->ti_flags
|= TI_RCB_FLAG_VLAN_ASSIST
;
1469 /* Set up the jumbo receive ring. */
1470 rcb
= &sc
->ti_rdata
->ti_info
.ti_jumbo_rx_rcb
;
1471 TI_HOSTADDR(rcb
->ti_hostaddr
) = TI_CDRXJUMBOADDR(sc
, 0);
1472 rcb
->ti_max_len
= ETHER_MAX_LEN_JUMBO
;
1474 if (ifp
->if_capenable
& IFCAP_CSUM_IPv4_Rx
)
1475 rcb
->ti_flags
|= TI_RCB_FLAG_IP_CKSUM
;
1476 if (ifp
->if_capenable
& (IFCAP_CSUM_TCPv4_Rx
|IFCAP_CSUM_UDPv4_Rx
))
1477 rcb
->ti_flags
|= TI_RCB_FLAG_TCP_UDP_CKSUM
;
1478 if (VLAN_ATTACHED(&sc
->ethercom
))
1479 rcb
->ti_flags
|= TI_RCB_FLAG_VLAN_ASSIST
;
1482 * Set up the mini ring. Only activated on the
1483 * Tigon 2 but the slot in the config block is
1484 * still there on the Tigon 1.
1486 rcb
= &sc
->ti_rdata
->ti_info
.ti_mini_rx_rcb
;
1487 TI_HOSTADDR(rcb
->ti_hostaddr
) = TI_CDRXMINIADDR(sc
, 0);
1488 rcb
->ti_max_len
= MHLEN
- ETHER_ALIGN
;
1489 if (sc
->ti_hwrev
== TI_HWREV_TIGON
)
1490 rcb
->ti_flags
= TI_RCB_FLAG_RING_DISABLED
;
1493 if (ifp
->if_capenable
& IFCAP_CSUM_IPv4_Rx
)
1494 rcb
->ti_flags
|= TI_RCB_FLAG_IP_CKSUM
;
1495 if (ifp
->if_capenable
& (IFCAP_CSUM_TCPv4_Rx
|IFCAP_CSUM_UDPv4_Rx
))
1496 rcb
->ti_flags
|= TI_RCB_FLAG_TCP_UDP_CKSUM
;
1497 if (VLAN_ATTACHED(&sc
->ethercom
))
1498 rcb
->ti_flags
|= TI_RCB_FLAG_VLAN_ASSIST
;
1501 * Set up the receive return ring.
1503 rcb
= &sc
->ti_rdata
->ti_info
.ti_return_rcb
;
1504 TI_HOSTADDR(rcb
->ti_hostaddr
) = TI_CDRXRTNADDR(sc
, 0);
1506 rcb
->ti_max_len
= TI_RETURN_RING_CNT
;
1507 TI_HOSTADDR(sc
->ti_rdata
->ti_info
.ti_return_prodidx_ptr
) =
1508 TI_CDRTNPRODADDR(sc
);
1511 * Set up the tx ring. Note: for the Tigon 2, we have the option
1512 * of putting the transmit ring in the host's address space and
1513 * letting the chip DMA it instead of leaving the ring in the NIC's
1514 * memory and accessing it through the shared memory region. We
1515 * do this for the Tigon 2, but it doesn't work on the Tigon 1,
1516 * so we have to revert to the shared memory scheme if we detect
1519 CSR_WRITE_4(sc
, TI_WINBASE
, TI_TX_RING_BASE
);
1520 if (sc
->ti_hwrev
== TI_HWREV_TIGON
) {
1521 sc
->ti_tx_ring_nic
=
1522 (struct ti_tx_desc
*)(sc
->ti_vhandle
+ TI_WINDOW
);
1524 memset((char *)sc
->ti_rdata
->ti_tx_ring
, 0,
1525 TI_TX_RING_CNT
* sizeof(struct ti_tx_desc
));
1526 rcb
= &sc
->ti_rdata
->ti_info
.ti_tx_rcb
;
1527 if (sc
->ti_hwrev
== TI_HWREV_TIGON
)
1530 rcb
->ti_flags
= TI_RCB_FLAG_HOST_RING
;
1531 if (ifp
->if_capenable
& IFCAP_CSUM_IPv4_Tx
)
1532 rcb
->ti_flags
|= TI_RCB_FLAG_IP_CKSUM
;
1534 * When we get the packet, there is a pseudo-header seed already
1535 * in the th_sum or uh_sum field. Make sure the firmware doesn't
1536 * compute the pseudo-header checksum again!
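/*
 * (The stack has already stored the pseudo-header sum in th_sum/uh_sum
 * when it requested Tx checksum offload, which is why the TCP/UDP case
 * below also sets TI_RCB_FLAG_NO_PHDR_CKSUM.)
 */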
1538 if (ifp
->if_capenable
& (IFCAP_CSUM_TCPv4_Tx
|IFCAP_CSUM_UDPv4_Tx
))
1539 rcb
->ti_flags
|= TI_RCB_FLAG_TCP_UDP_CKSUM
|
1540 TI_RCB_FLAG_NO_PHDR_CKSUM
;
1541 if (VLAN_ATTACHED(&sc
->ethercom
))
1542 rcb
->ti_flags
|= TI_RCB_FLAG_VLAN_ASSIST
;
1543 rcb
->ti_max_len
= TI_TX_RING_CNT
;
1544 if (sc
->ti_hwrev
== TI_HWREV_TIGON
)
1545 TI_HOSTADDR(rcb
->ti_hostaddr
) = TI_TX_RING_BASE
;
1547 TI_HOSTADDR(rcb
->ti_hostaddr
) = TI_CDTXADDR(sc
, 0);
1548 TI_HOSTADDR(sc
->ti_rdata
->ti_info
.ti_tx_considx_ptr
) =
1549 TI_CDTXCONSADDR(sc
);
1552 * We're done frobbing the General Information Block. Sync
1553 * it. Note we take care of the first stats sync here, as
1556 TI_CDGIBSYNC(sc
, BUS_DMASYNC_PREREAD
|BUS_DMASYNC_PREWRITE
);
1558 /* Set up tuneables */
1559 if (ifp
->if_mtu
> (ETHERMTU
+ ETHER_HDR_LEN
+ ETHER_CRC_LEN
) ||
1560 (sc
->ethercom
.ec_capenable
& ETHERCAP_VLAN_MTU
))
1561 CSR_WRITE_4(sc
, TI_GCR_RX_COAL_TICKS
,
1562 (sc
->ti_rx_coal_ticks
/ 10));
1564 CSR_WRITE_4(sc
, TI_GCR_RX_COAL_TICKS
, sc
->ti_rx_coal_ticks
);
1565 CSR_WRITE_4(sc
, TI_GCR_TX_COAL_TICKS
, sc
->ti_tx_coal_ticks
);
1566 CSR_WRITE_4(sc
, TI_GCR_STAT_TICKS
, sc
->ti_stat_ticks
);
1567 CSR_WRITE_4(sc
, TI_GCR_RX_MAX_COAL_BD
, sc
->ti_rx_max_coal_bds
);
1568 CSR_WRITE_4(sc
, TI_GCR_TX_MAX_COAL_BD
, sc
->ti_tx_max_coal_bds
);
1569 CSR_WRITE_4(sc
, TI_GCR_TX_BUFFER_RATIO
, sc
->ti_tx_buf_ratio
);
1571 /* Turn interrupts on. */
1572 CSR_WRITE_4(sc
, TI_GCR_MASK_INTRS
, 0);
1573 CSR_WRITE_4(sc
, TI_MB_HOSTINTR
, 0);
1576 TI_CLRBIT(sc
, TI_CPU_STATE
, (TI_CPUSTATE_HALT
|TI_CPUSTATE_STEP
));
1582 * look for id in the device list, returning the first match
1584 static const struct ti_type
*
1585 ti_type_match(struct pci_attach_args
*pa
)
1587 const struct ti_type
*t
;
1590 while (t
->ti_name
!= NULL
) {
1591 if ((PCI_VENDOR(pa
->pa_id
) == t
->ti_vid
) &&
1592 (PCI_PRODUCT(pa
->pa_id
) == t
->ti_did
)) {
1602 * Probe for a Tigon chip. Check the PCI vendor and device IDs
1603 * against our list and return its name if we find a match.
1606 ti_probe(device_t parent
, cfdata_t match
, void *aux
)
1608 struct pci_attach_args
*pa
= aux
;
1609 const struct ti_type
*t
;
1611 t
= ti_type_match(pa
);
1613 return ((t
== NULL
) ? 0 : 1);
1617 ti_attach(device_t parent
, device_t self
, void *aux
)
1621 struct ti_softc
*sc
;
1622 u_int8_t eaddr
[ETHER_ADDR_LEN
];
1623 struct pci_attach_args
*pa
= aux
;
1624 pci_chipset_tag_t pc
= pa
->pa_pc
;
1625 pci_intr_handle_t ih
;
1626 const char *intrstr
= NULL
;
1627 bus_dma_segment_t dmaseg
;
1628 int error
, dmanseg
, nolinear
;
1629 const struct ti_type
*t
;
1631 t
= ti_type_match(pa
);
1633 printf("ti_attach: were did the card go ?\n");
1637 printf(": %s (rev. 0x%02x)\n", t
->ti_name
, PCI_REVISION(pa
->pa_class
));
1639 sc
= device_private(self
);
1642 * Map control/status registers.
1645 if (pci_mapreg_map(pa
, 0x10,
1646 PCI_MAPREG_TYPE_MEM
| PCI_MAPREG_MEM_TYPE_32BIT
,
1647 BUS_SPACE_MAP_LINEAR
, &sc
->ti_btag
, &sc
->ti_bhandle
,
1650 if (pci_mapreg_map(pa
, 0x10,
1651 PCI_MAPREG_TYPE_MEM
| PCI_MAPREG_MEM_TYPE_32BIT
,
1652 0 , &sc
->ti_btag
, &sc
->ti_bhandle
, NULL
, NULL
)) {
1653 printf(": can't map memory space\n");
1658 sc
->ti_vhandle
= bus_space_vaddr(sc
->ti_btag
, sc
->ti_bhandle
);
1660 sc
->ti_vhandle
= NULL
;
1662 command
= pci_conf_read(pc
, pa
->pa_tag
, PCI_COMMAND_STATUS_REG
);
1663 command
|= PCI_COMMAND_MASTER_ENABLE
;
1664 pci_conf_write(pc
, pa
->pa_tag
, PCI_COMMAND_STATUS_REG
, command
);
1666 /* Allocate interrupt */
1667 if (pci_intr_map(pa
, &ih
)) {
1668 aprint_error_dev(&sc
->sc_dev
, "couldn't map interrupt\n");
1671 intrstr
= pci_intr_string(pc
, ih
);
1672 sc
->sc_ih
= pci_intr_establish(pc
, ih
, IPL_NET
, ti_intr
, sc
);
1673 if (sc
->sc_ih
== NULL
) {
1674 aprint_error_dev(&sc
->sc_dev
, "couldn't establish interrupt");
1675 if (intrstr
!= NULL
)
1676 aprint_error(" at %s", intrstr
);
1680 aprint_normal_dev(&sc
->sc_dev
, "interrupting at %s\n", intrstr
);
1682 if (ti_chipinit(sc
)) {
1683 aprint_error_dev(self
, "chip initialization failed\n");
* Deal with some chip differences.
1690 switch (sc
->ti_hwrev
) {
1691 case TI_HWREV_TIGON
:
1692 sc
->sc_tx_encap
= ti_encap_tigon1
;
1693 sc
->sc_tx_eof
= ti_txeof_tigon1
;
1695 aprint_error_dev(self
, "memory space not mapped linear\n");
1698 case TI_HWREV_TIGON_II
:
1699 sc
->sc_tx_encap
= ti_encap_tigon2
;
1700 sc
->sc_tx_eof
= ti_txeof_tigon2
;
1704 printf("%s: Unknown chip version: %d\n", device_xname(self
),
1709 /* Zero out the NIC's on-board SRAM. */
1710 ti_mem(sc
, 0x2000, 0x100000 - 0x2000, NULL
);
1712 /* Init again -- zeroing memory may have clobbered some registers. */
1713 if (ti_chipinit(sc
)) {
1714 aprint_error_dev(self
, "chip initialization failed\n");
1719 * Get station address from the EEPROM. Note: the manual states
1720 * that the MAC address is at offset 0x8c, however the data is
1721 * stored as two longwords (since that's how it's loaded into
1722 * the NIC). This means the MAC address is actually preceded
1723 * by two zero bytes. We need to skip over those.
1725 if (ti_read_eeprom(sc
, (void *)&eaddr
,
1726 TI_EE_MAC_OFFSET
+ 2, ETHER_ADDR_LEN
)) {
1727 aprint_error_dev(self
, "failed to read station address\n");
1732 * A Tigon chip was detected. Inform the world.
1734 aprint_error_dev(self
, "Ethernet address: %s\n",
1735 ether_sprintf(eaddr
));
1737 sc
->sc_dmat
= pa
->pa_dmat
;
1739 /* Allocate the general information block and ring buffers. */
1740 if ((error
= bus_dmamem_alloc(sc
->sc_dmat
,
1741 sizeof(struct ti_ring_data
), PAGE_SIZE
, 0, &dmaseg
, 1, &dmanseg
,
1742 BUS_DMA_NOWAIT
)) != 0) {
1743 aprint_error_dev(&sc
->sc_dev
, "can't allocate ring buffer, error = %d\n",
1748 if ((error
= bus_dmamem_map(sc
->sc_dmat
, &dmaseg
, dmanseg
,
1749 sizeof(struct ti_ring_data
), (void **)&sc
->ti_rdata
,
1750 BUS_DMA_NOWAIT
|BUS_DMA_COHERENT
)) != 0) {
1751 aprint_error_dev(&sc
->sc_dev
, "can't map ring buffer, error = %d\n",
1756 if ((error
= bus_dmamap_create(sc
->sc_dmat
,
1757 sizeof(struct ti_ring_data
), 1,
1758 sizeof(struct ti_ring_data
), 0, BUS_DMA_NOWAIT
,
1759 &sc
->info_dmamap
)) != 0) {
1760 aprint_error_dev(&sc
->sc_dev
, "can't create ring buffer DMA map, error = %d\n",
1765 if ((error
= bus_dmamap_load(sc
->sc_dmat
, sc
->info_dmamap
,
1766 sc
->ti_rdata
, sizeof(struct ti_ring_data
), NULL
,
1767 BUS_DMA_NOWAIT
)) != 0) {
1768 aprint_error_dev(&sc
->sc_dev
, "can't load ring buffer DMA map, error = %d\n",
1773 sc
->info_dmaaddr
= sc
->info_dmamap
->dm_segs
[0].ds_addr
;
1775 memset(sc
->ti_rdata
, 0, sizeof(struct ti_ring_data
));
1777 /* Try to allocate memory for jumbo buffers. */
1778 if (ti_alloc_jumbo_mem(sc
)) {
1779 aprint_error_dev(self
, "jumbo buffer allocation failed\n");
1783 SIMPLEQ_INIT(&sc
->ti_mc_listhead
);
1786 * We really need a better way to tell a 1000baseT card
1787 * from a 1000baseSX one, since in theory there could be
1788 * OEMed 1000baseT cards from lame vendors who aren't
1789 * clever enough to change the PCI ID. For the moment
1790 * though, the AceNIC is the only copper card available.
1792 if ((PCI_VENDOR(pa
->pa_id
) == PCI_VENDOR_ALTEON
&&
1793 PCI_PRODUCT(pa
->pa_id
) == PCI_PRODUCT_ALTEON_ACENIC_COPPER
) ||
1794 (PCI_VENDOR(pa
->pa_id
) == PCI_VENDOR_NETGEAR
&&
1795 PCI_PRODUCT(pa
->pa_id
) == PCI_PRODUCT_NETGEAR_GA620T
))
1800 /* Set default tuneable values. */
1801 sc
->ti_stat_ticks
= 2 * TI_TICKS_PER_SEC
;
1802 sc
->ti_rx_coal_ticks
= TI_TICKS_PER_SEC
/ 5000;
1803 sc
->ti_tx_coal_ticks
= TI_TICKS_PER_SEC
/ 500;
1804 sc
->ti_rx_max_coal_bds
= 64;
1805 sc
->ti_tx_max_coal_bds
= 128;
1806 sc
->ti_tx_buf_ratio
= 21;
1808 /* Set up ifnet structure */
1809 ifp
= &sc
->ethercom
.ec_if
;
1811 strlcpy(ifp
->if_xname
, device_xname(&sc
->sc_dev
), IFNAMSIZ
);
1812 ifp
->if_flags
= IFF_BROADCAST
| IFF_SIMPLEX
| IFF_MULTICAST
;
1813 ifp
->if_ioctl
= ti_ioctl
;
1814 ifp
->if_start
= ti_start
;
1815 ifp
->if_watchdog
= ti_watchdog
;
1816 IFQ_SET_READY(&ifp
->if_snd
);
1820 * XXX This is not really correct -- we don't necessarily
1821 * XXX want to queue up as many as we can transmit at the
1822 * XXX upper layer like that. Someone with a board should
1823 * XXX check to see how this affects performance.
1825 ifp
->if_snd
.ifq_maxlen
= TI_TX_RING_CNT
- 1;
1829 * We can support 802.1Q VLAN-sized frames.
1831 sc
->ethercom
.ec_capabilities
|=
1832 ETHERCAP_VLAN_MTU
| ETHERCAP_VLAN_HWTAGGING
;
1835 * We can do IPv4, TCPv4, and UDPv4 checksums in hardware.
1837 ifp
->if_capabilities
|=
1838 IFCAP_CSUM_IPv4_Tx
| IFCAP_CSUM_IPv4_Rx
|
1839 IFCAP_CSUM_TCPv4_Tx
| IFCAP_CSUM_TCPv4_Rx
|
1840 IFCAP_CSUM_UDPv4_Tx
| IFCAP_CSUM_UDPv4_Rx
;
1842 /* Set up ifmedia support. */
1843 ifmedia_init(&sc
->ifmedia
, IFM_IMASK
, ti_ifmedia_upd
, ti_ifmedia_sts
);
1844 if (sc
->ti_copper
) {
1846 * Copper cards allow manual 10/100 mode selection,
1847 * but not manual 1000baseT mode selection. Why?
1848 * Because currently there's no way to specify the
1849 * master/slave setting through the firmware interface,
1850 * so Alteon decided to just bag it and handle it
1851 * via autonegotiation.
1853 ifmedia_add(&sc
->ifmedia
, IFM_ETHER
|IFM_10_T
, 0, NULL
);
1854 ifmedia_add(&sc
->ifmedia
,
1855 IFM_ETHER
|IFM_10_T
|IFM_FDX
, 0, NULL
);
1856 ifmedia_add(&sc
->ifmedia
, IFM_ETHER
|IFM_100_TX
, 0, NULL
);
1857 ifmedia_add(&sc
->ifmedia
,
1858 IFM_ETHER
|IFM_100_TX
|IFM_FDX
, 0, NULL
);
1859 ifmedia_add(&sc
->ifmedia
, IFM_ETHER
|IFM_1000_T
, 0, NULL
);
1860 ifmedia_add(&sc
->ifmedia
,
1861 IFM_ETHER
|IFM_1000_T
|IFM_FDX
, 0, NULL
);
1863 /* Fiber cards don't support 10/100 modes. */
1864 ifmedia_add(&sc
->ifmedia
, IFM_ETHER
|IFM_1000_SX
, 0, NULL
);
1865 ifmedia_add(&sc
->ifmedia
, IFM_ETHER
|IFM_1000_SX
|IFM_FDX
, 0, NULL
);
1867 ifmedia_add(&sc
->ifmedia
, IFM_ETHER
|IFM_AUTO
, 0, NULL
);
1868 ifmedia_set(&sc
->ifmedia
, IFM_ETHER
|IFM_AUTO
);
1871 * Call MI attach routines.
1874 ether_ifattach(ifp
, eaddr
);
1877 * Add shutdown hook so that DMA is disabled prior to reboot. Not
* doing so could allow DMA to corrupt kernel memory during the
1879 * reboot before the driver initializes.
1881 if (pmf_device_register1(self
, NULL
, NULL
, ti_shutdown
))
1882 pmf_class_network_register(self
, ifp
);
1884 aprint_error_dev(self
, "couldn't establish power handler\n");
1888 pci_intr_disestablish(pc
, sc
->sc_ih
);
1893 * Frame reception handling. This is called if there's a frame
1894 * on the receive return list.
1896 * Note: we have to be able to handle three possibilities here:
* 1) the frame is from the mini receive ring (can only happen
*    on Tigon 2 boards)
1899 * 2) the frame is from the jumbo receive ring
1900 * 3) the frame is from the standard receive ring
1904 ti_rxeof(struct ti_softc
*sc
)
1907 struct ti_cmd_desc cmd
;
1909 ifp
= &sc
->ethercom
.ec_if
;
1911 while (sc
->ti_rx_saved_considx
!= sc
->ti_return_prodidx
.ti_idx
) {
1912 struct ti_rx_desc
*cur_rx
;
1914 struct mbuf
*m
= NULL
;
1915 struct ether_header
*eh
;
1916 bus_dmamap_t dmamap
;
1919 &sc
->ti_rdata
->ti_rx_return_ring
[sc
->ti_rx_saved_considx
];
1920 rxidx
= cur_rx
->ti_idx
;
1921 TI_INC(sc
->ti_rx_saved_considx
, TI_RETURN_RING_CNT
);
1923 if (cur_rx
->ti_flags
& TI_BDFLAG_JUMBO_RING
) {
1924 TI_INC(sc
->ti_jumbo
, TI_JUMBO_RX_RING_CNT
);
1925 m
= sc
->ti_cdata
.ti_rx_jumbo_chain
[rxidx
];
1926 sc
->ti_cdata
.ti_rx_jumbo_chain
[rxidx
] = NULL
;
1927 if (cur_rx
->ti_flags
& TI_BDFLAG_ERROR
) {
1929 ti_newbuf_jumbo(sc
, sc
->ti_jumbo
, m
);
1932 if (ti_newbuf_jumbo(sc
, sc
->ti_jumbo
, NULL
)
1935 ti_newbuf_jumbo(sc
, sc
->ti_jumbo
, m
);
1938 } else if (cur_rx
->ti_flags
& TI_BDFLAG_MINI_RING
) {
1939 TI_INC(sc
->ti_mini
, TI_MINI_RX_RING_CNT
);
1940 m
= sc
->ti_cdata
.ti_rx_mini_chain
[rxidx
];
1941 sc
->ti_cdata
.ti_rx_mini_chain
[rxidx
] = NULL
;
1942 dmamap
= sc
->mini_dmamap
[rxidx
];
1943 sc
->mini_dmamap
[rxidx
] = 0;
1944 if (cur_rx
->ti_flags
& TI_BDFLAG_ERROR
) {
1946 ti_newbuf_mini(sc
, sc
->ti_mini
, m
, dmamap
);
1949 if (ti_newbuf_mini(sc
, sc
->ti_mini
, NULL
, dmamap
)
1952 ti_newbuf_mini(sc
, sc
->ti_mini
, m
, dmamap
);
1956 TI_INC(sc
->ti_std
, TI_STD_RX_RING_CNT
);
1957 m
= sc
->ti_cdata
.ti_rx_std_chain
[rxidx
];
1958 sc
->ti_cdata
.ti_rx_std_chain
[rxidx
] = NULL
;
1959 dmamap
= sc
->std_dmamap
[rxidx
];
1960 sc
->std_dmamap
[rxidx
] = 0;
1961 if (cur_rx
->ti_flags
& TI_BDFLAG_ERROR
) {
1963 ti_newbuf_std(sc
, sc
->ti_std
, m
, dmamap
);
1966 if (ti_newbuf_std(sc
, sc
->ti_std
, NULL
, dmamap
)
1969 ti_newbuf_std(sc
, sc
->ti_std
, m
, dmamap
);
1974 m
->m_pkthdr
.len
= m
->m_len
= cur_rx
->ti_len
;
1976 m
->m_pkthdr
.rcvif
= ifp
;
1980 * Handle BPF listeners. Let the BPF user see the packet, but
1981 * don't pass it up to the ether_input() layer unless it's
1982 * a broadcast packet, multicast packet, matches our ethernet
1983 * address or the interface is in promiscuous mode.
1986 bpf_mtap(ifp
->if_bpf
, m
);
1989 eh
= mtod(m
, struct ether_header
*);
1990 switch (ntohs(eh
->ether_type
)) {
1994 struct ip
*ip
= (struct ip
*) (eh
+ 1);
1997 * Note the Tigon firmware does not invert
1998 * the checksum for us, hence the XOR.
2000 m
->m_pkthdr
.csum_flags
|= M_CSUM_IPv4
;
2001 if ((cur_rx
->ti_ip_cksum
^ 0xffff) != 0)
2002 m
->m_pkthdr
.csum_flags
|= M_CSUM_IPv4_BAD
;
2004 * ntohs() the constant so the compiler can
2007 * XXX Figure out a sane way to deal with
2008 * fragmented packets.
2010 if ((ip
->ip_off
& htons(IP_MF
|IP_OFFMASK
)) == 0) {
2013 m
->m_pkthdr
.csum_data
=
2014 cur_rx
->ti_tcp_udp_cksum
;
2015 m
->m_pkthdr
.csum_flags
|=
2016 M_CSUM_TCPv4
|M_CSUM_DATA
;
2019 m
->m_pkthdr
.csum_data
=
2020 cur_rx
->ti_tcp_udp_cksum
;
2021 m
->m_pkthdr
.csum_flags
|=
2022 M_CSUM_UDPv4
|M_CSUM_DATA
;
2036 if (cur_rx
->ti_flags
& TI_BDFLAG_VLAN_TAG
) {
2037 VLAN_INPUT_TAG(ifp
, m
,
2038 /* ti_vlan_tag also has the priority, trim it */
2039 cur_rx
->ti_vlan_tag
& 4095,
2043 (*ifp
->if_input
)(ifp
, m
);
2046 /* Only necessary on the Tigon 1. */
2047 if (sc
->ti_hwrev
== TI_HWREV_TIGON
)
2048 CSR_WRITE_4(sc
, TI_GCR_RXRETURNCONS_IDX
,
2049 sc
->ti_rx_saved_considx
);
2051 TI_UPDATE_STDPROD(sc
, sc
->ti_std
);
2052 TI_UPDATE_MINIPROD(sc
, sc
->ti_mini
);
2053 TI_UPDATE_JUMBOPROD(sc
, sc
->ti_jumbo
);
static void
ti_txeof_tigon1(struct ti_softc *sc)
{
	struct ti_tx_desc *cur_tx = NULL;
	struct ifnet *ifp;
	struct txdmamap_pool_entry *dma;

	ifp = &sc->ethercom.ec_if;

	/*
	 * Go through our tx ring and free mbufs for those
	 * frames that have been sent.
	 */
	while (sc->ti_tx_saved_considx != sc->ti_tx_considx.ti_idx) {
		u_int32_t idx;

		idx = sc->ti_tx_saved_considx;
		if (idx > 383)
			CSR_WRITE_4(sc, TI_WINBASE,
			    TI_TX_RING_BASE + 6144);
		else if (idx > 255)
			CSR_WRITE_4(sc, TI_WINBASE,
			    TI_TX_RING_BASE + 4096);
		else if (idx > 127)
			CSR_WRITE_4(sc, TI_WINBASE,
			    TI_TX_RING_BASE + 2048);
		else
			CSR_WRITE_4(sc, TI_WINBASE,
			    TI_TX_RING_BASE);
		cur_tx = &sc->ti_tx_ring_nic[idx % 128];
		if (cur_tx->ti_flags & TI_BDFLAG_END)
			ifp->if_opackets++;
		if (sc->ti_cdata.ti_tx_chain[idx] != NULL) {
			m_freem(sc->ti_cdata.ti_tx_chain[idx]);
			sc->ti_cdata.ti_tx_chain[idx] = NULL;

			dma = sc->txdma[idx];
			KDASSERT(dma != NULL);
			bus_dmamap_sync(sc->sc_dmat, dma->dmamap, 0,
			    dma->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, dma->dmamap);

			SIMPLEQ_INSERT_HEAD(&sc->txdma_list, dma, link);
			sc->txdma[idx] = NULL;
		}
		sc->ti_txcnt--;
		TI_INC(sc->ti_tx_saved_considx, TI_TX_RING_CNT);
	}

	if (cur_tx != NULL)
		ifp->if_flags &= ~IFF_OACTIVE;
}
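/*
 * On the Tigon 2 the transmit ring lives in host memory
 * (sc->ti_rdata->ti_tx_ring), so no window shuffling is needed;
 * completed descriptors only have to be synced back to the CPU.
 * ti_txeof_tigon2() below remembers the first index it reaps (firstidx)
 * and how many descriptors it walks (cnt), so a single POSTWRITE
 * TI_CDTXSYNC() over that range replaces a per-descriptor sync.
 */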
static void
ti_txeof_tigon2(struct ti_softc *sc)
{
	struct ti_tx_desc *cur_tx = NULL;
	struct ifnet *ifp;
	struct txdmamap_pool_entry *dma;
	int firstidx, cnt;

	ifp = &sc->ethercom.ec_if;

	/*
	 * Go through our tx ring and free mbufs for those
	 * frames that have been sent.
	 */
	firstidx = sc->ti_tx_saved_considx;
	cnt = 0;
	while (sc->ti_tx_saved_considx != sc->ti_tx_considx.ti_idx) {
		u_int32_t idx;

		idx = sc->ti_tx_saved_considx;
		cur_tx = &sc->ti_rdata->ti_tx_ring[idx];
		if (cur_tx->ti_flags & TI_BDFLAG_END)
			ifp->if_opackets++;
		if (sc->ti_cdata.ti_tx_chain[idx] != NULL) {
			m_freem(sc->ti_cdata.ti_tx_chain[idx]);
			sc->ti_cdata.ti_tx_chain[idx] = NULL;

			dma = sc->txdma[idx];
			KDASSERT(dma != NULL);
			bus_dmamap_sync(sc->sc_dmat, dma->dmamap, 0,
			    dma->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, dma->dmamap);

			SIMPLEQ_INSERT_HEAD(&sc->txdma_list, dma, link);
			sc->txdma[idx] = NULL;
		}
		cnt++;
		sc->ti_txcnt--;
		TI_INC(sc->ti_tx_saved_considx, TI_TX_RING_CNT);
	}

	if (cnt != 0)
		TI_CDTXSYNC(sc, firstidx, cnt, BUS_DMASYNC_POSTWRITE);

	if (cur_tx != NULL)
		ifp->if_flags &= ~IFF_OACTIVE;
}
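/*
 * Interrupt handling note: the Tigon masks and unmasks its own
 * interrupt through the TI_MB_HOSTINTR mailbox.  Writing 1 acks the
 * pending interrupt and holds further ones off while the handler runs;
 * writing 0 re-arms it.  A minimal sketch of the handler's shape
 * (illustration only):
 *
 *	CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1);	(mask and ack)
 *	...service the RX return ring and TX consumer index...
 *	CSR_WRITE_4(sc, TI_MB_HOSTINTR, 0);	(unmask)
 */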
static int
ti_intr(void *xsc)
{
	struct ti_softc *sc;
	struct ifnet *ifp;

	sc = xsc;
	ifp = &sc->ethercom.ec_if;

#ifdef notdef
	/* Avoid this for now -- checking this register is expensive. */
	/* Make sure this is really our interrupt. */
	if (!(CSR_READ_4(sc, TI_MISC_HOST_CTL) & TI_MHC_INTSTATE))
		return (0);
#endif

	/* Ack interrupt and stop others from occurring. */
	CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1);

	if (ifp->if_flags & IFF_RUNNING) {
		/* Check RX return ring producer/consumer */
		ti_rxeof(sc);

		/* Check TX ring producer/consumer */
		(*sc->sc_tx_eof)(sc);
	}

	ti_handle_events(sc);

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, TI_MB_HOSTINTR, 0);

	if ((ifp->if_flags & IFF_RUNNING) != 0 &&
	    IFQ_IS_EMPTY(&ifp->if_snd) == 0)
		ti_start(ifp);

	return (1);
}
static void
ti_stats_update(struct ti_softc *sc)
{
	struct ifnet *ifp;

	ifp = &sc->ethercom.ec_if;

	TI_CDSTATSSYNC(sc, BUS_DMASYNC_POSTREAD);

	ifp->if_collisions +=
	   (sc->ti_rdata->ti_info.ti_stats.dot3StatsSingleCollisionFrames +
	   sc->ti_rdata->ti_info.ti_stats.dot3StatsMultipleCollisionFrames +
	   sc->ti_rdata->ti_info.ti_stats.dot3StatsExcessiveCollisions +
	   sc->ti_rdata->ti_info.ti_stats.dot3StatsLateCollisions) -
	   ifp->if_collisions;

	TI_CDSTATSSYNC(sc, BUS_DMASYNC_PREREAD);
}
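/*
 * Transmit checksum offload note: the host checksum-request flags are
 * translated into Tigon buffer-descriptor flags by the encap routines
 * below; roughly, M_CSUM_IPv4 becomes TI_BDFLAG_IP_CKSUM and
 * M_CSUM_TCPv4/M_CSUM_UDPv4 become TI_BDFLAG_TCP_UDP_CKSUM.  On receive
 * the firmware hands back the raw ones-complement sum (it does not
 * invert it), which ti_rxeof() stores in csum_data together with
 * M_CSUM_DATA so the stack finishes the verification.
 */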
/*
 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
 * pointers to descriptors.
 */
static int
ti_encap_tigon1(struct ti_softc *sc, struct mbuf *m_head, u_int32_t *txidx)
{
	struct ti_tx_desc *f = NULL;
	u_int32_t frag, cur, cnt = 0;
	struct txdmamap_pool_entry *dma;
	bus_dmamap_t dmamap;
	struct m_tag *mtag;
	int error, i;
	u_int16_t csum_flags = 0;

	dma = SIMPLEQ_FIRST(&sc->txdma_list);
	if (dma == NULL)
		return (ENOMEM);
	dmamap = dma->dmamap;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m_head,
	    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	if (error) {
		struct mbuf *m;
		int j = 0;

		for (m = m_head; m; m = m->m_next)
			j++;
		printf("ti_encap: bus_dmamap_load_mbuf (len %d, %d frags) "
		       "error %d\n", m_head->m_pkthdr.len, j, error);
		return (ENOMEM);
	}

	cur = frag = *txidx;

	if (m_head->m_pkthdr.csum_flags & M_CSUM_IPv4) {
		/* IP header checksum field must be 0! */
		csum_flags |= TI_BDFLAG_IP_CKSUM;
	}
	if (m_head->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4))
		csum_flags |= TI_BDFLAG_TCP_UDP_CKSUM;

	/* XXX fragmented packet checksum capability? */

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	for (i = 0; i < dmamap->dm_nsegs; i++) {
		if (frag > 383)
			CSR_WRITE_4(sc, TI_WINBASE,
			    TI_TX_RING_BASE + 6144);
		else if (frag > 255)
			CSR_WRITE_4(sc, TI_WINBASE,
			    TI_TX_RING_BASE + 4096);
		else if (frag > 127)
			CSR_WRITE_4(sc, TI_WINBASE,
			    TI_TX_RING_BASE + 2048);
		else
			CSR_WRITE_4(sc, TI_WINBASE,
			    TI_TX_RING_BASE);
		f = &sc->ti_tx_ring_nic[frag % 128];
		if (sc->ti_cdata.ti_tx_chain[frag] != NULL)
			break;
		TI_HOSTADDR(f->ti_addr) = dmamap->dm_segs[i].ds_addr;
		f->ti_len = dmamap->dm_segs[i].ds_len;
		f->ti_flags = csum_flags;
		if ((mtag = VLAN_OUTPUT_TAG(&sc->ethercom, m_head))) {
			f->ti_flags |= TI_BDFLAG_VLAN_TAG;
			f->ti_vlan_tag = VLAN_TAG_VALUE(mtag);
		}
		/*
		 * Sanity check: avoid coming within 16 descriptors
		 * of the end of the ring.
		 */
		if ((TI_TX_RING_CNT - (sc->ti_txcnt + cnt)) < 16)
			return (ENOBUFS);
		cur = frag;
		TI_INC(frag, TI_TX_RING_CNT);
		cnt++;
	}

	if (i < dmamap->dm_nsegs)
		return (ENOBUFS);

	if (frag == sc->ti_tx_saved_considx)
		return (ENOBUFS);

	sc->ti_tx_ring_nic[cur % 128].ti_flags |=
	    TI_BDFLAG_END;

	/* Sync the packet's DMA map. */
	bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	sc->ti_cdata.ti_tx_chain[cur] = m_head;
	SIMPLEQ_REMOVE_HEAD(&sc->txdma_list, link);
	sc->txdma[cur] = dma;
	sc->ti_txcnt += cnt;

	*txidx = frag;

	return (0);
}
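/*
 * ti_encap_tigon2() below is the host-ring variant of the routine
 * above: descriptors are written straight into sc->ti_rdata->ti_tx_ring
 * and then flushed with TI_CDTXSYNC(..., BUS_DMASYNC_PREWRITE) before
 * ti_start() bumps the TI_MB_SENDPROD_IDX mailbox, so the NIC never
 * sees a partially written descriptor.
 */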
static int
ti_encap_tigon2(struct ti_softc *sc, struct mbuf *m_head, u_int32_t *txidx)
{
	struct ti_tx_desc *f = NULL;
	u_int32_t frag, firstfrag, cur, cnt = 0;
	struct txdmamap_pool_entry *dma;
	bus_dmamap_t dmamap;
	struct m_tag *mtag;
	int error, i;
	u_int16_t csum_flags = 0;

	dma = SIMPLEQ_FIRST(&sc->txdma_list);
	if (dma == NULL)
		return (ENOMEM);
	dmamap = dma->dmamap;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m_head,
	    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	if (error) {
		struct mbuf *m;
		int j = 0;

		for (m = m_head; m; m = m->m_next)
			j++;
		printf("ti_encap: bus_dmamap_load_mbuf (len %d, %d frags) "
		       "error %d\n", m_head->m_pkthdr.len, j, error);
		return (ENOMEM);
	}

	cur = firstfrag = frag = *txidx;

	if (m_head->m_pkthdr.csum_flags & M_CSUM_IPv4) {
		/* IP header checksum field must be 0! */
		csum_flags |= TI_BDFLAG_IP_CKSUM;
	}
	if (m_head->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4))
		csum_flags |= TI_BDFLAG_TCP_UDP_CKSUM;

	/* XXX fragmented packet checksum capability? */

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	for (i = 0; i < dmamap->dm_nsegs; i++) {
		f = &sc->ti_rdata->ti_tx_ring[frag];
		if (sc->ti_cdata.ti_tx_chain[frag] != NULL)
			break;
		TI_HOSTADDR(f->ti_addr) = dmamap->dm_segs[i].ds_addr;
		f->ti_len = dmamap->dm_segs[i].ds_len;
		f->ti_flags = csum_flags;
		if ((mtag = VLAN_OUTPUT_TAG(&sc->ethercom, m_head))) {
			f->ti_flags |= TI_BDFLAG_VLAN_TAG;
			f->ti_vlan_tag = VLAN_TAG_VALUE(mtag);
		}
		/*
		 * Sanity check: avoid coming within 16 descriptors
		 * of the end of the ring.
		 */
		if ((TI_TX_RING_CNT - (sc->ti_txcnt + cnt)) < 16)
			return (ENOBUFS);
		cur = frag;
		TI_INC(frag, TI_TX_RING_CNT);
		cnt++;
	}

	if (i < dmamap->dm_nsegs)
		return (ENOBUFS);

	if (frag == sc->ti_tx_saved_considx)
		return (ENOBUFS);

	sc->ti_rdata->ti_tx_ring[cur].ti_flags |= TI_BDFLAG_END;

	/* Sync the packet's DMA map. */
	bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* Sync the descriptors we are using. */
	TI_CDTXSYNC(sc, firstfrag, cnt, BUS_DMASYNC_PREWRITE);

	sc->ti_cdata.ti_tx_chain[cur] = m_head;
	SIMPLEQ_REMOVE_HEAD(&sc->txdma_list, link);
	sc->txdma[cur] = dma;
	sc->ti_txcnt += cnt;

	*txidx = frag;

	return (0);
}
/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit descriptors.
 */
static void
ti_start(struct ifnet *ifp)
{
	struct ti_softc *sc;
	struct mbuf *m_head = NULL;
	u_int32_t prodidx = 0;

	sc = ifp->if_softc;

	prodidx = CSR_READ_4(sc, TI_MB_SENDPROD_IDX);

	while (sc->ti_cdata.ti_tx_chain[prodidx] == NULL) {
		IFQ_POLL(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if ((*sc->sc_tx_encap)(sc, m_head, &prodidx)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m_head);

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m_head);
	}

	/* Transmit */
	CSR_WRITE_4(sc, TI_MB_SENDPROD_IDX, prodidx);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}
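/*
 * Note on the queueing discipline used by ti_start() above: a frame is
 * only IFQ_POLL()ed (peeked at) first; if the encap routine fails for
 * lack of descriptors the mbuf stays on the send queue, IFF_OACTIVE is
 * set and the loop stops, and only on success is the frame actually
 * IFQ_DEQUEUE()d and handed to BPF.  The NIC is kicked once, after the
 * loop, by writing the new producer index to the TI_MB_SENDPROD_IDX
 * mailbox.
 */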
static void
ti_init(void *xsc)
{
	struct ti_softc *sc = xsc;

	/* Cancel pending I/O and flush buffers. */
	ti_stop(sc);

	/* Init the gen info block, ring control blocks and firmware. */
	if (ti_gibinit(sc)) {
		aprint_error_dev(&sc->sc_dev, "initialization failure\n");
		return;
	}
}
static void
ti_init2(struct ti_softc *sc)
{
	struct ti_cmd_desc cmd;
	const u_int8_t *m;
	struct ifnet *ifp;
	struct ifmedia *ifm;
	int tmp;

	ifp = &sc->ethercom.ec_if;

	/* Specify MTU and interface index. */
	CSR_WRITE_4(sc, TI_GCR_IFINDEX, device_unit(&sc->sc_dev)); /* ??? */

	tmp = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
	if (sc->ethercom.ec_capenable & ETHERCAP_VLAN_MTU)
		tmp += ETHER_VLAN_ENCAP_LEN;
	CSR_WRITE_4(sc, TI_GCR_IFMTU, tmp);

	TI_DO_CMD(TI_CMD_UPDATE_GENCOM, 0, 0);

	/* Load our MAC address. */
	m = (const u_int8_t *)CLLADDR(ifp->if_sadl);
	CSR_WRITE_4(sc, TI_GCR_PAR0, (m[0] << 8) | m[1]);
	CSR_WRITE_4(sc, TI_GCR_PAR1, (m[2] << 24) | (m[3] << 16)
	    | (m[4] << 8) | m[5]);
	TI_DO_CMD(TI_CMD_SET_MAC_ADDR, 0, 0);

	/* Enable or disable promiscuous mode as needed. */
	if (ifp->if_flags & IFF_PROMISC) {
		TI_DO_CMD(TI_CMD_SET_PROMISC_MODE, TI_CMD_CODE_PROMISC_ENB, 0);
	} else {
		TI_DO_CMD(TI_CMD_SET_PROMISC_MODE, TI_CMD_CODE_PROMISC_DIS, 0);
	}

	/* Program multicast filter. */
	ti_setmulti(sc);

	/*
	 * If this is a Tigon 1, we should tell the
	 * firmware to use software packet filtering.
	 */
	if (sc->ti_hwrev == TI_HWREV_TIGON) {
		TI_DO_CMD(TI_CMD_FDR_FILTERING, TI_CMD_CODE_FILT_ENB, 0);
	}

	/* Init standard RX ring. */
	ti_init_rx_ring_std(sc);

	/* Init jumbo RX ring. */
	if (ifp->if_mtu > (MCLBYTES - ETHER_HDR_LEN - ETHER_CRC_LEN))
		ti_init_rx_ring_jumbo(sc);

	/*
	 * If this is a Tigon 2, we can also configure the
	 * mini receive ring.
	 */
	if (sc->ti_hwrev == TI_HWREV_TIGON_II)
		ti_init_rx_ring_mini(sc);

	CSR_WRITE_4(sc, TI_GCR_RXRETURNCONS_IDX, 0);
	sc->ti_rx_saved_considx = 0;

	/* Init TX ring. */
	ti_init_tx_ring(sc);

	/* Tell firmware we're alive. */
	TI_DO_CMD(TI_CMD_HOST_STATE, TI_CMD_CODE_STACK_UP, 0);

	/* Enable host interrupts. */
	CSR_WRITE_4(sc, TI_MB_HOSTINTR, 0);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Make sure to set media properly. We have to do this
	 * here since we have to issue commands in order to set
	 * the link negotiation and we can't issue commands until
	 * the firmware is running.
	 */
	ifm = &sc->ifmedia;
	tmp = ifm->ifm_media;
	ifm->ifm_media = ifm->ifm_cur->ifm_media;
	ti_ifmedia_upd(ifp);
	ifm->ifm_media = tmp;
}
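/*
 * Media selection note: the Tigon exposes two link control registers,
 * TI_GCR_GLINK for the gigabit MAC and TI_GCR_LINK for 10/100
 * operation.  ti_ifmedia_upd() below programs one or both (autoselect
 * enables autonegotiation in both, a forced gigabit media zeroes
 * TI_GCR_LINK, a forced 10/100 media zeroes TI_GCR_GLINK) and then
 * kicks the firmware with a TI_CMD_LINK_NEGOTIATION command.
 */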
/*
 * Set media options.
 */
static int
ti_ifmedia_upd(struct ifnet *ifp)
{
	struct ti_softc *sc;
	struct ifmedia *ifm;
	struct ti_cmd_desc cmd;

	sc = ifp->if_softc;
	ifm = &sc->ifmedia;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		CSR_WRITE_4(sc, TI_GCR_GLINK, TI_GLNK_PREF|TI_GLNK_1000MB|
		    TI_GLNK_FULL_DUPLEX|TI_GLNK_RX_FLOWCTL_Y|
		    TI_GLNK_AUTONEGENB|TI_GLNK_ENB);
		CSR_WRITE_4(sc, TI_GCR_LINK, TI_LNK_100MB|TI_LNK_10MB|
		    TI_LNK_FULL_DUPLEX|TI_LNK_HALF_DUPLEX|
		    TI_LNK_AUTONEGENB|TI_LNK_ENB);
		TI_DO_CMD(TI_CMD_LINK_NEGOTIATION,
		    TI_CMD_CODE_NEGOTIATE_BOTH, 0);
		break;
	case IFM_1000_SX:
	case IFM_1000_T:
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
			CSR_WRITE_4(sc, TI_GCR_GLINK,
			    TI_GLNK_PREF|TI_GLNK_1000MB|TI_GLNK_FULL_DUPLEX|
			    TI_GLNK_RX_FLOWCTL_Y|TI_GLNK_ENB);
		} else {
			CSR_WRITE_4(sc, TI_GCR_GLINK,
			    TI_GLNK_PREF|TI_GLNK_1000MB|
			    TI_GLNK_RX_FLOWCTL_Y|TI_GLNK_ENB);
		}
		CSR_WRITE_4(sc, TI_GCR_LINK, 0);
		TI_DO_CMD(TI_CMD_LINK_NEGOTIATION,
		    TI_CMD_CODE_NEGOTIATE_GIGABIT, 0);
		break;
	case IFM_100_FX:
	case IFM_10_FL:
	case IFM_100_TX:
	case IFM_10_T:
		CSR_WRITE_4(sc, TI_GCR_GLINK, 0);
		CSR_WRITE_4(sc, TI_GCR_LINK, TI_LNK_ENB|TI_LNK_PREF);
		if (IFM_SUBTYPE(ifm->ifm_media) == IFM_100_FX ||
		    IFM_SUBTYPE(ifm->ifm_media) == IFM_100_TX) {
			TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_100MB);
		} else {
			TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_10MB);
		}
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
			TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_FULL_DUPLEX);
		} else {
			TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_HALF_DUPLEX);
		}
		TI_DO_CMD(TI_CMD_LINK_NEGOTIATION,
		    TI_CMD_CODE_NEGOTIATE_10_100, 0);
		break;
	}

	sc->ethercom.ec_if.if_baudrate =
	    ifmedia_baudrate(ifm->ifm_media);

	return (0);
}
/*
 * Report current media status.
 */
static void
ti_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct ti_softc *sc;
	u_int32_t media = 0;

	sc = ifp->if_softc;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (sc->ti_linkstat == TI_EV_CODE_LINK_DOWN)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;

	if (sc->ti_linkstat == TI_EV_CODE_GIG_LINK_UP) {
		media = CSR_READ_4(sc, TI_GCR_GLINK_STAT);
		if (sc->ti_copper)
			ifmr->ifm_active |= IFM_1000_T;
		else
			ifmr->ifm_active |= IFM_1000_SX;
		if (media & TI_GLNK_FULL_DUPLEX)
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;
	} else if (sc->ti_linkstat == TI_EV_CODE_LINK_UP) {
		media = CSR_READ_4(sc, TI_GCR_LINK_STAT);
		if (sc->ti_copper) {
			if (media & TI_LNK_100MB)
				ifmr->ifm_active |= IFM_100_TX;
			if (media & TI_LNK_10MB)
				ifmr->ifm_active |= IFM_10_T;
		} else {
			if (media & TI_LNK_100MB)
				ifmr->ifm_active |= IFM_100_FX;
			if (media & TI_LNK_10MB)
				ifmr->ifm_active |= IFM_10_FL;
		}
		if (media & TI_LNK_FULL_DUPLEX)
			ifmr->ifm_active |= IFM_FDX;
		if (media & TI_LNK_HALF_DUPLEX)
			ifmr->ifm_active |= IFM_HDX;
	}

	sc->ethercom.ec_if.if_baudrate =
	    ifmedia_baudrate(sc->ifmedia.ifm_media);
}
static int
ti_ether_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct ifaddr *ifa = (struct ifaddr *) data;
	struct ti_softc *sc = ifp->if_softc;

	if ((ifp->if_flags & IFF_UP) == 0) {
		ifp->if_flags |= IFF_UP;
		ti_init(sc);
	}

	switch (cmd) {
	case SIOCINITIFADDR:
		switch (ifa->ifa_addr->sa_family) {
		case AF_INET:
			arp_ifinit(ifp, ifa);
			break;
		default:
			break;
		}
		break;

	default:
		break;
	}

	return (0);
}
static int
ti_ioctl(struct ifnet *ifp, u_long command, void *data)
{
	struct ti_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, error = 0;
	struct ti_cmd_desc cmd;

	s = splnet();

	switch (command) {
	case SIOCINITIFADDR:
		error = ti_ether_ioctl(ifp, command, data);
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU_JUMBO)
			error = EINVAL;
		else if ((error = ifioctl_common(ifp, command, data)) == ENETRESET){
			error = 0;
		}
		break;
	case SIOCSIFFLAGS:
		if ((error = ifioctl_common(ifp, command, data)) != 0)
			break;
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the state of the PROMISC flag changed,
			 * then just use the 'set promisc mode' command
			 * instead of reinitializing the entire NIC. Doing
			 * a full re-init means reloading the firmware and
			 * waiting for it to start up, which may take a
			 * second or two.
			 */
			if (ifp->if_flags & IFF_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->ti_if_flags & IFF_PROMISC)) {
				TI_DO_CMD(TI_CMD_SET_PROMISC_MODE,
				    TI_CMD_CODE_PROMISC_ENB, 0);
			} else if (ifp->if_flags & IFF_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->ti_if_flags & IFF_PROMISC) {
				TI_DO_CMD(TI_CMD_SET_PROMISC_MODE,
				    TI_CMD_CODE_PROMISC_DIS, 0);
			} else
				ti_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				ti_stop(sc);
		}
		sc->ti_if_flags = ifp->if_flags;
		error = 0;
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
		break;
	default:
		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
			break;

		error = 0;

		if (command == SIOCSIFCAP)
			ti_init(sc);
		else if (command != SIOCADDMULTI && command != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING)
			ti_setmulti(sc);
		break;
	}

	splx(s);

	return (error);
}
static void
ti_watchdog(struct ifnet *ifp)
{
	struct ti_softc *sc;

	sc = ifp->if_softc;

	aprint_error_dev(&sc->sc_dev, "watchdog timeout -- resetting\n");
	ti_stop(sc);
	ti_init(sc);

	ifp->if_oerrors++;
}
/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
ti_stop(struct ti_softc *sc)
{
	struct ifnet *ifp;
	struct ti_cmd_desc cmd;

	ifp = &sc->ethercom.ec_if;

	/* Disable host interrupts. */
	CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1);

	/*
	 * Tell firmware we're shutting down.
	 */
	TI_DO_CMD(TI_CMD_HOST_STATE, TI_CMD_CODE_STACK_DOWN, 0);

	/* Halt and reinitialize. */
	ti_chipinit(sc);
	ti_mem(sc, 0x2000, 0x100000 - 0x2000, NULL);
	ti_chipinit(sc);

	/* Free the RX lists. */
	ti_free_rx_ring_std(sc);

	/* Free jumbo RX list. */
	ti_free_rx_ring_jumbo(sc);

	/* Free mini RX list. */
	ti_free_rx_ring_mini(sc);

	/* Free TX buffers. */
	ti_free_tx_ring(sc);

	sc->ti_ev_prodidx.ti_idx = 0;
	sc->ti_return_prodidx.ti_idx = 0;
	sc->ti_tx_considx.ti_idx = 0;
	sc->ti_tx_saved_considx = TI_TXCONS_UNSET;

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}
/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static bool
ti_shutdown(device_t self, int howto)
{
	struct ti_softc *sc;

	sc = device_private(self);
	ti_chipinit(sc);

	return true;
}