/*	$NetBSD: if_kse.c,v 1.19 2009/05/12 08:23:00 cegger Exp $	*/

/*-
 * Copyright (c) 2006 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Tohru Nishimura.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_kse.c,v 1.19 2009/05/12 08:23:00 cegger Exp $");

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>

#include <machine/endian.h>
#include <sys/bus.h>
#include <sys/intr.h>

#include <net/if.h>
#include <net/if_media.h>
#include <net/if_dl.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcidevs.h>
#define CSR_READ_4(sc, off) \
	bus_space_read_4(sc->sc_st, sc->sc_sh, off)
#define CSR_WRITE_4(sc, off, val) \
	bus_space_write_4(sc->sc_st, sc->sc_sh, off, val)
#define CSR_READ_2(sc, off) \
	bus_space_read_2(sc->sc_st, sc->sc_sh, off)
#define CSR_WRITE_2(sc, off, val) \
	bus_space_write_2(sc->sc_st, sc->sc_sh, off, val)
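/*
 * All chip access goes through the memory-mapped register window
 * established in kse_attach(); the DMA engine registers below use the
 * 4-byte accessors, the MAC address, switch and port registers the
 * 2-byte ones.
 */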
#define MDTXC	0x000	/* DMA transmit control */
#define MDRXC	0x004	/* DMA receive control */
#define MDTSC	0x008	/* DMA transmit start */
#define MDRSC	0x00c	/* DMA receive start */
#define TDLB	0x010	/* transmit descriptor list base */
#define RDLB	0x014	/* receive descriptor list base */
#define MTR0	0x020	/* multicast table 31:0 */
#define MTR1	0x024	/* multicast table 63:32 */
#define INTEN	0x028	/* interrupt enable */
#define INTST	0x02c	/* interrupt status */
#define MARL	0x200	/* MAC address low */
#define MARM	0x202	/* MAC address middle */
#define MARH	0x204	/* MAC address high */
#define GRR	0x216	/* global reset */
#define CIDR	0x400	/* chip ID and enable */
#define CGCR	0x40a	/* chip global control */
#define IACR	0x4a0	/* indirect access control */
#define IADR1	0x4a2	/* indirect access data 66:63 */
#define IADR2	0x4a4	/* indirect access data 47:32 */
#define IADR3	0x4a6	/* indirect access data 63:48 */
#define IADR4	0x4a8	/* indirect access data 15:0 */
#define IADR5	0x4aa	/* indirect access data 31:16 */
#define P1CR4	0x512	/* port 1 control 4 */
#define P1SR	0x514	/* port 1 status */
#define P2CR4	0x532	/* port 2 control 4 */
#define P2SR	0x534	/* port 2 status */

#define TXC_BS_MSK	0x3f000000	/* burst size */
#define TXC_BS_SFT	(24)		/* 1,2,4,8,16,32 or 0 for unlimited */
#define TXC_UCG		(1U<<18)	/* generate UDP checksum */
#define TXC_TCG		(1U<<17)	/* generate TCP checksum */
#define TXC_ICG		(1U<<16)	/* generate IP checksum */
#define TXC_FCE		(1U<<9)		/* enable flowcontrol */
#define TXC_EP		(1U<<2)		/* enable automatic padding */
#define TXC_AC		(1U<<1)		/* add CRC to frame */
#define TXC_TEN		(1)		/* enable DMA to run */
#define RXC_BS_MSK	0x3f000000	/* burst size */
#define RXC_BS_SFT	(24)		/* 1,2,4,8,16,32 or 0 for unlimited */
#define RXC_IHAE	(1U<<19)	/* IP header alignment enable */
#define RXC_UCC		(1U<<18)	/* run UDP checksum */
#define RXC_TCC		(1U<<17)	/* run TCP checksum */
#define RXC_ICC		(1U<<16)	/* run IP checksum */
#define RXC_FCE		(1U<<9)		/* enable flowcontrol */
#define RXC_RB		(1U<<6)		/* receive broadcast frame */
#define RXC_RM		(1U<<5)		/* receive multicast frame */
#define RXC_RU		(1U<<4)		/* receive unicast frame */
#define RXC_RE		(1U<<3)		/* accept error frame */
#define RXC_RA		(1U<<2)		/* receive all frame */
#define RXC_MHTE	(1U<<1)		/* use multicast hash table */
#define RXC_REN		(1)		/* enable DMA to run */
#define INT_DMLCS	(1U<<31)	/* link status change */
#define INT_DMTS	(1U<<30)	/* sending desc. has posted Tx done */
#define INT_DMRS	(1U<<29)	/* frame was received */
#define INT_DMRBUS	(1U<<27)	/* Rx descriptor pool is full */

#define T0_OWN		(1U<<31)	/* desc is ready to Tx */

#define R0_OWN		(1U<<31)	/* desc is empty */
#define R0_FS		(1U<<30)	/* first segment of frame */
#define R0_LS		(1U<<29)	/* last segment of frame */
#define R0_IPE		(1U<<28)	/* IP checksum error */
#define R0_TCPE		(1U<<27)	/* TCP checksum error */
#define R0_UDPE		(1U<<26)	/* UDP checksum error */
#define R0_ES		(1U<<25)	/* error summary */
#define R0_MF		(1U<<24)	/* multicast frame */
#define R0_SPN		0x00300000	/* 21:20 switch port 1/2 */
#define R0_ALIGN	0x00300000	/* 21:20 (KSZ8692P) Rx align amount */
#define R0_RE		(1U<<19)	/* MII reported error */
#define R0_TL		(1U<<18)	/* frame too long, beyond 1518 */
#define R0_RF		(1U<<17)	/* damaged runt frame */
#define R0_CE		(1U<<16)	/* CRC error */
#define R0_FT		(1U<<15)	/* frame type */
#define R0_FL_MASK	0x7ff		/* frame length 10:0 */

#define T1_IC		(1U<<31)	/* post interrupt on complete */
#define T1_FS		(1U<<30)	/* first segment of frame */
#define T1_LS		(1U<<29)	/* last segment of frame */
#define T1_IPCKG	(1U<<28)	/* generate IP checksum */
#define T1_TCPCKG	(1U<<27)	/* generate TCP checksum */
#define T1_UDPCKG	(1U<<26)	/* generate UDP checksum */
#define T1_TER		(1U<<25)	/* end of ring */
#define T1_SPN		0x00300000	/* 21:20 switch port 1/2 */
#define T1_TBS_MASK	0x7ff		/* segment size 10:0 */

#define R1_RER		(1U<<25)	/* end of ring */
#define R1_RBS_MASK	0x7fc		/* segment size 10:0 */

#define KSE_NTXSEGS		16
#define KSE_TXQUEUELEN		64
#define KSE_TXQUEUELEN_MASK	(KSE_TXQUEUELEN - 1)
#define KSE_TXQUEUE_GC		(KSE_TXQUEUELEN / 4)
#define KSE_NTXDESC		256
#define KSE_NTXDESC_MASK	(KSE_NTXDESC - 1)
#define KSE_NEXTTX(x)		(((x) + 1) & KSE_NTXDESC_MASK)
#define KSE_NEXTTXS(x)		(((x) + 1) & KSE_TXQUEUELEN_MASK)

#define KSE_NRXDESC		64
#define KSE_NRXDESC_MASK	(KSE_NRXDESC - 1)
#define KSE_NEXTRX(x)		(((x) + 1) & KSE_NRXDESC_MASK)
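/*
 * Tx/Rx descriptors are 16 bytes, four 32-bit words each, as the DMA
 * engine consumes them below: t0/r0 carry the status bits and the OWN
 * handshake, t1/r1 the per-segment control bits and buffer length,
 * t2/r2 the buffer bus address, and t3/r3 the link to the next
 * descriptor in the ring.
 */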
struct tdes {
	uint32_t t0, t1, t2, t3;
};

struct rdes {
	uint32_t r0, r1, r2, r3;
};

struct kse_control_data {
	struct tdes kcd_txdescs[KSE_NTXDESC];
	struct rdes kcd_rxdescs[KSE_NRXDESC];
};

#define KSE_CDOFF(x)		offsetof(struct kse_control_data, x)
#define KSE_CDTXOFF(x)		KSE_CDOFF(kcd_txdescs[(x)])
#define KSE_CDRXOFF(x)		KSE_CDOFF(kcd_rxdescs[(x)])
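/*
 * Both rings live in the single kse_control_data block, so one DMA map
 * covers them; KSE_CDTXOFF/KSE_CDRXOFF yield the byte offset of an
 * individual descriptor for the partial syncs done by KSE_CDTXSYNC and
 * KSE_CDRXSYNC below.
 */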
struct kse_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

struct kse_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};
struct kse_softc {
	struct device sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* Ethernet common data */
	void *sc_ih;			/* interrupt cookie */

	struct ifmedia sc_media;	/* ifmedia information */
	int sc_media_status;		/* PHY */
	int sc_media_active;		/* PHY */
	callout_t sc_callout;		/* MII tick callout */
	callout_t sc_stat_ch;		/* statistics counter callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	struct kse_control_data *sc_control_data;
#define sc_txdescs	sc_control_data->kcd_txdescs
#define sc_rxdescs	sc_control_data->kcd_rxdescs

	struct kse_txsoft sc_txsoft[KSE_TXQUEUELEN];
	struct kse_rxsoft sc_rxsoft[KSE_NRXDESC];
	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */
	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next ready Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */
	int sc_rxptr;			/* next ready Rx descriptor/descsoft */

	uint32_t sc_txc, sc_rxc;
	uint32_t sc_t1csum;		/* T1_*CKG bits for outgoing frames */
	int sc_mcsum;			/* M_CSUM_* flags for received frames */
	uint32_t sc_inten;		/* enabled interrupt bits */
	int sc_chip;			/* 0x8841 or 0x8842 product id */
	uint8_t sc_altmac[16][ETHER_ADDR_LEN];
	uint16_t sc_vlan[16];

#ifdef KSE_EVENT_COUNTERS
	struct ksext {
		char evcntname[3][8];
		struct evcnt pev[3][34];
	} sc_ext;			/* switch statistics */
#endif
};
#define KSE_CDTXADDR(sc, x)	((sc)->sc_cddma + KSE_CDTXOFF((x)))
#define KSE_CDRXADDR(sc, x)	((sc)->sc_cddma + KSE_CDRXOFF((x)))

#define KSE_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > KSE_NTXDESC) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    KSE_CDTXOFF(__x), sizeof(struct tdes) *		\
		    (KSE_NTXDESC - __x), (ops));			\
		__n -= (KSE_NTXDESC - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    KSE_CDTXOFF(__x), sizeof(struct tdes) * __n, (ops));	\
} while (/*CONSTCOND*/0)
#define KSE_CDRXSYNC(sc, x, ops)					\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    KSE_CDRXOFF((x)), sizeof(struct rdes), (ops));		\
} while (/*CONSTCOND*/0)

#define KSE_INIT_RXDESC(sc, x)						\
do {									\
	struct kse_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	struct rdes *__rxd = &(sc)->sc_rxdescs[(x)];			\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	__m->m_data = __m->m_ext.ext_buf;				\
	__rxd->r2 = __rxs->rxs_dmamap->dm_segs[0].ds_addr;		\
	__rxd->r1 = R1_RBS_MASK /* __m->m_ext.ext_size */;		\
	__rxd->r0 = R0_OWN;						\
	KSE_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
} while (/*CONSTCOND*/0)
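/*
 * KSE_INIT_RXDESC recycles the receive mbuf in place: it rewinds
 * m_data to the start of the cluster, points the descriptor at the
 * cluster's DMA address with the maximum buffer size in r1, and then
 * hands the descriptor back to the chip by setting R0_OWN.
 */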
u_int kse_burstsize = 8;	/* DMA burst length tuning knob */

#ifdef KSEDIAGNOSTIC
u_int kse_monitor_rxintr;	/* fragmented UDP csum HW bug hook */
#endif
static int kse_match(device_t, cfdata_t, void *);
static void kse_attach(device_t, device_t, void *);

CFATTACH_DECL(kse, sizeof(struct kse_softc),
    kse_match, kse_attach, NULL, NULL);

static int kse_ioctl(struct ifnet *, u_long, void *);
static void kse_start(struct ifnet *);
static void kse_watchdog(struct ifnet *);
static int kse_init(struct ifnet *);
static void kse_stop(struct ifnet *, int);
static void kse_reset(struct kse_softc *);
static void kse_set_filter(struct kse_softc *);
static int add_rxbuf(struct kse_softc *, int);
static void rxdrain(struct kse_softc *);
static int kse_intr(void *);
static void rxintr(struct kse_softc *);
static void txreap(struct kse_softc *);
static void lnkchg(struct kse_softc *);
static int ifmedia_upd(struct ifnet *);
static void ifmedia_sts(struct ifnet *, struct ifmediareq *);
static void phy_tick(void *);
static int ifmedia2_upd(struct ifnet *);
static void ifmedia2_sts(struct ifnet *, struct ifmediareq *);
#ifdef KSE_EVENT_COUNTERS
static void stat_tick(void *);
static void zerostats(struct kse_softc *);
#endif
static int
kse_match(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;

	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_MICREL &&
	    (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_MICREL_KSZ8842 ||
	     PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_MICREL_KSZ8841) &&
	    PCI_CLASS(pa->pa_class) == PCI_CLASS_NETWORK)
		return 1;

	return 0;
}
static void
kse_attach(device_t parent, device_t self, void *aux)
{
	struct kse_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	struct ifmedia *ifm;
	uint8_t enaddr[ETHER_ADDR_LEN];
	bus_dma_segment_t seg;
	int i, p, error, nseg;
	pcireg_t pmode;
	int pmreg;

	if (pci_mapreg_map(pa, 0x10,
	    PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT,
	    0, &sc->sc_st, &sc->sc_sh, NULL, NULL) != 0) {
		printf(": unable to map device registers\n");
		return;
	}

	sc->sc_dmat = pa->pa_dmat;

	/* Make sure bus mastering is enabled. */
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
	    pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG) |
	    PCI_COMMAND_MASTER_ENABLE);

	/* Get it out of power save mode, if needed. */
	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
		pmode = pci_conf_read(pc, pa->pa_tag, pmreg + PCI_PMCSR) &
		    PCI_PMCSR_STATE_MASK;
		if (pmode == PCI_PMCSR_STATE_D3) {
			/*
			 * The card has lost all configuration data in
			 * this state, so punt.
			 */
			printf("%s: unable to wake from power state D3\n",
			    device_xname(&sc->sc_dev));
			return;
		}
		if (pmode != PCI_PMCSR_STATE_D0) {
			printf("%s: waking up from power state D%d\n",
			    device_xname(&sc->sc_dev), pmode);
			pci_conf_write(pc, pa->pa_tag, pmreg + PCI_PMCSR,
			    PCI_PMCSR_STATE_D0);
		}
	}

	sc->sc_chip = PCI_PRODUCT(pa->pa_id);
	printf(": Micrel KSZ%04x Ethernet (rev. 0x%02x)\n",
	    sc->sc_chip, PCI_REVISION(pa->pa_class));
	/*
	 * Read the Ethernet address from the EEPROM.
	 */
	i = CSR_READ_2(sc, MARL);
	enaddr[5] = i; enaddr[4] = i >> 8;
	i = CSR_READ_2(sc, MARM);
	enaddr[3] = i; enaddr[2] = i >> 8;
	i = CSR_READ_2(sc, MARH);
	enaddr[1] = i; enaddr[0] = i >> 8;
	printf("%s: Ethernet address: %s\n",
	    device_xname(&sc->sc_dev), ether_sprintf(enaddr));

	/*
	 * Enable chip function.
	 */
	CSR_WRITE_2(sc, CIDR, 1);

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(&sc->sc_dev, "unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, kse_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(&sc->sc_dev, "unable to establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(&sc->sc_dev, "interrupting at %s\n", intrstr);
	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct kse_control_data), PAGE_SIZE, 0, &seg, 1, &nseg, 0);
	if (error != 0) {
		aprint_error_dev(&sc->sc_dev,
		    "unable to allocate control data, error = %d\n", error);
		goto fail_0;
	}
	error = bus_dmamem_map(sc->sc_dmat, &seg, nseg,
	    sizeof(struct kse_control_data), (void **)&sc->sc_control_data,
	    BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(&sc->sc_dev,
		    "unable to map control data, error = %d\n", error);
		goto fail_1;
	}
	error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct kse_control_data), 1,
	    sizeof(struct kse_control_data), 0, 0, &sc->sc_cddmamap);
	if (error != 0) {
		aprint_error_dev(&sc->sc_dev,
		    "unable to create control data DMA map, "
		    "error = %d\n", error);
		goto fail_2;
	}
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct kse_control_data), NULL, 0);
	if (error != 0) {
		aprint_error_dev(&sc->sc_dev,
		    "unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}
	for (i = 0; i < KSE_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    KSE_NTXSEGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error_dev(&sc->sc_dev,
			    "unable to create tx DMA map %d, "
			    "error = %d\n", i, error);
			goto fail_4;
		}
	}
	for (i = 0; i < KSE_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    1, MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error_dev(&sc->sc_dev,
			    "unable to create rx DMA map %d, "
			    "error = %d\n", i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	callout_init(&sc->sc_callout, 0);
	callout_init(&sc->sc_stat_ch, 0);
	ifm = &sc->sc_media;
	if (sc->sc_chip == 0x8841) {
		ifmedia_init(ifm, 0, ifmedia_upd, ifmedia_sts);
		ifmedia_add(ifm, IFM_ETHER|IFM_10_T, 0, NULL);
		ifmedia_add(ifm, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
		ifmedia_add(ifm, IFM_ETHER|IFM_100_TX, 0, NULL);
		ifmedia_add(ifm, IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);
		ifmedia_add(ifm, IFM_ETHER|IFM_AUTO, 0, NULL);
		ifmedia_set(ifm, IFM_ETHER|IFM_AUTO);
	}
	else {
		ifmedia_init(ifm, 0, ifmedia2_upd, ifmedia2_sts);
		ifmedia_add(ifm, IFM_ETHER|IFM_AUTO, 0, NULL);
		ifmedia_set(ifm, IFM_ETHER|IFM_AUTO);
	}

	printf("%s: 10baseT, 10baseT-FDX, 100baseTX, 100baseTX-FDX, auto\n",
	    device_xname(&sc->sc_dev));

	ifp = &sc->sc_ethercom.ec_if;
	strlcpy(ifp->if_xname, device_xname(&sc->sc_dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = kse_ioctl;
	ifp->if_start = kse_start;
	ifp->if_watchdog = kse_watchdog;
	ifp->if_init = kse_init;
	ifp->if_stop = kse_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * KSZ8842 can handle 802.1Q VLAN-sized frames,
	 * can do IPv4, TCPv4, and UDPv4 checksums in hardware.
	 */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;
	ifp->if_capabilities |=
	    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
	    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
	    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;

	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	p = (sc->sc_chip == 0x8842) ? 3 : 1;
#ifdef KSE_EVENT_COUNTERS
	for (i = 0; i < p; i++) {
		struct ksext *ee = &sc->sc_ext;
		sprintf(ee->evcntname[i], "%s.%d",
		    device_xname(&sc->sc_dev), i + 1);
		evcnt_attach_dynamic(&ee->pev[i][0], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "RxLoPriotyByte");
		evcnt_attach_dynamic(&ee->pev[i][1], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "RxHiPriotyByte");
		evcnt_attach_dynamic(&ee->pev[i][2], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "RxUndersizePkt");
		evcnt_attach_dynamic(&ee->pev[i][3], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "RxFragments");
		evcnt_attach_dynamic(&ee->pev[i][4], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "RxOversize");
		evcnt_attach_dynamic(&ee->pev[i][5], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "RxJabbers");
		evcnt_attach_dynamic(&ee->pev[i][6], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "RxSymbolError");
		evcnt_attach_dynamic(&ee->pev[i][7], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "RxCRCError");
		evcnt_attach_dynamic(&ee->pev[i][8], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "RxAlignmentError");
		evcnt_attach_dynamic(&ee->pev[i][9], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "RxControl8808Pkts");
		evcnt_attach_dynamic(&ee->pev[i][10], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "RxPausePkts");
		evcnt_attach_dynamic(&ee->pev[i][11], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "RxBroadcast");
		evcnt_attach_dynamic(&ee->pev[i][12], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "RxMulticast");
		evcnt_attach_dynamic(&ee->pev[i][13], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "RxUnicast");
		evcnt_attach_dynamic(&ee->pev[i][14], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "Rx64Octets");
		evcnt_attach_dynamic(&ee->pev[i][15], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "Rx65To127Octets");
		evcnt_attach_dynamic(&ee->pev[i][16], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "Rx128To255Octets");
		evcnt_attach_dynamic(&ee->pev[i][17], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "Rx255To511Octets");
		evcnt_attach_dynamic(&ee->pev[i][18], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "Rx512To1023Octets");
		evcnt_attach_dynamic(&ee->pev[i][19], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "Rx1024To1522Octets");
		evcnt_attach_dynamic(&ee->pev[i][20], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "TxLoPriotyByte");
		evcnt_attach_dynamic(&ee->pev[i][21], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "TxHiPriotyByte");
		evcnt_attach_dynamic(&ee->pev[i][22], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "TxLateCollision");
		evcnt_attach_dynamic(&ee->pev[i][23], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "TxPausePkts");
		evcnt_attach_dynamic(&ee->pev[i][24], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "TxBroadcastPkts");
		evcnt_attach_dynamic(&ee->pev[i][25], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "TxMulticastPkts");
		evcnt_attach_dynamic(&ee->pev[i][26], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "TxUnicastPkts");
		evcnt_attach_dynamic(&ee->pev[i][27], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "TxDeferred");
		evcnt_attach_dynamic(&ee->pev[i][28], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "TxTotalCollision");
		evcnt_attach_dynamic(&ee->pev[i][29], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "TxExcessiveCollision");
		evcnt_attach_dynamic(&ee->pev[i][30], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "TxSingleCollision");
		evcnt_attach_dynamic(&ee->pev[i][31], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "TxMultipleCollision");
		evcnt_attach_dynamic(&ee->pev[i][32], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "TxDropPkts");
		evcnt_attach_dynamic(&ee->pev[i][33], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "RxDropPkts");
	}
#endif /* KSE_EVENT_COUNTERS */

	return;
 fail_5:
	for (i = 0; i < KSE_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < KSE_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
	    sizeof(struct kse_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, nseg);
 fail_0:
	return;
}
static int
kse_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct kse_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	default:
		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
			break;

		error = 0;

		if (cmd == SIOCSIFCAP)
			error = (*ifp->if_init)(ifp);
		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			kse_set_filter(sc);
		}
		break;
	}

	splx(s);
	return error;
}
static int
kse_init(struct ifnet *ifp)
{
	struct kse_softc *sc = ifp->if_softc;
	uint32_t paddr;
	int i, error = 0;

	/* cancel pending I/O */
	kse_stop(ifp, 0);

	/* reset all registers but PCI configuration */
	kse_reset(sc);

	/* craft Tx descriptor ring */
	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
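	/*
	 * Chain every descriptor to its successor through t3/r3 and wrap
	 * the last entry back to descriptor 0, so the DMA engine walks
	 * the control data block as an endless ring.
	 */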
	for (i = 0, paddr = KSE_CDTXADDR(sc, 1); i < KSE_NTXDESC - 1; i++) {
		sc->sc_txdescs[i].t3 = paddr;
		paddr += sizeof(struct tdes);
	}
	sc->sc_txdescs[KSE_NTXDESC - 1].t3 = KSE_CDTXADDR(sc, 0);
	KSE_CDTXSYNC(sc, 0, KSE_NTXDESC,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	sc->sc_txfree = KSE_NTXDESC;
	sc->sc_txnext = 0;

	for (i = 0; i < KSE_TXQUEUELEN; i++)
		sc->sc_txsoft[i].txs_mbuf = NULL;
	sc->sc_txsfree = KSE_TXQUEUELEN;
	sc->sc_txsnext = 0;
	sc->sc_txsdirty = 0;

	/* craft Rx descriptor ring */
	memset(sc->sc_rxdescs, 0, sizeof(sc->sc_rxdescs));
	for (i = 0, paddr = KSE_CDRXADDR(sc, 1); i < KSE_NRXDESC - 1; i++) {
		sc->sc_rxdescs[i].r3 = paddr;
		paddr += sizeof(struct rdes);
	}
	sc->sc_rxdescs[KSE_NRXDESC - 1].r3 = KSE_CDRXADDR(sc, 0);
	for (i = 0; i < KSE_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_mbuf == NULL) {
			if ((error = add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    device_xname(&sc->sc_dev), i, error);
				rxdrain(sc);
				goto out;
			}
		}
		else
			KSE_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;

	/* hand Tx/Rx rings to HW */
	CSR_WRITE_4(sc, TDLB, KSE_CDTXADDR(sc, 0));
	CSR_WRITE_4(sc, RDLB, KSE_CDRXADDR(sc, 0));

	sc->sc_txc = TXC_TEN | TXC_EP | TXC_AC | TXC_FCE;
	sc->sc_rxc = RXC_REN | RXC_RU | RXC_FCE;
	if (ifp->if_flags & IFF_PROMISC)
		sc->sc_rxc |= RXC_RA;
	if (ifp->if_flags & IFF_BROADCAST)
		sc->sc_rxc |= RXC_RB;
	sc->sc_t1csum = sc->sc_mcsum = 0;
	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx) {
		sc->sc_rxc |= RXC_ICC;
		sc->sc_mcsum |= M_CSUM_IPv4;
	}
	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Tx) {
		sc->sc_txc |= TXC_ICG;
		sc->sc_t1csum |= T1_IPCKG;
	}
	if (ifp->if_capenable & IFCAP_CSUM_TCPv4_Rx) {
		sc->sc_rxc |= RXC_TCC;
		sc->sc_mcsum |= M_CSUM_TCPv4;
	}
	if (ifp->if_capenable & IFCAP_CSUM_TCPv4_Tx) {
		sc->sc_txc |= TXC_TCG;
		sc->sc_t1csum |= T1_TCPCKG;
	}
	if (ifp->if_capenable & IFCAP_CSUM_UDPv4_Rx) {
		sc->sc_rxc |= RXC_UCC;
		sc->sc_mcsum |= M_CSUM_UDPv4;
	}
	if (ifp->if_capenable & IFCAP_CSUM_UDPv4_Tx) {
		sc->sc_txc |= TXC_UCG;
		sc->sc_t1csum |= T1_UDPCKG;
	}
	sc->sc_txc |= (kse_burstsize << TXC_BS_SFT);
	sc->sc_rxc |= (kse_burstsize << RXC_BS_SFT);

	/* build multicast hash filter if necessary */
	kse_set_filter(sc);

	/* set current media */
	(void)ifmedia_upd(ifp);

	/* enable transmitter and receiver */
	CSR_WRITE_4(sc, MDTXC, sc->sc_txc);
	CSR_WRITE_4(sc, MDRXC, sc->sc_rxc);
	CSR_WRITE_4(sc, MDRSC, 1);

	/* enable interrupts */
	sc->sc_inten = INT_DMTS|INT_DMRS|INT_DMRBUS;
	if (sc->sc_chip == 0x8841)
		sc->sc_inten |= INT_DMLCS;
	CSR_WRITE_4(sc, INTST, ~0);
	CSR_WRITE_4(sc, INTEN, sc->sc_inten);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	if (sc->sc_chip == 0x8841) {
		/* start one second timer */
		callout_reset(&sc->sc_callout, hz, phy_tick, sc);
	}
#ifdef KSE_EVENT_COUNTERS
	/* start statistics gather 1 minute timer */
	zerostats(sc);
	callout_reset(&sc->sc_stat_ch, hz * 60, stat_tick, sc);
#endif

	return error;

 out:
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
	printf("%s: interface not running\n", device_xname(&sc->sc_dev));
	return error;
}
static void
kse_stop(struct ifnet *ifp, int disable)
{
	struct kse_softc *sc = ifp->if_softc;
	struct kse_txsoft *txs;
	int i;

	if (sc->sc_chip == 0x8841)
		callout_stop(&sc->sc_callout);
	callout_stop(&sc->sc_stat_ch);

	sc->sc_txc &= ~TXC_TEN;
	sc->sc_rxc &= ~RXC_REN;
	CSR_WRITE_4(sc, MDTXC, sc->sc_txc);
	CSR_WRITE_4(sc, MDRXC, sc->sc_rxc);

	for (i = 0; i < KSE_TXQUEUELEN; i++) {
		txs = &sc->sc_txsoft[i];
		if (txs->txs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}
	}

	if (disable)
		rxdrain(sc);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}
static void
kse_reset(struct kse_softc *sc)
{

	CSR_WRITE_2(sc, GRR, 1);
	delay(1000); /* PDF does not mention the delay amount */
	CSR_WRITE_2(sc, GRR, 0);

	CSR_WRITE_2(sc, CIDR, 1);
}
static void
kse_watchdog(struct ifnet *ifp)
{
	struct kse_softc *sc = ifp->if_softc;

	/*
	 * Since we're not interrupting every packet, sweep
	 * up before we report an error.
	 */
	txreap(sc);

	if (sc->sc_txfree != KSE_NTXDESC) {
		printf("%s: device timeout (txfree %d txsfree %d txnext %d)\n",
		    device_xname(&sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
		    sc->sc_txnext);
		ifp->if_oerrors++;

		/* Reset the interface. */
		(void)kse_init(ifp);
	}
	else if (ifp->if_flags & IFF_DEBUG)
		printf("%s: recovered from device timeout\n",
		    device_xname(&sc->sc_dev));

	/* Try to get more packets going. */
	kse_start(ifp);
}
static void
kse_start(struct ifnet *ifp)
{
	struct kse_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct kse_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx, ofree, seg;
	uint32_t tdes0;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors.
	 */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		if (sc->sc_txsfree < KSE_TXQUEUE_GC) {
			txreap(sc);
			if (sc->sc_txsfree == 0)
				break;
		}
		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				printf("%s: Tx packet consumes too many "
				    "DMA segments, dropping...\n",
				    device_xname(&sc->sc_dev));
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				m_freem(m0);
				continue;
			}
			/* Short on resources, just stop for now. */
			break;
		}

		if (dmamap->dm_nsegs > sc->sc_txfree) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are not more slots left.
			 */
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		lasttx = -1; tdes0 = 0;
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = KSE_NEXTTX(nexttx)) {
			struct tdes *tdes = &sc->sc_txdescs[nexttx];
			/*
			 * If this is the first descriptor we're
			 * enqueueing, don't set the OWN bit just
			 * yet.  That could cause a race condition.
			 * We'll do it below.
			 */
			tdes->t2 = dmamap->dm_segs[seg].ds_addr;
			tdes->t1 = sc->sc_t1csum
			    | (dmamap->dm_segs[seg].ds_len & T1_TBS_MASK);
			tdes->t0 = tdes0;
			tdes0 = T0_OWN; /* 2nd and other segments */
			lasttx = nexttx;
		}
		/*
		 * Outgoing NFS mbuf must be unloaded when Tx completed.
		 * Without T1_IC NFS mbuf is left unack'ed for excessive
		 * time and NFS stops to proceed until kse_watchdog()
		 * calls txreap() to reclaim the unack'ed mbuf.
		 * It's painful to traverse every mbuf chain to determine
		 * whether someone is waiting for Tx completion.
		 */
		m = m0;
		do {
			if ((m->m_flags & M_EXT) && m->m_ext.ext_free) {
				sc->sc_txdescs[lasttx].t1 |= T1_IC;
				break;
			}
		} while ((m = m->m_next) != NULL);

		/* write last T0_OWN bit of the 1st segment */
		sc->sc_txdescs[lasttx].t1 |= T1_LS;
		sc->sc_txdescs[sc->sc_txnext].t1 |= T1_FS;
		sc->sc_txdescs[sc->sc_txnext].t0 = T0_OWN;
		KSE_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* tell DMA start transmit */
		CSR_WRITE_4(sc, MDTSC, 1);

		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_lastdesc = lasttx;
		txs->txs_ndesc = dmamap->dm_nsegs;

		sc->sc_txfree -= txs->txs_ndesc;
		sc->sc_txnext = nexttx;
		sc->sc_txsfree--;
		sc->sc_txsnext = KSE_NEXTTXS(sc->sc_txsnext);
#if NBPFILTER > 0
		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
	}

	if (sc->sc_txsfree == 0 || sc->sc_txfree == 0) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}
	if (sc->sc_txfree != ofree) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
static void
kse_set_filter(struct kse_softc *sc)
{
	struct ether_multistep step;
	struct ether_multi *enm;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t h, hashes[2];

	sc->sc_rxc &= ~(RXC_MHTE | RXC_RM);
	ifp->if_flags &= ~IFF_ALLMULTI;
	if (ifp->if_flags & IFF_PROMISC)
		return;

	ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
	if (enm == NULL)
		return;
	hashes[0] = hashes[1] = 0;
	do {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			goto allmulti;
		}
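		/*
		 * The upper 6 bits of the little-endian CRC32 of the
		 * address select one of the 64 hash-filter bits: bit 5
		 * picks MTR0 vs. MTR1, the low 5 bits the bit position
		 * within that 32-bit register.
		 */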
		h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN) >> 26;
		hashes[h >> 5] |= 1 << (h & 0x1f);
		ETHER_NEXT_MULTI(step, enm);
	} while (enm != NULL);
	sc->sc_rxc |= RXC_MHTE;
	CSR_WRITE_4(sc, MTR0, hashes[0]);
	CSR_WRITE_4(sc, MTR1, hashes[1]);
	return;

 allmulti:
	sc->sc_rxc |= RXC_RM;
	ifp->if_flags |= IFF_ALLMULTI;
}
static int
add_rxbuf(struct kse_softc *sc, int idx)
{
	struct kse_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return ENOBUFS;
	}

	if (rxs->rxs_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);

	rxs->rxs_mbuf = m;

	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    device_xname(&sc->sc_dev), idx, error);
		panic("kse_add_rxbuf");
	}

	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	KSE_INIT_RXDESC(sc, idx);

	return 0;
}
static void
rxdrain(struct kse_softc *sc)
{
	struct kse_rxsoft *rxs;
	int i;

	for (i = 0; i < KSE_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
			m_freem(rxs->rxs_mbuf);
			rxs->rxs_mbuf = NULL;
		}
	}
}
static int
kse_intr(void *arg)
{
	struct kse_softc *sc = arg;
	uint32_t isr;

	if ((isr = CSR_READ_4(sc, INTST)) == 0)
		return 0;

	if (isr & INT_DMRS)
		rxintr(sc);
	if (isr & INT_DMTS)
		txreap(sc);
	if (isr & INT_DMLCS)
		lnkchg(sc);
	if (isr & INT_DMRBUS)
		printf("%s: Rx descriptor full\n", device_xname(&sc->sc_dev));

	CSR_WRITE_4(sc, INTST, isr);
	return 1;
}
static void
rxintr(struct kse_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct kse_rxsoft *rxs;
	struct mbuf *m;
	uint32_t rxstat;
	int i, len;

	for (i = sc->sc_rxptr; /*CONSTCOND*/ 1; i = KSE_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		KSE_CDRXSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		rxstat = sc->sc_rxdescs[i].r0;

		if (rxstat & R0_OWN) /* desc is left empty */
			break;

		/* R0_FS|R0_LS must have been marked for this desc */

		if (rxstat & R0_ES) {
			ifp->if_ierrors++;
#define PRINTERR(bit, str)						\
			if (rxstat & (bit))				\
				printf("%s: receive error: %s\n",	\
				    device_xname(&sc->sc_dev), str)
			PRINTERR(R0_TL, "frame too long");
			PRINTERR(R0_RF, "runt frame");
			PRINTERR(R0_CE, "bad FCS");
#undef PRINTERR
			KSE_INIT_RXDESC(sc, i);
			continue;
		}

		/* HW errata; frame might be too small or too large */

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		len = rxstat & R0_FL_MASK;
		len -= ETHER_CRC_LEN;	/* trim CRC off */
		m = rxs->rxs_mbuf;

		if (add_rxbuf(sc, i) != 0) {
			ifp->if_ierrors++;
			KSE_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat,
			    rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
			continue;
		}

		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;

		if (sc->sc_mcsum) {
			m->m_pkthdr.csum_flags |= sc->sc_mcsum;
			if (rxstat & R0_IPE)
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
			if (rxstat & (R0_TCPE | R0_UDPE))
				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
		}
#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */
		(*ifp->if_input)(ifp, m);
#ifdef KSEDIAGNOSTIC
		if (kse_monitor_rxintr > 0) {
			printf("m stat %x data %p len %d\n",
			    rxstat, m->m_data, m->m_len);
		}
#endif
	}
	sc->sc_rxptr = i;
}
static void
txreap(struct kse_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct kse_txsoft *txs;
	uint32_t txstat;
	int i;

	ifp->if_flags &= ~IFF_OACTIVE;

	for (i = sc->sc_txsdirty; sc->sc_txsfree != KSE_TXQUEUELEN;
	     i = KSE_NEXTTXS(i), sc->sc_txsfree++) {
		txs = &sc->sc_txsoft[i];

		KSE_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		txstat = sc->sc_txdescs[txs->txs_lastdesc].t0;

		if (txstat & T0_OWN) /* desc is still in use */
			break;

		/* there is no way to tell transmission status per frame */

		ifp->if_opackets++;

		sc->sc_txfree += txs->txs_ndesc;
		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;
	}
	sc->sc_txsdirty = i;
	if (sc->sc_txsfree == KSE_TXQUEUELEN)
		ifp->if_timer = 0;
}
static void
lnkchg(struct kse_softc *sc)
{
	struct ifmediareq ifmr;

#if 0 /* rambling link status */
	printf("%s: link %s\n", device_xname(&sc->sc_dev),
	    (CSR_READ_2(sc, P1SR) & (1U << 5)) ? "up" : "down");
#endif
	ifmedia_sts(&sc->sc_ethercom.ec_if, &ifmr);
}
static int
ifmedia_upd(struct ifnet *ifp)
{
	struct kse_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->sc_media;
	uint16_t ctl;

	ctl = 0;
	if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) {
		ctl |= (1U << 13);	/* restart AN */
		ctl |= (1U << 7);	/* enable AN */
		ctl |= (1U << 4);	/* advertise flow control pause */
		ctl |= (1U << 3) | (1U << 2) | (1U << 1) | (1U << 0);
	}
	else {
		if (IFM_SUBTYPE(ifm->ifm_media) == IFM_100_TX)
			ctl |= (1U << 6);	/* force speed 100 */
		if (ifm->ifm_media & IFM_FDX)
			ctl |= (1U << 5);	/* force full duplex */
	}
	CSR_WRITE_2(sc, P1CR4, ctl);

	sc->sc_media_active = IFM_NONE;
	sc->sc_media_status = IFM_AVALID;

	return 0;
}
static void
ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct kse_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->sc_media;
	uint16_t ctl, sts, result;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	ctl = CSR_READ_2(sc, P1CR4);
	sts = CSR_READ_2(sc, P1SR);
	if ((sts & (1U << 5)) == 0) {
		ifmr->ifm_active |= IFM_NONE;
		goto out; /* link is down */
	}
	ifmr->ifm_status |= IFM_ACTIVE;
	if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) {
		if ((sts & (1U << 6)) == 0) {
			ifmr->ifm_active |= IFM_NONE;
			goto out; /* negotiation in progress */
		}
		result = ctl & sts & 017;
		if (result & (1U << 3))
			ifmr->ifm_active |= IFM_100_TX|IFM_FDX;
		else if (result & (1U << 2))
			ifmr->ifm_active |= IFM_100_TX;
		else if (result & (1U << 1))
			ifmr->ifm_active |= IFM_10_T|IFM_FDX;
		else if (result & (1U << 0))
			ifmr->ifm_active |= IFM_10_T;
		else
			ifmr->ifm_active |= IFM_NONE;
		if (ctl & (1U << 4))
			ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
		if (sts & (1U << 4))
			ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
	}
	else {
		ifmr->ifm_active |= (sts & (1U << 10)) ? IFM_100_TX : IFM_10_T;
		if (sts & (1U << 9))
			ifmr->ifm_active |= IFM_FDX;
		if (sts & (1U << 12))
			ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
		if (sts & (1U << 11))
			ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
	}

 out:
	sc->sc_media_status = ifmr->ifm_status;
	sc->sc_media_active = ifmr->ifm_active;
}
static void
phy_tick(void *arg)
{
	struct kse_softc *sc = arg;
	struct ifmediareq ifmr;
	int s;

	s = splnet();
	ifmedia_sts(&sc->sc_ethercom.ec_if, &ifmr);
	splx(s);

	callout_reset(&sc->sc_callout, hz, phy_tick, sc);
}
static int
ifmedia2_upd(struct ifnet *ifp)
{
	struct kse_softc *sc = ifp->if_softc;

	sc->sc_media_status = IFM_AVALID;
	sc->sc_media_active = IFM_NONE;
	return 0;
}
static void
ifmedia2_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct kse_softc *sc = ifp->if_softc;
	uint16_t p1sts, p2sts;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;
	p1sts = CSR_READ_2(sc, P1SR);
	p2sts = CSR_READ_2(sc, P2SR);
	if (((p1sts | p2sts) & (1U << 5)) == 0)
		ifmr->ifm_active |= IFM_NONE;
	else {
		ifmr->ifm_status |= IFM_ACTIVE;
		ifmr->ifm_active |= IFM_100_TX|IFM_FDX;
		ifmr->ifm_active |= IFM_FLOW|IFM_ETH_RXPAUSE|IFM_ETH_TXPAUSE;
	}
	sc->sc_media_status = ifmr->ifm_status;
	sc->sc_media_active = ifmr->ifm_active;
}
#ifdef KSE_EVENT_COUNTERS
static void
stat_tick(void *arg)
{
	struct kse_softc *sc = arg;
	struct ksext *ee = &sc->sc_ext;
	int nport, p, i, val;

	nport = (sc->sc_chip == 0x8842) ? 3 : 1;
	for (p = 0; p < nport; p++) {
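		/*
		 * Harvest the 32 per-port MIB counters through the
		 * indirect access window: write the counter index to
		 * IACR, poll IADR5 until the valid bit (30) is set,
		 * then assemble bits 29:16 from IADR5 and 15:0 from
		 * IADR4.  Bit 31 flags a counter overflow.
		 */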
		for (i = 0; i < 32; i++) {
			val = 0x1c00 | (p * 0x20 + i);
			CSR_WRITE_2(sc, IACR, val);
			do {
				val = CSR_READ_2(sc, IADR5) << 16;
			} while ((val & (1U << 30)) == 0);
			if (val & (1U << 31)) {
				(void)CSR_READ_2(sc, IADR4);
				val = 0x3fffffff; /* has made overflow */
			}
			else {
				val &= 0x3fff0000;		/* 29:16 */
				val |= CSR_READ_2(sc, IADR4);	/* 15:0 */
			}
			ee->pev[p][i].ev_count += val; /* i (0-31) */
		}
		CSR_WRITE_2(sc, IACR, 0x1c00 + 0x100 + p);
		ee->pev[p][32].ev_count = CSR_READ_2(sc, IADR4); /* 32 */
		CSR_WRITE_2(sc, IACR, 0x1c00 + 0x100 + p * 3 + 1);
		ee->pev[p][33].ev_count = CSR_READ_2(sc, IADR4); /* 33 */
	}
	callout_reset(&sc->sc_stat_ch, hz * 60, stat_tick, arg);
}
*sc
)
1433 struct ksext
*ee
= &sc
->sc_ext
;
1434 int nport
, p
, i
, val
;
1436 /* make sure all the HW counters get zero */
1437 nport
= (sc
->sc_chip
== 0x8842) ? 3 : 1;
1438 for (p
= 0; p
< nport
; p
++) {
1439 for (i
= 0; i
< 31; i
++) {
1440 val
= 0x1c00 | (p
* 0x20 + i
);
1441 CSR_WRITE_2(sc
, IACR
, val
);
1443 val
= CSR_READ_2(sc
, IADR5
) << 16;
1444 } while ((val
& (1U << 30)) == 0);
1445 (void)CSR_READ_2(sc
, IADR4
);
1446 ee
->pev
[p
][i
].ev_count
= 0;