/* $NetBSD: ixp425_if_npe.c,v 1.16 2009/03/11 16:30:20 msaitoh Exp $ */

/*-
 * Copyright (c) 2006 Sam Leffler.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: src/sys/arm/xscale/ixp425/if_npe.c,v 1.1 2006/11/19 23:55:23 sam Exp $");
__KERNEL_RCSID(0, "$NetBSD: ixp425_if_npe.c,v 1.16 2009/03/11 16:30:20 msaitoh Exp $");
/*
 * Intel XScale NPE Ethernet driver.
 *
 * This driver handles the two ports present on the IXP425.
 * Packet processing is done by the Network Processing Engines
 * (NPEs) that work together with a MAC and PHY.  The MAC
 * is also mapped to the XScale CPU; the PHY is accessed via
 * the MAC.  NPE-XScale communication happens through h/w
 * queues managed by the Q Manager block.
 *
 * The code here replaces the ethAcc, ethMii, and ethDB classes
 * in the Intel Access Library (IAL) and the OS-specific driver.
 *
 * XXX add vlan support
 * XXX NPE-C port doesn't work yet
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/callout.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/endian.h>
#include <sys/ioctl.h>
#include <sys/syslog.h>

#include <machine/bus.h>

#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <arm/xscale/ixp425reg.h>
#include <arm/xscale/ixp425var.h>
#include <arm/xscale/ixp425_qmgr.h>
#include <arm/xscale/ixp425_npevar.h>
#include <arm/xscale/ixp425_if_npereg.h>

#include <dev/mii/miivar.h>
struct npebuf {
    struct npebuf   *ix_next;   /* chain to next buffer */
    void            *ix_m;      /* backpointer to mbuf */
    bus_dmamap_t    ix_map;     /* bus dma map for associated data */
    struct npehwbuf *ix_hw;     /* associated h/w block */
    uint32_t        ix_neaddr;  /* phys address of ix_hw */
};
struct npedma {
    const char      *name;
    int             nbuf;       /* # npebufs allocated */
    struct npehwbuf *hwbuf;     /* NPE h/w buffers */
    bus_dmamap_t    buf_map;
    bus_addr_t      buf_phys;   /* phys addr of buffers */
    struct npebuf   *buf;       /* s/w buffers (1-1 w/ h/w) */
};
struct npe_softc {
    struct device       sc_dev;
    struct ethercom     sc_ethercom;
    uint8_t             sc_enaddr[ETHER_ADDR_LEN];
    struct mii_data     sc_mii;
    bus_space_tag_t     sc_iot;
    bus_dma_tag_t       sc_dt;
    bus_space_handle_t  sc_ioh;         /* MAC register window */
    bus_space_handle_t  sc_miih;        /* MII register window */
    struct ixpnpe_softc *sc_npe;        /* NPE support */
    int                 sc_unit;
    int                 sc_phy;
    struct callout      sc_tick_ch;     /* Tick callout */
    struct npedma       txdma;
    struct npebuf       *tx_free;       /* list of free tx buffers */
    struct npedma       rxdma;
    int                 rx_qid;         /* rx qid */
    int                 rx_freeqid;     /* rx free buffers qid */
    int                 tx_qid;         /* tx qid */
    int                 tx_doneqid;     /* tx completed qid */
    struct npestats     *sc_stats;
    bus_dmamap_t        sc_stats_map;
    bus_addr_t          sc_stats_phys;  /* phys addr of sc_stats */
    int                 sc_if_flags;    /* keep last if_flags */
    int                 sc_flowflags;   /* 802.3x flow control flags */
    rndsource_element_t rnd_source;     /* random source */
};
/*
 * Per-unit static configuration for IXP425.  The tx and
 * rx free Q ids are fixed by the NPE microcode.  The
 * rx Q ids are programmed to be separate to simplify
 * multi-port processing.  It may be better to handle
 * all traffic through one Q (as done by the Intel drivers).
 *
 * Note that the PHYs are accessible only from MAC A
 * on the IXP425.  This and other platform-specific
 * assumptions probably need to be handled through hints.
 */
static const struct {
    const char  *desc;      /* device description */
    int         npeid;      /* NPE assignment */
    int         macport;    /* Port number of the MAC */
    uint32_t    imageid;    /* NPE firmware image id */
    uint32_t    regbase;    /* MAC register window */
    int         regsize;
    uint32_t    miibase;    /* MII register window */
    int         miisize;
    int         rx_qid;     /* rx qid */
    int         rx_freeqid; /* rx free buffers qid */
    int         tx_qid;     /* tx qid */
    int         tx_doneqid; /* tx completed qid */
} npeconfig[NPE_PORTS_MAX] = {
    { .desc     = "IXP NPE-B",
      .imageid  = IXP425_NPE_B_IMAGEID,
      .regbase  = IXP425_MAC_A_HWBASE,
      .regsize  = IXP425_MAC_A_SIZE,
      .miibase  = IXP425_MAC_A_HWBASE,
      .miisize  = IXP425_MAC_A_SIZE,
    },
    { .desc     = "IXP NPE-C",
      .imageid  = IXP425_NPE_C_IMAGEID,
      .regbase  = IXP425_MAC_B_HWBASE,
      .regsize  = IXP425_MAC_B_SIZE,
      .miibase  = IXP425_MAC_A_HWBASE,
      .miisize  = IXP425_MAC_A_SIZE,
    },
};
static struct npe_softc *npes[NPE_MAX];    /* NB: indexed by npeid */
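/*
 * Register accessors.  RD4/WR4 below perform 32-bit reads/writes of the
 * per-port MAC register window mapped at sc_ioh via bus_space; the
 * separate MII window (sc_miih) is accessed through the MII_RD4/MII_WR4
 * helpers further down in the MII support code.
 */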
static __inline uint32_t
RD4(struct npe_softc *sc, bus_size_t off)
{
    return bus_space_read_4(sc->sc_iot, sc->sc_ioh, off);
}

static __inline void
WR4(struct npe_softc *sc, bus_size_t off, uint32_t val)
{
    bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
}
static int  npe_activate(struct npe_softc *);
static void npe_deactivate(struct npe_softc *);
static void npe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr);
static void npe_setmac(struct npe_softc *sc, const u_char *eaddr);
static void npe_getmac(struct npe_softc *sc);
static void npe_txdone(int qid, void *arg);
static int  npe_rxbuf_init(struct npe_softc *, struct npebuf *,
        struct mbuf *);
static void npe_rxdone(int qid, void *arg);
static void npeinit_macreg(struct npe_softc *);
static int  npeinit(struct ifnet *);
static void npeinit_resetcb(void *);
static void npeinit_locked(void *);
static void npestart(struct ifnet *);
static void npestop(struct ifnet *, int);
static void npewatchdog(struct ifnet *);
static int  npeioctl(struct ifnet *ifp, u_long, void *);

static int  npe_setrxqosentry(struct npe_softc *, int classix,
        int trafclass, int qid);
static int  npe_updatestats(struct npe_softc *);
static int  npe_getstats(struct npe_softc *);
static uint32_t npe_getimageid(struct npe_softc *);
static int  npe_setloopback(struct npe_softc *, int ena);

static int  npe_miibus_readreg(struct device *, int, int);
static void npe_miibus_writereg(struct device *, int, int, int);
static void npe_miibus_statchg(struct device *);

static int npe_debug;
#define DPRINTF(sc, fmt, ...) do {                    \
    if (npe_debug) printf(fmt, __VA_ARGS__);          \
} while (0)
#define DPRINTFn(n, sc, fmt, ...) do {                \
    if (npe_debug >= n) printf(fmt, __VA_ARGS__);     \
} while (0)

#define NPE_TXBUF   128
#define ETHER_ALIGN 2   /* XXX: Ditch this */
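/*
 * NB: ETHER_ALIGN is the padding used by npe_rxbuf_init() when it
 * back-loads the payload into the cluster, so that the 14-byte Ethernet
 * header leaves the following IP header 32-bit aligned.
 */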
#define MAC2UINT64(addr)    (((uint64_t)addr[0] << 40)  \
                + ((uint64_t)addr[1] << 32)             \
                + ((uint64_t)addr[2] << 24)             \
                + ((uint64_t)addr[3] << 16)             \
                + ((uint64_t)addr[4] << 8)              \
                + (uint64_t)addr[5])
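/*
 * Example: MAC2UINT64 packs a 6-byte Ethernet address into a 48-bit
 * integer, e.g. 00:0d:60:12:34:56 becomes 0x000d60123456.  npe_rxdone()
 * uses this to compare a received destination address numerically
 * against each [enm_addrlo, enm_addrhi] multicast range.
 */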
/* NB: all tx done processing goes through one queue */
static int tx_doneqid = -1;

void    (*npe_getmac_md)(int, uint8_t *);
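/*
 * Platform code may point npe_getmac_md at a routine that supplies the
 * Ethernet address for a given unit (e.g. from board-specific storage);
 * when it is NULL, npe_getmac() below falls back to reading the address
 * out of the MAC unicast address registers.
 */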
static int  npe_match(struct device *, struct cfdata *, void *);
static void npe_attach(struct device *, struct device *, void *);

CFATTACH_DECL(npe, sizeof(struct npe_softc),
    npe_match, npe_attach, NULL, NULL);
static int
npe_match(struct device *parent, struct cfdata *cf, void *arg)
{
    struct ixpnpe_attach_args *na = arg;

    return (na->na_unit == NPE_B || na->na_unit == NPE_C);
}
static void
npe_attach(struct device *parent, struct device *self, void *arg)
{
    struct npe_softc *sc = (void *)self;
    struct ixpnpe_attach_args *na = arg;
    struct ixpnpe_softc *isc = (struct ixpnpe_softc *)parent;
    struct ifnet *ifp;

    aprint_normal(": Ethernet co-processor\n");

    sc->sc_iot = na->na_iot;
    sc->sc_dt = na->na_dt;
    sc->sc_npe = na->na_npe;
    sc->sc_unit = (na->na_unit == NPE_B) ? 0 : 1;
    sc->sc_phy = na->na_phy;

    memset(&sc->sc_ethercom, 0, sizeof(sc->sc_ethercom));
    memset(&sc->sc_mii, 0, sizeof(sc->sc_mii));

    callout_init(&sc->sc_tick_ch, 0);

    if (npe_activate(sc)) {
        aprint_error("%s: Failed to activate NPE (missing "
            "microcode?)\n", sc->sc_dev.dv_xname);
        return;
    }

    npe_getmac(sc);
    aprint_normal("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
        ether_sprintf(sc->sc_enaddr));

    ifp = &sc->sc_ethercom.ec_if;
    sc->sc_mii.mii_ifp = ifp;
    sc->sc_mii.mii_readreg = npe_miibus_readreg;
    sc->sc_mii.mii_writereg = npe_miibus_writereg;
    sc->sc_mii.mii_statchg = npe_miibus_statchg;
    sc->sc_ethercom.ec_mii = &sc->sc_mii;

    ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, ether_mediachange,
        npe_ifmedia_status);

    mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
        MII_OFFSET_ANY, MIIF_DOPAUSE);
    if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
        ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
        ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
    } else
        ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

    ifp->if_softc = sc;
    strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
    ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    ifp->if_start = npestart;
    ifp->if_ioctl = npeioctl;
    ifp->if_watchdog = npewatchdog;
    ifp->if_init = npeinit;
    ifp->if_stop = npestop;
    IFQ_SET_READY(&ifp->if_snd);

    sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

    if_attach(ifp);
    ether_ifattach(ifp, sc->sc_enaddr);

    rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
        RND_TYPE_NET, 0);

    /* callback function to reset MAC */
    isc->macresetcbfunc = npeinit_resetcb;
    isc->macresetcbarg = sc;
}
/*
 * Compute and install the multicast filter.
 */
static void
npe_setmcast(struct npe_softc *sc)
{
    struct ifnet *ifp = &sc->sc_ethercom.ec_if;
    uint8_t mask[ETHER_ADDR_LEN], addr[ETHER_ADDR_LEN];
    uint32_t reg;
    uint32_t msg[2];
    int i;

    /* Always use filter.  Is here a correct position? */
    reg = RD4(sc, NPE_MAC_RX_CNTRL1);
    WR4(sc, NPE_MAC_RX_CNTRL1, reg | NPE_RX_CNTRL1_ADDR_FLTR_EN);

    if (ifp->if_flags & IFF_PROMISC) {
        memset(mask, 0, ETHER_ADDR_LEN);
        memset(addr, 0, ETHER_ADDR_LEN);
    } else if (ifp->if_flags & IFF_ALLMULTI) {
        static const uint8_t allmulti[ETHER_ADDR_LEN] =
            { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };

        memcpy(mask, allmulti, ETHER_ADDR_LEN);
        memcpy(addr, allmulti, ETHER_ADDR_LEN);
    } else {
        uint8_t clr[ETHER_ADDR_LEN], set[ETHER_ADDR_LEN];
        struct ether_multistep step;
        struct ether_multi *enm;

        memset(clr, 0, ETHER_ADDR_LEN);
        memset(set, 0xff, ETHER_ADDR_LEN);

        ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
        while (enm != NULL) {
            if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
                ETHER_ADDR_LEN)) {
                ifp->if_flags |= IFF_ALLMULTI;
            }
            for (i = 0; i < ETHER_ADDR_LEN; i++) {
                clr[i] |= enm->enm_addrlo[i];
                set[i] &= enm->enm_addrlo[i];
            }
            ETHER_NEXT_MULTI(step, enm);
        }

        for (i = 0; i < ETHER_ADDR_LEN; i++) {
            mask[i] = set[i] | ~clr[i];
        }
    }

    /*
     * Write the mask and address registers.
     */
    for (i = 0; i < ETHER_ADDR_LEN; i++) {
        WR4(sc, NPE_MAC_ADDR_MASK(i), mask[i]);
        WR4(sc, NPE_MAC_ADDR(i), addr[i]);
    }

    msg[0] = NPE_ADDRESSFILTERCONFIG << NPE_MAC_MSGID_SHL
        | (npeconfig[sc->sc_unit].macport << NPE_MAC_PORTID_SHL);
    msg[1] = ((ifp->if_flags & IFF_PROMISC) ? 1 : 0) << 24
        | ((RD4(sc, NPE_MAC_UNI_ADDR_6) & 0xff) << 16)
        | (addr[5] << 8) | mask[5];
    ixpnpe_sendandrecvmsg(sc->sc_npe, msg, msg);
}
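/*
 * Example of the mask computation above: with the two multicast
 * addresses 01:00:5e:00:00:01 and 01:00:5e:00:00:02 on the list, `clr'
 * (the OR) ends with 0x03 in the last byte and `set' (the AND) with
 * 0x00, so mask[5] = set[5] | ~clr[5] = 0xfc, while the identical
 * leading bytes all yield 0xff; the filter then matches addresses that
 * agree with the programmed address in the masked bit positions.
 */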
static int
npe_dma_setup(struct npe_softc *sc, struct npedma *dma,
    const char *name, int nbuf, int maxseg)
{
    bus_dma_segment_t seg;
    int rseg, error, i;
    void *hwbuf;
    size_t size;

    memset(dma, 0, sizeof(*dma));

    dma->name = name;
    dma->nbuf = nbuf;

    size = nbuf * sizeof(struct npehwbuf);

    /* XXX COHERENT for now */
    error = bus_dmamem_alloc(sc->sc_dt, size, sizeof(uint32_t), 0, &seg,
        1, &rseg, BUS_DMA_NOWAIT);
    if (error) {
        printf("%s: unable to allocate memory for %s h/w buffers, "
            "error %u\n", sc->sc_dev.dv_xname, dma->name, error);
        return error;
    }

    error = bus_dmamem_map(sc->sc_dt, &seg, 1, size, &hwbuf,
        BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_NOCACHE);
    if (error) {
        printf("%s: unable to map memory for %s h/w buffers, "
            "error %u\n", sc->sc_dev.dv_xname, dma->name, error);
        bus_dmamem_free(sc->sc_dt, &seg, rseg);
        return error;
    }
    dma->hwbuf = (void *)hwbuf;

    error = bus_dmamap_create(sc->sc_dt, size, 1, size, 0,
        BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &dma->buf_map);
    if (error) {
        printf("%s: unable to create map for %s h/w buffers, "
            "error %u\n", sc->sc_dev.dv_xname, dma->name, error);
        bus_dmamem_unmap(sc->sc_dt, hwbuf, size);
        return error;
    }

    error = bus_dmamap_load(sc->sc_dt, dma->buf_map, hwbuf, size, NULL,
        BUS_DMA_NOWAIT);
    if (error) {
        printf("%s: unable to load map for %s h/w buffers, "
            "error %u\n", sc->sc_dev.dv_xname, dma->name, error);
        bus_dmamap_destroy(sc->sc_dt, dma->buf_map);
        return error;
    }

    dma->buf = malloc(nbuf * sizeof(struct npebuf), M_TEMP,
        M_NOWAIT | M_ZERO);
    if (dma->buf == NULL) {
        printf("%s: unable to allocate memory for %s s/w buffers\n",
            sc->sc_dev.dv_xname, dma->name);
        bus_dmamap_unload(sc->sc_dt, dma->buf_map);
        return ENOMEM;
    }

    dma->buf_phys = dma->buf_map->dm_segs[0].ds_addr;
    for (i = 0; i < dma->nbuf; i++) {
        struct npebuf *npe = &dma->buf[i];
        struct npehwbuf *hw = &dma->hwbuf[i];

        /* calculate offset to shared area */
        npe->ix_neaddr = dma->buf_phys +
            ((uintptr_t)hw - (uintptr_t)dma->hwbuf);
        KASSERT((npe->ix_neaddr & 0x1f) == 0);
        error = bus_dmamap_create(sc->sc_dt, MCLBYTES, maxseg,
            MCLBYTES, 0, 0, &npe->ix_map);
        if (error != 0) {
            printf("%s: unable to create dmamap for %s buffer %u, "
                "error %u\n", sc->sc_dev.dv_xname, dma->name, i,
                error);
            /* XXXSCW: Free up maps... */
            return error;
        }
        npe->ix_hw = hw;
    }
    bus_dmamap_sync(sc->sc_dt, dma->buf_map, 0, dma->buf_map->dm_mapsize,
        BUS_DMASYNC_PREWRITE);
    return 0;
}
static void
npe_dma_destroy(struct npe_softc *sc, struct npedma *dma)
{
    int i;

    /* XXXSCW: Clean this up */

    if (dma->hwbuf != NULL) {
        for (i = 0; i < dma->nbuf; i++) {
            struct npebuf *npe = &dma->buf[i];
            bus_dmamap_destroy(sc->sc_dt, npe->ix_map);
        }
        bus_dmamap_unload(sc->sc_dt, dma->buf_map);
        bus_dmamem_free(sc->sc_dt, (void *)dma->hwbuf, dma->buf_map);
        bus_dmamap_destroy(sc->sc_dt, dma->buf_map);
    }
    if (dma->buf != NULL)
        free(dma->buf, M_TEMP);
    memset(dma, 0, sizeof(*dma));
}
static int
npe_activate(struct npe_softc *sc)
{
    bus_dma_segment_t seg;
    int unit = sc->sc_unit;
    int error, i, rseg;
    void *statbuf;

    /* load NPE firmware and start it running */
    error = ixpnpe_init(sc->sc_npe, "npe_fw", npeconfig[unit].imageid);
    if (error != 0)
        return error;

    if (bus_space_map(sc->sc_iot, npeconfig[unit].regbase,
        npeconfig[unit].regsize, 0, &sc->sc_ioh)) {
        printf("%s: Cannot map registers 0x%x:0x%x\n",
            sc->sc_dev.dv_xname, npeconfig[unit].regbase,
            npeconfig[unit].regsize);
        return ENOMEM;
    }

    if (npeconfig[unit].miibase != npeconfig[unit].regbase) {
        /*
         * The PHYs are only accessible from one MAC (it appears)
         * so for the other MACs set up an additional mapping for
         * frobbing the PHY registers.
         */
        if (bus_space_map(sc->sc_iot, npeconfig[unit].miibase,
            npeconfig[unit].miisize, 0, &sc->sc_miih)) {
            printf("%s: Cannot map MII registers 0x%x:0x%x\n",
                sc->sc_dev.dv_xname, npeconfig[unit].miibase,
                npeconfig[unit].miisize);
            return ENOMEM;
        }
    } else
        sc->sc_miih = sc->sc_ioh;

    error = npe_dma_setup(sc, &sc->txdma, "tx", NPE_TXBUF, NPE_MAXSEG);
    if (error != 0)
        return error;
    error = npe_dma_setup(sc, &sc->rxdma, "rx", NPE_RXBUF, 1);
    if (error != 0)
        return error;

    /* setup statistics block */
    error = bus_dmamem_alloc(sc->sc_dt, sizeof(struct npestats),
        sizeof(uint32_t), 0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
    if (error) {
        printf("%s: unable to allocate memory for stats block, "
            "error %u\n", sc->sc_dev.dv_xname, error);
        return error;
    }

    error = bus_dmamem_map(sc->sc_dt, &seg, 1, sizeof(struct npestats),
        &statbuf, BUS_DMA_NOWAIT);
    if (error) {
        printf("%s: unable to map memory for stats block, "
            "error %u\n", sc->sc_dev.dv_xname, error);
        return error;
    }
    sc->sc_stats = (void *)statbuf;

    error = bus_dmamap_create(sc->sc_dt, sizeof(struct npestats), 1,
        sizeof(struct npestats), 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
        &sc->sc_stats_map);
    if (error) {
        printf("%s: unable to create map for stats block, "
            "error %u\n", sc->sc_dev.dv_xname, error);
        return error;
    }

    if (bus_dmamap_load(sc->sc_dt, sc->sc_stats_map, sc->sc_stats,
        sizeof(struct npestats), NULL, BUS_DMA_NOWAIT) != 0) {
        printf("%s: unable to load memory for stats block, error %u\n",
            sc->sc_dev.dv_xname, error);
        return error;
    }
    sc->sc_stats_phys = sc->sc_stats_map->dm_segs[0].ds_addr;

    /* XXX disable half-bridge LEARNING+FILTERING feature */

    /*
     * Setup h/w rx/tx queues.  There are four q's:
     *  rx      inbound q of rx'd frames
     *  rx_free pool of ixpbuf's for receiving frames
     *  tx      outbound q of frames to send
     *  tx_done q of tx frames that have been processed
     *
     * The NPE handles the actual tx/rx process and the q manager
     * handles the queues.  The driver just writes entries to the
     * q manager mailbox's and gets callbacks when there are rx'd
     * frames to process or tx'd frames to reap.  These callbacks
     * are controlled by the q configurations; e.g. we get a
     * callback when tx_done has 2 or more frames to process and
     * when the rx q has at least one frame.  These settings can be
     * changed at the time the q is configured.
     */
    sc->rx_qid = npeconfig[unit].rx_qid;
    ixpqmgr_qconfig(sc->rx_qid, NPE_RXBUF, 0, 1,
        IX_QMGR_Q_SOURCE_ID_NOT_E, npe_rxdone, sc);
    sc->rx_freeqid = npeconfig[unit].rx_freeqid;
    ixpqmgr_qconfig(sc->rx_freeqid, NPE_RXBUF, 0, NPE_RXBUF/2, 0, NULL, sc);
    /* tell the NPE to direct all traffic to rx_qid */
#if 0
    for (i = 0; i < 8; i++)
#else
    printf("%s: remember to fix rx q setup\n", sc->sc_dev.dv_xname);
    for (i = 0; i < 4; i++)
#endif
        npe_setrxqosentry(sc, i, 0, sc->rx_qid);

    sc->tx_qid = npeconfig[unit].tx_qid;
    sc->tx_doneqid = npeconfig[unit].tx_doneqid;
    ixpqmgr_qconfig(sc->tx_qid, NPE_TXBUF, 0, NPE_TXBUF, 0, NULL, sc);
    if (tx_doneqid == -1) {
        ixpqmgr_qconfig(sc->tx_doneqid, NPE_TXBUF, 0, 2,
            IX_QMGR_Q_SOURCE_ID_NOT_E, npe_txdone, sc);
        tx_doneqid = sc->tx_doneqid;
    }

    KASSERT(npes[npeconfig[unit].npeid] == NULL);
    npes[npeconfig[unit].npeid] = sc;

    return 0;
}
static void
npe_deactivate(struct npe_softc *sc)
{
    int unit = sc->sc_unit;

    npes[npeconfig[unit].npeid] = NULL;

    /* XXX disable q's */
    if (sc->sc_npe != NULL)
        ixpnpe_stop(sc->sc_npe);
    if (sc->sc_stats != NULL) {
        bus_dmamap_unload(sc->sc_stats_tag, sc->sc_stats_map);
        bus_dmamem_free(sc->sc_stats_tag, sc->sc_stats,
            sc->sc_stats_map);
        bus_dmamap_destroy(sc->sc_stats_tag, sc->sc_stats_map);
    }
    if (sc->sc_stats_tag != NULL)
        bus_dma_tag_destroy(sc->sc_stats_tag);
    npe_dma_destroy(sc, &sc->txdma);
    npe_dma_destroy(sc, &sc->rxdma);
    bus_generic_detach(sc->sc_dev);
    device_delete_child(sc->sc_dev, sc->sc_mii);
    /* XXX sc_ioh and sc_miih */
    bus_release_resource(dev, SYS_RES_IOPORT,
        rman_get_rid(sc->mem_res), sc->mem_res);
}
/*
 * Notify the world which media we're using.
 */
static void
npe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
    struct npe_softc *sc = ifp->if_softc;

    mii_pollstat(&sc->sc_mii);

    ifmr->ifm_active = sc->sc_mii.mii_media_active;
    ifmr->ifm_status = sc->sc_mii.mii_media_status;
}
static void
npe_addstats(struct npe_softc *sc)
{
    struct ifnet *ifp = &sc->sc_ethercom.ec_if;
    struct npestats *ns = sc->sc_stats;

    ifp->if_oerrors +=
          be32toh(ns->dot3StatsInternalMacTransmitErrors)
        + be32toh(ns->dot3StatsCarrierSenseErrors)
        + be32toh(ns->TxVLANIdFilterDiscards)
        ;
    ifp->if_ierrors += be32toh(ns->dot3StatsFCSErrors)
        + be32toh(ns->dot3StatsInternalMacReceiveErrors)
        + be32toh(ns->RxOverrunDiscards)
        + be32toh(ns->RxUnderflowEntryDiscards)
        ;
    ifp->if_collisions +=
          be32toh(ns->dot3StatsSingleCollisionFrames)
        + be32toh(ns->dot3StatsMultipleCollisionFrames)
        ;
}
737 struct npe_softc
*sc
= xsc
;
741 * NB: to avoid sleeping with the softc lock held we
742 * split the NPE msg processing into two parts. The
743 * request for statistics is sent w/o waiting for a
744 * reply and then on the next tick we retrieve the
745 * results. This works because npe_tick is the only
746 * code that talks via the mailbox's (except at setup).
747 * This likely can be handled better.
749 if (ixpnpe_recvmsg(sc
->sc_npe
, msg
) == 0 && msg
[0] == ACK
) {
750 bus_dmamap_sync(sc
->sc_dt
, sc
->sc_stats_map
, 0,
751 sizeof(struct npestats
), BUS_DMASYNC_POSTREAD
);
755 mii_tick(&sc
->sc_mii
);
757 /* schedule next poll */
758 callout_reset(&sc
->sc_tick_ch
, hz
, npe_tick
, sc
);
static void
npe_setmac(struct npe_softc *sc, const u_char *eaddr)
{
    WR4(sc, NPE_MAC_UNI_ADDR_1, eaddr[0]);
    WR4(sc, NPE_MAC_UNI_ADDR_2, eaddr[1]);
    WR4(sc, NPE_MAC_UNI_ADDR_3, eaddr[2]);
    WR4(sc, NPE_MAC_UNI_ADDR_4, eaddr[3]);
    WR4(sc, NPE_MAC_UNI_ADDR_5, eaddr[4]);
    WR4(sc, NPE_MAC_UNI_ADDR_6, eaddr[5]);
}
static void
npe_getmac(struct npe_softc *sc)
{
    uint8_t *eaddr = sc->sc_enaddr;

    if (npe_getmac_md != NULL) {
        (*npe_getmac_md)(sc->sc_dev.dv_unit, eaddr);
    } else {
        /*
         * Some systems' unicast addresses appear to be loaded
         * into the MAC beforehand (e.g. by the boot loader), so
         * just read the current address back out of the MAC.
         */
        eaddr[0] = RD4(sc, NPE_MAC_UNI_ADDR_1) & 0xff;
        eaddr[1] = RD4(sc, NPE_MAC_UNI_ADDR_2) & 0xff;
        eaddr[2] = RD4(sc, NPE_MAC_UNI_ADDR_3) & 0xff;
        eaddr[3] = RD4(sc, NPE_MAC_UNI_ADDR_4) & 0xff;
        eaddr[4] = RD4(sc, NPE_MAC_UNI_ADDR_5) & 0xff;
        eaddr[5] = RD4(sc, NPE_MAC_UNI_ADDR_6) & 0xff;
    }
}
struct txdone {
    struct npebuf   *head;
    struct npebuf   **tail;
    int             count;
};

static void
npe_txdone_finish(struct npe_softc *sc, const struct txdone *td)
{
    struct ifnet *ifp = &sc->sc_ethercom.ec_if;

    *td->tail = sc->tx_free;
    sc->tx_free = td->head;
    /*
     * We're no longer busy, so clear the busy flag and call the
     * start routine to xmit more packets.
     */
    ifp->if_opackets += td->count;
    ifp->if_flags &= ~IFF_OACTIVE;
    npestart(ifp);
}
/*
 * Q manager callback on tx done queue.  Reap mbufs
 * and return tx buffers to the free list.  Finally
 * restart output.  Note the microcode has only one
 * txdone q wired into it so we must use the NPE ID
 * returned with each npehwbuf to decide where to
 * send each buffer.
 */
static void
npe_txdone(int qid, void *arg)
{
#define P2V(a, dma) \
    &(dma)->buf[((a) - (dma)->buf_phys) / sizeof(struct npehwbuf)]
    struct npe_softc *sc;
    struct npebuf *npe;
    struct txdone *td, q[NPE_MAX];
    uint32_t entry;

    /* XXX no NPE-A support */
    q[NPE_B].tail = &q[NPE_B].head; q[NPE_B].count = 0;
    q[NPE_C].tail = &q[NPE_C].head; q[NPE_C].count = 0;
    /* XXX max # at a time? */
    while (ixpqmgr_qread(qid, &entry) == 0) {
        sc = npes[NPE_QM_Q_NPE(entry)];
        DPRINTF(sc, "%s: entry 0x%x NPE %u port %u\n",
            __func__, entry, NPE_QM_Q_NPE(entry), NPE_QM_Q_PORT(entry));
        if (RND_ENABLED(&sc->rnd_source))
            rnd_add_uint32(&sc->rnd_source, entry);

        npe = P2V(NPE_QM_Q_ADDR(entry), &sc->txdma);
        m_freem(npe->ix_m);
        npe->ix_m = NULL;

        td = &q[NPE_QM_Q_NPE(entry)];
        *td->tail = npe;
        td->tail = &npe->ix_next;
        td->count++;
    }

    if (q[NPE_B].count)
        npe_txdone_finish(npes[NPE_B], &q[NPE_B]);
    if (q[NPE_C].count)
        npe_txdone_finish(npes[NPE_C], &q[NPE_C]);
#undef P2V
}
static __inline struct mbuf *
npe_getcl(void)
{
    struct mbuf *m;

    MGETHDR(m, M_DONTWAIT, MT_DATA);
    if (m != NULL) {
        MCLGET(m, M_DONTWAIT);
        if ((m->m_flags & M_EXT) == 0) {
            m_freem(m);
            m = NULL;
        }
    }
    return (m);
}
static int
npe_rxbuf_init(struct npe_softc *sc, struct npebuf *npe, struct mbuf *m)
{
    struct npehwbuf *hw;
    int error;

    if (m == NULL) {
        m = npe_getcl();
        if (m == NULL)
            return ENOBUFS;
    }
    KASSERT(m->m_ext.ext_size >= (NPE_FRAME_SIZE_DEFAULT + ETHER_ALIGN));
    m->m_pkthdr.len = m->m_len = NPE_FRAME_SIZE_DEFAULT;
    /* backload payload and align ip hdr */
    m->m_data = m->m_ext.ext_buf + (m->m_ext.ext_size
        - (NPE_FRAME_SIZE_DEFAULT + ETHER_ALIGN));
    error = bus_dmamap_load_mbuf(sc->sc_dt, npe->ix_map, m,
        BUS_DMA_READ|BUS_DMA_NOWAIT);
    if (error != 0) {
        m_freem(m);
        return error;
    }
    hw = npe->ix_hw;
    hw->ix_ne[0].data = htobe32(npe->ix_map->dm_segs[0].ds_addr);
    /* NB: NPE requires length be a multiple of 64 */
    /* NB: buffer length is shifted in word */
    hw->ix_ne[0].len = htobe32(npe->ix_map->dm_segs[0].ds_len << 16);
    hw->ix_ne[0].next = 0;
    npe->ix_m = m;
    /* Flush the memory in the mbuf */
    bus_dmamap_sync(sc->sc_dt, npe->ix_map, 0, npe->ix_map->dm_mapsize,
        BUS_DMASYNC_PREREAD);
    return 0;
}
/*
 * RX q processing for a specific NPE.  Claim entries
 * from the hardware queue and pass the frames up the
 * stack.  Pass the rx buffers to the free list.
 */
static void
npe_rxdone(int qid, void *arg)
{
#define P2V(a, dma) \
    &(dma)->buf[((a) - (dma)->buf_phys) / sizeof(struct npehwbuf)]
    struct npe_softc *sc = arg;
    struct npedma *dma = &sc->rxdma;
    uint32_t entry;

    while (ixpqmgr_qread(qid, &entry) == 0) {
        struct npebuf *npe = P2V(NPE_QM_Q_ADDR(entry), dma);
        struct mbuf *m;

        DPRINTF(sc, "%s: entry 0x%x neaddr 0x%x ne_len 0x%x\n",
            __func__, entry, npe->ix_neaddr, npe->ix_hw->ix_ne[0].len);
        if (RND_ENABLED(&sc->rnd_source))
            rnd_add_uint32(&sc->rnd_source, entry);
        /*
         * Allocate a new mbuf to replenish the rx buffer.
         * If doing so fails we drop the rx'd frame so we
         * can reuse the previous mbuf.  When we're able to
         * allocate a new mbuf dispatch the mbuf w/ rx'd
         * data up the stack and replace it with the newly
         * allocated one.
         */
        m = npe_getcl();
        if (m != NULL) {
            struct mbuf *mrx = npe->ix_m;
            struct npehwbuf *hw = npe->ix_hw;
            struct ifnet *ifp = &sc->sc_ethercom.ec_if;

            /* Flush mbuf memory for rx'd data */
            bus_dmamap_sync(sc->sc_dt, npe->ix_map, 0,
                npe->ix_map->dm_mapsize, BUS_DMASYNC_POSTREAD);

            /* XXX flush hw buffer; works now 'cuz coherent */
            /* set m_len etc. per rx frame size */
            mrx->m_len = be32toh(hw->ix_ne[0].len) & 0xffff;
            mrx->m_pkthdr.len = mrx->m_len;
            mrx->m_pkthdr.rcvif = ifp;
            /* Don't add M_HASFCS. See below */

            if (mrx->m_pkthdr.len < sizeof(struct ether_header)) {
                log(LOG_INFO, "%s: too short frame (len=%d)\n",
                    sc->sc_dev.dv_xname, mrx->m_pkthdr.len);
                /* Back out "newly allocated" mbuf. */
                m_freem(m);
                goto drop;
            }
            if ((ifp->if_flags & IFF_PROMISC) == 0) {
                struct ether_header *eh;

                /*
                 * Workaround for "Non-Intel XScale Technology
                 * Errata" No. 29.  AA:BB:CC:DD:EE:xF's packet
                 * matches the filter (both unicast and
                 * multicast).
                 */
                eh = mtod(mrx, struct ether_header *);
                if (ETHER_IS_MULTICAST(eh->ether_dhost) == 0) {
                    /* unicast */
                    if (sc->sc_enaddr[5] != eh->ether_dhost[5]) {
                        printf("discard it\n");
                        /*
                         * Back out "newly allocated"
                         * mbuf.
                         */
                        m_freem(m);
                        goto drop;
                    }
                } else if (memcmp(eh->ether_dhost,
                    etherbroadcastaddr, 6) == 0) {
                    /* Always accept broadcast packet*/
                } else {
                    /* multicast */
                    struct ethercom *ec = &sc->sc_ethercom;
                    struct ether_multi *enm;
                    struct ether_multistep step;

                    ETHER_FIRST_MULTI(step, ec, enm);
                    while (enm != NULL) {
                        uint64_t lowint, highint, dest;

                        lowint = MAC2UINT64(enm->enm_addrlo);
                        highint = MAC2UINT64(enm->enm_addrhi);
                        dest = MAC2UINT64(eh->ether_dhost);

                        printf("%llx\n", lowint);
                        printf("%llx\n", dest);
                        printf("%llx\n", highint);

                        if ((lowint <= dest) && (dest <= highint))
                            break;
                        ETHER_NEXT_MULTI(step, enm);
                    }
                    if (enm == NULL) {
                        printf("discard it(M)\n");
                        /*
                         * Back out "newly allocated"
                         * mbuf.
                         */
                        m_freem(m);
                        goto drop;
                    }
                }
            }
            if (mrx->m_pkthdr.len > NPE_FRAME_SIZE_DEFAULT) {
                log(LOG_INFO, "%s: oversized frame (len=%d)\n",
                    sc->sc_dev.dv_xname, mrx->m_pkthdr.len);
                /* Back out "newly allocated" mbuf. */
                m_freem(m);
                goto drop;
            }

            /*
             * Trim the FCS.
             * NPE always adds the FCS by this driver's setting,
             * so we always trim it here and not add M_HASFCS.
             */
            m_adj(mrx, -ETHER_CRC_LEN);

            /*
             * Tap off here if there is a bpf listener.
             */
            if (__predict_false(ifp->if_bpf))
                bpf_mtap(ifp->if_bpf, mrx);
            ifp->if_input(ifp, mrx);
        } else {
 drop:
            /* discard frame and re-use mbuf */
            m = npe->ix_m;
        }
        if (npe_rxbuf_init(sc, npe, m) == 0) {
            /* return npe buf to rx free list */
            ixpqmgr_qwrite(sc->rx_freeqid, npe->ix_neaddr);
        } else {
            /* XXX should not happen */
        }
    }
#undef P2V
}
static void
npe_startxmit(struct npe_softc *sc)
{
    struct npedma *dma = &sc->txdma;
    int i;

    sc->tx_free = NULL;
    for (i = 0; i < dma->nbuf; i++) {
        struct npebuf *npe = &dma->buf[i];
        if (npe->ix_m != NULL) {
            /* NB: should not happen */
            printf("%s: %s: free mbuf at entry %u\n",
                sc->sc_dev.dv_xname, __func__, i);
            m_freem(npe->ix_m);
            npe->ix_m = NULL;
        }
        npe->ix_next = sc->tx_free;
        sc->tx_free = npe;
    }
}
static void
npe_startrecv(struct npe_softc *sc)
{
    struct npedma *dma = &sc->rxdma;
    struct npebuf *npe;
    int i;

    for (i = 0; i < dma->nbuf; i++) {
        npe = &dma->buf[i];
        npe_rxbuf_init(sc, npe, npe->ix_m);
        /* set npe buf on rx free list */
        ixpqmgr_qwrite(sc->rx_freeqid, npe->ix_neaddr);
    }
}
static void
npeinit_macreg(struct npe_softc *sc)
{

    WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_RESET);
    DELAY(NPE_MAC_RESET_DELAY);
    /* configure MAC to generate MDC clock */
    WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_MDC_EN);

    /* disable transmitter and receiver in the MAC */
    WR4(sc, NPE_MAC_RX_CNTRL1,
        RD4(sc, NPE_MAC_RX_CNTRL1) &~ NPE_RX_CNTRL1_RX_EN);
    WR4(sc, NPE_MAC_TX_CNTRL1,
        RD4(sc, NPE_MAC_TX_CNTRL1) &~ NPE_TX_CNTRL1_TX_EN);

    /*
     * Set the MAC core registers.
     */
    WR4(sc, NPE_MAC_INT_CLK_THRESH, 0x1);   /* clock ratio: for ipx4xx */
    WR4(sc, NPE_MAC_TX_CNTRL2,      0xf);   /* max retries */
    WR4(sc, NPE_MAC_RANDOM_SEED,    0x8);   /* LFSR back-off seed */
    /* thresholds determined by NPE firmware FS */
    WR4(sc, NPE_MAC_THRESH_P_EMPTY, 0x12);
    WR4(sc, NPE_MAC_THRESH_P_FULL,  0x30);
    WR4(sc, NPE_MAC_BUF_SIZE_TX, NPE_MAC_BUF_SIZE_TX_DEFAULT);
                                            /* tx fifo threshold (bytes) */
    WR4(sc, NPE_MAC_TX_DEFER,       0x15);  /* for single deferral */
    WR4(sc, NPE_MAC_RX_DEFER,       0x16);  /* deferral on inter-frame gap*/
    WR4(sc, NPE_MAC_TX_TWO_DEFER_1, 0x8);   /* for 2-part deferral */
    WR4(sc, NPE_MAC_TX_TWO_DEFER_2, 0x7);   /* for 2-part deferral */
    WR4(sc, NPE_MAC_SLOT_TIME, NPE_MAC_SLOT_TIME_MII_DEFAULT);
                                            /* assumes MII mode */
    WR4(sc, NPE_MAC_TX_CNTRL1,
          NPE_TX_CNTRL1_RETRY       /* retry failed xmits */
        | NPE_TX_CNTRL1_FCS_EN      /* append FCS */
        | NPE_TX_CNTRL1_2DEFER      /* 2-part deferral */
        | NPE_TX_CNTRL1_PAD_EN);    /* pad runt frames */
    /* XXX pad strip? */
    WR4(sc, NPE_MAC_RX_CNTRL1,
          NPE_RX_CNTRL1_CRC_EN      /* include CRC/FCS */
        | NPE_RX_CNTRL1_PAUSE_EN);  /* ena pause frame handling */
    WR4(sc, NPE_MAC_RX_CNTRL2, 0);
}
static void
npeinit_resetcb(void *xsc)
{
    struct npe_softc *sc = xsc;
    struct ifnet *ifp = &sc->sc_ethercom.ec_if;
    uint32_t msg[2];

    ifp->if_oerrors++;
    npeinit_locked(sc);

    msg[0] = NPE_NOTIFYMACRECOVERYDONE << NPE_MAC_MSGID_SHL
        | (npeconfig[sc->sc_unit].macport << NPE_MAC_PORTID_SHL);
    msg[1] = 0;
    ixpnpe_sendandrecvmsg(sc->sc_npe, msg, msg);
}
/*
 * Reset and initialize the chip.
 */
static void
npeinit_locked(void *xsc)
{
    struct npe_softc *sc = xsc;
    struct ifnet *ifp = &sc->sc_ethercom.ec_if;

    /* Cancel any pending I/O. */
    npestop(ifp, 0);

    /* Reset the chip to a known state. */
    npeinit_macreg(sc);
    npe_setmac(sc, CLLADDR(ifp->if_sadl));
    ether_mediachange(ifp);
    npe_setmcast(sc);

    npe_startxmit(sc);
    npe_startrecv(sc);

    ifp->if_flags |= IFF_RUNNING;
    ifp->if_flags &= ~IFF_OACTIVE;
    ifp->if_timer = 0;      /* just in case */

    /* enable transmitter and receiver in the MAC */
    WR4(sc, NPE_MAC_RX_CNTRL1,
        RD4(sc, NPE_MAC_RX_CNTRL1) | NPE_RX_CNTRL1_RX_EN);
    WR4(sc, NPE_MAC_TX_CNTRL1,
        RD4(sc, NPE_MAC_TX_CNTRL1) | NPE_TX_CNTRL1_TX_EN);

    callout_reset(&sc->sc_tick_ch, hz, npe_tick, sc);
}
static int
npeinit(struct ifnet *ifp)
{
    struct npe_softc *sc = ifp->if_softc;

    npeinit_locked(sc);

    return 0;
}
/*
 * Defragment an mbuf chain, returning at most maxfrags separate
 * mbufs+clusters.  If this is not possible NULL is returned and
 * the original mbuf chain is left in its present (potentially
 * modified) state.  We use two techniques: collapsing consecutive
 * mbufs and replacing consecutive mbufs by a cluster.
 */
static __inline struct mbuf *
npe_defrag(struct mbuf *m0)
{
    struct mbuf *m;

    MGETHDR(m, M_DONTWAIT, MT_DATA);
    if (m == NULL)
        return NULL;
    M_COPY_PKTHDR(m, m0);

    if ((m->m_len = m0->m_pkthdr.len) > MHLEN) {
        MCLGET(m, M_DONTWAIT);
        if ((m->m_flags & M_EXT) == 0) {
            m_freem(m);
            return NULL;
        }
    }

    m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
    m_freem(m0);

    return m;
}
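/*
 * NB: this version simply collapses the whole chain into a single mbuf
 * (cluster-backed when the packet is larger than MHLEN).  It is intended
 * for use by npestart() when bus_dmamap_load_mbuf() fails with EFBIG,
 * so that the retried load fits within the NPE's segment limit.
 */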
/*
 * Dequeue packets and place on the h/w transmit queue.
 */
static void
npestart(struct ifnet *ifp)
{
    struct npe_softc *sc = ifp->if_softc;
    struct npebuf *npe;
    struct npehwbuf *hw;
    struct mbuf *m;
    bus_dma_segment_t *segs;
    int nseg, len, error, i;
    uint32_t next;

    if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
        return;

    while (sc->tx_free != NULL) {
        IFQ_DEQUEUE(&ifp->if_snd, m);
        if (m == NULL)
            break;
        npe = sc->tx_free;
        error = bus_dmamap_load_mbuf(sc->sc_dt, npe->ix_map, m,
            BUS_DMA_WRITE|BUS_DMA_NOWAIT);
        if (error == EFBIG) {
            struct mbuf *n = npe_defrag(m);
            if (n == NULL) {
                printf("%s: %s: too many fragments\n",
                    sc->sc_dev.dv_xname, __func__);
                m_freem(m);
                ifp->if_oerrors++;
                continue;
            }
            m = n;
            error = bus_dmamap_load_mbuf(sc->sc_dt, npe->ix_map,
                m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
        }
        if (error != 0) {
            printf("%s: %s: error %u\n",
                sc->sc_dev.dv_xname, __func__, error);
            m_freem(m);
            ifp->if_oerrors++;
            continue;
        }
        sc->tx_free = npe->ix_next;

        /*
         * Tap off here if there is a bpf listener.
         */
        if (__predict_false(ifp->if_bpf))
            bpf_mtap(ifp->if_bpf, m);

        bus_dmamap_sync(sc->sc_dt, npe->ix_map, 0,
            npe->ix_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

        npe->ix_m = m;
        hw = npe->ix_hw;
        len = m->m_pkthdr.len;
        nseg = npe->ix_map->dm_nsegs;
        segs = npe->ix_map->dm_segs;
        next = npe->ix_neaddr + sizeof(hw->ix_ne[0]);
        for (i = 0; i < nseg; i++) {
            hw->ix_ne[i].data = htobe32(segs[i].ds_addr);
            hw->ix_ne[i].len = htobe32((segs[i].ds_len<<16) | len);
            hw->ix_ne[i].next = htobe32(next);

            len = 0;    /* zero for segments > 1 */
            next += sizeof(hw->ix_ne[0]);
        }
        hw->ix_ne[i-1].next = 0;    /* zero last in chain */
        /* XXX flush descriptor instead of using uncached memory */

        DPRINTF(sc, "%s: qwrite(%u, 0x%x) ne_data %x ne_len 0x%x\n",
            __func__, sc->tx_qid, npe->ix_neaddr,
            hw->ix_ne[0].data, hw->ix_ne[0].len);
        /* stick it on the tx q */
        /* XXX add vlan priority */
        ixpqmgr_qwrite(sc->tx_qid, npe->ix_neaddr);
    }
    if (sc->tx_free == NULL)
        ifp->if_flags |= IFF_OACTIVE;
}
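/*
 * NB: the loop above builds the NPE's scatter/gather chain in the
 * npehwbuf: ix_ne[i].data holds the physical segment address,
 * ix_ne[i].len packs the segment length in the upper 16 bits (with the
 * total frame length in the lower 16 bits of the first entry only), and
 * ix_ne[i].next chains to the physical address of the following entry,
 * with 0 terminating the chain.  All fields are stored big-endian as
 * the NPE firmware expects.
 */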
static void
npe_stopxmit(struct npe_softc *sc)
{
    struct npedma *dma = &sc->txdma;
    int i;

    for (i = 0; i < dma->nbuf; i++) {
        struct npebuf *npe = &dma->buf[i];

        if (npe->ix_m != NULL) {
            bus_dmamap_unload(sc->sc_dt, npe->ix_map);
            m_freem(npe->ix_m);
            npe->ix_m = NULL;
        }
    }
}
static void
npe_stoprecv(struct npe_softc *sc)
{
    struct npedma *dma = &sc->rxdma;
    int i;

    for (i = 0; i < dma->nbuf; i++) {
        struct npebuf *npe = &dma->buf[i];

        if (npe->ix_m != NULL) {
            bus_dmamap_unload(sc->sc_dt, npe->ix_map);
            m_freem(npe->ix_m);
            npe->ix_m = NULL;
        }
    }
}
/*
 * Turn off interrupts, and stop the nic.
 */
static void
npestop(struct ifnet *ifp, int disable)
{
    struct npe_softc *sc = ifp->if_softc;

    /* disable transmitter and receiver in the MAC */
    WR4(sc, NPE_MAC_RX_CNTRL1,
        RD4(sc, NPE_MAC_RX_CNTRL1) &~ NPE_RX_CNTRL1_RX_EN);
    WR4(sc, NPE_MAC_TX_CNTRL1,
        RD4(sc, NPE_MAC_TX_CNTRL1) &~ NPE_TX_CNTRL1_TX_EN);

    callout_stop(&sc->sc_tick_ch);

    npe_stopxmit(sc);
    npe_stoprecv(sc);
    /* XXX go into loopback & drain q's? */
    /* XXX but beware of disabling tx above */

    /*
     * The MAC core rx/tx disable may leave the MAC hardware in an
     * unpredictable state.  A hw reset is executed before resetting
     * all the MAC parameters to a known value.
     */
    WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_RESET);
    DELAY(NPE_MAC_RESET_DELAY);
    WR4(sc, NPE_MAC_INT_CLK_THRESH, NPE_MAC_INT_CLK_THRESH_DEFAULT);
    WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_MDC_EN);

    ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}
static void
npewatchdog(struct ifnet *ifp)
{
    struct npe_softc *sc = ifp->if_softc;

    printf("%s: device timeout\n", sc->sc_dev.dv_xname);
    ifp->if_oerrors++;
    npeinit(ifp);
}
static int
npeioctl(struct ifnet *ifp, u_long cmd, void *data)
{
    struct npe_softc *sc = ifp->if_softc;
    struct ifreq *ifr = (struct ifreq *) data;
    int error = 0;

    switch (cmd) {
    case SIOCSIFMEDIA:
    case SIOCGIFMEDIA:
        /* Flow control requires full-duplex mode. */
        if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
            (ifr->ifr_media & IFM_FDX) == 0)
            ifr->ifr_media &= ~IFM_ETH_FMASK;
        if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
            if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
                /* We can do both TXPAUSE and RXPAUSE. */
                ifr->ifr_media |=
                    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
            }
            sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
        }
        error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
        break;
    case SIOCSIFFLAGS:
        if ((ifp->if_flags & (IFF_UP|IFF_RUNNING)) == IFF_RUNNING) {
            /*
             * If interface is marked down and it is running,
             * then stop and disable it.
             */
            (*ifp->if_stop)(ifp, 1);
        } else if ((ifp->if_flags & (IFF_UP|IFF_RUNNING)) == IFF_UP) {
            /*
             * If interface is marked up and it is stopped, then
             * start it.
             */
            error = (*ifp->if_init)(ifp);
        } else if ((ifp->if_flags & IFF_UP) != 0) {
            int diff;

            /* Up (AND RUNNING). */

            diff = (ifp->if_flags ^ sc->sc_if_flags)
                & (IFF_PROMISC|IFF_ALLMULTI);
            if ((diff & (IFF_PROMISC|IFF_ALLMULTI)) != 0) {
                /*
                 * If the difference between the last flag and
                 * the new flag is only IFF_PROMISC or
                 * IFF_ALLMULTI, set the multicast filter only
                 * (don't reset to prevent link down).
                 */
                npe_setmcast(sc);
            } else {
                /*
                 * Reset the interface to pick up changes in
                 * any other flags that affect the hardware
                 * state.
                 */
                error = (*ifp->if_init)(ifp);
            }
        }
        sc->sc_if_flags = ifp->if_flags;
        break;
    default:
        error = ether_ioctl(ifp, cmd, data);
        if (error == ENETRESET) {
            /*
             * Multicast list has changed; set the hardware filter
             * accordingly.
             */
            npe_setmcast(sc);
            error = 0;
        }
        break;
    }

    return error;
}
1519 npe_setrxqosentry(struct npe_softc
*sc
, int classix
, int trafclass
, int qid
)
1521 int npeid
= npeconfig
[sc
->sc_unit
].npeid
;
1524 msg
[0] = (NPE_SETRXQOSENTRY
<< NPE_MAC_MSGID_SHL
) | (npeid
<< 20)
1526 msg
[1] = (trafclass
<< 24) | (1 << 23) | (qid
<< 16) | (qid
<< 4);
1527 return ixpnpe_sendandrecvmsg(sc
->sc_npe
, msg
, msg
);
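/*
 * NB: in the message built above, msg[0] carries the SETRXQOSENTRY
 * opcode, the NPE id and the classification index, while msg[1] packs
 * the traffic class, an enable bit and the target queue id (written
 * into two fields).  This only describes the bit-packing visible here,
 * not the full firmware message ABI.
 */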
/*
 * Update and reset the statistics in the NPE.
 */
static int
npe_updatestats(struct npe_softc *sc)
{
    uint32_t msg[2];

    msg[0] = NPE_RESETSTATS << NPE_MAC_MSGID_SHL;
    msg[1] = sc->sc_stats_phys;     /* physical address of stat block */
    return ixpnpe_sendmsg(sc->sc_npe, msg);     /* NB: no recv */
}
/*
 * Get the current statistics block.
 */
static int
npe_getstats(struct npe_softc *sc)
{
    uint32_t msg[2];

    msg[0] = NPE_GETSTATS << NPE_MAC_MSGID_SHL;
    msg[1] = sc->sc_stats_phys;     /* physical address of stat block */
    return ixpnpe_sendandrecvmsg(sc->sc_npe, msg, msg);
}
/*
 * Query the image id of the loaded firmware.
 */
static uint32_t
npe_getimageid(struct npe_softc *sc)
{
    uint32_t msg[2];

    msg[0] = NPE_GETSTATUS << NPE_MAC_MSGID_SHL;
    msg[1] = 0;
    return ixpnpe_sendandrecvmsg(sc->sc_npe, msg, msg) == 0 ? msg[1] : 0;
}
/*
 * Enable/disable loopback.
 */
static int
npe_setloopback(struct npe_softc *sc, int ena)
{
    uint32_t msg[2];

    msg[0] = (NPE_SETLOOPBACK << NPE_MAC_MSGID_SHL) | (ena != 0);
    msg[1] = 0;
    return ixpnpe_sendandrecvmsg(sc->sc_npe, msg, msg);
}
/*
 * MII bus support routines.
 *
 * NB: ixp425 has one PHY per NPE
 */
static uint32_t
npe_mii_mdio_read(struct npe_softc *sc, int reg)
{
#define MII_RD4(sc, reg)    bus_space_read_4(sc->sc_iot, sc->sc_miih, reg)
    uint32_t v;

    /* NB: registers are known to be sequential */
    v =  (MII_RD4(sc, reg+0) & 0xff) << 0;
    v |= (MII_RD4(sc, reg+4) & 0xff) << 8;
    v |= (MII_RD4(sc, reg+8) & 0xff) << 16;
    v |= (MII_RD4(sc, reg+12) & 0xff) << 24;
    return v;
}

static void
npe_mii_mdio_write(struct npe_softc *sc, int reg, uint32_t cmd)
{
#define MII_WR4(sc, reg, v) \
    bus_space_write_4(sc->sc_iot, sc->sc_miih, reg, v)

    /* NB: registers are known to be sequential */
    MII_WR4(sc, reg+0, cmd & 0xff);
    MII_WR4(sc, reg+4, (cmd >> 8) & 0xff);
    MII_WR4(sc, reg+8, (cmd >> 16) & 0xff);
    MII_WR4(sc, reg+12, (cmd >> 24) & 0xff);
}

static int
npe_mii_mdio_wait(struct npe_softc *sc)
{
#define MAXTRIES    100 /* XXX */
    uint32_t v;
    int i;

    for (i = 0; i < MAXTRIES; i++) {
        v = npe_mii_mdio_read(sc, NPE_MAC_MDIO_CMD);
        if ((v & NPE_MII_GO) == 0)
            return 1;
    }
    return 0;       /* NB: timeout */
}
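/*
 * NB: the MDIO command/status "registers" are four consecutive 32-bit
 * MAC registers each holding one byte of the value, which is why the
 * helpers above assemble and scatter the 32-bit word a byte at a time.
 * A PHY access writes the command word to NPE_MAC_MDIO_CMD with the GO
 * bit set, polls until NPE_MII_GO clears (npe_mii_mdio_wait), and then,
 * for reads, fetches the result from NPE_MAC_MDIO_STS.
 */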
static int
npe_miibus_readreg(struct device *self, int phy, int reg)
{
    struct npe_softc *sc = (void *)self;
    uint32_t v;

    if (sc->sc_phy > IXPNPECF_PHY_DEFAULT && phy != sc->sc_phy)
        return 0xffff;
    v = (phy << NPE_MII_ADDR_SHL) | (reg << NPE_MII_REG_SHL)
        | NPE_MII_GO;
    npe_mii_mdio_write(sc, NPE_MAC_MDIO_CMD, v);
    if (npe_mii_mdio_wait(sc))
        v = npe_mii_mdio_read(sc, NPE_MAC_MDIO_STS);
    else
        v = 0xffff | NPE_MII_READ_FAIL;
    return (v & NPE_MII_READ_FAIL) ? 0xffff : (v & 0xffff);
}
static void
npe_miibus_writereg(struct device *self, int phy, int reg, int data)
{
    struct npe_softc *sc = (void *)self;
    uint32_t v;

    if (sc->sc_phy > IXPNPECF_PHY_DEFAULT && phy != sc->sc_phy)
        return;
    v = (phy << NPE_MII_ADDR_SHL) | (reg << NPE_MII_REG_SHL)
        | data | NPE_MII_WRITE
        | NPE_MII_GO;
    npe_mii_mdio_write(sc, NPE_MAC_MDIO_CMD, v);
    /* XXX complain about timeout */
    (void) npe_mii_mdio_wait(sc);
}
static void
npe_miibus_statchg(struct device *self)
{
    struct npe_softc *sc = (void *)self;
    uint32_t tx1, rx1;
    uint32_t randoff;
    struct timeval now;

    /* sync MAC duplex state */
    tx1 = RD4(sc, NPE_MAC_TX_CNTRL1);
    rx1 = RD4(sc, NPE_MAC_RX_CNTRL1);
    if (sc->sc_mii.mii_media_active & IFM_FDX) {
        WR4(sc, NPE_MAC_SLOT_TIME, NPE_MAC_SLOT_TIME_MII_DEFAULT);
        tx1 &= ~NPE_TX_CNTRL1_DUPLEX;
        rx1 |= NPE_RX_CNTRL1_PAUSE_EN;
    } else {
        microtime(&now);
        randoff = (RD4(sc, NPE_MAC_UNI_ADDR_6) ^ now.tv_usec)
            & 0x7f;
        WR4(sc, NPE_MAC_SLOT_TIME, NPE_MAC_SLOT_TIME_MII_DEFAULT
            + randoff);
        tx1 |= NPE_TX_CNTRL1_DUPLEX;
        rx1 &= ~NPE_RX_CNTRL1_PAUSE_EN;
    }
    WR4(sc, NPE_MAC_RX_CNTRL1, rx1);
    WR4(sc, NPE_MAC_TX_CNTRL1, tx1);
}