1 /* $NetBSD: if_gfe.c,v 1.32 2009/05/12 12:18:45 cegger Exp $ */
4 * Copyright (c) 2002 Allegro Networks, Inc., Wasabi Systems, Inc.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed for the NetBSD Project by
18 * Allegro Networks, Inc., and Wasabi Systems, Inc.
19 * 4. The name of Allegro Networks, Inc. may not be used to endorse
20 * or promote products derived from this software without specific prior
22 * 5. The name of Wasabi Systems, Inc. may not be used to endorse
23 * or promote products derived from this software without specific prior
26 * THIS SOFTWARE IS PROVIDED BY ALLEGRO NETWORKS, INC. AND
27 * WASABI SYSTEMS, INC. ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
28 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
29 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
30 * IN NO EVENT SHALL EITHER ALLEGRO NETWORKS, INC. OR WASABI SYSTEMS, INC.
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
41 * if_gfe.c -- GT ethernet MAC driver
44 #include <sys/cdefs.h>
45 __KERNEL_RCSID(0, "$NetBSD: if_gfe.c,v 1.32 2009/05/12 12:18:45 cegger Exp $");
50 #include <sys/param.h>
51 #include <sys/types.h>
52 #include <sys/inttypes.h>
53 #include <sys/queue.h>
55 #include <uvm/uvm_extern.h>
57 #include <sys/callout.h>
58 #include <sys/device.h>
59 #include <sys/errno.h>
60 #include <sys/ioctl.h>
62 #include <sys/socket.h>
67 #include <net/if_dl.h>
68 #include <net/if_ether.h>
69 #include <net/if_media.h>
72 #include <netinet/in.h>
73 #include <netinet/if_inarp.h>
79 #include <dev/mii/miivar.h>
81 #include <dev/marvell/gtintrreg.h>
82 #include <dev/marvell/gtethreg.h>
84 #include <dev/marvell/gtvar.h>
85 #include <dev/marvell/if_gfevar.h>
/*
 * Register access helpers: read/write a 32-bit ethernet MAC register
 * through the per-unit subregion handle (sc_memh).  "ETH__ ## reg"
 * token-pastes the register name onto its offset constant from
 * <dev/marvell/gtethreg.h>.
 */
#define	GE_READ(sc, reg) \
	bus_space_read_4((sc)->sc_gt_memt, (sc)->sc_memh, ETH__ ## reg)
#define	GE_WRITE(sc, reg, v) \
	bus_space_write_4((sc)->sc_gt_memt, (sc)->sc_memh, ETH__ ## reg, (v))
/*
 * Debug tracing macros.  When debugging is compiled in, GE_DPRINTF prints
 * only if the interface has IFF_DEBUG set; GE_FUNC_ENTER/GE_FUNC_EXIT
 * bracket function traces as "[name" ... "]".  Otherwise they expand to
 * nothing.
 *
 * NOTE(review): the #if/#else scaffolding and the printf line were lost in
 * the mangled source dump and have been reconstructed -- verify against
 * NetBSD if_gfe.c rev 1.32.
 */
#if defined(GE_DEBUG)
#define	GE_DPRINTF(sc, a)	do \
				  if ((sc)->sc_ec.ec_if.if_flags & IFF_DEBUG) \
				    printf a; \
				while (0)
#define	GE_FUNC_ENTER(sc, func)	GE_DPRINTF(sc, ("[" func))
#define	GE_FUNC_EXIT(sc, str)	GE_DPRINTF(sc, (str "]"))
#else
#define	GE_DPRINTF(sc, a)	do { } while (0)
#define	GE_FUNC_ENTER(sc, func)	do { } while (0)
#define	GE_FUNC_EXIT(sc, str)	do { } while (0)
#endif
/*
 * Driver state-machine operations: gfe_whack() starts/restarts/
 * reconfigures/stops the interface; gfe_hash_entry_op() adds or removes
 * entries in the address-filter hash table.
 *
 * NOTE(review): the "enum ... {" headers and closing braces were lost in
 * the mangled source dump and have been reconstructed -- verify names
 * against NetBSD if_gfe.c rev 1.32.
 */
enum gfe_whack_op {
	GE_WHACK_START,		GE_WHACK_RESTART,
	GE_WHACK_CHANGE,	GE_WHACK_STOP
};

enum gfe_hash_op {
	GE_HASH_ADD,		GE_HASH_REMOVE,
};
/*
 * Descriptor byte-order conversion.  The GT's DMA descriptors can be laid
 * out big- or little-endian; convert CPU-order 32-bit values to and from
 * the descriptor byte order.
 *
 * NOTE(review): the surrounding #if/#else was lost in the mangled source
 * dump and has been reconstructed -- verify the condition against NetBSD
 * if_gfe.c rev 1.32.
 */
#if BYTE_ORDER == BIG_ENDIAN
#define	htogt32(a)		htobe32(a)
#define	gt32toh(a)		be32toh(a)
#else
#define	htogt32(a)		htole32(a)
#define	gt32toh(a)		le32toh(a)
#endif
/*
 * Descriptor cache-synchronization helpers.  Each syncs exactly one
 * descriptor (index n) within a queue's descriptor DMA map.  The PRE
 * variants hand a descriptor to the device; the POST variants reclaim it
 * before the CPU examines it.
 *
 * NOTE(review): the trailing "(ops))" continuation of GE_RXDSYNC and
 * GE_TXDSYNC was lost in the mangled source dump and has been
 * reconstructed.
 */
#define	GE_RXDSYNC(sc, rxq, n, ops) \
	bus_dmamap_sync((sc)->sc_dmat, (rxq)->rxq_desc_mem.gdm_map, \
	    (n) * sizeof((rxq)->rxq_descs[0]), sizeof((rxq)->rxq_descs[0]), \
	    (ops))
#define	GE_RXDPRESYNC(sc, rxq, n) \
	GE_RXDSYNC(sc, rxq, n, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)
#define	GE_RXDPOSTSYNC(sc, rxq, n) \
	GE_RXDSYNC(sc, rxq, n, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)

#define	GE_TXDSYNC(sc, txq, n, ops) \
	bus_dmamap_sync((sc)->sc_dmat, (txq)->txq_desc_mem.gdm_map, \
	    (n) * sizeof((txq)->txq_descs[0]), sizeof((txq)->txq_descs[0]), \
	    (ops))
#define	GE_TXDPRESYNC(sc, txq, n) \
	GE_TXDSYNC(sc, txq, n, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)
#define	GE_TXDPOSTSYNC(sc, txq, n) \
	GE_TXDSYNC(sc, txq, n, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)
147 STATIC
int gfe_match (device_t
, cfdata_t
, void *);
148 STATIC
void gfe_attach (device_t
, device_t
, void *);
150 STATIC
int gfe_dmamem_alloc(struct gfe_softc
*, struct gfe_dmamem
*, int,
152 STATIC
void gfe_dmamem_free(struct gfe_softc
*, struct gfe_dmamem
*);
154 STATIC
int gfe_ifioctl (struct ifnet
*, u_long
, void *);
155 STATIC
void gfe_ifstart (struct ifnet
*);
156 STATIC
void gfe_ifwatchdog (struct ifnet
*);
158 STATIC
int gfe_mii_read (device_t
, int, int);
159 STATIC
void gfe_mii_write (device_t
, int, int, int);
160 STATIC
void gfe_mii_statchg (device_t
);
162 STATIC
void gfe_tick(void *arg
);
164 STATIC
void gfe_tx_restart(void *);
165 STATIC
int gfe_tx_enqueue(struct gfe_softc
*, enum gfe_txprio
);
166 STATIC
uint32_t gfe_tx_done(struct gfe_softc
*, enum gfe_txprio
, uint32_t);
167 STATIC
void gfe_tx_cleanup(struct gfe_softc
*, enum gfe_txprio
, int);
168 STATIC
int gfe_tx_txqalloc(struct gfe_softc
*, enum gfe_txprio
);
169 STATIC
int gfe_tx_start(struct gfe_softc
*, enum gfe_txprio
);
170 STATIC
void gfe_tx_stop(struct gfe_softc
*, enum gfe_whack_op
);
172 STATIC
void gfe_rx_cleanup(struct gfe_softc
*, enum gfe_rxprio
);
173 STATIC
void gfe_rx_get(struct gfe_softc
*, enum gfe_rxprio
);
174 STATIC
int gfe_rx_prime(struct gfe_softc
*);
175 STATIC
uint32_t gfe_rx_process(struct gfe_softc
*, uint32_t, uint32_t);
176 STATIC
int gfe_rx_rxqalloc(struct gfe_softc
*, enum gfe_rxprio
);
177 STATIC
int gfe_rx_rxqinit(struct gfe_softc
*, enum gfe_rxprio
);
178 STATIC
void gfe_rx_stop(struct gfe_softc
*, enum gfe_whack_op
);
180 STATIC
int gfe_intr(void *);
182 STATIC
int gfe_whack(struct gfe_softc
*, enum gfe_whack_op
);
184 STATIC
int gfe_hash_compute(struct gfe_softc
*, const uint8_t [ETHER_ADDR_LEN
]);
185 STATIC
int gfe_hash_entry_op(struct gfe_softc
*, enum gfe_hash_op
,
186 enum gfe_rxprio
, const uint8_t [ETHER_ADDR_LEN
]);
187 STATIC
int gfe_hash_multichg(struct ethercom
*, const struct ether_multi
*,
189 STATIC
int gfe_hash_fill(struct gfe_softc
*);
190 STATIC
int gfe_hash_alloc(struct gfe_softc
*);
192 /* Linkup to the rest of the kernel */
193 CFATTACH_DECL(gfe
, sizeof(struct gfe_softc
),
194 gfe_match
, gfe_attach
, NULL
, NULL
);
196 extern struct cfdriver gfe_cd
;
199 gfe_match(device_t parent
, cfdata_t cf
, void *aux
)
201 struct gt_softc
*gt
= (struct gt_softc
*) parent
;
202 struct gt_attach_args
*ga
= aux
;
205 if (!GT_ETHEROK(gt
, ga
, &gfe_cd
))
208 if (gtget_macaddr(gt
, ga
->ga_unit
, enaddr
) < 0)
211 if (enaddr
[0] == 0 && enaddr
[1] == 0 && enaddr
[2] == 0 &&
212 enaddr
[3] == 0 && enaddr
[4] == 0 && enaddr
[5] == 0)
219 * Attach this instance, and then all the sub-devices
222 gfe_attach(device_t parent
, device_t self
, void *aux
)
224 struct gt_attach_args
* const ga
= aux
;
225 struct gt_softc
* const gt
= device_private(parent
);
226 struct gfe_softc
* const sc
= device_private(self
);
227 struct ifnet
* const ifp
= &sc
->sc_ec
.ec_if
;
234 GT_ETHERFOUND(gt
, ga
);
236 sc
->sc_gt_memt
= ga
->ga_memt
;
237 sc
->sc_gt_memh
= ga
->ga_memh
;
238 sc
->sc_dmat
= ga
->ga_dmat
;
239 sc
->sc_macno
= ga
->ga_unit
;
241 if (bus_space_subregion(sc
->sc_gt_memt
, sc
->sc_gt_memh
,
242 ETH_BASE(sc
->sc_macno
), ETH_SIZE
, &sc
->sc_memh
)) {
243 aprint_error(": failed to map registers\n");
246 callout_init(&sc
->sc_co
, 0);
248 data
= bus_space_read_4(sc
->sc_gt_memt
, sc
->sc_gt_memh
, ETH_EPAR
);
249 phyaddr
= ETH_EPAR_PhyAD_GET(data
, sc
->sc_macno
);
251 gtget_macaddr(gt
, sc
->sc_macno
, enaddr
);
253 sc
->sc_pcr
= GE_READ(sc
, EPCR
);
254 sc
->sc_pcxr
= GE_READ(sc
, EPCXR
);
255 sc
->sc_intrmask
= GE_READ(sc
, EIMR
) | ETH_IR_MIIPhySTC
;
257 aprint_normal(": address %s", ether_sprintf(enaddr
));
260 aprint_normal(", pcr %#x, pcxr %#x", sc
->sc_pcr
, sc
->sc_pcxr
);
263 sc
->sc_pcxr
&= ~ETH_EPCXR_PRIOrx_Override
;
264 if (device_cfdata(&sc
->sc_dev
)->cf_flags
& 1) {
265 aprint_normal(", phy %d (rmii)", phyaddr
);
266 sc
->sc_pcxr
|= ETH_EPCXR_RMIIEn
;
268 aprint_normal(", phy %d (mii)", phyaddr
);
269 sc
->sc_pcxr
&= ~ETH_EPCXR_RMIIEn
;
271 if (device_cfdata(&sc
->sc_dev
)->cf_flags
& 2)
272 sc
->sc_flags
|= GE_NOFREE
;
273 sc
->sc_pcxr
&= ~(3 << 14);
274 sc
->sc_pcxr
|= (ETH_EPCXR_MFL_1536
<< 14);
276 if (sc
->sc_pcr
& ETH_EPCR_EN
) {
279 * Abort transmitter and receiver and wait for them to quiese
281 GE_WRITE(sc
, ESDCMR
, ETH_ESDCMR_AR
|ETH_ESDCMR_AT
);
284 } while (tries
-- > 0 && (GE_READ(sc
, ESDCMR
) & (ETH_ESDCMR_AR
|ETH_ESDCMR_AT
)));
287 sc
->sc_pcr
&= ~(ETH_EPCR_EN
| ETH_EPCR_RBM
| ETH_EPCR_PM
| ETH_EPCR_PBF
);
290 aprint_normal(", pcr %#x, pcxr %#x", sc
->sc_pcr
, sc
->sc_pcxr
);
294 * Now turn off the GT. If it didn't quiese, too ***ing bad.
296 GE_WRITE(sc
, EPCR
, sc
->sc_pcr
);
297 GE_WRITE(sc
, EIMR
, sc
->sc_intrmask
);
298 sdcr
= GE_READ(sc
, ESDCR
);
299 ETH_ESDCR_BSZ_SET(sdcr
, ETH_ESDCR_BSZ_4
);
300 sdcr
|= ETH_ESDCR_RIFB
;
301 GE_WRITE(sc
, ESDCR
, sdcr
);
302 sc
->sc_max_frame_length
= 1536;
305 sc
->sc_mii
.mii_ifp
= ifp
;
306 sc
->sc_mii
.mii_readreg
= gfe_mii_read
;
307 sc
->sc_mii
.mii_writereg
= gfe_mii_write
;
308 sc
->sc_mii
.mii_statchg
= gfe_mii_statchg
;
310 sc
->sc_ec
.ec_mii
= &sc
->sc_mii
;
311 ifmedia_init(&sc
->sc_mii
.mii_media
, 0, ether_mediachange
,
314 mii_attach(&sc
->sc_dev
, &sc
->sc_mii
, 0xffffffff, phyaddr
,
315 MII_OFFSET_ANY
, MIIF_NOISOLATE
);
316 if (LIST_FIRST(&sc
->sc_mii
.mii_phys
) == NULL
) {
317 ifmedia_add(&sc
->sc_mii
.mii_media
, IFM_ETHER
|IFM_NONE
, 0, NULL
);
318 ifmedia_set(&sc
->sc_mii
.mii_media
, IFM_ETHER
|IFM_NONE
);
320 ifmedia_set(&sc
->sc_mii
.mii_media
, IFM_ETHER
|IFM_AUTO
);
323 strlcpy(ifp
->if_xname
, device_xname(&sc
->sc_dev
), IFNAMSIZ
);
325 /* ifp->if_mowner = &sc->sc_mowner; */
326 ifp
->if_flags
= IFF_BROADCAST
| IFF_SIMPLEX
| IFF_MULTICAST
;
328 ifp
->if_flags
|= IFF_DEBUG
;
330 ifp
->if_ioctl
= gfe_ifioctl
;
331 ifp
->if_start
= gfe_ifstart
;
332 ifp
->if_watchdog
= gfe_ifwatchdog
;
334 if (sc
->sc_flags
& GE_NOFREE
) {
335 error
= gfe_rx_rxqalloc(sc
, GE_RXPRIO_HI
);
337 error
= gfe_rx_rxqalloc(sc
, GE_RXPRIO_MEDHI
);
339 error
= gfe_rx_rxqalloc(sc
, GE_RXPRIO_MEDLO
);
341 error
= gfe_rx_rxqalloc(sc
, GE_RXPRIO_LO
);
343 error
= gfe_tx_txqalloc(sc
, GE_TXPRIO_HI
);
345 error
= gfe_hash_alloc(sc
);
348 "%s: failed to allocate resources: %d\n",
349 ifp
->if_xname
, error
);
353 ether_ifattach(ifp
, enaddr
);
355 bpfattach(ifp
, DLT_EN10MB
, sizeof(struct ether_header
));
358 rnd_attach_source(&sc
->sc_rnd_source
, device_xname(self
), RND_TYPE_NET
, 0);
360 intr_establish(IRQ_ETH0
+ sc
->sc_macno
, IST_LEVEL
, IPL_NET
,
365 gfe_dmamem_alloc(struct gfe_softc
*sc
, struct gfe_dmamem
*gdm
, int maxsegs
,
366 size_t size
, int flags
)
369 GE_FUNC_ENTER(sc
, "gfe_dmamem_alloc");
371 KASSERT(gdm
->gdm_kva
== NULL
);
372 gdm
->gdm_size
= size
;
373 gdm
->gdm_maxsegs
= maxsegs
;
375 error
= bus_dmamem_alloc(sc
->sc_dmat
, gdm
->gdm_size
, PAGE_SIZE
,
376 gdm
->gdm_size
, gdm
->gdm_segs
, gdm
->gdm_maxsegs
, &gdm
->gdm_nsegs
,
381 error
= bus_dmamem_map(sc
->sc_dmat
, gdm
->gdm_segs
, gdm
->gdm_nsegs
,
382 gdm
->gdm_size
, &gdm
->gdm_kva
, flags
| BUS_DMA_NOWAIT
);
386 error
= bus_dmamap_create(sc
->sc_dmat
, gdm
->gdm_size
, gdm
->gdm_nsegs
,
387 gdm
->gdm_size
, 0, BUS_DMA_ALLOCNOW
|BUS_DMA_NOWAIT
, &gdm
->gdm_map
);
391 error
= bus_dmamap_load(sc
->sc_dmat
, gdm
->gdm_map
, gdm
->gdm_kva
,
392 gdm
->gdm_size
, NULL
, BUS_DMA_NOWAIT
);
396 /* invalidate from cache */
397 bus_dmamap_sync(sc
->sc_dmat
, gdm
->gdm_map
, 0, gdm
->gdm_size
,
398 BUS_DMASYNC_PREREAD
);
401 gfe_dmamem_free(sc
, gdm
);
402 GE_DPRINTF(sc
, (":err=%d", error
));
404 GE_DPRINTF(sc
, (":kva=%p/%#x,map=%p,nsegs=%d,pa=%x/%x",
405 gdm
->gdm_kva
, gdm
->gdm_size
, gdm
->gdm_map
, gdm
->gdm_map
->dm_nsegs
,
406 gdm
->gdm_map
->dm_segs
->ds_addr
, gdm
->gdm_map
->dm_segs
->ds_len
));
407 GE_FUNC_EXIT(sc
, "");
412 gfe_dmamem_free(struct gfe_softc
*sc
, struct gfe_dmamem
*gdm
)
414 GE_FUNC_ENTER(sc
, "gfe_dmamem_free");
416 bus_dmamap_destroy(sc
->sc_dmat
, gdm
->gdm_map
);
418 bus_dmamem_unmap(sc
->sc_dmat
, gdm
->gdm_kva
, gdm
->gdm_size
);
419 if (gdm
->gdm_nsegs
> 0)
420 bus_dmamem_free(sc
->sc_dmat
, gdm
->gdm_segs
, gdm
->gdm_nsegs
);
424 GE_FUNC_EXIT(sc
, "");
428 gfe_ifioctl(struct ifnet
*ifp
, u_long cmd
, void *data
)
430 struct gfe_softc
* const sc
= ifp
->if_softc
;
431 struct ifreq
*ifr
= (struct ifreq
*) data
;
432 struct ifaddr
*ifa
= (struct ifaddr
*) data
;
435 GE_FUNC_ENTER(sc
, "gfe_ifioctl");
440 ifp
->if_flags
|= IFF_UP
;
441 error
= gfe_whack(sc
, GE_WHACK_START
);
442 switch (ifa
->ifa_addr
->sa_family
) {
446 arp_ifinit(ifp
, ifa
);
455 if ((error
= ifioctl_common(ifp
, cmd
, data
)) != 0)
457 /* XXX re-use ether_ioctl() */
458 switch (ifp
->if_flags
& (IFF_UP
|IFF_RUNNING
)) {
459 case IFF_UP
|IFF_RUNNING
:/* active->active, update */
460 error
= gfe_whack(sc
, GE_WHACK_CHANGE
);
462 case IFF_RUNNING
: /* not up, so we stop */
463 error
= gfe_whack(sc
, GE_WHACK_STOP
);
465 case IFF_UP
: /* not running, so we start */
466 error
= gfe_whack(sc
, GE_WHACK_START
);
468 case 0: /* idle->idle: do nothing */
477 if ((error
= ether_ioctl(ifp
, cmd
, data
)) == ENETRESET
) {
478 if (ifp
->if_flags
& IFF_RUNNING
)
479 error
= gfe_whack(sc
, GE_WHACK_CHANGE
);
486 if (ifr
->ifr_mtu
> ETHERMTU
|| ifr
->ifr_mtu
< ETHERMIN
) {
490 if ((error
= ifioctl_common(ifp
, cmd
, data
)) == ENETRESET
)
495 error
= ether_ioctl(ifp
, cmd
, data
);
499 GE_FUNC_EXIT(sc
, "");
504 gfe_ifstart(struct ifnet
*ifp
)
506 struct gfe_softc
* const sc
= ifp
->if_softc
;
509 GE_FUNC_ENTER(sc
, "gfe_ifstart");
511 if ((ifp
->if_flags
& IFF_RUNNING
) == 0) {
512 GE_FUNC_EXIT(sc
, "$");
517 IF_DEQUEUE(&ifp
->if_snd
, m
);
519 ifp
->if_flags
&= ~IFF_OACTIVE
;
520 GE_FUNC_EXIT(sc
, "");
525 * No space in the pending queue? try later.
527 if (IF_QFULL(&sc
->sc_txq
[GE_TXPRIO_HI
].txq_pendq
))
531 * Try to enqueue a mbuf to the device. If that fails, we
532 * can always try to map the next mbuf.
534 IF_ENQUEUE(&sc
->sc_txq
[GE_TXPRIO_HI
].txq_pendq
, m
);
535 GE_DPRINTF(sc
, (">"));
537 (void) gfe_tx_enqueue(sc
, GE_TXPRIO_HI
);
542 * Attempt to queue the mbuf for send failed.
544 IF_PREPEND(&ifp
->if_snd
, m
);
545 ifp
->if_flags
|= IFF_OACTIVE
;
546 GE_FUNC_EXIT(sc
, "%%");
550 gfe_ifwatchdog(struct ifnet
*ifp
)
552 struct gfe_softc
* const sc
= ifp
->if_softc
;
553 struct gfe_txqueue
* const txq
= &sc
->sc_txq
[GE_TXPRIO_HI
];
555 GE_FUNC_ENTER(sc
, "gfe_ifwatchdog");
556 printf("%s: device timeout", device_xname(&sc
->sc_dev
));
557 if (ifp
->if_flags
& IFF_RUNNING
) {
558 uint32_t curtxdnum
= (bus_space_read_4(sc
->sc_gt_memt
, sc
->sc_gt_memh
, txq
->txq_ectdp
) - txq
->txq_desc_busaddr
) / sizeof(txq
->txq_descs
[0]);
559 GE_TXDPOSTSYNC(sc
, txq
, txq
->txq_fi
);
560 GE_TXDPOSTSYNC(sc
, txq
, curtxdnum
);
561 printf(" (fi=%d(%#x),lo=%d,cur=%d(%#x),icm=%#x) ",
562 txq
->txq_fi
, txq
->txq_descs
[txq
->txq_fi
].ed_cmdsts
,
563 txq
->txq_lo
, curtxdnum
, txq
->txq_descs
[curtxdnum
].ed_cmdsts
,
565 GE_TXDPRESYNC(sc
, txq
, txq
->txq_fi
);
566 GE_TXDPRESYNC(sc
, txq
, curtxdnum
);
570 (void) gfe_whack(sc
, GE_WHACK_RESTART
);
571 GE_FUNC_EXIT(sc
, "");
575 gfe_rx_rxqalloc(struct gfe_softc
*sc
, enum gfe_rxprio rxprio
)
577 struct gfe_rxqueue
* const rxq
= &sc
->sc_rxq
[rxprio
];
580 GE_FUNC_ENTER(sc
, "gfe_rx_rxqalloc");
581 GE_DPRINTF(sc
, ("(%d)", rxprio
));
583 error
= gfe_dmamem_alloc(sc
, &rxq
->rxq_desc_mem
, 1,
584 GE_RXDESC_MEMSIZE
, BUS_DMA_NOCACHE
);
586 GE_FUNC_EXIT(sc
, "!!");
590 error
= gfe_dmamem_alloc(sc
, &rxq
->rxq_buf_mem
, GE_RXBUF_NSEGS
,
591 GE_RXBUF_MEMSIZE
, 0);
593 GE_FUNC_EXIT(sc
, "!!!");
596 GE_FUNC_EXIT(sc
, "");
601 gfe_rx_rxqinit(struct gfe_softc
*sc
, enum gfe_rxprio rxprio
)
603 struct gfe_rxqueue
* const rxq
= &sc
->sc_rxq
[rxprio
];
604 volatile struct gt_eth_desc
*rxd
;
605 const bus_dma_segment_t
*ds
;
610 GE_FUNC_ENTER(sc
, "gfe_rx_rxqinit");
611 GE_DPRINTF(sc
, ("(%d)", rxprio
));
613 if ((sc
->sc_flags
& GE_NOFREE
) == 0) {
614 int error
= gfe_rx_rxqalloc(sc
, rxprio
);
616 GE_FUNC_EXIT(sc
, "!");
620 KASSERT(rxq
->rxq_desc_mem
.gdm_kva
!= NULL
);
621 KASSERT(rxq
->rxq_buf_mem
.gdm_kva
!= NULL
);
624 memset(rxq
->rxq_desc_mem
.gdm_kva
, 0, GE_RXDESC_MEMSIZE
);
627 (volatile struct gt_eth_desc
*) rxq
->rxq_desc_mem
.gdm_kva
;
628 rxq
->rxq_desc_busaddr
= rxq
->rxq_desc_mem
.gdm_map
->dm_segs
[0].ds_addr
;
629 rxq
->rxq_bufs
= (struct gfe_rxbuf
*) rxq
->rxq_buf_mem
.gdm_kva
;
631 rxq
->rxq_active
= GE_RXDESC_MAX
;
632 for (idx
= 0, rxd
= rxq
->rxq_descs
,
633 boff
= 0, ds
= rxq
->rxq_buf_mem
.gdm_map
->dm_segs
,
634 nxtaddr
= rxq
->rxq_desc_busaddr
+ sizeof(*rxd
);
636 idx
++, rxd
++, nxtaddr
+= sizeof(*rxd
)) {
637 rxd
->ed_lencnt
= htogt32(GE_RXBUF_SIZE
<< 16);
638 rxd
->ed_cmdsts
= htogt32(RX_CMD_F
|RX_CMD_L
|RX_CMD_O
|RX_CMD_EI
);
639 rxd
->ed_bufptr
= htogt32(ds
->ds_addr
+ boff
);
641 * update the nxtptr to point to the next txd.
643 if (idx
== GE_RXDESC_MAX
- 1)
644 nxtaddr
= rxq
->rxq_desc_busaddr
;
645 rxd
->ed_nxtptr
= htogt32(nxtaddr
);
646 boff
+= GE_RXBUF_SIZE
;
647 if (boff
== ds
->ds_len
) {
652 bus_dmamap_sync(sc
->sc_dmat
, rxq
->rxq_desc_mem
.gdm_map
, 0,
653 rxq
->rxq_desc_mem
.gdm_map
->dm_mapsize
,
654 BUS_DMASYNC_PREREAD
|BUS_DMASYNC_PREWRITE
);
655 bus_dmamap_sync(sc
->sc_dmat
, rxq
->rxq_buf_mem
.gdm_map
, 0,
656 rxq
->rxq_buf_mem
.gdm_map
->dm_mapsize
,
657 BUS_DMASYNC_PREREAD
);
659 rxq
->rxq_intrbits
= ETH_IR_RxBuffer
|ETH_IR_RxError
;
662 rxq
->rxq_intrbits
|= ETH_IR_RxBuffer_3
|ETH_IR_RxError_3
;
663 rxq
->rxq_efrdp
= ETH_EFRDP3(sc
->sc_macno
);
664 rxq
->rxq_ecrdp
= ETH_ECRDP3(sc
->sc_macno
);
666 case GE_RXPRIO_MEDHI
:
667 rxq
->rxq_intrbits
|= ETH_IR_RxBuffer_2
|ETH_IR_RxError_2
;
668 rxq
->rxq_efrdp
= ETH_EFRDP2(sc
->sc_macno
);
669 rxq
->rxq_ecrdp
= ETH_ECRDP2(sc
->sc_macno
);
671 case GE_RXPRIO_MEDLO
:
672 rxq
->rxq_intrbits
|= ETH_IR_RxBuffer_1
|ETH_IR_RxError_1
;
673 rxq
->rxq_efrdp
= ETH_EFRDP1(sc
->sc_macno
);
674 rxq
->rxq_ecrdp
= ETH_ECRDP1(sc
->sc_macno
);
677 rxq
->rxq_intrbits
|= ETH_IR_RxBuffer_0
|ETH_IR_RxError_0
;
678 rxq
->rxq_efrdp
= ETH_EFRDP0(sc
->sc_macno
);
679 rxq
->rxq_ecrdp
= ETH_ECRDP0(sc
->sc_macno
);
682 GE_FUNC_EXIT(sc
, "");
687 gfe_rx_get(struct gfe_softc
*sc
, enum gfe_rxprio rxprio
)
689 struct ifnet
* const ifp
= &sc
->sc_ec
.ec_if
;
690 struct gfe_rxqueue
* const rxq
= &sc
->sc_rxq
[rxprio
];
691 struct mbuf
*m
= rxq
->rxq_curpkt
;
693 GE_FUNC_ENTER(sc
, "gfe_rx_get");
694 GE_DPRINTF(sc
, ("(%d)", rxprio
));
696 while (rxq
->rxq_active
> 0) {
697 volatile struct gt_eth_desc
*rxd
= &rxq
->rxq_descs
[rxq
->rxq_fi
];
698 struct gfe_rxbuf
*rxb
= &rxq
->rxq_bufs
[rxq
->rxq_fi
];
699 const struct ether_header
*eh
;
703 GE_RXDPOSTSYNC(sc
, rxq
, rxq
->rxq_fi
);
704 cmdsts
= gt32toh(rxd
->ed_cmdsts
);
705 GE_DPRINTF(sc
, (":%d=%#x", rxq
->rxq_fi
, cmdsts
));
706 rxq
->rxq_cmdsts
= cmdsts
;
708 * Sometimes the GE "forgets" to reset the ownership bit.
709 * But if the length has been rewritten, the packet is ours
710 * so pretend the O bit is set.
712 buflen
= gt32toh(rxd
->ed_lencnt
) & 0xffff;
713 if ((cmdsts
& RX_CMD_O
) && buflen
== 0) {
714 GE_RXDPRESYNC(sc
, rxq
, rxq
->rxq_fi
);
719 * If this is not a single buffer packet with no errors
720 * or for some reason it's bigger than our frame size,
721 * ignore it and go to the next packet.
723 if ((cmdsts
& (RX_CMD_F
|RX_CMD_L
|RX_STS_ES
)) !=
724 (RX_CMD_F
|RX_CMD_L
) ||
725 buflen
> sc
->sc_max_frame_length
) {
726 GE_DPRINTF(sc
, ("!"));
733 /* CRC is included with the packet; trim it off. */
734 buflen
-= ETHER_CRC_LEN
;
737 MGETHDR(m
, M_DONTWAIT
, MT_DATA
);
739 GE_DPRINTF(sc
, ("?"));
743 if ((m
->m_flags
& M_EXT
) == 0 && buflen
> MHLEN
- 2) {
744 MCLGET(m
, M_DONTWAIT
);
745 if ((m
->m_flags
& M_EXT
) == 0) {
746 GE_DPRINTF(sc
, ("?"));
753 m
->m_pkthdr
.rcvif
= ifp
;
754 rxq
->rxq_cmdsts
= cmdsts
;
757 bus_dmamap_sync(sc
->sc_dmat
, rxq
->rxq_buf_mem
.gdm_map
,
758 rxq
->rxq_fi
* sizeof(*rxb
), buflen
, BUS_DMASYNC_POSTREAD
);
760 KASSERT(m
->m_len
== 0 && m
->m_pkthdr
.len
== 0);
761 memcpy(m
->m_data
+ m
->m_len
, rxb
->rxb_data
, buflen
);
763 m
->m_pkthdr
.len
= buflen
;
767 if (ifp
->if_bpf
!= NULL
)
768 bpf_mtap(ifp
->if_bpf
, m
);
771 eh
= (const struct ether_header
*) m
->m_data
;
772 if ((ifp
->if_flags
& IFF_PROMISC
) ||
773 (rxq
->rxq_cmdsts
& RX_STS_M
) == 0 ||
774 (rxq
->rxq_cmdsts
& RX_STS_HE
) ||
775 (eh
->ether_dhost
[0] & 1) != 0 ||
776 memcmp(eh
->ether_dhost
, CLLADDR(ifp
->if_sadl
),
777 ETHER_ADDR_LEN
) == 0) {
778 (*ifp
->if_input
)(ifp
, m
);
780 GE_DPRINTF(sc
, (">"));
784 GE_DPRINTF(sc
, ("+"));
789 rxd
->ed_lencnt
&= ~0xffff; /* zero out length */
790 rxd
->ed_cmdsts
= htogt32(RX_CMD_F
|RX_CMD_L
|RX_CMD_O
|RX_CMD_EI
);
792 GE_DPRINTF(sc
, ("([%d]->%08lx.%08lx.%08lx.%08lx)",
794 ((unsigned long *)rxd
)[0], ((unsigned long *)rxd
)[1],
795 ((unsigned long *)rxd
)[2], ((unsigned long *)rxd
)[3]));
797 GE_RXDPRESYNC(sc
, rxq
, rxq
->rxq_fi
);
798 if (++rxq
->rxq_fi
== GE_RXDESC_MAX
)
803 GE_FUNC_EXIT(sc
, "");
807 gfe_rx_process(struct gfe_softc
*sc
, uint32_t cause
, uint32_t intrmask
)
809 struct ifnet
* const ifp
= &sc
->sc_ec
.ec_if
;
810 struct gfe_rxqueue
*rxq
;
812 #define RXPRIO_DECODER 0xffffaa50
813 GE_FUNC_ENTER(sc
, "gfe_rx_process");
815 rxbits
= ETH_IR_RxBuffer_GET(cause
);
817 enum gfe_rxprio rxprio
= (RXPRIO_DECODER
>> (rxbits
* 2)) & 3;
818 GE_DPRINTF(sc
, ("%1x", rxbits
));
819 rxbits
&= ~(1 << rxprio
);
820 gfe_rx_get(sc
, rxprio
);
823 rxbits
= ETH_IR_RxError_GET(cause
);
825 enum gfe_rxprio rxprio
= (RXPRIO_DECODER
>> (rxbits
* 2)) & 3;
826 uint32_t masks
[(GE_RXDESC_MAX
+ 31) / 32];
828 rxbits
&= ~(1 << rxprio
);
829 rxq
= &sc
->sc_rxq
[rxprio
];
830 sc
->sc_idlemask
|= (rxq
->rxq_intrbits
& ETH_IR_RxBits
);
831 intrmask
&= ~(rxq
->rxq_intrbits
& ETH_IR_RxBits
);
832 if ((sc
->sc_tickflags
& GE_TICK_RX_RESTART
) == 0) {
833 sc
->sc_tickflags
|= GE_TICK_RX_RESTART
;
834 callout_reset(&sc
->sc_co
, 1, gfe_tick
, sc
);
837 GE_DPRINTF(sc
, ("%s: rx queue %d filled at %u\n",
838 device_xname(&sc
->sc_dev
), rxprio
, rxq
->rxq_fi
));
839 memset(masks
, 0, sizeof(masks
));
840 bus_dmamap_sync(sc
->sc_dmat
, rxq
->rxq_desc_mem
.gdm_map
,
841 0, rxq
->rxq_desc_mem
.gdm_size
,
842 BUS_DMASYNC_POSTREAD
|BUS_DMASYNC_POSTWRITE
);
843 for (idx
= 0; idx
< GE_RXDESC_MAX
; idx
++) {
844 volatile struct gt_eth_desc
*rxd
= &rxq
->rxq_descs
[idx
];
846 if (RX_CMD_O
& gt32toh(rxd
->ed_cmdsts
))
847 masks
[idx
/32] |= 1 << (idx
& 31);
849 bus_dmamap_sync(sc
->sc_dmat
, rxq
->rxq_desc_mem
.gdm_map
,
850 0, rxq
->rxq_desc_mem
.gdm_size
,
851 BUS_DMASYNC_PREREAD
|BUS_DMASYNC_PREWRITE
);
853 printf("%s: rx queue %d filled at %u=%#x(%#x/%#x)\n",
854 device_xname(&sc
->sc_dev
), rxprio
, rxq
->rxq_fi
,
855 rxq
->rxq_cmdsts
, masks
[0], masks
[1]);
858 if ((intrmask
& ETH_IR_RxBits
) == 0)
859 intrmask
&= ~(ETH_IR_RxBuffer
|ETH_IR_RxError
);
861 GE_FUNC_EXIT(sc
, "");
866 gfe_rx_prime(struct gfe_softc
*sc
)
868 struct gfe_rxqueue
*rxq
;
871 GE_FUNC_ENTER(sc
, "gfe_rx_prime");
873 error
= gfe_rx_rxqinit(sc
, GE_RXPRIO_HI
);
876 rxq
= &sc
->sc_rxq
[GE_RXPRIO_HI
];
877 if ((sc
->sc_flags
& GE_RXACTIVE
) == 0) {
878 GE_WRITE(sc
, EFRDP3
, rxq
->rxq_desc_busaddr
);
879 GE_WRITE(sc
, ECRDP3
, rxq
->rxq_desc_busaddr
);
881 sc
->sc_intrmask
|= rxq
->rxq_intrbits
;
883 error
= gfe_rx_rxqinit(sc
, GE_RXPRIO_MEDHI
);
886 if ((sc
->sc_flags
& GE_RXACTIVE
) == 0) {
887 rxq
= &sc
->sc_rxq
[GE_RXPRIO_MEDHI
];
888 GE_WRITE(sc
, EFRDP2
, rxq
->rxq_desc_busaddr
);
889 GE_WRITE(sc
, ECRDP2
, rxq
->rxq_desc_busaddr
);
890 sc
->sc_intrmask
|= rxq
->rxq_intrbits
;
893 error
= gfe_rx_rxqinit(sc
, GE_RXPRIO_MEDLO
);
896 if ((sc
->sc_flags
& GE_RXACTIVE
) == 0) {
897 rxq
= &sc
->sc_rxq
[GE_RXPRIO_MEDLO
];
898 GE_WRITE(sc
, EFRDP1
, rxq
->rxq_desc_busaddr
);
899 GE_WRITE(sc
, ECRDP1
, rxq
->rxq_desc_busaddr
);
900 sc
->sc_intrmask
|= rxq
->rxq_intrbits
;
903 error
= gfe_rx_rxqinit(sc
, GE_RXPRIO_LO
);
906 if ((sc
->sc_flags
& GE_RXACTIVE
) == 0) {
907 rxq
= &sc
->sc_rxq
[GE_RXPRIO_LO
];
908 GE_WRITE(sc
, EFRDP0
, rxq
->rxq_desc_busaddr
);
909 GE_WRITE(sc
, ECRDP0
, rxq
->rxq_desc_busaddr
);
910 sc
->sc_intrmask
|= rxq
->rxq_intrbits
;
914 GE_FUNC_EXIT(sc
, "");
919 gfe_rx_cleanup(struct gfe_softc
*sc
, enum gfe_rxprio rxprio
)
921 struct gfe_rxqueue
*rxq
= &sc
->sc_rxq
[rxprio
];
922 GE_FUNC_ENTER(sc
, "gfe_rx_cleanup");
924 GE_FUNC_EXIT(sc
, "");
929 m_freem(rxq
->rxq_curpkt
);
930 if ((sc
->sc_flags
& GE_NOFREE
) == 0) {
931 gfe_dmamem_free(sc
, &rxq
->rxq_desc_mem
);
932 gfe_dmamem_free(sc
, &rxq
->rxq_buf_mem
);
934 GE_FUNC_EXIT(sc
, "");
938 gfe_rx_stop(struct gfe_softc
*sc
, enum gfe_whack_op op
)
940 GE_FUNC_ENTER(sc
, "gfe_rx_stop");
941 sc
->sc_flags
&= ~GE_RXACTIVE
;
942 sc
->sc_idlemask
&= ~(ETH_IR_RxBits
|ETH_IR_RxBuffer
|ETH_IR_RxError
);
943 sc
->sc_intrmask
&= ~(ETH_IR_RxBits
|ETH_IR_RxBuffer
|ETH_IR_RxError
);
944 GE_WRITE(sc
, EIMR
, sc
->sc_intrmask
);
945 GE_WRITE(sc
, ESDCMR
, ETH_ESDCMR_AR
);
948 } while (GE_READ(sc
, ESDCMR
) & ETH_ESDCMR_AR
);
949 gfe_rx_cleanup(sc
, GE_RXPRIO_HI
);
950 gfe_rx_cleanup(sc
, GE_RXPRIO_MEDHI
);
951 gfe_rx_cleanup(sc
, GE_RXPRIO_MEDLO
);
952 gfe_rx_cleanup(sc
, GE_RXPRIO_LO
);
953 GE_FUNC_EXIT(sc
, "");
959 struct gfe_softc
* const sc
= arg
;
961 unsigned int tickflags
;
964 GE_FUNC_ENTER(sc
, "gfe_tick");
968 tickflags
= sc
->sc_tickflags
;
969 sc
->sc_tickflags
= 0;
970 intrmask
= sc
->sc_intrmask
;
971 if (tickflags
& GE_TICK_TX_IFSTART
)
972 gfe_ifstart(&sc
->sc_ec
.ec_if
);
973 if (tickflags
& GE_TICK_RX_RESTART
) {
974 intrmask
|= sc
->sc_idlemask
;
975 if (sc
->sc_idlemask
& (ETH_IR_RxBuffer_3
|ETH_IR_RxError_3
)) {
976 struct gfe_rxqueue
*rxq
= &sc
->sc_rxq
[GE_RXPRIO_HI
];
978 GE_WRITE(sc
, EFRDP3
, rxq
->rxq_desc_busaddr
);
979 GE_WRITE(sc
, ECRDP3
, rxq
->rxq_desc_busaddr
);
981 if (sc
->sc_idlemask
& (ETH_IR_RxBuffer_2
|ETH_IR_RxError_2
)) {
982 struct gfe_rxqueue
*rxq
= &sc
->sc_rxq
[GE_RXPRIO_MEDHI
];
984 GE_WRITE(sc
, EFRDP2
, rxq
->rxq_desc_busaddr
);
985 GE_WRITE(sc
, ECRDP2
, rxq
->rxq_desc_busaddr
);
987 if (sc
->sc_idlemask
& (ETH_IR_RxBuffer_1
|ETH_IR_RxError_1
)) {
988 struct gfe_rxqueue
*rxq
= &sc
->sc_rxq
[GE_RXPRIO_MEDLO
];
990 GE_WRITE(sc
, EFRDP1
, rxq
->rxq_desc_busaddr
);
991 GE_WRITE(sc
, ECRDP1
, rxq
->rxq_desc_busaddr
);
993 if (sc
->sc_idlemask
& (ETH_IR_RxBuffer_0
|ETH_IR_RxError_0
)) {
994 struct gfe_rxqueue
*rxq
= &sc
->sc_rxq
[GE_RXPRIO_LO
];
996 GE_WRITE(sc
, EFRDP0
, rxq
->rxq_desc_busaddr
);
997 GE_WRITE(sc
, ECRDP0
, rxq
->rxq_desc_busaddr
);
1001 if (intrmask
!= sc
->sc_intrmask
) {
1002 sc
->sc_intrmask
= intrmask
;
1003 GE_WRITE(sc
, EIMR
, sc
->sc_intrmask
);
1008 GE_FUNC_EXIT(sc
, "");
1012 gfe_tx_enqueue(struct gfe_softc
*sc
, enum gfe_txprio txprio
)
1014 const int dcache_line_size
= curcpu()->ci_ci
.dcache_line_size
;
1015 struct ifnet
* const ifp
= &sc
->sc_ec
.ec_if
;
1016 struct gfe_txqueue
* const txq
= &sc
->sc_txq
[txprio
];
1017 volatile struct gt_eth_desc
* const txd
= &txq
->txq_descs
[txq
->txq_lo
];
1018 uint32_t intrmask
= sc
->sc_intrmask
;
1022 GE_FUNC_ENTER(sc
, "gfe_tx_enqueue");
1025 * Anything in the pending queue to enqueue? if not, punt. Likewise
1026 * if the txq is not yet created.
1027 * otherwise grab its dmamap.
1029 if (txq
== NULL
|| (m
= txq
->txq_pendq
.ifq_head
) == NULL
) {
1030 GE_FUNC_EXIT(sc
, "-");
1035 * Have we [over]consumed our limit of descriptors?
1036 * Do we have enough free descriptors?
1038 if (GE_TXDESC_MAX
== txq
->txq_nactive
+ 2) {
1039 volatile struct gt_eth_desc
* const txd2
= &txq
->txq_descs
[txq
->txq_fi
];
1042 GE_TXDPOSTSYNC(sc
, txq
, txq
->txq_fi
);
1043 cmdsts
= gt32toh(txd2
->ed_cmdsts
);
1044 if (cmdsts
& TX_CMD_O
) {
1047 * Sometime the Discovery forgets to update the
1048 * last descriptor. See if we own the descriptor
1049 * after it (since we know we've turned that to
1050 * the discovery and if we owned it, the Discovery
1051 * gave it back). If we do, we know the Discovery
1052 * gave back this one but forgot to mark it as ours.
1054 nextin
= txq
->txq_fi
+ 1;
1055 if (nextin
== GE_TXDESC_MAX
)
1057 GE_TXDPOSTSYNC(sc
, txq
, nextin
);
1058 if (gt32toh(txq
->txq_descs
[nextin
].ed_cmdsts
) & TX_CMD_O
) {
1059 GE_TXDPRESYNC(sc
, txq
, txq
->txq_fi
);
1060 GE_TXDPRESYNC(sc
, txq
, nextin
);
1061 GE_FUNC_EXIT(sc
, "@");
1065 printf("%s: txenqueue: transmitter resynced at %d\n",
1066 device_xname(&sc
->sc_dev
), txq
->txq_fi
);
1069 if (++txq
->txq_fi
== GE_TXDESC_MAX
)
1071 txq
->txq_inptr
= gt32toh(txd2
->ed_bufptr
) - txq
->txq_buf_busaddr
;
1072 pktlen
= (gt32toh(txd2
->ed_lencnt
) >> 16) & 0xffff;
1073 txq
->txq_inptr
+= roundup(pktlen
, dcache_line_size
);
1078 if (cmdsts
& TX_STS_ES
)
1080 GE_DPRINTF(sc
, ("%%"));
1083 buflen
= roundup(m
->m_pkthdr
.len
, dcache_line_size
);
1086 * If this packet would wrap around the end of the buffer, reset back
1089 if (txq
->txq_outptr
+ buflen
> GE_TXBUF_SIZE
) {
1090 txq
->txq_ei_gapcount
+= GE_TXBUF_SIZE
- txq
->txq_outptr
;
1091 txq
->txq_outptr
= 0;
1095 * Make sure the output packet doesn't run over the beginning of
1096 * what we've already given the GT.
1098 if (txq
->txq_nactive
> 0 && txq
->txq_outptr
<= txq
->txq_inptr
&&
1099 txq
->txq_outptr
+ buflen
> txq
->txq_inptr
) {
1100 intrmask
|= txq
->txq_intrbits
&
1101 (ETH_IR_TxBufferHigh
|ETH_IR_TxBufferLow
);
1102 if (sc
->sc_intrmask
!= intrmask
) {
1103 sc
->sc_intrmask
= intrmask
;
1104 GE_WRITE(sc
, EIMR
, sc
->sc_intrmask
);
1106 GE_FUNC_EXIT(sc
, "#");
1111 * The end-of-list descriptor we put on last time is the starting point
1112 * for this packet. The GT is supposed to terminate list processing on
1113 * a NULL nxtptr but that currently is broken so a CPU-owned descriptor
1114 * must terminate the list.
1116 intrmask
= sc
->sc_intrmask
;
1118 m_copydata(m
, 0, m
->m_pkthdr
.len
,
1119 (char *)txq
->txq_buf_mem
.gdm_kva
+ (int)txq
->txq_outptr
);
1120 bus_dmamap_sync(sc
->sc_dmat
, txq
->txq_buf_mem
.gdm_map
,
1121 txq
->txq_outptr
, buflen
, BUS_DMASYNC_PREWRITE
);
1122 txd
->ed_bufptr
= htogt32(txq
->txq_buf_busaddr
+ txq
->txq_outptr
);
1123 txd
->ed_lencnt
= htogt32(m
->m_pkthdr
.len
<< 16);
1124 GE_TXDPRESYNC(sc
, txq
, txq
->txq_lo
);
1127 * Request a buffer interrupt every 2/3 of the way thru the transmit
1130 txq
->txq_ei_gapcount
+= buflen
;
1131 if (txq
->txq_ei_gapcount
> 2 * GE_TXBUF_SIZE
/ 3) {
1132 txd
->ed_cmdsts
= htogt32(TX_CMD_FIRST
|TX_CMD_LAST
|TX_CMD_EI
);
1133 txq
->txq_ei_gapcount
= 0;
1135 txd
->ed_cmdsts
= htogt32(TX_CMD_FIRST
|TX_CMD_LAST
);
1138 GE_DPRINTF(sc
, ("([%d]->%08lx.%08lx.%08lx.%08lx)", txq
->txq_lo
,
1139 ((unsigned long *)txd
)[0], ((unsigned long *)txd
)[1],
1140 ((unsigned long *)txd
)[2], ((unsigned long *)txd
)[3]));
1142 GE_TXDPRESYNC(sc
, txq
, txq
->txq_lo
);
1144 txq
->txq_outptr
+= buflen
;
1146 * Tell the SDMA engine to "Fetch!"
1148 GE_WRITE(sc
, ESDCMR
,
1149 txq
->txq_esdcmrbits
& (ETH_ESDCMR_TXDH
|ETH_ESDCMR_TXDL
));
1151 GE_DPRINTF(sc
, ("(%d)", txq
->txq_lo
));
1154 * Update the last out appropriately.
1157 if (++txq
->txq_lo
== GE_TXDESC_MAX
)
1161 * Move mbuf from the pending queue to the snd queue.
1163 IF_DEQUEUE(&txq
->txq_pendq
, m
);
1165 if (ifp
->if_bpf
!= NULL
)
1166 bpf_mtap(ifp
->if_bpf
, m
);
1169 ifp
->if_flags
&= ~IFF_OACTIVE
;
1172 * Since we have put an item into the packet queue, we now want
1173 * an interrupt when the transmit queue finishes processing the
1174 * list. But only update the mask if needs changing.
1176 intrmask
|= txq
->txq_intrbits
& (ETH_IR_TxEndHigh
|ETH_IR_TxEndLow
);
1177 if (sc
->sc_intrmask
!= intrmask
) {
1178 sc
->sc_intrmask
= intrmask
;
1179 GE_WRITE(sc
, EIMR
, sc
->sc_intrmask
);
1181 if (ifp
->if_timer
== 0)
1183 GE_FUNC_EXIT(sc
, "*");
/*
 * gfe_tx_done: reclaim completed transmit descriptors for one priority
 * queue and return the (possibly updated) interrupt mask.
 *
 * NOTE(review): this extract omits some original lines (braces, local
 * declarations such as "cmdsts"/"nextin"/"pktlen", and a few statements);
 * the code tokens below are unchanged from the source.
 */
gfe_tx_done(struct gfe_softc *sc, enum gfe_txprio txprio, uint32_t intrmask)

	struct gfe_txqueue * const txq = &sc->sc_txq[txprio];
	struct ifnet * const ifp = &sc->sc_ec.ec_if;

	GE_FUNC_ENTER(sc, "gfe_tx_done");

	GE_FUNC_EXIT(sc, "");

	while (txq->txq_nactive > 0) {
		/*
		 * D-cache line size is used below to step txq_inptr past the
		 * cache-line-rounded copy of each transmitted packet.
		 */
		const int dcache_line_size = curcpu()->ci_ci.dcache_line_size;
		volatile struct gt_eth_desc *txd = &txq->txq_descs[txq->txq_fi];

		GE_TXDPOSTSYNC(sc, txq, txq->txq_fi);
		if ((cmdsts = gt32toh(txd->ed_cmdsts)) & TX_CMD_O) {
			/* TX_CMD_O set: the chip still owns this descriptor. */
			if (txq->txq_nactive == 1) {
				GE_TXDPRESYNC(sc, txq, txq->txq_fi);
				GE_FUNC_EXIT(sc, "");
			/*
			 * Sometimes the Discovery forgets to update the
			 * ownership bit in the descriptor.  See if we own the
			 * descriptor after it (since we know we've turned
			 * that to the Discovery and if we own it now then the
			 * Discovery gave it back).  If we do, we know the
			 * Discovery gave back this one but forgot to mark it
			 * as ours.
			 */
			nextin = txq->txq_fi + 1;
			if (nextin == GE_TXDESC_MAX)
			GE_TXDPOSTSYNC(sc, txq, nextin);
			if (gt32toh(txq->txq_descs[nextin].ed_cmdsts) & TX_CMD_O) {
				GE_TXDPRESYNC(sc, txq, txq->txq_fi);
				GE_TXDPRESYNC(sc, txq, nextin);
				GE_FUNC_EXIT(sc, "");
			/* The next descriptor came back: resync and reclaim. */
			printf("%s: txdone: transmitter resynced at %d\n",
			    device_xname(&sc->sc_dev), txq->txq_fi);

		GE_DPRINTF(sc, ("([%d]<-%08lx.%08lx.%08lx.%08lx)",
		    ((unsigned long *)txd)[0], ((unsigned long *)txd)[1],
		    ((unsigned long *)txd)[2], ((unsigned long *)txd)[3]));
		GE_DPRINTF(sc, ("(%d)", txq->txq_fi));
		if (++txq->txq_fi == GE_TXDESC_MAX)
		/* Free the buffer space consumed by this packet. */
		txq->txq_inptr = gt32toh(txd->ed_bufptr) - txq->txq_buf_busaddr;
		/* Byte count lives in the upper 16 bits of ed_lencnt. */
		pktlen = (gt32toh(txd->ed_lencnt) >> 16) & 0xffff;
		bus_dmamap_sync(sc->sc_dmat, txq->txq_buf_mem.gdm_map,
		    txq->txq_inptr, pktlen, BUS_DMASYNC_POSTWRITE);
		txq->txq_inptr += roundup(pktlen, dcache_line_size);

		/* NOTE(review): TX_STS_ES handling body elided in this extract. */
		if (cmdsts & TX_STS_ES)
		/* txd->ed_bufptr = 0; */

	if (txq->txq_nactive != 0)
		panic("%s: transmit fifo%d empty but active count (%d) > 0!",
		    device_xname(&sc->sc_dev), txprio, txq->txq_nactive);
	/* Queue fully drained: stop asking for Tx interrupts on it. */
	intrmask &= ~(txq->txq_intrbits & (ETH_IR_TxEndHigh|ETH_IR_TxEndLow));
	intrmask &= ~(txq->txq_intrbits & (ETH_IR_TxBufferHigh|ETH_IR_TxBufferLow));
	GE_FUNC_EXIT(sc, "");
/*
 * gfe_tx_txqalloc: allocate the DMA memory for one transmit priority
 * queue: the descriptor ring and the packet buffer region.
 *
 * NOTE(review): error-check branches and return statements are elided in
 * this extract; code tokens are unchanged.
 */
gfe_tx_txqalloc(struct gfe_softc *sc, enum gfe_txprio txprio)

	struct gfe_txqueue * const txq = &sc->sc_txq[txprio];

	GE_FUNC_ENTER(sc, "gfe_tx_txqalloc");
	/* Descriptor ring is allocated BUS_DMA_NOCACHE. */
	error = gfe_dmamem_alloc(sc, &txq->txq_desc_mem, 1,
	    GE_TXDESC_MEMSIZE, BUS_DMA_NOCACHE);
	GE_FUNC_EXIT(sc, "");
	error = gfe_dmamem_alloc(sc, &txq->txq_buf_mem, 1, GE_TXBUF_SIZE, 0);
	/* Buffer allocation failed: release the descriptor memory again. */
	gfe_dmamem_free(sc, &txq->txq_desc_mem);
	GE_FUNC_EXIT(sc, "");
	GE_FUNC_EXIT(sc, "");
/*
 * gfe_tx_start: (re)initialize a transmit priority queue and point the
 * chip's current-descriptor register at it.
 *
 * NOTE(review): this extract omits the enclosing "switch (txprio)"
 * skeleton (only the GE_TXPRIO_NONE label is visible), some braces and
 * local declarations ("i", "addr", "error" checks); tokens unchanged.
 */
gfe_tx_start(struct gfe_softc *sc, enum gfe_txprio txprio)

	struct gfe_txqueue * const txq = &sc->sc_txq[txprio];
	volatile struct gt_eth_desc *txd;

	GE_FUNC_ENTER(sc, "gfe_tx_start");

	/* Mask all Tx interrupt sources while the ring is rebuilt. */
	sc->sc_intrmask &= ~(ETH_IR_TxEndHigh|ETH_IR_TxBufferHigh|
	    ETH_IR_TxEndLow|ETH_IR_TxBufferLow);

	if (sc->sc_flags & GE_NOFREE) {
		/* GE_NOFREE: memory persists across stop/start; must exist. */
		KASSERT(txq->txq_desc_mem.gdm_kva != NULL);
		KASSERT(txq->txq_buf_mem.gdm_kva != NULL);
		int error = gfe_tx_txqalloc(sc, txprio);
		GE_FUNC_EXIT(sc, "!");

	/* NOTE(review): assignment target (txq->txq_descs =) elided here. */
	    (volatile struct gt_eth_desc *) txq->txq_desc_mem.gdm_kva;
	txq->txq_desc_busaddr = txq->txq_desc_mem.gdm_map->dm_segs[0].ds_addr;
	txq->txq_buf_busaddr = txq->txq_buf_mem.gdm_map->dm_segs[0].ds_addr;

	txq->txq_pendq.ifq_maxlen = 10;
	txq->txq_ei_gapcount = 0;
	txq->txq_nactive = 0;
	/* Buffer region starts empty: in-pointer at end, out-pointer at 0. */
	txq->txq_inptr = GE_TXBUF_SIZE;
	txq->txq_outptr = 0;

	/* Chain every descriptor to its successor by bus address. */
	for (i = 0, txd = txq->txq_descs,
	    addr = txq->txq_desc_busaddr + sizeof(*txd);
	    i < GE_TXDESC_MAX - 1;
	    i++, txd++, addr += sizeof(*txd)) {
		/*
		 * update the nxtptr to point to the next txd.
		 */
		txd->ed_nxtptr = htogt32(addr);
	/* Last descriptor wraps back to the head of the ring. */
	txq->txq_descs[GE_TXDESC_MAX-1].ed_nxtptr =
	    htogt32(txq->txq_desc_busaddr);
	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_mem.gdm_map, 0,
	    GE_TXDESC_MEMSIZE, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/* High-priority queue setup (case label elided in this extract). */
	txq->txq_intrbits = ETH_IR_TxEndHigh|ETH_IR_TxBufferHigh;
	txq->txq_esdcmrbits = ETH_ESDCMR_TXDH;
	txq->txq_epsrbits = ETH_EPSR_TxHigh;
	txq->txq_ectdp = ETH_ECTDP1(sc->sc_macno);
	GE_WRITE(sc, ECTDP1, txq->txq_desc_busaddr);

	/* Low-priority queue setup (case label elided in this extract). */
	txq->txq_intrbits = ETH_IR_TxEndLow|ETH_IR_TxBufferLow;
	txq->txq_esdcmrbits = ETH_ESDCMR_TXDL;
	txq->txq_epsrbits = ETH_EPSR_TxLow;
	txq->txq_ectdp = ETH_ECTDP0(sc->sc_macno);
	GE_WRITE(sc, ECTDP0, txq->txq_desc_busaddr);

	case GE_TXPRIO_NONE:

	GE_DPRINTF(sc, ("(ectdp=%#x", txq->txq_ectdp));
	/* Hand the descriptor ring's bus address to the GT core. */
	gt_write(device_parent(&sc->sc_dev), txq->txq_ectdp,
	    txq->txq_desc_busaddr);
	GE_DPRINTF(sc, (")"));

	/*
	 * If we are restarting, there may be packets in the pending queue
	 * waiting to be enqueued.  Try enqueuing packets from both priority
	 * queues until the pending queue is empty or there is no room for them.
	 */
	while (gfe_tx_enqueue(sc, txprio))

	GE_FUNC_EXIT(sc, "");
/*
 * gfe_tx_cleanup: tear down one transmit priority queue, releasing its
 * DMA memory unless the GE_NOFREE flag says it persists across restarts.
 *
 * NOTE(review): early-return branches and braces are elided in this
 * extract; code tokens are unchanged.  "flush" is accepted but its use
 * is not visible here — confirm against the full source.
 */
gfe_tx_cleanup(struct gfe_softc *sc, enum gfe_txprio txprio, int flush)

	struct gfe_txqueue * const txq = &sc->sc_txq[txprio];

	GE_FUNC_ENTER(sc, "gfe_tx_cleanup");
	GE_FUNC_EXIT(sc, "");
	GE_FUNC_EXIT(sc, "");
	if ((sc->sc_flags & GE_NOFREE) == 0) {
		gfe_dmamem_free(sc, &txq->txq_desc_mem);
		gfe_dmamem_free(sc, &txq->txq_buf_mem);
	GE_FUNC_EXIT(sc, "-F");
/*
 * gfe_tx_stop: halt both transmit queues, reclaim completed work,
 * mask Tx interrupts, and clean up per-queue resources.
 *
 * NOTE(review): some lines (braces, intervening statements) are elided
 * in this extract; code tokens are unchanged.
 */
gfe_tx_stop(struct gfe_softc *sc, enum gfe_whack_op op)

	GE_FUNC_ENTER(sc, "gfe_tx_stop");

	/* Tell the SDMA engine to stop both Tx DMA channels. */
	GE_WRITE(sc, ESDCMR, ETH_ESDCMR_STDH|ETH_ESDCMR_STDL);

	/* Harvest whatever completed before the stop took effect. */
	sc->sc_intrmask = gfe_tx_done(sc, GE_TXPRIO_HI, sc->sc_intrmask);
	sc->sc_intrmask = gfe_tx_done(sc, GE_TXPRIO_LO, sc->sc_intrmask);
	sc->sc_intrmask &= ~(ETH_IR_TxEndHigh|ETH_IR_TxBufferHigh|
	    ETH_IR_TxEndLow|ETH_IR_TxBufferLow);

	/* Free queue memory only on a full stop, not a restart. */
	gfe_tx_cleanup(sc, GE_TXPRIO_HI, op == GE_WHACK_STOP);
	gfe_tx_cleanup(sc, GE_TXPRIO_LO, op == GE_WHACK_STOP);

	/* No transmissions outstanding: cancel the watchdog. */
	sc->sc_ec.ec_if.if_timer = 0;
	GE_FUNC_EXIT(sc, "");
/*
 * gfe_intr: hardware interrupt handler.
 *
 * NOTE(review): the function header (e.g. "gfe_intr(void *arg)") and
 * several lines ("cause"/"cnt"/"claim" declarations, braces, return)
 * are elided in this extract; code tokens below are unchanged.
 */
	struct gfe_softc * const sc = arg;
	uint32_t intrmask = sc->sc_intrmask;

	GE_FUNC_ENTER(sc, "gfe_intr");

	/* Service up to 4 rounds of cause bits before returning. */
	for (cnt = 0; cnt < 4; cnt++) {
		/* Write back the mask only when a handler changed it. */
		if (sc->sc_intrmask != intrmask) {
			sc->sc_intrmask = intrmask;
			GE_WRITE(sc, EIMR, sc->sc_intrmask);
		cause = GE_READ(sc, EICR);
		cause &= sc->sc_intrmask;
		GE_DPRINTF(sc, (".%#x", cause));

		/* Acknowledge the causes we are about to handle. */
		GE_WRITE(sc, EICR, ~cause);

		if (cause & (ETH_IR_RxBuffer|ETH_IR_RxError))
			intrmask = gfe_rx_process(sc, cause, intrmask);

		if (cause & (ETH_IR_TxBufferHigh|ETH_IR_TxEndHigh))
			intrmask = gfe_tx_done(sc, GE_TXPRIO_HI, intrmask);
		if (cause & (ETH_IR_TxBufferLow|ETH_IR_TxEndLow))
			intrmask = gfe_tx_done(sc, GE_TXPRIO_LO, intrmask);
		if (cause & ETH_IR_MIIPhySTC) {
			/* PHY status change: noted, handled later. */
			sc->sc_flags |= GE_PHYSTSCHG;
			/* intrmask &= ~ETH_IR_MIIPhySTC; */

	/* Push out anything that queued up while we were in the handler. */
	while (gfe_tx_enqueue(sc, GE_TXPRIO_HI))
	while (gfe_tx_enqueue(sc, GE_TXPRIO_LO))

	GE_FUNC_EXIT(sc, "");
1483 gfe_mii_read (device_t self
, int phy
, int reg
)
1485 return gt_mii_read(self
, device_parent(self
), phy
, reg
);
1489 gfe_mii_write (device_t self
, int phy
, int reg
, int value
)
1491 gt_mii_write(self
, device_parent(self
), phy
, reg
, value
);
/*
 * gfe_mii_statchg: MII media-status-change callback.  Intentionally a
 * no-op in this driver (the softc lookup below is commented out).
 */
gfe_mii_statchg (device_t self)
	/* struct gfe_softc *sc = device_private(self); */
/*
 * gfe_whack: central start/stop/restart/change state machine for the
 * interface, driven by "op" (GE_WHACK_START/STOP/RESTART/CHANGE).
 *
 * NOTE(review): the enclosing "switch (op)" skeleton, "error"
 * declaration, error-check branches, braces, and fallthrough structure
 * are partly elided in this extract; code tokens are unchanged.
 */
gfe_whack(struct gfe_softc *sc, enum gfe_whack_op op)

	GE_FUNC_ENTER(sc, "gfe_whack");

	case GE_WHACK_RESTART:
		/* Stop Tx first; a restart falls through into START below. */
		gfe_tx_stop(sc, op);
		/* sc->sc_ec.ec_if.if_flags &= ~IFF_RUNNING; */
	case GE_WHACK_START:
		/* Lazily create the multicast hash table on first start. */
		if (error == 0 && sc->sc_hashtable == NULL) {
			error = gfe_hash_alloc(sc);
		if (op != GE_WHACK_RESTART)
		if (op != GE_WHACK_RESTART) {
			error = gfe_rx_prime(sc);
		error = gfe_tx_start(sc, GE_TXPRIO_HI);
		sc->sc_ec.ec_if.if_flags |= IFF_RUNNING;
		/* Program port config, clear stale causes, unmask. */
		GE_WRITE(sc, EPCR, sc->sc_pcr | ETH_EPCR_EN);
		GE_WRITE(sc, EPCXR, sc->sc_pcxr);
		GE_WRITE(sc, EICR, 0);
		GE_WRITE(sc, EIMR, sc->sc_intrmask);
		/* Point the chip at the hash table's bus address. */
		GE_WRITE(sc, EHTPR, sc->sc_hash_mem.gdm_map->dm_segs->ds_addr);
		/* Kick the receive DMA engine. */
		GE_WRITE(sc, ESDCMR, ETH_ESDCMR_ERD);
		sc->sc_flags |= GE_RXACTIVE;
	case GE_WHACK_CHANGE:
		GE_DPRINTF(sc, ("(pcr=%#x,imr=%#x)",
		    GE_READ(sc, EPCR), GE_READ(sc, EIMR)));
		GE_WRITE(sc, EPCR, sc->sc_pcr | ETH_EPCR_EN);
		GE_WRITE(sc, EIMR, sc->sc_intrmask);
		gfe_ifstart(&sc->sc_ec.ec_if);
		GE_DPRINTF(sc, ("(ectdp0=%#x, ectdp1=%#x)",
		    GE_READ(sc, ECTDP0), GE_READ(sc, ECTDP1)));
		GE_FUNC_EXIT(sc, "");

	/* Error/stop path: disable the port and quiesce everything. */
	GE_DPRINTF(sc, (" failed: %d\n", error));
	GE_WRITE(sc, EPCR, sc->sc_pcr);
	GE_WRITE(sc, EIMR, 0);
	sc->sc_ec.ec_if.if_flags &= ~IFF_RUNNING;
	gfe_tx_stop(sc, GE_WHACK_STOP);
	gfe_rx_stop(sc, GE_WHACK_STOP);
	if ((sc->sc_flags & GE_NOFREE) == 0) {
		gfe_dmamem_free(sc, &sc->sc_hash_mem);
		sc->sc_hashtable = NULL;

	GE_FUNC_EXIT(sc, "");
/*
 * gfe_hash_compute: compute the GT Ethernet hash-table index for a MAC
 * address, using hashFunc0 or hashFunc1 as selected by ETH_EPCR_HM.
 *
 * NOTE(review): the "result" declaration, "else" keyword and some
 * braces are elided in this extract; code tokens are unchanged.
 */
gfe_hash_compute(struct gfe_softc *sc, const uint8_t eaddr[ETHER_ADDR_LEN])

	uint32_t w0, add0, add1;

	GE_FUNC_ENTER(sc, "gfe_hash_compute");
	/* add0 = low 3 address bytes, add1 = high 3, little-endian packed. */
	add0 = ((uint32_t) eaddr[5] << 0) |
	    ((uint32_t) eaddr[4] << 8) |
	    ((uint32_t) eaddr[3] << 16);

	/* Nibble-swap each byte of add0 (bit-reverse within nibbles). */
	add0 = ((add0 & 0x00f0f0f0) >> 4) | ((add0 & 0x000f0f0f) << 4);
	add0 = ((add0 & 0x00cccccc) >> 2) | ((add0 & 0x00333333) << 2);
	add0 = ((add0 & 0x00aaaaaa) >> 1) | ((add0 & 0x00555555) << 1);

	add1 = ((uint32_t) eaddr[2] << 0) |
	    ((uint32_t) eaddr[1] << 8) |
	    ((uint32_t) eaddr[0] << 16);

	/* Same nibble-swap for the upper half. */
	add1 = ((add1 & 0x00f0f0f0) >> 4) | ((add1 & 0x000f0f0f) << 4);
	add1 = ((add1 & 0x00cccccc) >> 2) | ((add1 & 0x00333333) << 2);
	add1 = ((add1 & 0x00aaaaaa) >> 1) | ((add1 & 0x00555555) << 1);

	GE_DPRINTF(sc, ("%s=", ether_sprintf(eaddr)));
	/*
	 * hashResult is the 15-bit hash entry address.
	 * ethernetADD is a 48-bit number, which is derived from the Ethernet
	 * MAC address, by nibble swapping in every byte (i.e. MAC address
	 * of 0x123456789abc translates to ethernetADD of 0x21436587a9cb).
	 */
	if ((sc->sc_pcr & ETH_EPCR_HM) == 0) {
		/*
		 * hashResult[14:0] = hashFunc0(ethernetADD[47:0])
		 *
		 * hashFunc0 calculates the hashResult in the following manner:
		 * hashResult[ 8:0] = ethernetADD[14:8,1,0]
		 *     XOR ethernetADD[23:15] XOR ethernetADD[32:24]
		 */
		result = (add0 & 3) | ((add0 >> 6) & ~3);
		result ^= (add0 >> 15) ^ (add1 >> 0);
		/*
		 * hashResult[14:9] = ethernetADD[7:2]
		 */
		result |= (add0 & ~3) << 7;	/* excess bits will be masked */
		GE_DPRINTF(sc, ("0(%#x)", result & 0x7fff));
#define	TRIBITFLIP	073516240	/* yes, it's in octal */
		/*
		 * hashResult[14:0] = hashFunc1(ethernetADD[47:0])
		 *
		 * hashFunc1 calculates the hashResult in the following manner:
		 * hashResult[08:00] = ethernetADD[06:14]
		 *     XOR ethernetADD[15:23] XOR ethernetADD[24:32]
		 */
		w0 = ((add0 >> 6) ^ (add0 >> 15) ^ (add1)) & 0x1ff;
		/*
		 * Now bitswap those 9 bits, 3 bits at a time via TRIBITFLIP.
		 */
		result |= ((TRIBITFLIP >> (((w0 >> 0) & 7) * 3)) & 7) << 6;
		result |= ((TRIBITFLIP >> (((w0 >> 3) & 7) * 3)) & 7) << 3;
		result |= ((TRIBITFLIP >> (((w0 >> 6) & 7) * 3)) & 7) << 0;
		/*
		 * hashResult[14:09] = ethernetADD[00:05]
		 */
		result |= ((TRIBITFLIP >> (((add0 >> 0) & 7) * 3)) & 7) << 12;
		result |= ((TRIBITFLIP >> (((add0 >> 3) & 7) * 3)) & 7) << 9;
		GE_DPRINTF(sc, ("1(%#x)", result));
	GE_FUNC_EXIT(sc, "");
	/* Mask to the configured table size: 512KB mode keeps 11 bits. */
	return result & ((sc->sc_pcr & ETH_EPCR_HS_512) ? 0x7ff : 0x7fff);
/*
 * gfe_hash_entry_op: add or remove one MAC address in the chip's
 * address-filter hash table, probing linearly like the hardware does.
 *
 * NOTE(review): several lines ("hash"/"he"/"limit"/"maybe_hash"
 * declarations, braces, returns, the slot-store statements) are elided
 * in this extract; code tokens are unchanged.
 */
gfe_hash_entry_op(struct gfe_softc *sc, enum gfe_hash_op op,
	enum gfe_rxprio prio, const uint8_t eaddr[ETHER_ADDR_LEN])

	uint64_t *maybe_he_p = NULL;

	GE_FUNC_ENTER(sc, "gfe_hash_entry_op");

	hash = gfe_hash_compute(sc, eaddr);

	if (sc->sc_hashtable == NULL) {
		panic("%s:%d: hashtable == NULL!", device_xname(&sc->sc_dev),

	/*
	 * Assume we are going to insert so create the hash entry we
	 * are going to insert.  We also use it to match entries we
	 * will be removing.
	 */
	he = ((uint64_t) eaddr[5] << 43) |
	    ((uint64_t) eaddr[4] << 35) |
	    ((uint64_t) eaddr[3] << 27) |
	    ((uint64_t) eaddr[2] << 19) |
	    ((uint64_t) eaddr[1] << 11) |
	    ((uint64_t) eaddr[0] << 3) |
	    HSH_PRIO_INS(prio) | HSH_V | HSH_R;

	/*
	 * The GT will search up to 12 entries for a hit, so we must mimic that.
	 */
	hash &= sc->sc_hashmask / sizeof(he);
	for (limit = HSH_LIMIT; limit > 0 ; --limit) {
		/*
		 * Does the GT wrap at the end, stop at the end, or overrun
		 * the end?  Assume it wraps for now.  Stash a copy of the
		 * current hash entry.
		 */
		uint64_t *he_p = &sc->sc_hashtable[hash];
		uint64_t thishe = *he_p;

		/*
		 * If the hash entry isn't valid, that breaks the chain.  And
		 * this entry is a good candidate for reuse.
		 */
		if ((thishe & HSH_V) == 0) {

		/*
		 * If the hash entry has the same address we are looking for
		 * then ...  if we are removing and the skip bit is set, it's
		 * already been removed.  If we are adding and the skip bit is
		 * clear, then it's already added.  In either case return EBUSY
		 * indicating the op has already been done.  Otherwise flip
		 * the skip bit and return 0.
		 */
		if (((he ^ thishe) & HSH_ADDR_MASK) == 0) {
			if (((op == GE_HASH_REMOVE) && (thishe & HSH_S)) ||
			    ((op == GE_HASH_ADD) && (thishe & HSH_S) == 0))
			*he_p = thishe ^ HSH_S;
			bus_dmamap_sync(sc->sc_dmat, sc->sc_hash_mem.gdm_map,
			    hash * sizeof(he), sizeof(he),
			    BUS_DMASYNC_PREWRITE);
			GE_FUNC_EXIT(sc, "^");

		/*
		 * If we haven't found a slot for the entry and this entry
		 * is currently being skipped, return this entry.
		 */
		if (maybe_he_p == NULL && (thishe & HSH_S)) {

		/* Linear probe to the next slot, wrapping at table end. */
		hash = (hash + 1) & (sc->sc_hashmask / sizeof(he));

	/*
	 * If we got here, then there was no entry to remove.
	 */
	if (op == GE_HASH_REMOVE) {
		GE_FUNC_EXIT(sc, "?");

	/*
	 * If we couldn't find a slot, return an error.
	 */
	if (maybe_he_p == NULL) {
		GE_FUNC_EXIT(sc, "!");

	/* Update the entry.
	 */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_hash_mem.gdm_map,
	    maybe_hash * sizeof(he), sizeof(he), BUS_DMASYNC_PREWRITE);
	GE_FUNC_EXIT(sc, "+");
/*
 * gfe_hash_multichg: ethercom callback invoked when a multicast address
 * is added (SIOCADDMULTI) or removed (SIOCDELMULTI).  Updates the chip's
 * hash table, falling back to promiscuous-multicast for range entries.
 *
 * NOTE(review): "error" declaration, braces and return statements are
 * elided in this extract; code tokens are unchanged.
 */
gfe_hash_multichg(struct ethercom *ec, const struct ether_multi *enm, u_long cmd)

	struct gfe_softc * const sc = ec->ec_if.if_softc;
	enum gfe_hash_op op;
	enum gfe_rxprio prio;

	GE_FUNC_ENTER(sc, "hash_multichg");
	/*
	 * Is this a wildcard entry?  If so and it's being removed, recompute.
	 */
	if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN) != 0) {
		if (cmd == SIOCDELMULTI) {
			GE_FUNC_EXIT(sc, "");

		/* Address range: can't hash it; accept all multicast. */
		sc->sc_flags |= GE_ALLMULTI;
		if ((sc->sc_pcr & ETH_EPCR_PM) == 0) {
			sc->sc_pcr |= ETH_EPCR_PM;
			GE_WRITE(sc, EPCR, sc->sc_pcr);
			GE_FUNC_EXIT(sc, "");
		GE_FUNC_EXIT(sc, "");

	prio = GE_RXPRIO_MEDLO;
	op = (cmd == SIOCDELMULTI ? GE_HASH_REMOVE : GE_HASH_ADD);

	/* Table not allocated yet; nothing to update. */
	if (sc->sc_hashtable == NULL) {
		GE_FUNC_EXIT(sc, "");

	error = gfe_hash_entry_op(sc, op, prio, enm->enm_addrlo);
	if (error == EBUSY) {
		/* EBUSY: the requested op had already been done. */
		printf("%s: multichg: tried to %s %s again\n",
		    device_xname(&sc->sc_dev),
		    cmd == SIOCDELMULTI ? "remove" : "add",
		    ether_sprintf(enm->enm_addrlo));
		GE_FUNC_EXIT(sc, "");

	if (error == ENOENT) {
		printf("%s: multichg: failed to remove %s: not in table\n",
		    device_xname(&sc->sc_dev),
		    ether_sprintf(enm->enm_addrlo));
		GE_FUNC_EXIT(sc, "");

	if (error == ENOSPC) {
		printf("%s: multichg: failed to add %s: no space; regenerating table\n",
		    device_xname(&sc->sc_dev),
		    ether_sprintf(enm->enm_addrlo));
		GE_FUNC_EXIT(sc, "");

	GE_DPRINTF(sc, ("%s: multichg: %s: %s succeeded\n",
	    device_xname(&sc->sc_dev),
	    cmd == SIOCDELMULTI ? "remove" : "add",
	    ether_sprintf(enm->enm_addrlo)));
	GE_FUNC_EXIT(sc, "");
/*
 * gfe_hash_fill: rebuild the hash table from scratch — enter the
 * interface's own address at high priority, then walk the multicast
 * list, falling back to promiscuous-multicast on range entries.
 *
 * NOTE(review): "error" declaration, error-check branches, braces and
 * the return are elided in this extract; code tokens are unchanged.
 */
gfe_hash_fill(struct gfe_softc *sc)

	struct ether_multistep step;
	struct ether_multi *enm;

	GE_FUNC_ENTER(sc, "gfe_hash_fill");

	/* Our unicast address goes in at high receive priority. */
	error = gfe_hash_entry_op(sc, GE_HASH_ADD, GE_RXPRIO_HI,
	    CLLADDR(sc->sc_ec.ec_if.if_sadl));
	GE_FUNC_EXIT(sc, "!");

	/* Start clean; re-enable filtering unless promiscuous. */
	sc->sc_flags &= ~GE_ALLMULTI;
	if ((sc->sc_ec.ec_if.if_flags & IFF_PROMISC) == 0)
		sc->sc_pcr &= ~ETH_EPCR_PM;
	ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/* Address range: cannot hash; accept all multicast. */
			sc->sc_flags |= GE_ALLMULTI;
			sc->sc_pcr |= ETH_EPCR_PM;
			error = gfe_hash_entry_op(sc, GE_HASH_ADD,
			    GE_RXPRIO_MEDLO, enm->enm_addrlo);
			if (error == ENOSPC)
		ETHER_NEXT_MULTI(step, enm);

	GE_FUNC_EXIT(sc, "");
1884 gfe_hash_alloc(struct gfe_softc
*sc
)
1887 GE_FUNC_ENTER(sc
, "gfe_hash_alloc");
1888 sc
->sc_hashmask
= (sc
->sc_pcr
& ETH_EPCR_HS_512
? 16 : 256)*1024 - 1;
1889 error
= gfe_dmamem_alloc(sc
, &sc
->sc_hash_mem
, 1, sc
->sc_hashmask
+ 1,
1892 printf("%s: failed to allocate %d bytes for hash table: %d\n",
1893 device_xname(&sc
->sc_dev
), sc
->sc_hashmask
+ 1, error
);
1894 GE_FUNC_EXIT(sc
, "");
1897 sc
->sc_hashtable
= (uint64_t *) sc
->sc_hash_mem
.gdm_kva
;
1898 memset(sc
->sc_hashtable
, 0, sc
->sc_hashmask
+ 1);
1899 bus_dmamap_sync(sc
->sc_dmat
, sc
->sc_hash_mem
.gdm_map
,
1900 0, sc
->sc_hashmask
+ 1, BUS_DMASYNC_PREWRITE
);
1901 GE_FUNC_EXIT(sc
, "");