/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (C) 2010 Nathan Whitehorn
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/endian.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/socket.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/ethernet.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_dl.h>

#include <machine/pio.h>
#include <machine/bus.h>
#include <machine/platform.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include "ps3bus.h"
#include "ps3-hvcall.h"
#include "if_glcreg.h"

static int	glc_probe(device_t);
static int	glc_attach(device_t);
static void	glc_init(void *xsc);
static void	glc_start(if_t ifp);
static int	glc_ioctl(if_t ifp, u_long cmd, caddr_t data);
static void	glc_set_multicast(struct glc_softc *sc);
static int	glc_add_rxbuf(struct glc_softc *sc, int idx);
static int	glc_add_rxbuf_dma(struct glc_softc *sc, int idx);
static int	glc_encap(struct glc_softc *sc, struct mbuf **m_head,
		    bus_addr_t *pktdesc);
static int	glc_intr_filter(void *xsc);
static void	glc_intr(void *xsc);
static void	glc_tick(void *xsc);
static void	glc_media_status(if_t ifp, struct ifmediareq *ifmr);
static int	glc_media_change(if_t ifp);

static MALLOC_DEFINE(M_GLC, "gelic", "PS3 GELIC ethernet");

static device_method_t glc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		glc_probe),
	DEVMETHOD(device_attach,	glc_attach),

	{ 0, 0 }
};

static driver_t glc_driver = {
	"glc",
	glc_methods,
	sizeof(struct glc_softc)
};

DRIVER_MODULE(glc, ps3bus, glc_driver, 0, 0);

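/*
 * Newbus glue: glc_methods/glc_driver hang this driver off the ps3bus
 * parent, with one glc_softc allocated per GELIC instance.
 */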
static int
glc_probe(device_t dev)
{

	if (ps3bus_get_bustype(dev) != PS3_BUSTYPE_SYSBUS ||
	    ps3bus_get_devtype(dev) != PS3_DEVTYPE_GELIC)
		return (ENXIO);

	device_set_desc(dev, "Playstation 3 GELIC Network Controller");
	return (BUS_PROBE_SPECIFIC);
}

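/*
 * bus_dmamap_load() callback: stash the physical address of the lone
 * DMA segment into the caller-supplied bus_addr_t.
 */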
static void
glc_getphys(void *xaddr, bus_dma_segment_t *segs, int nsegs, int error)
{
	if (error != 0)
		return;

	*(bus_addr_t *)xaddr = segs[0].ds_addr;
}

static int
glc_attach(device_t dev)
{
	struct glc_softc *sc;
	struct glc_txsoft *txs;
	uint64_t mac64, val, junk;
	int i, err;

	sc = device_get_softc(dev);

	sc->sc_bus = ps3bus_get_bus(dev);
	sc->sc_dev = ps3bus_get_device(dev);
	sc->sc_self = dev;

	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0);
	sc->next_txdma_slot = 0;
	sc->bsy_txdma_slots = 0;
	sc->sc_next_rxdma_slot = 0;
	sc->first_used_txdma_slot = -1;

	/*
	 * Shut down existing tasks.
	 */

	lv1_net_stop_tx_dma(sc->sc_bus, sc->sc_dev, 0);
	lv1_net_stop_rx_dma(sc->sc_bus, sc->sc_dev, 0);

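	/*
	 * NB: the stop calls above guard against DMA engines left running
	 * by the bootloader or a previous driver instance; presumably that
	 * must happen before any descriptor memory is set up.
	 */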
	sc->sc_ifp = if_alloc(IFT_ETHER);
	if_setsoftc(sc->sc_ifp, sc);

	/*
	 * Get MAC address and VLAN id
	 */

	lv1_net_control(sc->sc_bus, sc->sc_dev, GELIC_GET_MAC_ADDRESS,
	    0, 0, 0, &mac64, &junk);
	memcpy(sc->sc_enaddr, &((uint8_t *)&mac64)[2], sizeof(sc->sc_enaddr));
	sc->sc_tx_vlan = sc->sc_rx_vlan = -1;
	err = lv1_net_control(sc->sc_bus, sc->sc_dev, GELIC_GET_VLAN_ID,
	    GELIC_VLAN_TX_ETHERNET, 0, 0, &val, &junk);
	if (err == 0)
		sc->sc_tx_vlan = val;
	err = lv1_net_control(sc->sc_bus, sc->sc_dev, GELIC_GET_VLAN_ID,
	    GELIC_VLAN_RX_ETHERNET, 0, 0, &val, &junk);
	if (err == 0)
		sc->sc_rx_vlan = val;

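	/*
	 * The hypervisor returns the MAC as a 64-bit value with the six
	 * address bytes in its low-order bytes, which is why the copy above
	 * starts at offset 2. A VLAN id of -1 records that the firmware did
	 * not hand out a VLAN tag, so the TX path skips ether_vlanencap().
	 */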
	/*
	 * Set up interrupt handler
	 */

	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->sc_irqid,
	    RF_ACTIVE);
	if (sc->sc_irq == NULL) {
		device_printf(dev, "Could not allocate IRQ!\n");
		mtx_destroy(&sc->sc_mtx);
		return (ENXIO);
	}

	bus_setup_intr(dev, sc->sc_irq,
	    INTR_TYPE_NET | INTR_MPSAFE | INTR_ENTROPY,
	    glc_intr_filter, glc_intr, sc, &sc->sc_irqctx);
	sc->sc_hwirq_status = (uint64_t *)contigmalloc(8, M_GLC, M_ZERO, 0,
	    BUS_SPACE_MAXADDR_32BIT, 8, PAGE_SIZE);
	lv1_net_set_interrupt_status_indicator(sc->sc_bus, sc->sc_dev,
	    vtophys(sc->sc_hwirq_status), 0);
	lv1_net_set_interrupt_mask(sc->sc_bus, sc->sc_dev,
	    GELIC_INT_RXDONE | GELIC_INT_RXFRAME | GELIC_INT_PHY |
	    GELIC_INT_TX_CHAIN_END, 0);

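	/*
	 * The 8-byte status word registered above is written by the
	 * hypervisor on each interrupt; glc_intr_filter() reads it instead
	 * of touching device registers directly.
	 */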
	/*
	 * Set up DMA.
	 */

	err = bus_dma_tag_create(bus_get_dma_tag(dev), 32, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    129*sizeof(struct glc_dmadesc), 1, 128*sizeof(struct glc_dmadesc),
	    0, NULL, NULL, &sc->sc_dmadesc_tag);

	err = bus_dmamem_alloc(sc->sc_dmadesc_tag, (void **)&sc->sc_txdmadesc,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
	    &sc->sc_txdmadesc_map);
	err = bus_dmamap_load(sc->sc_dmadesc_tag, sc->sc_txdmadesc_map,
	    sc->sc_txdmadesc, 128*sizeof(struct glc_dmadesc), glc_getphys,
	    &sc->sc_txdmadesc_phys, 0);
	err = bus_dmamem_alloc(sc->sc_dmadesc_tag, (void **)&sc->sc_rxdmadesc,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
	    &sc->sc_rxdmadesc_map);
	err = bus_dmamap_load(sc->sc_dmadesc_tag, sc->sc_rxdmadesc_map,
	    sc->sc_rxdmadesc, 128*sizeof(struct glc_dmadesc), glc_getphys,
	    &sc->sc_rxdmadesc_phys, 0);

	err = bus_dma_tag_create(bus_get_dma_tag(dev), 128, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
	    &sc->sc_rxdma_tag);
	err = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    BUS_SPACE_MAXSIZE_32BIT, 16, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
	    &sc->sc_txdma_tag);

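	/*
	 * Three tags: sc_dmadesc_tag backs the two 128-entry descriptor
	 * rings of 32-byte glc_dmadesc entries; sc_rxdma_tag maps receive
	 * buffers with 128-byte alignment (apparently a hardware
	 * requirement); sc_txdma_tag maps outgoing mbuf chains of up to 16
	 * segments, matching segs[] in glc_encap().
	 */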
	/* init transmit descriptors */
	STAILQ_INIT(&sc->sc_txfreeq);
	STAILQ_INIT(&sc->sc_txdirtyq);

	/* create TX DMA maps */
	for (i = 0; i < GLC_MAX_TX_PACKETS; i++) {
		txs = &sc->sc_txsoft[i];
		txs->txs_mbuf = NULL;
		err = bus_dmamap_create(sc->sc_txdma_tag, 0, &txs->txs_dmamap);
		if (err) {
			device_printf(dev,
			    "unable to create TX DMA map %d, error = %d\n",
			    i, err);
		}
		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	/* Create the receive buffer DMA maps. */
	for (i = 0; i < GLC_MAX_RX_PACKETS; i++) {
		err = bus_dmamap_create(sc->sc_rxdma_tag, 0,
		    &sc->sc_rxsoft[i].rxs_dmamap);
		if (err) {
			device_printf(dev,
			    "unable to create RX DMA map %d, error = %d\n",
			    i, err);
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/*
	 * Attach to network stack
	 */

	if_initname(sc->sc_ifp, device_get_name(dev), device_get_unit(dev));
	if_setmtu(sc->sc_ifp, ETHERMTU);
	if_setflags(sc->sc_ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_sethwassist(sc->sc_ifp, CSUM_TCP | CSUM_UDP);
	if_setcapabilities(sc->sc_ifp, IFCAP_HWCSUM | IFCAP_RXCSUM);
	if_setcapenable(sc->sc_ifp, IFCAP_HWCSUM | IFCAP_RXCSUM);
	if_setstartfn(sc->sc_ifp, glc_start);
	if_setioctlfn(sc->sc_ifp, glc_ioctl);
	if_setinitfn(sc->sc_ifp, glc_init);

	ifmedia_init(&sc->sc_media, IFM_IMASK, glc_media_change,
	    glc_media_status);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);

	if_setsendqlen(sc->sc_ifp, GLC_MAX_TX_PACKETS);
	if_setsendqready(sc->sc_ifp);

	ether_ifattach(sc->sc_ifp, sc->sc_enaddr);
	if_sethwassist(sc->sc_ifp, 0);

	return (0);

	/* XXX: unreachable cleanup; all error paths above return directly. */
	mtx_destroy(&sc->sc_mtx);
	if_free(sc->sc_ifp);
	return (ENXIO);
}

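/*
 * (Re)initialize the interface with the softc mutex held: reprogram the
 * multicast filter, repopulate the RX descriptor ring, flush any TX
 * descriptors still in flight, and restart RX DMA from slot 0.
 */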
static void
glc_init_locked(struct glc_softc *sc)
{
	int i, error;
	struct glc_rxsoft *rxs;
	struct glc_txsoft *txs;

	mtx_assert(&sc->sc_mtx, MA_OWNED);

	lv1_net_stop_tx_dma(sc->sc_bus, sc->sc_dev, 0);
	lv1_net_stop_rx_dma(sc->sc_bus, sc->sc_dev, 0);

	glc_set_multicast(sc);

	for (i = 0; i < GLC_MAX_RX_PACKETS; i++) {
		rxs = &sc->sc_rxsoft[i];
		rxs->rxs_desc_slot = i;

		if (rxs->rxs_mbuf == NULL) {
			glc_add_rxbuf(sc, i);

			if (rxs->rxs_mbuf == NULL) {
				/* XXX: should never happen */
				rxs->rxs_desc_slot = -1;
				break;
			}
		}

		glc_add_rxbuf_dma(sc, i);
		bus_dmamap_sync(sc->sc_dmadesc_tag, sc->sc_rxdmadesc_map,
		    BUS_DMASYNC_PREREAD);
	}

	/* Clear TX dirty queue */
	while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
		STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
		bus_dmamap_unload(sc->sc_txdma_tag, txs->txs_dmamap);

		if (txs->txs_mbuf != NULL) {
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}

		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}
	sc->first_used_txdma_slot = -1;
	sc->bsy_txdma_slots = 0;

	error = lv1_net_start_rx_dma(sc->sc_bus, sc->sc_dev,
	    sc->sc_rxsoft[0].rxs_desc, 0);
	if (error != 0)
		device_printf(sc->sc_self,
		    "lv1_net_start_rx_dma error: %d\n", error);

	if_setdrvflagbits(sc->sc_ifp, IFF_DRV_RUNNING, 0);
	if_setdrvflagbits(sc->sc_ifp, 0, IFF_DRV_OACTIVE);
	sc->sc_ifpflags = if_getflags(sc->sc_ifp);

	sc->sc_wdog_timer = 0;
	callout_reset(&sc->sc_tick_ch, hz, glc_tick, sc);
}

static void
glc_stop(void *xsc)
{
	struct glc_softc *sc = xsc;

	mtx_assert(&sc->sc_mtx, MA_OWNED);

	lv1_net_stop_tx_dma(sc->sc_bus, sc->sc_dev, 0);
	lv1_net_stop_rx_dma(sc->sc_bus, sc->sc_dev, 0);
}

static void
glc_init(void *xsc)
{
	struct glc_softc *sc = xsc;

	mtx_lock(&sc->sc_mtx);
	glc_init_locked(sc);
	mtx_unlock(&sc->sc_mtx);
}

static void
glc_tick(void *xsc)
{
	struct glc_softc *sc = xsc;

	mtx_assert(&sc->sc_mtx, MA_OWNED);

	/*
	 * XXX: Sometimes the RX queue gets stuck. Poke it periodically until
	 * we figure out why. This will fail harmlessly if the RX queue is
	 * already running.
	 */
	lv1_net_start_rx_dma(sc->sc_bus, sc->sc_dev,
	    sc->sc_rxsoft[sc->sc_next_rxdma_slot].rxs_desc, 0);

	if (sc->sc_wdog_timer == 0 || --sc->sc_wdog_timer != 0) {
		callout_reset(&sc->sc_tick_ch, hz, glc_tick, sc);
		return;
	}

	/* Problems */
	device_printf(sc->sc_self, "device timeout\n");

	glc_init_locked(sc);
}

static void
glc_start_locked(if_t ifp)
{
	struct glc_softc *sc = if_getsoftc(ifp);
	bus_addr_t first, pktdesc;
	int kickstart = 0;
	int error;
	struct mbuf *mb_head;

	mtx_assert(&sc->sc_mtx, MA_OWNED);
	first = 0;

	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	if (STAILQ_EMPTY(&sc->sc_txdirtyq))
		kickstart = 1;

	while (!if_sendq_empty(ifp)) {
		mb_head = if_dequeue(ifp);

		if (mb_head == NULL)
			break;

		/* Check if the ring buffer is full */
		if (sc->bsy_txdma_slots > 125) {
			/* Put the packet back and stop */
			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			if_sendq_prepend(ifp, mb_head);
			break;
		}

		BPF_MTAP(ifp, mb_head);

		if (sc->sc_tx_vlan >= 0)
			mb_head = ether_vlanencap(mb_head, sc->sc_tx_vlan);

		if (glc_encap(sc, &mb_head, &pktdesc)) {
			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			break;
		}

		if (first == 0)
			first = pktdesc;
	}

	if (kickstart && first != 0) {
		error = lv1_net_start_tx_dma(sc->sc_bus, sc->sc_dev, first, 0);
		if (error != 0)
			device_printf(sc->sc_self,
			    "lv1_net_start_tx_dma error: %d\n", error);
		sc->sc_wdog_timer = 5;
	}
}

static void
glc_start(if_t ifp)
{
	struct glc_softc *sc = if_getsoftc(ifp);

	mtx_lock(&sc->sc_mtx);
	glc_start_locked(ifp);
	mtx_unlock(&sc->sc_mtx);
}

static int
glc_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct glc_softc *sc = if_getsoftc(ifp);
	struct ifreq *ifr = (struct ifreq *)data;
	int err = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		mtx_lock(&sc->sc_mtx);
		if ((if_getflags(ifp) & IFF_UP) != 0) {
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0 &&
			   ((if_getflags(ifp) ^ sc->sc_ifpflags) &
			    (IFF_ALLMULTI | IFF_PROMISC)) != 0)
				glc_set_multicast(sc);
			else
				glc_init_locked(sc);
		} else if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
			glc_stop(sc);
		sc->sc_ifpflags = if_getflags(ifp);
		mtx_unlock(&sc->sc_mtx);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		mtx_lock(&sc->sc_mtx);
		glc_set_multicast(sc);
		mtx_unlock(&sc->sc_mtx);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		err = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;
	default:
		err = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (err);
}

static u_int
glc_add_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	struct glc_softc *sc = arg;
	uint64_t addr = 0;

	/*
	 * Filter can only hold 32 addresses, so fall back to
	 * the IFF_ALLMULTI case if we have too many. +1 is for
	 * the broadcast address.
	 */
	if (cnt + 1 == 32)
		return (0);

	memcpy(&((uint8_t *)(&addr))[2], LLADDR(sdl), ETHER_ADDR_LEN);
	lv1_net_add_multicast_address(sc->sc_bus, sc->sc_dev, addr, 0);

	return (1);
}

static void
glc_set_multicast(struct glc_softc *sc)
{
	if_t ifp = sc->sc_ifp;
	int naddrs;

	/* Clear multicast filter */
	lv1_net_remove_multicast_address(sc->sc_bus, sc->sc_dev, 0, 1);

	/* Add broadcast */
	lv1_net_add_multicast_address(sc->sc_bus, sc->sc_dev,
	    0xffffffffffffL, 0);

	if ((if_getflags(ifp) & IFF_ALLMULTI) != 0) {
		lv1_net_add_multicast_address(sc->sc_bus, sc->sc_dev, 0, 1);
	} else {
		naddrs = if_foreach_llmaddr(ifp, glc_add_maddr, sc);
		if (naddrs + 1 == 32)
			lv1_net_add_multicast_address(sc->sc_bus,
			    sc->sc_dev, 0, 1);
	}
}

static int
glc_add_rxbuf(struct glc_softc *sc, int idx)
{
	struct glc_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	int error, nsegs;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

	if (rxs->rxs_mbuf != NULL) {
		bus_dmamap_sync(sc->sc_rxdma_tag, rxs->rxs_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_rxdma_tag, rxs->rxs_dmamap);
	}

	error = bus_dmamap_load_mbuf_sg(sc->sc_rxdma_tag, rxs->rxs_dmamap, m,
	    segs, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->sc_self,
		    "cannot load RX DMA map %d, error = %d\n", idx, error);
		m_freem(m);
		return (error);
	}

	/* If nsegs is wrong then the stack is corrupt. */
	KASSERT(nsegs == 1,
	    ("%s: too many DMA segments (%d)", __func__, nsegs));
	rxs->segment = segs[0];
	rxs->rxs_mbuf = m;

	bus_dmamap_sync(sc->sc_rxdma_tag, rxs->rxs_dmamap, BUS_DMASYNC_PREREAD);

	return (0);
}

static int
glc_add_rxbuf_dma(struct glc_softc *sc, int idx)
{
	struct glc_rxsoft *rxs = &sc->sc_rxsoft[idx];

	bzero(&sc->sc_rxdmadesc[idx], sizeof(sc->sc_rxdmadesc[idx]));
	sc->sc_rxdmadesc[idx].paddr = rxs->segment.ds_addr;
	sc->sc_rxdmadesc[idx].len = rxs->segment.ds_len;
	sc->sc_rxdmadesc[idx].next = sc->sc_rxdmadesc_phys +
	    ((idx + 1) % GLC_MAX_RX_PACKETS)*sizeof(sc->sc_rxdmadesc[idx]);
	sc->sc_rxdmadesc[idx].cmd_stat = GELIC_DESCR_OWNED;

	rxs->rxs_desc_slot = idx;
	rxs->rxs_desc = sc->sc_rxdmadesc_phys + idx*sizeof(struct glc_dmadesc);

	return (0);
}

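/*
 * Map an outgoing mbuf chain into the 128-slot TX descriptor ring.
 * bsy_txdma_slots tracks ring occupancy; a single packet may use at most
 * 16 descriptors (the size of segs[] below), so longer chains are
 * collapsed first.
 */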
static int
glc_encap(struct glc_softc *sc, struct mbuf **m_head, bus_addr_t *pktdesc)
{
	bus_dma_segment_t segs[16];
	struct glc_txsoft *txs;
	struct mbuf *m;
	bus_addr_t firstslotphys;
	int i, idx, nsegs, nsegs_max;
	int err = 0;

	/* Max number of segments is the number of free DMA slots */
	nsegs_max = 128 - sc->bsy_txdma_slots;

	if (nsegs_max > 16 || sc->first_used_txdma_slot < 0)
		nsegs_max = 16;

	/* Get a work queue entry. */
	if ((txs = STAILQ_FIRST(&sc->sc_txfreeq)) == NULL) {
		/* Ran out of descriptors. */
		return (ENOBUFS);
	}

	nsegs = 0;
	for (m = *m_head; m != NULL; m = m->m_next)
		nsegs++;

	if (nsegs > nsegs_max) {
		m = m_collapse(*m_head, M_NOWAIT, nsegs_max);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m;
	}

	err = bus_dmamap_load_mbuf_sg(sc->sc_txdma_tag, txs->txs_dmamap,
	    *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
	if (err != 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (err);
	}

	KASSERT(nsegs <= 128 - sc->bsy_txdma_slots,
	    ("GLC: Mapped too many (%d) DMA segments with %d available",
	    nsegs, 128 - sc->bsy_txdma_slots));

	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	txs->txs_ndescs = nsegs;
	txs->txs_firstdesc = sc->next_txdma_slot;

	idx = txs->txs_firstdesc;
	firstslotphys = sc->sc_txdmadesc_phys +
	    txs->txs_firstdesc*sizeof(struct glc_dmadesc);

	for (i = 0; i < nsegs; i++) {
		bzero(&sc->sc_txdmadesc[idx], sizeof(sc->sc_txdmadesc[idx]));
		sc->sc_txdmadesc[idx].paddr = segs[i].ds_addr;
		sc->sc_txdmadesc[idx].len = segs[i].ds_len;
		sc->sc_txdmadesc[idx].next = sc->sc_txdmadesc_phys +
		    ((idx + 1) % GLC_MAX_TX_PACKETS)*sizeof(struct glc_dmadesc);
		sc->sc_txdmadesc[idx].cmd_stat |= GELIC_CMDSTAT_NOIPSEC;

		if (i + 1 == nsegs) {
			txs->txs_lastdesc = idx;
			sc->sc_txdmadesc[idx].next = 0;
			sc->sc_txdmadesc[idx].cmd_stat |= GELIC_CMDSTAT_LAST;
		}

		if ((*m_head)->m_pkthdr.csum_flags & CSUM_TCP)
			sc->sc_txdmadesc[idx].cmd_stat |= GELIC_CMDSTAT_CSUM_TCP;
		if ((*m_head)->m_pkthdr.csum_flags & CSUM_UDP)
			sc->sc_txdmadesc[idx].cmd_stat |= GELIC_CMDSTAT_CSUM_UDP;
		sc->sc_txdmadesc[idx].cmd_stat |= GELIC_DESCR_OWNED;

		idx = (idx + 1) % GLC_MAX_TX_PACKETS;
	}
	sc->next_txdma_slot = idx;
	sc->bsy_txdma_slots += nsegs;
	if (txs->txs_firstdesc != 0)
		idx = txs->txs_firstdesc - 1;
	else
		idx = GLC_MAX_TX_PACKETS - 1;

	if (sc->first_used_txdma_slot < 0)
		sc->first_used_txdma_slot = txs->txs_firstdesc;

	bus_dmamap_sync(sc->sc_txdma_tag, txs->txs_dmamap,
	    BUS_DMASYNC_PREWRITE);

	/* Chain the new packet into the slot just before its first slot. */
	sc->sc_txdmadesc[idx].next = firstslotphys;

	STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
	STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);
	txs->txs_mbuf = *m_head;
	*pktdesc = firstslotphys;

	return (0);
}

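/*
 * Harvest received frames: walk the RX ring until the first descriptor
 * still owned by the hardware (GELIC_DESCR_OWNED set), pass good frames
 * up the stack, and requeue every slot behind us.
 */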
static void
glc_rxintr(struct glc_softc *sc)
{
	int i, restart_rxdma, error;
	struct mbuf *m;
	if_t ifp = sc->sc_ifp;

	bus_dmamap_sync(sc->sc_dmadesc_tag, sc->sc_rxdmadesc_map,
	    BUS_DMASYNC_POSTREAD);

	restart_rxdma = 0;
	while ((sc->sc_rxdmadesc[sc->sc_next_rxdma_slot].cmd_stat &
	    GELIC_DESCR_OWNED) == 0) {
		i = sc->sc_next_rxdma_slot;
		sc->sc_next_rxdma_slot++;
		if (sc->sc_next_rxdma_slot >= GLC_MAX_RX_PACKETS)
			sc->sc_next_rxdma_slot = 0;

		if (sc->sc_rxdmadesc[i].cmd_stat & GELIC_CMDSTAT_CHAIN_END)
			restart_rxdma = 1;

		if (sc->sc_rxdmadesc[i].rxerror & GELIC_RXERRORS) {
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			goto requeue;
		}

		m = sc->sc_rxsoft[i].rxs_mbuf;
		if (sc->sc_rxdmadesc[i].data_stat & GELIC_RX_IPCSUM) {
			m->m_pkthdr.csum_flags |=
			    CSUM_IP_CHECKED | CSUM_IP_VALID;
		}
		if (sc->sc_rxdmadesc[i].data_stat & GELIC_RX_TCPUDPCSUM) {
			m->m_pkthdr.csum_flags |=
			    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
			m->m_pkthdr.csum_data = 0xffff;
		}

		if (glc_add_rxbuf(sc, i)) {
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			goto requeue;
		}

		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
		m->m_pkthdr.rcvif = ifp;
		m->m_len = sc->sc_rxdmadesc[i].valid_size;
		m->m_pkthdr.len = m->m_len;

		/*
		 * Remove VLAN tag. Even on early firmwares that do not allow
		 * multiple VLANs, the VLAN tag is still in place here.
		 */
		m_adj(m, 2);

		mtx_unlock(&sc->sc_mtx);
		if_input(ifp, m);
		mtx_lock(&sc->sc_mtx);

	requeue:
		glc_add_rxbuf_dma(sc, i);
	}

	bus_dmamap_sync(sc->sc_dmadesc_tag, sc->sc_rxdmadesc_map,
	    BUS_DMASYNC_PREWRITE);

	if (restart_rxdma) {
		error = lv1_net_start_rx_dma(sc->sc_bus, sc->sc_dev,
		    sc->sc_rxsoft[sc->sc_next_rxdma_slot].rxs_desc, 0);
		if (error != 0)
			device_printf(sc->sc_self,
			    "lv1_net_start_rx_dma error: %d\n", error);
	}
}

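/*
 * Reap completed TX descriptors. Nonzero bits in the top nibble of
 * cmd_stat appear to flag a transmit error, in which case the DMA engine
 * is stopped and later kickstarted from the first unretired descriptor.
 */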
static void
glc_txintr(struct glc_softc *sc)
{
	if_t ifp = sc->sc_ifp;
	struct glc_txsoft *txs;
	int progress = 0, kickstart = 0, error;

	bus_dmamap_sync(sc->sc_dmadesc_tag, sc->sc_txdmadesc_map,
	    BUS_DMASYNC_POSTREAD);

	while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
		if (sc->sc_txdmadesc[txs->txs_lastdesc].cmd_stat
		    & GELIC_DESCR_OWNED)
			break;

		STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
		bus_dmamap_unload(sc->sc_txdma_tag, txs->txs_dmamap);
		sc->bsy_txdma_slots -= txs->txs_ndescs;

		if (txs->txs_mbuf != NULL) {
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}

		if ((sc->sc_txdmadesc[txs->txs_lastdesc].cmd_stat & 0xf0000000)
		    != 0) {
			lv1_net_stop_tx_dma(sc->sc_bus, sc->sc_dev, 0);
			kickstart = 1;
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		}

		if (sc->sc_txdmadesc[txs->txs_lastdesc].cmd_stat &
		    GELIC_CMDSTAT_CHAIN_END)
			kickstart = 1;

		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		progress = 1;
	}

	if (txs != NULL)
		sc->first_used_txdma_slot = txs->txs_firstdesc;
	else
		sc->first_used_txdma_slot = -1;

	if (kickstart || txs != NULL) {
		/* Speculatively (or necessarily) start the TX queue again */
		error = lv1_net_start_tx_dma(sc->sc_bus, sc->sc_dev,
		    sc->sc_txdmadesc_phys +
		    ((txs == NULL) ? 0 : txs->txs_firstdesc)*
		    sizeof(struct glc_dmadesc), 0);
		if (error != 0)
			device_printf(sc->sc_self,
			    "lv1_net_start_tx_dma error: %d\n", error);
	}

	if (progress) {
		/*
		 * We freed some descriptors, so reset IFF_DRV_OACTIVE
		 * and restart.
		 */
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
		sc->sc_wdog_timer = STAILQ_EMPTY(&sc->sc_txdirtyq) ? 0 : 5;

		if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) &&
		    !if_sendq_empty(ifp))
			glc_start_locked(ifp);
	}
}

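/*
 * Two-stage interrupt handling: the filter runs in primary interrupt
 * context and only latches the hypervisor-written status word into
 * sc_interrupt_status; the threaded handler (glc_intr) does the work.
 */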
static int
glc_intr_filter(void *xsc)
{
	struct glc_softc *sc = xsc;

	powerpc_sync();
	atomic_set_64(&sc->sc_interrupt_status, *sc->sc_hwirq_status);
	return (FILTER_SCHEDULE_THREAD);
}

static void
glc_intr(void *xsc)
{
	struct glc_softc *sc = xsc;
	uint64_t status, linkstat, junk;

	mtx_lock(&sc->sc_mtx);

	status = atomic_readandclear_64(&sc->sc_interrupt_status);
	if (status == 0) {
		mtx_unlock(&sc->sc_mtx);
		return;
	}

	if (status & (GELIC_INT_RXDONE | GELIC_INT_RXFRAME))
		glc_rxintr(sc);

	if (status & (GELIC_INT_TXDONE | GELIC_INT_TX_CHAIN_END))
		glc_txintr(sc);

	if (status & GELIC_INT_PHY) {
		lv1_net_control(sc->sc_bus, sc->sc_dev, GELIC_GET_LINK_STATUS,
		    GELIC_VLAN_TX_ETHERNET, 0, 0, &linkstat, &junk);

		linkstat = (linkstat & GELIC_LINK_UP) ?
		    LINK_STATE_UP : LINK_STATE_DOWN;
		if_link_state_change(sc->sc_ifp, linkstat);
	}

	mtx_unlock(&sc->sc_mtx);
}

static void
glc_media_status(if_t ifp, struct ifmediareq *ifmr)
{
	struct glc_softc *sc = if_getsoftc(ifp);
	uint64_t status, junk;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	lv1_net_control(sc->sc_bus, sc->sc_dev, GELIC_GET_LINK_STATUS,
	    GELIC_VLAN_TX_ETHERNET, 0, 0, &status, &junk);

	if (status & GELIC_LINK_UP)
		ifmr->ifm_status |= IFM_ACTIVE;

	if (status & GELIC_SPEED_10)
		ifmr->ifm_active |= IFM_10_T;
	else if (status & GELIC_SPEED_100)
		ifmr->ifm_active |= IFM_100_TX;
	else if (status & GELIC_SPEED_1000)
		ifmr->ifm_active |= IFM_1000_T;

	if (status & GELIC_FULL_DUPLEX)
		ifmr->ifm_active |= IFM_FDX;
	else
		ifmr->ifm_active |= IFM_HDX;
}

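/*
 * Media changes are pushed straight to the hypervisor: the selected
 * ifmedia subtype is translated into a GELIC link-mode word and applied
 * via GELIC_SET_LINK_MODE on the TX VLAN channel.
 */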
static int
glc_media_change(if_t ifp)
{
	struct glc_softc *sc = if_getsoftc(ifp);
	uint64_t mode, junk;
	int result;

	if (IFM_TYPE(sc->sc_media.ifm_media) != IFM_ETHER)
		return (EINVAL);

	switch (IFM_SUBTYPE(sc->sc_media.ifm_media)) {
	case IFM_AUTO:
		mode = GELIC_AUTO_NEG;
		break;
	case IFM_10_T:
		mode = GELIC_SPEED_10;
		break;
	case IFM_100_TX:
		mode = GELIC_SPEED_100;
		break;
	case IFM_1000_T:
		mode = GELIC_SPEED_1000 | GELIC_FULL_DUPLEX;
		break;
	default:
		return (EINVAL);
	}

	if (IFM_OPTIONS(sc->sc_media.ifm_media) & IFM_FDX)
		mode |= GELIC_FULL_DUPLEX;

	result = lv1_net_control(sc->sc_bus, sc->sc_dev, GELIC_SET_LINK_MODE,
	    GELIC_VLAN_TX_ETHERNET, mode, 0, &junk, &junk);

	return (result ? EIO : 0);
}