1 /* $NetBSD: if_gmc.c,v 1.2 2008/12/15 04:44:27 matt Exp $ */
3 * Copyright (c) 2008 The NetBSD Foundation, Inc.
6 * This code is derived from software contributed to The NetBSD Foundation
7 * by Matt Thomas <matt@3am-software.com>
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
18 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
19 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
20 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
21 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
22 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 * POSSIBILITY OF SUCH DAMAGE.
31 #include <sys/param.h>
32 #include <sys/callout.h>
33 #include <sys/device.h>
34 #include <sys/ioctl.h>
35 #include <sys/kernel.h>
39 #include <machine/bus.h>
40 #include <machine/intr.h>
42 #include <arm/gemini/gemini_reg.h>
43 #include <arm/gemini/gemini_gmacreg.h>
44 #include <arm/gemini/gemini_gmacvar.h>
47 #include <net/if_ether.h>
48 #include <net/if_dl.h>
50 __KERNEL_RCSID(0, "$NetBSD: if_gmc.c,v 1.2 2008/12/15 04:44:27 matt Exp $");
/*
 * Per-port software state for one GMAC Ethernet port of the Gemini SoC.
 * NOTE(review): the "struct gmc_softc {" opener and several members
 * (e.g. sc_dev, sc_phy, sc_port1, sc_ih, sc_mii_ch, sc_dmavr) are not
 * visible in this extract; only the member fragments below survived.
 */
/* Parent (shared) GMAC core softc. */
56 struct gmac_softc
*sc_psc
;
/* The other port's softc, when both ports are attached. */
57 struct gmc_softc
*sc_sibling
;
/* Bus-space/bus-dma handles: whole core, per-port DMA and GMAC regions. */
58 bus_dma_tag_t sc_dmat
;
59 bus_space_tag_t sc_iot
;
60 bus_space_handle_t sc_ioh
;
61 bus_space_handle_t sc_dma_ioh
;
62 bus_space_handle_t sc_gmac_ioh
;
/* Common Ethernet and MII (PHY) state. */
63 struct ethercom sc_ec
;
64 struct mii_data sc_mii
;
/* One default RX queue; six software TX queues. */
68 gmac_hwqueue_t
*sc_rxq
;
69 gmac_hwqueue_t
*sc_txq
[6];
/* Shadow copies of GMAC registers, so writes happen only on change. */
72 uint32_t sc_gmac_status
;
73 uint32_t sc_gmac_sta_add
[3];
74 uint32_t sc_gmac_mcast_filter
[2];
75 uint32_t sc_gmac_rx_filter
;
76 uint32_t sc_gmac_config
[2];
/* Per-port interrupt masks (all bits belonging to this port) and the
 * subset currently enabled, for the five INT status registers. */
79 uint32_t sc_int_mask
[5];
80 uint32_t sc_int_enabled
[5];
/* Convenience alias: the embedded ifnet inside the ethercom. */
83 #define sc_if sc_ec.ec_if
/*
 * gmc_txqueue: enqueue mbuf chain <m> on hardware TX queue <hwq>.
 * Grabs a DMA map from the queue's map cache, nudges the payload to an
 * even address (the panic below shows the h/w cannot handle odd buffer
 * addresses), loads the map, fills one descriptor per DMA segment,
 * marks the last one EOF, and hands the descriptors to the hardware.
 * NOTE(review): this extract is incomplete — declarations of map/count/
 * i/d/error, the return statements, and several braces are missing.
 */
86 gmc_txqueue(struct gmc_softc
*sc
, gmac_hwqueue_t
*hwq
, struct mbuf
*m
)
89 uint32_t desc0
, desc1
, desc3
;
90 struct mbuf
*last_m
, *m0
;
/* Borrow a DMA map from the queue's map cache. */
97 map
= gmac_mapcache_get(hwq
->hwq_hqm
->hqm_mc
);
/*
 * Walk the chain counting worst-case descriptors; when an mbuf's data
 * starts at an odd address, shift it one byte using whatever trailing
 * or leading space is available.
 */
101 for (last_m
= NULL
, m0
= m
, count
= 0;
103 last_m
= m0
, m0
= m0
->m_next
) {
104 vaddr_t addr
= (uintptr_t)m0
->m_data
;
108 if (last_m
!= NULL
&& M_TRAILINGSPACE(last_m
) > 0) {
/* Move the first odd byte into the previous mbuf's tail. */
109 last_m
->m_data
[last_m
->m_len
++] = *m
->m_data
++;
111 } else if (M_TRAILINGSPACE(m0
) > 0) {
112 memmove(m0
->m_data
+ 1, m0
->m_data
, m0
->m_len
);
114 } else if (M_LEADINGSPACE(m0
) > 0) {
115 memmove(m0
->m_data
- 1, m0
->m_data
, m0
->m_len
);
/* No way to realign: the hardware cannot DMA from an odd address. */
118 panic("gmc_txqueue: odd addr %p", m0
->m_data
);
/* One descriptor per page spanned, worst case. */
121 count
+= ((addr
& PGOFSET
) + m
->m_len
+ PGOFSET
) >> PGSHIFT
;
/* Refresh the h/w read pointer, then check descriptor availability. */
124 gmac_hwqueue_sync(hwq
);
125 if (hwq
->hwq_free
<= count
) {
/* Queue full: give the map back and fail (caller will retry). */
126 gmac_mapcache_put(hwq
->hwq_hqm
->hqm_mc
, map
);
130 error
= bus_dmamap_load_mbuf(sc
->sc_dmat
, map
, m
,
131 BUS_DMA_WRITE
|BUS_DMA_NOWAIT
);
/* Load failed: report, return the map, and count an output error. */
133 aprint_error_dev(sc
->sc_dev
, "ifstart: load failed: %d\n",
135 gmac_mapcache_put(hwq
->hwq_hqm
->hqm_mc
, map
);
137 sc
->sc_if
.if_oerrors
++;
140 KASSERT(map
->dm_nsegs
> 0);
143 * Sync the mbuf contents to memory/cache.
145 bus_dmamap_sync(sc
->sc_dmat
, map
, 0, map
->dm_mapsize
,
146 BUS_DMASYNC_PREWRITE
);
149 * Now we need to load the descriptors...
151 desc0
= map
->dm_nsegs
<< 16;
152 desc1
= m
->m_pkthdr
.len
;
159 aprint_debug_dev(sc
->sc_dev
,
160 "gmac_txqueue: %zu@%p=%#x/%#x/%#x/%#x\n",
161 i
-1, d
, d
->d_desc0
, d
->d_desc1
,
162 d
->d_bufaddr
, d
->d_desc3
);
/* Fill one descriptor per DMA segment (descriptors are little-endian). */
164 d
= gmac_hwqueue_desc(hwq
, i
);
165 KASSERT(map
->dm_segs
[i
].ds_len
> 0);
166 KASSERT((map
->dm_segs
[i
].ds_addr
& 1) == 0);
167 d
->d_desc0
= htole32(map
->dm_segs
[i
].ds_len
| desc0
);
168 d
->d_desc1
= htole32(desc1
);
169 d
->d_bufaddr
= htole32(map
->dm_segs
[i
].ds_addr
);
170 d
->d_desc3
= htole32(desc3
);
172 } while (++i
< map
->dm_nsegs
);
/* Tag the final descriptor end-of-frame, with interrupt on EOF. */
174 d
->d_desc3
|= htole32(DESC3_EOF
|DESC3_EOFIE
);
176 aprint_debug_dev(sc
->sc_dev
,
177 "gmac_txqueue: %zu@%p=%#x/%#x/%#x/%#x\n",
178 i
-1, d
, d
->d_desc0
, d
->d_desc1
, d
->d_bufaddr
, d
->d_desc3
);
/* Keep the mbuf on the queue's ifq until TX completion frees it. */
181 IF_ENQUEUE(&hwq
->hwq_ifq
, m
);
183 * Last descriptor has been marked. Give them to the h/w.
184 * This will sync for us.
186 gmac_hwqueue_produce(hwq
, map
->dm_nsegs
);
188 aprint_debug_dev(sc
->sc_dev
,
189 "gmac_txqueue: *%zu@%p=%#x/%#x/%#x/%#x\n",
190 i
-1, d
, d
->d_desc0
, d
->d_desc1
, d
->d_bufaddr
, d
->d_desc3
);
/*
 * gmc_filter_change: push the interface's station address, multicast
 * hash, and RX filter mode down to the GMAC, writing each register only
 * when the desired value differs from the cached shadow copy.
 * NOTE(review): the line that initializes new2 (and the mhash/i
 * declarations) is missing from this extract — verify against the full
 * source that new2 is set before the comparison below.
 */
196 gmc_filter_change(struct gmc_softc
*sc
)
198 struct ether_multi
*enm
;
199 struct ether_multistep step
;
201 uint32_t new0
, new1
, new2
;
202 const char * const eaddr
= CLLADDR(sc
->sc_if
.if_sadl
);
/* Pack the 6-byte MAC address into two little-endian register words. */
204 new0
= eaddr
[0] | ((eaddr
[1] | (eaddr
[2] | (eaddr
[3] << 8)) << 8) << 8);
205 new1
= eaddr
[4] | (eaddr
[5] << 8);
207 if (sc
->sc_gmac_sta_add
[0] != new0
208 || sc
->sc_gmac_sta_add
[1] != new1
209 || sc
->sc_gmac_sta_add
[2] != new2
) {
210 bus_space_write_4(sc
->sc_iot
, sc
->sc_gmac_ioh
, GMAC_STA_ADD0
,
212 bus_space_write_4(sc
->sc_iot
, sc
->sc_gmac_ioh
, GMAC_STA_ADD1
,
214 bus_space_write_4(sc
->sc_iot
, sc
->sc_gmac_ioh
, GMAC_STA_ADD2
,
/* Update shadows so future calls can skip the register writes. */
216 sc
->sc_gmac_sta_add
[0] = new0
;
217 sc
->sc_gmac_sta_add
[1] = new1
;
218 sc
->sc_gmac_sta_add
[2] = new2
;
/* Build the 64-bit multicast hash from the current multicast list. */
223 ETHER_FIRST_MULTI(step
, &sc
->sc_ec
, enm
);
224 while (enm
!= NULL
) {
/* An address range cannot be hashed: accept all multicast. */
226 if (memcmp(enm
->enm_addrlo
, enm
->enm_addrhi
, ETHER_ADDR_LEN
)) {
227 mhash
[0] = mhash
[1] = 0xffffffff;
/* Hash on big-endian CRC32; bit 5 picks the word, low 5 bits the bit. */
230 i
= ether_crc32_be(enm
->enm_addrlo
, ETHER_ADDR_LEN
);
231 mhash
[(i
>> 5) & 1] |= 1 << (i
& 31);
232 ETHER_NEXT_MULTI(step
, enm
);
235 if (sc
->sc_gmac_mcast_filter
[0] != mhash
[0]
236 || sc
->sc_gmac_mcast_filter
[1] != mhash
[1]) {
237 bus_space_write_4(sc
->sc_iot
, sc
->sc_gmac_ioh
,
238 GMAC_MCAST_FILTER0
, mhash
[0]);
239 bus_space_write_4(sc
->sc_iot
, sc
->sc_gmac_ioh
,
240 GMAC_MCAST_FILTER1
, mhash
[1]);
241 sc
->sc_gmac_mcast_filter
[0] = mhash
[0];
242 sc
->sc_gmac_mcast_filter
[1] = mhash
[1];
/* Always accept broadcast/unicast/multicast; add promiscuous on demand. */
245 new0
= sc
->sc_gmac_rx_filter
& ~RXFILTER_PROMISC
;
246 new0
|= RXFILTER_BROADCAST
| RXFILTER_UNICAST
| RXFILTER_MULTICAST
;
247 if (sc
->sc_if
.if_flags
& IFF_PROMISC
)
248 new0
|= RXFILTER_PROMISC
;
250 if (new0
!= sc
->sc_gmac_rx_filter
) {
251 bus_space_write_4(sc
->sc_iot
, sc
->sc_gmac_ioh
, GMAC_RX_FILTER
,
253 sc
->sc_gmac_rx_filter
= new0
;
/*
 * gmc_mii_tick: once-a-second callout. Decays the shared RX-mbuf
 * floor back toward MIN_RXMAPS when traffic is light, ticks the MII
 * state machine, and re-arms itself while the interface is running.
 */
258 gmc_mii_tick(void *arg
)
260 struct gmc_softc
* const sc
= arg
;
261 struct gmac_softc
* const psc
= sc
->sc_psc
;
265 * If we had to increase the number of receive mbufs due to fifo
266 * overflows, we need a way to decrease them. So every second we
267 * receive less than or equal to MIN_RXMAPS packets, we decrement
268 * swfree_min until it returns to MIN_RXMAPS.
270 if (psc
->sc_rxpkts_per_sec
<= MIN_RXMAPS
271 && psc
->sc_swfree_min
> MIN_RXMAPS
) {
272 psc
->sc_swfree_min
--;
273 gmac_swfree_min_update(psc
);
276 * If only one GMAC is running or this is port0, reset the count.
278 if (psc
->sc_running
!= 3 || !sc
->sc_port1
)
279 psc
->sc_rxpkts_per_sec
= 0;
281 mii_tick(&sc
->sc_mii
);
/* Re-arm for one second from now while still running. */
282 if (sc
->sc_if
.if_flags
& IFF_RUNNING
)
283 callout_schedule(&sc
->sc_mii_ch
, hz
);
/*
 * gmc_mediachange: ifmedia change callback — defer to the MII layer,
 * but only when the interface is marked up.
 */
289 gmc_mediachange(struct ifnet
*ifp
)
291 struct gmc_softc
* const sc
= ifp
->if_softc
;
/* Nothing to do until the interface is brought up. */
293 if ((ifp
->if_flags
& IFF_UP
) == 0)
296 return mii_mediachg(&sc
->sc_mii
);
/*
 * gmc_mediastatus: ifmedia status callback — poll the PHY and report
 * the current media status and active media to the caller.
 */
300 gmc_mediastatus(struct ifnet
*ifp
, struct ifmediareq
*ifmr
)
302 struct gmc_softc
* const sc
= ifp
->if_softc
;
304 mii_pollstat(&sc
->sc_mii
);
305 ifmr
->ifm_status
= sc
->sc_mii
.mii_media_status
;
306 ifmr
->ifm_active
= sc
->sc_mii
.mii_media_active
;
/*
 * gmc_mii_statchg: MII status-change callback. Recomputes the GMAC
 * STATUS register (PHY mode, speed, duplex, link) from the negotiated
 * media and writes it out only if it differs from the cached value.
 */
310 gmc_mii_statchg(device_t self
)
312 struct gmc_softc
* const sc
= device_private(self
);
313 uint32_t gmac_status
;
315 gmac_status
= sc
->sc_gmac_status
;
/* Force RGMII-A PHY mode. */
317 gmac_status
&= ~STATUS_PHYMODE_MASK
;
318 gmac_status
|= STATUS_PHYMODE_RGMII_A
;
/* Translate the negotiated media subtype into the speed field. */
320 gmac_status
&= ~STATUS_SPEED_MASK
;
321 if (IFM_SUBTYPE(sc
->sc_mii
.mii_media_active
) == IFM_1000_T
) {
322 gmac_status
|= STATUS_SPEED_1000M
;
323 } else if (IFM_SUBTYPE(sc
->sc_mii
.mii_media_active
) == IFM_100_TX
) {
324 gmac_status
|= STATUS_SPEED_100M
;
326 gmac_status
|= STATUS_SPEED_10M
;
329 if (sc
->sc_mii
.mii_media_active
& IFM_FDX
)
330 gmac_status
|= STATUS_DUPLEX_FULL
;
332 gmac_status
&= ~STATUS_DUPLEX_FULL
;
334 if (sc
->sc_mii
.mii_media_status
& IFM_ACTIVE
)
335 gmac_status
|= STATUS_LINK_ON
;
337 gmac_status
&= ~STATUS_LINK_ON
;
/* Write the register only when something actually changed. */
339 if (sc
->sc_gmac_status
!= gmac_status
) {
340 aprint_debug_dev(sc
->sc_dev
,
341 "status change old=%#x new=%#x active=%#x\n",
342 sc
->sc_gmac_status
, gmac_status
,
343 sc
->sc_mii
.mii_media_active
);
344 sc
->sc_gmac_status
= gmac_status
;
345 bus_space_write_4(sc
->sc_iot
, sc
->sc_gmac_ioh
, GMAC_STATUS
,
/* NOTE(review): magic PHY register write (reg 0x18 = 0x0041) — purpose
 * not derivable from this extract; presumably PHY-specific setup. */
349 (*sc
->sc_mii
.mii_writereg
)(sc
->sc_dev
, sc
->sc_phy
, 0x0018, 0x0041);
/*
 * gmc_ifioctl: interface ioctl handler. Media ioctls go to ifmedia;
 * everything else goes through ether_ioctl(), and an ENETRESET result
 * (filter change needed) is absorbed by reprogramming the RX filter.
 * NOTE(review): the switch statement and return are missing from this
 * extract.
 */
353 gmc_ifioctl(struct ifnet
*ifp
, u_long cmd
, void *data
)
355 struct gmc_softc
* const sc
= ifp
->if_softc
;
356 struct ifreq
* const ifr
= data
;
364 error
= ifmedia_ioctl(ifp
, ifr
, &sc
->sc_mii
.mii_media
, cmd
);
367 error
= ether_ioctl(ifp
, cmd
, data
);
/* ENETRESET means "multicast list changed", not a real error. */
368 if (error
== ENETRESET
) {
369 if (ifp
->if_flags
& IFF_RUNNING
) {
371 * If the interface is running, we have to
372 * update its multicast filter.
374 gmc_filter_change(sc
);
/*
 * gmc_ifstart: if_start handler. Drains the send queue into software
 * TX queue 0; when gmc_txqueue() cannot take a packet (queue full) the
 * mbuf is put back and the interface is marked OACTIVE until the TX
 * EOF interrupt makes room again.
 * NOTE(review): the loop construct, mbuf declaration, and early
 * returns are missing from this extract.
 */
385 gmc_ifstart(struct ifnet
*ifp
)
387 struct gmc_softc
* const sc
= ifp
->if_softc
;
/* Don't bother queueing packets while the link is down. */
390 if ((sc
->sc_gmac_status
& STATUS_LINK_ON
) == 0)
393 if ((ifp
->if_flags
& IFF_RUNNING
) == 0)
398 IF_DEQUEUE(&ifp
->if_snd
, m
);
401 if (!gmc_txqueue(sc
, sc
->sc_txq
[0], m
)) {
/* Queue full: requeue at the head and stall output. */
402 IF_PREPEND(&ifp
->if_snd
, m
);
403 ifp
->if_flags
|= IFF_OACTIVE
;
/*
 * gmc_ifstop: if_stop handler. Withdraws this port's interrupt enables
 * from the shared (parent) enable set, rewires the software free-queue
 * interrupt to the surviving port if necessary, and tears down the map
 * caches once neither port is running.
 * NOTE(review): tail of the function (flag clearing/return) missing
 * from this extract.
 */
410 gmc_ifstop(struct ifnet
*ifp
, int disable
)
412 struct gmc_softc
* const sc
= ifp
->if_softc
;
413 struct gmac_softc
* const psc
= sc
->sc_psc
;
/* Clear this port's running bit (port1 -> bit 1, port0 -> bit 0). */
415 psc
->sc_running
&= ~(sc
->sc_port1
? 2 : 1);
416 psc
->sc_int_enabled
[0] &= ~sc
->sc_int_enabled
[0];
417 psc
->sc_int_enabled
[1] &= ~sc
->sc_int_enabled
[1];
418 psc
->sc_int_enabled
[2] &= ~sc
->sc_int_enabled
[2];
419 psc
->sc_int_enabled
[3] &= ~sc
->sc_int_enabled
[3];
/* Precedence: (~enabled) | SW_FREEQ_EMPTY — i.e. drop our bits but
 * keep SW_FREEQ_EMPTY set in the mask, since the other port may still
 * depend on the shared free-queue interrupt. */
420 psc
->sc_int_enabled
[4] &= ~sc
->sc_int_enabled
[4] | INT4_SW_FREEQ_EMPTY
;
421 if (psc
->sc_running
== 0) {
/* Last port down: SW_FREEQ_EMPTY goes too, everything must be 0. */
422 psc
->sc_int_enabled
[4] &= ~INT4_SW_FREEQ_EMPTY
;
423 KASSERT(psc
->sc_int_enabled
[0] == 0);
424 KASSERT(psc
->sc_int_enabled
[1] == 0);
425 KASSERT(psc
->sc_int_enabled
[2] == 0);
426 KASSERT(psc
->sc_int_enabled
[3] == 0);
427 KASSERT(psc
->sc_int_enabled
[4] == 0);
428 } else if (((psc
->sc_int_select
[4] & INT4_SW_FREEQ_EMPTY
) != 0)
/* Reroute the free-queue interrupt to the port that is still up. */
430 psc
->sc_int_select
[4] &= ~INT4_SW_FREEQ_EMPTY
;
431 bus_space_write_4(sc
->sc_iot
, sc
->sc_ioh
, GMAC_INT4_MASK
,
432 psc
->sc_int_select
[4]);
434 gmac_intr_update(psc
);
437 if (psc
->sc_running
== 0) {
/* Both ports stopped: release the shared DMA map caches. */
438 gmac_mapcache_destroy(&psc
->sc_txmaps
);
439 gmac_mapcache_destroy(&psc
->sc_rxmaps
);
/*
 * gmc_ifinit: if_init handler. Lazily creates the RX and the six TX
 * hardware queues, programs the RX filter, the DMA parameters (DMAVR)
 * and CONFIG0, primes the software free queue, enables this port's
 * interrupts in the shared enable set, and starts the MII callout.
 * NOTE(review): error-path labels/returns and some declarations (hqm,
 * i, mask, new) are missing from this extract.
 */
446 gmc_ifinit(struct ifnet
*ifp
)
448 struct gmc_softc
* const sc
= ifp
->if_softc
;
449 struct gmac_softc
* const psc
= sc
->sc_psc
;
/* Make sure the shared map caches have their minimum populations. */
452 gmac_mapcache_fill(psc
->sc_rxmaps
, MIN_RXMAPS
);
453 gmac_mapcache_fill(psc
->sc_txmaps
, MIN_TXMAPS
);
/* First init: create the default RX queue for this port. */
455 if (sc
->sc_rxq
== NULL
) {
457 hqm
= gmac_hwqmem_create(psc
->sc_rxmaps
, 16, /*RXQ_NDESCS,*/ 1,
458 HQM_CONSUMER
|HQM_RX
);
459 sc
->sc_rxq
= gmac_hwqueue_create(hqm
, sc
->sc_iot
,
460 sc
->sc_ioh
, GMAC_DEF_RXQn_RWPTR(sc
->sc_port1
),
461 GMAC_DEF_RXQn_BASE(sc
->sc_port1
), 0);
462 if (sc
->sc_rxq
== NULL
) {
463 gmac_hwqmem_destroy(hqm
);
466 sc
->sc_rxq
->hwq_ifp
= ifp
;
467 sc
->sc_rxq
->hwq_producer
= psc
->sc_swfreeq
;
/* First init: create the six software TX queues from one memory pool. */
470 if (sc
->sc_txq
[0] == NULL
) {
471 gmac_hwqueue_t
*hwq
, *last_hwq
;
475 hqm
= gmac_hwqmem_create(psc
->sc_txmaps
, TXQ_NDESCS
, 6,
476 HQM_PRODUCER
|HQM_TX
);
477 KASSERT(hqm
!= NULL
);
478 for (i
= 0; i
< __arraycount(sc
->sc_txq
); i
++) {
479 sc
->sc_txq
[i
] = gmac_hwqueue_create(hqm
, sc
->sc_iot
,
480 sc
->sc_dma_ioh
, GMAC_SW_TX_Qn_RWPTR(i
),
481 GMAC_SW_TX_Q_BASE
, i
);
482 if (sc
->sc_txq
[i
] == NULL
) {
484 gmac_hwqmem_destroy(hqm
);
487 sc
->sc_txq
[i
]->hwq_ifp
= ifp
;
/* Keep the hw free queue's producer list sorted by queue offset. */
490 SLIST_FOREACH(hwq
, &psc
->sc_hwfreeq
->hwq_producers
,
492 if (sc
->sc_txq
[i
]->hwq_qoff
< hwq
->hwq_qoff
)
496 if (last_hwq
== NULL
)
498 &psc
->sc_hwfreeq
->hwq_producers
,
499 sc
->sc_txq
[i
], hwq_link
);
501 SLIST_INSERT_AFTER(last_hwq
, sc
->sc_txq
[i
],
/* Program station address, multicast hash and RX filter. */
506 gmc_filter_change(sc
);
/* DMAVR: enable RX/TX DMA with 32-word bursts on a 32-bit bus. */
508 mask
= DMAVR_LOOPBACK
|DMAVR_DROP_SMALL_ACK
|DMAVR_EXTRABYTES_MASK
509 |DMAVR_RXBURSTSIZE_MASK
|DMAVR_RXBUSWIDTH_MASK
510 |DMAVR_TXBURSTSIZE_MASK
|DMAVR_TXBUSWIDTH_MASK
;
511 new = DMAVR_RXDMA_ENABLE
|DMAVR_TXDMA_ENABLE
513 |DMAVR_RXBURSTSIZE(DMAVR_BURSTSIZE_32W
)
514 |DMAVR_RXBUSWIDTH(DMAVR_BUSWIDTH_32BITS
)
515 |DMAVR_TXBURSTSIZE(DMAVR_BURSTSIZE_32W
)
516 |DMAVR_TXBUSWIDTH(DMAVR_BUSWIDTH_32BITS
);
517 new |= sc
->sc_dmavr
& ~mask
;
518 if (sc
->sc_dmavr
!= new) {
520 bus_space_write_4(sc
->sc_iot
, sc
->sc_dma_ioh
, GMAC_DMAVR
,
522 aprint_debug_dev(sc
->sc_dev
, "gmc_ifinit: dmavr=%#x/%#x\n",
524 bus_space_read_4(sc
->sc_iot
, sc
->sc_dma_ioh
, GMAC_DMAVR
));
/* CONFIG0: 1536-byte max frame, latched MII, RX/TX enabled. */
527 mask
= CONFIG0_MAXLEN_MASK
|CONFIG0_TX_DISABLE
|CONFIG0_RX_DISABLE
528 |CONFIG0_LOOPBACK
|/*CONFIG0_SIM_TEST|*/CONFIG0_INVERSE_RXC_RGMII
529 |CONFIG0_RGMII_INBAND_STATUS_ENABLE
;
530 new = CONFIG0_MAXLEN(CONFIG0_MAXLEN_1536
)|CONFIG0_R_LATCHED_MMII
;
531 new |= (sc
->sc_gmac_config
[0] & ~mask
);
532 if (sc
->sc_gmac_config
[0] != new) {
533 sc
->sc_gmac_config
[0] = new;
534 bus_space_write_4(sc
->sc_iot
, sc
->sc_gmac_ioh
, GMAC_CONFIG0
,
535 sc
->sc_gmac_config
[0]);
536 aprint_debug_dev(sc
->sc_dev
, "gmc_ifinit: config0=%#x/%#x\n",
537 sc
->sc_gmac_config
[0],
538 bus_space_read_4(sc
->sc_iot
, sc
->sc_gmac_ioh
, GMAC_CONFIG0
));
/* Prime the software free queue with receive mbufs. */
541 psc
->sc_rxpkts_per_sec
+=
542 gmac_rxproduce(psc
->sc_swfreeq
, psc
->sc_swfree_min
);
545 * If we will be the only active interface, make sure the sw freeq
546 * interrupt gets routed to use.
548 if (psc
->sc_running
== 0
549 && (((psc
->sc_int_select
[4] & INT4_SW_FREEQ_EMPTY
) != 0) != sc
->sc_port1
)) {
550 psc
->sc_int_select
[4] ^= INT4_SW_FREEQ_EMPTY
;
551 bus_space_write_4(sc
->sc_iot
, sc
->sc_ioh
, GMAC_INT4_MASK
,
552 psc
->sc_int_select
[4]);
/* Select which of this port's interrupts we care about ... */
554 sc
->sc_int_enabled
[0] = sc
->sc_int_mask
[0]
555 & (INT0_TXDERR
|INT0_TXPERR
|INT0_RXDERR
|INT0_RXPERR
|INT0_SWTXQ_EOF
);
556 sc
->sc_int_enabled
[1] = sc
->sc_int_mask
[1] & INT1_DEF_RXQ_EOF
;
557 sc
->sc_int_enabled
[4] = INT4_SW_FREEQ_EMPTY
| (sc
->sc_int_mask
[4]
558 & (INT4_TX_FAIL
|INT4_MIB_HEMIWRAP
|INT4_RX_FIFO_OVRN
559 |INT4_RGMII_STSCHG
));
/* ... and merge them into the shared enable set. */
561 psc
->sc_int_enabled
[0] |= sc
->sc_int_enabled
[0];
562 psc
->sc_int_enabled
[1] |= sc
->sc_int_enabled
[1];
563 psc
->sc_int_enabled
[4] |= sc
->sc_int_enabled
[4];
565 gmac_intr_update(psc
);
/* Kick the MII state machine once on the idle->running transition. */
567 if ((ifp
->if_flags
& IFF_RUNNING
) == 0)
568 mii_tick(&sc
->sc_mii
);
570 ifp
->if_flags
|= IFF_RUNNING
;
571 psc
->sc_running
|= (sc
->sc_port1
? 2 : 1);
573 callout_schedule(&sc
->sc_mii_ch
, hz
);
/* Failure path: undo everything done so far. */
578 gmc_ifstop(ifp
, true);
/*
 * gmc_intr body (the function header line is missing from this
 * extract): hardware interrupt handler. Reads the INT0/INT1/INT4
 * status registers, reports TX/RX DMA errors, reaps completed software
 * TX queues, refills the software free queue, consumes received
 * packets, handles the miscellaneous INT4 conditions, and finally
 * restarts output if anything was reaped.
 */
585 struct gmc_softc
* const sc
= arg
;
586 uint32_t int0_status
, int1_status
, int4_status
;
588 bool do_ifstart
= false;
591 aprint_debug_dev(sc
->sc_dev
, "gmac_intr: entry\n");
/* Snapshot the three status registers we act on. */
593 int0_status
= bus_space_read_4(sc
->sc_iot
, sc
->sc_ioh
,
595 int1_status
= bus_space_read_4(sc
->sc_iot
, sc
->sc_ioh
,
597 int4_status
= bus_space_read_4(sc
->sc_iot
, sc
->sc_ioh
,
600 aprint_debug_dev(sc
->sc_dev
, "gmac_intr: sts=%#x/%#x/%#x/%#x/%#x\n",
601 int0_status
, int1_status
,
602 bus_space_read_4(sc
->sc_iot
, sc
->sc_ioh
, GMAC_INT2_STATUS
),
603 bus_space_read_4(sc
->sc_iot
, sc
->sc_ioh
, GMAC_INT3_STATUS
),
607 aprint_debug_dev(sc
->sc_dev
, "gmac_intr: mask=%#x/%#x/%#x/%#x/%#x\n",
608 bus_space_read_4(sc
->sc_iot
, sc
->sc_ioh
, GMAC_INT0_MASK
),
609 bus_space_read_4(sc
->sc_iot
, sc
->sc_ioh
, GMAC_INT1_MASK
),
610 bus_space_read_4(sc
->sc_iot
, sc
->sc_ioh
, GMAC_INT2_MASK
),
611 bus_space_read_4(sc
->sc_iot
, sc
->sc_ioh
, GMAC_INT3_MASK
),
612 bus_space_read_4(sc
->sc_iot
, sc
->sc_ioh
, GMAC_INT4_MASK
));
/* Consider only INT0 bits belonging to this port. */
615 status
= int0_status
& sc
->sc_int_mask
[0];
616 if (status
& (INT0_TXDERR
|INT0_TXPERR
)) {
617 aprint_error_dev(sc
->sc_dev
,
618 "transmit%s%s error: %#x %08x bufaddr %#x\n",
619 status
& INT0_TXDERR
? " data" : "",
620 status
& INT0_TXPERR
? " protocol" : "",
621 bus_space_read_4(sc
->sc_iot
, sc
->sc_dma_ioh
,
622 GMAC_DMA_TX_CUR_DESC
),
623 bus_space_read_4(sc
->sc_iot
, sc
->sc_dma_ioh
,
624 GMAC_SW_TX_Q0_RWPTR
),
625 bus_space_read_4(sc
->sc_iot
, sc
->sc_dma_ioh
,
/* Acknowledge the TX error bits (write-1-to-clear). */
627 bus_space_write_4(sc
->sc_iot
, sc
->sc_ioh
, GMAC_INT0_STATUS
,
628 status
& (INT0_TXDERR
|INT0_TXPERR
));
631 if (status
& (INT0_RXDERR
|INT0_RXPERR
)) {
632 aprint_error_dev(sc
->sc_dev
,
633 "receive%s%s error: %#x %#x=%#x/%#x/%#x/%#x\n",
634 status
& INT0_RXDERR
? " data" : "",
635 status
& INT0_RXPERR
? " protocol" : "",
636 bus_space_read_4(sc
->sc_iot
, sc
->sc_dma_ioh
,
637 GMAC_DMA_RX_CUR_DESC
),
638 bus_space_read_4(sc
->sc_iot
, sc
->sc_ioh
,
640 bus_space_read_4(sc
->sc_iot
, sc
->sc_dma_ioh
,
642 bus_space_read_4(sc
->sc_iot
, sc
->sc_dma_ioh
,
644 bus_space_read_4(sc
->sc_iot
, sc
->sc_dma_ioh
,
646 bus_space_read_4(sc
->sc_iot
, sc
->sc_dma_ioh
,
/* Acknowledge the RX error bits. */
648 bus_space_write_4(sc
->sc_iot
, sc
->sc_ioh
, GMAC_INT0_STATUS
,
649 status
& (INT0_RXDERR
|INT0_RXPERR
));
/* Reap each software TX queue that signalled end-of-frame. */
652 if (status
& INT0_SWTXQ_EOF
) {
653 status
&= INT0_SWTXQ_EOF
;
654 for (int i
= 0; status
&& i
< __arraycount(sc
->sc_txq
); i
++) {
655 if (status
& INT0_SWTXQn_EOF(i
)) {
656 gmac_hwqueue_sync(sc
->sc_txq
[i
]);
657 bus_space_write_4(sc
->sc_iot
, sc
->sc_ioh
,
659 sc
->sc_int_mask
[0] & (INT0_SWTXQn_EOF(i
)|INT0_SWTXQn_FIN(i
)));
660 status
&= ~INT0_SWTXQn_EOF(i
);
/* Software free queue ran dry: replenish it with fresh mbufs. */
667 if (int4_status
& INT4_SW_FREEQ_EMPTY
) {
668 struct gmac_softc
* const psc
= sc
->sc_psc
;
669 psc
->sc_rxpkts_per_sec
+=
670 gmac_rxproduce(psc
->sc_swfreeq
, psc
->sc_swfree_min
);
671 bus_space_write_4(sc
->sc_iot
, sc
->sc_ioh
, GMAC_INT4_STATUS
,
672 status
& INT4_SW_FREEQ_EMPTY
);
/* Received packets on the default RX queue: consume them. */
676 status
= int1_status
& sc
->sc_int_mask
[1];
677 if (status
& INT1_DEF_RXQ_EOF
) {
678 struct gmac_softc
* const psc
= sc
->sc_psc
;
679 psc
->sc_rxpkts_per_sec
+=
680 gmac_hwqueue_consume(sc
->sc_rxq
, psc
->sc_swfree_min
);
681 bus_space_write_4(sc
->sc_iot
, sc
->sc_ioh
, GMAC_INT1_STATUS
,
682 status
& INT1_DEF_RXQ_EOF
);
/* Miscellaneous INT4 conditions (most bodies elided in this extract). */
686 status
= int4_status
& sc
->sc_int_enabled
[4];
687 if (status
& INT4_TX_FAIL
) {
689 if (status
& INT4_MIB_HEMIWRAP
) {
691 if (status
& INT4_RX_XON
) {
693 if (status
& INT4_RX_XOFF
) {
695 if (status
& INT4_TX_XON
) {
697 if (status
& INT4_TX_XOFF
) {
699 if (status
& INT4_RX_FIFO_OVRN
) {
/* FIFO overrun: raise the free-queue floor to absorb bursts. */
701 if (sc
->sc_psc
->sc_swfree_min
< MAX_RXMAPS
) {
702 sc
->sc_psc
->sc_swfree_min
++;
/* NOTE(review): 'psc' here is only declared in earlier inner scopes in
 * this extract — verify against the full source that it is in scope
 * (later revisions use sc->sc_psc here). */
703 gmac_swfree_min_update(psc
);
706 sc
->sc_if
.if_ierrors
++;
708 if (status
& INT4_RGMII_STSCHG
) {
709 mii_tick(&sc
->sc_mii
);
/* Acknowledge all handled INT4 bits at once. */
711 bus_space_write_4(sc
->sc_iot
, sc
->sc_ioh
, GMAC_INT4_STATUS
, status
);
/* TX descriptors were freed: try to push more packets out. */
714 gmc_ifstart(&sc
->sc_if
);
716 aprint_debug_dev(sc
->sc_dev
, "gmac_intr: sts=%#x/%#x/%#x/%#x/%#x\n",
717 bus_space_read_4(sc
->sc_iot
, sc
->sc_ioh
, GMAC_INT0_STATUS
),
718 bus_space_read_4(sc
->sc_iot
, sc
->sc_ioh
, GMAC_INT1_STATUS
),
719 bus_space_read_4(sc
->sc_iot
, sc
->sc_ioh
, GMAC_INT2_STATUS
),
720 bus_space_read_4(sc
->sc_iot
, sc
->sc_ioh
, GMAC_INT3_STATUS
),
721 bus_space_read_4(sc
->sc_iot
, sc
->sc_ioh
, GMAC_INT4_STATUS
));
722 aprint_debug_dev(sc
->sc_dev
, "gmac_intr: exit rv=%d\n", rv
);
/*
 * gmc_match: autoconf match. Accept only sane attach args (PHY 0..31,
 * port 0..1, intr 1..2) and refuse a port that is already attached.
 * NOTE(review): the return statements are missing from this extract.
 */
727 gmc_match(device_t parent
, cfdata_t cf
, void *aux
)
729 struct gmac_softc
*psc
= device_private(parent
);
730 struct gmac_attach_args
*gma
= aux
;
732 if ((unsigned int)gma
->gma_phy
> 31)
734 if ((unsigned int)gma
->gma_port
> 1)
736 if (gma
->gma_intr
< 1 || gma
->gma_intr
> 2)
/* Each port attaches at most once. */
739 if (psc
->sc_ports
& (1 << gma
->gma_port
))
/*
 * gmc_attach: autoconf attach. Claims the port, carves per-port DMA
 * and GMAC bus-space subregions out of the parent's mapping, wires up
 * the ifnet/ethercom/MII callbacks, attaches Ethernet and the PHYs,
 * snapshots the GMAC register state into the softc shadows, computes
 * the per-port interrupt masks, establishes the interrupt, and sets up
 * the MII tick callout.
 */
746 gmc_attach(device_t parent
, device_t self
, void *aux
)
748 struct gmac_softc
* const psc
= device_private(parent
);
749 struct gmc_softc
* const sc
= device_private(self
);
750 struct gmac_attach_args
*gma
= aux
;
751 struct ifnet
* const ifp
= &sc
->sc_if
;
/* Hard-coded (locally administered) MAC addresses, one per port. */
752 static const char eaddrs
[2][6] = {
753 "\x0\x52\xc3\x11\x22\x33",
754 "\x0\x52\xc3\x44\x55\x66",
/* Mark this port as claimed in the parent. */
757 psc
->sc_ports
|= 1 << gma
->gma_port
;
758 sc
->sc_port1
= (gma
->gma_port
== 1);
759 sc
->sc_phy
= gma
->gma_phy
;
/* Inherit bus tags/handles from the parent GMAC core. */
763 sc
->sc_iot
= psc
->sc_iot
;
764 sc
->sc_ioh
= psc
->sc_ioh
;
765 sc
->sc_dmat
= psc
->sc_dmat
;
/* Per-port DMA and GMAC register windows. */
767 bus_space_subregion(sc
->sc_iot
, sc
->sc_ioh
,
768 GMAC_PORTn_DMA_OFFSET(gma
->gma_port
), GMAC_PORTn_DMA_SIZE
,
770 bus_space_subregion(sc
->sc_iot
, sc
->sc_ioh
,
771 GMAC_PORTn_GMAC_OFFSET(gma
->gma_port
), GMAC_PORTn_GMAC_SIZE
,
/* Initialize the ifnet and hook in our handlers. */
776 strlcpy(ifp
->if_xname
, device_xname(self
), sizeof(ifp
->if_xname
));
777 ifp
->if_flags
= IFF_SIMPLEX
|IFF_MULTICAST
|IFF_BROADCAST
;
779 ifp
->if_ioctl
= gmc_ifioctl
;
780 ifp
->if_stop
= gmc_ifstop
;
781 ifp
->if_start
= gmc_ifstart
;
782 ifp
->if_init
= gmc_ifinit
;
784 IFQ_SET_READY(&ifp
->if_snd
);
786 sc
->sc_ec
.ec_capabilities
= ETHERCAP_VLAN_MTU
| ETHERCAP_JUMBO_MTU
;
787 sc
->sc_ec
.ec_mii
= &sc
->sc_mii
;
/* MII accessors are supplied by the parent via the attach args. */
789 sc
->sc_mii
.mii_ifp
= ifp
;
790 sc
->sc_mii
.mii_statchg
= gmc_mii_statchg
;
791 sc
->sc_mii
.mii_readreg
= gma
->gma_mii_readreg
;
792 sc
->sc_mii
.mii_writereg
= gma
->gma_mii_writereg
;
794 ifmedia_init(&sc
->sc_mii
.mii_media
, 0, gmc_mediachange
,
798 ether_ifattach(ifp
, eaddrs
[gma
->gma_port
]);
799 mii_attach(sc
->sc_dev
, &sc
->sc_mii
, 0xffffffff,
800 gma
->gma_phy
, MII_OFFSET_ANY
, 0);
/* No PHY found: fall back to a "none" media entry. */
802 if (LIST_EMPTY(&sc
->sc_mii
.mii_phys
)) {
803 ifmedia_add(&sc
->sc_mii
.mii_media
, IFM_ETHER
|IFM_NONE
, 0, NULL
);
804 ifmedia_set(&sc
->sc_mii
.mii_media
, IFM_ETHER
|IFM_NONE
);
806 ifmedia_set(&sc
->sc_mii
.mii_media
, IFM_ETHER
|IFM_AUTO
);
807 // ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_1000_T|IFM_FDX);
/* Cache the current GMAC register contents in the softc shadows. */
810 sc
->sc_gmac_status
= bus_space_read_4(sc
->sc_iot
, sc
->sc_gmac_ioh
,
812 sc
->sc_gmac_sta_add
[0] = bus_space_read_4(sc
->sc_iot
, sc
->sc_gmac_ioh
,
814 sc
->sc_gmac_sta_add
[1] = bus_space_read_4(sc
->sc_iot
, sc
->sc_gmac_ioh
,
816 sc
->sc_gmac_sta_add
[2] = bus_space_read_4(sc
->sc_iot
, sc
->sc_gmac_ioh
,
818 sc
->sc_gmac_mcast_filter
[0] = bus_space_read_4(sc
->sc_iot
,
819 sc
->sc_gmac_ioh
, GMAC_MCAST_FILTER0
);
820 sc
->sc_gmac_mcast_filter
[1] = bus_space_read_4(sc
->sc_iot
,
821 sc
->sc_gmac_ioh
, GMAC_MCAST_FILTER1
);
822 sc
->sc_gmac_rx_filter
= bus_space_read_4(sc
->sc_iot
, sc
->sc_gmac_ioh
,
824 sc
->sc_gmac_config
[0] = bus_space_read_4(sc
->sc_iot
, sc
->sc_gmac_ioh
,
826 sc
->sc_dmavr
= bus_space_read_4(sc
->sc_iot
, sc
->sc_dma_ioh
, GMAC_DMAVR
);
828 /* sc->sc_int_enabled is already zeroed */
/* All interrupt bits that belong to this port, per status register. */
829 sc
->sc_int_mask
[0] = (sc
->sc_port1
? INT0_GMAC1
: INT0_GMAC0
);
830 sc
->sc_int_mask
[1] = (sc
->sc_port1
? INT1_GMAC1
: INT1_GMAC0
);
831 sc
->sc_int_mask
[2] = (sc
->sc_port1
? INT2_GMAC1
: INT2_GMAC0
);
832 sc
->sc_int_mask
[3] = (sc
->sc_port1
? INT3_GMAC1
: INT3_GMAC0
);
833 sc
->sc_int_mask
[4] = (sc
->sc_port1
? INT4_GMAC1
: INT4_GMAC0
);
836 sc
->sc_ih
= intr_establish(gma
->gma_intr
, IPL_NET
, IST_LEVEL_HIGH
,
838 KASSERT(sc
->sc_ih
!= NULL
);
/* Once-a-second MII/housekeeping tick (see gmc_mii_tick). */
841 callout_init(&sc
->sc_mii_ch
, 0);
842 callout_setfunc(&sc
->sc_mii_ch
, gmc_mii_tick
, sc
);
844 aprint_normal_dev(sc
->sc_dev
, "Ethernet address %s\n",
845 ether_sprintf(CLLADDR(sc
->sc_if
.if_sadl
)));
/* Autoconfiguration glue: register the "gmc" attachment (match/attach;
 * no detach or activate handlers). */
848 CFATTACH_DECL_NEW(gmc
, sizeof(struct gmc_softc
),
849 gmc_match
, gmc_attach
, NULL
, NULL
);