/*	$NetBSD: gemvar.h,v 1.19 2009/07/27 18:10:53 dyoung Exp $ */

/*
 * Copyright (C) 2001 Eduardo Horvath.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef	_IF_GEMVAR_H
#define	_IF_GEMVAR_H

#include "rnd.h"

#include <sys/queue.h>
#include <sys/callout.h>

#if NRND > 0
#include <sys/rnd.h>
#endif

/*
 * Misc. definitions for the Sun ``Gem'' Ethernet controller family driver.
 */

/*
 * Transmit descriptor list size.  This is arbitrary, but allocate
 * enough descriptors for 64 pending transmissions and 16 segments
 * per packet.
 */
#define	GEM_NTXSEGS		16

#define	GEM_TXQUEUELEN		64
#define	GEM_NTXDESC		(GEM_TXQUEUELEN * GEM_NTXSEGS)
#define	GEM_NTXDESC_MASK	(GEM_NTXDESC - 1)
#define	GEM_NEXTTX(x)		((x + 1) & GEM_NTXDESC_MASK)

/*
 * Receive descriptor list size.  We have one Rx buffer per incoming
 * packet, so this logic is a little simpler.
 */
#define	GEM_NRXDESC		128
#define	GEM_NRXDESC_MASK	(GEM_NRXDESC - 1)
#define	GEM_PREVRX(x)		((x - 1) & GEM_NRXDESC_MASK)
#define	GEM_NEXTRX(x)		((x + 1) & GEM_NRXDESC_MASK)
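
/*
 * Both ring sizes above are powers of two (GEM_NTXDESC is 64 * 16 = 1024,
 * GEM_NRXDESC is 128), so GEM_NEXTTX()/GEM_NEXTRX()/GEM_PREVRX() can wrap
 * an index with a cheap AND mask instead of a modulo.  Illustrative sketch
 * (not part of the original header; "idx" and "n" are hypothetical names)
 * of walking every Rx slot exactly once:
 *
 *	int n, idx;
 *	for (n = 0, idx = 0; n < GEM_NRXDESC; n++, idx = GEM_NEXTRX(idx)) {
 *		(void)idx;	/- idx visits 0, 1, ..., 127 and wraps to 0 -/
 *	}
 */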

/*
 * Control structures are DMA'd to the GEM chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct gem_control_data {
	/*
	 * The transmit descriptors.
	 */
	struct gem_desc gcd_txdescs[GEM_NTXDESC];

	/*
	 * The receive descriptors.
	 */
	struct gem_desc gcd_rxdescs[GEM_NRXDESC];
};

#define	GEM_CDOFF(x)	offsetof(struct gem_control_data, x)
#define	GEM_CDTXOFF(x)	GEM_CDOFF(gcd_txdescs[(x)])
#define	GEM_CDRXOFF(x)	GEM_CDOFF(gcd_rxdescs[(x)])
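
/*
 * Because the clump lives in a single DMA segment, a descriptor's byte
 * offset within the clump is enough to locate it both for
 * bus_dmamap_sync() and, once the base bus address is known, on the
 * device side.  Illustrative sketch (not part of the original header;
 * "off" is a hypothetical name):
 *
 *	bus_size_t off = GEM_CDTXOFF(5);
 *	(off is offsetof(struct gem_control_data, gcd_txdescs[5]), i.e.
 *	 5 * sizeof(struct gem_desc) bytes into the clump.)
 */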

/*
 * Software state for transmit jobs.
 */
struct gem_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndescs;			/* number of descriptors */
	SIMPLEQ_ENTRY(gem_txsoft) txs_q;
};

SIMPLEQ_HEAD(gem_txsq, gem_txsoft);

/*
 * Software state for receive jobs.
 */
struct gem_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

enum gem_attach_stage {
	  GEM_ATT_BACKEND_2 = 0
	, GEM_ATT_BACKEND_1
	, GEM_ATT_FINISHED
	, GEM_ATT_MII
	, GEM_ATT_7
	, GEM_ATT_6
	, GEM_ATT_5
	, GEM_ATT_4
	, GEM_ATT_3
	, GEM_ATT_2
	, GEM_ATT_1
	, GEM_ATT_0
	, GEM_ATT_BACKEND_0
};

/*
 * Software state per device.
 */
struct gem_softc {
	device_t	sc_dev;		/* generic device information */
	struct ethercom	sc_ethercom;	/* ethernet common data */
	struct mii_data	sc_mii;		/* MII media control */
	struct callout	sc_tick_ch;	/* tick callout */

	/* The following bus handles are to be provided by the bus front-end */
	bus_space_tag_t	sc_bustag;	/* bus tag */
	bus_dma_tag_t	sc_dmatag;	/* bus dma tag */
	bus_dmamap_t	sc_dmamap;	/* bus dma handle */
	bus_space_handle_t sc_h1;	/* bus space handle for bank 1 regs */
	bus_space_handle_t sc_h2;	/* bus space handle for bank 2 regs */
	bus_size_t	sc_size;	/* bank 1 size */

	int		sc_phys[2];	/* MII instance -> PHY map */

	int		sc_mif_config;	/* Selected MII reg setting */
	uint32_t	sc_mii_anar;	/* copy of PCS GEM_MII_ANAR register */
	int		sc_mii_media;	/* Media selected for PCS MII */

	u_int		sc_variant;	/* which GEM are we dealing with? */
#define	GEM_UNKNOWN		0	/* don't know */
#define	GEM_SUN_GEM		1	/* Sun GEM variant */
#define	GEM_SUN_ERI		2	/* Sun ERI variant */
#define	GEM_APPLE_GMAC		3	/* Apple GMAC variant */
#define	GEM_APPLE_K2_GMAC	4	/* Apple K2 GMAC */

#define	GEM_IS_SUN(sc) \
	((sc)->sc_variant == GEM_SUN_GEM || \
	 (sc)->sc_variant == GEM_SUN_ERI)
#define	GEM_IS_APPLE(sc) \
	((sc)->sc_variant == GEM_APPLE_GMAC || \
	 (sc)->sc_variant == GEM_APPLE_K2_GMAC)

	int		sc_chiprev;	/* hardware revision */

	u_int		sc_flags;	/* */
	short		sc_if_flags;	/* copy of ifp->if_flags */
#define	GEM_GIGABIT		0x0001	/* has a gigabit PHY */
#define	GEM_LINK		0x0002	/* link is up */
#define	GEM_PCI			0x0004	/* XXX PCI busses are little-endian */
#define	GEM_SERDES		0x0008	/* use the SERDES */
#define	GEM_SERIAL		0x0010	/* use the serial link */

	/*
	 * Ring buffer DMA stuff.
	 */
	bus_dma_segment_t sc_cdseg;	/* control data memory */
	int		sc_cdnseg;	/* number of segments */
	bus_dmamap_t	sc_cddmamap;	/* control data DMA map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	bus_dmamap_t	sc_nulldmamap;	/* for small packets padding */

	/*
	 * Software state for transmit and receive descriptors.
	 */
	struct gem_txsoft sc_txsoft[GEM_TXQUEUELEN];
	struct gem_rxsoft sc_rxsoft[GEM_NRXDESC];

	/*
	 * Control data structures.
	 */
	struct gem_control_data *sc_control_data;
#define	sc_txdescs	sc_control_data->gcd_txdescs
#define	sc_rxdescs	sc_control_data->gcd_rxdescs

	int		sc_txfree;	/* number of free Tx descriptors */
	int		sc_txnext;	/* next ready Tx descriptor */
	int		sc_txwin;	/* Tx descriptors since last Tx int */

	struct gem_txsq	sc_txfreeq;	/* free Tx descsofts */
	struct gem_txsq	sc_txdirtyq;	/* dirty Tx descsofts */

	int		sc_rxptr;	/* next ready RX descriptor/descsoft */
	int		sc_rxfifosize;	/* Rx FIFO size (bytes) */

	/* ========== */
	int		sc_inited;
	int		sc_meminited;
	int		sc_debug;
	void		*sc_sh;		/* shutdownhook cookie */

	/* Special hardware hooks */
	void		(*sc_hwreset)(struct gem_softc *);
	void		(*sc_hwinit)(struct gem_softc *);

#if NRND > 0
	rndsource_element_t	rnd_source;
#endif

	struct evcnt sc_ev_intr;
#ifdef GEM_COUNTERS
	struct evcnt sc_ev_txint;
	struct evcnt sc_ev_rxint;
	struct evcnt sc_ev_rxnobuf;
	struct evcnt sc_ev_rxfull;
	struct evcnt sc_ev_rxhist[9];
#endif

	enum gem_attach_stage	sc_att_stage;
};

#ifdef GEM_COUNTERS
#define	GEM_COUNTER_INCR(sc, ctr)	((void) (sc->ctr.ev_count++))
#else
#define	GEM_COUNTER_INCR(sc, ctr)	((void) sc)
#endif

#define	GEM_DMA_READ(sc, v) \
	(((sc)->sc_flags & GEM_PCI) ? le64toh(v) : be64toh(v))
#define	GEM_DMA_WRITE(sc, v) \
	(((sc)->sc_flags & GEM_PCI) ? htole64(v) : htobe64(v))
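
/*
 * The descriptor words are little-endian when the chip sits behind a PCI
 * bridge (GEM_PCI set in sc_flags) and big-endian otherwise, so
 * GEM_DMA_READ()/GEM_DMA_WRITE() choose the conversion at run time.
 * Illustrative sketch (not part of the original header; "sc" and "flags"
 * are hypothetical names):
 *
 *	uint64_t flags = GEM_DMA_READ(sc, sc->sc_rxdescs[0].gd_flags);
 *	if (flags & GEM_RD_OWN) {
 *		(descriptor is still owned by the chip, nothing to do)
 *	}
 */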

#define	GEM_CDTXADDR(sc, x)	((sc)->sc_cddma + GEM_CDTXOFF((x)))
#define	GEM_CDRXADDR(sc, x)	((sc)->sc_cddma + GEM_CDRXOFF((x)))

#define	GEM_CDADDR(sc)		((sc)->sc_cddma + GEM_CDOFF)

#define	GEM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > GEM_NTXDESC) {				\
		bus_dmamap_sync((sc)->sc_dmatag, (sc)->sc_cddmamap,	\
		    GEM_CDTXOFF(__x), sizeof(struct gem_desc) *		\
		    (GEM_NTXDESC - __x), (ops));			\
		__n -= (GEM_NTXDESC - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmatag, (sc)->sc_cddmamap,		\
	    GEM_CDTXOFF(__x), sizeof(struct gem_desc) * __n, (ops));	\
} while (0)
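
/*
 * Illustrative sketch (not part of the original header): after filling a
 * run of Tx descriptors, a transmit path would hand them to the chip with
 * a PRE sync; the macro above splits the bus_dmamap_sync() in two when
 * the range wraps past GEM_NTXDESC.  "sc", "first" and "ndescs" are
 * hypothetical names:
 *
 *	GEM_CDTXSYNC(sc, first, ndescs,
 *	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 */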

#define	GEM_CDRXSYNC(sc, x, ops)					\
	bus_dmamap_sync((sc)->sc_dmatag, (sc)->sc_cddmamap,		\
	    GEM_CDRXOFF((x)), sizeof(struct gem_desc), (ops))

#define	GEM_CDSYNC(sc, ops)						\
	bus_dmamap_sync((sc)->sc_dmatag, (sc)->sc_cddmamap,		\
	    0, sizeof(struct gem_control_data), (ops))

#define	GEM_INIT_RXDESC(sc, x)						\
do {									\
	struct gem_rxsoft *__rxs = &sc->sc_rxsoft[(x)];			\
	struct gem_desc *__rxd = &sc->sc_rxdescs[(x)];			\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	__m->m_data = __m->m_ext.ext_buf;				\
	__rxd->gd_addr =						\
	    GEM_DMA_WRITE((sc), __rxs->rxs_dmamap->dm_segs[0].ds_addr);	\
	__rxd->gd_flags =						\
	    GEM_DMA_WRITE((sc),						\
			(((__m->m_ext.ext_size)<<GEM_RD_BUFSHIFT)	\
			& GEM_RD_BUFSIZE) | GEM_RD_OWN);		\
	GEM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
} while (0)

#define	GEM_UPDATE_RXDESC(sc, x)					\
do {									\
	struct gem_rxsoft *__rxs = &sc->sc_rxsoft[(x)];			\
	struct gem_desc *__rxd = &sc->sc_rxdescs[(x)];			\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	__rxd->gd_flags =						\
	    GEM_DMA_WRITE((sc),						\
			(((__m->m_ext.ext_size)<<GEM_RD_BUFSHIFT)	\
			& GEM_RD_BUFSIZE) | GEM_RD_OWN);		\
} while (0)
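
/*
 * Illustrative sketch (not part of the original header): GEM_INIT_RXDESC()
 * reloads both the buffer address and the flags word (and resets m_data),
 * so it suits a slot that has just had a new mbuf mapped into it;
 * GEM_UPDATE_RXDESC() only rewrites the flags word and therefore suits
 * handing an unchanged buffer back to the chip.  "sc", "i" and
 * "fresh_mbuf" are hypothetical names:
 *
 *	if (fresh_mbuf)
 *		GEM_INIT_RXDESC(sc, i);
 *	else
 *		GEM_UPDATE_RXDESC(sc, i);
 */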

#ifdef _KERNEL
bool	gem_shutdown(device_t, int);
bool	gem_suspend(device_t, pmf_qual_t);
bool	gem_resume(device_t, pmf_qual_t);
void	gem_attach(struct gem_softc *, const uint8_t *);
int	gem_intr(void *);
int	gem_detach(struct gem_softc *, int);

void	gem_reset(struct gem_softc *);
#endif /* _KERNEL */

#endif