drivers/net/bcm63xx_enet.h (linux-2.6/next.git, blob 0e3048b788c2c366595c9da160b344bbb6dd1fb0)
#ifndef BCM63XX_ENET_H_
#define BCM63XX_ENET_H_

#include <linux/types.h>
#include <linux/mii.h>
#include <linux/mutex.h>
#include <linux/phy.h>
#include <linux/platform_device.h>

#include <bcm63xx_regs.h>
#include <bcm63xx_irq.h>
#include <bcm63xx_io.h>

/* default number of descriptors */
#define BCMENET_DEF_RX_DESC	64
#define BCMENET_DEF_TX_DESC	32

/* maximum burst length for dma (in 4-byte units) */
#define BCMENET_DMA_MAXBURST	16

/* tx transmit threshold (in 4-byte units); the fifo is 256 bytes, so the
 * value must be low enough that a DMA transfer of the above burst length
 * cannot overflow the fifo */
#define BCMENET_TX_FIFO_TRESH	32
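
/*
 * Illustration (not part of the original header): the constraint described
 * above holds for these values, since the threshold plus one maximal burst
 * is (BCMENET_TX_FIFO_TRESH + BCMENET_DMA_MAXBURST) * 4 = (32 + 16) * 4 =
 * 192 bytes, which fits in the 256-byte fifo.  A hedged sketch of a
 * compile-time check, assuming BUILD_BUG_ON() from <linux/kernel.h> is
 * available at the point of use:
 */
static inline void bcm_enet_tx_fifo_check_example(void)
{
	/* threshold plus one full burst must stay within the 256-byte fifo */
	BUILD_BUG_ON((BCMENET_TX_FIFO_TRESH + BCMENET_DMA_MAXBURST) * 4 > 256);
}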

/*
 * hardware maximum rx/tx packet size including FCS; the max is actually
 * 2047, but if we set the max rx size register to 2047 we won't get
 * overflow information for packets of 2048 bytes or more
 */
#define BCMENET_MAX_MTU		2046
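
/*
 * Illustrative sketch (not part of the original header): BCMENET_MAX_MTU
 * bounds the full on-wire frame (headers + payload + FCS), not the
 * net_device MTU.  A driver validating a requested MTU might account for
 * the per-frame overhead roughly like this, assuming the standard
 * VLAN_ETH_HLEN, ETH_FCS_LEN and -EINVAL definitions are available:
 */
static inline int bcm_enet_check_mtu_example(int mtu)
{
	int frame_len = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;

	if (frame_len > BCMENET_MAX_MTU)
		return -EINVAL;
	return frame_len;	/* value a driver could program as max rx size */
}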

/*
 * rx/tx dma descriptor
 */
struct bcm_enet_desc {
	u32 len_stat;
	u32 address;
};

#define DMADESC_LENGTH_SHIFT	16
#define DMADESC_LENGTH_MASK	(0xfff << DMADESC_LENGTH_SHIFT)
#define DMADESC_OWNER_MASK	(1 << 15)
#define DMADESC_EOP_MASK	(1 << 14)
#define DMADESC_SOP_MASK	(1 << 13)
#define DMADESC_ESOP_MASK	(DMADESC_EOP_MASK | DMADESC_SOP_MASK)
#define DMADESC_WRAP_MASK	(1 << 12)

#define DMADESC_UNDER_MASK	(1 << 9)
#define DMADESC_APPEND_CRC	(1 << 8)
#define DMADESC_OVSIZE_MASK	(1 << 4)
#define DMADESC_RXER_MASK	(1 << 2)
#define DMADESC_CRC_MASK	(1 << 1)
#define DMADESC_OV_MASK		(1 << 0)
#define DMADESC_ERR_MASK	(DMADESC_UNDER_MASK | \
				 DMADESC_OVSIZE_MASK | \
				 DMADESC_RXER_MASK | \
				 DMADESC_CRC_MASK | \
				 DMADESC_OV_MASK)
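
/*
 * Illustrative sketch (not part of the original header): the dma engine
 * hands an rx descriptor back with DMADESC_OWNER_MASK cleared; the frame
 * length sits in the upper half of len_stat and the error bits in the
 * lower half.  Rough shape of the check done before accepting a frame
 * (barriers, endianness and refill handling omitted):
 */
static inline int bcm_enet_rx_desc_done_example(const struct bcm_enet_desc *desc,
						unsigned int *len)
{
	u32 len_stat = desc->len_stat;

	if (len_stat & DMADESC_OWNER_MASK)
		return 0;	/* still owned by the dma engine */
	if (len_stat & DMADESC_ERR_MASK)
		return 0;	/* one of the error bits above is set */

	*len = (len_stat & DMADESC_LENGTH_MASK) >> DMADESC_LENGTH_SHIFT;
	return 1;
}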

/*
 * MIB Counters register definitions
 */
#define ETH_MIB_TX_GD_OCTETS		0
#define ETH_MIB_TX_GD_PKTS		1
#define ETH_MIB_TX_ALL_OCTETS		2
#define ETH_MIB_TX_ALL_PKTS		3
#define ETH_MIB_TX_BRDCAST		4
#define ETH_MIB_TX_MULT			5
#define ETH_MIB_TX_64			6
#define ETH_MIB_TX_65_127		7
#define ETH_MIB_TX_128_255		8
#define ETH_MIB_TX_256_511		9
#define ETH_MIB_TX_512_1023		10
#define ETH_MIB_TX_1024_MAX		11
#define ETH_MIB_TX_JAB			12
#define ETH_MIB_TX_OVR			13
#define ETH_MIB_TX_FRAG			14
#define ETH_MIB_TX_UNDERRUN		15
#define ETH_MIB_TX_COL			16
#define ETH_MIB_TX_1_COL		17
#define ETH_MIB_TX_M_COL		18
#define ETH_MIB_TX_EX_COL		19
#define ETH_MIB_TX_LATE			20
#define ETH_MIB_TX_DEF			21
#define ETH_MIB_TX_CRS			22
#define ETH_MIB_TX_PAUSE		23

#define ETH_MIB_RX_GD_OCTETS		32
#define ETH_MIB_RX_GD_PKTS		33
#define ETH_MIB_RX_ALL_OCTETS		34
#define ETH_MIB_RX_ALL_PKTS		35
#define ETH_MIB_RX_BRDCAST		36
#define ETH_MIB_RX_MULT			37
#define ETH_MIB_RX_64			38
#define ETH_MIB_RX_65_127		39
#define ETH_MIB_RX_128_255		40
#define ETH_MIB_RX_256_511		41
#define ETH_MIB_RX_512_1023		42
#define ETH_MIB_RX_1024_MAX		43
#define ETH_MIB_RX_JAB			44
#define ETH_MIB_RX_OVR			45
#define ETH_MIB_RX_FRAG			46
#define ETH_MIB_RX_DROP			47
#define ETH_MIB_RX_CRC_ALIGN		48
#define ETH_MIB_RX_UND			49
#define ETH_MIB_RX_CRC			50
#define ETH_MIB_RX_ALIGN		51
#define ETH_MIB_RX_SYM			52
#define ETH_MIB_RX_PAUSE		53
#define ETH_MIB_RX_CNTRL		54

struct bcm_enet_mib_counters {
	u64 tx_gd_octets;
	u32 tx_gd_pkts;
	u32 tx_all_octets;
	u32 tx_all_pkts;
	u32 tx_brdcast;
	u32 tx_mult;
	u32 tx_64;
	u32 tx_65_127;
	u32 tx_128_255;
	u32 tx_256_511;
	u32 tx_512_1023;
	u32 tx_1024_max;
	u32 tx_jab;
	u32 tx_ovr;
	u32 tx_frag;
	u32 tx_underrun;
	u32 tx_col;
	u32 tx_1_col;
	u32 tx_m_col;
	u32 tx_ex_col;
	u32 tx_late;
	u32 tx_def;
	u32 tx_crs;
	u32 tx_pause;
	u64 rx_gd_octets;
	u32 rx_gd_pkts;
	u32 rx_all_octets;
	u32 rx_all_pkts;
	u32 rx_brdcast;
	u32 rx_mult;
	u32 rx_64;
	u32 rx_65_127;
	u32 rx_128_255;
	u32 rx_256_511;
	u32 rx_512_1023;
	u32 rx_1024_max;
	u32 rx_jab;
	u32 rx_ovr;
	u32 rx_frag;
	u32 rx_drop;
	u32 rx_crc_align;
	u32 rx_und;
	u32 rx_crc;
	u32 rx_align;
	u32 rx_sym;
	u32 rx_pause;
	u32 rx_cntrl;
};
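
/*
 * Illustrative sketch (not part of the original header): the ETH_MIB_*
 * constants above index the hardware MIB counter block (tx counters from
 * 0, rx counters from 32), and struct bcm_enet_mib_counters mirrors them
 * field by field.  Assuming each counter is a 32-bit register at a 4-byte
 * stride from an iomapped mib_base, that the kernel's readl() accessor is
 * available, and that the hardware counters clear on read so software has
 * to accumulate them, an update could look like this:
 */
static inline void bcm_enet_mib_update_example(void __iomem *mib_base,
					       struct bcm_enet_mib_counters *mib)
{
	mib->tx_gd_pkts += readl(mib_base + (ETH_MIB_TX_GD_PKTS << 2));
	mib->rx_gd_pkts += readl(mib_base + (ETH_MIB_RX_GD_PKTS << 2));
	/* ...and likewise for the remaining counters */
}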

struct bcm_enet_priv {

	/* mac id (from platform device id) */
	int mac_id;

	/* base remapped address of device */
	void __iomem *base;

	/* mac irq, rx_dma irq, tx_dma irq */
	int irq;
	int irq_rx;
	int irq_tx;

	/* hw view of rx & tx dma ring */
	dma_addr_t rx_desc_dma;
	dma_addr_t tx_desc_dma;

	/* allocated size (in bytes) for rx & tx dma ring */
	unsigned int rx_desc_alloc_size;
	unsigned int tx_desc_alloc_size;


	struct napi_struct napi;

	/* dma channel id for rx */
	int rx_chan;

	/* number of dma desc in rx ring */
	int rx_ring_size;

	/* cpu view of rx dma ring */
	struct bcm_enet_desc *rx_desc_cpu;

	/* current number of armed descriptors given to hardware for rx */
	int rx_desc_count;

	/* next rx descriptor to fetch from hardware */
	int rx_curr_desc;

	/* next dirty rx descriptor to refill */
	int rx_dirty_desc;

	/* size of allocated rx skbs */
	unsigned int rx_skb_size;

	/* list of skb given to hw for rx */
	struct sk_buff **rx_skb;

	/* used when rx skb allocation failed, so we defer rx queue
	 * refill */
	struct timer_list rx_timeout;

	/* lock rx_timeout against rx normal operation */
	spinlock_t rx_lock;


	/* dma channel id for tx */
	int tx_chan;

	/* number of dma desc in tx ring */
	int tx_ring_size;

	/* cpu view of tx dma ring */
	struct bcm_enet_desc *tx_desc_cpu;

	/* number of available descriptors for tx */
	int tx_desc_count;

	/* next available tx descriptor */
	int tx_curr_desc;

	/* next dirty tx descriptor to reclaim */
	int tx_dirty_desc;

	/* list of skb given to hw for tx */
	struct sk_buff **tx_skb;

	/* lock used by tx reclaim and xmit */
	spinlock_t tx_lock;


	/* set if internal phy is ignored and external mii interface
	 * is selected */
	int use_external_mii;

	/* set if a phy is connected; the phy address must be known,
	 * probing is not possible */
	int has_phy;
	int phy_id;

	/* set if the connected phy has an associated irq */
	int has_phy_interrupt;
	int phy_interrupt;

	/* used when a phy is connected (phylib used) */
	struct mii_bus *mii_bus;
	struct phy_device *phydev;
	int old_link;
	int old_duplex;
	int old_pause;

	/* used when no phy is connected */
	int force_speed_100;
	int force_duplex_full;

	/* pause parameters */
	int pause_auto;
	int pause_rx;
	int pause_tx;

	/* stats */
	struct bcm_enet_mib_counters mib;

	/* after a mib interrupt, the mib register update is done in this
	 * work queue */
	struct work_struct mib_update_task;

	/* lock mib update between userspace request and workqueue */
	struct mutex mib_update_lock;

	/* mac clock */
	struct clk *mac_clk;

	/* phy clock if internal phy is used */
	struct clk *phy_clk;

	/* network device reference */
	struct net_device *net_dev;

	/* platform device reference */
	struct platform_device *pdev;

	/* maximum hardware transmit/receive size */
	unsigned int hw_mtu;
};
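
/*
 * Illustrative sketch (not part of the original header): the *_curr_desc,
 * *_dirty_desc and *_desc_count fields above implement a classic ring:
 * "curr" is where the next descriptor is taken, "dirty" is the oldest
 * outstanding one, and the count fields track how many descriptors are in
 * play (armed for rx, free for tx).  Reclaiming one completed tx
 * descriptor then amounts to the following (locking and the hardware
 * ownership handshake omitted):
 */
static inline void bcm_enet_tx_reclaim_one_example(struct bcm_enet_priv *priv)
{
	/* free the oldest slot and advance, wrapping at the ring size */
	priv->tx_dirty_desc++;
	if (priv->tx_dirty_desc == priv->tx_ring_size)
		priv->tx_dirty_desc = 0;

	/* one more descriptor is available for a future xmit */
	priv->tx_desc_count++;
}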

#endif /* ! BCM63XX_ENET_H_ */