#ifndef __MV643XX_ETH_H__
#define __MV643XX_ETH_H__

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/mii.h>

#include <linux/mv643xx.h>
/* Checksum offload for Tx works for most packets, but
 * fails if the previously sent packet did not use hw csum
 */
#define MV643XX_CHECKSUM_OFFLOAD_TX
#define MV643XX_NAPI
#define MV643XX_TX_FAST_REFILL
#undef MV643XX_COAL
/*
 * Number of RX / TX descriptors on RX / TX rings.
 * Note that allocating RX descriptors means allocating the RX
 * ring AND a preallocated RX buffer (skb) for each descriptor.
 * Allocating TX descriptors only allocates the TX descriptor ring,
 * with no preallocated TX buffers (skb's are allocated by higher layers).
 */
/* Default TX ring size is 1000 descriptors */
#define MV643XX_DEFAULT_TX_QUEUE_SIZE 1000

/* Default RX ring size is 400 descriptors */
#define MV643XX_DEFAULT_RX_QUEUE_SIZE 400

#define MV643XX_TX_COAL 100
#ifdef MV643XX_COAL
#define MV643XX_RX_COAL 100
#endif

#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
#define MAX_DESCS_PER_SKB (MAX_SKB_FRAGS + 1)
#else
#define MAX_DESCS_PER_SKB 1
#endif
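
/*
 * Illustrative sketch (not part of this header): MAX_DESCS_PER_SKB is the
 * worst-case number of descriptors a single skb can consume when checksum
 * offload / scatter-gather is in use, so a transmit routine would typically
 * stop the queue once fewer than that many descriptors remain free.  The
 * field names below assume the mv643xx_private layout declared later in
 * this file.
 *
 *	if (mp->tx_ring_size - mp->tx_desc_count < MAX_DESCS_PER_SKB)
 *		netif_stop_queue(dev);
 */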
/*
 * The MV643XX HW requires 8-byte alignment.  However, when I/O
 * is non-cache-coherent, we need to ensure that the I/O buffers
 * we use don't share cache lines with other data.
 */
#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_NOT_COHERENT_CACHE)
#define ETH_DMA_ALIGN L1_CACHE_BYTES
#else
#define ETH_DMA_ALIGN 8
#endif
#define ETH_VLAN_HLEN 4
#define ETH_FCS_LEN 4
#define ETH_HW_IP_ALIGN 2 /* hw aligns IP header */
#define ETH_WRAPPER_LEN (ETH_HW_IP_ALIGN + ETH_HLEN + \
				ETH_VLAN_HLEN + ETH_FCS_LEN)
#define ETH_RX_SKB_SIZE (dev->mtu + ETH_WRAPPER_LEN + ETH_DMA_ALIGN)
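/*
 * Worked example (assuming a standard 1500-byte MTU and cache-coherent
 * I/O, where ETH_DMA_ALIGN is 8): ETH_WRAPPER_LEN = 2 + 14 + 4 + 4 = 24,
 * so ETH_RX_SKB_SIZE = 1500 + 24 + 8 = 1532 bytes per receive buffer.
 * With CONFIG_DMA_NONCOHERENT the extra slack grows to L1_CACHE_BYTES.
 */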
#define ETH_RX_QUEUES_ENABLED (1 << 0) /* use only Q0 for receive */
#define ETH_TX_QUEUES_ENABLED (1 << 0) /* use only Q0 for transmit */

#define ETH_INT_CAUSE_RX_DONE (ETH_RX_QUEUES_ENABLED << 2)
#define ETH_INT_CAUSE_RX_ERROR (ETH_RX_QUEUES_ENABLED << 9)
#define ETH_INT_CAUSE_RX (ETH_INT_CAUSE_RX_DONE | ETH_INT_CAUSE_RX_ERROR)
#define ETH_INT_CAUSE_EXT 0x00000002
#define ETH_INT_UNMASK_ALL (ETH_INT_CAUSE_RX | ETH_INT_CAUSE_EXT)

#define ETH_INT_CAUSE_TX_DONE (ETH_TX_QUEUES_ENABLED << 0)
#define ETH_INT_CAUSE_TX_ERROR (ETH_TX_QUEUES_ENABLED << 8)
#define ETH_INT_CAUSE_TX (ETH_INT_CAUSE_TX_DONE | ETH_INT_CAUSE_TX_ERROR)
#define ETH_INT_CAUSE_PHY 0x00010000
#define ETH_INT_UNMASK_ALL_EXT (ETH_INT_CAUSE_TX | ETH_INT_CAUSE_PHY)

#define ETH_INT_MASK_ALL 0x00000000
#define ETH_INT_MASK_ALL_EXT 0x00000000
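
/*
 * Illustrative sketch (not part of this header): an interrupt handler
 * would normally read the port's cause registers and test them against
 * the masks above, roughly:
 *
 *	if (eth_int_cause & ETH_INT_CAUSE_RX)
 *		... schedule Rx processing (e.g. the NAPI poll) ...
 *	if (eth_int_cause_ext & ETH_INT_CAUSE_TX)
 *		... reclaim transmitted descriptors ...
 *
 * eth_int_cause / eth_int_cause_ext are assumed to hold the values read
 * from the interrupt cause and extended interrupt cause registers.
 */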
#define PHY_WAIT_ITERATIONS 1000 /* 1000 iterations * 10uS = 10mS max */
#define PHY_WAIT_MICRO_SECONDS 10

/* Buffer offset from buffer pointer */
#define RX_BUF_OFFSET 0x2
/* Gigabit Ethernet Unit Global Registers */

/* MIB Counters register definitions */
#define ETH_MIB_GOOD_OCTETS_RECEIVED_LOW 0x0
#define ETH_MIB_GOOD_OCTETS_RECEIVED_HIGH 0x4
#define ETH_MIB_BAD_OCTETS_RECEIVED 0x8
#define ETH_MIB_INTERNAL_MAC_TRANSMIT_ERR 0xc
#define ETH_MIB_GOOD_FRAMES_RECEIVED 0x10
#define ETH_MIB_BAD_FRAMES_RECEIVED 0x14
#define ETH_MIB_BROADCAST_FRAMES_RECEIVED 0x18
#define ETH_MIB_MULTICAST_FRAMES_RECEIVED 0x1c
#define ETH_MIB_FRAMES_64_OCTETS 0x20
#define ETH_MIB_FRAMES_65_TO_127_OCTETS 0x24
#define ETH_MIB_FRAMES_128_TO_255_OCTETS 0x28
#define ETH_MIB_FRAMES_256_TO_511_OCTETS 0x2c
#define ETH_MIB_FRAMES_512_TO_1023_OCTETS 0x30
#define ETH_MIB_FRAMES_1024_TO_MAX_OCTETS 0x34
#define ETH_MIB_GOOD_OCTETS_SENT_LOW 0x38
#define ETH_MIB_GOOD_OCTETS_SENT_HIGH 0x3c
#define ETH_MIB_GOOD_FRAMES_SENT 0x40
#define ETH_MIB_EXCESSIVE_COLLISION 0x44
#define ETH_MIB_MULTICAST_FRAMES_SENT 0x48
#define ETH_MIB_BROADCAST_FRAMES_SENT 0x4c
#define ETH_MIB_UNREC_MAC_CONTROL_RECEIVED 0x50
#define ETH_MIB_FC_SENT 0x54
#define ETH_MIB_GOOD_FC_RECEIVED 0x58
#define ETH_MIB_BAD_FC_RECEIVED 0x5c
#define ETH_MIB_UNDERSIZE_RECEIVED 0x60
#define ETH_MIB_FRAGMENTS_RECEIVED 0x64
#define ETH_MIB_OVERSIZE_RECEIVED 0x68
#define ETH_MIB_JABBER_RECEIVED 0x6c
#define ETH_MIB_MAC_RECEIVE_ERROR 0x70
#define ETH_MIB_BAD_CRC_EVENT 0x74
#define ETH_MIB_COLLISION 0x78
#define ETH_MIB_LATE_COLLISION 0x7c
/* Port serial status reg (PSR) */
#define ETH_INTERFACE_PCM 0x00000001
#define ETH_LINK_IS_UP 0x00000002
#define ETH_PORT_AT_FULL_DUPLEX 0x00000004
#define ETH_RX_FLOW_CTRL_ENABLED 0x00000008
#define ETH_GMII_SPEED_1000 0x00000010
#define ETH_MII_SPEED_100 0x00000020
#define ETH_TX_IN_PROGRESS 0x00000080
#define ETH_BYPASS_ACTIVE 0x00000100
#define ETH_PORT_AT_PARTITION_STATE 0x00000200
#define ETH_PORT_TX_FIFO_EMPTY 0x00000400
/* SMI reg */
#define ETH_SMI_BUSY 0x10000000 /* 0 - idle, 1 - operation in progress */
#define ETH_SMI_READ_VALID 0x08000000 /* read data from the PHY is valid */
#define ETH_SMI_OPCODE_WRITE 0 /* opcode bit: 0 - write */
#define ETH_SMI_OPCODE_READ 0x04000000 /* opcode bit: 1 - read */
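
/*
 * Illustrative sketch (not part of this header): PHY_WAIT_ITERATIONS and
 * PHY_WAIT_MICRO_SECONDS bound a busy-wait on the SMI busy bit before an
 * SMI read or write is issued, roughly:
 *
 *	for (i = 0; reg_read(smi_reg) & ETH_SMI_BUSY; i++) {
 *		if (i == PHY_WAIT_ITERATIONS)
 *			return;		// timed out, ~10 ms elapsed
 *		udelay(PHY_WAIT_MICRO_SECONDS);
 *	}
 *
 * reg_read() stands in for whatever register accessor the driver uses.
 */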
/* Interrupt Cause Register Bit Definitions */

/* SDMA command status fields macros */

/* Tx & Rx descriptors status */
#define ETH_ERROR_SUMMARY 0x00000001

/* Tx & Rx descriptors command */
#define ETH_BUFFER_OWNED_BY_DMA 0x80000000

/* Tx descriptors status */
#define ETH_LC_ERROR 0
#define ETH_UR_ERROR 0x00000002
#define ETH_RL_ERROR 0x00000004
#define ETH_LLC_SNAP_FORMAT 0x00000200

/* Rx descriptors status */
#define ETH_OVERRUN_ERROR 0x00000002
#define ETH_MAX_FRAME_LENGTH_ERROR 0x00000004
#define ETH_RESOURCE_ERROR 0x00000006
#define ETH_VLAN_TAGGED 0x00080000
#define ETH_BPDU_FRAME 0x00100000
#define ETH_UDP_FRAME_OVER_IP_V_4 0x00200000
#define ETH_OTHER_FRAME_TYPE 0x00400000
#define ETH_LAYER_2_IS_ETH_V_2 0x00800000
#define ETH_FRAME_TYPE_IP_V_4 0x01000000
#define ETH_FRAME_HEADER_OK 0x02000000
#define ETH_RX_LAST_DESC 0x04000000
#define ETH_RX_FIRST_DESC 0x08000000
#define ETH_UNKNOWN_DESTINATION_ADDR 0x10000000
#define ETH_RX_ENABLE_INTERRUPT 0x20000000
#define ETH_LAYER_4_CHECKSUM_OK 0x40000000

/* Rx descriptors byte count */
#define ETH_FRAME_FRAGMENTED 0x00000004

/* Tx descriptors command */
#define ETH_LAYER_4_CHECKSUM_FIRST_DESC 0x00000400
#define ETH_FRAME_SET_TO_VLAN 0x00008000
#define ETH_UDP_FRAME 0x00010000
#define ETH_GEN_TCP_UDP_CHECKSUM 0x00020000
#define ETH_GEN_IP_V_4_CHECKSUM 0x00040000
#define ETH_ZERO_PADDING 0x00080000
#define ETH_TX_LAST_DESC 0x00100000
#define ETH_TX_FIRST_DESC 0x00200000
#define ETH_GEN_CRC 0x00400000
#define ETH_TX_ENABLE_INTERRUPT 0x00800000
#define ETH_AUTO_MODE 0x40000000

#define ETH_TX_IHL_SHIFT 11
/* typedefs */

typedef enum _eth_func_ret_status {
	ETH_OK,			/* Returned as expected. */
	ETH_ERROR,		/* Fundamental error. */
	ETH_RETRY,		/* Could not process request. Try later. */
	ETH_END_OF_JOB,		/* Ring has nothing to process. */
	ETH_QUEUE_FULL,		/* Ring resource error. */
	ETH_QUEUE_LAST_RESOURCE	/* Ring resources about to exhaust. */
} ETH_FUNC_RET_STATUS;
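
/*
 * Illustrative sketch (not part of this header): a receive loop would
 * typically call eth_port_receive() (declared at the end of this file)
 * until the ring is drained, switching on these status codes:
 *
 *	while (eth_port_receive(mp, &pkt_info) == ETH_OK) {
 *		... hand pkt_info.return_info (the skb) up the stack ...
 *	}
 *	// ETH_END_OF_JOB means there is nothing left to process;
 *	// ETH_QUEUE_FULL / ETH_QUEUE_LAST_RESOURCE signal ring pressure.
 */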
typedef enum _eth_target {
	ETH_TARGET_DRAM,
	ETH_TARGET_DEVICE,
	ETH_TARGET_CBS,
	ETH_TARGET_PCI0,
	ETH_TARGET_PCI1
} ETH_TARGET;
/* These are for big-endian machines.  Little endian needs different
 * definitions.
 */
#if defined(__BIG_ENDIAN)
struct eth_rx_desc {
	u16 byte_cnt;		/* Descriptor buffer byte count */
	u16 buf_size;		/* Buffer size */
	u32 cmd_sts;		/* Descriptor command status */
	u32 next_desc_ptr;	/* Next descriptor pointer */
	u32 buf_ptr;		/* Descriptor buffer pointer */
};

struct eth_tx_desc {
	u16 byte_cnt;		/* buffer byte count */
	u16 l4i_chk;		/* CPU provided TCP checksum */
	u32 cmd_sts;		/* Command/status field */
	u32 next_desc_ptr;	/* Pointer to next descriptor */
	u32 buf_ptr;		/* pointer to buffer for this descriptor */
};
#elif defined(__LITTLE_ENDIAN)
struct eth_rx_desc {
	u32 cmd_sts;		/* Descriptor command status */
	u16 buf_size;		/* Buffer size */
	u16 byte_cnt;		/* Descriptor buffer byte count */
	u32 buf_ptr;		/* Descriptor buffer pointer */
	u32 next_desc_ptr;	/* Next descriptor pointer */
};

struct eth_tx_desc {
	u32 cmd_sts;		/* Command/status field */
	u16 l4i_chk;		/* CPU provided TCP checksum */
	u16 byte_cnt;		/* buffer byte count */
	u32 buf_ptr;		/* pointer to buffer for this descriptor */
	u32 next_desc_ptr;	/* Pointer to next descriptor */
};
#else
#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
#endif
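
/*
 * Illustrative sketch (not part of this header): regardless of endianness,
 * software owns a descriptor only once the DMA ownership bit in cmd_sts is
 * clear, so ring processing usually starts with a check such as:
 *
 *	if (desc->cmd_sts & ETH_BUFFER_OWNED_BY_DMA)
 *		break;		// hardware still owns this descriptor
 *	if ((desc->cmd_sts & (ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC)) !=
 *			(ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC))
 *		... frame spans multiple descriptors ...
 */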
/* Unified struct for Rx and Tx operations.  The user is not required to
 * be familiar with either Tx or Rx descriptors.
 */
struct pkt_info {
	unsigned short byte_cnt;	/* Descriptor buffer byte count */
	unsigned short l4i_chk;		/* Tx CPU provided TCP Checksum */
	unsigned int cmd_sts;		/* Descriptor command status */
	dma_addr_t buf_ptr;		/* Descriptor buffer pointer */
	struct sk_buff *return_info;	/* User resource return information */
};
/* Ethernet port specific information */

struct mv643xx_mib_counters {
	u64 good_octets_received;
	u32 bad_octets_received;
	u32 internal_mac_transmit_err;
	u32 good_frames_received;
	u32 bad_frames_received;
	u32 broadcast_frames_received;
	u32 multicast_frames_received;
	u32 frames_64_octets;
	u32 frames_65_to_127_octets;
	u32 frames_128_to_255_octets;
	u32 frames_256_to_511_octets;
	u32 frames_512_to_1023_octets;
	u32 frames_1024_to_max_octets;
	u64 good_octets_sent;
	u32 good_frames_sent;
	u32 excessive_collision;
	u32 multicast_frames_sent;
	u32 broadcast_frames_sent;
	u32 unrec_mac_control_received;
	u32 fc_sent;
	u32 good_fc_received;
	u32 bad_fc_received;
	u32 undersize_received;
	u32 fragments_received;
	u32 oversize_received;
	u32 jabber_received;
	u32 mac_receive_error;
	u32 bad_crc_event;
	u32 collision;
	u32 late_collision;
};
struct mv643xx_private {
	int port_num;			/* User Ethernet port number */

	u32 rx_sram_addr;		/* Base address of rx sram area */
	u32 rx_sram_size;		/* Size of rx sram area */
	u32 tx_sram_addr;		/* Base address of tx sram area */
	u32 tx_sram_size;		/* Size of tx sram area */

	int rx_resource_err;		/* Rx ring resource error flag */

	/* Tx/Rx ring management index fields.  For driver use. */

	/* Next available and first returning Rx resource */
	int rx_curr_desc_q, rx_used_desc_q;

	/* Next available and first returning Tx resource */
	int tx_curr_desc_q, tx_used_desc_q;

#ifdef MV643XX_TX_FAST_REFILL
	u32 tx_clean_threshold;
#endif

	struct eth_rx_desc *p_rx_desc_area;
	dma_addr_t rx_desc_dma;
	int rx_desc_area_size;
	struct sk_buff **rx_skb;

	struct eth_tx_desc *p_tx_desc_area;
	dma_addr_t tx_desc_dma;
	int tx_desc_area_size;
	struct sk_buff **tx_skb;

	struct work_struct tx_timeout_task;

	struct net_device_stats stats;
	struct mv643xx_mib_counters mib_counters;
	spinlock_t lock;
	/* Size of Tx Ring per queue */
	int tx_ring_size;
	/* Number of tx descriptors in use */
	int tx_desc_count;
	/* Size of Rx Ring per queue */
	int rx_ring_size;
	/* Number of rx descriptors in use */
	int rx_desc_count;

	/*
	 * Used in case the RX Ring is empty, which can occur when the
	 * system does not have resources (skb's)
	 */
	struct timer_list timeout;

	u32 rx_int_coal;
	u32 tx_int_coal;
	struct mii_if_info mii;
};
/* Port operation control routines */
static void eth_port_init(struct mv643xx_private *mp);
static void eth_port_reset(unsigned int eth_port_num);
static void eth_port_start(struct net_device *dev);

/* Port MAC address routines */
static void eth_port_uc_addr_set(unsigned int eth_port_num,
						unsigned char *p_addr);

/* PHY and MIB routines */
static void ethernet_phy_reset(unsigned int eth_port_num);

static void eth_port_write_smi_reg(unsigned int eth_port_num,
				unsigned int phy_reg, unsigned int value);

static void eth_port_read_smi_reg(unsigned int eth_port_num,
				unsigned int phy_reg, unsigned int *value);

static void eth_clear_mib_counters(unsigned int eth_port_num);

/* Port data flow control routines */
static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp,
						struct pkt_info *p_pkt_info);
static ETH_FUNC_RET_STATUS eth_rx_return_buff(struct mv643xx_private *mp,
						struct pkt_info *p_pkt_info);

#endif /* __MV643XX_ETH_H__ */