/*
 * Copyright 2010-2011 Calxeda, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/circ_buf.h>
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
/* XGMAC Register definitions */
#define XGMAC_CONTROL 0x00000000 /* MAC Configuration */
#define XGMAC_FRAME_FILTER 0x00000004 /* MAC Frame Filter */
#define XGMAC_FLOW_CTRL 0x00000018 /* MAC Flow Control */
#define XGMAC_VLAN_TAG 0x0000001C /* VLAN Tags */
#define XGMAC_VERSION 0x00000020 /* Version */
#define XGMAC_VLAN_INCL 0x00000024 /* VLAN tag for tx frames */
#define XGMAC_LPI_CTRL 0x00000028 /* LPI Control and Status */
#define XGMAC_LPI_TIMER 0x0000002C /* LPI Timers Control */
#define XGMAC_TX_PACE 0x00000030 /* Transmit Pace and Stretch */
#define XGMAC_VLAN_HASH 0x00000034 /* VLAN Hash Table */
#define XGMAC_DEBUG 0x00000038 /* Debug */
#define XGMAC_INT_STAT 0x0000003C /* Interrupt and Control */
#define XGMAC_ADDR_HIGH(reg) (0x00000040 + ((reg) * 8))
#define XGMAC_ADDR_LOW(reg) (0x00000044 + ((reg) * 8))
#define XGMAC_HASH(n) (0x00000300 + (n) * 4) /* HASH table regs */
#define XGMAC_NUM_HASH 16
#define XGMAC_OMR 0x00000400
#define XGMAC_REMOTE_WAKE 0x00000700 /* Remote Wake-Up Frm Filter */
#define XGMAC_PMT 0x00000704 /* PMT Control and Status */
#define XGMAC_MMC_CTRL 0x00000800 /* XGMAC MMC Control */
#define XGMAC_MMC_INTR_RX 0x00000804 /* Receive Interrupt */
#define XGMAC_MMC_INTR_TX 0x00000808 /* Transmit Interrupt */
#define XGMAC_MMC_INTR_MASK_RX 0x0000080c /* Receive Interrupt Mask */
#define XGMAC_MMC_INTR_MASK_TX 0x00000810 /* Transmit Interrupt Mask */

/* Hardware TX Statistics Counters */
#define XGMAC_MMC_TXOCTET_GB_LO 0x00000814
#define XGMAC_MMC_TXOCTET_GB_HI 0x00000818
#define XGMAC_MMC_TXFRAME_GB_LO 0x0000081C
#define XGMAC_MMC_TXFRAME_GB_HI 0x00000820
#define XGMAC_MMC_TXBCFRAME_G 0x00000824
#define XGMAC_MMC_TXMCFRAME_G 0x0000082C
#define XGMAC_MMC_TXUCFRAME_GB 0x00000864
#define XGMAC_MMC_TXMCFRAME_GB 0x0000086C
#define XGMAC_MMC_TXBCFRAME_GB 0x00000874
#define XGMAC_MMC_TXUNDERFLOW 0x0000087C
#define XGMAC_MMC_TXOCTET_G_LO 0x00000884
#define XGMAC_MMC_TXOCTET_G_HI 0x00000888
#define XGMAC_MMC_TXFRAME_G_LO 0x0000088C
#define XGMAC_MMC_TXFRAME_G_HI 0x00000890
#define XGMAC_MMC_TXPAUSEFRAME 0x00000894
#define XGMAC_MMC_TXVLANFRAME 0x0000089C

/* Hardware RX Statistics Counters */
#define XGMAC_MMC_RXFRAME_GB_LO 0x00000900
#define XGMAC_MMC_RXFRAME_GB_HI 0x00000904
#define XGMAC_MMC_RXOCTET_GB_LO 0x00000908
#define XGMAC_MMC_RXOCTET_GB_HI 0x0000090C
#define XGMAC_MMC_RXOCTET_G_LO 0x00000910
#define XGMAC_MMC_RXOCTET_G_HI 0x00000914
#define XGMAC_MMC_RXBCFRAME_G 0x00000918
#define XGMAC_MMC_RXMCFRAME_G 0x00000920
#define XGMAC_MMC_RXCRCERR 0x00000928
#define XGMAC_MMC_RXRUNT 0x00000930
#define XGMAC_MMC_RXJABBER 0x00000934
#define XGMAC_MMC_RXUCFRAME_G 0x00000970
#define XGMAC_MMC_RXLENGTHERR 0x00000978
#define XGMAC_MMC_RXPAUSEFRAME 0x00000988
#define XGMAC_MMC_RXOVERFLOW 0x00000990
#define XGMAC_MMC_RXVLANFRAME 0x00000998
#define XGMAC_MMC_RXWATCHDOG 0x000009a0

/* DMA Control and Status Registers */
#define XGMAC_DMA_BUS_MODE 0x00000f00 /* Bus Mode */
#define XGMAC_DMA_TX_POLL 0x00000f04 /* Transmit Poll Demand */
#define XGMAC_DMA_RX_POLL 0x00000f08 /* Received Poll Demand */
#define XGMAC_DMA_RX_BASE_ADDR 0x00000f0c /* Receive List Base */
#define XGMAC_DMA_TX_BASE_ADDR 0x00000f10 /* Transmit List Base */
#define XGMAC_DMA_STATUS 0x00000f14 /* Status Register */
#define XGMAC_DMA_CONTROL 0x00000f18 /* Ctrl (Operational Mode) */
#define XGMAC_DMA_INTR_ENA 0x00000f1c /* Interrupt Enable */
#define XGMAC_DMA_MISS_FRAME_CTR 0x00000f20 /* Missed Frame Counter */
#define XGMAC_DMA_RI_WDOG_TIMER 0x00000f24 /* RX Intr Watchdog Timer */
#define XGMAC_DMA_AXI_BUS 0x00000f28 /* AXI Bus Mode */
#define XGMAC_DMA_AXI_STATUS 0x00000f2C /* AXI Status */
#define XGMAC_DMA_HW_FEATURE 0x00000f58 /* Enabled Hardware Features */

#define XGMAC_ADDR_AE 0x80000000
#define XGMAC_MAX_FILTER_ADDR 31

/* PMT Control and Status */
#define XGMAC_PMT_POINTER_RESET 0x80000000
#define XGMAC_PMT_GLBL_UNICAST 0x00000200
#define XGMAC_PMT_WAKEUP_RX_FRM 0x00000040
#define XGMAC_PMT_MAGIC_PKT 0x00000020
#define XGMAC_PMT_WAKEUP_FRM_EN 0x00000004
#define XGMAC_PMT_MAGIC_PKT_EN 0x00000002
#define XGMAC_PMT_POWERDOWN 0x00000001

#define XGMAC_CONTROL_SPD 0x40000000 /* Speed control */
#define XGMAC_CONTROL_SPD_MASK 0x60000000
#define XGMAC_CONTROL_SPD_1G 0x60000000
#define XGMAC_CONTROL_SPD_2_5G 0x40000000
#define XGMAC_CONTROL_SPD_10G 0x00000000
#define XGMAC_CONTROL_SARC 0x10000000 /* Source Addr Insert/Replace */
#define XGMAC_CONTROL_SARK_MASK 0x18000000
#define XGMAC_CONTROL_CAR 0x04000000 /* CRC Addition/Replacement */
#define XGMAC_CONTROL_CAR_MASK 0x06000000
#define XGMAC_CONTROL_DP 0x01000000 /* Disable Padding */
#define XGMAC_CONTROL_WD 0x00800000 /* Disable Watchdog on rx */
#define XGMAC_CONTROL_JD 0x00400000 /* Jabber disable */
#define XGMAC_CONTROL_JE 0x00100000 /* Jumbo frame */
#define XGMAC_CONTROL_LM 0x00001000 /* Loop-back mode */
#define XGMAC_CONTROL_IPC 0x00000400 /* Checksum Offload */
#define XGMAC_CONTROL_ACS 0x00000080 /* Automatic Pad/FCS Strip */
#define XGMAC_CONTROL_DDIC 0x00000010 /* Disable Deficit Idle Count */
#define XGMAC_CONTROL_TE 0x00000008 /* Transmitter Enable */
#define XGMAC_CONTROL_RE 0x00000004 /* Receiver Enable */

/* XGMAC Frame Filter defines */
#define XGMAC_FRAME_FILTER_PR 0x00000001 /* Promiscuous Mode */
#define XGMAC_FRAME_FILTER_HUC 0x00000002 /* Hash Unicast */
#define XGMAC_FRAME_FILTER_HMC 0x00000004 /* Hash Multicast */
#define XGMAC_FRAME_FILTER_DAIF 0x00000008 /* DA Inverse Filtering */
#define XGMAC_FRAME_FILTER_PM 0x00000010 /* Pass all multicast */
#define XGMAC_FRAME_FILTER_DBF 0x00000020 /* Disable Broadcast frames */
#define XGMAC_FRAME_FILTER_SAIF 0x00000100 /* Inverse Filtering */
#define XGMAC_FRAME_FILTER_SAF 0x00000200 /* Source Address Filter */
#define XGMAC_FRAME_FILTER_HPF 0x00000400 /* Hash or perfect Filter */
#define XGMAC_FRAME_FILTER_VHF 0x00000800 /* VLAN Hash Filter */
#define XGMAC_FRAME_FILTER_VPF 0x00001000 /* VLAN Perfect Filter */
#define XGMAC_FRAME_FILTER_RA 0x80000000 /* Receive all mode */

/* XGMAC FLOW CTRL defines */
#define XGMAC_FLOW_CTRL_PT_MASK 0xffff0000 /* Pause Time Mask */
#define XGMAC_FLOW_CTRL_PT_SHIFT 16
#define XGMAC_FLOW_CTRL_DZQP 0x00000080 /* Disable Zero-Quanta Phase */
#define XGMAC_FLOW_CTRL_PLT 0x00000020 /* Pause Low Threshold */
#define XGMAC_FLOW_CTRL_PLT_MASK 0x00000030 /* PLT MASK */
#define XGMAC_FLOW_CTRL_UP 0x00000008 /* Unicast Pause Frame Detect */
#define XGMAC_FLOW_CTRL_RFE 0x00000004 /* Rx Flow Control Enable */
#define XGMAC_FLOW_CTRL_TFE 0x00000002 /* Tx Flow Control Enable */
#define XGMAC_FLOW_CTRL_FCB_BPA 0x00000001 /* Flow Control Busy ... */
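
/*
 * Editorial note (illustrative, not from the original source): the pause
 * quanta occupy the upper 16 bits of XGMAC_FLOW_CTRL, so a typical enable
 * value ORs the feature bits with the shifted pause time, e.g.:
 *
 *	flow = XGMAC_FLOW_CTRL_RFE | XGMAC_FLOW_CTRL_TFE |
 *	       XGMAC_FLOW_CTRL_PLT | XGMAC_FLOW_CTRL_UP |
 *	       (PAUSE_TIME << XGMAC_FLOW_CTRL_PT_SHIFT);
 *
 * xgmac_set_flow_ctrl() below builds exactly this kind of value.
 */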
/* XGMAC_INT_STAT reg */
#define XGMAC_INT_STAT_PMT 0x0080 /* PMT Interrupt Status */
#define XGMAC_INT_STAT_LPI 0x0040 /* LPI Interrupt Status */

/* DMA Bus Mode register defines */
#define DMA_BUS_MODE_SFT_RESET 0x00000001 /* Software Reset */
#define DMA_BUS_MODE_DSL_MASK 0x0000007c /* Descriptor Skip Length */
#define DMA_BUS_MODE_DSL_SHIFT 2 /* (in DWORDS) */
#define DMA_BUS_MODE_ATDS 0x00000080 /* Alternate Descriptor Size */

/* Programmable burst length */
#define DMA_BUS_MODE_PBL_MASK 0x00003f00 /* Programmable Burst Len */
#define DMA_BUS_MODE_PBL_SHIFT 8
#define DMA_BUS_MODE_FB 0x00010000 /* Fixed burst */
#define DMA_BUS_MODE_RPBL_MASK 0x003e0000 /* Rx-Programmable Burst Len */
#define DMA_BUS_MODE_RPBL_SHIFT 17
#define DMA_BUS_MODE_USP 0x00800000
#define DMA_BUS_MODE_8PBL 0x01000000
#define DMA_BUS_MODE_AAL 0x02000000

/* DMA Bus Mode register defines */
#define DMA_BUS_PR_RATIO_MASK 0x0000c000 /* Rx/Tx priority ratio */
#define DMA_BUS_PR_RATIO_SHIFT 14
#define DMA_BUS_FB 0x00010000 /* Fixed Burst */

/* DMA Control register defines */
#define DMA_CONTROL_ST 0x00002000 /* Start/Stop Transmission */
#define DMA_CONTROL_SR 0x00000002 /* Start/Stop Receive */
#define DMA_CONTROL_DFF 0x01000000 /* Disable flush of rx frames */

/* DMA Normal interrupt */
#define DMA_INTR_ENA_NIE 0x00010000 /* Normal Summary */
#define DMA_INTR_ENA_AIE 0x00008000 /* Abnormal Summary */
#define DMA_INTR_ENA_ERE 0x00004000 /* Early Receive */
#define DMA_INTR_ENA_FBE 0x00002000 /* Fatal Bus Error */
#define DMA_INTR_ENA_ETE 0x00000400 /* Early Transmit */
#define DMA_INTR_ENA_RWE 0x00000200 /* Receive Watchdog */
#define DMA_INTR_ENA_RSE 0x00000100 /* Receive Stopped */
#define DMA_INTR_ENA_RUE 0x00000080 /* Receive Buffer Unavailable */
#define DMA_INTR_ENA_RIE 0x00000040 /* Receive Interrupt */
#define DMA_INTR_ENA_UNE 0x00000020 /* Tx Underflow */
#define DMA_INTR_ENA_OVE 0x00000010 /* Receive Overflow */
#define DMA_INTR_ENA_TJE 0x00000008 /* Transmit Jabber */
#define DMA_INTR_ENA_TUE 0x00000004 /* Transmit Buffer Unavail */
#define DMA_INTR_ENA_TSE 0x00000002 /* Transmit Stopped */
#define DMA_INTR_ENA_TIE 0x00000001 /* Transmit Interrupt */

#define DMA_INTR_NORMAL (DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \
	DMA_INTR_ENA_TUE)

#define DMA_INTR_ABNORMAL (DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \
	DMA_INTR_ENA_RWE | DMA_INTR_ENA_RSE | \
	DMA_INTR_ENA_RUE | DMA_INTR_ENA_UNE | \
	DMA_INTR_ENA_OVE | DMA_INTR_ENA_TJE | \
	DMA_INTR_ENA_TSE)

/* DMA default interrupt mask */
#define DMA_INTR_DEFAULT_MASK (DMA_INTR_NORMAL | DMA_INTR_ABNORMAL)
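
/*
 * Editorial note: the DMA status bits are write-one-to-clear, so the same
 * mask is written both to XGMAC_DMA_STATUS (to acknowledge pending events)
 * and to XGMAC_DMA_INTR_ENA (to enable the sources), as done in
 * xgmac_hw_init() and xgmac_resume():
 *
 *	writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
 *	writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);
 */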
/* DMA Status register defines */
#define DMA_STATUS_GMI 0x08000000 /* MMC interrupt */
#define DMA_STATUS_GLI 0x04000000 /* GMAC Line interface int */
#define DMA_STATUS_EB_MASK 0x00380000 /* Error Bits Mask */
#define DMA_STATUS_EB_TX_ABORT 0x00080000 /* Error Bits - TX Abort */
#define DMA_STATUS_EB_RX_ABORT 0x00100000 /* Error Bits - RX Abort */
#define DMA_STATUS_TS_MASK 0x00700000 /* Transmit Process State */
#define DMA_STATUS_TS_SHIFT 20
#define DMA_STATUS_RS_MASK 0x000e0000 /* Receive Process State */
#define DMA_STATUS_RS_SHIFT 17
#define DMA_STATUS_NIS 0x00010000 /* Normal Interrupt Summary */
#define DMA_STATUS_AIS 0x00008000 /* Abnormal Interrupt Summary */
#define DMA_STATUS_ERI 0x00004000 /* Early Receive Interrupt */
#define DMA_STATUS_FBI 0x00002000 /* Fatal Bus Error Interrupt */
#define DMA_STATUS_ETI 0x00000400 /* Early Transmit Interrupt */
#define DMA_STATUS_RWT 0x00000200 /* Receive Watchdog Timeout */
#define DMA_STATUS_RPS 0x00000100 /* Receive Process Stopped */
#define DMA_STATUS_RU 0x00000080 /* Receive Buffer Unavailable */
#define DMA_STATUS_RI 0x00000040 /* Receive Interrupt */
#define DMA_STATUS_UNF 0x00000020 /* Transmit Underflow */
#define DMA_STATUS_OVF 0x00000010 /* Receive Overflow */
#define DMA_STATUS_TJT 0x00000008 /* Transmit Jabber Timeout */
#define DMA_STATUS_TU 0x00000004 /* Transmit Buffer Unavail */
#define DMA_STATUS_TPS 0x00000002 /* Transmit Process Stopped */
#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */

/* Common MAC defines */
#define MAC_ENABLE_TX 0x00000008 /* Transmitter Enable */
#define MAC_ENABLE_RX 0x00000004 /* Receiver Enable */

/* XGMAC Operation Mode Register */
#define XGMAC_OMR_TSF 0x00200000 /* TX FIFO Store and Forward */
#define XGMAC_OMR_FTF 0x00100000 /* Flush Transmit FIFO */
#define XGMAC_OMR_TTC 0x00020000 /* Transmit Threshold Ctrl */
#define XGMAC_OMR_TTC_MASK 0x00030000
#define XGMAC_OMR_RFD 0x00006000 /* FC Deactivation Threshold */
#define XGMAC_OMR_RFD_MASK 0x00007000 /* FC Deact Threshold MASK */
#define XGMAC_OMR_RFA 0x00000600 /* FC Activation Threshold */
#define XGMAC_OMR_RFA_MASK 0x00000E00 /* FC Act Threshold MASK */
#define XGMAC_OMR_EFC 0x00000100 /* Enable Hardware FC */
#define XGMAC_OMR_FEF 0x00000080 /* Forward Error Frames */
#define XGMAC_OMR_DT 0x00000040 /* Drop TCP/IP csum Errors */
#define XGMAC_OMR_RSF 0x00000020 /* RX FIFO Store and Forward */
#define XGMAC_OMR_RTC 0x00000010 /* RX Threshold Ctrl */
#define XGMAC_OMR_RTC_MASK 0x00000018 /* RX Threshold Ctrl MASK */

/* XGMAC HW Features Register */
#define DMA_HW_FEAT_TXCOESEL 0x00010000 /* TX Checksum offload */

#define XGMAC_MMC_CTRL_CNT_FRZ 0x00000008

/* XGMAC Descriptor Defines */
#define MAX_DESC_BUF_SZ (0x2000 - 8)
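
/*
 * Editorial note: each descriptor buffer length field is 13 bits wide
 * (see DESC_BUFFER1_SZ_MASK below), so one buffer can describe at most
 * 8 KiB minus a small reserve; larger SKBs are split across the
 * descriptor's two buffer pointers by desc_set_buf_len()/desc_set_buf_addr().
 */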
#define RXDESC_EXT_STATUS 0x00000001
#define RXDESC_CRC_ERR 0x00000002
#define RXDESC_RX_ERR 0x00000008
#define RXDESC_RX_WDOG 0x00000010
#define RXDESC_FRAME_TYPE 0x00000020
#define RXDESC_GIANT_FRAME 0x00000080
#define RXDESC_LAST_SEG 0x00000100
#define RXDESC_FIRST_SEG 0x00000200
#define RXDESC_VLAN_FRAME 0x00000400
#define RXDESC_OVERFLOW_ERR 0x00000800
#define RXDESC_LENGTH_ERR 0x00001000
#define RXDESC_SA_FILTER_FAIL 0x00002000
#define RXDESC_DESCRIPTOR_ERR 0x00004000
#define RXDESC_ERROR_SUMMARY 0x00008000
#define RXDESC_FRAME_LEN_OFFSET 16
#define RXDESC_FRAME_LEN_MASK 0x3fff0000
#define RXDESC_DA_FILTER_FAIL 0x40000000

#define RXDESC1_END_RING 0x00008000

#define RXDESC_IP_PAYLOAD_MASK 0x00000003
#define RXDESC_IP_PAYLOAD_UDP 0x00000001
#define RXDESC_IP_PAYLOAD_TCP 0x00000002
#define RXDESC_IP_PAYLOAD_ICMP 0x00000003
#define RXDESC_IP_HEADER_ERR 0x00000008
#define RXDESC_IP_PAYLOAD_ERR 0x00000010
#define RXDESC_IPV4_PACKET 0x00000040
#define RXDESC_IPV6_PACKET 0x00000080
#define TXDESC_UNDERFLOW_ERR 0x00000001
#define TXDESC_JABBER_TIMEOUT 0x00000002
#define TXDESC_LOCAL_FAULT 0x00000004
#define TXDESC_REMOTE_FAULT 0x00000008
#define TXDESC_VLAN_FRAME 0x00000010
#define TXDESC_FRAME_FLUSHED 0x00000020
#define TXDESC_IP_HEADER_ERR 0x00000040
#define TXDESC_PAYLOAD_CSUM_ERR 0x00000080
#define TXDESC_ERROR_SUMMARY 0x00008000
#define TXDESC_SA_CTRL_INSERT 0x00040000
#define TXDESC_SA_CTRL_REPLACE 0x00080000
#define TXDESC_2ND_ADDR_CHAINED 0x00100000
#define TXDESC_END_RING 0x00200000
#define TXDESC_CSUM_IP 0x00400000
#define TXDESC_CSUM_IP_PAYLD 0x00800000
#define TXDESC_CSUM_ALL 0x00C00000
#define TXDESC_CRC_EN_REPLACE 0x01000000
#define TXDESC_CRC_EN_APPEND 0x02000000
#define TXDESC_DISABLE_PAD 0x04000000
#define TXDESC_FIRST_SEG 0x10000000
#define TXDESC_LAST_SEG 0x20000000
#define TXDESC_INTERRUPT 0x40000000

#define DESC_OWN 0x80000000
#define DESC_BUFFER1_SZ_MASK 0x00001fff
#define DESC_BUFFER2_SZ_MASK 0x1fff0000
#define DESC_BUFFER2_SZ_OFFSET 16
struct xgmac_dma_desc {
	__le32 flags;
	__le32 buf_size;
	__le32 buf1_addr;		/* Buffer 1 Address Pointer */
	__le32 buf2_addr;		/* Buffer 2 Address Pointer */
	__le32 ext_status;
	__le32 res[3];
};
struct xgmac_extra_stats {
	/* Transmit errors */
	unsigned long tx_jabber;
	unsigned long tx_frame_flushed;
	unsigned long tx_payload_error;
	unsigned long tx_ip_header_error;
	unsigned long tx_local_fault;
	unsigned long tx_remote_fault;
	/* Receive errors */
	unsigned long rx_watchdog;
	unsigned long rx_da_filter_fail;
	unsigned long rx_sa_filter_fail;
	unsigned long rx_payload_error;
	unsigned long rx_ip_header_error;
	/* Tx/Rx IRQ errors */
	unsigned long tx_undeflow;
	unsigned long tx_process_stopped;
	unsigned long rx_buf_unav;
	unsigned long rx_process_stopped;
	unsigned long tx_early;
	unsigned long fatal_bus_error;
};
struct xgmac_priv {
	struct xgmac_dma_desc *dma_rx;
	struct sk_buff **rx_skbuff;
	unsigned int rx_tail;
	unsigned int rx_head;

	struct xgmac_dma_desc *dma_tx;
	struct sk_buff **tx_skbuff;
	unsigned int tx_head;
	unsigned int tx_tail;

	void __iomem *base;
	struct sk_buff_head rx_recycle;
	unsigned int dma_buf_sz;
	dma_addr_t dma_rx_phy;
	dma_addr_t dma_tx_phy;

	struct net_device *dev;
	struct device *device;
	struct napi_struct napi;

	struct xgmac_extra_stats xstats;

	spinlock_t stats_lock;
	int pmt_irq;
	char rx_pause;
	char tx_pause;
	int wolopts;
};
/* XGMAC Configuration Settings */
#define PAUSE_TIME 0x400

#define DMA_RX_RING_SZ 256
#define DMA_TX_RING_SZ 128
/* minimum number of free TX descriptors required to wake up TX process */
#define TX_THRESH (DMA_TX_RING_SZ/4)

/* DMA descriptor ring helpers */
#define dma_ring_incr(n, s) (((n) + 1) & ((s) - 1))
#define dma_ring_space(h, t, s) CIRC_SPACE(h, t, s)
#define dma_ring_cnt(h, t, s) CIRC_CNT(h, t, s)
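
/*
 * Editorial sketch (assumes the power-of-two ring sizes defined above):
 * the helpers reduce to simple mask arithmetic on top of <linux/circ_buf.h>:
 *
 *	next = dma_ring_incr(idx, DMA_TX_RING_SZ);         // (idx + 1) % size
 *	free = dma_ring_space(head, tail, DMA_TX_RING_SZ); // CIRC_SPACE()
 *	used = dma_ring_cnt(head, tail, DMA_TX_RING_SZ);   // CIRC_CNT()
 */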
/* XGMAC Descriptor Access Helpers */
static inline void desc_set_buf_len(struct xgmac_dma_desc *p, u32 buf_sz)
{
	if (buf_sz > MAX_DESC_BUF_SZ)
		p->buf_size = cpu_to_le32(MAX_DESC_BUF_SZ |
			(buf_sz - MAX_DESC_BUF_SZ) << DESC_BUFFER2_SZ_OFFSET);
	else
		p->buf_size = cpu_to_le32(buf_sz);
}
static inline int desc_get_buf_len(struct xgmac_dma_desc *p)
{
	u32 len = cpu_to_le32(p->flags);
	return (len & DESC_BUFFER1_SZ_MASK) +
		((len & DESC_BUFFER2_SZ_MASK) >> DESC_BUFFER2_SZ_OFFSET);
}
static inline void desc_init_rx_desc(struct xgmac_dma_desc *p, int ring_size,
				     int buf_sz)
{
	struct xgmac_dma_desc *end = p + ring_size - 1;

	memset(p, 0, sizeof(*p) * ring_size);

	for (; p <= end; p++)
		desc_set_buf_len(p, buf_sz);

	end->buf_size |= cpu_to_le32(RXDESC1_END_RING);
}
static inline void desc_init_tx_desc(struct xgmac_dma_desc *p, u32 ring_size)
{
	memset(p, 0, sizeof(*p) * ring_size);
	p[ring_size - 1].flags = cpu_to_le32(TXDESC_END_RING);
}

static inline int desc_get_owner(struct xgmac_dma_desc *p)
{
	return le32_to_cpu(p->flags) & DESC_OWN;
}

static inline void desc_set_rx_owner(struct xgmac_dma_desc *p)
{
	/* Clear all fields and set the owner */
	p->flags = cpu_to_le32(DESC_OWN);
}

static inline void desc_set_tx_owner(struct xgmac_dma_desc *p, u32 flags)
{
	u32 tmpflags = le32_to_cpu(p->flags);
	tmpflags &= TXDESC_END_RING;
	tmpflags |= flags | DESC_OWN;
	p->flags = cpu_to_le32(tmpflags);
}

static inline int desc_get_tx_ls(struct xgmac_dma_desc *p)
{
	return le32_to_cpu(p->flags) & TXDESC_LAST_SEG;
}
static inline u32 desc_get_buf_addr(struct xgmac_dma_desc *p)
{
	return le32_to_cpu(p->buf1_addr);
}

static inline void desc_set_buf_addr(struct xgmac_dma_desc *p,
				     u32 paddr, int len)
{
	p->buf1_addr = cpu_to_le32(paddr);
	if (len > MAX_DESC_BUF_SZ)
		p->buf2_addr = cpu_to_le32(paddr + MAX_DESC_BUF_SZ);
}
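
/*
 * Editorial note: for buffers larger than MAX_DESC_BUF_SZ the second buffer
 * pointer simply continues the same DMA mapping, so a jumbo receive buffer
 * is described as one contiguous region split across buf1/buf2; the matching
 * length split is done in desc_set_buf_len() above.
 */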
static inline void desc_set_buf_addr_and_size(struct xgmac_dma_desc *p,
					      u32 paddr, int len)
{
	desc_set_buf_len(p, len);
	desc_set_buf_addr(p, paddr, len);
}

static inline int desc_get_rx_frame_len(struct xgmac_dma_desc *p)
{
	u32 data = le32_to_cpu(p->flags);
	u32 len = (data & RXDESC_FRAME_LEN_MASK) >> RXDESC_FRAME_LEN_OFFSET;
	if (data & RXDESC_FRAME_TYPE)
		len -= ETH_FCS_LEN;

	return len;
}
static void xgmac_dma_flush_tx_fifo(void __iomem *ioaddr)
{
	int timeout = 1000;
	u32 reg = readl(ioaddr + XGMAC_OMR);
	writel(reg | XGMAC_OMR_FTF, ioaddr + XGMAC_OMR);

	while ((timeout-- > 0) && readl(ioaddr + XGMAC_OMR) & XGMAC_OMR_FTF)
		udelay(1);
}
static int desc_get_tx_status(struct xgmac_priv *priv, struct xgmac_dma_desc *p)
{
	struct xgmac_extra_stats *x = &priv->xstats;
	u32 status = le32_to_cpu(p->flags);

	if (!(status & TXDESC_ERROR_SUMMARY))
		return 0;

	netdev_dbg(priv->dev, "tx desc error = 0x%08x\n", status);
	if (status & TXDESC_JABBER_TIMEOUT)
		x->tx_jabber++;
	if (status & TXDESC_FRAME_FLUSHED)
		x->tx_frame_flushed++;
	if (status & TXDESC_UNDERFLOW_ERR)
		xgmac_dma_flush_tx_fifo(priv->base);
	if (status & TXDESC_IP_HEADER_ERR)
		x->tx_ip_header_error++;
	if (status & TXDESC_LOCAL_FAULT)
		x->tx_local_fault++;
	if (status & TXDESC_REMOTE_FAULT)
		x->tx_remote_fault++;
	if (status & TXDESC_PAYLOAD_CSUM_ERR)
		x->tx_payload_error++;

	return -1;
}
static int desc_get_rx_status(struct xgmac_priv *priv, struct xgmac_dma_desc *p)
{
	struct xgmac_extra_stats *x = &priv->xstats;
	int ret = CHECKSUM_UNNECESSARY;
	u32 status = le32_to_cpu(p->flags);
	u32 ext_status = le32_to_cpu(p->ext_status);

	if (status & RXDESC_DA_FILTER_FAIL) {
		netdev_dbg(priv->dev, "XGMAC RX : Dest Address filter fail\n");
		x->rx_da_filter_fail++;
		return -1;
	}

	/* All frames should fit into a single buffer */
	if (!(status & RXDESC_FIRST_SEG) || !(status & RXDESC_LAST_SEG))
		return -1;

	/* Check if packet has checksum already */
	if ((status & RXDESC_FRAME_TYPE) && (status & RXDESC_EXT_STATUS) &&
	    !(ext_status & RXDESC_IP_PAYLOAD_MASK))
		ret = CHECKSUM_NONE;

	netdev_dbg(priv->dev, "rx status - frame type=%d, csum = %d, ext stat %08x\n",
		   (status & RXDESC_FRAME_TYPE) ? 1 : 0, ret, ext_status);

	if (!(status & RXDESC_ERROR_SUMMARY))
		return ret;

	/* Handle any errors */
	if (status & (RXDESC_DESCRIPTOR_ERR | RXDESC_OVERFLOW_ERR |
		      RXDESC_GIANT_FRAME | RXDESC_LENGTH_ERR | RXDESC_CRC_ERR))
		return -1;

	if (status & RXDESC_EXT_STATUS) {
		if (ext_status & RXDESC_IP_HEADER_ERR)
			x->rx_ip_header_error++;
		if (ext_status & RXDESC_IP_PAYLOAD_ERR)
			x->rx_payload_error++;
		netdev_dbg(priv->dev, "IP checksum error - stat %08x\n",
			   ext_status);
		return CHECKSUM_NONE;
	}

	return ret;
}
static inline void xgmac_mac_enable(void __iomem *ioaddr)
{
	u32 value = readl(ioaddr + XGMAC_CONTROL);
	value |= MAC_ENABLE_RX | MAC_ENABLE_TX;
	writel(value, ioaddr + XGMAC_CONTROL);

	value = readl(ioaddr + XGMAC_DMA_CONTROL);
	value |= DMA_CONTROL_ST | DMA_CONTROL_SR;
	writel(value, ioaddr + XGMAC_DMA_CONTROL);
}

static inline void xgmac_mac_disable(void __iomem *ioaddr)
{
	u32 value = readl(ioaddr + XGMAC_DMA_CONTROL);
	value &= ~(DMA_CONTROL_ST | DMA_CONTROL_SR);
	writel(value, ioaddr + XGMAC_DMA_CONTROL);

	value = readl(ioaddr + XGMAC_CONTROL);
	value &= ~(MAC_ENABLE_TX | MAC_ENABLE_RX);
	writel(value, ioaddr + XGMAC_CONTROL);
}
static void xgmac_set_mac_addr(void __iomem *ioaddr, unsigned char *addr,
			       int num)
{
	u32 data;

	data = (addr[5] << 8) | addr[4] | (num ? XGMAC_ADDR_AE : 0);
	writel(data, ioaddr + XGMAC_ADDR_HIGH(num));
	data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
	writel(data, ioaddr + XGMAC_ADDR_LOW(num));
}

static void xgmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
			       int num)
{
	u32 hi_addr, lo_addr;

	/* Read the MAC address from the hardware */
	hi_addr = readl(ioaddr + XGMAC_ADDR_HIGH(num));
	lo_addr = readl(ioaddr + XGMAC_ADDR_LOW(num));

	/* Extract the MAC address from the high and low words */
	addr[0] = lo_addr & 0xff;
	addr[1] = (lo_addr >> 8) & 0xff;
	addr[2] = (lo_addr >> 16) & 0xff;
	addr[3] = (lo_addr >> 24) & 0xff;
	addr[4] = hi_addr & 0xff;
	addr[5] = (hi_addr >> 8) & 0xff;
}
static int xgmac_set_flow_ctrl(struct xgmac_priv *priv, int rx, int tx)
{
	u32 reg;
	unsigned int flow = 0;

	priv->rx_pause = rx;
	priv->tx_pause = tx;

	if (rx || tx) {
		if (rx)
			flow |= XGMAC_FLOW_CTRL_RFE;
		if (tx)
			flow |= XGMAC_FLOW_CTRL_TFE;

		flow |= XGMAC_FLOW_CTRL_PLT | XGMAC_FLOW_CTRL_UP;
		flow |= (PAUSE_TIME << XGMAC_FLOW_CTRL_PT_SHIFT);

		writel(flow, priv->base + XGMAC_FLOW_CTRL);

		reg = readl(priv->base + XGMAC_OMR);
		reg |= XGMAC_OMR_EFC;
		writel(reg, priv->base + XGMAC_OMR);
	} else {
		writel(0, priv->base + XGMAC_FLOW_CTRL);

		reg = readl(priv->base + XGMAC_OMR);
		reg &= ~XGMAC_OMR_EFC;
		writel(reg, priv->base + XGMAC_OMR);
	}

	return 0;
}
static void xgmac_rx_refill(struct xgmac_priv *priv)
{
	struct xgmac_dma_desc *p;
	dma_addr_t paddr;

	while (dma_ring_space(priv->rx_head, priv->rx_tail, DMA_RX_RING_SZ) > 1) {
		int entry = priv->rx_head;
		struct sk_buff *skb;

		p = priv->dma_rx + entry;

		if (priv->rx_skbuff[entry] != NULL)
			continue;

		skb = __skb_dequeue(&priv->rx_recycle);
		if (skb == NULL)
			skb = netdev_alloc_skb(priv->dev, priv->dma_buf_sz);
		if (unlikely(skb == NULL))
			break;

		priv->rx_skbuff[entry] = skb;
		paddr = dma_map_single(priv->device, skb->data,
				       priv->dma_buf_sz, DMA_FROM_DEVICE);
		desc_set_buf_addr(p, paddr, priv->dma_buf_sz);

		netdev_dbg(priv->dev, "rx ring: head %d, tail %d\n",
			   priv->rx_head, priv->rx_tail);

		priv->rx_head = dma_ring_incr(priv->rx_head, DMA_RX_RING_SZ);
		/* Ensure descriptor is in memory before handing to h/w */
		wmb();
		desc_set_rx_owner(p);
	}
}
/**
 * xgmac_dma_desc_rings_init - init the RX/TX descriptor rings
 * @dev: net device structure
 * Description: this function initializes the DMA RX/TX descriptors
 * and allocates the socket buffers.
 */
static int xgmac_dma_desc_rings_init(struct net_device *dev)
{
	struct xgmac_priv *priv = netdev_priv(dev);
	unsigned int bfsize;

	/* Set the Buffer size according to the MTU;
	 * indeed, in case of jumbo we need to bump-up the buffer sizes.
	 */
	bfsize = ALIGN(dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN + 64,
		       64);

	netdev_dbg(priv->dev, "mtu [%d] bfsize [%d]\n", dev->mtu, bfsize);

	priv->rx_skbuff = kzalloc(sizeof(struct sk_buff *) * DMA_RX_RING_SZ,
				  GFP_KERNEL);
	if (!priv->rx_skbuff)
		return -ENOMEM;

	priv->dma_rx = dma_alloc_coherent(priv->device,
					  DMA_RX_RING_SZ *
					  sizeof(struct xgmac_dma_desc),
					  &priv->dma_rx_phy,
					  GFP_KERNEL);
	if (!priv->dma_rx)
		goto err_dma_rx;

	priv->tx_skbuff = kzalloc(sizeof(struct sk_buff *) * DMA_TX_RING_SZ,
				  GFP_KERNEL);
	if (!priv->tx_skbuff)
		goto err_tx_skb;

	priv->dma_tx = dma_alloc_coherent(priv->device,
					  DMA_TX_RING_SZ *
					  sizeof(struct xgmac_dma_desc),
					  &priv->dma_tx_phy,
					  GFP_KERNEL);
	if (!priv->dma_tx)
		goto err_dma_tx;

	netdev_dbg(priv->dev, "DMA desc rings: virt addr (Rx %p, "
		   "Tx %p)\n\tDMA phy addr (Rx 0x%08x, Tx 0x%08x)\n",
		   priv->dma_rx, priv->dma_tx,
		   (unsigned int)priv->dma_rx_phy, (unsigned int)priv->dma_tx_phy);

	priv->rx_tail = 0;
	priv->rx_head = 0;
	priv->dma_buf_sz = bfsize;
	desc_init_rx_desc(priv->dma_rx, DMA_RX_RING_SZ, priv->dma_buf_sz);
	xgmac_rx_refill(priv);

	priv->tx_tail = 0;
	priv->tx_head = 0;
	desc_init_tx_desc(priv->dma_tx, DMA_TX_RING_SZ);

	writel(priv->dma_tx_phy, priv->base + XGMAC_DMA_TX_BASE_ADDR);
	writel(priv->dma_rx_phy, priv->base + XGMAC_DMA_RX_BASE_ADDR);

	return 0;

err_dma_tx:
	kfree(priv->tx_skbuff);
err_tx_skb:
	dma_free_coherent(priv->device,
			  DMA_RX_RING_SZ * sizeof(struct xgmac_dma_desc),
			  priv->dma_rx, priv->dma_rx_phy);
err_dma_rx:
	kfree(priv->rx_skbuff);
	return -ENOMEM;
}
static void xgmac_free_rx_skbufs(struct xgmac_priv *priv)
{
	int i;
	struct xgmac_dma_desc *p;

	if (!priv->rx_skbuff)
		return;

	for (i = 0; i < DMA_RX_RING_SZ; i++) {
		if (priv->rx_skbuff[i] == NULL)
			continue;

		p = priv->dma_rx + i;
		dma_unmap_single(priv->device, desc_get_buf_addr(p),
				 priv->dma_buf_sz, DMA_FROM_DEVICE);
		dev_kfree_skb_any(priv->rx_skbuff[i]);
		priv->rx_skbuff[i] = NULL;
	}
}
static void xgmac_free_tx_skbufs(struct xgmac_priv *priv)
{
	int i, f;
	struct xgmac_dma_desc *p;

	if (!priv->tx_skbuff)
		return;

	for (i = 0; i < DMA_TX_RING_SZ; i++) {
		if (priv->tx_skbuff[i] == NULL)
			continue;

		p = priv->dma_tx + i;
		dma_unmap_single(priv->device, desc_get_buf_addr(p),
				 desc_get_buf_len(p), DMA_TO_DEVICE);

		for (f = 0; f < skb_shinfo(priv->tx_skbuff[i])->nr_frags; f++) {
			p = priv->dma_tx + i++;
			dma_unmap_page(priv->device, desc_get_buf_addr(p),
				       desc_get_buf_len(p), DMA_TO_DEVICE);
		}

		dev_kfree_skb_any(priv->tx_skbuff[i]);
		priv->tx_skbuff[i] = NULL;
	}
}
*priv
)
827 /* Release the DMA TX/RX socket buffers */
828 xgmac_free_rx_skbufs(priv
);
829 xgmac_free_tx_skbufs(priv
);
831 /* Free the consistent memory allocated for descriptor rings */
833 dma_free_coherent(priv
->device
,
834 DMA_TX_RING_SZ
* sizeof(struct xgmac_dma_desc
),
835 priv
->dma_tx
, priv
->dma_tx_phy
);
839 dma_free_coherent(priv
->device
,
840 DMA_RX_RING_SZ
* sizeof(struct xgmac_dma_desc
),
841 priv
->dma_rx
, priv
->dma_rx_phy
);
844 kfree(priv
->rx_skbuff
);
845 priv
->rx_skbuff
= NULL
;
846 kfree(priv
->tx_skbuff
);
847 priv
->tx_skbuff
= NULL
;
/**
 * xgmac_tx_complete:
 * @priv: private driver structure
 * Description: it reclaims resources after transmission completes.
 */
static void xgmac_tx_complete(struct xgmac_priv *priv)
{
	int i;
	void __iomem *ioaddr = priv->base;

	writel(DMA_STATUS_TU | DMA_STATUS_NIS, ioaddr + XGMAC_DMA_STATUS);

	while (dma_ring_cnt(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ)) {
		unsigned int entry = priv->tx_tail;
		struct sk_buff *skb = priv->tx_skbuff[entry];
		struct xgmac_dma_desc *p = priv->dma_tx + entry;

		/* Check if the descriptor is owned by the DMA. */
		if (desc_get_owner(p))
			break;

		/* Verify tx error by looking at the last segment */
		if (desc_get_tx_ls(p))
			desc_get_tx_status(priv, p);

		netdev_dbg(priv->dev, "tx ring: curr %d, dirty %d\n",
			   priv->tx_head, priv->tx_tail);

		dma_unmap_single(priv->device, desc_get_buf_addr(p),
				 desc_get_buf_len(p), DMA_TO_DEVICE);

		priv->tx_skbuff[entry] = NULL;
		priv->tx_tail = dma_ring_incr(entry, DMA_TX_RING_SZ);

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			entry = priv->tx_tail = dma_ring_incr(priv->tx_tail,
							      DMA_TX_RING_SZ);
			p = priv->dma_tx + priv->tx_tail;

			dma_unmap_page(priv->device, desc_get_buf_addr(p),
				       desc_get_buf_len(p), DMA_TO_DEVICE);
		}

		/*
		 * If there's room in the queue (limit it to size)
		 * we add this skb back into the pool,
		 * if it's the right size.
		 */
		if ((skb_queue_len(&priv->rx_recycle) <
		     DMA_RX_RING_SZ) &&
		    skb_recycle_check(skb, priv->dma_buf_sz))
			__skb_queue_head(&priv->rx_recycle, skb);
		else
			dev_kfree_skb(skb);
	}

	if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) >
	    TX_THRESH)
		netif_wake_queue(priv->dev);
}
/**
 * xgmac_tx_err:
 * @priv: pointer to the private device structure
 * Description: it cleans the descriptors and restarts the transmission
 * in case of errors.
 */
static void xgmac_tx_err(struct xgmac_priv *priv)
{
	u32 reg, value, inten;

	netif_stop_queue(priv->dev);

	inten = readl(priv->base + XGMAC_DMA_INTR_ENA);
	writel(0, priv->base + XGMAC_DMA_INTR_ENA);

	reg = readl(priv->base + XGMAC_DMA_CONTROL);
	writel(reg & ~DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL);
	do {
		value = readl(priv->base + XGMAC_DMA_STATUS) & 0x700000;
	} while (value && (value != 0x600000));

	xgmac_free_tx_skbufs(priv);
	desc_init_tx_desc(priv->dma_tx, DMA_TX_RING_SZ);
	priv->tx_tail = 0;
	priv->tx_head = 0;
	writel(reg | DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL);

	writel(DMA_STATUS_TU | DMA_STATUS_TPS | DMA_STATUS_NIS | DMA_STATUS_AIS,
	       priv->base + XGMAC_DMA_STATUS);
	writel(inten, priv->base + XGMAC_DMA_INTR_ENA);

	netif_wake_queue(priv->dev);
}
static int xgmac_hw_init(struct net_device *dev)
{
	u32 value, ctrl;
	int limit;
	struct xgmac_priv *priv = netdev_priv(dev);
	void __iomem *ioaddr = priv->base;

	/* Save the ctrl register value */
	ctrl = readl(ioaddr + XGMAC_CONTROL) & XGMAC_CONTROL_SPD_MASK;

	/* SW reset */
	value = DMA_BUS_MODE_SFT_RESET;
	writel(value, ioaddr + XGMAC_DMA_BUS_MODE);
	limit = 15000;
	while (limit-- &&
	       (readl(ioaddr + XGMAC_DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET))
		cpu_relax();
	if (limit < 0)
		return -EBUSY;

	value = (0x10 << DMA_BUS_MODE_PBL_SHIFT) |
		(0x10 << DMA_BUS_MODE_RPBL_SHIFT) |
		DMA_BUS_MODE_FB | DMA_BUS_MODE_ATDS | DMA_BUS_MODE_AAL;
	writel(value, ioaddr + XGMAC_DMA_BUS_MODE);

	/* Enable interrupts */
	writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
	writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);

	/* XGMAC requires AXI bus init. This is a 'magic number' for now */
	writel(0x000100E, ioaddr + XGMAC_DMA_AXI_BUS);

	ctrl |= XGMAC_CONTROL_DDIC | XGMAC_CONTROL_JE | XGMAC_CONTROL_ACS |
		XGMAC_CONTROL_CAR;
	if (dev->features & NETIF_F_RXCSUM)
		ctrl |= XGMAC_CONTROL_IPC;
	writel(ctrl, ioaddr + XGMAC_CONTROL);

	value = DMA_CONTROL_DFF;
	writel(value, ioaddr + XGMAC_DMA_CONTROL);

	/* Set the HW DMA mode and the COE */
	writel(XGMAC_OMR_TSF | XGMAC_OMR_RSF | XGMAC_OMR_RFD | XGMAC_OMR_RFA,
	       ioaddr + XGMAC_OMR);

	/* Reset the MMC counters */
	writel(1, ioaddr + XGMAC_MMC_CTRL);
	return 0;
}
/**
 *  xgmac_open - open entry point of the driver
 *  @dev : pointer to the device structure.
 *  Description:
 *  This function is the open entry point of the driver.
 *  Return value:
 *  0 on success and an appropriate (-)ve integer as defined in errno.h
 *  file on failure.
 */
static int xgmac_open(struct net_device *dev)
{
	int ret;
	struct xgmac_priv *priv = netdev_priv(dev);
	void __iomem *ioaddr = priv->base;

	/* Check that the MAC address is valid.  If it is not, refuse
	 * to bring the device up. The user must specify an
	 * address using the following linux command:
	 *      ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx  */
	if (!is_valid_ether_addr(dev->dev_addr)) {
		eth_hw_addr_random(dev);
		netdev_dbg(priv->dev, "generated random MAC address %pM\n",
			   dev->dev_addr);
	}

	skb_queue_head_init(&priv->rx_recycle);
	memset(&priv->xstats, 0, sizeof(struct xgmac_extra_stats));

	/* Initialize the XGMAC and descriptors */
	xgmac_hw_init(dev);
	xgmac_set_mac_addr(ioaddr, dev->dev_addr, 0);
	xgmac_set_flow_ctrl(priv, priv->rx_pause, priv->tx_pause);

	ret = xgmac_dma_desc_rings_init(dev);
	if (ret < 0)
		return ret;

	/* Enable the MAC Rx/Tx */
	xgmac_mac_enable(ioaddr);

	napi_enable(&priv->napi);
	netif_start_queue(dev);

	return 0;
}
/**
 *  xgmac_stop - close entry point of the driver
 *  @dev : device pointer.
 *  Description:
 *  This is the stop entry point of the driver.
 */
static int xgmac_stop(struct net_device *dev)
{
	struct xgmac_priv *priv = netdev_priv(dev);

	netif_stop_queue(dev);

	if (readl(priv->base + XGMAC_DMA_INTR_ENA))
		napi_disable(&priv->napi);

	writel(0, priv->base + XGMAC_DMA_INTR_ENA);
	skb_queue_purge(&priv->rx_recycle);

	/* Disable the MAC core */
	xgmac_mac_disable(priv->base);

	/* Release and free the Rx/Tx resources */
	xgmac_free_dma_desc_rings(priv);

	return 0;
}
/**
 *  xgmac_xmit:
 *  @skb : the socket buffer
 *  @dev : device pointer
 *  Description : Tx entry point of the driver.
 */
static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct xgmac_priv *priv = netdev_priv(dev);
	unsigned int entry;
	int i;
	int nfrags = skb_shinfo(skb)->nr_frags;
	struct xgmac_dma_desc *desc, *first;
	unsigned int desc_flags;
	unsigned int len;
	dma_addr_t paddr;

	if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) <
	    (nfrags + 1)) {
		writel(DMA_INTR_DEFAULT_MASK | DMA_INTR_ENA_TIE,
		       priv->base + XGMAC_DMA_INTR_ENA);
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}

	desc_flags = (skb->ip_summed == CHECKSUM_PARTIAL) ?
		TXDESC_CSUM_ALL : 0;
	entry = priv->tx_head;
	desc = priv->dma_tx + entry;
	first = desc;

	len = skb_headlen(skb);
	paddr = dma_map_single(priv->device, skb->data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(priv->device, paddr)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	priv->tx_skbuff[entry] = skb;
	desc_set_buf_addr_and_size(desc, paddr, len);

	for (i = 0; i < nfrags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		len = skb_frag_size(frag);

		paddr = skb_frag_dma_map(priv->device, frag, 0, len,
					 DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, paddr)) {
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}

		entry = dma_ring_incr(entry, DMA_TX_RING_SZ);
		desc = priv->dma_tx + entry;
		priv->tx_skbuff[entry] = NULL;

		desc_set_buf_addr_and_size(desc, paddr, len);
		if (i < (nfrags - 1))
			desc_set_tx_owner(desc, desc_flags);
	}

	/* Interrupt on completion only for the latest segment */
	if (desc != first)
		desc_set_tx_owner(desc, desc_flags |
			TXDESC_LAST_SEG | TXDESC_INTERRUPT);
	else
		desc_flags |= TXDESC_LAST_SEG | TXDESC_INTERRUPT;

	/* Set owner on first desc last to avoid race condition */
	wmb();
	desc_set_tx_owner(first, desc_flags | TXDESC_FIRST_SEG);

	priv->tx_head = dma_ring_incr(entry, DMA_TX_RING_SZ);

	writel(1, priv->base + XGMAC_DMA_TX_POLL);

	return NETDEV_TX_OK;
}
static int xgmac_rx(struct xgmac_priv *priv, int limit)
{
	unsigned int entry;
	unsigned int count = 0;
	struct xgmac_dma_desc *p;

	while (count < limit) {
		int ip_checksum;
		struct sk_buff *skb;
		int frame_len;

		writel(DMA_STATUS_RI | DMA_STATUS_NIS,
		       priv->base + XGMAC_DMA_STATUS);

		entry = priv->rx_tail;
		p = priv->dma_rx + entry;
		if (desc_get_owner(p))
			break;

		count++;
		priv->rx_tail = dma_ring_incr(priv->rx_tail, DMA_RX_RING_SZ);

		/* read the status of the incoming frame */
		ip_checksum = desc_get_rx_status(priv, p);
		if (ip_checksum < 0)
			continue;

		skb = priv->rx_skbuff[entry];
		if (unlikely(!skb)) {
			netdev_err(priv->dev, "Inconsistent Rx descriptor chain\n");
			break;
		}
		priv->rx_skbuff[entry] = NULL;

		frame_len = desc_get_rx_frame_len(p);
		netdev_dbg(priv->dev, "RX frame size %d, COE status: %d\n",
			   frame_len, ip_checksum);

		skb_put(skb, frame_len);
		dma_unmap_single(priv->device, desc_get_buf_addr(p),
				 frame_len, DMA_FROM_DEVICE);

		skb->protocol = eth_type_trans(skb, priv->dev);
		skb->ip_summed = ip_checksum;
		if (ip_checksum == CHECKSUM_NONE)
			netif_receive_skb(skb);
		else
			napi_gro_receive(&priv->napi, skb);
	}

	xgmac_rx_refill(priv);

	writel(1, priv->base + XGMAC_DMA_RX_POLL);

	return count;
}
/**
 *  xgmac_poll - xgmac poll method (NAPI)
 *  @napi : pointer to the napi structure.
 *  @budget : maximum number of packets that the current CPU can receive from
 *	      all interfaces.
 *  Description :
 *  This function implements the reception process.
 *  Also it runs the TX completion thread
 */
static int xgmac_poll(struct napi_struct *napi, int budget)
{
	struct xgmac_priv *priv = container_of(napi,
				       struct xgmac_priv, napi);
	int work_done = 0;

	xgmac_tx_complete(priv);
	work_done = xgmac_rx(priv, budget);

	if (work_done < budget) {
		napi_complete(napi);
		writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA);
	}
	return work_done;
}
/**
 *  xgmac_tx_timeout
 *  @dev : Pointer to net device structure
 *  Description: this function is called when a packet transmission fails to
 *   complete within a reasonable time. The driver will mark the error in the
 *   netdev structure and arrange for the device to be reset to a sane state
 *   in order to transmit a new packet.
 */
static void xgmac_tx_timeout(struct net_device *dev)
{
	struct xgmac_priv *priv = netdev_priv(dev);

	/* Clear Tx resources and restart transmitting again */
	xgmac_tx_err(priv);
}
/**
 *  xgmac_set_rx_mode - entry point for multicast addressing
 *  @dev : pointer to the device structure
 *  Description:
 *  This function is a driver entry point which gets called by the kernel
 *  whenever multicast addresses must be enabled/disabled.
 *  Return value:
 *  void.
 */
static void xgmac_set_rx_mode(struct net_device *dev)
{
	int i;
	struct xgmac_priv *priv = netdev_priv(dev);
	void __iomem *ioaddr = priv->base;
	unsigned int value = 0;
	u32 hash_filter[XGMAC_NUM_HASH];
	int reg = 1;
	struct netdev_hw_addr *ha;
	bool use_hash = false;

	netdev_dbg(priv->dev, "# mcasts %d, # unicast %d\n",
		   netdev_mc_count(dev), netdev_uc_count(dev));

	if (dev->flags & IFF_PROMISC) {
		writel(XGMAC_FRAME_FILTER_PR, ioaddr + XGMAC_FRAME_FILTER);
		return;
	}

	memset(hash_filter, 0, sizeof(hash_filter));

	if (netdev_uc_count(dev) > XGMAC_MAX_FILTER_ADDR) {
		use_hash = true;
		value |= XGMAC_FRAME_FILTER_HUC | XGMAC_FRAME_FILTER_HPF;
	}
	netdev_for_each_uc_addr(ha, dev) {
		if (use_hash) {
			u32 bit_nr = ~ether_crc(ETH_ALEN, ha->addr) >> 23;

			/* The most significant 4 bits determine the register to
			 * use (H/L) while the other 5 bits determine the bit
			 * within the register. */
			hash_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
		} else {
			xgmac_set_mac_addr(ioaddr, ha->addr, reg);
			reg++;
		}
	}

	if (dev->flags & IFF_ALLMULTI) {
		value |= XGMAC_FRAME_FILTER_PM;
		goto out;
	}

	if ((netdev_mc_count(dev) + reg - 1) > XGMAC_MAX_FILTER_ADDR) {
		use_hash = true;
		value |= XGMAC_FRAME_FILTER_HMC | XGMAC_FRAME_FILTER_HPF;
	}
	netdev_for_each_mc_addr(ha, dev) {
		if (use_hash) {
			u32 bit_nr = ~ether_crc(ETH_ALEN, ha->addr) >> 23;

			/* The most significant 4 bits determine the register to
			 * use (H/L) while the other 5 bits determine the bit
			 * within the register. */
			hash_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
		} else {
			xgmac_set_mac_addr(ioaddr, ha->addr, reg);
			reg++;
		}
	}

out:
	for (i = 0; i < XGMAC_NUM_HASH; i++)
		writel(hash_filter[i], ioaddr + XGMAC_HASH(i));

	writel(value, ioaddr + XGMAC_FRAME_FILTER);
}
/**
 *  xgmac_change_mtu - entry point to change MTU size for the device.
 *  @dev : device pointer.
 *  @new_mtu : the new MTU size for the device.
 *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
 *  to drive packet transmission. Ethernet has an MTU of 1500 octets
 *  (ETH_DATA_LEN). This value can be changed with ifconfig.
 *  Return value:
 *  0 on success and an appropriate (-)ve integer as defined in errno.h
 *  file on failure.
 */
static int xgmac_change_mtu(struct net_device *dev, int new_mtu)
{
	struct xgmac_priv *priv = netdev_priv(dev);
	int old_mtu;

	if ((new_mtu < 46) || (new_mtu > MAX_MTU)) {
		netdev_err(priv->dev, "invalid MTU, max MTU is: %d\n", MAX_MTU);
		return -EINVAL;
	}

	old_mtu = dev->mtu;
	dev->mtu = new_mtu;

	/* return early if the buffer sizes will not change */
	if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
		return 0;
	if (old_mtu == new_mtu)
		return 0;

	/* Stop everything, get ready to change the MTU */
	if (!netif_running(dev))
		return 0;

	/* Bring the interface down and then back up */
	xgmac_stop(dev);
	return xgmac_open(dev);
}
static irqreturn_t xgmac_pmt_interrupt(int irq, void *dev_id)
{
	u32 intr_status;
	struct net_device *dev = (struct net_device *)dev_id;
	struct xgmac_priv *priv = netdev_priv(dev);
	void __iomem *ioaddr = priv->base;

	intr_status = readl(ioaddr + XGMAC_INT_STAT);
	if (intr_status & XGMAC_INT_STAT_PMT) {
		netdev_dbg(priv->dev, "received Magic frame\n");
		/* clear the PMT bits 5 and 6 by reading the PMT */
		readl(ioaddr + XGMAC_PMT);
	}
	return IRQ_HANDLED;
}
static irqreturn_t xgmac_interrupt(int irq, void *dev_id)
{
	u32 intr_status;
	bool tx_err = false;
	struct net_device *dev = (struct net_device *)dev_id;
	struct xgmac_priv *priv = netdev_priv(dev);
	struct xgmac_extra_stats *x = &priv->xstats;

	/* read the status register (CSR5) */
	intr_status = readl(priv->base + XGMAC_DMA_STATUS);
	intr_status &= readl(priv->base + XGMAC_DMA_INTR_ENA);
	writel(intr_status, priv->base + XGMAC_DMA_STATUS);

	/* It displays the DMA process states (CSR5 register) */
	/* ABNORMAL interrupts */
	if (unlikely(intr_status & DMA_STATUS_AIS)) {
		if (intr_status & DMA_STATUS_TJT) {
			netdev_err(priv->dev, "transmit jabber\n");
			x->tx_jabber++;
		}
		if (intr_status & DMA_STATUS_RU)
			x->rx_buf_unav++;
		if (intr_status & DMA_STATUS_RPS) {
			netdev_err(priv->dev, "receive process stopped\n");
			x->rx_process_stopped++;
		}
		if (intr_status & DMA_STATUS_ETI) {
			netdev_err(priv->dev, "transmit early interrupt\n");
			x->tx_early++;
		}
		if (intr_status & DMA_STATUS_TPS) {
			netdev_err(priv->dev, "transmit process stopped\n");
			x->tx_process_stopped++;
			tx_err = true;
		}
		if (intr_status & DMA_STATUS_FBI) {
			netdev_err(priv->dev, "fatal bus error\n");
			x->fatal_bus_error++;
			tx_err = true;
		}

		if (tx_err)
			xgmac_tx_err(priv);
	}

	/* TX/RX NORMAL interrupts */
	if (intr_status & (DMA_STATUS_RI | DMA_STATUS_TU)) {
		writel(DMA_INTR_ABNORMAL, priv->base + XGMAC_DMA_INTR_ENA);
		napi_schedule(&priv->napi);
	}

	return IRQ_HANDLED;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling receive - used by NETCONSOLE and other diagnostic tools
 * to allow network I/O with interrupts disabled. */
static void xgmac_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	xgmac_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif
static struct rtnl_link_stats64 *
xgmac_get_stats64(struct net_device *dev,
		  struct rtnl_link_stats64 *storage)
{
	struct xgmac_priv *priv = netdev_priv(dev);
	void __iomem *base = priv->base;
	u32 count;

	spin_lock_bh(&priv->stats_lock);
	writel(XGMAC_MMC_CTRL_CNT_FRZ, base + XGMAC_MMC_CTRL);

	storage->rx_bytes = readl(base + XGMAC_MMC_RXOCTET_G_LO);
	storage->rx_bytes |= (u64)(readl(base + XGMAC_MMC_RXOCTET_G_HI)) << 32;

	storage->rx_packets = readl(base + XGMAC_MMC_RXFRAME_GB_LO);
	storage->multicast = readl(base + XGMAC_MMC_RXMCFRAME_G);
	storage->rx_crc_errors = readl(base + XGMAC_MMC_RXCRCERR);
	storage->rx_length_errors = readl(base + XGMAC_MMC_RXLENGTHERR);
	storage->rx_missed_errors = readl(base + XGMAC_MMC_RXOVERFLOW);

	storage->tx_bytes = readl(base + XGMAC_MMC_TXOCTET_G_LO);
	storage->tx_bytes |= (u64)(readl(base + XGMAC_MMC_TXOCTET_G_HI)) << 32;

	count = readl(base + XGMAC_MMC_TXFRAME_GB_LO);
	storage->tx_errors = count - readl(base + XGMAC_MMC_TXFRAME_G_LO);
	storage->tx_packets = count;
	storage->tx_fifo_errors = readl(base + XGMAC_MMC_TXUNDERFLOW);

	writel(0, base + XGMAC_MMC_CTRL);
	spin_unlock_bh(&priv->stats_lock);
	return storage;
}
*dev
, void *p
)
1482 struct xgmac_priv
*priv
= netdev_priv(dev
);
1483 void __iomem
*ioaddr
= priv
->base
;
1484 struct sockaddr
*addr
= p
;
1486 if (!is_valid_ether_addr(addr
->sa_data
))
1487 return -EADDRNOTAVAIL
;
1489 dev
->addr_assign_type
&= ~NET_ADDR_RANDOM
;
1490 memcpy(dev
->dev_addr
, addr
->sa_data
, dev
->addr_len
);
1492 xgmac_set_mac_addr(ioaddr
, dev
->dev_addr
, 0);
1497 static int xgmac_set_features(struct net_device
*dev
, netdev_features_t features
)
1500 struct xgmac_priv
*priv
= netdev_priv(dev
);
1501 void __iomem
*ioaddr
= priv
->base
;
1502 u32 changed
= dev
->features
^ features
;
1504 if (!(changed
& NETIF_F_RXCSUM
))
1507 ctrl
= readl(ioaddr
+ XGMAC_CONTROL
);
1508 if (features
& NETIF_F_RXCSUM
)
1509 ctrl
|= XGMAC_CONTROL_IPC
;
1511 ctrl
&= ~XGMAC_CONTROL_IPC
;
1512 writel(ctrl
, ioaddr
+ XGMAC_CONTROL
);
static const struct net_device_ops xgmac_netdev_ops = {
	.ndo_open = xgmac_open,
	.ndo_start_xmit = xgmac_xmit,
	.ndo_stop = xgmac_stop,
	.ndo_change_mtu = xgmac_change_mtu,
	.ndo_set_rx_mode = xgmac_set_rx_mode,
	.ndo_tx_timeout = xgmac_tx_timeout,
	.ndo_get_stats64 = xgmac_get_stats64,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = xgmac_poll_controller,
#endif
	.ndo_set_mac_address = xgmac_set_mac_address,
	.ndo_set_features = xgmac_set_features,
};
static int xgmac_ethtool_getsettings(struct net_device *dev,
				     struct ethtool_cmd *cmd)
{
	cmd->autoneg = 0;
	cmd->duplex = DUPLEX_FULL;
	ethtool_cmd_speed_set(cmd, 10000);
	cmd->supported = 0;
	cmd->advertising = 0;
	cmd->transceiver = XCVR_INTERNAL;
	return 0;
}
static void xgmac_get_pauseparam(struct net_device *netdev,
				 struct ethtool_pauseparam *pause)
{
	struct xgmac_priv *priv = netdev_priv(netdev);

	pause->rx_pause = priv->rx_pause;
	pause->tx_pause = priv->tx_pause;
}

static int xgmac_set_pauseparam(struct net_device *netdev,
				struct ethtool_pauseparam *pause)
{
	struct xgmac_priv *priv = netdev_priv(netdev);

	if (pause->autoneg)
		return -EINVAL;

	return xgmac_set_flow_ctrl(priv, pause->rx_pause, pause->tx_pause);
}
struct xgmac_stats {
	char stat_string[ETH_GSTRING_LEN];
	int stat_offset;
	bool is_reg;
};

#define XGMAC_STAT(m) \
	{ #m, offsetof(struct xgmac_priv, xstats.m), false }
#define XGMAC_HW_STAT(m, reg_offset) \
	{ #m, reg_offset, true }
static const struct xgmac_stats xgmac_gstrings_stats[] = {
	XGMAC_STAT(tx_frame_flushed),
	XGMAC_STAT(tx_payload_error),
	XGMAC_STAT(tx_ip_header_error),
	XGMAC_STAT(tx_local_fault),
	XGMAC_STAT(tx_remote_fault),
	XGMAC_STAT(tx_early),
	XGMAC_STAT(tx_process_stopped),
	XGMAC_STAT(tx_jabber),
	XGMAC_STAT(rx_buf_unav),
	XGMAC_STAT(rx_process_stopped),
	XGMAC_STAT(rx_payload_error),
	XGMAC_STAT(rx_ip_header_error),
	XGMAC_STAT(rx_da_filter_fail),
	XGMAC_STAT(rx_sa_filter_fail),
	XGMAC_STAT(fatal_bus_error),
	XGMAC_HW_STAT(rx_watchdog, XGMAC_MMC_RXWATCHDOG),
	XGMAC_HW_STAT(tx_vlan, XGMAC_MMC_TXVLANFRAME),
	XGMAC_HW_STAT(rx_vlan, XGMAC_MMC_RXVLANFRAME),
	XGMAC_HW_STAT(tx_pause, XGMAC_MMC_TXPAUSEFRAME),
	XGMAC_HW_STAT(rx_pause, XGMAC_MMC_RXPAUSEFRAME),
};

#define XGMAC_STATS_LEN ARRAY_SIZE(xgmac_gstrings_stats)
static void xgmac_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *dummy,
				    u64 *data)
{
	struct xgmac_priv *priv = netdev_priv(dev);
	void *p = priv;
	int i;

	for (i = 0; i < XGMAC_STATS_LEN; i++) {
		if (xgmac_gstrings_stats[i].is_reg)
			*data++ = readl(priv->base +
				xgmac_gstrings_stats[i].stat_offset);
		else
			*data++ = *(u32 *)(p +
				xgmac_gstrings_stats[i].stat_offset);
	}
}
static int xgmac_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return XGMAC_STATS_LEN;
	default:
		return -EINVAL;
	}
}
static void xgmac_get_strings(struct net_device *dev, u32 stringset,
			      u8 *data)
{
	int i;
	u8 *p = data;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < XGMAC_STATS_LEN; i++) {
			memcpy(p, xgmac_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		break;
	default:
		WARN_ON(1);
		break;
	}
}
*dev
,
1648 struct ethtool_wolinfo
*wol
)
1650 struct xgmac_priv
*priv
= netdev_priv(dev
);
1652 if (device_can_wakeup(priv
->device
)) {
1653 wol
->supported
= WAKE_MAGIC
| WAKE_UCAST
;
1654 wol
->wolopts
= priv
->wolopts
;
1658 static int xgmac_set_wol(struct net_device
*dev
,
1659 struct ethtool_wolinfo
*wol
)
1661 struct xgmac_priv
*priv
= netdev_priv(dev
);
1662 u32 support
= WAKE_MAGIC
| WAKE_UCAST
;
1664 if (!device_can_wakeup(priv
->device
))
1667 if (wol
->wolopts
& ~support
)
1670 priv
->wolopts
= wol
->wolopts
;
1673 device_set_wakeup_enable(priv
->device
, 1);
1674 enable_irq_wake(dev
->irq
);
1676 device_set_wakeup_enable(priv
->device
, 0);
1677 disable_irq_wake(dev
->irq
);
1683 static const struct ethtool_ops xgmac_ethtool_ops
= {
1684 .get_settings
= xgmac_ethtool_getsettings
,
1685 .get_link
= ethtool_op_get_link
,
1686 .get_pauseparam
= xgmac_get_pauseparam
,
1687 .set_pauseparam
= xgmac_set_pauseparam
,
1688 .get_ethtool_stats
= xgmac_get_ethtool_stats
,
1689 .get_strings
= xgmac_get_strings
,
1690 .get_wol
= xgmac_get_wol
,
1691 .set_wol
= xgmac_set_wol
,
1692 .get_sset_count
= xgmac_get_sset_count
,
/**
 * xgmac_probe
 * @pdev: platform device pointer
 * Description: the driver is initialized through platform_device.
 */
static int xgmac_probe(struct platform_device *pdev)
{
	int ret = 0;
	struct resource *res;
	struct net_device *ndev = NULL;
	struct xgmac_priv *priv = NULL;
	u32 uid;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	if (!request_mem_region(res->start, resource_size(res), pdev->name))
		return -EBUSY;

	ndev = alloc_etherdev(sizeof(struct xgmac_priv));
	if (!ndev) {
		ret = -ENOMEM;
		goto err_alloc;
	}

	SET_NETDEV_DEV(ndev, &pdev->dev);
	priv = netdev_priv(ndev);
	platform_set_drvdata(pdev, ndev);

	ndev->netdev_ops = &xgmac_netdev_ops;
	SET_ETHTOOL_OPS(ndev, &xgmac_ethtool_ops);
	spin_lock_init(&priv->stats_lock);

	priv->device = &pdev->dev;
	priv->dev = ndev;
	priv->rx_pause = 1;
	priv->tx_pause = 1;

	priv->base = ioremap(res->start, resource_size(res));
	if (!priv->base) {
		netdev_err(ndev, "ioremap failed\n");
		ret = -ENOMEM;
		goto err_io;
	}

	uid = readl(priv->base + XGMAC_VERSION);
	netdev_info(ndev, "h/w version is 0x%x\n", uid);

	writel(0, priv->base + XGMAC_DMA_INTR_ENA);
	ndev->irq = platform_get_irq(pdev, 0);
	if (ndev->irq == -ENXIO) {
		netdev_err(ndev, "No irq resource\n");
		ret = ndev->irq;
		goto err_irq;
	}

	ret = request_irq(ndev->irq, xgmac_interrupt, 0,
			  dev_name(&pdev->dev), ndev);
	if (ret < 0) {
		netdev_err(ndev, "Could not request irq %d - ret %d)\n",
			   ndev->irq, ret);
		goto err_irq;
	}

	priv->pmt_irq = platform_get_irq(pdev, 1);
	if (priv->pmt_irq == -ENXIO) {
		netdev_err(ndev, "No pmt irq resource\n");
		ret = priv->pmt_irq;
		goto err_pmt_irq;
	}

	ret = request_irq(priv->pmt_irq, xgmac_pmt_interrupt, 0,
			  dev_name(&pdev->dev), ndev);
	if (ret < 0) {
		netdev_err(ndev, "Could not request irq %d - ret %d)\n",
			   priv->pmt_irq, ret);
		goto err_pmt_irq;
	}

	device_set_wakeup_capable(&pdev->dev, 1);
	if (device_can_wakeup(priv->device))
		priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */

	ndev->hw_features = NETIF_F_SG | NETIF_F_HIGHDMA;
	if (readl(priv->base + XGMAC_DMA_HW_FEATURE) & DMA_HW_FEAT_TXCOESEL)
		ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				     NETIF_F_RXCSUM;
	ndev->features |= ndev->hw_features;
	ndev->priv_flags |= IFF_UNICAST_FLT;

	/* Get the MAC address */
	xgmac_get_mac_addr(priv->base, ndev->dev_addr, 0);
	if (!is_valid_ether_addr(ndev->dev_addr))
		netdev_warn(ndev, "MAC address %pM not valid",
			    ndev->dev_addr);

	netif_napi_add(ndev, &priv->napi, xgmac_poll, 64);
	ret = register_netdev(ndev);
	if (ret)
		goto err_reg;

	return 0;

err_reg:
	netif_napi_del(&priv->napi);
	free_irq(priv->pmt_irq, ndev);
err_pmt_irq:
	free_irq(ndev->irq, ndev);
err_irq:
	iounmap(priv->base);
err_io:
	free_netdev(ndev);
err_alloc:
	release_mem_region(res->start, resource_size(res));
	platform_set_drvdata(pdev, NULL);
	return ret;
}
/**
 * xgmac_remove
 * @pdev: platform device pointer
 * Description: this function resets the TX/RX processes, disables the MAC RX/TX
 * changes the link status, releases the DMA descriptor rings,
 * unregisters the MDIO bus and unmaps the allocated memory.
 */
static int xgmac_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct xgmac_priv *priv = netdev_priv(ndev);
	struct resource *res;

	xgmac_mac_disable(priv->base);

	/* Free the IRQ lines */
	free_irq(ndev->irq, ndev);
	free_irq(priv->pmt_irq, ndev);

	platform_set_drvdata(pdev, NULL);
	unregister_netdev(ndev);
	netif_napi_del(&priv->napi);

	iounmap(priv->base);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, resource_size(res));

	free_netdev(ndev);

	return 0;
}
#ifdef CONFIG_PM_SLEEP
static void xgmac_pmt(void __iomem *ioaddr, unsigned long mode)
{
	unsigned int pmt = 0;

	if (mode & WAKE_MAGIC)
		pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_MAGIC_PKT;
	if (mode & WAKE_UCAST)
		pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_GLBL_UNICAST;

	writel(pmt, ioaddr + XGMAC_PMT);
}
static int xgmac_suspend(struct device *dev)
{
	struct net_device *ndev = platform_get_drvdata(to_platform_device(dev));
	struct xgmac_priv *priv = netdev_priv(ndev);
	u32 value;

	if (!ndev || !netif_running(ndev))
		return 0;

	netif_device_detach(ndev);
	napi_disable(&priv->napi);
	writel(0, priv->base + XGMAC_DMA_INTR_ENA);

	if (device_may_wakeup(priv->device)) {
		/* Stop TX/RX DMA Only */
		value = readl(priv->base + XGMAC_DMA_CONTROL);
		value &= ~(DMA_CONTROL_ST | DMA_CONTROL_SR);
		writel(value, priv->base + XGMAC_DMA_CONTROL);

		xgmac_pmt(priv->base, priv->wolopts);
	} else
		xgmac_mac_disable(priv->base);

	return 0;
}
static int xgmac_resume(struct device *dev)
{
	struct net_device *ndev = platform_get_drvdata(to_platform_device(dev));
	struct xgmac_priv *priv = netdev_priv(ndev);
	void __iomem *ioaddr = priv->base;

	if (!netif_running(ndev))
		return 0;

	xgmac_pmt(ioaddr, 0);

	/* Enable the MAC and DMA */
	xgmac_mac_enable(ioaddr);
	writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
	writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);

	netif_device_attach(ndev);
	napi_enable(&priv->napi);

	return 0;
}

static SIMPLE_DEV_PM_OPS(xgmac_pm_ops, xgmac_suspend, xgmac_resume);
#define XGMAC_PM_OPS (&xgmac_pm_ops)
#else
#define XGMAC_PM_OPS NULL
#endif /* CONFIG_PM_SLEEP */
static const struct of_device_id xgmac_of_match[] = {
	{ .compatible = "calxeda,hb-xgmac", },
	{},
};
MODULE_DEVICE_TABLE(of, xgmac_of_match);

static struct platform_driver xgmac_driver = {
	.driver = {
		.name = "calxedaxgmac",
		.of_match_table = xgmac_of_match,
	},
	.probe = xgmac_probe,
	.remove = xgmac_remove,
	.driver.pm = XGMAC_PM_OPS,
};

module_platform_driver(xgmac_driver);

MODULE_AUTHOR("Calxeda, Inc.");
MODULE_DESCRIPTION("Calxeda 10G XGMAC driver");
MODULE_LICENSE("GPL v2");