/*
 * Freescale Ethernet controllers
 *
 * Copyright (c) 2005 Intracom S.A.
 *  by Pantelis Antoniou <panto@intracom.gr>
 *
 * 2005 (c) MontaVista Software, Inc.
 * Vitaly Bordug <vbordug@ru.mvista.com>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/platform_device.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/gfp.h>

#include <asm/irq.h>
#include <linux/uaccess.h>

#include "fs_enet.h"
#include "fec.h"

/*************************************************/
#if defined(CONFIG_CPM1)
/* for a CPM1 __raw_xxx's are sufficient */
#define __fs_out32(addr, x)	__raw_writel(x, addr)
#define __fs_out16(addr, x)	__raw_writew(x, addr)
#define __fs_in32(addr)		__raw_readl(addr)
#define __fs_in16(addr)		__raw_readw(addr)
#else
/* for others play it safe */
#define __fs_out32(addr, x)	out_be32(addr, x)
#define __fs_out16(addr, x)	out_be16(addr, x)
#define __fs_in32(addr)		in_be32(addr)
#define __fs_in16(addr)		in_be16(addr)
#endif
/* write */
#define FW(_fecp, _reg, _v)	__fs_out32(&(_fecp)->fec_ ## _reg, (_v))

/* read */
#define FR(_fecp, _reg)		__fs_in32(&(_fecp)->fec_ ## _reg)

/* set bits */
#define FS(_fecp, _reg, _v)	FW(_fecp, _reg, FR(_fecp, _reg) | (_v))

/* clear bits */
#define FC(_fecp, _reg, _v)	FW(_fecp, _reg, FR(_fecp, _reg) & ~(_v))
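
/*
 * For example, FS(fecp, r_cntrl, FEC_RCNTRL_PROM) expands to a
 * read-modify-write that sets the PROM bit:
 *
 *	__fs_out32(&fecp->fec_r_cntrl,
 *		   __fs_in32(&fecp->fec_r_cntrl) | FEC_RCNTRL_PROM);
 */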
/*
 * Delay to wait for FEC reset command to complete (in us)
 */
#define FEC_RESET_DELAY		50
static int whack_reset(struct fec __iomem *fecp)
{
	int i;

	FW(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_RESET);
	for (i = 0; i < FEC_RESET_DELAY; i++) {
		if ((FR(fecp, ecntrl) & FEC_ECNTRL_RESET) == 0)
			return 0;	/* OK */

		udelay(1);
	}

	return -1;
}
static int do_pd_setup(struct fs_enet_private *fep)
{
	struct platform_device *ofdev = to_platform_device(fep->dev);

	fep->interrupt = irq_of_parse_and_map(ofdev->dev.of_node, 0);
	if (!fep->interrupt)
		return -EINVAL;

	fep->fec.fecp = of_iomap(ofdev->dev.of_node, 0);
	if (!fep->fec.fecp)
		return -EINVAL;

	return 0;
}
#define FEC_NAPI_EVENT_MSK	(FEC_ENET_RXF | FEC_ENET_RXB | FEC_ENET_TXF)
#define FEC_EVENT		(FEC_ENET_RXF | FEC_ENET_TXF)
#define FEC_ERR_EVENT_MSK	(FEC_ENET_HBERR | FEC_ENET_BABR | \
				 FEC_ENET_BABT | FEC_ENET_EBERR)
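
/*
 * FEC_NAPI_EVENT_MSK covers the events serviced from NAPI context,
 * FEC_EVENT the normal RX/TX completion events, and FEC_ERR_EVENT_MSK
 * the error conditions reported through ev_error(); setup_data() below
 * wires them into the fs_enet event fields.
 */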
static int setup_data(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	if (do_pd_setup(fep) != 0)
		return -EINVAL;

	fep->fec.hthi = 0;
	fep->fec.htlo = 0;

	fep->ev_napi = FEC_NAPI_EVENT_MSK;
	fep->ev = FEC_EVENT;
	fep->ev_err = FEC_ERR_EVENT_MSK;

	return 0;
}
static int allocate_bd(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	const struct fs_platform_info *fpi = fep->fpi;

	fep->ring_base = (void __force __iomem *)dma_alloc_coherent(fep->dev,
					(fpi->tx_ring + fpi->rx_ring) *
					sizeof(cbd_t), &fep->ring_mem_addr,
					GFP_KERNEL);
	if (fep->ring_base == NULL)
		return -ENOMEM;

	return 0;
}
static void free_bd(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	const struct fs_platform_info *fpi = fep->fpi;

	if (fep->ring_base)
		dma_free_coherent(fep->dev,
				  (fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t),
				  (void __force *)fep->ring_base,
				  fep->ring_mem_addr);
}
static void cleanup_data(struct net_device *dev)
{
	free_bd(dev);
}
static void set_promiscuous_mode(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct fec __iomem *fecp = fep->fec.fecp;

	FS(fecp, r_cntrl, FEC_RCNTRL_PROM);
}
static void set_multicast_start(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	fep->fec.hthi = 0;
	fep->fec.htlo = 0;
}
static void set_multicast_one(struct net_device *dev, const u8 *mac)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	int temp, hash_index, i, j;
	u32 crc, csrVal;
	u8 byte, msb;

	/* bitwise CRC-32 of the MAC address, data fed LSB first */
	crc = 0xffffffff;
	for (i = 0; i < 6; i++) {
		byte = mac[i];
		for (j = 0; j < 8; j++) {
			msb = crc >> 31;
			crc <<= 1;
			if (msb ^ (byte & 0x1))
				crc ^= FEC_CRC_POLY;
			byte >>= 1;
		}
	}

	/*
	 * The low six CRC bits select one of 64 hash bits: bits 1-5
	 * (bit-reversed) give the bit position, bit 0 picks the high
	 * or low hash register.
	 */
	temp = (crc & 0x3f) >> 1;
	hash_index = ((temp & 0x01) << 4) |
		     ((temp & 0x02) << 2) |
		     ((temp & 0x04)) |
		     ((temp & 0x08) >> 2) |
		     ((temp & 0x10) >> 4);
	csrVal = 1 << hash_index;
	if (crc & 1)
		fep->fec.hthi |= csrVal;
	else
		fep->fec.htlo |= csrVal;
}
static void set_multicast_finish(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct fec __iomem *fecp = fep->fec.fecp;

	/* if all multi or too many multicasts; just enable all */
	if ((dev->flags & IFF_ALLMULTI) != 0 ||
	    netdev_mc_count(dev) > FEC_MAX_MULTICAST_ADDRS) {
		fep->fec.hthi = 0xffffffffU;
		fep->fec.htlo = 0xffffffffU;
	}

	FC(fecp, r_cntrl, FEC_RCNTRL_PROM);
	FW(fecp, grp_hash_table_high, fep->fec.hthi);
	FW(fecp, grp_hash_table_low, fep->fec.htlo);
}
static void set_multicast_list(struct net_device *dev)
{
	struct netdev_hw_addr *ha;

	if ((dev->flags & IFF_PROMISC) == 0) {
		set_multicast_start(dev);
		netdev_for_each_mc_addr(ha, dev)
			set_multicast_one(dev, ha->addr);
		set_multicast_finish(dev);
	} else {
		set_promiscuous_mode(dev);
	}
}
static void restart(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct fec __iomem *fecp = fep->fec.fecp;
	const struct fs_platform_info *fpi = fep->fpi;
	dma_addr_t rx_bd_base_phys, tx_bd_base_phys;
	u32 addrhi, addrlo;
	int r;

	struct mii_bus *mii = dev->phydev->mdio.bus;
	struct fec_info *fec_inf = mii->priv;

	r = whack_reset(fep->fec.fecp);
	if (r != 0)
		dev_err(fep->dev, "FEC Reset FAILED!\n");

	/*
	 * Set station address.
	 */
	addrhi = ((u32) dev->dev_addr[0] << 24) |
		 ((u32) dev->dev_addr[1] << 16) |
		 ((u32) dev->dev_addr[2] <<  8) |
		  (u32) dev->dev_addr[3];
	addrlo = ((u32) dev->dev_addr[4] << 24) |
		 ((u32) dev->dev_addr[5] << 16);
	FW(fecp, addr_low, addrhi);
	FW(fecp, addr_high, addrlo);
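
	/*
	 * Note the packing: the first four address bytes go into one
	 * 32-bit register and the last two into the upper half of the
	 * other, matching the controller's station-address registers.
	 */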
	/*
	 * Reset all multicast.
	 */
	FW(fecp, grp_hash_table_high, fep->fec.hthi);
	FW(fecp, grp_hash_table_low, fep->fec.htlo);

	/*
	 * Set maximum receive buffer size.
	 */
	FW(fecp, r_buff_size, PKT_MAXBLR_SIZE);
#ifdef CONFIG_FS_ENET_MPC5121_FEC
	FW(fecp, r_cntrl, PKT_MAXBUF_SIZE << 16);
#else
	FW(fecp, r_hash, PKT_MAXBUF_SIZE);
#endif

	/* get physical address */
	rx_bd_base_phys = fep->ring_mem_addr;
	tx_bd_base_phys = rx_bd_base_phys + sizeof(cbd_t) * fpi->rx_ring;

	/*
	 * Set receive and transmit descriptor base.
	 */
	FW(fecp, r_des_start, rx_bd_base_phys);
	FW(fecp, x_des_start, tx_bd_base_phys);
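
	/*
	 * The RX descriptors sit at the start of the coherent block
	 * allocated in allocate_bd(); the TX descriptors follow the
	 * fpi->rx_ring RX entries.
	 */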
	fs_init_bds(dev);

	/*
	 * Enable big endian and don't care about SDMA FC.
	 */
#ifdef CONFIG_FS_ENET_MPC5121_FEC
	FS(fecp, dma_control, 0xC0000000);
#else
	FW(fecp, fun_code, 0x78000000);
#endif

	/*
	 * Set MII speed.
	 */
	FW(fecp, mii_speed, fec_inf->mii_speed);

	/*
	 * Clear any outstanding interrupt.
	 */
	FW(fecp, ievent, 0xffc0);
#ifndef CONFIG_FS_ENET_MPC5121_FEC
	FW(fecp, ivec, (virq_to_hw(fep->interrupt) / 2) << 29);

	FW(fecp, r_cntrl, FEC_RCNTRL_MII_MODE);	/* MII enable */
#else
	/*
	 * Only set MII/RMII mode - do not touch maximum frame length
	 * configured before.
	 */
	FS(fecp, r_cntrl, fpi->use_rmii ?
		FEC_RCNTRL_RMII_MODE : FEC_RCNTRL_MII_MODE);
#endif

	/*
	 * adjust to duplex mode
	 */
	if (dev->phydev->duplex) {
		FC(fecp, r_cntrl, FEC_RCNTRL_DRT);
		FS(fecp, x_cntrl, FEC_TCNTRL_FDEN);	/* FD enable */
	} else {
		FS(fecp, r_cntrl, FEC_RCNTRL_DRT);
		FC(fecp, x_cntrl, FEC_TCNTRL_FDEN);	/* FD disable */
	}

	/* Restore multicast and promiscuous settings */
	set_multicast_list(dev);

	/*
	 * Enable interrupts we wish to service.
	 */
	FW(fecp, imask, FEC_ENET_TXF | FEC_ENET_TXB |
	   FEC_ENET_RXF | FEC_ENET_RXB);

	/*
	 * And last, enable the transmit and receive processing.
	 */
	FW(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN);
	FW(fecp, r_des_active, 0x01000000);
}
static void stop(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	const struct fs_platform_info *fpi = fep->fpi;
	struct fec __iomem *fecp = fep->fec.fecp;

	struct fec_info *feci = dev->phydev->mdio.bus->priv;

	int i;

	if ((FR(fecp, ecntrl) & FEC_ECNTRL_ETHER_EN) == 0)
		return;		/* already down */

	FW(fecp, x_cntrl, 0x01);	/* Graceful transmit stop */
	for (i = 0; ((FR(fecp, ievent) & 0x10000000) == 0) &&
	     i < FEC_RESET_DELAY; i++)
		udelay(1);

	if (i == FEC_RESET_DELAY)
		dev_warn(fep->dev, "FEC timeout on graceful transmit stop\n");

	/*
	 * Disable FEC. Let only MII interrupts.
	 */
	FW(fecp, imask, 0);
	FC(fecp, ecntrl, FEC_ECNTRL_ETHER_EN);

	fs_cleanup_bds(dev);

	/* shut down FEC1? that's where the mii bus is */
	if (fpi->has_phy) {
		FS(fecp, r_cntrl, fpi->use_rmii ?
			FEC_RCNTRL_RMII_MODE :
			FEC_RCNTRL_MII_MODE);	/* MII/RMII enable */
		FS(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN);
		FW(fecp, ievent, FEC_ENET_MII);
		FW(fecp, mii_speed, feci->mii_speed);
	}
}
static void napi_clear_event_fs(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct fec __iomem *fecp = fep->fec.fecp;

	FW(fecp, ievent, FEC_NAPI_EVENT_MSK);
}
static void napi_enable_fs(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct fec __iomem *fecp = fep->fec.fecp;

	FS(fecp, imask, FEC_NAPI_EVENT_MSK);
}
static void napi_disable_fs(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct fec __iomem *fecp = fep->fec.fecp;

	FC(fecp, imask, FEC_NAPI_EVENT_MSK);
}
static void rx_bd_done(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct fec __iomem *fecp = fep->fec.fecp;

	FW(fecp, r_des_active, 0x01000000);
}
static void tx_kickstart(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct fec __iomem *fecp = fep->fec.fecp;

	FW(fecp, x_des_active, 0x01000000);
}
static u32 get_int_events(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct fec __iomem *fecp = fep->fec.fecp;

	return FR(fecp, ievent) & FR(fecp, imask);
}
static void clear_int_events(struct net_device *dev, u32 int_events)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct fec __iomem *fecp = fep->fec.fecp;

	FW(fecp, ievent, int_events);
}
static void ev_error(struct net_device *dev, u32 int_events)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	dev_warn(fep->dev, "FEC ERROR(s) 0x%x\n", int_events);
}
static int get_regs(struct net_device *dev, void *p, int *sizep)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	if (*sizep < sizeof(struct fec))
		return -EINVAL;

	memcpy_fromio(p, fep->fec.fecp, sizeof(struct fec));

	return 0;
}
static int get_regs_len(struct net_device *dev)
{
	return sizeof(struct fec);
}
static void tx_restart(struct net_device *dev)
{
	/* nothing */
}

/*************************************************************************/
const struct fs_ops fs_fec_ops = {
	.setup_data		= setup_data,
	.cleanup_data		= cleanup_data,
	.set_multicast_list	= set_multicast_list,
	.restart		= restart,
	.stop			= stop,
	.napi_clear_event	= napi_clear_event_fs,
	.napi_enable		= napi_enable_fs,
	.napi_disable		= napi_disable_fs,
	.rx_bd_done		= rx_bd_done,
	.tx_kickstart		= tx_kickstart,
	.get_int_events		= get_int_events,
	.clear_int_events	= clear_int_events,
	.ev_error		= ev_error,
	.get_regs		= get_regs,
	.get_regs_len		= get_regs_len,
	.tx_restart		= tx_restart,
	.allocate_bd		= allocate_bd,
	.free_bd		= free_bd,
};