/*
 * Ethernet on Serial Communications Controller (SCC) driver for
 * Motorola MPC8xx and MPC82xx.
 *
 * Copyright (c) 2003 Intracom S.A.
 *  by Pantelis Antoniou <panto@intracom.gr>
 *
 * 2005 (c) MontaVista Software, Inc.
 * Vitaly Bordug <vbordug@ru.mvista.com>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/platform_device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>

#include <linux/uaccess.h>

#include "fs_enet.h"

/*************************************************/

#if defined(CONFIG_CPM1)
/* for an 8xx the __raw_xxx accessors are sufficient */
#define __fs_out32(addr, x)	__raw_writel(x, addr)
#define __fs_out16(addr, x)	__raw_writew(x, addr)
#define __fs_out8(addr, x)	__raw_writeb(x, addr)
#define __fs_in32(addr)		__raw_readl(addr)
#define __fs_in16(addr)		__raw_readw(addr)
#define __fs_in8(addr)		__raw_readb(addr)
#else
/* for others play it safe */
#define __fs_out32(addr, x)	out_be32(addr, x)
#define __fs_out16(addr, x)	out_be16(addr, x)
#define __fs_in32(addr)		in_be32(addr)
#define __fs_in16(addr)		in_be16(addr)
#define __fs_out8(addr, x)	out_8(addr, x)
#define __fs_in8(addr)		in_8(addr)
#endif

/* write, read, set bits, clear bits */
#define W32(_p, _m, _v)	__fs_out32(&(_p)->_m, (_v))
#define R32(_p, _m)	__fs_in32(&(_p)->_m)
#define S32(_p, _m, _v)	W32(_p, _m, R32(_p, _m) | (_v))
#define C32(_p, _m, _v)	W32(_p, _m, R32(_p, _m) & ~(_v))

#define W16(_p, _m, _v)	__fs_out16(&(_p)->_m, (_v))
#define R16(_p, _m)	__fs_in16(&(_p)->_m)
#define S16(_p, _m, _v)	W16(_p, _m, R16(_p, _m) | (_v))
#define C16(_p, _m, _v)	W16(_p, _m, R16(_p, _m) & ~(_v))

#define W8(_p, _m, _v)	__fs_out8(&(_p)->_m, (_v))
#define R8(_p, _m)	__fs_in8(&(_p)->_m)
#define S8(_p, _m, _v)	W8(_p, _m, R8(_p, _m) | (_v))
#define C8(_p, _m, _v)	W8(_p, _m, R8(_p, _m) & ~(_v))

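/*
 * Example (illustrative): the S/C macros are read-modify-write wrappers
 * around the accessors above. For instance,
 *
 *	S16(sccp, scc_psmr, SCC_PSMR_PRO);
 *
 * expands roughly to
 *
 *	__fs_out16(&sccp->scc_psmr,
 *		   __fs_in16(&sccp->scc_psmr) | SCC_PSMR_PRO);
 *
 * i.e. read the 16-bit register, OR in the requested bits, write it back
 * (big-endian or raw access depending on the #if block above).
 */
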
#define SCC_MAX_MULTICAST_ADDRS	64

/*
 * Delay to wait for SCC reset command to complete (in us)
 */
#define SCC_RESET_DELAY		50

static inline int scc_cr_cmd(struct fs_enet_private *fep, u32 op)
{
	const struct fs_platform_info *fpi = fep->fpi;

	return cpm_command(fpi->cp_command, op);
}

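/*
 * Usage example (illustrative): restart() below issues
 *
 *	scc_cr_cmd(fep, CPM_CR_INIT_TRX);
 *
 * which writes the "init RX and TX parameters" opcode for this SCC's
 * channel (fpi->cp_command) into the CPM command register; cpm_command()
 * is expected to busy-wait until the CP has consumed the command.
 */
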
static int do_pd_setup(struct fs_enet_private *fep)
{
	struct platform_device *ofdev = to_platform_device(fep->dev);

	fep->interrupt = irq_of_parse_and_map(ofdev->dev.of_node, 0);
	if (!fep->interrupt)
		return -EINVAL;

	fep->scc.sccp = of_iomap(ofdev->dev.of_node, 0);
	if (!fep->scc.sccp)
		return -EINVAL;

	fep->scc.ep = of_iomap(ofdev->dev.of_node, 1);
	if (!fep->scc.ep) {
		iounmap(fep->scc.sccp);
		return -EINVAL;
	}

	return 0;
}

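/*
 * Note: the two of_iomap() indices above correspond to the two register
 * windows the device tree node is assumed to describe -- index 0 is the
 * SCC register block (scc_t), index 1 the SCC Ethernet parameter RAM
 * (scc_enet_t); the mapped IRQ is the SCC's interrupt line.
 */
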
#define SCC_NAPI_EVENT_MSK	(SCCE_ENET_RXF | SCCE_ENET_RXB | SCCE_ENET_TXB)
#define SCC_EVENT		(SCCE_ENET_RXF | SCCE_ENET_TXB)
#define SCC_ERR_EVENT_MSK	(SCCE_ENET_TXE | SCCE_ENET_BSY)

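/*
 * Note: SCC_NAPI_EVENT_MSK groups the receive/transmit completion events
 * serviced from NAPI context, while SCC_ERR_EVENT_MSK groups the error
 * events (transmit error, busy/receive overrun) reported through
 * ev_error(). setup_data() below also folds SCCE_ENET_TXE into fep->ev
 * so transmit errors raise the interrupt handler.
 */
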
static int setup_data(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	do_pd_setup(fep);

	fep->ev_napi = SCC_NAPI_EVENT_MSK;
	fep->ev = SCC_EVENT | SCCE_ENET_TXE;
	fep->ev_err = SCC_ERR_EVENT_MSK;

	return 0;
}

static int allocate_bd(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	const struct fs_platform_info *fpi = fep->fpi;

	fep->ring_mem_addr = cpm_dpalloc((fpi->tx_ring + fpi->rx_ring) *
					 sizeof(cbd_t), 8);
	if (IS_ERR_VALUE(fep->ring_mem_addr))
		return -ENOMEM;

	fep->ring_base = (void __iomem __force *)
		cpm_dpram_addr(fep->ring_mem_addr);

	return 0;
}

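/*
 * Layout sketch for the buffer descriptor block allocated above
 * (illustrative; it follows from the cpm_dpalloc() size here and the
 * scc_rbase/scc_tbase programming in restart() below):
 *
 *	ring_mem_addr + 0				rx_ring RX descriptors (cbd_t)
 *	ring_mem_addr + rx_ring * sizeof(cbd_t)		tx_ring TX descriptors (cbd_t)
 *
 * ring_base is the same dual-port RAM block remapped via cpm_dpram_addr()
 * for CPU-side access by the generic fs_enet descriptor handling.
 */
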
static void free_bd(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	cpm_dpfree(fep->ring_mem_addr);
}

static void cleanup_data(struct net_device *dev)
{
	/* nothing */
}

static void set_promiscuous_mode(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_t __iomem *sccp = fep->scc.sccp;

	S16(sccp, scc_psmr, SCC_PSMR_PRO);
}

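/*
 * Note: setting the PRO bit in PSMR puts the SCC into promiscuous mode,
 * so frames are accepted regardless of destination address;
 * set_multicast_finish() below clears it again whenever the
 * non-promiscuous filter is (re)programmed.
 */
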
static void set_multicast_start(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_enet_t __iomem *ep = fep->scc.ep;

	W16(ep, sen_gaddr1, 0);
	W16(ep, sen_gaddr2, 0);
	W16(ep, sen_gaddr3, 0);
	W16(ep, sen_gaddr4, 0);
}

static void set_multicast_one(struct net_device *dev, const u8 *mac)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_enet_t __iomem *ep = fep->scc.ep;
	u16 taddrh, taddrm, taddrl;

	taddrh = ((u16)mac[5] << 8) | mac[4];
	taddrm = ((u16)mac[3] << 8) | mac[2];
	taddrl = ((u16)mac[1] << 8) | mac[0];

	W16(ep, sen_taddrh, taddrh);
	W16(ep, sen_taddrm, taddrm);
	W16(ep, sen_taddrl, taddrl);
	scc_cr_cmd(fep, CPM_CR_SET_GADDR);
}

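/*
 * Note: the address is loaded into the temporary address registers
 * (sen_taddrh/m/l) and CPM_CR_SET_GADDR then asks the CPM microcode to
 * hash it and set the matching bit in the 64-bit group address filter
 * (sen_gaddr1..4) that set_multicast_start() cleared beforehand.
 */
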
static void set_multicast_finish(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_t __iomem *sccp = fep->scc.sccp;
	scc_enet_t __iomem *ep = fep->scc.ep;

	/* clear promiscuous always */
	C16(sccp, scc_psmr, SCC_PSMR_PRO);

	/* if all multi or too many multicasts, just enable all */
	if ((dev->flags & IFF_ALLMULTI) != 0 ||
	    netdev_mc_count(dev) > SCC_MAX_MULTICAST_ADDRS) {
		W16(ep, sen_gaddr1, 0xffff);
		W16(ep, sen_gaddr2, 0xffff);
		W16(ep, sen_gaddr3, 0xffff);
		W16(ep, sen_gaddr4, 0xffff);
	}
}

static void set_multicast_list(struct net_device *dev)
{
	struct netdev_hw_addr *ha;

	if ((dev->flags & IFF_PROMISC) == 0) {
		set_multicast_start(dev);
		netdev_for_each_mc_addr(ha, dev)
			set_multicast_one(dev, ha->addr);
		set_multicast_finish(dev);
	} else {
		set_promiscuous_mode(dev);
	}
}

/*
 * This function is called to start or restart the SCC during a link
 * change. This only happens when switching between half and full
 * duplex.
 */
static void restart(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_t __iomem *sccp = fep->scc.sccp;
	scc_enet_t __iomem *ep = fep->scc.ep;
	const struct fs_platform_info *fpi = fep->fpi;
	u16 paddrh, paddrm, paddrl;
	const unsigned char *mac;
	int i;

	C32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);

	/* clear everything (slow & steady does it) */
	for (i = 0; i < sizeof(*ep); i++)
		__fs_out8((u8 __iomem *)ep + i, 0);

	/* point to the buffer descriptor rings */
	W16(ep, sen_genscc.scc_rbase, fep->ring_mem_addr);
	W16(ep, sen_genscc.scc_tbase,
	    fep->ring_mem_addr + sizeof(cbd_t) * fpi->rx_ring);

	/* Initialize function code registers for big-endian.
	 */
#ifndef CONFIG_NOT_COHERENT_CACHE
	W8(ep, sen_genscc.scc_rfcr, SCC_EB | SCC_GBL);
	W8(ep, sen_genscc.scc_tfcr, SCC_EB | SCC_GBL);
#else
	W8(ep, sen_genscc.scc_rfcr, SCC_EB);
	W8(ep, sen_genscc.scc_tfcr, SCC_EB);
#endif

	/* Set maximum bytes per receive buffer.
	 * This appears to be an Ethernet frame size, not the buffer
	 * fragment size. It must be a multiple of four.
	 */
	W16(ep, sen_genscc.scc_mrblr, 0x5f0);

	/* Set CRC preset and mask.
	 */
	W32(ep, sen_cpres, 0xffffffff);
	W32(ep, sen_cmask, 0xdebb20e3);

	W32(ep, sen_crcec, 0);	/* CRC Error counter */
	W32(ep, sen_alec, 0);	/* alignment error counter */
	W32(ep, sen_disfc, 0);	/* discard frame counter */

	W16(ep, sen_pads, 0x8888);	/* Tx short frame pad character */
	W16(ep, sen_retlim, 15);	/* Retry limit threshold */

	W16(ep, sen_maxflr, 0x5ee);	/* maximum frame length register */

	W16(ep, sen_minflr, PKT_MINBUF_SIZE);	/* minimum frame length register */

	W16(ep, sen_maxd1, 0x000005f0);	/* maximum DMA1 length */
	W16(ep, sen_maxd2, 0x000005f0);	/* maximum DMA2 length */

	/* Clear hash tables.
	 */
	W16(ep, sen_gaddr1, 0);
	W16(ep, sen_gaddr2, 0);
	W16(ep, sen_gaddr3, 0);
	W16(ep, sen_gaddr4, 0);
	W16(ep, sen_iaddr1, 0);
	W16(ep, sen_iaddr2, 0);
	W16(ep, sen_iaddr3, 0);
	W16(ep, sen_iaddr4, 0);

	/* Set our station address.
	 */
	mac = dev->dev_addr;
	paddrh = ((u16)mac[5] << 8) | mac[4];
	paddrm = ((u16)mac[3] << 8) | mac[2];
	paddrl = ((u16)mac[1] << 8) | mac[0];

	W16(ep, sen_paddrh, paddrh);
	W16(ep, sen_paddrm, paddrm);
	W16(ep, sen_paddrl, paddrl);

	W16(ep, sen_pper, 0);
	W16(ep, sen_taddrl, 0);
	W16(ep, sen_taddrm, 0);
	W16(ep, sen_taddrh, 0);

	/* Issue the init RX and TX parameters command.
	 */
	scc_cr_cmd(fep, CPM_CR_INIT_TRX);

	/* Clear out any pending events.
	 */
	W16(sccp, scc_scce, 0xffff);

	/* Enable interrupts we wish to service.
	 */
	W16(sccp, scc_sccm, SCCE_ENET_TXE | SCCE_ENET_RXF | SCCE_ENET_TXB);

	/* Set GSMR_H to enable all normal operating modes.
	 * Set GSMR_L to enable Ethernet to MC68160.
	 */
	W32(sccp, scc_gsmrh, 0);
	W32(sccp, scc_gsmrl,
	    SCC_GSMRL_TCI | SCC_GSMRL_TPL_48 | SCC_GSMRL_TPP_10 |
	    SCC_GSMRL_MODE_ENET);

	/* Set sync/delimiters.
	 */
	W16(sccp, scc_dsr, 0xd555);

	/* Set processing mode. Use Ethernet CRC, catch broadcast, and
	 * start frame search 22 bit times after RENA.
	 */
	W16(sccp, scc_psmr, SCC_PSMR_ENCRC | SCC_PSMR_NIB22);

	/* Set full duplex mode if needed */
	if (dev->phydev->duplex)
		S16(sccp, scc_psmr, SCC_PSMR_LPB | SCC_PSMR_FDE);

	/* Restore multicast and promiscuous settings */
	set_multicast_list(dev);

	S32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
}

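/*
 * Worked example (illustrative): the station address is packed above
 * into three halfwords with the bytes swapped within each pair. For
 * the example address 00:11:22:33:44:55 (mac[0] = 0x00 ... mac[5] = 0x55)
 * this yields
 *
 *	paddrh = 0x5544, paddrm = 0x3322, paddrl = 0x1100
 *
 * which is what restart() programs into sen_paddrh/m/l.
 */
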
static void stop(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_t __iomem *sccp = fep->scc.sccp;
	int i;

	for (i = 0; (R16(sccp, scc_sccm) == 0) && i < SCC_RESET_DELAY; i++)
		udelay(1);

	if (i == SCC_RESET_DELAY)
		dev_warn(fep->dev, "SCC timeout on graceful transmit stop\n");

	W16(sccp, scc_sccm, 0);
	C32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
}

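/*
 * Note: stop() waits up to SCC_RESET_DELAY microseconds for the graceful
 * transmit stop, then masks all SCC interrupts (scc_sccm = 0) and clears
 * ENR/ENT in GSMR_L, disabling the Ethernet receiver and transmitter
 * until restart() re-enables them.
 */
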
static void napi_clear_event_fs(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_t __iomem *sccp = fep->scc.sccp;

	W16(sccp, scc_scce, SCC_NAPI_EVENT_MSK);
}

static void napi_enable_fs(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_t __iomem *sccp = fep->scc.sccp;

	S16(sccp, scc_sccm, SCC_NAPI_EVENT_MSK);
}

static void napi_disable_fs(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_t __iomem *sccp = fep->scc.sccp;

	C16(sccp, scc_sccm, SCC_NAPI_EVENT_MSK);
}

static void rx_bd_done(struct net_device *dev)
{
	/* nothing */
}

static void tx_kickstart(struct net_device *dev)
{
	/* nothing */
}

static u32 get_int_events(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_t __iomem *sccp = fep->scc.sccp;

	return (u32)R16(sccp, scc_scce);
}

static void clear_int_events(struct net_device *dev, u32 int_events)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_t __iomem *sccp = fep->scc.sccp;

	W16(sccp, scc_scce, int_events & 0xffff);
}

static void ev_error(struct net_device *dev, u32 int_events)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	dev_warn(fep->dev, "SCC ERROR(s) 0x%x\n", int_events);
}

static int get_regs(struct net_device *dev, void *p, int *sizep)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	if (*sizep < sizeof(scc_t) + sizeof(scc_enet_t __iomem *))
		return -EINVAL;

	memcpy_fromio(p, fep->scc.sccp, sizeof(scc_t));
	p = (char *)p + sizeof(scc_t);

	memcpy_fromio(p, fep->scc.ep, sizeof(scc_enet_t __iomem *));

	return 0;
}

static int get_regs_len(struct net_device *dev)
{
	return sizeof(scc_t) + sizeof(scc_enet_t __iomem *);
}

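/*
 * Note: the register dump handed to the fs_enet ethtool hooks is laid
 * out as the raw scc_t register image followed by only
 * sizeof(scc_enet_t __iomem *) bytes of parameter RAM, i.e. a
 * pointer-sized slice rather than the whole scc_enet_t structure.
 */
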
static void tx_restart(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	scc_cr_cmd(fep, CPM_CR_RESTART_TX);
}

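/*
 * Note: CPM_CR_RESTART_TX tells the CP to resume transmission on this
 * channel; the fs_enet core is assumed to call tx_restart() after a
 * transmit error (SCCE_ENET_TXE) has halted the transmitter.
 */
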
/*************************************************************************/

const struct fs_ops fs_scc_ops = {
	.setup_data		= setup_data,
	.cleanup_data		= cleanup_data,
	.set_multicast_list	= set_multicast_list,
	.restart		= restart,
	.stop			= stop,
	.napi_clear_event	= napi_clear_event_fs,
	.napi_enable		= napi_enable_fs,
	.napi_disable		= napi_disable_fs,
	.rx_bd_done		= rx_bd_done,
	.tx_kickstart		= tx_kickstart,
	.get_int_events		= get_int_events,
	.clear_int_events	= clear_int_events,
	.ev_error		= ev_error,
	.get_regs		= get_regs,
	.get_regs_len		= get_regs_len,
	.tx_restart		= tx_restart,
	.allocate_bd		= allocate_bd,