/*
 * Ethernet on Serial Communications Controller (SCC) driver for
 * Motorola MPC8xx and MPC82xx.
 *
 * Copyright (c) 2003 Intracom S.A.
 *  by Pantelis Antoniou <panto@intracom.gr>
 *
 * 2005 (c) MontaVista Software, Inc.
 * Vitaly Bordug <vbordug@ru.mvista.com>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/fs.h>
#include <linux/platform_device.h>

#include <asm/irq.h>
#include <asm/uaccess.h>

#ifdef CONFIG_8xx
#include <asm/8xx_immap.h>
#include <asm/pgtable.h>
#include <asm/mpc8xx.h>
#include <asm/commproc.h>
#endif

#ifdef CONFIG_PPC_CPM_NEW_BINDING
#include <asm/of_platform.h>
#endif

#include "fs_enet.h"
/*************************************************/
#if defined(CONFIG_CPM1)
/* for the 8xx the __raw_* accessors are sufficient */
#define __fs_out32(addr, x)     __raw_writel(x, addr)
#define __fs_out16(addr, x)     __raw_writew(x, addr)
#define __fs_out8(addr, x)      __raw_writeb(x, addr)
#define __fs_in32(addr)         __raw_readl(addr)
#define __fs_in16(addr)         __raw_readw(addr)
#define __fs_in8(addr)          __raw_readb(addr)
#else
/* for others, play it safe */
#define __fs_out32(addr, x)     out_be32(addr, x)
#define __fs_out16(addr, x)     out_be16(addr, x)
#define __fs_in32(addr)         in_be32(addr)
#define __fs_in16(addr)         in_be16(addr)
#endif
/* write, read, set bits, clear bits */
#define W32(_p, _m, _v) __fs_out32(&(_p)->_m, (_v))
#define R32(_p, _m)     __fs_in32(&(_p)->_m)
#define S32(_p, _m, _v) W32(_p, _m, R32(_p, _m) | (_v))
#define C32(_p, _m, _v) W32(_p, _m, R32(_p, _m) & ~(_v))

#define W16(_p, _m, _v) __fs_out16(&(_p)->_m, (_v))
#define R16(_p, _m)     __fs_in16(&(_p)->_m)
#define S16(_p, _m, _v) W16(_p, _m, R16(_p, _m) | (_v))
#define C16(_p, _m, _v) W16(_p, _m, R16(_p, _m) & ~(_v))

#define W8(_p, _m, _v)  __fs_out8(&(_p)->_m, (_v))
#define R8(_p, _m)      __fs_in8(&(_p)->_m)
#define S8(_p, _m, _v)  W8(_p, _m, R8(_p, _m) | (_v))
#define C8(_p, _m, _v)  W8(_p, _m, R8(_p, _m) & ~(_v))
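
/*
 * These take a pointer to the memory-mapped register block plus a member
 * name, e.g. S16(sccp, scc_psmr, SCC_PSMR_PRO) performs the 16-bit
 * read-modify-write on the PSMR register in one place instead of
 * open-coding it at each call site.
 */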
#define SCC_MAX_MULTICAST_ADDRS 64

/*
 * Delay to wait for SCC reset command to complete (in us)
 */
#define SCC_RESET_DELAY         50
#define MAX_CR_CMD_LOOPS        10000
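
/*
 * Issue a command for this SCC to the CPM command register and poll until
 * the CPM clears the FLG bit, giving up after MAX_CR_CMD_LOOPS reads.
 */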
static inline int scc_cr_cmd(struct fs_enet_private *fep, u32 op)
{
        const struct fs_platform_info *fpi = fep->fpi;
        int i;

        W16(cpmp, cp_cpcr, fpi->cp_command | CPM_CR_FLG | (op << 8));
        for (i = 0; i < MAX_CR_CMD_LOOPS; i++)
                if ((R16(cpmp, cp_cpcr) & CPM_CR_FLG) == 0)
                        return 0;

        printk(KERN_ERR "%s(): Not able to issue CPM command\n",
               __func__);
        return 1;
}
static int do_pd_setup(struct fs_enet_private *fep)
{
#ifdef CONFIG_PPC_CPM_NEW_BINDING
        struct of_device *ofdev = to_of_device(fep->dev);

        fep->interrupt = of_irq_to_resource(ofdev->node, 0, NULL);
        if (fep->interrupt == NO_IRQ)
                return -EINVAL;

        fep->scc.sccp = of_iomap(ofdev->node, 0);
        if (!fep->scc.sccp)
                return -EINVAL;

        fep->scc.ep = of_iomap(ofdev->node, 1);
        if (!fep->scc.ep) {
                iounmap(fep->scc.sccp);
                return -EINVAL;
        }
#else
        struct platform_device *pdev = to_platform_device(fep->dev);
        struct resource *r;

        /* Fill out IRQ field */
        fep->interrupt = platform_get_irq_byname(pdev, "interrupt");
        if (fep->interrupt < 0)
                return -EINVAL;

        r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
        fep->scc.sccp = ioremap(r->start, r->end - r->start + 1);
        if (fep->scc.sccp == NULL)
                return -EINVAL;

        r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pram");
        fep->scc.ep = ioremap(r->start, r->end - r->start + 1);
        if (fep->scc.ep == NULL)
                return -EINVAL;
#endif

        return 0;
}
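
/*
 * SCCE event bits grouped the way the generic fs_enet layer consumes them:
 * events NAPI polls for, plain rx/tx completion, and error conditions.
 */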
#define SCC_NAPI_RX_EVENT_MSK   (SCCE_ENET_RXF | SCCE_ENET_RXB)
#define SCC_RX_EVENT            (SCCE_ENET_RXF)
#define SCC_TX_EVENT            (SCCE_ENET_TXB)
#define SCC_ERR_EVENT_MSK       (SCCE_ENET_TXE | SCCE_ENET_BSY)
static int setup_data(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);

#ifdef CONFIG_PPC_CPM_NEW_BINDING
        struct fs_platform_info *fpi = fep->fpi;

        fep->scc.idx = fs_get_scc_index(fpi->fs_no);
        if ((unsigned int)fep->scc.idx >= 4) /* max 4 SCCs */
                return -EINVAL;

        fpi->cp_command = fep->scc.idx << 6;
#endif

        do_pd_setup(fep);

        fep->scc.hthi = 0;
        fep->scc.htlo = 0;

        fep->ev_napi_rx = SCC_NAPI_RX_EVENT_MSK;
        fep->ev_rx = SCC_RX_EVENT;
        fep->ev_tx = SCC_TX_EVENT | SCCE_ENET_TXE;
        fep->ev_err = SCC_ERR_EVENT_MSK;

        return 0;
}
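
/*
 * The rx and tx buffer descriptor rings live in CPM dual-port RAM:
 * allocate_bd() reserves one block for both rings and records the DPRAM
 * offset (used by the SCC) as well as a CPU-visible pointer to it.
 */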
static int allocate_bd(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        const struct fs_platform_info *fpi = fep->fpi;

        fep->ring_mem_addr = cpm_dpalloc((fpi->tx_ring + fpi->rx_ring) *
                                         sizeof(cbd_t), 8);
        if (IS_ERR_VALUE(fep->ring_mem_addr))
                return -ENOMEM;

        fep->ring_base = (void __iomem __force *)
                         cpm_dpram_addr(fep->ring_mem_addr);

        return 0;
}
static void free_bd(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);

        if (fep->ring_base)
                cpm_dpfree(fep->ring_mem_addr);
}
static void cleanup_data(struct net_device *dev)
{
        /* nothing */
}
static void set_promiscuous_mode(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        scc_t __iomem *sccp = fep->scc.sccp;

        S16(sccp, scc_psmr, SCC_PSMR_PRO);
}
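
/*
 * Hardware multicast filtering: set_multicast_start() clears the four group
 * address (hash) registers, set_multicast_one() latches one MAC address and
 * has the CPM add it with the SET GROUP ADDRESS command, and
 * set_multicast_finish() drops promiscuous mode again, falling back to an
 * all-ones hash when the list is too long to filter precisely.
 */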
static void set_multicast_start(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        scc_enet_t __iomem *ep = fep->scc.ep;

        W16(ep, sen_gaddr1, 0);
        W16(ep, sen_gaddr2, 0);
        W16(ep, sen_gaddr3, 0);
        W16(ep, sen_gaddr4, 0);
}
static void set_multicast_one(struct net_device *dev, const u8 *mac)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        scc_enet_t __iomem *ep = fep->scc.ep;
        u16 taddrh, taddrm, taddrl;

        taddrh = ((u16) mac[5] << 8) | mac[4];
        taddrm = ((u16) mac[3] << 8) | mac[2];
        taddrl = ((u16) mac[1] << 8) | mac[0];

        W16(ep, sen_taddrh, taddrh);
        W16(ep, sen_taddrm, taddrm);
        W16(ep, sen_taddrl, taddrl);
        scc_cr_cmd(fep, CPM_CR_SET_GADDR);
}
static void set_multicast_finish(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        scc_t __iomem *sccp = fep->scc.sccp;
        scc_enet_t __iomem *ep = fep->scc.ep;

        /* clear promiscuous always */
        C16(sccp, scc_psmr, SCC_PSMR_PRO);

        /* if all multi or too many multicasts; just enable all */
        if ((dev->flags & IFF_ALLMULTI) != 0 ||
            dev->mc_count > SCC_MAX_MULTICAST_ADDRS) {
                W16(ep, sen_gaddr1, 0xffff);
                W16(ep, sen_gaddr2, 0xffff);
                W16(ep, sen_gaddr3, 0xffff);
                W16(ep, sen_gaddr4, 0xffff);
        }
}
static void set_multicast_list(struct net_device *dev)
{
        struct dev_mc_list *pmc;

        if ((dev->flags & IFF_PROMISC) == 0) {
                set_multicast_start(dev);
                for (pmc = dev->mc_list; pmc != NULL; pmc = pmc->next)
                        set_multicast_one(dev, pmc->dmi_addr);
                set_multicast_finish(dev);
        } else
                set_promiscuous_mode(dev);
}
/*
 * This function is called to start or restart the SCC during a link change.
 * This only happens when switching between half and full duplex.
 */
static void restart(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        scc_t __iomem *sccp = fep->scc.sccp;
        scc_enet_t __iomem *ep = fep->scc.ep;
        const struct fs_platform_info *fpi = fep->fpi;
        u16 paddrh, paddrm, paddrl;
        const unsigned char *mac;
        int i;

        C32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);

        /* clear everything (slow & steady does it) */
        for (i = 0; i < sizeof(*ep); i++)
                __fs_out8((u8 __iomem *)ep + i, 0);

        /* point to bds */
        W16(ep, sen_genscc.scc_rbase, fep->ring_mem_addr);
        W16(ep, sen_genscc.scc_tbase,
            fep->ring_mem_addr + sizeof(cbd_t) * fpi->rx_ring);

        /* Initialize function code registers for big-endian.
         */
        W8(ep, sen_genscc.scc_rfcr, SCC_EB);
        W8(ep, sen_genscc.scc_tfcr, SCC_EB);

        /* Set maximum bytes per receive buffer.
         * This appears to be an Ethernet frame size, not the buffer
         * fragment size.  It must be a multiple of four.
         */
        W16(ep, sen_genscc.scc_mrblr, 0x5f0);

        /* Set CRC preset and mask.
         */
        W32(ep, sen_cpres, 0xffffffff);
        W32(ep, sen_cmask, 0xdebb20e3);

        W32(ep, sen_crcec, 0);  /* CRC Error counter */
        W32(ep, sen_alec, 0);   /* alignment error counter */
        W32(ep, sen_disfc, 0);  /* discard frame counter */

        W16(ep, sen_pads, 0x8888);      /* Tx short frame pad character */
        W16(ep, sen_retlim, 15);        /* Retry limit threshold */

        W16(ep, sen_maxflr, 0x5ee);     /* maximum frame length register */
        W16(ep, sen_minflr, PKT_MINBUF_SIZE); /* minimum frame length register */

        W16(ep, sen_maxd1, 0x000005f0); /* maximum DMA1 length */
        W16(ep, sen_maxd2, 0x000005f0); /* maximum DMA2 length */
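
        /*
         * Note: 0x5ee is 1518, the maximum untagged Ethernet frame including
         * FCS; 0x5f0 is the same limit rounded up to 1520, the multiple of
         * four required above.
         */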
        /* Clear hash tables.
         */
        W16(ep, sen_gaddr1, 0);
        W16(ep, sen_gaddr2, 0);
        W16(ep, sen_gaddr3, 0);
        W16(ep, sen_gaddr4, 0);
        W16(ep, sen_iaddr1, 0);
        W16(ep, sen_iaddr2, 0);
        W16(ep, sen_iaddr3, 0);
        W16(ep, sen_iaddr4, 0);

        /* Set our own station address.
         */
        mac = dev->dev_addr;
        paddrh = ((u16) mac[5] << 8) | mac[4];
        paddrm = ((u16) mac[3] << 8) | mac[2];
        paddrl = ((u16) mac[1] << 8) | mac[0];

        W16(ep, sen_paddrh, paddrh);
        W16(ep, sen_paddrm, paddrm);
        W16(ep, sen_paddrl, paddrl);

        W16(ep, sen_pper, 0);
        W16(ep, sen_taddrl, 0);
        W16(ep, sen_taddrm, 0);
        W16(ep, sen_taddrh, 0);

        fs_init_bds(dev);

        scc_cr_cmd(fep, CPM_CR_INIT_TRX);

        W16(sccp, scc_scce, 0xffff);

        /* Enable interrupts we wish to service.
         */
        W16(sccp, scc_sccm, SCCE_ENET_TXE | SCCE_ENET_RXF | SCCE_ENET_TXB);

        /* Set GSMR_H to enable all normal operating modes.
         * Set GSMR_L to enable Ethernet to MC68160.
         */
        W32(sccp, scc_gsmrh, 0);
        W32(sccp, scc_gsmrl,
            SCC_GSMRL_TCI | SCC_GSMRL_TPL_48 | SCC_GSMRL_TPP_10 |
            SCC_GSMRL_MODE_ENET);

        /* Set sync/delimiters.
         */
        W16(sccp, scc_dsr, 0xd555);

        /* Set processing mode.  Use Ethernet CRC, catch broadcast, and
         * start frame search 22 bit times after RENA.
         */
        W16(sccp, scc_psmr, SCC_PSMR_ENCRC | SCC_PSMR_NIB22);

        /* Set full duplex mode if needed */
        if (fep->phydev->duplex)
                S16(sccp, scc_psmr, SCC_PSMR_LPB | SCC_PSMR_FDE);

        S32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
}
static void stop(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        scc_t __iomem *sccp = fep->scc.sccp;
        int i;

        for (i = 0; (R16(sccp, scc_sccm) == 0) && i < SCC_RESET_DELAY; i++)
                udelay(1);

        if (i == SCC_RESET_DELAY)
                printk(KERN_WARNING DRV_MODULE_NAME
                       ": %s SCC timeout on graceful transmit stop\n",
                       dev->name);

        W16(sccp, scc_sccm, 0);
        C32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);

        fs_cleanup_bds(dev);
}
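
/*
 * Before request_irq(), configure edge/level sensing for external SIU
 * interrupt lines in the SIEL register (8xx only); under CONFIG_PPC_MERGE
 * the body compiles away and post_free_irq() has nothing to undo.
 */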
static void pre_request_irq(struct net_device *dev, int irq)
{
#ifndef CONFIG_PPC_MERGE
        immap_t *immap = fs_enet_immap;
        u32 siel;

        /* SIU interrupt */
        if (irq >= SIU_IRQ0 && irq < SIU_LEVEL7) {
                siel = in_be32(&immap->im_siu_conf.sc_siel);
                if ((irq & 1) == 0)
                        siel |= (0x80000000 >> irq);
                else
                        siel &= ~(0x80000000 >> (irq & ~1));
                out_be32(&immap->im_siu_conf.sc_siel, siel);
        }
#endif
}
static void post_free_irq(struct net_device *dev, int irq)
{
        /* nothing */
}
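
/*
 * The napi_* helpers below only touch the SCC event registers: SCCM masks
 * or unmasks the receive events used for NAPI polling, and writing the same
 * bits to SCCE acknowledges any that are already pending.
 */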
static void napi_clear_rx_event(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        scc_t __iomem *sccp = fep->scc.sccp;

        W16(sccp, scc_scce, SCC_NAPI_RX_EVENT_MSK);
}
static void napi_enable_rx(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        scc_t __iomem *sccp = fep->scc.sccp;

        S16(sccp, scc_sccm, SCC_NAPI_RX_EVENT_MSK);
}
static void napi_disable_rx(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        scc_t __iomem *sccp = fep->scc.sccp;

        C16(sccp, scc_sccm, SCC_NAPI_RX_EVENT_MSK);
}
static void rx_bd_done(struct net_device *dev)
{
        /* nothing */
}

static void tx_kickstart(struct net_device *dev)
{
        /* nothing */
}
static u32 get_int_events(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        scc_t __iomem *sccp = fep->scc.sccp;

        return (u32) R16(sccp, scc_scce);
}
static void clear_int_events(struct net_device *dev, u32 int_events)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        scc_t __iomem *sccp = fep->scc.sccp;

        W16(sccp, scc_scce, int_events & 0xffff);
}
static void ev_error(struct net_device *dev, u32 int_events)
{
        printk(KERN_WARNING DRV_MODULE_NAME
               ": %s SCC ERROR(s) 0x%x\n", dev->name, int_events);
}
static int get_regs(struct net_device *dev, void *p, int *sizep)
{
        struct fs_enet_private *fep = netdev_priv(dev);

        if (*sizep < sizeof(scc_t) + sizeof(scc_enet_t __iomem *))
                return -EINVAL;

        memcpy_fromio(p, fep->scc.sccp, sizeof(scc_t));
        p = (char *)p + sizeof(scc_t);

        memcpy_fromio(p, fep->scc.ep, sizeof(scc_enet_t __iomem *));

        return 0;
}
static int get_regs_len(struct net_device *dev)
{
        return sizeof(scc_t) + sizeof(scc_enet_t __iomem *);
}
static void tx_restart(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);

        scc_cr_cmd(fep, CPM_CR_RESTART_TX);
}
/*************************************************************************/
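
/*
 * Hook table handed to the generic fs_enet core, which calls back through
 * these SCC-specific operations for setup, multicast filtering, NAPI event
 * handling and restarting the controller.
 */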
const struct fs_ops fs_scc_ops = {
        .setup_data             = setup_data,
        .cleanup_data           = cleanup_data,
        .set_multicast_list     = set_multicast_list,
        .restart                = restart,
        .stop                   = stop,
        .pre_request_irq        = pre_request_irq,
        .post_free_irq          = post_free_irq,
        .napi_clear_rx_event    = napi_clear_rx_event,
        .napi_enable_rx         = napi_enable_rx,
        .napi_disable_rx        = napi_disable_rx,
        .rx_bd_done             = rx_bd_done,
        .tx_kickstart           = tx_kickstart,
        .get_int_events         = get_int_events,
        .clear_int_events       = clear_int_events,
        .ev_error               = ev_error,
        .get_regs               = get_regs,
        .get_regs_len           = get_regs_len,
        .tx_restart             = tx_restart,
        .allocate_bd            = allocate_bd,
        .free_bd                = free_bd,
};