/*
 * FCC driver for Motorola MPC82xx (PQ2).
 *
 * Copyright (c) 2003 Intracom S.A.
 *  by Pantelis Antoniou <panto@intracom.gr>
 *
 * 2005 (c) MontaVista Software, Inc.
 * Vitaly Bordug <vbordug@ru.mvista.com>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/of_device.h>
#include <linux/gfp.h>

#include <asm/immap_cpm2.h>
#include <asm/mpc8260.h>
#include <asm/cpm2.h>

#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/uaccess.h>

#include "fs_enet.h"
/*************************************************/

/* FCC access macros */

/* write, read, set bits, clear bits */
#define W32(_p, _m, _v)	out_be32(&(_p)->_m, (_v))
#define R32(_p, _m)	in_be32(&(_p)->_m)
#define S32(_p, _m, _v)	W32(_p, _m, R32(_p, _m) | (_v))
#define C32(_p, _m, _v)	W32(_p, _m, R32(_p, _m) & ~(_v))

#define W16(_p, _m, _v)	out_be16(&(_p)->_m, (_v))
#define R16(_p, _m)	in_be16(&(_p)->_m)
#define S16(_p, _m, _v)	W16(_p, _m, R16(_p, _m) | (_v))
#define C16(_p, _m, _v)	W16(_p, _m, R16(_p, _m) & ~(_v))

#define W8(_p, _m, _v)	out_8(&(_p)->_m, (_v))
#define R8(_p, _m)	in_8(&(_p)->_m)
#define S8(_p, _m, _v)	W8(_p, _m, R8(_p, _m) | (_v))
#define C8(_p, _m, _v)	W8(_p, _m, R8(_p, _m) & ~(_v))
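
/*
 * All register accesses go through the big-endian I/O accessors above.
 * For example, S32(fccp, fcc_fpsmr, FCC_PSMR_PRO) is a read-modify-write
 * that expands to out_be32(&fccp->fcc_fpsmr,
 * in_be32(&fccp->fcc_fpsmr) | FCC_PSMR_PRO), i.e. it sets the promiscuous
 * bit without disturbing the rest of the register.
 */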
/*************************************************/

#define FCC_MAX_MULTICAST_ADDRS	64

#define mk_mii_read(REG)	(0x60020000 | ((REG & 0x1f) << 18))
#define mk_mii_write(REG, VAL)	(0x50020000 | ((REG & 0x1f) << 18) | (VAL & 0xffff))
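
/*
 * Note: mk_mii_read()/mk_mii_write() appear to build 32-bit MII management
 * frames in the usual clause-22 layout (start and opcode in the top nibble,
 * register address in bits 22-18, turnaround, 16 bits of data); 0x6...
 * selects a read and 0x5... a write.  They are not referenced elsewhere in
 * this file, and the PHY address field is presumably filled in by the MII
 * bus code.
 */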
#define MAX_CR_CMD_LOOPS	10000
static inline int fcc_cr_cmd(struct fs_enet_private *fep, u32 op)
{
	const struct fs_platform_info *fpi = fep->fpi;

	return cpm_command(fpi->cp_command, op);
}
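
/*
 * Bind the per-device resources described by the device tree node: the
 * interrupt, the three FCC register regions (general FCC registers, the
 * Ethernet parameter RAM and the FCC extension registers) and a 128-byte,
 * 32-byte-aligned slice of DPRAM used for the internal buffers and the
 * pad pattern.
 */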
static int do_pd_setup(struct fs_enet_private *fep)
{
	struct platform_device *ofdev = to_platform_device(fep->dev);
	struct fs_platform_info *fpi = fep->fpi;
	int ret = -EINVAL;

	fep->interrupt = of_irq_to_resource(ofdev->dev.of_node, 0, NULL);
	if (fep->interrupt == NO_IRQ)
		goto out;

	fep->fcc.fccp = of_iomap(ofdev->dev.of_node, 0);
	if (!fep->fcc.fccp)
		goto out;

	fep->fcc.ep = of_iomap(ofdev->dev.of_node, 1);
	if (!fep->fcc.ep)
		goto out_fccp;

	fep->fcc.fcccp = of_iomap(ofdev->dev.of_node, 2);
	if (!fep->fcc.fcccp)
		goto out_ep;

	fep->fcc.mem = (void __iomem *)cpm2_immr;
	fpi->dpram_offset = cpm_dpalloc(128, 32);
	if (IS_ERR_VALUE(fpi->dpram_offset)) {
		ret = fpi->dpram_offset;
		goto out_fcccp;
	}

	return 0;

out_fcccp:
	iounmap(fep->fcc.fcccp);
out_ep:
	iounmap(fep->fcc.ep);
out_fccp:
	iounmap(fep->fcc.fccp);
out:
	return ret;
}
#define FCC_NAPI_RX_EVENT_MSK	(FCC_ENET_RXF | FCC_ENET_RXB)
#define FCC_RX_EVENT		(FCC_ENET_RXF)
#define FCC_TX_EVENT		(FCC_ENET_TXB)
#define FCC_ERR_EVENT_MSK	(FCC_ENET_TXE)
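
/*
 * FCCE/FCCM event bits as grouped for the generic fs_enet core:
 * FCC_NAPI_RX_EVENT_MSK covers the receive events handled from NAPI
 * context, FCC_TX_EVENT signals transmit-buffer completion, and
 * FCC_ERR_EVENT_MSK marks the transmit-error events reported via
 * ev_error().
 */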
static int setup_data(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	if (do_pd_setup(fep) != 0)
		return -EINVAL;

	fep->ev_napi_rx = FCC_NAPI_RX_EVENT_MSK;
	fep->ev_rx = FCC_RX_EVENT;
	fep->ev_tx = FCC_TX_EVENT;
	fep->ev_err = FCC_ERR_EVENT_MSK;

	return 0;
}
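
/*
 * Allocate the buffer descriptor rings: one DMA-coherent block holding
 * fpi->rx_ring receive descriptors followed by fpi->tx_ring transmit
 * descriptors.  ring_mem_addr is the bus address later programmed into
 * the parameter RAM by restart().
 */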
static int allocate_bd(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	const struct fs_platform_info *fpi = fep->fpi;

	fep->ring_base = (void __iomem __force *)dma_alloc_coherent(fep->dev,
					    (fpi->tx_ring + fpi->rx_ring) *
					    sizeof(cbd_t), &fep->ring_mem_addr,
					    GFP_KERNEL);
	if (fep->ring_base == NULL)
		return -ENOMEM;

	return 0;
}
static void free_bd(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	const struct fs_platform_info *fpi = fep->fpi;

	if (fep->ring_base)
		dma_free_coherent(fep->dev,
			(fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t),
			(void __force *)fep->ring_base, fep->ring_mem_addr);
}
static void cleanup_data(struct net_device *dev)
{
	/* nothing */
}
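
/*
 * Multicast/promiscuous filtering: set_multicast_list() either sets the
 * promiscuous bit in FPSMR or rebuilds the group address hash, by clearing
 * it (set_multicast_start), feeding each address to the CPM through the
 * TADDR registers and CPM_CR_SET_GADDR (set_multicast_one), and finally
 * falling back to all-multicast when the list is too long
 * (set_multicast_finish).
 */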
static void set_promiscuous_mode(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	fcc_t __iomem *fccp = fep->fcc.fccp;

	S32(fccp, fcc_fpsmr, FCC_PSMR_PRO);
}
static void set_multicast_start(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	fcc_enet_t __iomem *ep = fep->fcc.ep;

	W32(ep, fen_gaddrh, 0);
	W32(ep, fen_gaddrl, 0);
}
static void set_multicast_one(struct net_device *dev, const u8 *mac)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	fcc_enet_t __iomem *ep = fep->fcc.ep;
	u16 taddrh, taddrm, taddrl;

	taddrh = ((u16)mac[5] << 8) | mac[4];
	taddrm = ((u16)mac[3] << 8) | mac[2];
	taddrl = ((u16)mac[1] << 8) | mac[0];

	W16(ep, fen_taddrh, taddrh);
	W16(ep, fen_taddrm, taddrm);
	W16(ep, fen_taddrl, taddrl);
	fcc_cr_cmd(fep, CPM_CR_SET_GADDR);
}
static void set_multicast_finish(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	fcc_t __iomem *fccp = fep->fcc.fccp;
	fcc_enet_t __iomem *ep = fep->fcc.ep;

	/* clear promiscuous always */
	C32(fccp, fcc_fpsmr, FCC_PSMR_PRO);

	/* if all multi or too many multicasts; just enable all */
	if ((dev->flags & IFF_ALLMULTI) != 0 ||
	    netdev_mc_count(dev) > FCC_MAX_MULTICAST_ADDRS) {

		W32(ep, fen_gaddrh, 0xffffffff);
		W32(ep, fen_gaddrl, 0xffffffff);
	}

	/* save the group address registers for the restart path */
	fep->fcc.gaddrh = R32(ep, fen_gaddrh);
	fep->fcc.gaddrl = R32(ep, fen_gaddrl);
}
static void set_multicast_list(struct net_device *dev)
{
	struct netdev_hw_addr *ha;

	if ((dev->flags & IFF_PROMISC) == 0) {
		set_multicast_start(dev);
		netdev_for_each_mc_addr(ha, dev)
			set_multicast_one(dev, ha->addr);
		set_multicast_finish(dev);
	} else
		set_promiscuous_mode(dev);
}
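
/*
 * Bring the FCC to a fully (re)initialized state: disable the controller,
 * zero and reprogram the Ethernet parameter RAM (BD ring bases, DPRAM
 * internal buffer pointers, CRC preset/mask, station address, frame length
 * limits, statistics counters), then configure GFMR/FPSMR for the
 * negotiated speed and duplex, restore the multicast state and re-enable
 * receive and transmit.
 */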
static void restart(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	const struct fs_platform_info *fpi = fep->fpi;
	fcc_t __iomem *fccp = fep->fcc.fccp;
	fcc_c_t __iomem *fcccp = fep->fcc.fcccp;
	fcc_enet_t __iomem *ep = fep->fcc.ep;
	dma_addr_t rx_bd_base_phys, tx_bd_base_phys;
	u16 paddrh, paddrm, paddrl;
	const unsigned char *mac;
	int i;

	C32(fccp, fcc_gfmr, FCC_GFMR_ENR | FCC_GFMR_ENT);

	/* clear everything (slow & steady does it) */
	for (i = 0; i < sizeof(*ep); i++)
		out_8((u8 __iomem *)ep + i, 0);

	/* get physical address */
	rx_bd_base_phys = fep->ring_mem_addr;
	tx_bd_base_phys = rx_bd_base_phys + sizeof(cbd_t) * fpi->rx_ring;

	/* point to bds */
	W32(ep, fen_genfcc.fcc_rbase, rx_bd_base_phys);
	W32(ep, fen_genfcc.fcc_tbase, tx_bd_base_phys);

	/* Set maximum bytes per receive buffer.
	 * It must be a multiple of 32.
	 */
	W16(ep, fen_genfcc.fcc_mrblr, PKT_MAXBLR_SIZE);

	W32(ep, fen_genfcc.fcc_rstate, (CPMFCR_GBL | CPMFCR_EB) << 24);
	W32(ep, fen_genfcc.fcc_tstate, (CPMFCR_GBL | CPMFCR_EB) << 24);

	/* Allocate space in the reserved FCC area of DPRAM for the
	 * internal buffers.  No one uses this space (yet), so we
	 * can do this.  Later, we will add resource management for
	 * this area.
	 */
	W16(ep, fen_genfcc.fcc_riptr, fpi->dpram_offset);
	W16(ep, fen_genfcc.fcc_tiptr, fpi->dpram_offset + 32);

	W16(ep, fen_padptr, fpi->dpram_offset + 64);

	/* fill with special symbol... */
	memset_io(fep->fcc.mem + fpi->dpram_offset + 64, 0x88, 32);

	W32(ep, fen_genfcc.fcc_rbptr, 0);
	W32(ep, fen_genfcc.fcc_tbptr, 0);
	W32(ep, fen_genfcc.fcc_rcrc, 0);
	W32(ep, fen_genfcc.fcc_tcrc, 0);
	W16(ep, fen_genfcc.fcc_res1, 0);
	W32(ep, fen_genfcc.fcc_res2, 0);

	/* no CAM */
	W32(ep, fen_camptr, 0);

	/* Set CRC preset and mask */
	W32(ep, fen_cmask, 0xdebb20e3);
	W32(ep, fen_cpres, 0xffffffff);

	W32(ep, fen_crcec, 0);		/* CRC Error counter */
	W32(ep, fen_alec, 0);		/* alignment error counter */
	W32(ep, fen_disfc, 0);		/* discard frame counter */
	W16(ep, fen_retlim, 15);	/* Retry limit threshold */
	W16(ep, fen_pper, 0);		/* Normal persistence */

	/* set group address */
	W32(ep, fen_gaddrh, fep->fcc.gaddrh);
	W32(ep, fen_gaddrl, fep->fcc.gaddrl);

	/* Clear hash filter tables */
	W32(ep, fen_iaddrh, 0);
	W32(ep, fen_iaddrl, 0);

	/* Clear the Out-of-sequence TxBD */
	W16(ep, fen_tfcstat, 0);
	W16(ep, fen_tfclen, 0);
	W32(ep, fen_tfcptr, 0);

	W16(ep, fen_mflr, PKT_MAXBUF_SIZE);	/* maximum frame length register */
	W16(ep, fen_minflr, PKT_MINBUF_SIZE);	/* minimum frame length register */

	/* set address */
	mac = dev->dev_addr;
	paddrh = ((u16)mac[5] << 8) | mac[4];
	paddrm = ((u16)mac[3] << 8) | mac[2];
	paddrl = ((u16)mac[1] << 8) | mac[0];

	W16(ep, fen_paddrh, paddrh);
	W16(ep, fen_paddrm, paddrm);
	W16(ep, fen_paddrl, paddrl);

	W16(ep, fen_taddrh, 0);
	W16(ep, fen_taddrm, 0);
	W16(ep, fen_taddrl, 0);

	W16(ep, fen_maxd1, 1520);	/* maximum DMA1 length */
	W16(ep, fen_maxd2, 1520);	/* maximum DMA2 length */

	/* Clear stat counters, in case we ever enable RMON */
	W32(ep, fen_octc, 0);
	W32(ep, fen_colc, 0);
	W32(ep, fen_broc, 0);
	W32(ep, fen_mulc, 0);
	W32(ep, fen_uspc, 0);
	W32(ep, fen_frgc, 0);
	W32(ep, fen_ospc, 0);
	W32(ep, fen_jbrc, 0);
	W32(ep, fen_p64c, 0);
	W32(ep, fen_p65c, 0);
	W32(ep, fen_p128c, 0);
	W32(ep, fen_p256c, 0);
	W32(ep, fen_p512c, 0);
	W32(ep, fen_p1024c, 0);

	W16(ep, fen_rfthr, 0);	/* Suggested by manual */
	W16(ep, fen_rfcnt, 0);
	W16(ep, fen_cftype, 0);

	fs_init_bds(dev);

	/* adjust to speed (for RMII mode) */
	if (fpi->use_rmii) {
		if (fep->phydev->speed == 100)
			C8(fcccp, fcc_gfemr, 0x20);
		else
			S8(fcccp, fcc_gfemr, 0x20);
	}

	fcc_cr_cmd(fep, CPM_CR_INIT_TRX);

	/* clear events */
	W16(fccp, fcc_fcce, 0xffff);

	/* Enable interrupts we wish to service */
	W16(fccp, fcc_fccm, FCC_ENET_TXE | FCC_ENET_RXF | FCC_ENET_TXB);

	/* Set GFMR to enable Ethernet operating mode */
	W32(fccp, fcc_gfmr, FCC_GFMR_TCI | FCC_GFMR_MODE_ENET);

	/* set sync/delimiters */
	W16(fccp, fcc_fdsr, 0xd555);

	W32(fccp, fcc_fpsmr, FCC_PSMR_ENCRC);

	if (fpi->use_rmii)
		S32(fccp, fcc_fpsmr, FCC_PSMR_RMII);

	/* adjust to duplex mode */
	if (fep->phydev->duplex)
		S32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB);
	else
		C32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB);

	/* Restore multicast and promiscuous settings */
	set_multicast_list(dev);

	S32(fccp, fcc_gfmr, FCC_GFMR_ENR | FCC_GFMR_ENT);
}
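
/*
 * Stop the controller: clear ENR/ENT in GFMR so the FCC stops receiving
 * and transmitting, acknowledge any pending events and mask all FCC
 * interrupts.
 */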
static void stop(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	fcc_t __iomem *fccp = fep->fcc.fccp;

	/* stop ethernet */
	C32(fccp, fcc_gfmr, FCC_GFMR_ENR | FCC_GFMR_ENT);

	/* clear events */
	W16(fccp, fcc_fcce, 0xffff);

	/* clear interrupt mask */
	W16(fccp, fcc_fccm, 0);

	fs_cleanup_bds(dev);
}
static void napi_clear_rx_event(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	fcc_t __iomem *fccp = fep->fcc.fccp;

	W16(fccp, fcc_fcce, FCC_NAPI_RX_EVENT_MSK);
}
static void napi_enable_rx(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	fcc_t __iomem *fccp = fep->fcc.fccp;

	S16(fccp, fcc_fccm, FCC_NAPI_RX_EVENT_MSK);
}
static void napi_disable_rx(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	fcc_t __iomem *fccp = fep->fcc.fccp;

	C16(fccp, fcc_fccm, FCC_NAPI_RX_EVENT_MSK);
}
static void rx_bd_done(struct net_device *dev)
{
	/* nothing */
}
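
/*
 * Kick the transmitter: setting the transmit-on-demand (TOD) bit in FTODR
 * makes the CPM poll the transmit BD ring immediately instead of waiting
 * for its next scheduled poll.
 */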
static void tx_kickstart(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	fcc_t __iomem *fccp = fep->fcc.fccp;

	S16(fccp, fcc_ftodr, 0x8000);
}
static u32 get_int_events(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	fcc_t __iomem *fccp = fep->fcc.fccp;

	return (u32)R16(fccp, fcc_fcce);
}
static void clear_int_events(struct net_device *dev, u32 int_events)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	fcc_t __iomem *fccp = fep->fcc.fccp;

	W16(fccp, fcc_fcce, int_events & 0xffff);
}
static void ev_error(struct net_device *dev, u32 int_events)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	dev_warn(fep->dev, "FS_ENET ERROR(s) 0x%x\n", int_events);
}
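
/*
 * ethtool register dump: copy out the FCC internal registers, then the
 * Ethernet parameter RAM, then one byte of the FCC extension register
 * block; get_regs_len() reports the matching size.
 */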
static int get_regs(struct net_device *dev, void *p, int *sizep)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	if (*sizep < sizeof(fcc_t) + sizeof(fcc_enet_t) + 1)
		return -EINVAL;

	memcpy_fromio(p, fep->fcc.fccp, sizeof(fcc_t));
	p = (char *)p + sizeof(fcc_t);

	memcpy_fromio(p, fep->fcc.ep, sizeof(fcc_enet_t));
	p = (char *)p + sizeof(fcc_enet_t);

	memcpy_fromio(p, fep->fcc.fcccp, 1);

	return 0;
}
static int get_regs_len(struct net_device *dev)
{
	return sizeof(fcc_t) + sizeof(fcc_enet_t) + 1;
}
/* Some transmit errors cause the transmitter to shut
 * down.  We now issue a restart transmit.
 * Also, to work around 8260 device erratum CPM37, we must
 * disable and then re-enable the transmitter following a
 * Late Collision, Underrun, or Retry Limit error.
 * In addition, tbptr may point beyond BDs still marked
 * as ready due to internal pipelining, so we need to look back
 * through the BDs and adjust tbptr to point to the last BD
 * marked as ready.  This may result in some buffers being
 * retransmitted.
 */
static void tx_restart(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	fcc_t __iomem *fccp = fep->fcc.fccp;
	const struct fs_platform_info *fpi = fep->fpi;
	fcc_enet_t __iomem *ep = fep->fcc.ep;
	cbd_t __iomem *curr_tbptr;
	cbd_t __iomem *recheck_bd;
	cbd_t __iomem *prev_bd;
	cbd_t __iomem *last_tx_bd;

	last_tx_bd = fep->tx_bd_base + (fpi->tx_ring * sizeof(cbd_t));

	/* get the current bd held in TBPTR and scan back from this point */
	recheck_bd = curr_tbptr = (cbd_t __iomem *)
		((R32(ep, fen_genfcc.fcc_tbptr) - fep->ring_mem_addr) +
		fep->ring_base);

	prev_bd = (recheck_bd == fep->tx_bd_base) ? last_tx_bd : recheck_bd - 1;

	/* Move through the bds in reverse, look for the earliest buffer
	 * that is not ready.  Adjust TBPTR to the following buffer */
	while ((CBDR_SC(prev_bd) & BD_ENET_TX_READY) != 0) {
		/* Go back one buffer */
		recheck_bd = prev_bd;

		/* update the previous buffer */
		prev_bd = (prev_bd == fep->tx_bd_base) ?
					last_tx_bd : prev_bd - 1;

		/* We should never see all bds marked as ready, check anyway */
		if (recheck_bd == curr_tbptr)
			break;
	}

	/* Now update the TBPTR and dirty flag to the current buffer */
	W32(ep, fen_genfcc.fcc_tbptr,
		(uint)(((void *)recheck_bd - fep->ring_base) +
		fep->ring_mem_addr));
	fep->dirty_tx = recheck_bd;

	C32(fccp, fcc_gfmr, FCC_GFMR_ENT);
	udelay(10);
	S32(fccp, fcc_gfmr, FCC_GFMR_ENT);

	fcc_cr_cmd(fep, CPM_CR_RESTART_TX);
}

/*************************************************************************/
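
/*
 * Hook table handed to the common fs_enet driver core; everything above is
 * reached through these callbacks.
 */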
const struct fs_ops fs_fcc_ops = {
	.setup_data		= setup_data,
	.cleanup_data		= cleanup_data,
	.set_multicast_list	= set_multicast_list,
	.restart		= restart,
	.stop			= stop,
	.napi_clear_rx_event	= napi_clear_rx_event,
	.napi_enable_rx		= napi_enable_rx,
	.napi_disable_rx	= napi_disable_rx,
	.rx_bd_done		= rx_bd_done,
	.tx_kickstart		= tx_kickstart,
	.get_int_events		= get_int_events,
	.clear_int_events	= clear_int_events,
	.ev_error		= ev_error,
	.get_regs		= get_regs,
	.get_regs_len		= get_regs_len,
	.tx_restart		= tx_restart,
	.allocate_bd		= allocate_bd,
	.free_bd		= free_bd,
};