2 * linux/drivers/net/arm/eth_s3c4510b.c
4 * Copyright (c) 2004 Cucy Systems (http://www.cucy.com)
5 * Curt Brune <curt@cucy.com>
7 * Re-written from scratch for 2.6.x after studying the original 2.4.x
10 * Copyright (C) 2002 Mac Wang <mac@os.nctu.edu.tw>
14 #include <linux/config.h>
15 #include <linux/module.h>
16 #include <linux/init.h>
18 #include <linux/interrupt.h>
19 #include <linux/skbuff.h>
20 #include <linux/netdevice.h>
21 #include <linux/etherdevice.h>
24 #include <asm/arch/hardware.h>
25 #include "eth_s3c4510b.h"
#define __DRIVER_NAME "Samsung S3C4510B Ethernet Driver version 0.2 (2004-06-13) <curt@cucy.com>"

/*
 * Debug/error print helpers.  Both are wrapped in do { } while (0) so a
 * call followed by ';' is a single statement and is safe inside an
 * unbraced if/else (the originals expanded to "printk(...);", whose
 * embedded semicolon broke that usage).
 *
 * NOTE(review): the conditional-compilation guard lines were lost in
 * this copy; DEBUG is the reconstructed guard symbol -- confirm against
 * the original build configuration.
 */
#ifdef DEBUG
# define _DPRINTK(format, args...) \
	do { printk(KERN_INFO "%s():%05d " format ".\n", __FUNCTION__, __LINE__, ## args); } while (0)
#else
# define _DPRINTK(format, args...) do { } while (0)
#endif

/* Error reporting -- always compiled in. */
#define _EPRINTK(format, args...) \
	do { printk(KERN_ERR "%s():%05d " format ".\n", __FUNCTION__, __LINE__, ## args); } while (0)
42 /* Frame Descriptors */
43 TX_FrameDesc m_txFDbase
[ETH_NTxFrames
]; /* array of TX frame descriptors */
44 RX_FrameDesc m_rxFDbase
[ETH_NRxFrames
]; /* array of RX frame descriptors */
45 volatile TX_FrameDesc
*m_curTX_FD
; /* current TX FD to queue */
46 volatile TX_FrameDesc
*m_oldTX_FD
; /* oldest TX FD queued, but not transmitted */
47 volatile RX_FrameDesc
*m_curRX_FD
; /* current RX FD to receive */
49 struct net_device_stats stats
;
53 /* This struct must be 16 byte aligned */
55 volatile RX_FrameDesc
*m_RxFD
;
56 struct net_device
*m_dev
;
60 static s32 __skb_head_offset
;
63 ** Avoid memcpy in RX handler by pre-allocating the socket buffers
66 // static void __skb_destruct( struct sk_buff *skb);
67 static void __skb_prepare( struct net_device
*dev
, volatile RX_FrameDesc
*pRxFD
)
71 skb
= dev_alloc_skb( sizeof(ETHFrame
) + 16 + 2);
72 if ( unlikely(!skb
)) {
73 _EPRINTK(" unable to allocate skb...");
76 // _DPRINTK("allocate skb: 0x%08x", (u32)skb);
80 /* attach skb to FD */
82 pRxFD
->m_frameDataPtr
.bf
.dataPtr
= (u32
)skb
->data
| CACHE_DISABLE_MASK
;
83 pRxFD
->m_frameDataPtr
.bf
.owner
= 0x1; /* BDMA owner */
87 static s32
RxFDinit( struct net_device
*dev
) {
89 struct eth_priv
*priv
= (struct eth_priv
*) dev
->priv
;
91 volatile RX_FrameDesc
*rxFDbase
;
94 /* determine skb initial headroom for later use in the skb destructor */
95 skb
= dev_alloc_skb(256);
96 __skb_head_offset
= skb_headroom( skb
);
99 /* store start of Rx descriptors and set current */
100 rxFDbase
= priv
->m_curRX_FD
=
101 (RX_FrameDesc
*)((u32
)priv
->m_rxFDbase
| CACHE_DISABLE_MASK
);
102 for ( i
= 0; i
< ETH_NRxFrames
; i
++) {
103 __skb_prepare( dev
, &rxFDbase
[i
]);
104 priv
->m_rxFDbase
[i
].m_reserved
= 0x0;
105 priv
->m_rxFDbase
[i
].m_status
.ui
= 0x0;
106 priv
->m_rxFDbase
[i
].m_nextFD
= &rxFDbase
[i
+1];
107 // _DPRINTK("rxFDbase[%d]: 0x%08x", i, (u32)&rxFDbase[i]);
110 /* make the list circular */
111 priv
->m_rxFDbase
[i
-1].m_nextFD
= &rxFDbase
[0];
113 outl( (unsigned int)rxFDbase
, REG_BDMARXPTR
);
118 static s32
TxFDinit( struct net_device
*dev
) {
120 struct eth_priv
*priv
= (struct eth_priv
*) dev
->priv
;
122 volatile TX_FrameDesc
*txFDbase
;
124 /* store start of Tx descriptors and set current */
125 txFDbase
= priv
->m_curTX_FD
= priv
->m_oldTX_FD
=
126 (TX_FrameDesc
*) ((u32
)priv
->m_txFDbase
| CACHE_DISABLE_MASK
);
128 for ( i
= 0; i
< ETH_NTxFrames
; i
++) {
129 priv
->m_txFDbase
[i
].m_frameDataPtr
.ui
= 0x0; /* CPU owner */
130 priv
->m_txFDbase
[i
].m_opt
.ui
= 0x0;
131 priv
->m_txFDbase
[i
].m_status
.ui
= 0x0;
132 priv
->m_txFDbase
[i
].m_nextFD
= &txFDbase
[i
+1];
133 // _DPRINTK("txFDbase[%d]: 0x%08x", i, (u32)&txFDbase[i]);
136 /* make the list circular */
137 priv
->m_txFDbase
[i
-1].m_nextFD
= &txFDbase
[0];
139 outl( (unsigned int)txFDbase
, REG_BDMATXPTR
);
144 static irqreturn_t
__s3c4510b_rx_int(int irq
, void *dev_id
, struct pt_regs
*regs
)
147 struct net_device
*dev
= (struct net_device
*) dev_id
;
148 struct eth_priv
*priv
= (struct eth_priv
*) dev
->priv
;
149 volatile RX_FrameDesc
*pRxFD
;
150 volatile RX_FrameDesc
*cRxFD
;
152 spin_lock(&priv
->lock
);
156 pRxFD
= priv
->m_curRX_FD
;
157 cRxFD
= (RX_FrameDesc
*)inl(REG_BDMARXPTR
);
159 /* clear received frame bit */
160 outl( ETH_S_BRxRDF
, REG_BDMASTAT
);
163 if ( likely( pRxFD
->m_status
.bf
.good
)) {
166 __skb_prepare( dev
, pRxFD
);
168 /* reserve two words used by protocol layers */
170 skb_put(skb
, pRxFD
->m_status
.bf
.len
);
171 skb
->protocol
= eth_type_trans(skb
, dev
);
172 priv
->stats
.rx_packets
++;
173 priv
->stats
.rx_bytes
+= pRxFD
->m_status
.bf
.len
;
177 priv
->stats
.rx_errors
++;
178 if( pRxFD
->m_status
.bf
.overFlow
)
179 priv
->stats
.rx_fifo_errors
++;
180 if( pRxFD
->m_status
.bf
.overMax
)
181 priv
->stats
.rx_length_errors
++;
182 if( pRxFD
->m_status
.bf
.crcErr
)
183 priv
->stats
.rx_crc_errors
++;
184 if( pRxFD
->m_status
.bf
.longErr
)
185 priv
->stats
.rx_length_errors
++;
186 if( pRxFD
->m_status
.bf
.alignErr
)
187 priv
->stats
.rx_frame_errors
++;
189 ** No good category for these errors
190 if( pRxFD->m_status.bf.parityErr)
195 /* set owner back to CPU */
196 pRxFD
->m_frameDataPtr
.bf
.owner
= 1;
198 pRxFD
->m_status
.ui
= 0x0;
199 /* advance to next descriptor */
200 pRxFD
= pRxFD
->m_nextFD
;
202 } while ( pRxFD
!= cRxFD
);
204 priv
->m_curRX_FD
= pRxFD
;
208 spin_unlock(&priv
->lock
);
214 static irqreturn_t
__s3c4510b_tx_int(int irq
, void *dev_id
, struct pt_regs
*regs
)
216 struct net_device
*dev
= (struct net_device
*) dev_id
;
217 struct eth_priv
*priv
= (struct eth_priv
*) dev
->priv
;
218 volatile TX_FrameDesc
*pTxFD
;
219 volatile TX_FrameDesc
*cTxFD
;
221 spin_lock(&priv
->lock
);
223 pTxFD
= priv
->m_oldTX_FD
;
224 cTxFD
= (TX_FrameDesc
*)inl(REG_BDMATXPTR
);
226 while ( pTxFD
!= cTxFD
) {
228 if ( likely(pTxFD
->m_status
.bf
.complete
)) {
229 priv
->stats
.tx_packets
++;
231 if( pTxFD
->m_status
.bf
.exColl
) {
232 _EPRINTK("TX collision detected");
233 priv
->stats
.tx_errors
++;
234 priv
->stats
.collisions
++;
236 if( pTxFD
->m_status
.bf
.underRun
) {
237 _EPRINTK("TX Underrun detected");
238 priv
->stats
.tx_errors
++;
239 priv
->stats
.tx_fifo_errors
++;
241 if( pTxFD
->m_status
.bf
.noCarrier
) {
242 _EPRINTK("TX no carrier detected");
243 priv
->stats
.tx_errors
++;
244 priv
->stats
.tx_carrier_errors
++;
246 if( pTxFD
->m_status
.bf
.lateColl
) {
247 _EPRINTK("TX late collision detected");
248 priv
->stats
.tx_errors
++;
249 priv
->stats
.tx_window_errors
++;
251 if( pTxFD
->m_status
.bf
.parityErr
) {
252 _EPRINTK("TX parity error detected");
253 priv
->stats
.tx_errors
++;
254 priv
->stats
.tx_aborted_errors
++;
257 dev_kfree_skb_irq( pTxFD
->skb
);
258 pTxFD
= pTxFD
->m_nextFD
;
261 priv
->m_oldTX_FD
= pTxFD
;
265 spin_unlock(&priv
->lock
);
271 static int __s3c4510b_start_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
275 struct eth_priv
*priv
= (struct eth_priv
*) dev
->priv
;
277 // _DPRINTK("entered with dev = 0x%08x", (unsigned int)dev);
279 len
= skb
->len
< ETH_ZLEN
? ETH_ZLEN
: skb
->len
;
280 dev
->trans_start
= jiffies
;
282 if ( unlikely( priv
->m_curTX_FD
->m_frameDataPtr
.bf
.owner
)) {
283 _EPRINTK("Ethernet TX Frame. CPU not owner");
287 /* this needs to be word aligned for the BDMA -- round down */
288 addr
= ((u32
)skb
->data
& ~0x3) | CACHE_DISABLE_MASK
;
289 priv
->m_curTX_FD
->m_frameDataPtr
.bf
.dataPtr
= addr
;
291 /* Set TX Frame flags */
292 priv
->m_curTX_FD
->m_opt
.bf
.widgetAlign
= (u32
)skb
->data
- addr
; /* compenstate for alignment */
293 priv
->m_curTX_FD
->m_opt
.bf
.frameDataDir
= 1;
294 priv
->m_curTX_FD
->m_opt
.bf
.littleEndian
= 1;
295 priv
->m_curTX_FD
->m_opt
.bf
.macTxIrqEnbl
= 1;
296 priv
->m_curTX_FD
->m_opt
.bf
.no_crc
= 0;
297 priv
->m_curTX_FD
->m_opt
.bf
.no_padding
= 0;
299 /* Set TX Frame length */
300 priv
->m_curTX_FD
->m_status
.bf
.len
= len
;
302 priv
->m_curTX_FD
->skb
= skb
;
304 /* Change ownership to BDMA */
305 priv
->m_curTX_FD
->m_frameDataPtr
.bf
.owner
= 1;
307 /* Change the Tx frame descriptor for next use */
308 priv
->m_curTX_FD
= priv
->m_curTX_FD
->m_nextFD
;
312 /* Enable MAC and BDMA Tx control register */
313 outl( ETH_BTxBRST
| /* BDMA Tx burst size 16 words */
314 ETH_BTxMSL110
| /* BDMA Tx wait to fill 6/8 of the BDMA */
315 ETH_BTxSTSKO
| /* BDMA Tx interrupt(Stop) on non-owner TX FD */
316 ETH_BTxEn
, /* BDMA Tx Enable */
319 outl( ETH_EnComp
| /* interrupt when the MAC transmits or discards packet */
320 ETH_TxEn
| /* MAC transmit enable */
321 ETH_EnUnder
| /* interrupt on Underrun */
322 ETH_EnNCarr
| /* interrupt on No Carrier */
323 ETH_EnExColl
| /* interrupt if 16 collision occur */
324 ETH_EnLateColl
| /* interrupt if collision occurs after 512 bit times(64 bytes times) */
325 ETH_EnTxPar
, /* interrupt if the MAC transmit FIFO has a parity error */
332 static struct irqaction __rx_irqaction
= {
335 handler
: __s3c4510b_rx_int
,
338 static struct irqaction __tx_irqaction
= {
341 handler
: __s3c4510b_tx_int
,
344 static int __s3c4510b_open(struct net_device
*dev
)
346 unsigned long status
;
348 /* Disable interrupts */
349 INT_DISABLE(INT_BDMARX
);
350 INT_DISABLE(INT_MACTX
);
355 __rx_irqaction
.dev_id
= (void *)dev
;
356 status
= setup_irq( INT_BDMARX
, &__rx_irqaction
);
357 if ( unlikely(status
)) {
358 printk( KERN_ERR
"Unabled to hook irq %d for ethernet RX\n", INT_BDMARX
);
365 __tx_irqaction
.dev_id
= (void *)dev
;
366 status
= setup_irq( INT_MACTX
, &__tx_irqaction
);
367 if ( unlikely(status
)) {
368 printk( KERN_ERR
"Unabled to hook irq %d for ethernet TX\n", INT_MACTX
);
372 /* setup DBMA and MAC */
373 outl( ETH_BRxRS
, REG_BDMARXCON
); /* reset BDMA RX machine */
374 outl( ETH_BTxRS
, REG_BDMATXCON
); /* reset BDMA TX machine */
375 outl( ETH_SwReset
, REG_MACCON
); /* reset MAC machine */
376 outl( sizeof( ETHFrame
), REG_BDMARXLSZ
);
377 outl( ETH_FullDup
, REG_MACCON
); /* enable full duplex */
379 /* init frame descriptors */
383 outl( (dev
->dev_addr
[0] << 24) |
384 (dev
->dev_addr
[1] << 16) |
385 (dev
->dev_addr
[2] << 8) |
386 (dev
->dev_addr
[3]) , REG_CAM_BASE
);
387 outl( (dev
->dev_addr
[4] << 24) |
388 (dev
->dev_addr
[5] << 16) , REG_CAM_BASE
+ 4);
390 outl( 0x0001, REG_CAMEN
);
391 outl( ETH_CompEn
| /* enable compare mode (check against the CAM) */
392 ETH_BroadAcc
, /* accept broadcast packetes */
395 INT_ENABLE(INT_BDMARX
);
396 INT_ENABLE(INT_MACTX
);
398 /* enable RX machinery */
399 outl( ETH_BRxBRST
| /* BDMA Rx Burst Size 16 words */
400 ETH_BRxSTSKO
| /* BDMA Rx interrupt(Stop) on non-owner RX FD */
401 ETH_BRxMAINC
| /* BDMA Rx Memory Address increment */
402 ETH_BRxDIE
| /* BDMA Rx Every Received Frame Interrupt Enable */
403 ETH_BRxNLIE
| /* BDMA Rx NULL List Interrupt Enable */
404 ETH_BRxNOIE
| /* BDMA Rx Not Owner Interrupt Enable */
405 ETH_BRxLittle
| /* BDMA Rx Little endian */
406 ETH_BRxWA10
| /* BDMA Rx Word Alignment- two invalid bytes */
407 ETH_BRxEn
, /* BDMA Rx Enable */
410 outl( ETH_RxEn
| /* enable MAC RX */
411 ETH_StripCRC
| /* check and strip CRC */
412 ETH_EnCRCErr
| /* interrupt on CRC error */
413 ETH_EnOver
| /* interrupt on overflow error */
414 ETH_EnLongErr
| /* interrupt on long frame error */
415 ETH_EnRxPar
, /* interrupt on MAC FIFO parity error */
418 netif_start_queue(dev
);
423 static int __s3c4510b_stop(struct net_device
*dev
)
426 INT_DISABLE(INT_BDMARX
);
427 INT_DISABLE(INT_MACTX
);
429 outl( 0, REG_BDMATXCON
);
430 outl( 0, REG_BDMARXCON
);
431 outl( 0, REG_MACTXCON
);
432 outl( 0, REG_MACRXCON
);
434 free_irq(INT_BDMARX
, dev
);
435 free_irq(INT_MACTX
, dev
);
437 netif_stop_queue(dev
);
442 struct net_device_stats
*__s3c4510b_get_stats(struct net_device
*dev
)
444 return &((struct eth_priv
*)dev
->priv
)->stats
;
448 * The init function, invoked by register_netdev()
450 static int __s3c4510b_init(struct net_device
*dev
)
454 /* assign net_device methods */
455 dev
->open
= __s3c4510b_open
;
456 dev
->stop
= __s3c4510b_stop
;
457 // dev->ioctl = __s3c4510b_ioctl;
458 dev
->get_stats
= __s3c4510b_get_stats
;
459 // dev->tx_timeout = __s3c4510b_tx_timeout;
460 dev
->hard_start_xmit
= __s3c4510b_start_xmit
;
462 dev
->irq
= INT_BDMARX
;
463 dev
->tx_queue_len
= ETH_NTxFrames
;
465 dev
->watchdog_timeo
= HZ
;
467 /* set MAC address */
468 dev
->dev_addr
[0] = 0x00;
469 dev
->dev_addr
[1] = 0x40;
470 dev
->dev_addr
[2] = 0x95;
471 dev
->dev_addr
[3] = 0x36;
472 dev
->dev_addr
[4] = 0x35;
473 dev
->dev_addr
[5] = 0x33;
475 SET_MODULE_OWNER(dev
);
477 dev
->priv
= kmalloc(sizeof(struct eth_priv
), GFP_KERNEL
);
478 if( dev
->priv
== NULL
)
480 memset(dev
->priv
, 0, sizeof(struct eth_priv
));
481 spin_lock_init(&((struct eth_priv
*) dev
->priv
)->lock
);
485 struct net_device __s3c4510b_netdevs
= {
486 init
: __s3c4510b_init
,
489 static int __init
__s3c4510b_init_module(void)
493 printk(KERN_INFO
"%s\n", __DRIVER_NAME
);
495 if( (status
= register_netdev( &__s3c4510b_netdevs
)))
496 printk("S3C4510 eth: Error %i registering interface %s\n", status
, __s3c4510b_netdevs
.name
);
501 static void __exit
__s3c4510b_cleanup(void)
503 kfree( __s3c4510b_netdevs
.priv
);
504 unregister_netdev( &__s3c4510b_netdevs
);
508 module_init(__s3c4510b_init_module
);
509 module_exit(__s3c4510b_cleanup
);
511 MODULE_DESCRIPTION("Samsung S3C4510B ethernet driver");
512 MODULE_AUTHOR("Curt Brune <curt@cucy.com>");
513 MODULE_LICENSE("GPL");