/*
 * sgiseeq.c: Seeq8003 ethernet driver for SGI machines.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 */
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

#include <asm/sgi/hpc3.h>
#include <asm/sgi/ip22.h>
#include <asm/sgi/seeq.h>

#include "sgiseeq.h"
static char *sgiseeqstr = "SGI Seeq8003";
/*
 * If you want speed, you do something silly, it always has worked for me.  So,
 * with that in mind, I've decided to make this driver look completely like a
 * stupid Lance from a driver architecture perspective.  Only difference is that
 * here our "ring buffer" looks and acts like a real Lance one does but is
 * laid out like how the HPC DMA and the Seeq want it to.  You'd be surprised
 * how a stupid idea like this can pay off in performance, not to mention
 * making this driver 2,000 times easier to write. ;-)
 */
/* Tune these if we tend to run out often etc. */
#define SEEQ_RX_BUFFERS  16
#define SEEQ_TX_BUFFERS  16

#define PKT_BUF_SZ       1584

#define NEXT_RX(i)  (((i) + 1) & (SEEQ_RX_BUFFERS - 1))
#define NEXT_TX(i)  (((i) + 1) & (SEEQ_TX_BUFFERS - 1))
#define PREV_RX(i)  (((i) - 1) & (SEEQ_RX_BUFFERS - 1))
#define PREV_TX(i)  (((i) - 1) & (SEEQ_TX_BUFFERS - 1))
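/*
 * The AND-mask wrap in the four macros above only works because both ring
 * sizes are powers of two: with 16 entries, NEXT_RX(15) = 16 & 15 = 0 and
 * PREV_RX(0) = -1 & 15 = 15.  If you retune SEEQ_RX_BUFFERS or
 * SEEQ_TX_BUFFERS, keep them powers of two.
 */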
#define TX_BUFFS_AVAIL(sp) ((sp->tx_old <= sp->tx_new) ? \
			    sp->tx_old + (SEEQ_TX_BUFFERS - 1) - sp->tx_new : \
			    sp->tx_old - sp->tx_new - 1)
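/*
 * Note that TX_BUFFS_AVAIL reports at most SEEQ_TX_BUFFERS - 1 free slots:
 * one descriptor is always left unused so that tx_old == tx_new can only
 * mean "ring empty", never "ring full".
 */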
#define VIRT_TO_DMA(sp, v) ((sp)->srings_dma +                                 \
			    (dma_addr_t)((unsigned long)(v) -                  \
					 (unsigned long)((sp)->rx_desc)))
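/*
 * VIRT_TO_DMA translates a descriptor's kernel virtual address into the bus
 * address the HPC must be handed: the descriptor's byte offset from the
 * first ring entry (rx_desc points at the start of the init block) added to
 * the DMA handle of the whole block (srings_dma).
 */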
/* Copy frames shorter than rx_copybreak, otherwise pass on up in
 * a full sized sk_buff.  Value of 100 stolen from tulip.c (!alpha).
 */
static int rx_copybreak = 100;

#define PAD_SIZE    (128 - sizeof(struct hpc_dma_desc) - sizeof(void *))
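/*
 * PAD_SIZE rounds each ring descriptor (hardware descriptor plus the
 * driver's sk_buff back pointer) up to exactly 128 bytes.  Presumably this
 * keeps descriptors from sharing cache lines; take that rationale as an
 * educated guess -- the hard requirement is only the 8-byte alignment noted
 * below.
 */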
struct sgiseeq_rx_desc {
	volatile struct hpc_dma_desc rdma;
	u8 padding[PAD_SIZE];
	struct sk_buff *skb;
};

struct sgiseeq_tx_desc {
	volatile struct hpc_dma_desc tdma;
	u8 padding[PAD_SIZE];
	struct sk_buff *skb;
};
/*
 * Warning: This structure is laid out in a certain way because HPC dma
 *          descriptors must be 8-byte aligned.  So don't touch this without
 *          some care.
 */
struct sgiseeq_init_block { /* Note the name. ;-) */
	struct sgiseeq_rx_desc rxvector[SEEQ_RX_BUFFERS];
	struct sgiseeq_tx_desc txvector[SEEQ_TX_BUFFERS];
};
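/*
 * A handy consequence of the 128-byte descriptors: 16 rx + 16 tx entries
 * make this init block exactly 4096 bytes, one page with the usual 4K page
 * size, which is what the "page aligned" comment in sgiseeq_probe() is
 * about.
 */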
struct sgiseeq_private {
	struct sgiseeq_init_block *srings;
	dma_addr_t srings_dma;

	/* Ptrs to the descriptors in uncached space. */
	struct sgiseeq_rx_desc *rx_desc;
	struct sgiseeq_tx_desc *tx_desc;

	char *name;
	struct hpc3_ethregs *hregs;
	struct sgiseeq_regs *sregs;

	/* Ring entry counters. */
	unsigned int rx_new, tx_new;
	unsigned int rx_old, tx_old;

	int is_edlc;
	unsigned char control;
	unsigned char mode;

	spinlock_t tx_lock;
};
static inline void dma_sync_desc_cpu(struct net_device *dev, void *addr)
{
	dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc),
		       DMA_FROM_DEVICE);
}
static inline void dma_sync_desc_dev(struct net_device *dev, void *addr)
{
	dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc),
		       DMA_TO_DEVICE);
}
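/*
 * The rings live in noncoherent memory (see dma_alloc_noncoherent() in
 * sgiseeq_probe()), so every CPU read of a descriptor the HPC may have
 * written is preceded by dma_sync_desc_cpu() and every CPU write the HPC
 * must observe is followed by dma_sync_desc_dev().  Both helpers sync
 * sizeof(struct sgiseeq_rx_desc) bytes, which is equally valid for tx
 * descriptors since both types are padded to the same 128 bytes.
 */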
static inline void hpc3_eth_reset(struct hpc3_ethregs *hregs)
{
	hregs->reset = HPC3_ERST_CRESET | HPC3_ERST_CLRIRQ;
	udelay(20);
	hregs->reset = 0;
}
static inline void reset_hpc3_and_seeq(struct hpc3_ethregs *hregs,
				       struct sgiseeq_regs *sregs)
{
	hregs->rx_ctrl = hregs->tx_ctrl = 0;
	hpc3_eth_reset(hregs);
}
#define RSTAT_GO_BITS (SEEQ_RCMD_IGOOD | SEEQ_RCMD_IEOF | SEEQ_RCMD_ISHORT | \
		       SEEQ_RCMD_IDRIB | SEEQ_RCMD_ICRC)
static inline void seeq_go(struct sgiseeq_private *sp,
			   struct hpc3_ethregs *hregs,
			   struct sgiseeq_regs *sregs)
{
	sregs->rstat = sp->mode | RSTAT_GO_BITS;
	hregs->rx_ctrl = HPC3_ERXCTRL_ACTIVE;
}
static inline void __sgiseeq_set_mac_address(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct sgiseeq_regs *sregs = sp->sregs;
	int i;

	sregs->tstat = SEEQ_TCMD_RB0;
	for (i = 0; i < 6; i++)
		sregs->rw.eth_addr[i] = dev->dev_addr[i];
}
static int sgiseeq_set_mac_address(struct net_device *dev, void *addr)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct sockaddr *sa = addr;

	memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);

	spin_lock_irq(&sp->tx_lock);
	__sgiseeq_set_mac_address(dev);
	spin_unlock_irq(&sp->tx_lock);

	return 0;
}
#define TCNTINFO_INIT (HPCDMA_EOX | HPCDMA_ETXD)
#define RCNTCFG_INIT  (HPCDMA_OWN | HPCDMA_EORP | HPCDMA_XIE)
#define RCNTINFO_INIT (RCNTCFG_INIT | (PKT_BUF_SZ & HPCDMA_BCNT))
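/*
 * As used below: HPCDMA_OWN hands a descriptor to the HPC (cleared by the
 * hardware on completion), HPCDMA_BCNT masks the byte-count field,
 * HPCDMA_XIE requests an interrupt, and HPCDMA_EOR/HPCDMA_EOX mark the end
 * of the rx ring and tx chain.  That reading is inferred from this driver's
 * use of the bits rather than from HPC3 documentation.
 */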
static int seeq_init_ring(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	int i;

	netif_stop_queue(dev);
	sp->rx_new = sp->tx_new = 0;
	sp->rx_old = sp->tx_old = 0;

	__sgiseeq_set_mac_address(dev);

	/* Setup tx ring. */
	for (i = 0; i < SEEQ_TX_BUFFERS; i++) {
		sp->tx_desc[i].tdma.cntinfo = TCNTINFO_INIT;
		dma_sync_desc_dev(dev, &sp->tx_desc[i]);
	}

	/* And now the rx ring. */
	for (i = 0; i < SEEQ_RX_BUFFERS; i++) {
		if (!sp->rx_desc[i].skb) {
			dma_addr_t dma_addr;
			struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ);

			if (skb == NULL)
				return -ENOMEM;
			skb_reserve(skb, 2);
			dma_addr = dma_map_single(dev->dev.parent,
						  skb->data - 2,
						  PKT_BUF_SZ, DMA_FROM_DEVICE);
			sp->rx_desc[i].skb = skb;
			sp->rx_desc[i].rdma.pbuf = dma_addr;
		}
		sp->rx_desc[i].rdma.cntinfo = RCNTINFO_INIT;
		dma_sync_desc_dev(dev, &sp->rx_desc[i]);
	}
	sp->rx_desc[i - 1].rdma.cntinfo |= HPCDMA_EOR;
	dma_sync_desc_dev(dev, &sp->rx_desc[i - 1]);
	return 0;
}
static void seeq_purge_ring(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	int i;

	/* clear tx ring. */
	for (i = 0; i < SEEQ_TX_BUFFERS; i++) {
		if (sp->tx_desc[i].skb) {
			dev_kfree_skb(sp->tx_desc[i].skb);
			sp->tx_desc[i].skb = NULL;
		}
	}

	/* And now the rx ring. */
	for (i = 0; i < SEEQ_RX_BUFFERS; i++) {
		if (sp->rx_desc[i].skb) {
			dev_kfree_skb(sp->rx_desc[i].skb);
			sp->rx_desc[i].skb = NULL;
		}
	}
}
#ifdef DEBUG
static struct sgiseeq_private *gpriv;
static struct net_device *gdev;

static void sgiseeq_dump_rings(void)
{
	static int once;
	struct sgiseeq_rx_desc *r = gpriv->rx_desc;
	struct sgiseeq_tx_desc *t = gpriv->tx_desc;
	struct hpc3_ethregs *hregs = gpriv->hregs;
	int i;

	if (once)
		return;
	once++;
	printk("RING DUMP:\n");
	for (i = 0; i < SEEQ_RX_BUFFERS; i++) {
		printk("RX [%d]: @(%p) [%08x,%08x,%08x] ",
		       i, (&r[i]), r[i].rdma.pbuf, r[i].rdma.cntinfo,
		       r[i].rdma.pnext);
		i += 1;
		printk("-- [%d]: @(%p) [%08x,%08x,%08x]\n",
		       i, (&r[i]), r[i].rdma.pbuf, r[i].rdma.cntinfo,
		       r[i].rdma.pnext);
	}
	for (i = 0; i < SEEQ_TX_BUFFERS; i++) {
		printk("TX [%d]: @(%p) [%08x,%08x,%08x] ",
		       i, (&t[i]), t[i].tdma.pbuf, t[i].tdma.cntinfo,
		       t[i].tdma.pnext);
		i += 1;
		printk("-- [%d]: @(%p) [%08x,%08x,%08x]\n",
		       i, (&t[i]), t[i].tdma.pbuf, t[i].tdma.cntinfo,
		       t[i].tdma.pnext);
	}
	printk("INFO: [rx_new = %d rx_old=%d] [tx_new = %d tx_old = %d]\n",
	       gpriv->rx_new, gpriv->rx_old, gpriv->tx_new, gpriv->tx_old);
	printk("RREGS: rx_cbptr[%08x] rx_ndptr[%08x] rx_ctrl[%08x]\n",
	       hregs->rx_cbptr, hregs->rx_ndptr, hregs->rx_ctrl);
	printk("TREGS: tx_cbptr[%08x] tx_ndptr[%08x] tx_ctrl[%08x]\n",
	       hregs->tx_cbptr, hregs->tx_ndptr, hregs->tx_ctrl);
}
#endif
#define TSTAT_INIT_SEEQ (SEEQ_TCMD_IPT|SEEQ_TCMD_I16|SEEQ_TCMD_IC|SEEQ_TCMD_IUF)
#define TSTAT_INIT_EDLC ((TSTAT_INIT_SEEQ) | SEEQ_TCMD_RB2)
static int init_seeq(struct net_device *dev, struct sgiseeq_private *sp,
		     struct sgiseeq_regs *sregs)
{
	struct hpc3_ethregs *hregs = sp->hregs;
	int err;

	reset_hpc3_and_seeq(hregs, sregs);
	err = seeq_init_ring(dev);
	if (err)
		return err;

	/* Setup to field the proper interrupt types. */
	if (sp->is_edlc) {
		sregs->tstat = TSTAT_INIT_EDLC;
		sregs->rw.wregs.control = sp->control;
		sregs->rw.wregs.frame_gap = 0;
	} else {
		sregs->tstat = TSTAT_INIT_SEEQ;
	}

	hregs->rx_ndptr = VIRT_TO_DMA(sp, sp->rx_desc);
	hregs->tx_ndptr = VIRT_TO_DMA(sp, sp->tx_desc);

	seeq_go(sp, hregs, sregs);
	return 0;
}
static void record_rx_errors(struct net_device *dev, unsigned char status)
{
	if (status & SEEQ_RSTAT_OVERF ||
	    status & SEEQ_RSTAT_SFRAME)
		dev->stats.rx_over_errors++;
	if (status & SEEQ_RSTAT_CERROR)
		dev->stats.rx_crc_errors++;
	if (status & SEEQ_RSTAT_DERROR)
		dev->stats.rx_frame_errors++;
	if (status & SEEQ_RSTAT_REOF)
		dev->stats.rx_errors++;
}
static inline void rx_maybe_restart(struct sgiseeq_private *sp,
				    struct hpc3_ethregs *hregs,
				    struct sgiseeq_regs *sregs)
{
	if (!(hregs->rx_ctrl & HPC3_ERXCTRL_ACTIVE)) {
		hregs->rx_ndptr = VIRT_TO_DMA(sp, sp->rx_desc + sp->rx_new);
		seeq_go(sp, hregs, sregs);
	}
}
static inline void sgiseeq_rx(struct net_device *dev, struct sgiseeq_private *sp,
			      struct hpc3_ethregs *hregs,
			      struct sgiseeq_regs *sregs)
{
	struct sgiseeq_rx_desc *rd;
	struct sk_buff *skb = NULL;
	struct sk_buff *newskb;
	unsigned char pkt_status;
	int len = 0;
	unsigned int orig_end = PREV_RX(sp->rx_new);

	/* Service every received packet. */
	rd = &sp->rx_desc[sp->rx_new];
	dma_sync_desc_cpu(dev, rd);
	while (!(rd->rdma.cntinfo & HPCDMA_OWN)) {
		len = PKT_BUF_SZ - (rd->rdma.cntinfo & HPCDMA_BCNT) - 3;
		dma_unmap_single(dev->dev.parent, rd->rdma.pbuf,
				 PKT_BUF_SZ, DMA_FROM_DEVICE);
		/* The byte just past the frame holds the Seeq receive status. */
		pkt_status = rd->skb->data[len];
		if (pkt_status & SEEQ_RSTAT_FIG) {
			/* Packet is OK. */
			/* We don't want to receive our own packets */
			if (memcmp(rd->skb->data + 6, dev->dev_addr, ETH_ALEN)) {
				if (len > rx_copybreak) {
					skb = rd->skb;
					newskb = netdev_alloc_skb(dev, PKT_BUF_SZ);
					if (!newskb) {
						newskb = skb;
						skb = NULL;
						goto memory_squeeze;
					}
					skb_reserve(newskb, 2);
				} else {
					skb = netdev_alloc_skb_ip_align(dev, len);
					if (skb)
						skb_copy_to_linear_data(skb, rd->skb->data, len);

					newskb = rd->skb;
				}
memory_squeeze:
				if (skb) {
					skb_put(skb, len);
					skb->protocol = eth_type_trans(skb, dev);
					netif_rx(skb);
					dev->stats.rx_packets++;
					dev->stats.rx_bytes += len;
				} else {
					printk(KERN_NOTICE "%s: Memory squeeze, deferring packet.\n",
					       dev->name);
					dev->stats.rx_dropped++;
				}
			} else {
				/* Silently drop my own packets */
				newskb = rd->skb;
			}
		} else {
			record_rx_errors(dev, pkt_status);
			newskb = rd->skb;
		}
		rd->skb = newskb;
		rd->rdma.pbuf = dma_map_single(dev->dev.parent,
					       newskb->data - 2,
					       PKT_BUF_SZ, DMA_FROM_DEVICE);

		/* Return the entry to the ring pool. */
		rd->rdma.cntinfo = RCNTINFO_INIT;
		sp->rx_new = NEXT_RX(sp->rx_new);
		dma_sync_desc_dev(dev, rd);
		rd = &sp->rx_desc[sp->rx_new];
		dma_sync_desc_cpu(dev, rd);
	}
	dma_sync_desc_cpu(dev, &sp->rx_desc[orig_end]);
	sp->rx_desc[orig_end].rdma.cntinfo &= ~(HPCDMA_EOR);
	dma_sync_desc_dev(dev, &sp->rx_desc[orig_end]);
	dma_sync_desc_cpu(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]);
	sp->rx_desc[PREV_RX(sp->rx_new)].rdma.cntinfo |= HPCDMA_EOR;
	dma_sync_desc_dev(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]);
	rx_maybe_restart(sp, hregs, sregs);
}
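/*
 * The tail of sgiseeq_rx() above walks the HPCDMA_EOR mark around the ring:
 * it is cleared from the entry that ended the previous pass (orig_end) and
 * set on the entry just behind the new rx_new, so the HPC always stops one
 * descriptor short of where the CPU will resume.
 */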
static inline void tx_maybe_reset_collisions(struct sgiseeq_private *sp,
					     struct sgiseeq_regs *sregs)
{
	if (sp->is_edlc) {
		sregs->rw.wregs.control = sp->control & ~(SEEQ_CTRL_XCNT);
		sregs->rw.wregs.control = sp->control;
	}
}
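/*
 * The double write above first drops SEEQ_CTRL_XCNT from the control
 * register and then restores it; judging by the bit's name this toggle is
 * what clears the EDLC's transmit collision counter, but that behavior is
 * an assumption from the name, not from a datasheet.
 */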
static inline void kick_tx(struct net_device *dev,
			   struct sgiseeq_private *sp,
			   struct hpc3_ethregs *hregs)
{
	struct sgiseeq_tx_desc *td;
	int i = sp->tx_old;

	/* If the HPC ain't doin' nothin', and there are more packets
	 * with ETXD cleared and XIU set we must make very certain
	 * that we restart the HPC else we risk locking up the
	 * adapter.  The following code is only safe iff the HPCDMA
	 * is not active!
	 */
	td = &sp->tx_desc[i];
	dma_sync_desc_cpu(dev, td);
	while ((td->tdma.cntinfo & (HPCDMA_XIU | HPCDMA_ETXD)) ==
	      (HPCDMA_XIU | HPCDMA_ETXD)) {
		i = NEXT_TX(i);
		td = &sp->tx_desc[i];
		dma_sync_desc_cpu(dev, td);
	}
	if (td->tdma.cntinfo & HPCDMA_XIU) {
		hregs->tx_ndptr = VIRT_TO_DMA(sp, td);
		hregs->tx_ctrl = HPC3_ETXCTRL_ACTIVE;
	}
}
static inline void sgiseeq_tx(struct net_device *dev, struct sgiseeq_private *sp,
			      struct hpc3_ethregs *hregs,
			      struct sgiseeq_regs *sregs)
{
	struct sgiseeq_tx_desc *td;
	unsigned long status = hregs->tx_ctrl;
	int j;

	tx_maybe_reset_collisions(sp, sregs);

	if (!(status & (HPC3_ETXCTRL_ACTIVE | SEEQ_TSTAT_PTRANS))) {
		/* Oops, HPC detected some sort of error. */
		if (status & SEEQ_TSTAT_R16)
			dev->stats.tx_aborted_errors++;
		if (status & SEEQ_TSTAT_UFLOW)
			dev->stats.tx_fifo_errors++;
		if (status & SEEQ_TSTAT_LCLS)
			dev->stats.collisions++;
	}

	/* Ack 'em... */
	for (j = sp->tx_old; j != sp->tx_new; j = NEXT_TX(j)) {
		td = &sp->tx_desc[j];

		dma_sync_desc_cpu(dev, td);
		if (!(td->tdma.cntinfo & (HPCDMA_XIU)))
			break;
		if (!(td->tdma.cntinfo & (HPCDMA_ETXD))) {
			if (!(status & HPC3_ETXCTRL_ACTIVE)) {
				hregs->tx_ndptr = VIRT_TO_DMA(sp, td);
				hregs->tx_ctrl = HPC3_ETXCTRL_ACTIVE;
			}
			break;
		}
		dev->stats.tx_packets++;
		sp->tx_old = NEXT_TX(sp->tx_old);
		td->tdma.cntinfo &= ~(HPCDMA_XIU | HPCDMA_XIE);
		td->tdma.cntinfo |= HPCDMA_EOX;
		if (td->skb) {
			dev_kfree_skb_any(td->skb);
			td->skb = NULL;
		}
		dma_sync_desc_dev(dev, td);
	}
}
static irqreturn_t sgiseeq_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct hpc3_ethregs *hregs = sp->hregs;
	struct sgiseeq_regs *sregs = sp->sregs;

	spin_lock(&sp->tx_lock);

	/* Ack the IRQ and set software state. */
	hregs->reset = HPC3_ERST_CLRIRQ;

	/* Always check for received packets. */
	sgiseeq_rx(dev, sp, hregs, sregs);

	/* Only check for tx acks if we have something queued. */
	if (sp->tx_old != sp->tx_new)
		sgiseeq_tx(dev, sp, hregs, sregs);

	if ((TX_BUFFS_AVAIL(sp) > 0) && netif_queue_stopped(dev)) {
		netif_wake_queue(dev);
	}
	spin_unlock(&sp->tx_lock);

	return IRQ_HANDLED;
}
static int sgiseeq_open(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct sgiseeq_regs *sregs = sp->sregs;
	unsigned int irq = dev->irq;
	int err;

	if (request_irq(irq, sgiseeq_interrupt, 0, sgiseeqstr, dev)) {
		printk(KERN_ERR "Seeq8003: Can't get irq %d\n", dev->irq);
		return -EAGAIN;
	}

	err = init_seeq(dev, sp, sregs);
	if (err)
		goto out_free_irq;

	netif_start_queue(dev);

	return 0;

out_free_irq:
	free_irq(irq, dev);

	return err;
}
static int sgiseeq_close(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct sgiseeq_regs *sregs = sp->sregs;
	unsigned int irq = dev->irq;

	netif_stop_queue(dev);

	/* Shutdown the Seeq. */
	reset_hpc3_and_seeq(sp->hregs, sregs);
	free_irq(irq, dev);
	seeq_purge_ring(dev);

	return 0;
}
static inline int sgiseeq_reset(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct sgiseeq_regs *sregs = sp->sregs;
	int err;

	err = init_seeq(dev, sp, sregs);
	if (err)
		return err;

	dev->trans_start = jiffies; /* prevent tx timeout */
	netif_wake_queue(dev);

	return 0;
}
static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct hpc3_ethregs *hregs = sp->hregs;
	unsigned long flags;
	struct sgiseeq_tx_desc *td;
	int len, entry;

	spin_lock_irqsave(&sp->tx_lock, flags);

	/* Setup... */
	len = skb->len;
	if (len < ETH_ZLEN) {
		if (skb_padto(skb, ETH_ZLEN)) {
			spin_unlock_irqrestore(&sp->tx_lock, flags);
			return NETDEV_TX_OK;
		}
		len = ETH_ZLEN;
	}

	dev->stats.tx_bytes += len;
	entry = sp->tx_new;
	td = &sp->tx_desc[entry];
	dma_sync_desc_cpu(dev, td);

	/* Create entry.  There are so many races with adding a new
	 * descriptor to the chain:
	 * 1) Assume that the HPC is off processing a DMA chain while
	 *    we are changing all of the following.
	 * 2) Do not allow the HPC to look at a new descriptor until
	 *    we have completely set up its state.  This means, do
	 *    not clear HPCDMA_EOX in the current last descriptor
	 *    until the one we are adding looks consistent and could
	 *    be processed right now.
	 * 3) The tx interrupt code must notice when we've added a new
	 *    entry and the HPC got to the end of the chain before we
	 *    added this new entry and restarted it.
	 */
	td->skb = skb;
	td->tdma.pbuf = dma_map_single(dev->dev.parent, skb->data,
				       len, DMA_TO_DEVICE);
	td->tdma.cntinfo = (len & HPCDMA_BCNT) |
			   HPCDMA_XIU | HPCDMA_EOXP | HPCDMA_XIE | HPCDMA_EOX;
	dma_sync_desc_dev(dev, td);
	if (sp->tx_old != sp->tx_new) {
		struct sgiseeq_tx_desc *backend;

		backend = &sp->tx_desc[PREV_TX(sp->tx_new)];
		dma_sync_desc_cpu(dev, backend);
		backend->tdma.cntinfo &= ~HPCDMA_EOX;
		dma_sync_desc_dev(dev, backend);
	}
	sp->tx_new = NEXT_TX(sp->tx_new); /* Advance. */

	/* Maybe kick the HPC back into motion. */
	if (!(hregs->tx_ctrl & HPC3_ETXCTRL_ACTIVE))
		kick_tx(dev, sp, hregs);

	if (!TX_BUFFS_AVAIL(sp))
		netif_stop_queue(dev);
	spin_unlock_irqrestore(&sp->tx_lock, flags);

	return NETDEV_TX_OK;
}
static void timeout(struct net_device *dev)
{
	printk(KERN_NOTICE "%s: transmit timed out, resetting\n", dev->name);
	sgiseeq_reset(dev);

	dev->trans_start = jiffies; /* prevent tx timeout */
	netif_wake_queue(dev);
}
static void sgiseeq_set_multicast(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	unsigned char oldmode = sp->mode;

	if (dev->flags & IFF_PROMISC)
		sp->mode = SEEQ_RCMD_RANY;
	else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev))
		sp->mode = SEEQ_RCMD_RBMCAST;
	else
		sp->mode = SEEQ_RCMD_RBCAST;

	/* XXX I know this sucks, but is there a better way to reprogram
	 * XXX the receiver? At least, this shouldn't happen too often.
	 */

	if (oldmode != sp->mode)
		sgiseeq_reset(dev);
}
static inline void setup_tx_ring(struct net_device *dev,
				 struct sgiseeq_tx_desc *buf,
				 int nbufs)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	int i = 0;

	while (i < (nbufs - 1)) {
		buf[i].tdma.pnext = VIRT_TO_DMA(sp, buf + i + 1);
		buf[i].tdma.pbuf = 0;
		dma_sync_desc_dev(dev, &buf[i]);
		i++;
	}
	buf[i].tdma.pnext = VIRT_TO_DMA(sp, buf);
	dma_sync_desc_dev(dev, &buf[i]);
}
static inline void setup_rx_ring(struct net_device *dev,
				 struct sgiseeq_rx_desc *buf,
				 int nbufs)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	int i = 0;

	while (i < (nbufs - 1)) {
		buf[i].rdma.pnext = VIRT_TO_DMA(sp, buf + i + 1);
		buf[i].rdma.pbuf = 0;
		dma_sync_desc_dev(dev, &buf[i]);
		i++;
	}
	buf[i].rdma.pbuf = 0;
	buf[i].rdma.pnext = VIRT_TO_DMA(sp, buf);
	dma_sync_desc_dev(dev, &buf[i]);
}
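/*
 * Both setup helpers above build the hardware chain the same way: each
 * descriptor's pnext points at its successor and the last entry points back
 * at buf[0], so the HPC follows a circle.  Where it actually stops is
 * governed by the HPCDMA_EOR/HPCDMA_EOX bits, not by the links themselves.
 */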
static const struct net_device_ops sgiseeq_netdev_ops = {
	.ndo_open		= sgiseeq_open,
	.ndo_stop		= sgiseeq_close,
	.ndo_start_xmit		= sgiseeq_start_xmit,
	.ndo_tx_timeout		= timeout,
	.ndo_set_rx_mode	= sgiseeq_set_multicast,
	.ndo_set_mac_address	= sgiseeq_set_mac_address,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
};
static int __devinit sgiseeq_probe(struct platform_device *pdev)
{
	struct sgiseeq_platform_data *pd = pdev->dev.platform_data;
	struct hpc3_regs *hpcregs = pd->hpc;
	struct sgiseeq_init_block *sr;
	unsigned int irq = pd->irq;
	struct sgiseeq_private *sp;
	struct net_device *dev;
	int err;

	dev = alloc_etherdev(sizeof (struct sgiseeq_private));
	if (!dev) {
		printk(KERN_ERR "Sgiseeq: Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out;
	}

	platform_set_drvdata(pdev, dev);
	sp = netdev_priv(dev);

	/* Make private data page aligned */
	sr = dma_alloc_noncoherent(&pdev->dev, sizeof(*sp->srings),
				   &sp->srings_dma, GFP_KERNEL);
	if (!sr) {
		printk(KERN_ERR "Sgiseeq: Page alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}
	sp->srings = sr;
	sp->rx_desc = sp->srings->rxvector;
	sp->tx_desc = sp->srings->txvector;

	/* A couple calculations now, saves many cycles later. */
	setup_rx_ring(dev, sp->rx_desc, SEEQ_RX_BUFFERS);
	setup_tx_ring(dev, sp->tx_desc, SEEQ_TX_BUFFERS);

	memcpy(dev->dev_addr, pd->mac, ETH_ALEN);

#ifdef DEBUG
	gpriv = sp;
	gdev = dev;
#endif
	sp->sregs = (struct sgiseeq_regs *) &hpcregs->eth_ext[0];
	sp->hregs = &hpcregs->ethregs;
	sp->name = sgiseeqstr;
	sp->mode = SEEQ_RCMD_RBCAST;

	/* Setup PIO and DMA transfer timing */
	sp->hregs->pconfig = 0x161;
	sp->hregs->dconfig = HPC3_EDCFG_FIRQ | HPC3_EDCFG_FEOP |
			     HPC3_EDCFG_FRXDC | HPC3_EDCFG_PTO | 0x026;
	/* Reset the chip. */
	hpc3_eth_reset(sp->hregs);

	sp->is_edlc = !(sp->sregs->rw.rregs.collision_tx[0] & 0xff);
	if (sp->is_edlc)
		sp->control = SEEQ_CTRL_XCNT | SEEQ_CTRL_ACCNT |
			      SEEQ_CTRL_SFLAG | SEEQ_CTRL_ESHORT |
			      SEEQ_CTRL_ENCARR;

	dev->netdev_ops		= &sgiseeq_netdev_ops;
	dev->watchdog_timeo	= (200 * HZ) / 1000;
	dev->irq		= irq;

	if (register_netdev(dev)) {
		printk(KERN_ERR "Sgiseeq: Cannot register net device, "
		       "aborting.\n");
		err = -ENODEV;
		goto err_out_free_page;
	}

	printk(KERN_INFO "%s: %s %pM\n", dev->name, sgiseeqstr, dev->dev_addr);

	return 0;

err_out_free_page:
	/* Must pair with the dma_alloc_noncoherent() above. */
	dma_free_noncoherent(&pdev->dev, sizeof(*sp->srings), sp->srings,
			     sp->srings_dma);
err_out_free_dev:
	free_netdev(dev);

err_out:
	return err;
}
static int __exit sgiseeq_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct sgiseeq_private *sp = netdev_priv(dev);

	unregister_netdev(dev);
	dma_free_noncoherent(&pdev->dev, sizeof(*sp->srings), sp->srings,
			     sp->srings_dma);
	free_netdev(dev);
	platform_set_drvdata(pdev, NULL);

	return 0;
}
static struct platform_driver sgiseeq_driver = {
	.probe	= sgiseeq_probe,
	.remove	= __exit_p(sgiseeq_remove),
	.driver = {
		.name	= "sgiseeq",
		.owner	= THIS_MODULE,
	},
};

module_platform_driver(sgiseeq_driver);
MODULE_DESCRIPTION("SGI Seeq 8003 driver");
MODULE_AUTHOR("Linux/MIPS Mailing List <linux-mips@linux-mips.org>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:sgiseeq");