/* $Id: sgiseeq.c,v 1.9 1998/10/14 23:40:46 ralf Exp $
 *
 * sgiseeq.c: Seeq8003 ethernet driver for SGI machines.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/malloc.h>
#include <linux/string.h>
#include <linux/delay.h>

#include <asm/io.h>
#include <asm/segment.h>
#include <asm/system.h>
#include <asm/bitops.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <linux/errno.h>
#include <asm/byteorder.h>

#include <linux/socket.h>
#include <linux/route.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

#include <asm/sgihpc.h>
#include <asm/sgialib.h>
static char *version =
	"sgiseeq.c: David S. Miller (dm@engr.sgi.com)\n";

static char *sgiseeqstr = "SGI Seeq8003";
/* If you want speed, you do something silly: it has always worked
 * for me.  So, with that in mind, I've decided to make this driver
 * look completely like a stupid Lance from a driver architecture
 * perspective.  The only difference is that here our "ring buffer"
 * looks and acts like a real Lance one does, but is laid out the way
 * the HPC DMA and the Seeq want it to be.  You'd be surprised how a
 * stupid idea like this can pay off in performance, not to mention
 * making this driver 2,000 times easier to write. ;-)
 */
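
/* A sketch of what that means concretely (illustrative only, and
 * assuming nothing beyond the descriptor fields actually used below):
 * each ring entry pairs an HPC DMA descriptor {pbuf, cntinfo, pnext}
 * with the KSEG1 (uncached) virtual address of its packet buffer, and
 * the pnext physical pointers chain the entries into a circle the HPC
 * can walk without CPU help:
 *
 *	desc[0] --> desc[1] --> ... --> desc[N-1] --+
 *	   ^                                        |
 *	   +----------------------------------------+
 */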
/* Tune these if we tend to run out often etc. */
#define SEEQ_RX_BUFFERS  16
#define SEEQ_TX_BUFFERS  16

#define PKT_BUF_SZ       1584

#define NEXT_RX(i)  (((i) + 1) & (SEEQ_RX_BUFFERS - 1))
#define NEXT_TX(i)  (((i) + 1) & (SEEQ_TX_BUFFERS - 1))
#define PREV_RX(i)  (((i) - 1) & (SEEQ_RX_BUFFERS - 1))
#define PREV_TX(i)  (((i) - 1) & (SEEQ_TX_BUFFERS - 1))

#define TX_BUFFS_AVAIL(sp) ((sp->tx_old <= sp->tx_new) ? \
			    sp->tx_old + (SEEQ_TX_BUFFERS - 1) - sp->tx_new : \
			    sp->tx_old - sp->tx_new - 1)
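
/* Worked example (for illustration): the masks above rely on the ring
 * sizes being powers of two.  With SEEQ_TX_BUFFERS == 16:
 *
 *	NEXT_TX(15) == (16 & 15) == 0        (wraps to the start)
 *	PREV_TX(0)  == (-1 & 15) == 15       (wraps to the end)
 *
 * and with tx_old == 14, tx_new == 2 the second branch of
 * TX_BUFFS_AVAIL gives 14 - 2 - 1 == 11 free slots, while a full ring
 * (tx_old == 0, tx_new == 15) gives 0 + 15 - 15 == 0.
 */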
struct sgiseeq_rx_desc {
	struct hpc_dma_desc rdma;
	unsigned long buf_vaddr;
};

struct sgiseeq_tx_desc {
	struct hpc_dma_desc tdma;
	unsigned long buf_vaddr;
};
/* Warning: This structure is laid out in a certain way because
 *          HPC dma descriptors must be 8-byte aligned.  So don't
 *          touch this without some care.
 */
struct sgiseeq_init_block { /* Note the name ;-) */
	/* Ptrs to the descriptors in KSEG1 uncached space. */
	struct sgiseeq_rx_desc *rx_desc;
	struct sgiseeq_tx_desc *tx_desc;
	unsigned long _padding[30]; /* Pad out to largest cache line size. */

	struct sgiseeq_rx_desc rxvector[SEEQ_RX_BUFFERS];
	struct sgiseeq_tx_desc txvector[SEEQ_TX_BUFFERS];
};
struct sgiseeq_private {
	volatile struct sgiseeq_init_block srings;
	char *name;
	volatile struct hpc3_ethregs *hregs;
	volatile struct sgiseeq_regs *sregs;

	/* Ring entry counters. */
	unsigned int rx_new, tx_new;
	unsigned int rx_old, tx_old;

	int is_edlc;
	unsigned char control;
	unsigned char mode;

	struct enet_statistics stats;
};
static inline void hpc3_eth_reset(volatile struct hpc3_ethregs *hregs)
{
	hregs->rx_reset = (HPC3_ERXRST_CRESET | HPC3_ERXRST_CLRIRQ);
	udelay(20);
	hregs->rx_reset = 0;
}
static inline void reset_hpc3_and_seeq(volatile struct hpc3_ethregs *hregs,
				       volatile struct sgiseeq_regs *sregs)
{
	hregs->rx_ctrl = hregs->tx_ctrl = 0;
	hpc3_eth_reset(hregs);
}
#define RSTAT_GO_BITS (SEEQ_RCMD_IGOOD | SEEQ_RCMD_IEOF | SEEQ_RCMD_ISHORT | \
		       SEEQ_RCMD_IDRIB | SEEQ_RCMD_ICRC)
static inline void seeq_go(struct sgiseeq_private *sp,
			   volatile struct hpc3_ethregs *hregs,
			   volatile struct sgiseeq_regs *sregs)
{
	sregs->rstat = sp->mode | RSTAT_GO_BITS;
	hregs->rx_ctrl = HPC3_ERXCTRL_ACTIVE;
}
static inline void seeq_load_eaddr(struct net_device *dev,
				   volatile struct sgiseeq_regs *sregs)
{
	int i;

	sregs->tstat = SEEQ_TCMD_RB0;
	for(i = 0; i < 6; i++)
		sregs->rw.eth_addr[i] = dev->dev_addr[i];
}
#define TCNTINFO_INIT (HPCDMA_EOX | HPCDMA_ETXD)
#define RCNTCFG_INIT  (HPCDMA_OWN | HPCDMA_EORP | HPCDMA_XIE)
#define RCNTINFO_INIT (RCNTCFG_INIT | (PKT_BUF_SZ & HPCDMA_BCNT))
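
/* Reading those init values (an inference from how this file uses the
 * bits, not chip documentation): a fresh rx descriptor goes back to
 * the HPC owned by the chip (HPCDMA_OWN) with its interrupt enable
 * armed and the low HPCDMA_BCNT bits holding the full PKT_BUF_SZ byte
 * count, while an idle tx descriptor is parked with end-of-chain
 * (HPCDMA_EOX) and transmit-done (HPCDMA_ETXD) set so the transmitter
 * will not chase it.
 */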
static void seeq_init_ring(struct net_device *dev)
{
	struct sgiseeq_private *sp = (struct sgiseeq_private *) dev->priv;
	volatile struct sgiseeq_init_block *ib = &sp->srings;
	int i;

	sp->rx_new = sp->tx_new = 0;
	sp->rx_old = sp->tx_old = 0;

	seeq_load_eaddr(dev, sp->sregs);

	/* XXX for now just accept packets directly to us
	 * XXX and ether-broadcast.  Will do multicast and
	 * XXX promiscuous mode later. -davem
	 */
	sp->mode = SEEQ_RCMD_RBCAST;

	/* Setup tx ring. */
	for(i = 0; i < SEEQ_TX_BUFFERS; i++) {
		if(!ib->tx_desc[i].tdma.pbuf) {
			unsigned long buffer;

			buffer = (unsigned long) kmalloc(PKT_BUF_SZ, GFP_KERNEL);
			ib->tx_desc[i].buf_vaddr = KSEG1ADDR(buffer);
			ib->tx_desc[i].tdma.pbuf = PHYSADDR(buffer);
			/* flush_cache_all(); */
		}
		ib->tx_desc[i].tdma.cntinfo = (TCNTINFO_INIT);
	}

	/* And now the rx ring. */
	for(i = 0; i < SEEQ_RX_BUFFERS; i++) {
		if(!ib->rx_desc[i].rdma.pbuf) {
			unsigned long buffer;

			buffer = (unsigned long) kmalloc(PKT_BUF_SZ, GFP_KERNEL);
			ib->rx_desc[i].buf_vaddr = KSEG1ADDR(buffer);
			ib->rx_desc[i].rdma.pbuf = PHYSADDR(buffer);
			/* flush_cache_all(); */
		}
		ib->rx_desc[i].rdma.cntinfo = (RCNTINFO_INIT);
	}
	ib->rx_desc[i - 1].rdma.cntinfo |= (HPCDMA_EOR);
}
static struct sgiseeq_private *gpriv;
static struct net_device *gdev;

void sgiseeq_dump_rings(void)
{
	static int once = 0;
	struct sgiseeq_rx_desc *r = gpriv->srings.rx_desc;
	struct sgiseeq_tx_desc *t = gpriv->srings.tx_desc;
	volatile struct hpc3_ethregs *hregs = gpriv->hregs;
	int i;

	if(once)
		return;
	once++;
	printk("RING DUMP:\n");
	for(i = 0; i < SEEQ_RX_BUFFERS; i++) {
		printk("RX [%d]: @(%p) [%08lx,%08lx,%08lx] ",
		       i, (&r[i]), r[i].rdma.pbuf, r[i].rdma.cntinfo,
		       r[i].rdma.pnext);
		i += 1;
		printk("-- [%d]: @(%p) [%08lx,%08lx,%08lx]\n",
		       i, (&r[i]), r[i].rdma.pbuf, r[i].rdma.cntinfo,
		       r[i].rdma.pnext);
	}
	for(i = 0; i < SEEQ_TX_BUFFERS; i++) {
		printk("TX [%d]: @(%p) [%08lx,%08lx,%08lx] ",
		       i, (&t[i]), t[i].tdma.pbuf, t[i].tdma.cntinfo,
		       t[i].tdma.pnext);
		i += 1;
		printk("-- [%d]: @(%p) [%08lx,%08lx,%08lx]\n",
		       i, (&t[i]), t[i].tdma.pbuf, t[i].tdma.cntinfo,
		       t[i].tdma.pnext);
	}
	printk("INFO: [rx_new = %d rx_old=%d] [tx_new = %d tx_old = %d]\n",
	       gpriv->rx_new, gpriv->rx_old, gpriv->tx_new, gpriv->tx_old);
	printk("RREGS: rx_cbptr[%08lx] rx_ndptr[%08lx] rx_ctrl[%08lx]\n",
	       hregs->rx_cbptr, hregs->rx_ndptr, hregs->rx_ctrl);
	printk("TREGS: tx_cbptr[%08lx] tx_ndptr[%08lx] tx_ctrl[%08lx]\n",
	       hregs->tx_cbptr, hregs->tx_ndptr, hregs->tx_ctrl);
}
#define TSTAT_INIT_SEEQ (SEEQ_TCMD_IPT|SEEQ_TCMD_I16|SEEQ_TCMD_IC|SEEQ_TCMD_IUF)
#define TSTAT_INIT_EDLC ((TSTAT_INIT_SEEQ) | SEEQ_TCMD_RB2)
#define RDMACFG_INIT    (HPC3_ERXDCFG_FRXDC | HPC3_ERXDCFG_FEOP | HPC3_ERXDCFG_FIRQ)
static void init_seeq(struct net_device *dev, struct sgiseeq_private *sp,
		      volatile struct sgiseeq_regs *sregs)
{
	volatile struct hpc3_ethregs *hregs = sp->hregs;

	reset_hpc3_and_seeq(hregs, sregs);
	seeq_init_ring(dev);

	/* Setup to field the proper interrupt types. */
	if(sp->is_edlc) {
		sregs->tstat = (TSTAT_INIT_EDLC);
		sregs->rw.wregs.control = sp->control;
		sregs->rw.wregs.frame_gap = 0;
	} else {
		sregs->tstat = (TSTAT_INIT_SEEQ);
	}

	hregs->rx_dconfig |= RDMACFG_INIT;

	hregs->rx_ndptr = PHYSADDR(&sp->srings.rx_desc[0]);
	hregs->tx_ndptr = PHYSADDR(&sp->srings.tx_desc[0]);

	seeq_go(sp, hregs, sregs);
}
static inline void record_rx_errors(struct sgiseeq_private *sp,
				    unsigned char status)
{
	if(status & SEEQ_RSTAT_OVERF ||
	   status & SEEQ_RSTAT_SFRAME)
		sp->stats.rx_over_errors++;
	if(status & SEEQ_RSTAT_CERROR)
		sp->stats.rx_crc_errors++;
	if(status & SEEQ_RSTAT_DERROR)
		sp->stats.rx_frame_errors++;
	if(status & SEEQ_RSTAT_REOF)
		sp->stats.rx_errors++;
}
static inline void rx_maybe_restart(struct sgiseeq_private *sp,
				    volatile struct hpc3_ethregs *hregs,
				    volatile struct sgiseeq_regs *sregs)
{
	if(!(hregs->rx_ctrl & HPC3_ERXCTRL_ACTIVE)) {
		hregs->rx_ndptr = PHYSADDR(&sp->srings.rx_desc[sp->rx_new]);
		seeq_go(sp, hregs, sregs);
	}
}
#define for_each_rx(rd, sp) for((rd) = &(sp)->srings.rx_desc[(sp)->rx_new]; \
				!((rd)->rdma.cntinfo & HPCDMA_OWN); \
				(rd) = &(sp)->srings.rx_desc[(sp)->rx_new])
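
/* Note that this loop terminates as soon as a descriptor is still
 * owned by the HPC (HPCDMA_OWN set), and that the loop body is
 * expected to advance sp->rx_new itself -- the "increment" expression
 * only re-fetches the descriptor at the new index.  sgiseeq_rx()
 * below is the one user.
 */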
static inline void sgiseeq_rx(struct net_device *dev, struct sgiseeq_private *sp,
			      volatile struct hpc3_ethregs *hregs,
			      volatile struct sgiseeq_regs *sregs)
{
	struct sgiseeq_rx_desc *rd;
	struct sk_buff *skb = 0;
	unsigned char pkt_status;
	unsigned char *pkt_pointer = 0;
	int len = 0;
	unsigned int orig_end = PREV_RX(sp->rx_new);

	/* Service every received packet. */
	for_each_rx(rd, sp) {
		len = (PKT_BUF_SZ - (rd->rdma.cntinfo & HPCDMA_BCNT) - 3);
		pkt_pointer = (unsigned char *) rd->buf_vaddr;
		pkt_status = pkt_pointer[len + 2];

		if(pkt_status & SEEQ_RSTAT_FIG) {
			/* Packet is OK. */
			skb = dev_alloc_skb(len + 2);

			if(skb) {
				skb->dev = dev;
				skb_reserve(skb, 2);
				skb_put(skb, len);

				/* Copy out of kseg1 to avoid silly cache flush. */
				eth_copy_and_sum(skb, pkt_pointer + 2, len, 0);
				skb->protocol = eth_type_trans(skb, dev);
				netif_rx(skb);
				sp->stats.rx_packets++;
			} else {
				printk ("%s: Memory squeeze, deferring packet.\n",
					dev->name);
				sp->stats.rx_dropped++;
			}
		} else {
			record_rx_errors(sp, pkt_status);
		}

		/* Return the entry to the ring pool. */
		rd->rdma.cntinfo = (RCNTINFO_INIT);
		sp->rx_new = NEXT_RX(sp->rx_new);
	}
	sp->srings.rx_desc[orig_end].rdma.cntinfo &= ~(HPCDMA_EOR);
	sp->srings.rx_desc[PREV_RX(sp->rx_new)].rdma.cntinfo |= HPCDMA_EOR;
	rx_maybe_restart(sp, hregs, sregs);
}
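
/* A note on the arithmetic above (an inference from the code, not a
 * statement from the hardware manual): the count left in HPCDMA_BCNT
 * evidently counts down as the HPC fills the buffer, so
 * PKT_BUF_SZ - BCNT is the number of bytes written, and the receiver
 * trails the frame with status bytes -- hence the "- 3" on the length
 * and the status read at pkt_pointer[len + 2].  The two EOR updates
 * then slide the end-of-ring mark from the old last descriptor to the
 * new one so the HPC never runs off the ring.
 */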
static inline void tx_maybe_reset_collisions(struct sgiseeq_private *sp,
					     volatile struct sgiseeq_regs *sregs)
{
	if(sp->is_edlc) {
		sregs->rw.wregs.control = sp->control & ~(SEEQ_CTRL_XCNT);
		sregs->rw.wregs.control = sp->control;
	}
}
static inline void kick_tx(struct sgiseeq_tx_desc *td,
			   volatile struct hpc3_ethregs *hregs)
{
	/* If the HPC isn't doing anything, and there are more packets
	 * with ETXD cleared and XIU set, we must make very certain
	 * that we restart the HPC else we risk locking up the
	 * adapter.  The following code is only safe iff the HPCDMA
	 * is not active!
	 */
	while((td->tdma.cntinfo & (HPCDMA_XIU | HPCDMA_ETXD)) ==
	      (HPCDMA_XIU | HPCDMA_ETXD))
		td = (struct sgiseeq_tx_desc *)
		     KSEG1ADDR(td->tdma.pnext);
	if(td->tdma.cntinfo & HPCDMA_XIU) {
		hregs->tx_ndptr = PHYSADDR(td);
		hregs->tx_ctrl = HPC3_ETXCTRL_ACTIVE;
	}
}
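
/* To spell the walk above out (a reading of the code, nothing more):
 * a descriptor with both XIU and ETXD set was queued and has already
 * been transmitted, so it is skipped; the first descriptor found with
 * XIU set but ETXD clear was queued and is still pending, so DMA is
 * restarted right at it; and if the scan ends on a descriptor with
 * XIU clear, the chain is fully drained and there is nothing to kick.
 */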
static inline void sgiseeq_tx(struct net_device *dev, struct sgiseeq_private *sp,
			      volatile struct hpc3_ethregs *hregs,
			      volatile struct sgiseeq_regs *sregs)
{
	struct sgiseeq_tx_desc *td;
	unsigned long status = hregs->tx_ctrl;
	int j;

	tx_maybe_reset_collisions(sp, sregs);

	if(!(status & (HPC3_ETXCTRL_ACTIVE | SEEQ_TSTAT_PTRANS))) {
		/* Oops, HPC detected some sort of error. */
		if(status & SEEQ_TSTAT_R16)
			sp->stats.tx_aborted_errors++;
		if(status & SEEQ_TSTAT_UFLOW)
			sp->stats.tx_fifo_errors++;
		if(status & SEEQ_TSTAT_LCLS)
			sp->stats.collisions++;
	}

	/* Ack 'em... */
	for(j = sp->tx_old; j != sp->tx_new; j = NEXT_TX(j)) {
		td = &sp->srings.tx_desc[j];

		if(!(td->tdma.cntinfo & (HPCDMA_XIU)))
			break;
		if(!(td->tdma.cntinfo & (HPCDMA_ETXD))) {
			if(!(status & HPC3_ETXCTRL_ACTIVE)) {
				hregs->tx_ndptr = PHYSADDR(td);
				hregs->tx_ctrl = HPC3_ETXCTRL_ACTIVE;
			}
			break;
		}
		sp->stats.tx_packets++;
		sp->tx_old = NEXT_TX(sp->tx_old);
		td->tdma.cntinfo &= ~(HPCDMA_XIU | HPCDMA_XIE);
		td->tdma.cntinfo |= HPCDMA_EOX;
	}
}
static inline void tx_maybe_unbusy(struct sgiseeq_private *sp,
				   struct net_device *dev)
{
	if((TX_BUFFS_AVAIL(sp) >= 0) && dev->tbusy) {
		dev->tbusy = 0;
		mark_bh(NET_BH);
	}
}
static void sgiseeq_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct sgiseeq_private *sp = (struct sgiseeq_private *) dev->priv;
	volatile struct hpc3_ethregs *hregs = sp->hregs;
	volatile struct sgiseeq_regs *sregs = sp->sregs;

	/* Ack the IRQ and set software state. */
	hregs->rx_reset = HPC3_ERXRST_CLRIRQ;
	dev->interrupt = 1;

	/* Always check for received packets. */
	sgiseeq_rx(dev, sp, hregs, sregs);

	/* Only check for tx acks if we have something queued. */
	if(sp->tx_old != sp->tx_new)
		sgiseeq_tx(dev, sp, hregs, sregs);

	tx_maybe_unbusy(sp, dev);
	dev->interrupt = 0;
}
static int sgiseeq_open(struct net_device *dev)
{
	struct sgiseeq_private *sp = (struct sgiseeq_private *) dev->priv;
	volatile struct sgiseeq_regs *sregs = sp->sregs;
	unsigned long flags;

	save_flags(flags); cli();
	if(request_irq(dev->irq, sgiseeq_interrupt, 0, sgiseeqstr, (void *) dev)) {
		printk("Seeq8003: Can't get irq %d\n", dev->irq);
		restore_flags(flags);
		return -EAGAIN;
	}

	init_seeq(dev, sp, sregs);

	dev->tbusy = 0;
	dev->interrupt = 0;
	dev->start = 1;

	restore_flags(flags);
	return 0;
}
static int sgiseeq_close(struct net_device *dev)
{
	struct sgiseeq_private *sp = (struct sgiseeq_private *) dev->priv;
	volatile struct sgiseeq_regs *sregs = sp->sregs;

	dev->start = 0;
	dev->tbusy = 1;

	/* Shutdown the Seeq. */
	reset_hpc3_and_seeq(sp->hregs, sregs);

	free_irq(dev->irq, dev);

	return 0;
}
static inline int sgiseeq_reset(struct net_device *dev)
{
	struct sgiseeq_private *sp = (struct sgiseeq_private *) dev->priv;
	volatile struct sgiseeq_regs *sregs = sp->sregs;

	init_seeq(dev, sp, sregs);

	dev->trans_start = jiffies;
	dev->interrupt = 0;
	dev->start = 1;
	dev->tbusy = 0;

	return 0;
}
void sgiseeq_my_reset(void)
{
	printk("RESET!\n");
	sgiseeq_reset(gdev);
}
static inline int verify_tx(struct sgiseeq_private *sp,
			    struct net_device *dev,
			    struct sk_buff *skb)
{
	/* Are we bolixed? */
	if(dev->tbusy) {
		int tickssofar = jiffies - dev->trans_start;

		if(tickssofar < 20)
			return -1;

		printk("%s: transmit timed out, ticks=%d resetting\n",
		       dev->name, tickssofar);
		sgiseeq_reset(dev);
		return -1;
	}

	/* Are we getting in someone else's way? */
	if(test_and_set_bit(0, (void *) &dev->tbusy) != 0) {
		printk("%s: Transmitter access conflict.\n", dev->name);
		return -1;
	}

	/* Can we even send anything? */
	if(!TX_BUFFS_AVAIL(sp))
		return -1;

	return 0;
}
static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct sgiseeq_private *sp = (struct sgiseeq_private *) dev->priv;
	volatile struct hpc3_ethregs *hregs = sp->hregs;
	unsigned long flags;
	struct sgiseeq_tx_desc *td;
	int skblen, len, entry;

	if(verify_tx(sp, dev, skb))
		return -1; /* Yeee... */

	save_flags(flags); cli();

	/* Setup... */
	skblen = skb->len;
	len = (skblen <= ETH_ZLEN) ? ETH_ZLEN : skblen;
	entry = sp->tx_new;
	td = &sp->srings.tx_desc[entry];

	/* Create entry.  There are so many races with adding a new
	 * descriptor to the chain:
	 * 1) Assume that the HPC is off processing a DMA chain while
	 *    we are changing all of the following.
	 * 2) Do not allow the HPC to look at a new descriptor until
	 *    we have completely set up its state.  This means, do
	 *    not clear HPCDMA_EOX in the current last descriptor
	 *    until the one we are adding looks consistent and could
	 *    be processed right now.
	 * 3) The tx interrupt code must notice when we've added a new
	 *    entry and the HPC got to the end of the chain before we
	 *    added this new entry and restarted it.
	 */
	memcpy((char *) td->buf_vaddr, skb->data, skblen);
	td->tdma.cntinfo = ((len) & HPCDMA_BCNT) |
			   (HPCDMA_XIU | HPCDMA_EOXP | HPCDMA_XIE | HPCDMA_EOX);
	if(sp->tx_old != sp->tx_new) {
		struct sgiseeq_tx_desc *backend;

		backend = &sp->srings.tx_desc[PREV_TX(sp->tx_new)];
		backend->tdma.cntinfo &= ~(HPCDMA_EOX);
	}
	sp->tx_new = NEXT_TX(sp->tx_new); /* Advance. */

	/* Maybe kick the HPC back into motion. */
	if(!(hregs->tx_ctrl & HPC3_ETXCTRL_ACTIVE))
		kick_tx(&sp->srings.tx_desc[sp->tx_old], hregs);

	dev->trans_start = jiffies;
	dev_kfree_skb(skb);

	if(TX_BUFFS_AVAIL(sp))
		dev->tbusy = 0;
	restore_flags(flags);

	return 0;
}
static struct enet_statistics *sgiseeq_get_stats(struct net_device *dev)
{
	struct sgiseeq_private *sp = (struct sgiseeq_private *) dev->priv;

	return &sp->stats;
}
static void sgiseeq_set_multicast(struct net_device *dev)
{
	/* XXX Multicast and promiscuous mode are not implemented yet;
	 * XXX see the receive mode setup in seeq_init_ring().
	 */
}
static inline void setup_tx_ring(struct sgiseeq_tx_desc *buf, int nbufs)
{
	int i = 0;

	while(i < (nbufs - 1)) {
		buf[i].tdma.pnext = PHYSADDR(&buf[i + 1]);
		buf[i].tdma.pbuf = 0;
		i++;
	}
	buf[i].tdma.pnext = PHYSADDR(&buf[0]);
}
static inline void setup_rx_ring(struct sgiseeq_rx_desc *buf, int nbufs)
{
	int i = 0;

	while(i < (nbufs - 1)) {
		buf[i].rdma.pnext = PHYSADDR(&buf[i + 1]);
		buf[i].rdma.pbuf = 0;
		i++;
	}
	buf[i].rdma.pbuf = 0;
	buf[i].rdma.pnext = PHYSADDR(&buf[0]);
}
static char onboard_eth_addr[6];

#define ALIGNED(x)  ((((unsigned long)(x)) + 0xf) & ~(0xf))
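
/* For instance, ALIGNED(0x88001003) == 0x88001010: the macro rounds an
 * address up to the next 16-byte boundary (already-aligned addresses
 * pass through unchanged), which is how the descriptor vectors below
 * satisfy the HPC's 8-byte alignment requirement with room to spare.
 */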
int sgiseeq_init(struct net_device *dev, struct sgiseeq_regs *sregs,
		 struct hpc3_ethregs *hregs, int irq)
{
	static unsigned version_printed = 0;
	struct sgiseeq_private *sp;
	int i;

	dev = init_etherdev(0, sizeof(struct sgiseeq_private));

	dev->priv = (struct sgiseeq_private *) get_free_page(GFP_KERNEL);
	if(dev->priv == NULL)
		return -ENOMEM;

	if(!version_printed++)
		printk(version);

	printk("%s: SGI Seeq8003 ", dev->name);

	for(i = 0; i < 6; i++)
		printk("%2.2x%c",
		       dev->dev_addr[i] = onboard_eth_addr[i],
		       i == 5 ? ' ' : ':');
	printk("\n");

	sp = (struct sgiseeq_private *) dev->priv;
	gpriv = sp;
	gdev = dev;
	memset((char *) dev->priv, 0, sizeof(struct sgiseeq_private));
	sp->sregs = sregs;
	sp->hregs = hregs;
	sp->name = sgiseeqstr;

	sp->srings.rx_desc = (struct sgiseeq_rx_desc *)
	                     (KSEG1ADDR(ALIGNED(&sp->srings.rxvector[0])));
	dma_cache_wback_inv((unsigned long) &sp->srings.rxvector,
			    sizeof(sp->srings.rxvector));
	sp->srings.tx_desc = (struct sgiseeq_tx_desc *)
	                     (KSEG1ADDR(ALIGNED(&sp->srings.txvector[0])));
	dma_cache_wback_inv((unsigned long) &sp->srings.txvector,
			    sizeof(sp->srings.txvector));

	/* A couple calculations now, saves many cycles later. */
	setup_rx_ring(sp->srings.rx_desc, SEEQ_RX_BUFFERS);
	setup_tx_ring(sp->srings.tx_desc, SEEQ_TX_BUFFERS);

	/* Reset the chip. */
	hpc3_eth_reset((volatile struct hpc3_ethregs *) hregs);

	sp->is_edlc = !(sregs->rw.rregs.collision_tx[0] & 0xff);
	if(sp->is_edlc) {
		sp->control = (SEEQ_CTRL_XCNT | SEEQ_CTRL_ACCNT |
			       SEEQ_CTRL_SFLAG | SEEQ_CTRL_ESHORT |
			       SEEQ_CTRL_ENCARR);
	}

	dev->open = sgiseeq_open;
	dev->stop = sgiseeq_close;
	dev->hard_start_xmit = sgiseeq_start_xmit;
	dev->get_stats = sgiseeq_get_stats;
	dev->set_multicast_list = sgiseeq_set_multicast;
	dev->irq = irq;

	return 0;
}
static inline unsigned char str2hexnum(unsigned char c)
{
	if(c >= '0' && c <= '9')
		return c - '0';
	if(c >= 'a' && c <= 'f')
		return c - 'a' + 10;
	return 0; /* Anything else maps to zero. */
}
static inline void str2eaddr(unsigned char *ea, unsigned char *str)
{
	int i;

	for(i = 0; i < 6; i++) {
		unsigned char num;

		if(*str == ':')
			str++;
		num = str2hexnum(*str++) << 4;
		num |= (str2hexnum(*str++));
		ea[i] = num;
	}
}
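
/* Usage sketch (hypothetical input, for illustration only): given the
 * PROM string "08:00:69:0a:0b:0c", str2eaddr() fills ea[] with
 * { 0x08, 0x00, 0x69, 0x0a, 0x0b, 0x0c }.  With str2hexnum() as
 * above, uppercase hex digits fall through to zero, so the ARCS
 * "eaddr" value is assumed to be lowercase.
 */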
int sgiseeq_probe(struct net_device *dev)
{
	static int initialized;
	char *ep;

	if(initialized) /* Already initialized? */
		return 1;
	initialized++;

	/* First get the ethernet address of the onboard
	 * interface from ARCS.
	 * (This is fragile; PROM doesn't like running from cache.)
	 */
	ep = romvec->get_evar("eaddr");
	str2eaddr(onboard_eth_addr, ep);

	return sgiseeq_init(dev,
			    (struct sgiseeq_regs *) (KSEG1ADDR(0x1fbd4000)),
			    &hpc3c0->ethregs, 3);
}