/*
	drivers/net/tulip/interrupt.c

	Maintained by Valerie Henson <val_henson@linux.intel.com>
	Copyright 2000,2001  The Linux Kernel Team
	Written/copyright 1994-2001 by Donald Becker.

	This software may be used and distributed according to the terms
	of the GNU General Public License, incorporated herein by reference.

	Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
	for more information on this driver, or visit the project
	Web page at http://sourceforge.net/projects/tulip/
*/

#include <linux/pci.h>
#include "tulip.h"
#include <linux/etherdevice.h>

int tulip_rx_copybreak;
unsigned int tulip_max_interrupt_work;
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
#define MIT_SIZE 15
#define MIT_TABLE 15 /* We use 0 or max */

static unsigned int mit_table[MIT_SIZE+1] =
{
	/* CSR11 21143 hardware Mitigation Control Interrupt
	   We use only RX mitigation; other techniques are used for
	   TX intr. mitigation.

	   31    Cycle Size (timer control)
	   30:27 TX timer in 16 * Cycle size
	   26:24 TX No pkts before Int.
	   23:20 RX timer in Cycle size
	   19:17 RX No pkts before Int.
	   16    Continuous Mode (CM)
	*/

	0x0,		/* IM disabled */
	0x80150000,	/* RX time = 1, RX pkts = 2, CM = 1 */
	0x80150000,
	0x80270000,
	0x80370000,
	0x80490000,
	0x80590000,
	0x80690000,
	0x807B0000,
	0x808B0000,
	0x809D0000,
	0x80AD0000,
	0x80BD0000,
	0x80CF0000,
	0x80DF0000,
//	0x80FF0000	/* RX time = 16, RX pkts = 7, CM = 1 */
	0x80F10000	/* RX time = 16, RX pkts = 0, CM = 1 */
};
#endif
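
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
/* Editorial sketch, not used by the driver: how the mit_table words
 * above decompose into the CSR11 fields documented in the table
 * comment.  For example, mit_word(1, 1, 2, 1) == 0x80150000, the
 * second table entry (RX time = 1, RX pkts = 2, CM = 1). */
static inline u32 mit_word(u32 cycle_size, u32 rx_timer, u32 rx_pkts, u32 cm)
{
	return (cycle_size << 31) | (rx_timer << 20) | (rx_pkts << 17) | (cm << 16);
}
#endif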

int tulip_refill_rx(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	int entry;
	int refilled = 0;

	/* Refill the Rx ring buffers. */
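	/* Note: cur_rx and dirty_rx are free-running counters; their
	   difference is the number of slots consumed but not yet refilled,
	   and each is reduced mod RX_RING_SIZE only when used as an index. */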
	for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
		entry = tp->dirty_rx % RX_RING_SIZE;
		if (tp->rx_buffers[entry].skb == NULL) {
			struct sk_buff *skb;
			dma_addr_t mapping;

			skb = tp->rx_buffers[entry].skb = dev_alloc_skb(PKT_BUF_SZ);
			if (skb == NULL)
				break;

			mapping = pci_map_single(tp->pdev, skb->data, PKT_BUF_SZ,
						 PCI_DMA_FROMDEVICE);
			tp->rx_buffers[entry].mapping = mapping;

			skb->dev = dev;	/* Mark as being used by this device. */
			tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
			refilled++;
		}
		tp->rx_ring[entry].status = cpu_to_le32(DescOwned);
	}
	if (tp->chip_id == LC82C168) {
		if (((ioread32(tp->base_addr + CSR5) >> 17) & 0x07) == 4) {
			/* Rx stopped due to out of buffers,
			 * restart it
			 */
			iowrite32(0x01, tp->base_addr + CSR2);
		}
	}
	return refilled;
}

#ifdef CONFIG_TULIP_NAPI

void oom_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct tulip_private *tp = netdev_priv(dev);
	netif_rx_schedule(dev, &tp->napi);
}

int tulip_poll(struct napi_struct *napi, int budget)
{
	struct tulip_private *tp = container_of(napi, struct tulip_private, napi);
	struct net_device *dev = tp->dev;
	int entry = tp->cur_rx % RX_RING_SIZE;
	int work_done = 0;
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
	int received = 0;
#endif

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION

	/* One buffer is needed for mitigation activation; or it might be a
	   bug in the ring buffer code; check later -- JHS */

	if (budget >= RX_RING_SIZE)
		budget--;
#endif

	if (tulip_debug > 4)
		printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
		       tp->rx_ring[entry].status);

	do {
		if (ioread32(tp->base_addr + CSR5) == 0xffffffff) {
			printk(KERN_DEBUG " In tulip_poll(), hardware disappeared.\n");
			break;
		}
		/* Acknowledge current RX interrupt sources. */
		iowrite32((RxIntr | RxNoBuf), tp->base_addr + CSR5);

		/* If we own the next entry, it is a new packet. Send it up. */
		while (!(tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
			s32 status = le32_to_cpu(tp->rx_ring[entry].status);

			if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx)
				break;

			if (tulip_debug > 5)
				printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
				       dev->name, entry, status);

			if (++work_done >= budget)
				goto not_done;

			if ((status & 0x38008300) != 0x0300) {
				if ((status & 0x38000300) != 0x0300) {
					/* Ignore earlier buffers. */
					if ((status & 0xffff) != 0x7fff) {
						if (tulip_debug > 1)
							printk(KERN_WARNING "%s: Oversized Ethernet frame "
							       "spanned multiple buffers, status %8.8x!\n",
							       dev->name, status);
						tp->stats.rx_length_errors++;
					}
				} else if (status & RxDescFatalErr) {
					/* There was a fatal error. */
					if (tulip_debug > 2)
						printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
						       dev->name, status);
					tp->stats.rx_errors++; /* end of a packet. */
					if (status & 0x0890) tp->stats.rx_length_errors++;
					if (status & 0x0004) tp->stats.rx_frame_errors++;
					if (status & 0x0002) tp->stats.rx_crc_errors++;
					if (status & 0x0001) tp->stats.rx_fifo_errors++;
				}
			} else {
				/* Omit the four octet CRC from the length. */
				short pkt_len = ((status >> 16) & 0x7ff) - 4;
				struct sk_buff *skb;

#ifndef final_version
				if (pkt_len > 1518) {
					printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
					       dev->name, pkt_len, pkt_len);
					pkt_len = 1518;
					tp->stats.rx_length_errors++;
				}
#endif
				/* Check if the packet is long enough to accept without copying
				   to a minimally-sized skbuff. */
				if (pkt_len < tulip_rx_copybreak &&
				    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
					skb_reserve(skb, 2);	/* 16 byte align the IP header */
					pci_dma_sync_single_for_cpu(tp->pdev,
								    tp->rx_buffers[entry].mapping,
								    pkt_len, PCI_DMA_FROMDEVICE);
#if !defined(__alpha__)
					skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
								pkt_len);
					skb_put(skb, pkt_len);
#else
					memcpy(skb_put(skb, pkt_len),
					       tp->rx_buffers[entry].skb->data,
					       pkt_len);
#endif
					pci_dma_sync_single_for_device(tp->pdev,
								       tp->rx_buffers[entry].mapping,
								       pkt_len, PCI_DMA_FROMDEVICE);
				} else {	/* Pass up the skb already on the Rx ring. */
					char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
							     pkt_len);

#ifndef final_version
					if (tp->rx_buffers[entry].mapping !=
					    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
						printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
						       "do not match in tulip_rx: %08x vs. %08llx %p / %p.\n",
						       dev->name,
						       le32_to_cpu(tp->rx_ring[entry].buffer1),
						       (unsigned long long)tp->rx_buffers[entry].mapping,
						       skb->head, temp);
					}
#endif

					pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
							 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

					tp->rx_buffers[entry].skb = NULL;
					tp->rx_buffers[entry].mapping = 0;
				}
				skb->protocol = eth_type_trans(skb, dev);

				netif_receive_skb(skb);

				dev->last_rx = jiffies;
				tp->stats.rx_packets++;
				tp->stats.rx_bytes += pkt_len;
			}
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
			received++;
#endif

			entry = (++tp->cur_rx) % RX_RING_SIZE;
			if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4)
				tulip_refill_rx(dev);
		}

		/* New ack strategy... the irq handler no longer acks Rx;
		   hopefully this helps. */

		/* Really bad things can happen here... If a new packet arrives
		 * and an irq arrives (tx or just due to an occasionally unset
		 * mask), it will be acked by the irq handler, but the new thread
		 * is not scheduled. It is a major hole in the design.
		 * No idea how to fix this if "playing with fire" fails
		 * tomorrow (night 011029). If it does not fail, we have
		 * finally won: the amount of IO did not increase at all. */
	} while ((ioread32(tp->base_addr + CSR5) & RxIntr));

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION

	/* We use this simplistic scheme for IM. It's proven by
	   real life installations. We could have IM enabled
	   continuously, but this would cause unnecessary latency.
	   Unfortunately we can't use all the NET_RX_* feedback here.
	   That would turn on IM for devices that are not contributing
	   to backlog congestion, with unnecessary latency.

	   We monitor the device RX ring and have:

	   HW Interrupt Mitigation either ON or OFF.

	   ON:  More than 1 pkt received (per intr.) OR we are dropping
	   OFF: Only 1 pkt received

	   Note: we only use the min and max (0, 15) settings from mit_table. */

	if (tp->flags & HAS_INTR_MITIGATION) {
		if (received > 1) {
			if (!tp->mit_on) {
				tp->mit_on = 1;
				iowrite32(mit_table[MIT_TABLE], tp->base_addr + CSR11);
			}
		} else {
			if (tp->mit_on) {
				tp->mit_on = 0;
				iowrite32(0, tp->base_addr + CSR11);
			}
		}
	}

#endif /* CONFIG_TULIP_NAPI_HW_MITIGATION */

	tulip_refill_rx(dev);

	/* If the RX ring is not full, we are out of memory. */
	if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		goto oom;

	/* Remove us from the polling list and enable RX intr. */
	netif_rx_complete(dev, napi);
	iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr + CSR7);

	/* The last op happens after poll completion. Which means the following:
	 * 1. it can race with disabling irqs in the irq handler
	 * 2. it can race with dis-/enabling irqs in other poll threads
	 * 3. if an irq is raised after the loop begins, it will be immediately
	 *    triggered here.
	 *
	 * Summarizing: the logic results in some redundant irqs both
	 * due to races in masking and due to too-late acking of already
	 * processed irqs. But it must not result in losing events.
	 */
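
	/* Note: this path returns work_done < budget (budget exhaustion
	   jumps to not_done below instead), which is what tells the NAPI
	   core that polling is complete. */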

	return work_done;

 not_done:
	if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 ||
	    tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		tulip_refill_rx(dev);

	if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		goto oom;

	return work_done;

 oom:	/* Executed with RX ints disabled */

	/* Start the timer, stop polling, but do not enable rx interrupts. */
	mod_timer(&tp->oom_timer, jiffies + 1);

	/* Think: timer_pending() was an explicit signature of a bug.
	 * The timer can be pending now, but may have fired and completed
	 * before we did netif_rx_complete(). See? We would lose it. */

	/* Remove ourselves from the polling list. */
	netif_rx_complete(dev, napi);

	return work_done;
}

#else /* CONFIG_TULIP_NAPI */

static int tulip_rx(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	int entry = tp->cur_rx % RX_RING_SIZE;
	int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
	int received = 0;

	if (tulip_debug > 4)
		printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
		       tp->rx_ring[entry].status);
	/* If we own the next entry, it is a new packet. Send it up. */
	while (!(tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
		s32 status = le32_to_cpu(tp->rx_ring[entry].status);

		if (tulip_debug > 5)
			printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
			       dev->name, entry, status);
		if (--rx_work_limit < 0)
			break;
		if ((status & 0x38008300) != 0x0300) {
			if ((status & 0x38000300) != 0x0300) {
				/* Ignore earlier buffers. */
				if ((status & 0xffff) != 0x7fff) {
					if (tulip_debug > 1)
						printk(KERN_WARNING "%s: Oversized Ethernet frame "
						       "spanned multiple buffers, status %8.8x!\n",
						       dev->name, status);
					tp->stats.rx_length_errors++;
				}
			} else if (status & RxDescFatalErr) {
				/* There was a fatal error. */
				if (tulip_debug > 2)
					printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
					       dev->name, status);
				tp->stats.rx_errors++; /* end of a packet. */
				if (status & 0x0890) tp->stats.rx_length_errors++;
				if (status & 0x0004) tp->stats.rx_frame_errors++;
				if (status & 0x0002) tp->stats.rx_crc_errors++;
				if (status & 0x0001) tp->stats.rx_fifo_errors++;
			}
		} else {
			/* Omit the four octet CRC from the length. */
			short pkt_len = ((status >> 16) & 0x7ff) - 4;
			struct sk_buff *skb;

#ifndef final_version
			if (pkt_len > 1518) {
				printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
				       dev->name, pkt_len, pkt_len);
				pkt_len = 1518;
				tp->stats.rx_length_errors++;
			}
#endif

			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < tulip_rx_copybreak &&
			    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single_for_cpu(tp->pdev,
							    tp->rx_buffers[entry].mapping,
							    pkt_len, PCI_DMA_FROMDEVICE);
#if !defined(__alpha__)
				skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
							pkt_len);
				skb_put(skb, pkt_len);
#else
				memcpy(skb_put(skb, pkt_len),
				       tp->rx_buffers[entry].skb->data,
				       pkt_len);
#endif
				pci_dma_sync_single_for_device(tp->pdev,
							       tp->rx_buffers[entry].mapping,
							       pkt_len, PCI_DMA_FROMDEVICE);
			} else {	/* Pass up the skb already on the Rx ring. */
				char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
						     pkt_len);

#ifndef final_version
				if (tp->rx_buffers[entry].mapping !=
				    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
					printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
					       "do not match in tulip_rx: %08x vs. %08llx %p / %p.\n",
					       dev->name,
					       le32_to_cpu(tp->rx_ring[entry].buffer1),
					       (unsigned long long)tp->rx_buffers[entry].mapping,
					       skb->head, temp);
				}
#endif

				pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
						 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

				tp->rx_buffers[entry].skb = NULL;
				tp->rx_buffers[entry].mapping = 0;
			}
			skb->protocol = eth_type_trans(skb, dev);

			netif_rx(skb);

			dev->last_rx = jiffies;
			tp->stats.rx_packets++;
			tp->stats.rx_bytes += pkt_len;
		}
		received++;
		entry = (++tp->cur_rx) % RX_RING_SIZE;
	}
	return received;
}

#endif /* CONFIG_TULIP_NAPI */
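
/* Editorial note: the non-NAPI tulip_rx() above delivers packets with
   netif_rx() from hard-irq context, whereas the NAPI tulip_poll() path
   earlier in this file uses netif_receive_skb() from the poll (softirq)
   context. */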

static inline unsigned int phy_interrupt (struct net_device *dev)
{
#ifdef __hppa__
	struct tulip_private *tp = netdev_priv(dev);
	int csr12 = ioread32(tp->base_addr + CSR12) & 0xff;

	if (csr12 != tp->csr12_shadow) {
		/* ack interrupt */
		iowrite32(csr12 | 0x02, tp->base_addr + CSR12);
		tp->csr12_shadow = csr12;
		/* do link change stuff */
		spin_lock(&tp->lock);
		tulip_check_duplex(dev);
		spin_unlock(&tp->lock);
		/* clear irq ack bit */
		iowrite32(csr12 & ~0x02, tp->base_addr + CSR12);

		return 1;
	}
#endif

	return 0;
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
irqreturn_t tulip_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;
	int csr5;
	int missed;
	int rx = 0;
	int tx = 0;
	int oi = 0;
	int maxrx = RX_RING_SIZE;
	int maxtx = TX_RING_SIZE;
	int maxoi = TX_RING_SIZE;
#ifdef CONFIG_TULIP_NAPI
	int rxd = 0;
#else
	int entry;
#endif
	unsigned int work_count = tulip_max_interrupt_work;
	unsigned int handled = 0;

	/* Let's see whether the interrupt really is for us */
	csr5 = ioread32(ioaddr + CSR5);

	if (tp->flags & HAS_PHY_IRQ)
		handled = phy_interrupt (dev);

	if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
		return IRQ_RETVAL(handled);

	tp->nir++;

	do {

#ifdef CONFIG_TULIP_NAPI

		if (!rxd && (csr5 & (RxIntr | RxNoBuf))) {
			rxd++;
			/* Mask RX intrs and add the device to the poll list. */
			iowrite32(tulip_tbl[tp->chip_id].valid_intrs & ~RxPollInt,
				  ioaddr + CSR7);
			netif_rx_schedule(dev, &tp->napi);

			if (!(csr5 & ~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass)))
				break;
		}

		/* Acknowledge the interrupt sources we handle here ASAP;
		   the poll function does Rx and RxNoBuf acking. */
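		/* Note: 0x0001ff3f covers every CSR5 status bit except
		   bits 6-7 (RxIntr, RxNoBuf), which tulip_poll() acks. */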
		iowrite32(csr5 & 0x0001ff3f, ioaddr + CSR5);

#else
		/* Acknowledge all of the current interrupt sources ASAP. */
		iowrite32(csr5 & 0x0001ffff, ioaddr + CSR5);

		if (csr5 & (RxIntr | RxNoBuf)) {
			rx += tulip_rx(dev);
			tulip_refill_rx(dev);
		}

#endif /* CONFIG_TULIP_NAPI */

		if (tulip_debug > 4)
			printk(KERN_DEBUG "%s: interrupt csr5=%#8.8x new csr5=%#8.8x.\n",
			       dev->name, csr5, ioread32(ioaddr + CSR5));

		if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
			unsigned int dirty_tx;

			spin_lock(&tp->lock);

			for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
			     dirty_tx++) {
				int entry = dirty_tx % TX_RING_SIZE;
				int status = le32_to_cpu(tp->tx_ring[entry].status);

				if (status < 0)
					break;	/* It still has not been Txed */

				/* Check for Rx filter setup frames. */
				if (tp->tx_buffers[entry].skb == NULL) {
					/* test because dummy frames not mapped */
					if (tp->tx_buffers[entry].mapping)
						pci_unmap_single(tp->pdev,
								 tp->tx_buffers[entry].mapping,
								 sizeof(tp->setup_frame),
								 PCI_DMA_TODEVICE);
					continue;
				}

				if (status & 0x8000) {
					/* There was a major error; log it. */
#ifndef final_version
					if (tulip_debug > 1)
						printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
						       dev->name, status);
#endif
					tp->stats.tx_errors++;
					if (status & 0x4104) tp->stats.tx_aborted_errors++;
					if (status & 0x0C00) tp->stats.tx_carrier_errors++;
					if (status & 0x0200) tp->stats.tx_window_errors++;
					if (status & 0x0002) tp->stats.tx_fifo_errors++;
					if ((status & 0x0080) && tp->full_duplex == 0)
						tp->stats.tx_heartbeat_errors++;
				} else {
					tp->stats.tx_bytes +=
						tp->tx_buffers[entry].skb->len;
					tp->stats.collisions += (status >> 3) & 15;
					tp->stats.tx_packets++;
				}

				pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
						 tp->tx_buffers[entry].skb->len,
						 PCI_DMA_TODEVICE);

				/* Free the original skb. */
				dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
				tp->tx_buffers[entry].skb = NULL;
				tp->tx_buffers[entry].mapping = 0;
				tx++;
			}

#ifndef final_version
			if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
				printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d.\n",
				       dev->name, dirty_tx, tp->cur_tx);
				dirty_tx += TX_RING_SIZE;
			}
#endif

			if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
				netif_wake_queue(dev);

			tp->dirty_tx = dirty_tx;
			if (csr5 & TxDied) {
				if (tulip_debug > 2)
					printk(KERN_WARNING "%s: The transmitter stopped."
					       " CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
					       dev->name, csr5, ioread32(ioaddr + CSR6), tp->csr6);
				tulip_restart_rxtx(tp);
			}
			spin_unlock(&tp->lock);
		}

		/* Log errors. */
		if (csr5 & AbnormalIntr) {	/* Abnormal error summary bit. */
			if (csr5 == 0xffffffff)
				break;
			if (csr5 & TxJabber) tp->stats.tx_errors++;
			if (csr5 & TxFIFOUnderflow) {
				if ((tp->csr6 & 0xC000) != 0xC000)
					tp->csr6 += 0x4000;	/* Bump up the Tx threshold */
				else
					tp->csr6 |= 0x00200000;	/* Store-n-forward. */
				/* Restart the transmit process. */
				tulip_restart_rxtx(tp);
				iowrite32(0, ioaddr + CSR1);
			}
			if (csr5 & (RxDied | RxNoBuf)) {
				if (tp->flags & COMET_MAC_ADDR) {
					iowrite32(tp->mc_filter[0], ioaddr + 0xAC);
					iowrite32(tp->mc_filter[1], ioaddr + 0xB0);
				}
			}
			if (csr5 & RxDied) {	/* Missed a Rx frame. */
				tp->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
				tp->stats.rx_errors++;
				tulip_start_rxtx(tp);
			}
			/*
			 * NB: t21142_lnk_change() does a del_timer_sync(), so be careful
			 * if this call is ever done under the spinlock.
			 */
			if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
				if (tp->link_change)
					(tp->link_change)(dev, csr5);
			}
			if (csr5 & SystemError) {
				int error = (csr5 >> 23) & 7;
				/* oops, we hit a PCI error.  The code produced corresponds
				 * to the reason:
				 *  0 - parity error
				 *  1 - master abort
				 *  2 - target abort
				 * Note that on parity error, we should do a software reset
				 * of the chip to get it back into a sane state (according
				 * to the 21142/3 docs that is).
				 *   -- rmk
				 */
				printk(KERN_ERR "%s: (%lu) System Error occurred (%d)\n",
				       dev->name, tp->nir, error);
			}
			/* Clear all error sources, including undocumented ones! */
			iowrite32(0x0800f7ba, ioaddr + CSR5);
			oi++;
		}
		if (csr5 & TimerInt) {

			if (tulip_debug > 2)
				printk(KERN_ERR "%s: Re-enabling interrupts, %8.8x.\n",
				       dev->name, csr5);
			iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
			tp->ttimer = 0;
			oi++;
		}
		if (tx > maxtx || rx > maxrx || oi > maxoi) {
			if (tulip_debug > 1)
				printk(KERN_WARNING "%s: Too much work during an interrupt, "
				       "csr5=0x%8.8x. (%lu) (%d,%d,%d)\n",
				       dev->name, csr5, tp->nir, tx, rx, oi);

			/* Acknowledge all interrupt sources. */
			iowrite32(0x8001ffff, ioaddr + CSR5);
			if (tp->flags & HAS_INTR_MITIGATION) {
				/* Josip Loncaric at ICASE did extensive experimentation
				   to develop a good interrupt mitigation setting. */
				iowrite32(0x8b240000, ioaddr + CSR11);
			} else if (tp->chip_id == LC82C168) {
				/* the LC82C168 doesn't have a hw timer. */
				iowrite32(0x00, ioaddr + CSR7);
				mod_timer(&tp->timer, RUN_AT(HZ/50));
			} else {
				/* Mask all interrupting sources, set timer to
				   re-enable. */
				iowrite32(((~csr5) & 0x0001ebef) | AbnormalIntr | TimerInt,
					  ioaddr + CSR7);
				iowrite32(0x0012, ioaddr + CSR11);
			}
			break;
		}

		work_count--;
		if (work_count == 0)
			break;

		csr5 = ioread32(ioaddr + CSR5);

#ifdef CONFIG_TULIP_NAPI
		if (rxd)
			csr5 &= ~RxPollInt;
	} while ((csr5 & (TxNoBuf |
			  TxDied |
			  TxIntr |
			  TimerInt |
			  /* Abnormal intr. */
			  RxDied |
			  TxFIFOUnderflow |
			  TxJabber |
			  TPLnkFail |
			  SystemError)) != 0);
#else
	} while ((csr5 & (NormalIntr|AbnormalIntr)) != 0);

	tulip_refill_rx(dev);

	/* check if the card is in suspend mode */
	entry = tp->dirty_rx % RX_RING_SIZE;
	if (tp->rx_buffers[entry].skb == NULL) {
		if (tulip_debug > 1)
			printk(KERN_WARNING "%s: in rx suspend mode: (%lu) (tp->cur_rx = %u, ttimer = %d, rx = %d) go/stay in suspend mode\n",
			       dev->name, tp->nir, tp->cur_rx, tp->ttimer, rx);
		if (tp->chip_id == LC82C168) {
			iowrite32(0x00, ioaddr + CSR7);
			mod_timer(&tp->timer, RUN_AT(HZ/50));
		} else {
			if (tp->ttimer == 0 || (ioread32(ioaddr + CSR11) & 0xffff) == 0) {
				if (tulip_debug > 1)
					printk(KERN_WARNING "%s: in rx suspend mode: (%lu) set timer\n",
					       dev->name, tp->nir);
				iowrite32(tulip_tbl[tp->chip_id].valid_intrs | TimerInt,
					  ioaddr + CSR7);
				iowrite32(TimerInt, ioaddr + CSR5);
				iowrite32(12, ioaddr + CSR11);
				tp->ttimer = 1;
			}
		}
	}
#endif /* CONFIG_TULIP_NAPI */
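
	/* CSR8 is the missed-frames counter: bits 15:0 hold the count and
	   bit 16 is the counter-overflow flag, hence the 0x1ffff mask and
	   the clamp to 0x10000 below. */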
	if ((missed = ioread32(ioaddr + CSR8) & 0x1ffff)) {
		tp->stats.rx_dropped += missed & 0x10000 ? 0x10000 : missed;
	}

	if (tulip_debug > 4)
		printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#4.4x.\n",
		       dev->name, ioread32(ioaddr + CSR5));

	return IRQ_HANDLED;
}