/*
	drivers/net/tulip/interrupt.c

	Maintained by Valerie Henson <val_henson@linux.intel.com>
	Copyright 2000,2001  The Linux Kernel Team
	Written/copyright 1994-2001 by Donald Becker.

	This software may be used and distributed according to the terms
	of the GNU General Public License, incorporated herein by reference.

	Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
	for more information on this driver, or visit the project
	Web page at http://sourceforge.net/projects/tulip/

*/

#include <linux/pci.h>
#include "tulip.h"
#include <linux/etherdevice.h>

int tulip_rx_copybreak;
unsigned int tulip_max_interrupt_work;

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
#define MIT_SIZE 15
#define MIT_TABLE 15 /* We use 0 or max */
static unsigned int mit_table[MIT_SIZE+1] =
{
	/* CSR11 21143 hardware Mitigation Control Interrupt
	   We use only RX mitigation; other techniques are used for
	   TX interrupt mitigation.

	   31    Cycle Size (timer control)
	   30:27 TX timer in 16 * Cycle size
	   26:24 TX No pkts before Int.
	   23:20 RX timer in Cycle size
	   19:17 RX No pkts before Int.
	   16    Continuous Mode (CM)
	*/

	0x0,            /* IM disabled */
	0x80150000,     /* RX time = 1, RX pkts = 2, CM = 1 */
	0x80150000,
	0x80270000,
	0x80370000,
	0x80490000,
	0x80590000,
	0x80690000,
	0x807B0000,
	0x808B0000,
	0x809D0000,
	0x80AD0000,
	0x80BD0000,
	0x80CF0000,
	0x80DF0000,
//	0x80FF0000      /* RX time = 16, RX pkts = 7, CM = 1 */
	0x80F10000      /* RX time = 16, RX pkts = 0, CM = 1 */
};
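/*
 * Worked decode of the second entry (0x80150000), using the field
 * layout above: bit 31 = 1 selects the cycle-size timer base,
 * bits 23:20 = 0x1 (RX timer = 1 cycle), bits 19:17 = 0x2 (interrupt
 * after 2 RX packets), bit 16 = 1 (Continuous Mode), matching the
 * "RX time = 1, RX pkts = 2, CM = 1" annotation.
 */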
#endif
int tulip_refill_rx(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	int entry;
	int refilled = 0;

	/* Refill the Rx ring buffers. */
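	/* Descriptors are handed back to the NIC by setting DescOwned in
	 * the status word; the chip clears that bit again once it has
	 * DMA'd a received frame into the buffer. */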
	for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
		entry = tp->dirty_rx % RX_RING_SIZE;
		if (tp->rx_buffers[entry].skb == NULL) {
			struct sk_buff *skb;
			dma_addr_t mapping;

			skb = tp->rx_buffers[entry].skb = dev_alloc_skb(PKT_BUF_SZ);
			if (skb == NULL)
				break;

			mapping = pci_map_single(tp->pdev, skb->data, PKT_BUF_SZ,
						 PCI_DMA_FROMDEVICE);
			tp->rx_buffers[entry].mapping = mapping;

			skb->dev = dev;			/* Mark as being used by this device. */
			tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
			refilled++;
		}
		tp->rx_ring[entry].status = cpu_to_le32(DescOwned);
	}
	if (tp->chip_id == LC82C168) {
		if (((ioread32(tp->base_addr + CSR5)>>17)&0x07) == 4) {
			/* Rx stopped due to out of buffers,
			 * restart it
			 */
			iowrite32(0x01, tp->base_addr + CSR2);
		}
	}
	return refilled;
}
#ifdef CONFIG_TULIP_NAPI
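/*
 * Re-entry point for the oom: path in tulip_poll() below: polling is
 * stopped there without re-enabling RX interrupts, and this timer
 * re-schedules NAPI in the hope that skb allocation succeeds on the
 * next pass.
 */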
void oom_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct tulip_private *tp = netdev_priv(dev);
	netif_rx_schedule(dev, &tp->napi);
}
int tulip_poll(struct napi_struct *napi, int budget)
{
	struct tulip_private *tp = container_of(napi, struct tulip_private, napi);
	struct net_device *dev = tp->dev;
	int entry = tp->cur_rx % RX_RING_SIZE;
	int work_done = 0;
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
	int received = 0;
#endif
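	/* NAPI contract: process at most @budget packets, return the
	 * number handled, and re-enable RX interrupts (netif_rx_complete)
	 * only once the ring has been drained within that budget. */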
	if (!netif_running(dev))
		goto done;

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION

	/* that one buffer is needed for mit activation; or might be a
	   bug in the ring buffer code; check later -- JHS */

	if (budget >= RX_RING_SIZE)
		budget--;
#endif

	if (tulip_debug > 4)
		printk(KERN_DEBUG " In tulip_poll(), entry %d %8.8x.\n", entry,
		       tp->rx_ring[entry].status);
	do {
		if (ioread32(tp->base_addr + CSR5) == 0xffffffff) {
			printk(KERN_DEBUG " In tulip_poll(), hardware disappeared.\n");
			break;
		}
		/* Acknowledge current RX interrupt sources. */
		iowrite32((RxIntr | RxNoBuf), tp->base_addr + CSR5);

		/* If we own the next entry, it is a new packet. Send it up. */
		while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
			s32 status = le32_to_cpu(tp->rx_ring[entry].status);

			if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx)
				break;

			if (tulip_debug > 5)
				printk(KERN_DEBUG "%s: In tulip_poll(), entry %d %8.8x.\n",
				       dev->name, entry, status);
			if (work_done++ >= budget)
				goto not_done;
			if ((status & 0x38008300) != 0x0300) {
				if ((status & 0x38000300) != 0x0300) {
					/* Ignore earlier buffers. */
					if ((status & 0xffff) != 0x7fff) {
						if (tulip_debug > 1)
							printk(KERN_WARNING "%s: Oversized Ethernet frame "
							       "spanned multiple buffers, status %8.8x!\n",
							       dev->name, status);
						tp->stats.rx_length_errors++;
					}
				} else if (status & RxDescFatalErr) {
					/* There was a fatal error. */
					if (tulip_debug > 2)
						printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
						       dev->name, status);
					tp->stats.rx_errors++; /* end of a packet. */
					if (status & 0x0890) tp->stats.rx_length_errors++;
					if (status & 0x0004) tp->stats.rx_frame_errors++;
					if (status & 0x0002) tp->stats.rx_crc_errors++;
					if (status & 0x0001) tp->stats.rx_fifo_errors++;
				}
			} else {
				/* Omit the four octet CRC from the length. */
				short pkt_len = ((status >> 16) & 0x7ff) - 4;
				struct sk_buff *skb;

#ifndef final_version
				if (pkt_len > 1518) {
					printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
					       dev->name, pkt_len, pkt_len);
					pkt_len = 1518;
					tp->stats.rx_length_errors++;
				}
#endif
				/* Check if the packet is long enough to accept without copying
				   to a minimally-sized skbuff. */
				if (pkt_len < tulip_rx_copybreak
				    && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
					skb_reserve(skb, 2);	/* 16 byte align the IP header */
					pci_dma_sync_single_for_cpu(tp->pdev,
								    tp->rx_buffers[entry].mapping,
								    pkt_len, PCI_DMA_FROMDEVICE);
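					/* The sync_for_cpu/sync_for_device pair brackets the
					 * CPU's read of the DMA buffer: ownership passes to
					 * the CPU for the copy below, then back to the device
					 * so the same buffer can stay on the Rx ring. */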
#if ! defined(__alpha__)
					skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
							 pkt_len);
					skb_put(skb, pkt_len);
#else
					memcpy(skb_put(skb, pkt_len),
					       tp->rx_buffers[entry].skb->data,
					       pkt_len);
#endif
					pci_dma_sync_single_for_device(tp->pdev,
								       tp->rx_buffers[entry].mapping,
								       pkt_len, PCI_DMA_FROMDEVICE);
				} else {	/* Pass up the skb already on the Rx ring. */
					char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
							     pkt_len);

#ifndef final_version
					if (tp->rx_buffers[entry].mapping !=
					    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
						printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
						       "do not match in tulip_rx: %08x vs. %08llx %p / %p.\n",
						       dev->name,
						       le32_to_cpu(tp->rx_ring[entry].buffer1),
						       (unsigned long long)tp->rx_buffers[entry].mapping,
						       skb->head, temp);
					}
#endif

					pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
							 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

					tp->rx_buffers[entry].skb = NULL;
					tp->rx_buffers[entry].mapping = 0;
				}
				skb->protocol = eth_type_trans(skb, dev);

				netif_receive_skb(skb);

				dev->last_rx = jiffies;
				tp->stats.rx_packets++;
				tp->stats.rx_bytes += pkt_len;
			}

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
			received++;
#endif
			entry = (++tp->cur_rx) % RX_RING_SIZE;
			if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4)
				tulip_refill_rx(dev);
		}

		/* New ack strategy... irq does not ack Rx any longer
		   hopefully this helps */

		/* Really bad things can happen here... If a new packet arrives
		 * and an irq arrives (tx or just due to occasionally unset
		 * mask), it will be acked by the irq handler, but the new thread
		 * is not scheduled. It is a major hole in the design.
		 * No idea how to fix this if "playing with fire" will fail
		 * tomorrow (night 011029). If it will not fail, we won
		 * finally: amount of IO did not increase at all. */
	} while ((ioread32(tp->base_addr + CSR5) & RxIntr));
 done:

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION

	/* We use this simplistic scheme for IM. It's proven by
	   real life installations. We could have IM enabled
	   continuously but this would cause unnecessary latency.
	   Unfortunately we can't use all the NET_RX_* feedback here.
	   That would turn on IM for devices that are not contributing
	   to backlog congestion, with unnecessary latency.

	   We monitor the device RX-ring and have:

	   HW Interrupt Mitigation either ON or OFF.

	   ON:  More than 1 pkt received (per intr.) OR we are dropping
	   OFF: Only 1 pkt received

	   Note. We only use min and max (0, 15) settings from mit_table */

	if (tp->flags & HAS_INTR_MITIGATION) {
		if (received > 1) {
			if (!tp->mit_on) {
				tp->mit_on = 1;
				iowrite32(mit_table[MIT_TABLE], tp->base_addr + CSR11);
			}
		} else {
			if (tp->mit_on) {
				tp->mit_on = 0;
				iowrite32(0, tp->base_addr + CSR11);
			}
		}
	}

#endif /* CONFIG_TULIP_NAPI_HW_MITIGATION */
	tulip_refill_rx(dev);

	/* If the RX ring is not full, we are out of memory. */
	if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		goto oom;

	/* Remove us from the polling list and enable RX intr. */

	netif_rx_complete(dev, napi);
	iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7);

	/* The last op happens after poll completion. Which means the following:
	 * 1. it can race with disabling irqs in irq handler
	 * 2. it can race with dis/enabling irqs in other poll threads
	 * 3. if an irq raised after beginning loop, it will be immediately
	 *    triggered here.
	 *
	 * Summarizing: the logic results in some redundant irqs both
	 * due to races in masking and due to too late acking of already
	 * processed irqs. But it must not result in losing events.
	 */

	return work_done;
 not_done:
	if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 ||
	    tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		tulip_refill_rx(dev);

	if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		goto oom;

	return work_done;

 oom:	/* Executed with RX ints disabled */

	/* Start timer, stop polling, but do not enable rx interrupts. */
	mod_timer(&tp->oom_timer, jiffies+1);

	/* Think: timer_pending() was an explicit signature of a bug.
	 * The timer can be pending now but could have fired and completed
	 * before we did netif_rx_complete(). See? We would lose it. */

	/* remove ourselves from the polling list */
	netif_rx_complete(dev, napi);

	return work_done;
}
#else /* CONFIG_TULIP_NAPI */
static int tulip_rx(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	int entry = tp->cur_rx % RX_RING_SIZE;
	int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
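	/* rx_work_limit is the number of descriptors the chip can still
	 * fill, which bounds this pass to at most one full ring of
	 * packets per call. */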
	int received = 0;

	if (tulip_debug > 4)
		printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
		       tp->rx_ring[entry].status);
	/* If we own the next entry, it is a new packet. Send it up. */
	while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
		s32 status = le32_to_cpu(tp->rx_ring[entry].status);

		if (tulip_debug > 5)
			printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
			       dev->name, entry, status);
		if (--rx_work_limit < 0)
			break;
		if ((status & 0x38008300) != 0x0300) {
			if ((status & 0x38000300) != 0x0300) {
				/* Ignore earlier buffers. */
				if ((status & 0xffff) != 0x7fff) {
					if (tulip_debug > 1)
						printk(KERN_WARNING "%s: Oversized Ethernet frame "
						       "spanned multiple buffers, status %8.8x!\n",
						       dev->name, status);
					tp->stats.rx_length_errors++;
				}
			} else if (status & RxDescFatalErr) {
				/* There was a fatal error. */
				if (tulip_debug > 2)
					printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
					       dev->name, status);
				tp->stats.rx_errors++; /* end of a packet. */
				if (status & 0x0890) tp->stats.rx_length_errors++;
				if (status & 0x0004) tp->stats.rx_frame_errors++;
				if (status & 0x0002) tp->stats.rx_crc_errors++;
				if (status & 0x0001) tp->stats.rx_fifo_errors++;
			}
		} else {
			/* Omit the four octet CRC from the length. */
			short pkt_len = ((status >> 16) & 0x7ff) - 4;
			struct sk_buff *skb;

#ifndef final_version
			if (pkt_len > 1518) {
				printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
				       dev->name, pkt_len, pkt_len);
				pkt_len = 1518;
				tp->stats.rx_length_errors++;
			}
#endif

			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < tulip_rx_copybreak
			    && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single_for_cpu(tp->pdev,
							    tp->rx_buffers[entry].mapping,
							    pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
				skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
						 pkt_len);
				skb_put(skb, pkt_len);
#else
				memcpy(skb_put(skb, pkt_len),
				       tp->rx_buffers[entry].skb->data,
				       pkt_len);
#endif
				pci_dma_sync_single_for_device(tp->pdev,
							       tp->rx_buffers[entry].mapping,
							       pkt_len, PCI_DMA_FROMDEVICE);
			} else {	/* Pass up the skb already on the Rx ring. */
				char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
						     pkt_len);

#ifndef final_version
				if (tp->rx_buffers[entry].mapping !=
				    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
					printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
					       "do not match in tulip_rx: %08x vs. %Lx %p / %p.\n",
					       dev->name,
					       le32_to_cpu(tp->rx_ring[entry].buffer1),
					       (long long)tp->rx_buffers[entry].mapping,
					       skb->head, temp);
				}
#endif

				pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
						 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

				tp->rx_buffers[entry].skb = NULL;
				tp->rx_buffers[entry].mapping = 0;
			}
			skb->protocol = eth_type_trans(skb, dev);

			netif_rx(skb);

			dev->last_rx = jiffies;
			tp->stats.rx_packets++;
			tp->stats.rx_bytes += pkt_len;
		}
		received++;
		entry = (++tp->cur_rx) % RX_RING_SIZE;
	}
	return received;
}
#endif /* CONFIG_TULIP_NAPI */
static inline unsigned int phy_interrupt (struct net_device *dev)
{
#ifdef __hppa__
	struct tulip_private *tp = netdev_priv(dev);
	int csr12 = ioread32(tp->base_addr + CSR12) & 0xff;

	if (csr12 != tp->csr12_shadow) {
		/* ack interrupt */
		iowrite32(csr12 | 0x02, tp->base_addr + CSR12);
		tp->csr12_shadow = csr12;
		/* do link change stuff */
		spin_lock(&tp->lock);
		tulip_check_duplex(dev);
		spin_unlock(&tp->lock);
		/* clear irq ack bit */
		iowrite32(csr12 & ~0x02, tp->base_addr + CSR12);

		return 1;
	}
#endif

	return 0;
}
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
irqreturn_t tulip_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;
	int csr5;
	int missed;
	int rx = 0;
	int tx = 0;
	int oi = 0;
	int maxrx = RX_RING_SIZE;
	int maxtx = TX_RING_SIZE;
	int maxoi = TX_RING_SIZE;
#ifdef CONFIG_TULIP_NAPI
	int rxd = 0;
#else
	int entry;
#endif
	unsigned int work_count = tulip_max_interrupt_work;
	unsigned int handled = 0;

	/* Let's see whether the interrupt really is for us */
	csr5 = ioread32(ioaddr + CSR5);

	if (tp->flags & HAS_PHY_IRQ)
		handled = phy_interrupt (dev);

	if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
		return IRQ_RETVAL(handled);
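	/* IRQ_RETVAL(0) == IRQ_NONE here, which tells the shared-IRQ core
	 * that this device did not raise the interrupt, so other handlers
	 * on the same line get a chance to claim it. */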
	tp->nir++;

	do {

#ifdef CONFIG_TULIP_NAPI

		if (!rxd && (csr5 & (RxIntr | RxNoBuf))) {
			rxd++;
			/* Mask RX intrs and add the device to poll list. */
			iowrite32(tulip_tbl[tp->chip_id].valid_intrs&~RxPollInt, ioaddr + CSR7);
			netif_rx_schedule(dev, &tp->napi);

			if (!(csr5&~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass)))
				break;
		}

		/* Acknowledge the interrupt sources we handle here ASAP
		   the poll function does Rx and RxNoBuf acking */

		iowrite32(csr5 & 0x0001ff3f, ioaddr + CSR5);
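		/* CSR5 status bits are write-one-to-clear; the 0x0001ff3f
		 * mask leaves the RxIntr and RxNoBuf bits set so the poll
		 * routine can ack them itself. */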
#else
		/* Acknowledge all of the current interrupt sources ASAP. */
		iowrite32(csr5 & 0x0001ffff, ioaddr + CSR5);

		if (csr5 & (RxIntr | RxNoBuf)) {
			rx += tulip_rx(dev);
			tulip_refill_rx(dev);
		}

#endif /* CONFIG_TULIP_NAPI */

		if (tulip_debug > 4)
			printk(KERN_DEBUG "%s: interrupt  csr5=%#8.8x new csr5=%#8.8x.\n",
			       dev->name, csr5, ioread32(ioaddr + CSR5));
		if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
			unsigned int dirty_tx;

			spin_lock(&tp->lock);

			for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
			     dirty_tx++) {
				int entry = dirty_tx % TX_RING_SIZE;
				int status = le32_to_cpu(tp->tx_ring[entry].status);

				if (status < 0)
					break;	/* It still has not been Txed */

				/* Check for Rx filter setup frames. */
				if (tp->tx_buffers[entry].skb == NULL) {
					/* test because dummy frames not mapped */
					if (tp->tx_buffers[entry].mapping)
						pci_unmap_single(tp->pdev,
								 tp->tx_buffers[entry].mapping,
								 sizeof(tp->setup_frame),
								 PCI_DMA_TODEVICE);
					continue;
				}
				if (status & 0x8000) {
					/* There was a major error; log it. */
#ifndef final_version
					if (tulip_debug > 1)
						printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
						       dev->name, status);
#endif
					tp->stats.tx_errors++;
					if (status & 0x4104) tp->stats.tx_aborted_errors++;
					if (status & 0x0C00) tp->stats.tx_carrier_errors++;
					if (status & 0x0200) tp->stats.tx_window_errors++;
					if (status & 0x0002) tp->stats.tx_fifo_errors++;
					if ((status & 0x0080) && tp->full_duplex == 0)
						tp->stats.tx_heartbeat_errors++;
				} else {
					tp->stats.tx_bytes +=
						tp->tx_buffers[entry].skb->len;
					tp->stats.collisions += (status >> 3) & 15;
					tp->stats.tx_packets++;
				}

				pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
						 tp->tx_buffers[entry].skb->len,
						 PCI_DMA_TODEVICE);

				/* Free the original skb. */
				dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
				tp->tx_buffers[entry].skb = NULL;
				tp->tx_buffers[entry].mapping = 0;
				tx++;
			}
#ifndef final_version
			if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
				printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d.\n",
				       dev->name, dirty_tx, tp->cur_tx);
				dirty_tx += TX_RING_SIZE;
			}
#endif

			if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
				netif_wake_queue(dev);

			tp->dirty_tx = dirty_tx;
			if (csr5 & TxDied) {
				if (tulip_debug > 2)
					printk(KERN_WARNING "%s: The transmitter stopped."
					       "  CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
					       dev->name, csr5, ioread32(ioaddr + CSR6), tp->csr6);
				tulip_restart_rxtx(tp);
			}
			spin_unlock(&tp->lock);
		}
		/* Log errors. */
		if (csr5 & AbnormalIntr) {	/* Abnormal error summary bit. */
			if (csr5 == 0xffffffff)
				break;
			if (csr5 & TxJabber) tp->stats.tx_errors++;
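			/* On a Tx FIFO underflow, raise the transmit threshold
			 * in CSR6 one step at a time; once it is maxed out
			 * (0xC000), fall back to store-and-forward so a frame
			 * is fully buffered before transmission begins. */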
			if (csr5 & TxFIFOUnderflow) {
				if ((tp->csr6 & 0xC000) != 0xC000)
					tp->csr6 += 0x4000;	/* Bump up the Tx threshold */
				else
					tp->csr6 |= 0x00200000;  /* Store-n-forward. */
				/* Restart the transmit process. */
				tulip_restart_rxtx(tp);
				iowrite32(0, ioaddr + CSR1);
			}
			if (csr5 & (RxDied | RxNoBuf)) {
				if (tp->flags & COMET_MAC_ADDR) {
					iowrite32(tp->mc_filter[0], ioaddr + 0xAC);
					iowrite32(tp->mc_filter[1], ioaddr + 0xB0);
				}
			}
			if (csr5 & RxDied) {	/* Missed a Rx frame. */
				tp->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
				tp->stats.rx_errors++;
				tulip_start_rxtx(tp);
			}
			/*
			 * NB: t21142_lnk_change() does a del_timer_sync(), so be careful if this
			 * call is ever done under the spinlock
			 */
			if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
				if (tp->link_change)
					(tp->link_change)(dev, csr5);
			}
			if (csr5 & SystemError) {
				int error = (csr5 >> 23) & 7;
				/* oops, we hit a PCI error.  The code produced corresponds
				 * to the reason:
				 *  0 - parity error
				 *  1 - master abort
				 *  2 - target abort
				 * Note that on parity error, we should do a software reset
				 * of the chip to get it back into a sane state (according
				 * to the 21142/3 docs that is).
				 *   -- rmk
				 */
				printk(KERN_ERR "%s: (%lu) System Error occurred (%d)\n",
				       dev->name, tp->nir, error);
			}
			/* Clear all error sources, including undocumented ones! */
			iowrite32(0x0800f7ba, ioaddr + CSR5);
			oi++;
		}
		if (csr5 & TimerInt) {

			if (tulip_debug > 2)
				printk(KERN_ERR "%s: Re-enabling interrupts, %8.8x.\n",
				       dev->name, csr5);
			iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
			tp->ttimer = 0;
			oi++;
		}
		if (tx > maxtx || rx > maxrx || oi > maxoi) {
			if (tulip_debug > 1)
				printk(KERN_WARNING "%s: Too much work during an interrupt, "
				       "csr5=0x%8.8x. (%lu) (%d,%d,%d)\n", dev->name, csr5, tp->nir, tx, rx, oi);

			/* Acknowledge all interrupt sources. */
			iowrite32(0x8001ffff, ioaddr + CSR5);
			if (tp->flags & HAS_INTR_MITIGATION) {
				/* Josip Loncaric at ICASE did extensive experimentation
				   to develop a good interrupt mitigation setting. */
				iowrite32(0x8b240000, ioaddr + CSR11);
			} else if (tp->chip_id == LC82C168) {
				/* the LC82C168 doesn't have a hw timer. */
				iowrite32(0x00, ioaddr + CSR7);
				mod_timer(&tp->timer, RUN_AT(HZ/50));
			} else {
				/* Mask all interrupting sources, set timer to
				   re-enable. */
				iowrite32(((~csr5) & 0x0001ebef) | AbnormalIntr | TimerInt, ioaddr + CSR7);
				iowrite32(0x0012, ioaddr + CSR11);
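				/* CSR11 is the general-purpose timer on these
				 * chips: arming it here lets the TimerInt path
				 * above re-enable the masked sources shortly. */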
			}
			break;
		}

		work_count--;
		if (work_count == 0)
			break;

		csr5 = ioread32(ioaddr + CSR5);
#ifdef CONFIG_TULIP_NAPI
		if (rxd)
			csr5 &= ~RxPollInt;
	} while ((csr5 & (TxNoBuf |
			  TxDied |
			  TxIntr |
			  TimerInt |
			  /* Abnormal intr. */
			  RxDied |
			  TxFIFOUnderflow |
			  TxJabber |
			  TPLnkFail |
			  SystemError)) != 0);
#else
	} while ((csr5 & (NormalIntr|AbnormalIntr)) != 0);
	tulip_refill_rx(dev);

	/* check if the card is in suspend mode */
	entry = tp->dirty_rx % RX_RING_SIZE;
	if (tp->rx_buffers[entry].skb == NULL) {
		if (tulip_debug > 1)
			printk(KERN_WARNING "%s: in rx suspend mode: (%lu) (tp->cur_rx = %u, ttimer = %d, rx = %d) go/stay in suspend mode\n", dev->name, tp->nir, tp->cur_rx, tp->ttimer, rx);
		if (tp->chip_id == LC82C168) {
			iowrite32(0x00, ioaddr + CSR7);
			mod_timer(&tp->timer, RUN_AT(HZ/50));
		} else {
			if (tp->ttimer == 0 || (ioread32(ioaddr + CSR11) & 0xffff) == 0) {
				if (tulip_debug > 1)
					printk(KERN_WARNING "%s: in rx suspend mode: (%lu) set timer\n", dev->name, tp->nir);
				iowrite32(tulip_tbl[tp->chip_id].valid_intrs | TimerInt,
					  ioaddr + CSR7);
				iowrite32(TimerInt, ioaddr + CSR5);
				iowrite32(12, ioaddr + CSR11);
				tp->ttimer = 1;
			}
		}
	}
#endif /* CONFIG_TULIP_NAPI */
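	/* CSR8 is the missed-frames counter: the low 16 bits count frames
	 * dropped for lack of descriptors, and bit 16 flags counter
	 * overflow, in which case the saturated value 0x10000 is credited. */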
	if ((missed = ioread32(ioaddr + CSR8) & 0x1ffff)) {
		tp->stats.rx_dropped += missed & 0x10000 ? 0x10000 : missed;
	}
	if (tulip_debug > 4)
		printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#4.4x.\n",
		       dev->name, ioread32(ioaddr + CSR5));

	return IRQ_HANDLED;
}