/*
	drivers/net/tulip/interrupt.c

	Copyright 2000,2001  The Linux Kernel Team
	Written/copyright 1994-2001 by Donald Becker.

	This software may be used and distributed according to the terms
	of the GNU General Public License, incorporated herein by reference.

	Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
	for more information on this driver.

	Please submit bugs to http://bugzilla.kernel.org/ .
*/
#include <linux/pci.h>
#include "tulip.h"
#include <linux/etherdevice.h>

int tulip_rx_copybreak;
unsigned int tulip_max_interrupt_work;
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
#define MIT_SIZE 15
#define MIT_TABLE 15 /* We use 0 or max */

static unsigned int mit_table[MIT_SIZE+1] =
{
        /* CSR11 21143 hardware Mitigation Control Interrupt.
           We use only RX mitigation; other techniques are used for
           TX intr. mitigation.

           31    Cycle Size (timer control)
           30:27 TX timer in 16 * Cycle size
           26:24 TX No pkts before Int.
           23:20 RX timer in Cycle size
           19:17 RX No pkts before Int.
           16    Continuous Mode (CM)
        */

        0x0,             /* IM disabled */
        0x80150000,      /* RX time = 1, RX pkts = 2, CM = 1 */
        /* ... intermediate settings elided ... */
//      0x80FF0000       /* RX time = 16, RX pkts = 7, CM = 1 */
        0x80F10000       /* RX time = 16, RX pkts = 0, CM = 1 */
};
#endif
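/*
 * Illustrative sketch, not part of the original driver: composing a CSR11
 * interrupt-mitigation word from the bit layout documented in mit_table
 * above. The helper name and its parameters are hypothetical; only the
 * shift positions come from that comment.
 */
static inline u32 mit_csr11_word(u32 cycle_size, u32 tx_timer, u32 tx_pkts,
                                 u32 rx_timer, u32 rx_pkts, u32 cm)
{
        return (cycle_size << 31) | (tx_timer << 27) | (tx_pkts << 24) |
               (rx_timer << 20) | (rx_pkts << 17) | (cm << 16);
}
/* Sanity check: mit_csr11_word(1, 0, 0, 1, 2, 1) == 0x80150000, matching
   the "RX time = 1, RX pkts = 2, CM = 1" entry above. */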
int tulip_refill_rx(struct net_device *dev)
{
        struct tulip_private *tp = netdev_priv(dev);
        int entry;
        int refilled = 0;
        /* Refill the Rx ring buffers. */
        for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
                entry = tp->dirty_rx % RX_RING_SIZE;
                if (tp->rx_buffers[entry].skb == NULL) {
                        struct sk_buff *skb;
                        dma_addr_t mapping;

                        skb = tp->rx_buffers[entry].skb = dev_alloc_skb(PKT_BUF_SZ);
                        if (skb == NULL)
                                break;

                        mapping = pci_map_single(tp->pdev, skb->data, PKT_BUF_SZ,
                                                 PCI_DMA_FROMDEVICE);
                        tp->rx_buffers[entry].mapping = mapping;

                        skb->dev = dev;                 /* Mark as being used by this device. */
                        tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
                        refilled++;
                }
                tp->rx_ring[entry].status = cpu_to_le32(DescOwned);
        }
        if (tp->chip_id == LC82C168) {
                if (((ioread32(tp->base_addr + CSR5) >> 17) & 0x07) == 4) {
                        /* Rx stopped due to out of buffers,
                         * restart it
                         */
                        iowrite32(0x01, tp->base_addr + CSR2);
                }
        }
        return refilled;
}
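/* Note on the LC82C168 kick above, assuming 2114x-style register semantics:
   CSR5 bits 19:17 encode the receive process state, state 4 meaning the
   receiver is suspended for lack of free descriptors; the write to CSR2
   (receive poll demand) restarts the Rx engine once buffers have been
   refilled. */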
#ifdef CONFIG_TULIP_NAPI

void oom_timer(unsigned long data)
{
        struct net_device *dev = (struct net_device *)data;
        struct tulip_private *tp = netdev_priv(dev);
        napi_schedule(&tp->napi);
}
int tulip_poll(struct napi_struct *napi, int budget)
{
        struct tulip_private *tp = container_of(napi, struct tulip_private, napi);
        struct net_device *dev = tp->dev;
        int entry = tp->cur_rx % RX_RING_SIZE;
        int work_done = 0;
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
        int received = 0;
#endif

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION

        /* that one buffer is needed for mit activation; or might be a
           bug in the ring buffer code; check later -- JHS */

        if (budget >= RX_RING_SIZE)
                budget--;
#endif
        if (tulip_debug > 4)
                printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
                       tp->rx_ring[entry].status);

        do {
                if (ioread32(tp->base_addr + CSR5) == 0xffffffff) {
                        printk(KERN_DEBUG " In tulip_poll(), hardware disappeared.\n");
                        break;
                }
                /* Acknowledge current RX interrupt sources. */
                iowrite32((RxIntr | RxNoBuf), tp->base_addr + CSR5);

                /* If we own the next entry, it is a new packet. Send it up. */
                while (!(tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
                        s32 status = le32_to_cpu(tp->rx_ring[entry].status);
                        short pkt_len;

                        if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx)
                                break;

                        if (tulip_debug > 5)
                                printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
                                       dev->name, entry, status);

                        if (++work_done >= budget)
                                goto not_done;
                        /*
                         * Omit the four octet CRC from the length.
                         * (May not be considered valid until we have
                         * checked status for RxLengthOver2047 bits)
                         */
                        pkt_len = ((status >> 16) & 0x7ff) - 4;

                        /*
                         * Maximum pkt_len is 1518 (1514 + vlan header)
                         * Anything higher than this is always invalid
                         * regardless of RxLengthOver2047 bits
                         */
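                        /* Worked example for the length math above: the frame
                           length including CRC sits in the upper half of the
                           status word, and the 0x7ff mask keeps values up to
                           2047 (hence RxLengthOver2047). For a minimum-size
                           64 byte frame, status = 0x00400000 gives
                           ((status >> 16) & 0x7ff) = 64, so pkt_len = 60. */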
                        if ((status & (RxLengthOver2047 |
                                       RxDescCRCError |
                                       RxDescCollisionSeen |
                                       RxDescRunt |
                                       RxDescDescErr |
                                       RxWholePkt)) != RxWholePkt ||
                            pkt_len > 1518) {
                                if ((status & (RxLengthOver2047 |
                                               RxWholePkt)) != RxWholePkt) {
                                        /* Ignore earlier buffers. */
                                        if ((status & 0xffff) != 0x7fff) {
                                                if (tulip_debug > 1)
                                                        printk(KERN_WARNING "%s: Oversized Ethernet frame "
                                                               "spanned multiple buffers, status %8.8x!\n",
                                                               dev->name, status);
                                                tp->stats.rx_length_errors++;
                                        }
                                } else {
                                        /* There was a fatal error. */
                                        if (tulip_debug > 2)
                                                printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
                                                       dev->name, status);
                                        tp->stats.rx_errors++; /* end of a packet.*/
                                        if (pkt_len > 1518 ||
                                            (status & RxDescRunt))
                                                tp->stats.rx_length_errors++;

                                        if (status & 0x0004) tp->stats.rx_frame_errors++;
                                        if (status & 0x0002) tp->stats.rx_crc_errors++;
                                        if (status & 0x0001) tp->stats.rx_fifo_errors++;
                                }
                        } else {
                                struct sk_buff *skb;
                                /* Check if the packet is long enough to accept without copying
                                   to a minimally-sized skbuff. */
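                                /* Rationale: a short packet is copied into a fresh,
                                   minimally-sized skb so the full-size buffer can stay
                                   on the ring for reuse; packets of tulip_rx_copybreak
                                   bytes or more are passed up in place instead. */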
                                if (pkt_len < tulip_rx_copybreak
                                    && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
                                        skb_reserve(skb, 2);    /* 16 byte align the IP header */
                                        pci_dma_sync_single_for_cpu(tp->pdev,
                                                                    tp->rx_buffers[entry].mapping,
                                                                    pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
                                        skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
                                                                pkt_len);
                                        skb_put(skb, pkt_len);
#else
                                        memcpy(skb_put(skb, pkt_len),
                                               tp->rx_buffers[entry].skb->data,
                                               pkt_len);
#endif
                                        pci_dma_sync_single_for_device(tp->pdev,
                                                                       tp->rx_buffers[entry].mapping,
                                                                       pkt_len, PCI_DMA_FROMDEVICE);
                                } else {        /* Pass up the skb already on the Rx ring. */
                                        char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
                                                             pkt_len);

#ifndef final_version
                                        if (tp->rx_buffers[entry].mapping !=
                                            le32_to_cpu(tp->rx_ring[entry].buffer1)) {
                                                printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
                                                       "do not match in tulip_rx: %08x vs. %08llx %p / %p.\n",
                                                       dev->name,
                                                       le32_to_cpu(tp->rx_ring[entry].buffer1),
                                                       (unsigned long long)tp->rx_buffers[entry].mapping,
                                                       skb->head, temp);
                                        }
#endif

                                        pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
                                                         PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

                                        tp->rx_buffers[entry].skb = NULL;
                                        tp->rx_buffers[entry].mapping = 0;
                                }
                                skb->protocol = eth_type_trans(skb, dev);

                                netif_receive_skb(skb);

                                tp->stats.rx_packets++;
                                tp->stats.rx_bytes += pkt_len;
                        }
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
                        received++;
#endif

                        entry = (++tp->cur_rx) % RX_RING_SIZE;
                        if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4)
                                tulip_refill_rx(dev);
                }
                /* New ack strategy... irq does not ack Rx any longer
                   hopefully this helps */

                /* Really bad things can happen here... If a new packet arrives
                 * and an irq arrives (tx or just due to an occasionally unset
                 * mask), it will be acked by the irq handler, but the new thread
                 * is not scheduled. It is a major hole in the design.
                 * No idea how to fix this if "playing with fire" fails
                 * tomorrow (night 011029). If it does not fail, we have
                 * finally won: the amount of IO did not increase at all. */
        } while ((ioread32(tp->base_addr + CSR5) & RxIntr));
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION

        /* We use this simplistic scheme for IM. It's proven by
           real life installations. We could have IM enabled
           continuously, but this would cause unnecessary latency.
           Unfortunately we can't use all the NET_RX_* feedback here.
           That would turn on IM, with its unnecessary latency, for
           devices that are not contributing to backlog congestion.

           We monitor the device RX-ring and have:

           HW Interrupt Mitigation either ON or OFF.

           ON:  More than 1 pkt received (per intr.) OR we are dropping
           OFF: Only 1 pkt received

           Note. We only use min and max (0, 15) settings from mit_table */

        if (tp->flags & HAS_INTR_MITIGATION) {
                if (received > 1) {
                        if (!tp->mit_on) {
                                tp->mit_on = 1;
                                iowrite32(mit_table[MIT_TABLE], tp->base_addr + CSR11);
                        }
                } else {
                        if (tp->mit_on) {
                                tp->mit_on = 0;
                                iowrite32(0, tp->base_addr + CSR11);
                        }
                }
        }
#endif /* CONFIG_TULIP_NAPI_HW_MITIGATION */

        tulip_refill_rx(dev);

        /* If RX ring is not full we are out of memory. */
        if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
                goto oom;

        /* Remove us from polling list and enable RX intr. */
        napi_complete(napi);
        iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7);
        /* The last op happens after poll completion. Which means the following:
         * 1. it can race with disabling irqs in irq handler
         * 2. it can race with dis/enabling irqs in other poll threads
         * 3. if an irq raised after the beginning of the loop, it will be
         *    immediately triggered here.
         *
         * Summarizing: the logic results in some redundant irqs both
         * due to races in masking and due to too late acking of already
         * processed irqs. But it must not result in losing events.
         */

        return work_done;
 not_done:
        if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 ||
            tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
                tulip_refill_rx(dev);

        if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
                goto oom;

        return work_done;
 oom:   /* Executed with RX ints disabled */

        /* Start timer, stop polling, but do not enable rx interrupts. */
        mod_timer(&tp->oom_timer, jiffies+1);

        /* Think: timer_pending() was an explicit signature of bug.
         * Timer can be pending now but fired and completed
         * before we did napi_complete(). See? We would lose it. */

        /* remove ourselves from the polling list */
        napi_complete(napi);

        return work_done;
}

#else /* CONFIG_TULIP_NAPI */
static int tulip_rx(struct net_device *dev)
{
        struct tulip_private *tp = netdev_priv(dev);
        int entry = tp->cur_rx % RX_RING_SIZE;
        int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
        int received = 0;

        if (tulip_debug > 4)
                printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
                       tp->rx_ring[entry].status);
        /* If we own the next entry, it is a new packet. Send it up. */
        while (!(tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
                s32 status = le32_to_cpu(tp->rx_ring[entry].status);
                short pkt_len;

                if (tulip_debug > 5)
                        printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
                               dev->name, entry, status);

                if (--rx_work_limit < 0)
                        break;
                /*
                   Omit the four octet CRC from the length.
                   (May not be considered valid until we have
                   checked status for RxLengthOver2047 bits)
                 */
                pkt_len = ((status >> 16) & 0x7ff) - 4;

                /*
                   Maximum pkt_len is 1518 (1514 + vlan header)
                   Anything higher than this is always invalid
                   regardless of RxLengthOver2047 bits
                 */
                if ((status & (RxLengthOver2047 |
                               RxDescCRCError |
                               RxDescCollisionSeen |
                               RxDescRunt |
                               RxDescDescErr |
                               RxWholePkt)) != RxWholePkt ||
                    pkt_len > 1518) {
                        if ((status & (RxLengthOver2047 |
                                       RxWholePkt)) != RxWholePkt) {
                                /* Ignore earlier buffers. */
                                if ((status & 0xffff) != 0x7fff) {
                                        if (tulip_debug > 1)
                                                printk(KERN_WARNING "%s: Oversized Ethernet frame "
                                                       "spanned multiple buffers, status %8.8x!\n",
                                                       dev->name, status);
                                        tp->stats.rx_length_errors++;
                                }
                        } else {
                                /* There was a fatal error. */
                                if (tulip_debug > 2)
                                        printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
                                               dev->name, status);
                                tp->stats.rx_errors++; /* end of a packet.*/
                                if (pkt_len > 1518 ||
                                    (status & RxDescRunt))
                                        tp->stats.rx_length_errors++;
                                if (status & 0x0004) tp->stats.rx_frame_errors++;
                                if (status & 0x0002) tp->stats.rx_crc_errors++;
                                if (status & 0x0001) tp->stats.rx_fifo_errors++;
                        }
                } else {
                        struct sk_buff *skb;
                        /* Check if the packet is long enough to accept without copying
                           to a minimally-sized skbuff. */
                        if (pkt_len < tulip_rx_copybreak
                            && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
                                skb_reserve(skb, 2);    /* 16 byte align the IP header */
                                pci_dma_sync_single_for_cpu(tp->pdev,
                                                            tp->rx_buffers[entry].mapping,
                                                            pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
                                skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
                                                        pkt_len);
                                skb_put(skb, pkt_len);
#else
                                memcpy(skb_put(skb, pkt_len),
                                       tp->rx_buffers[entry].skb->data,
                                       pkt_len);
#endif
                                pci_dma_sync_single_for_device(tp->pdev,
                                                               tp->rx_buffers[entry].mapping,
                                                               pkt_len, PCI_DMA_FROMDEVICE);
                        } else {        /* Pass up the skb already on the Rx ring. */
                                char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
                                                     pkt_len);

#ifndef final_version
                                if (tp->rx_buffers[entry].mapping !=
                                    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
                                        printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
                                               "do not match in tulip_rx: %08x vs. %Lx %p / %p.\n",
                                               dev->name,
                                               le32_to_cpu(tp->rx_ring[entry].buffer1),
                                               (long long)tp->rx_buffers[entry].mapping,
                                               skb->head, temp);
                                }
#endif

                                pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
                                                 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

                                tp->rx_buffers[entry].skb = NULL;
                                tp->rx_buffers[entry].mapping = 0;
                        }
                        skb->protocol = eth_type_trans(skb, dev);

                        netif_rx(skb);

                        tp->stats.rx_packets++;
                        tp->stats.rx_bytes += pkt_len;
                }
                received++;
                entry = (++tp->cur_rx) % RX_RING_SIZE;
        }
        return received;
}
#endif /* CONFIG_TULIP_NAPI */
static inline unsigned int phy_interrupt (struct net_device *dev)
{
#ifdef __hppa__
        struct tulip_private *tp = netdev_priv(dev);
        int csr12 = ioread32(tp->base_addr + CSR12) & 0xff;

        if (csr12 != tp->csr12_shadow) {
                /* ack interrupt */
                iowrite32(csr12 | 0x02, tp->base_addr + CSR12);
                tp->csr12_shadow = csr12;
                /* do link change stuff */
                spin_lock(&tp->lock);
                tulip_check_duplex(dev);
                spin_unlock(&tp->lock);
                /* clear irq ack bit */
                iowrite32(csr12 & ~0x02, tp->base_addr + CSR12);

                return 1;
        }
#endif

        return 0;
}
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
irqreturn_t tulip_interrupt(int irq, void *dev_instance)
{
        struct net_device *dev = (struct net_device *)dev_instance;
        struct tulip_private *tp = netdev_priv(dev);
        void __iomem *ioaddr = tp->base_addr;
        int csr5;
        int missed;
        int rx = 0;
        int tx = 0;
        int oi = 0;
        int maxrx = RX_RING_SIZE;
        int maxtx = TX_RING_SIZE;
        int maxoi = TX_RING_SIZE;
#ifdef CONFIG_TULIP_NAPI
        int rxd = 0;
#else
        int entry;
#endif
        unsigned int work_count = tulip_max_interrupt_work;
        unsigned int handled = 0;
        /* Let's see whether the interrupt really is for us */
        csr5 = ioread32(ioaddr + CSR5);

        if (tp->flags & HAS_PHY_IRQ)
                handled = phy_interrupt (dev);

        if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
                return IRQ_RETVAL(handled);

        tp->nir++;

        do {
#ifdef CONFIG_TULIP_NAPI

                if (!rxd && (csr5 & (RxIntr | RxNoBuf))) {
                        rxd++;
                        /* Mask RX intrs and add the device to poll list. */
                        iowrite32(tulip_tbl[tp->chip_id].valid_intrs&~RxPollInt,
                                  ioaddr + CSR7);
                        napi_schedule(&tp->napi);

                        if (!(csr5&~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass)))
                                break;
                }

                /* Acknowledge the interrupt sources we handle here ASAP
                   the poll function does Rx and RxNoBuf acking */

                iowrite32(csr5 & 0x0001ff3f, ioaddr + CSR5);
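                /* Note on the 0x0001ff3f mask, assuming the CSR5 bit values in
                   tulip.h (RxIntr = 0x40, RxNoBuf = 0x80): it is the full
                   0x0001ffff status mask with bits 6 and 7 cleared, i.e.
                   everything except the two Rx sources, which the poll
                   routine acks itself. */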
#else
                /* Acknowledge all of the current interrupt sources ASAP. */
                iowrite32(csr5 & 0x0001ffff, ioaddr + CSR5);

                if (csr5 & (RxIntr | RxNoBuf)) {
                        rx += tulip_rx(dev);
                        tulip_refill_rx(dev);
                }

#endif /*  CONFIG_TULIP_NAPI */
                if (tulip_debug > 4)
                        printk(KERN_DEBUG "%s: interrupt  csr5=%#8.8x new csr5=%#8.8x.\n",
                               dev->name, csr5, ioread32(ioaddr + CSR5));
                if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
                        unsigned int dirty_tx;

                        spin_lock(&tp->lock);

                        for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
                             dirty_tx++) {
                                int entry = dirty_tx % TX_RING_SIZE;
                                int status = le32_to_cpu(tp->tx_ring[entry].status);

                                if (status < 0)
                                        break;  /* It still has not been Txed */

                                /* Check for Rx filter setup frames. */
                                if (tp->tx_buffers[entry].skb == NULL) {
                                        /* test because dummy frames not mapped */
                                        if (tp->tx_buffers[entry].mapping)
                                                pci_unmap_single(tp->pdev,
                                                                 tp->tx_buffers[entry].mapping,
                                                                 sizeof(tp->setup_frame),
                                                                 PCI_DMA_TODEVICE);
                                        continue;
                                }
                                if (status & 0x8000) {
                                        /* There was a major error, log it. */
#ifndef final_version
                                        if (tulip_debug > 1)
                                                printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
                                                       dev->name, status);
#endif
                                        tp->stats.tx_errors++;
                                        if (status & 0x4104) tp->stats.tx_aborted_errors++;
                                        if (status & 0x0C00) tp->stats.tx_carrier_errors++;
                                        if (status & 0x0200) tp->stats.tx_window_errors++;
                                        if (status & 0x0002) tp->stats.tx_fifo_errors++;
                                        if ((status & 0x0080) && tp->full_duplex == 0)
                                                tp->stats.tx_heartbeat_errors++;
                                } else {
                                        tp->stats.tx_bytes +=
                                                tp->tx_buffers[entry].skb->len;
                                        tp->stats.collisions += (status >> 3) & 15;
                                        tp->stats.tx_packets++;
                                }
                                pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
                                                 tp->tx_buffers[entry].skb->len,
                                                 PCI_DMA_TODEVICE);

                                /* Free the original skb. */
                                dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
                                tp->tx_buffers[entry].skb = NULL;
                                tp->tx_buffers[entry].mapping = 0;
                                tx++;
                        }
#ifndef final_version
                        if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
                                printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d.\n",
                                       dev->name, dirty_tx, tp->cur_tx);
                                dirty_tx += TX_RING_SIZE;
                        }
#endif

                        if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
                                netif_wake_queue(dev);

                        tp->dirty_tx = dirty_tx;
                        if (csr5 & TxDied) {
                                if (tulip_debug > 2)
                                        printk(KERN_WARNING "%s: The transmitter stopped."
                                               "  CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
                                               dev->name, csr5, ioread32(ioaddr + CSR6), tp->csr6);
                                tulip_restart_rxtx(tp);
                        }
                        spin_unlock(&tp->lock);
                }
                if (csr5 & AbnormalIntr) {      /* Abnormal error summary bit. */
                        if (csr5 == 0xffffffff)
                                break;
                        if (csr5 & TxJabber) tp->stats.tx_errors++;
                        if (csr5 & TxFIFOUnderflow) {
                                if ((tp->csr6 & 0xC000) != 0xC000)
                                        tp->csr6 += 0x4000;     /* Bump up the Tx threshold */
                                else
                                        tp->csr6 |= 0x00200000; /* Store-n-forward. */
                                /* Restart the transmit process. */
                                tulip_restart_rxtx(tp);
                                iowrite32(0, ioaddr + CSR1);
                        }
                        if (csr5 & (RxDied | RxNoBuf)) {
                                if (tp->flags & COMET_MAC_ADDR) {
                                        iowrite32(tp->mc_filter[0], ioaddr + 0xAC);
                                        iowrite32(tp->mc_filter[1], ioaddr + 0xB0);
                                }
                        }
                        if (csr5 & RxDied) {            /* Missed a Rx frame. */
                                tp->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
                                tp->stats.rx_errors++;
                                tulip_start_rxtx(tp);
                        }
                        /*
                         * NB: t21142_lnk_change() does a del_timer_sync(), so be careful
                         * if this call is ever done under the spinlock
                         */
                        if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
                                if (tp->link_change)
                                        (tp->link_change)(dev, csr5);
                        }
                        if (csr5 & SystemError) {
                                int error = (csr5 >> 23) & 7;
                                /* oops, we hit a PCI error.  The code produced corresponds
                                 * to the reason:
                                 *  0 - parity error
                                 *  1 - master abort
                                 *  2 - target abort
                                 * Note that on parity error, we should do a software reset
                                 * of the chip to get it back into a sane state (according
                                 * to the 21142/3 docs that is).
                                 */
                                printk(KERN_ERR "%s: (%lu) System Error occurred (%d)\n",
                                       dev->name, tp->nir, error);
                        }
                        /* Clear all error sources, including undocumented ones! */
                        iowrite32(0x0800f7ba, ioaddr + CSR5);
                        oi++;
                }
                if (csr5 & TimerInt) {
                        if (tulip_debug > 2)
                                printk(KERN_ERR "%s: Re-enabling interrupts, %8.8x.\n",
                                       dev->name, csr5);
                        iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
                        tp->ttimer = 0;
                        oi++;
                }
                if (tx > maxtx || rx > maxrx || oi > maxoi) {
                        if (tulip_debug > 1)
                                printk(KERN_WARNING "%s: Too much work during an interrupt, "
                                       "csr5=0x%8.8x. (%lu) (%d,%d,%d)\n",
                                       dev->name, csr5, tp->nir, tx, rx, oi);
                        /* Acknowledge all interrupt sources. */
                        iowrite32(0x8001ffff, ioaddr + CSR5);
                        if (tp->flags & HAS_INTR_MITIGATION) {
                                /* Josip Loncaric at ICASE did extensive experimentation
                                   to develop a good interrupt mitigation setting. */
                                iowrite32(0x8b240000, ioaddr + CSR11);
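                                /* Note, assuming the CSR11 bit layout documented
                                   at mit_table above: 0x8b240000 decodes to
                                   TX timer = 1 (x16 cycle size), TX pkts = 3,
                                   RX timer = 2, RX pkts = 2, cycle-size bit set,
                                   continuous mode off. */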
                        } else if (tp->chip_id == LC82C168) {
                                /* the LC82C168 doesn't have a hw timer. */
                                iowrite32(0x00, ioaddr + CSR7);
                                mod_timer(&tp->timer, RUN_AT(HZ/50));
                        } else {
                                /* Mask all interrupting sources, set timer to
                                   re-enable. */
                                iowrite32(((~csr5) & 0x0001ebef) | AbnormalIntr | TimerInt,
                                          ioaddr + CSR7);
                                iowrite32(0x0012, ioaddr + CSR11);
                        }
                        break;
                }

                work_count--;
                if (work_count == 0)
                        break;
                csr5 = ioread32(ioaddr + CSR5);

#ifdef CONFIG_TULIP_NAPI
                if (rxd)
                        csr5 &= ~RxPollInt;
        } while ((csr5 & (TxNoBuf |
                          TxDied |
                          TxIntr |
                          TimerInt |
                          /* Abnormal intr. */
                          RxDied |
                          TxFIFOUnderflow |
                          TxJabber |
                          TPLnkFail |
                          SystemError)) != 0);
#else
        } while ((csr5 & (NormalIntr|AbnormalIntr)) != 0);

        tulip_refill_rx(dev);
        /* check if the card is in suspend mode */
        entry = tp->dirty_rx % RX_RING_SIZE;
        if (tp->rx_buffers[entry].skb == NULL) {
                if (tulip_debug > 1)
                        printk(KERN_WARNING "%s: in rx suspend mode: (%lu) (tp->cur_rx = %u, ttimer = %d, rx = %d) go/stay in suspend mode\n",
                               dev->name, tp->nir, tp->cur_rx, tp->ttimer, rx);
                if (tp->chip_id == LC82C168) {
                        iowrite32(0x00, ioaddr + CSR7);
                        mod_timer(&tp->timer, RUN_AT(HZ/50));
                } else {
                        if (tp->ttimer == 0 || (ioread32(ioaddr + CSR11) & 0xffff) == 0) {
                                if (tulip_debug > 1)
                                        printk(KERN_WARNING "%s: in rx suspend mode: (%lu) set timer\n",
                                               dev->name, tp->nir);
                                iowrite32(tulip_tbl[tp->chip_id].valid_intrs | TimerInt,
                                          ioaddr + CSR7);
                                iowrite32(TimerInt, ioaddr + CSR5);
                                iowrite32(12, ioaddr + CSR11);
                                tp->ttimer = 1;
                        }
                }
        }
#endif /* CONFIG_TULIP_NAPI */
        if ((missed = ioread32(ioaddr + CSR8) & 0x1ffff)) {
                tp->stats.rx_dropped += missed & 0x10000 ? 0x10000 : missed;
        }
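        /* Note on the arithmetic above, assuming 21143 CSR8 semantics: the
           low 16 bits of CSR8 count missed frames and bit 16 is the
           counter-overflow flag, so on overflow the code credits the full
           0x10000 rather than the wrapped count. */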
        if (tulip_debug > 4)
                printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#4.4x.\n",
                       dev->name, ioread32(ioaddr + CSR5));

        return IRQ_HANDLED;
}