/*
	drivers/net/tulip/interrupt.c

	Copyright 2000,2001  The Linux Kernel Team
	Written/copyright 1994-2001 by Donald Becker.

	This software may be used and distributed according to the terms
	of the GNU General Public License, incorporated herein by reference.

	Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
	for more information on this driver.
	Please submit bugs to http://bugzilla.kernel.org/ .

*/
#include <linux/pci.h>
#include "tulip.h"
#include <linux/etherdevice.h>
int tulip_rx_copybreak;
unsigned int tulip_max_interrupt_work;
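/* (Note: both knobs are assigned from the rx_copybreak and
 * max_interrupt_work module parameters in tulip_core.c.) */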
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
#define MIT_SIZE 15
#define MIT_TABLE 15 /* We use 0 or max */

static unsigned int mit_table[MIT_SIZE+1] =
{
	/*  CSR11 21143 hardware Mitigation Control Interrupt
	    We use only RX mitigation; other techniques are used for
	    TX intr. mitigation.

	    31    Cycle Size (timer control)
	    30:27 TX timer in 16 * Cycle size
	    26:24 TX No pkts before Int.
	    23:20 RX timer in Cycle size
	    19:17 RX No pkts before Int.
	    16    Continuous Mode (CM)
	*/

	0x0,		/* IM disabled */
	0x80150000,	/* RX time = 1, RX pkts = 2, CM = 1 */
	/* ... intermediate settings elided; only entries 0 and 15 are used ... */
//	0x80FF0000	/* RX time = 16, RX pkts = 7, CM = 1 */
	0x80F10000	/* RX time = 16, RX pkts = 0, CM = 1 */
};
#endif
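/*
 * Illustrative sketch (not in the original driver): composing a CSR11
 * mitigation word from the field layout documented in mit_table above.
 * The helper and its parameter names are hypothetical.
 */
static inline u32 csr11_mit_word(u32 cycle_size, u32 tx_timer, u32 tx_pkts,
				 u32 rx_timer, u32 rx_pkts, u32 cm)
{
	return (cycle_size << 31) |	/* 31    Cycle Size */
	       (tx_timer << 27) |	/* 30:27 TX timer */
	       (tx_pkts << 24) |	/* 26:24 TX pkts before Int. */
	       (rx_timer << 20) |	/* 23:20 RX timer */
	       (rx_pkts << 17) |	/* 19:17 RX pkts before Int. */
	       (cm << 16);		/* 16    Continuous Mode */
}
/* e.g. csr11_mit_word(1, 0, 0, 1, 2, 1) == 0x80150000 and
 * csr11_mit_word(1, 0, 0, 15, 0, 1) == 0x80F10000, matching the
 * mit_table entries above. */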
int tulip_refill_rx(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	int entry;
	int refilled = 0;

	/* Refill the Rx ring buffers. */
	for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
		entry = tp->dirty_rx % RX_RING_SIZE;
		if (tp->rx_buffers[entry].skb == NULL) {
			struct sk_buff *skb;
			dma_addr_t mapping;

			skb = tp->rx_buffers[entry].skb = dev_alloc_skb(PKT_BUF_SZ);
			if (skb == NULL)
				break;

			mapping = pci_map_single(tp->pdev, skb->data, PKT_BUF_SZ,
						 PCI_DMA_FROMDEVICE);
			tp->rx_buffers[entry].mapping = mapping;

			skb->dev = dev;	/* Mark as being used by this device. */
			tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
			refilled++;
		}
		tp->rx_ring[entry].status = cpu_to_le32(DescOwned);
	}
	if (tp->chip_id == LC82C168) {
		if (((ioread32(tp->base_addr + CSR5) >> 17) & 0x07) == 4) {
			/* Rx stopped due to out of buffers,
			 * restart it
			 */
			iowrite32(0x01, tp->base_addr + CSR2);
		}
	}
	return refilled;
}
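/* (Note: writing DescOwned into a descriptor's status word, as done above,
 * hands that descriptor back to the chip; the chip clears the bit again
 * once it has filled the buffer.) */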
#ifdef CONFIG_TULIP_NAPI

void oom_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct tulip_private *tp = netdev_priv(dev);
	napi_schedule(&tp->napi);
}
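/* (oom_timer is armed from tulip_poll()'s oom path below: it fires one
 * jiffy later and re-schedules NAPI so the Rx refill can be retried after
 * an allocation failure, with Rx interrupts still masked.) */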
int tulip_poll(struct napi_struct *napi, int budget)
{
	struct tulip_private *tp = container_of(napi, struct tulip_private, napi);
	struct net_device *dev = tp->dev;
	int entry = tp->cur_rx % RX_RING_SIZE;
	int work_done = 0;
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
	int received = 0;
#endif

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION

	/* that one buffer is needed for mit activation; or might be a
	   bug in the ring buffer code; check later -- JHS */

	if (budget >= RX_RING_SIZE)
		budget--;
#endif
	if (tulip_debug > 4)
		printk(KERN_DEBUG " In tulip_rx(), entry %d %08x\n",
		       entry, tp->rx_ring[entry].status);

	do {
		if (ioread32(tp->base_addr + CSR5) == 0xffffffff) {
			printk(KERN_DEBUG " In tulip_poll(), hardware disappeared\n");
			break;
		}
		/* Acknowledge current RX interrupt sources. */
		iowrite32((RxIntr | RxNoBuf), tp->base_addr + CSR5);

		/* If we own the next entry, it is a new packet. Send it up. */
		while (!(tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
			s32 status = le32_to_cpu(tp->rx_ring[entry].status);
			short pkt_len;

			if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx)
				break;

			if (tulip_debug > 5)
				printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %08x\n",
				       dev->name, entry, status);

			if (++work_done >= budget)
				goto not_done;
			/*
			 * Omit the four octet CRC from the length.
			 * (May not be considered valid until we have
			 * checked status for RxLengthOver2047 bits)
			 */
			pkt_len = ((status >> 16) & 0x7ff) - 4;
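			/* (Worked example: a full-sized 1514-byte frame plus
			 * its 4-byte CRC makes the descriptor length field
			 * read 1518, so pkt_len = 1518 - 4 = 1514.) */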
			/*
			 * Maximum pkt_len is 1518 (1514 + vlan header)
			 * Anything higher than this is always invalid
			 * regardless of RxLengthOver2047 bits
			 */

			if ((status & (RxLengthOver2047 |
				       RxDescCRCError |
				       RxDescCollisionSeen |
				       RxDescRunt |
				       RxDescDescErr |
				       RxWholePkt)) != RxWholePkt ||
			    pkt_len > 1518) {
				if ((status & (RxLengthOver2047 |
					       RxWholePkt)) != RxWholePkt) {
					/* Ignore earlier buffers. */
					if ((status & 0xffff) != 0x7fff) {
						if (tulip_debug > 1)
							dev_warn(&dev->dev,
								 "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
								 status);
						tp->stats.rx_length_errors++;
					}
				} else {
					/* There was a fatal error. */
					if (tulip_debug > 2)
						printk(KERN_DEBUG "%s: Receive error, Rx status %08x\n",
						       dev->name, status);
					tp->stats.rx_errors++; /* end of a packet. */
					if (pkt_len > 1518 ||
					    (status & RxDescRunt))
						tp->stats.rx_length_errors++;

					if (status & 0x0004) tp->stats.rx_frame_errors++;
					if (status & 0x0002) tp->stats.rx_crc_errors++;
					if (status & 0x0001) tp->stats.rx_fifo_errors++;
				}
			} else {
				struct sk_buff *skb;
				/* Check if the packet is long enough to accept without copying
				   to a minimally-sized skbuff. */
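				/* (Copying short packets into a fresh skb lets
				 * the full-sized ring buffer stay mapped for
				 * the NIC at the cost of one memcpy;
				 * tulip_rx_copybreak sets the crossover.) */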
				if (pkt_len < tulip_rx_copybreak &&
				    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
					skb_reserve(skb, 2);	/* 16 byte align the IP header */
					pci_dma_sync_single_for_cpu(tp->pdev,
								    tp->rx_buffers[entry].mapping,
								    pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
					skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
								pkt_len);
					skb_put(skb, pkt_len);
#else
					memcpy(skb_put(skb, pkt_len),
					       tp->rx_buffers[entry].skb->data,
					       pkt_len);
#endif
					pci_dma_sync_single_for_device(tp->pdev,
								       tp->rx_buffers[entry].mapping,
								       pkt_len, PCI_DMA_FROMDEVICE);
				} else {	/* Pass up the skb already on the Rx ring. */
					char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
							     pkt_len);

#ifndef final_version
					if (tp->rx_buffers[entry].mapping !=
					    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
						dev_err(&dev->dev,
							"Internal fault: The skbuff addresses do not match in tulip_rx: %08x vs. %08llx %p / %p\n",
							le32_to_cpu(tp->rx_ring[entry].buffer1),
							(unsigned long long)tp->rx_buffers[entry].mapping,
							skb->head, temp);
					}
#endif

					pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
							 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

					tp->rx_buffers[entry].skb = NULL;
					tp->rx_buffers[entry].mapping = 0;
				}
				skb->protocol = eth_type_trans(skb, dev);

				netif_receive_skb(skb);

				tp->stats.rx_packets++;
				tp->stats.rx_bytes += pkt_len;
			}
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
			received++;
#endif

			entry = (++tp->cur_rx) % RX_RING_SIZE;
			if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4)
				tulip_refill_rx(dev);
		}
		/* New ack strategy... irq does not ack Rx any longer
		   hopefully this helps */

		/* Really bad things can happen here... If a new packet arrives
		 * and an irq arrives (tx or just due to an occasionally unset
		 * mask), it will be acked by the irq handler, but the new thread
		 * is not scheduled. It is a major hole in the design.
		 * No idea how to fix this if "playing with fire" fails
		 * tomorrow (night 011029). If it does not fail, we have won
		 * finally: the amount of IO did not increase at all. */
	} while ((ioread32(tp->base_addr + CSR5) & RxIntr));
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION

	/* We use this simplistic scheme for IM. It's proven by
	   real life installations. We can have IM enabled
	   continuously but this would cause unnecessary latency.
	   Unfortunately we can't use all the NET_RX_* feedback here.
	   This would turn on IM for devices that are not contributing
	   to backlog congestion with unnecessary latency.

	   We monitor the device RX-ring and have:

	   HW Interrupt Mitigation either ON or OFF.

	   ON:  More than 1 pkt received (per intr.) OR we are dropping
	   OFF: Only 1 pkt received

	   Note. We only use min and max (0, 15) settings from mit_table */
	if (tp->flags & HAS_INTR_MITIGATION) {
		if (received > 1) {
			if (!tp->mit_on) {
				tp->mit_on = 1;
				iowrite32(mit_table[MIT_TABLE], tp->base_addr + CSR11);
			}
		} else {
			if (tp->mit_on) {
				tp->mit_on = 0;
				iowrite32(0, tp->base_addr + CSR11);
			}
		}
	}

#endif /* CONFIG_TULIP_NAPI_HW_MITIGATION */
	tulip_refill_rx(dev);

	/* If RX ring is not full we are out of memory. */
	if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		goto oom;

	/* Remove us from polling list and enable RX intr. */
	napi_complete(napi);
	iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr + CSR7);
	/* The last op happens after poll completion. Which means the following:
	 * 1. it can race with disabling irqs in irq handler
	 * 2. it can race with dis-/enabling irqs in other poll threads
	 * 3. if an irq raised after the beginning of the loop, it will be
	 *    immediately triggered here.
	 *
	 * Summarizing: the logic results in some redundant irqs both
	 * due to races in masking and due to too late acking of already
	 * processed irqs. But it must not result in losing events.
	 */

	return work_done;

not_done:
	if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 ||
	    tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		tulip_refill_rx(dev);

	if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		goto oom;

	return work_done;
oom:	/* Executed with RX ints disabled */

	/* Start timer, stop polling, but do not enable rx interrupts. */
	mod_timer(&tp->oom_timer, jiffies + 1);

	/* Think: timer_pending() was an explicit signature of bug.
	 * Timer can be pending now but fired and completed
	 * before we did napi_complete(). See? We would lose it. */

	/* remove ourselves from the polling list */
	napi_complete(napi);

	return work_done;
}
#else /* CONFIG_TULIP_NAPI */
static int tulip_rx(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	int entry = tp->cur_rx % RX_RING_SIZE;
	int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
	int received = 0;
	if (tulip_debug > 4)
		printk(KERN_DEBUG " In tulip_rx(), entry %d %08x\n",
		       entry, tp->rx_ring[entry].status);
	/* If we own the next entry, it is a new packet. Send it up. */
	while (!(tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
		s32 status = le32_to_cpu(tp->rx_ring[entry].status);
		short pkt_len;

		if (tulip_debug > 5)
			printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %08x\n",
			       dev->name, entry, status);
		if (--rx_work_limit < 0)
			break;
		/*
		 * Omit the four octet CRC from the length.
		 * (May not be considered valid until we have
		 * checked status for RxLengthOver2047 bits)
		 */
		pkt_len = ((status >> 16) & 0x7ff) - 4;
		/*
		 * Maximum pkt_len is 1518 (1514 + vlan header)
		 * Anything higher than this is always invalid
		 * regardless of RxLengthOver2047 bits
		 */

		if ((status & (RxLengthOver2047 |
			       RxDescCRCError |
			       RxDescCollisionSeen |
			       RxDescRunt |
			       RxDescDescErr |
			       RxWholePkt)) != RxWholePkt ||
		    pkt_len > 1518) {
			if ((status & (RxLengthOver2047 |
				       RxWholePkt)) != RxWholePkt) {
				/* Ignore earlier buffers. */
				if ((status & 0xffff) != 0x7fff) {
					if (tulip_debug > 1)
						dev_warn(&dev->dev,
							 "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
							 status);
					tp->stats.rx_length_errors++;
				}
			} else {
				/* There was a fatal error. */
				if (tulip_debug > 2)
					printk(KERN_DEBUG "%s: Receive error, Rx status %08x\n",
					       dev->name, status);
				tp->stats.rx_errors++; /* end of a packet. */
				if (pkt_len > 1518 ||
				    (status & RxDescRunt))
					tp->stats.rx_length_errors++;
				if (status & 0x0004) tp->stats.rx_frame_errors++;
				if (status & 0x0002) tp->stats.rx_crc_errors++;
				if (status & 0x0001) tp->stats.rx_fifo_errors++;
			}
		} else {
			struct sk_buff *skb;
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < tulip_rx_copybreak &&
			    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single_for_cpu(tp->pdev,
							    tp->rx_buffers[entry].mapping,
							    pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
				skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
							pkt_len);
				skb_put(skb, pkt_len);
#else
				memcpy(skb_put(skb, pkt_len),
				       tp->rx_buffers[entry].skb->data,
				       pkt_len);
#endif
				pci_dma_sync_single_for_device(tp->pdev,
							       tp->rx_buffers[entry].mapping,
							       pkt_len, PCI_DMA_FROMDEVICE);
			} else {	/* Pass up the skb already on the Rx ring. */
				char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
						     pkt_len);

#ifndef final_version
				if (tp->rx_buffers[entry].mapping !=
				    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
					dev_err(&dev->dev,
						"Internal fault: The skbuff addresses do not match in tulip_rx: %08x vs. %Lx %p / %p\n",
						le32_to_cpu(tp->rx_ring[entry].buffer1),
						(long long)tp->rx_buffers[entry].mapping,
						skb->head, temp);
				}
#endif

				pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
						 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

				tp->rx_buffers[entry].skb = NULL;
				tp->rx_buffers[entry].mapping = 0;
			}
			skb->protocol = eth_type_trans(skb, dev);

			netif_rx(skb);

			tp->stats.rx_packets++;
			tp->stats.rx_bytes += pkt_len;
		}
		received++;
		entry = (++tp->cur_rx) % RX_RING_SIZE;
	}
	return received;
}
#endif /* CONFIG_TULIP_NAPI */
static inline unsigned int phy_interrupt (struct net_device *dev)
{
#ifdef __hppa__
	struct tulip_private *tp = netdev_priv(dev);
	int csr12 = ioread32(tp->base_addr + CSR12) & 0xff;

	if (csr12 != tp->csr12_shadow) {
		/* ack interrupt */
		iowrite32(csr12 | 0x02, tp->base_addr + CSR12);
		tp->csr12_shadow = csr12;
		/* do link change stuff */
		spin_lock(&tp->lock);
		tulip_check_duplex(dev);
		spin_unlock(&tp->lock);
		/* clear irq ack bit */
		iowrite32(csr12 & ~0x02, tp->base_addr + CSR12);

		return 1;
	}
#endif

	return 0;
}
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
irqreturn_t tulip_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;
	int csr5;
	int missed;
	int rx = 0;
	int tx = 0;
	int oi = 0;
	int maxrx = RX_RING_SIZE;
	int maxtx = TX_RING_SIZE;
	int maxoi = TX_RING_SIZE;
#ifdef CONFIG_TULIP_NAPI
	int rxd = 0;
#else
	int entry;
#endif
	unsigned int work_count = tulip_max_interrupt_work;
	unsigned int handled = 0;
	/* Let's see whether the interrupt really is for us */
	csr5 = ioread32(ioaddr + CSR5);

	if (tp->flags & HAS_PHY_IRQ)
		handled = phy_interrupt (dev);

	if ((csr5 & (NormalIntr | AbnormalIntr)) == 0)
		return IRQ_RETVAL(handled);

	tp->nir++;

	do {
#ifdef CONFIG_TULIP_NAPI

		if (!rxd && (csr5 & (RxIntr | RxNoBuf))) {
			rxd++;
			/* Mask RX intrs and add the device to poll list. */
			iowrite32(tulip_tbl[tp->chip_id].valid_intrs & ~RxPollInt,
				  ioaddr + CSR7);
			napi_schedule(&tp->napi);

			if (!(csr5 & ~(AbnormalIntr | NormalIntr | RxPollInt | TPLnkPass)))
				break;
		}

		/* Acknowledge the interrupt sources we handle here ASAP
		   the poll function does Rx and RxNoBuf acking */

		iowrite32(csr5 & 0x0001ff3f, ioaddr + CSR5);
#else
		/* Acknowledge all of the current interrupt sources ASAP. */
		iowrite32(csr5 & 0x0001ffff, ioaddr + CSR5);

		if (csr5 & (RxIntr | RxNoBuf)) {
			rx += tulip_rx(dev);
			tulip_refill_rx(dev);
		}

#endif /* CONFIG_TULIP_NAPI */
		if (tulip_debug > 4)
			printk(KERN_DEBUG "%s: interrupt  csr5=%#8.8x new csr5=%#8.8x\n",
			       dev->name, csr5, ioread32(ioaddr + CSR5));
		if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
			unsigned int dirty_tx;

			spin_lock(&tp->lock);

			for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
			     dirty_tx++) {
				int entry = dirty_tx % TX_RING_SIZE;
				int status = le32_to_cpu(tp->tx_ring[entry].status);
				if (status < 0)
					break;	/* It still has not been Txed */
				/* Check for Rx filter setup frames. */
				if (tp->tx_buffers[entry].skb == NULL) {
					/* test because dummy frames not mapped */
					if (tp->tx_buffers[entry].mapping)
						pci_unmap_single(tp->pdev,
								 tp->tx_buffers[entry].mapping,
								 sizeof(tp->setup_frame),
								 PCI_DMA_TODEVICE);
					continue;
				}
				if (status & 0x8000) {
					/* There was a major error; log it. */
#ifndef final_version
					if (tulip_debug > 1)
						printk(KERN_DEBUG "%s: Transmit error, Tx status %08x\n",
						       dev->name, status);
#endif
					tp->stats.tx_errors++;
					if (status & 0x4104) tp->stats.tx_aborted_errors++;
					if (status & 0x0C00) tp->stats.tx_carrier_errors++;
					if (status & 0x0200) tp->stats.tx_window_errors++;
					if (status & 0x0002) tp->stats.tx_fifo_errors++;
					if ((status & 0x0080) && tp->full_duplex == 0)
						tp->stats.tx_heartbeat_errors++;
				} else {
					tp->stats.tx_bytes +=
						tp->tx_buffers[entry].skb->len;
					tp->stats.collisions += (status >> 3) & 15;
					tp->stats.tx_packets++;
				}
				pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
						 tp->tx_buffers[entry].skb->len,
						 PCI_DMA_TODEVICE);

				/* Free the original skb. */
				dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
				tp->tx_buffers[entry].skb = NULL;
				tp->tx_buffers[entry].mapping = 0;
				tx++;
			}
#ifndef final_version
			if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
				dev_err(&dev->dev,
					"Out-of-sync dirty pointer, %d vs. %d\n",
					dirty_tx, tp->cur_tx);
				dirty_tx += TX_RING_SIZE;
			}
#endif
			if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
				netif_wake_queue(dev);

			tp->dirty_tx = dirty_tx;
			if (csr5 & TxDied) {
				if (tulip_debug > 2)
					dev_warn(&dev->dev,
						 "The transmitter stopped. CSR5 is %x, CSR6 %x, new CSR6 %x\n",
						 csr5, ioread32(ioaddr + CSR6),
						 tp->csr6);
				tulip_restart_rxtx(tp);
			}
			spin_unlock(&tp->lock);
		}
		/* Log errors. */
		if (csr5 & AbnormalIntr) {	/* Abnormal error summary bit. */
			if (csr5 == 0xffffffff)
				break;
			if (csr5 & TxJabber) tp->stats.tx_errors++;
			if (csr5 & TxFIFOUnderflow) {
				if ((tp->csr6 & 0xC000) != 0xC000)
					tp->csr6 += 0x4000;	/* Bump up the Tx threshold */
				else
					tp->csr6 |= 0x00200000;	/* Store-n-forward. */
				/* Restart the transmit process. */
				tulip_restart_rxtx(tp);
				iowrite32(0, ioaddr + CSR1);
			}
			if (csr5 & (RxDied | RxNoBuf)) {
				if (tp->flags & COMET_MAC_ADDR) {
					iowrite32(tp->mc_filter[0], ioaddr + 0xAC);
					iowrite32(tp->mc_filter[1], ioaddr + 0xB0);
				}
			}
			if (csr5 & RxDied) {	/* Missed a Rx frame. */
				tp->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
				tp->stats.rx_errors++;
				tulip_start_rxtx(tp);
			}
			/*
			 * NB: t21142_lnk_change() does a del_timer_sync(), so be careful if this
			 * call is ever done under the spinlock
			 */
			if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
				if (tp->link_change)
					(tp->link_change)(dev, csr5);
			}
			if (csr5 & SystemError) {
				int error = (csr5 >> 23) & 7;
				/* oops, we hit a PCI error. The code produced corresponds
				 * to the reason:
				 *  0 - parity error
				 *  1 - master abort
				 *  2 - target abort
				 * Note that on parity error, we should do a software reset
				 * of the chip to get it back into a sane state (according
				 * to the 21142/3 docs that is).
				 */
				dev_err(&dev->dev,
					"(%lu) System Error occurred (%d)\n",
					tp->nir, error);
			}
			/* Clear all error sources, included undocumented ones! */
			iowrite32(0x0800f7ba, ioaddr + CSR5);
			oi++;
		}
		if (csr5 & TimerInt) {
			if (tulip_debug > 2)
				dev_err(&dev->dev,
					"Re-enabling interrupts, %08x\n",
					csr5);
			iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
			tp->ttimer = 0;
			oi++;
		}
		if (tx > maxtx || rx > maxrx || oi > maxoi) {
			if (tulip_debug > 1)
				dev_warn(&dev->dev, "Too much work during an interrupt, csr5=0x%08x. (%lu) (%d,%d,%d)\n",
					 csr5, tp->nir, tx, rx, oi);
			/* Acknowledge all interrupt sources. */
			iowrite32(0x8001ffff, ioaddr + CSR5);
			if (tp->flags & HAS_INTR_MITIGATION) {
				/* Josip Loncaric at ICASE did extensive experimentation
				   to develop a good interrupt mitigation setting. */
				iowrite32(0x8b240000, ioaddr + CSR11);
			} else if (tp->chip_id == LC82C168) {
				/* the LC82C168 doesn't have a hw timer. */
				iowrite32(0x00, ioaddr + CSR7);
				mod_timer(&tp->timer, RUN_AT(HZ/50));
			} else {
				/* Mask all interrupting sources, set timer to
				   re-enable. */
				iowrite32(((~csr5) & 0x0001ebef) | AbnormalIntr | TimerInt,
					  ioaddr + CSR7);
				iowrite32(0x0012, ioaddr + CSR11);
			}
			break;
		}

		work_count--;
		if (work_count == 0)
			break;
		csr5 = ioread32(ioaddr + CSR5);

#ifdef CONFIG_TULIP_NAPI
		if (rxd)
			csr5 &= ~RxPollInt;
	} while ((csr5 & (TxNoBuf |
			  TxDied |
			  TxIntr |
			  TimerInt |
			  /* Abnormal intr. */
			  RxDied |
			  TxFIFOUnderflow |
			  TxJabber |
			  TPLnkFail |
			  SystemError)) != 0);
#else
	} while ((csr5 & (NormalIntr | AbnormalIntr)) != 0);
	tulip_refill_rx(dev);

	/* check if the card is in suspend mode */
	entry = tp->dirty_rx % RX_RING_SIZE;
	if (tp->rx_buffers[entry].skb == NULL) {
770 "in rx suspend mode: (%lu) (tp->cur_rx = %u, ttimer = %d, rx = %d) go/stay in suspend mode\n",
771 tp
->nir
, tp
->cur_rx
, tp
->ttimer
, rx
);
772 if (tp
->chip_id
== LC82C168
) {
773 iowrite32(0x00, ioaddr
+ CSR7
);
774 mod_timer(&tp
->timer
, RUN_AT(HZ
/50));
			if (tp->ttimer == 0 || (ioread32(ioaddr + CSR11) & 0xffff) == 0) {
				if (tulip_debug > 1)
					dev_warn(&dev->dev,
						 "in rx suspend mode: (%lu) set timer\n",
						 tp->nir);
				iowrite32(tulip_tbl[tp->chip_id].valid_intrs | TimerInt,
					  ioaddr + CSR7);
				iowrite32(TimerInt, ioaddr + CSR5);
				iowrite32(12, ioaddr + CSR11);
				tp->ttimer = 1;
			}
		}
	}
#endif /* CONFIG_TULIP_NAPI */
	if ((missed = ioread32(ioaddr + CSR8) & 0x1ffff)) {
		tp->stats.rx_dropped += missed & 0x10000 ? 0x10000 : missed;
	}
	if (tulip_debug > 4)
		printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#04x\n",
		       dev->name, ioread32(ioaddr + CSR5));

	return IRQ_HANDLED;
}