/*
	drivers/net/tulip/interrupt.c

	Copyright 2000,2001  The Linux Kernel Team
	Written/copyright 1994-2001 by Donald Becker.

	This software may be used and distributed according to the terms
	of the GNU General Public License, incorporated herein by reference.

	Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
	for more information on this driver.

	Please submit bugs to http://bugzilla.kernel.org/ .
*/
#include <linux/pci.h>
#include "tulip.h"
#include <linux/etherdevice.h>
int tulip_rx_copybreak;
unsigned int tulip_max_interrupt_work;
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
#define MIT_SIZE 15
#define MIT_TABLE 15 /* We use 0 or max */

static unsigned int mit_table[MIT_SIZE+1] =
{
	/*  CSR11 21143 hardware Mitigation Control Interrupt
	    We use only RX mitigation; other techniques are used for
	    TX intr. mitigation.

	   31    Cycle Size (timer control)
	   30:27 TX timer in 16 * Cycle size
	   26:24 TX No pkts before Int.
	   23:20 RX timer in Cycle size
	   19:17 RX No pkts before Int.
	   16    Continuous Mode (CM)
	*/

	[0] = 0x0,		/* IM disabled */
	[1] = 0x80150000,	/* RX time = 1, RX pkts = 2, CM = 1 */
	/* entries 2..14 are never read: only 0 and MIT_TABLE are
	   selected below (see the note in tulip_poll) */
//	[MIT_TABLE] = 0x80FF0000, /* RX time = 16, RX pkts = 7, CM = 1 */
	[MIT_TABLE] = 0x80F10000  /* RX time = 16, RX pkts = 0, CM = 1 */
};
#endif
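/*
 * Illustrative sketch, added for clarity (not part of the original
 * driver): how a CSR11 mitigation word is composed from the bit fields
 * documented in the table comment above.  The helper name and parameters
 * are hypothetical.  csr11_rx_mit(1, 2, 1) == 0x80150000, matching the
 * table entry above; the RX timer field is 4 bits wide, so 0xf is the
 * largest cycle count it can encode.
 */
static inline u32 csr11_rx_mit(unsigned int rx_time, unsigned int rx_pkts,
			       unsigned int cm)
{
	return (1U << 31) |			/* 31    Cycle Size (timer control) */
	       ((rx_time & 0xf) << 20) |	/* 23:20 RX timer in Cycle size */
	       ((rx_pkts & 0x7) << 17) |	/* 19:17 RX No pkts before Int. */
	       ((cm & 0x1) << 16);		/* 16    Continuous Mode (CM) */
}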
int tulip_refill_rx(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	int entry;
	int refilled = 0;

	/* Refill the Rx ring buffers. */
	for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
		entry = tp->dirty_rx % RX_RING_SIZE;
		if (tp->rx_buffers[entry].skb == NULL) {
			struct sk_buff *skb;
			dma_addr_t mapping;

			skb = tp->rx_buffers[entry].skb = dev_alloc_skb(PKT_BUF_SZ);
			if (skb == NULL)
				break;

			mapping = pci_map_single(tp->pdev, skb->data, PKT_BUF_SZ,
						 PCI_DMA_FROMDEVICE);
			tp->rx_buffers[entry].mapping = mapping;

			skb->dev = dev;			/* Mark as being used by this device. */
			tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
			refilled++;
		}
		tp->rx_ring[entry].status = cpu_to_le32(DescOwned);
	}
	if (tp->chip_id == LC82C168) {
		if (((ioread32(tp->base_addr + CSR5)>>17)&0x07) == 4) {
			/* Rx stopped due to out of buffers,
			 * restart it
			 */
			iowrite32(0x01, tp->base_addr + CSR2);
		}
	}
	return refilled;
}
#ifdef CONFIG_TULIP_NAPI

void oom_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct tulip_private *tp = netdev_priv(dev);
	napi_schedule(&tp->napi);
}
int tulip_poll(struct napi_struct *napi, int budget)
{
	struct tulip_private *tp = container_of(napi, struct tulip_private, napi);
	struct net_device *dev = tp->dev;
	int entry = tp->cur_rx % RX_RING_SIZE;
	int work_done = 0;
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
	int received = 0;
#endif

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION

	/* that one buffer is needed for mit activation; or might be a
	   bug in the ring buffer code; check later -- JHS*/

	if (budget >= RX_RING_SIZE)
		budget--;
#endif
	if (tulip_debug > 4)
		printk(KERN_DEBUG " In tulip_rx(), entry %d %08x\n",
		       entry, tp->rx_ring[entry].status);

	do {
		if (ioread32(tp->base_addr + CSR5) == 0xffffffff) {
			printk(KERN_DEBUG " In tulip_poll(), hardware disappeared\n");
			break;
		}
		/* Acknowledge current RX interrupt sources. */
		iowrite32((RxIntr | RxNoBuf), tp->base_addr + CSR5);

		/* If we own the next entry, it is a new packet. Send it up. */
		while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
			s32 status = le32_to_cpu(tp->rx_ring[entry].status);
			short pkt_len;

			if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx)
				break;

			if (tulip_debug > 5)
				printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %08x\n",
				       dev->name, entry, status);

			if (++work_done >= budget)
				goto not_done;

			/*
			 * Omit the four octet CRC from the length.
			 * (May not be considered valid until we have
			 * checked status for RxLengthOver2047 bits)
			 */
			pkt_len = ((status >> 16) & 0x7ff) - 4;
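			/* Worked example, added for clarity: a 64-byte frame
			 * on the wire is reported with frame-length field 64
			 * in status bits 26:16, so pkt_len = 64 - 4 = 60 once
			 * the trailing CRC is dropped. */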
			/*
			 * Maximum pkt_len is 1518 (1514 + vlan header)
			 * Anything higher than this is always invalid
			 * regardless of RxLengthOver2047 bits
			 */

			if ((status & (RxLengthOver2047 |
				       RxDescCRCError |
				       RxDescCollisionSeen |
				       RxDescRunt |
				       RxDescDescErr |
				       RxWholePkt)) != RxWholePkt ||
			    pkt_len > 1518) {
				if ((status & (RxLengthOver2047 |
					       RxWholePkt)) != RxWholePkt) {
					/* Ignore earlier buffers. */
					if ((status & 0xffff) != 0x7fff) {
						if (tulip_debug > 1)
							dev_warn(&dev->dev,
								 "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
								 status);
						dev->stats.rx_length_errors++;
					}
				} else {
					/* There was a fatal error. */
					if (tulip_debug > 2)
						printk(KERN_DEBUG "%s: Receive error, Rx status %08x\n",
						       dev->name, status);
					dev->stats.rx_errors++; /* end of a packet.*/
					if (pkt_len > 1518 ||
					    (status & RxDescRunt))
						dev->stats.rx_length_errors++;
					if (status & 0x0004)
						dev->stats.rx_frame_errors++;
					if (status & 0x0002)
						dev->stats.rx_crc_errors++;
					if (status & 0x0001)
						dev->stats.rx_fifo_errors++;
				}
			} else {
				struct sk_buff *skb;

				/* Check if the packet is long enough to accept without copying
				   to a minimally-sized skbuff. */
				if (pkt_len < tulip_rx_copybreak &&
				    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
					skb_reserve(skb, 2);	/* 16 byte align the IP header */
					pci_dma_sync_single_for_cpu(tp->pdev,
								    tp->rx_buffers[entry].mapping,
								    pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
					skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
								pkt_len);
					skb_put(skb, pkt_len);
#else
					memcpy(skb_put(skb, pkt_len),
					       tp->rx_buffers[entry].skb->data,
					       pkt_len);
#endif
					pci_dma_sync_single_for_device(tp->pdev,
								       tp->rx_buffers[entry].mapping,
								       pkt_len, PCI_DMA_FROMDEVICE);
				} else {	/* Pass up the skb already on the Rx ring. */
					char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
							     pkt_len);

#ifndef final_version
					if (tp->rx_buffers[entry].mapping !=
					    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
						dev_err(&dev->dev,
							"Internal fault: The skbuff addresses do not match in tulip_rx: %08x vs. %08llx %p / %p\n",
							le32_to_cpu(tp->rx_ring[entry].buffer1),
							(unsigned long long)tp->rx_buffers[entry].mapping,
							skb->head, temp);
					}
#endif

					pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
							 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

					tp->rx_buffers[entry].skb = NULL;
					tp->rx_buffers[entry].mapping = 0;
				}
				skb->protocol = eth_type_trans(skb, dev);

				netif_receive_skb(skb);

				dev->stats.rx_packets++;
				dev->stats.rx_bytes += pkt_len;
			}
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
			received++;
#endif

			entry = (++tp->cur_rx) % RX_RING_SIZE;
			if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4)
				tulip_refill_rx(dev);
		}
		/* New ack strategy... irq does not ack Rx any longer
		   hopefully this helps */

		/* Really bad things can happen here... If a new packet arrives
		 * and an irq arrives (tx or just due to an occasionally unset
		 * mask), it will be acked by the irq handler, but the new poll
		 * is not scheduled. It is a major hole in the design.
		 * No idea how to fix this if "playing with fire" fails
		 * tomorrow (night 011029). If it does not fail, we won
		 * finally: the amount of IO did not increase at all. */
	} while ((ioread32(tp->base_addr + CSR5) & RxIntr));
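	/* Illustration of the hole described above, added for clarity: a
	 * packet lands right after the loop's final CSR5 read; an irq (for
	 * tx, or because the Rx mask happened to be unset) is then acked by
	 * the irq handler, yet no new poll gets scheduled, so the packet
	 * waits for the next interrupt or timer to be noticed.  The CSR5
	 * re-check in the do/while narrows that window but cannot close it. */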
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION

	/* We use this simplistic scheme for IM. It's proven by
	   real life installations. We could have IM enabled
	   continuously but this would cause unnecessary latency.
	   Unfortunately we can't use all the NET_RX_* feedback here.
	   This would turn on IM for devices that are not contributing
	   to backlog congestion, with unnecessary latency.

	   We monitor the device RX-ring and have:

	   HW Interrupt Mitigation either ON or OFF.

	   ON:  More than 1 pkt received (per intr.) OR we are dropping
	   OFF: Only 1 pkt received

	   Note. We only use the min and max (0, 15) settings from mit_table */

	if (tp->flags & HAS_INTR_MITIGATION) {
		if (received > 1) {
			if (!tp->mit_on) {
				tp->mit_on = 1;
				iowrite32(mit_table[MIT_TABLE], tp->base_addr + CSR11);
			}
		} else {
			if (tp->mit_on) {
				tp->mit_on = 0;
				iowrite32(0, tp->base_addr + CSR11);
			}
		}
	}

#endif /* CONFIG_TULIP_NAPI_HW_MITIGATION */
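	/* Example of the on/off scheme, added for clarity: a poll that pulls
	 * several packets sets CSR11 to mit_table[MIT_TABLE] (RX time = 16,
	 * RX pkts = 0, CM = 1); once polls go back to a single packet each,
	 * CSR11 is cleared again so an idle link keeps its low latency. */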
	tulip_refill_rx(dev);

	/* If RX ring is not full we are out of memory. */
	if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		goto oom;

	/* Remove us from polling list and enable RX intr. */
	napi_complete(napi);
	iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7);

	/* The last op happens after poll completion. Which means the following:
	 * 1. it can race with disabling irqs in the irq handler
	 * 2. it can race with dis-/enabling irqs in other poll threads
	 * 3. if an irq is raised after the loop begins, it will be immediately
	 *    triggered here.
	 *
	 * Summarizing: the logic results in some redundant irqs both
	 * due to races in masking and due to too-late acking of already
	 * processed irqs. But it must not result in losing events.
	 */

	return work_done;
 not_done:
	if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 ||
	    tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		tulip_refill_rx(dev);

	if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		goto oom;

	return work_done;
 oom:	/* Executed with RX ints disabled */

	/* Start timer, stop polling, but do not enable rx interrupts. */
	mod_timer(&tp->oom_timer, jiffies+1);

	/* Think: timer_pending() was an explicit signature of a bug.
	 * The timer can be pending now but fired and completed
	 * before we did napi_complete(). See? We would lose it. */

	/* remove ourselves from the polling list */
	napi_complete(napi);

	return work_done;
}
#else /* CONFIG_TULIP_NAPI */

static int tulip_rx(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	int entry = tp->cur_rx % RX_RING_SIZE;
	int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
	int received = 0;

	if (tulip_debug > 4)
		printk(KERN_DEBUG " In tulip_rx(), entry %d %08x\n",
		       entry, tp->rx_ring[entry].status);
	/* If we own the next entry, it is a new packet. Send it up. */
	while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
		s32 status = le32_to_cpu(tp->rx_ring[entry].status);
		short pkt_len;

		if (tulip_debug > 5)
			printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %08x\n",
			       dev->name, entry, status);
		if (--rx_work_limit < 0)
			break;

		/*
		   Omit the four octet CRC from the length.
		   (May not be considered valid until we have
		   checked status for RxLengthOver2047 bits)
		*/
		pkt_len = ((status >> 16) & 0x7ff) - 4;
390 Maximum pkt_len is 1518 (1514 + vlan header)
391 Anything higher than this is always invalid
392 regardless of RxLengthOver2047 bits
395 if ((status
& (RxLengthOver2047
|
397 RxDescCollisionSeen
|
400 RxWholePkt
)) != RxWholePkt
||
402 if ((status
& (RxLengthOver2047
|
403 RxWholePkt
)) != RxWholePkt
) {
404 /* Ingore earlier buffers. */
405 if ((status
& 0xffff) != 0x7fff) {
408 "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
410 dev
->stats
.rx_length_errors
++;
413 /* There was a fatal error. */
415 printk(KERN_DEBUG
"%s: Receive error, Rx status %08x\n",
417 dev
->stats
.rx_errors
++; /* end of a packet.*/
418 if (pkt_len
> 1518 ||
419 (status
& RxDescRunt
))
420 dev
->stats
.rx_length_errors
++;
422 dev
->stats
.rx_frame_errors
++;
424 dev
->stats
.rx_crc_errors
++;
426 dev
->stats
.rx_fifo_errors
++;
		} else {
			struct sk_buff *skb;

			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < tulip_rx_copybreak &&
			    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single_for_cpu(tp->pdev,
							    tp->rx_buffers[entry].mapping,
							    pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
				skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
							pkt_len);
				skb_put(skb, pkt_len);
#else
				memcpy(skb_put(skb, pkt_len),
				       tp->rx_buffers[entry].skb->data,
				       pkt_len);
#endif
				pci_dma_sync_single_for_device(tp->pdev,
							       tp->rx_buffers[entry].mapping,
							       pkt_len, PCI_DMA_FROMDEVICE);
			} else {	/* Pass up the skb already on the Rx ring. */
				char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
						     pkt_len);

#ifndef final_version
				if (tp->rx_buffers[entry].mapping !=
				    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
					dev_err(&dev->dev,
						"Internal fault: The skbuff addresses do not match in tulip_rx: %08x vs. %Lx %p / %p\n",
						le32_to_cpu(tp->rx_ring[entry].buffer1),
						(long long)tp->rx_buffers[entry].mapping,
						skb->head, temp);
				}
#endif

				pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
						 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

				tp->rx_buffers[entry].skb = NULL;
				tp->rx_buffers[entry].mapping = 0;
			}
			skb->protocol = eth_type_trans(skb, dev);

			netif_rx(skb);

			dev->stats.rx_packets++;
			dev->stats.rx_bytes += pkt_len;
		}
		received++;
		entry = (++tp->cur_rx) % RX_RING_SIZE;
	}
	return received;
}
#endif /* CONFIG_TULIP_NAPI */
static inline unsigned int phy_interrupt (struct net_device *dev)
{
#ifdef __hppa__
	struct tulip_private *tp = netdev_priv(dev);
	int csr12 = ioread32(tp->base_addr + CSR12) & 0xff;

	if (csr12 != tp->csr12_shadow) {
		/* ack interrupt */
		iowrite32(csr12 | 0x02, tp->base_addr + CSR12);
		tp->csr12_shadow = csr12;
		/* do link change stuff */
		spin_lock(&tp->lock);
		tulip_check_duplex(dev);
		spin_unlock(&tp->lock);
		/* clear irq ack bit */
		iowrite32(csr12 & ~0x02, tp->base_addr + CSR12);

		return 1;
	}
#endif

	return 0;
}
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
irqreturn_t tulip_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;
	int csr5;
	int missed;
	int rx = 0;
	int tx = 0;
	int oi = 0;
	int maxrx = RX_RING_SIZE;
	int maxtx = TX_RING_SIZE;
	int maxoi = TX_RING_SIZE;
#ifdef CONFIG_TULIP_NAPI
	int rxd = 0;
#else
	int entry;
#endif
	unsigned int work_count = tulip_max_interrupt_work;
	unsigned int handled = 0;
	/* Let's see whether the interrupt really is for us */
	csr5 = ioread32(ioaddr + CSR5);

	if (tp->flags & HAS_PHY_IRQ)
		handled = phy_interrupt (dev);

	if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
		return IRQ_RETVAL(handled);

	tp->nir++;

	do {
#ifdef CONFIG_TULIP_NAPI

		if (!rxd && (csr5 & (RxIntr | RxNoBuf))) {
			rxd++;
			/* Mask RX intrs and add the device to poll list. */
			iowrite32(tulip_tbl[tp->chip_id].valid_intrs&~RxPollInt, ioaddr + CSR7);
			napi_schedule(&tp->napi);

			if (!(csr5&~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass)))
				break;
		}

		/* Acknowledge the interrupt sources we handle here ASAP
		   the poll function does Rx and RxNoBuf acking */

		iowrite32(csr5 & 0x0001ff3f, ioaddr + CSR5);

#else
		/* Acknowledge all of the current interrupt sources ASAP. */
		iowrite32(csr5 & 0x0001ffff, ioaddr + CSR5);

		if (csr5 & (RxIntr | RxNoBuf)) {
			rx += tulip_rx(dev);
			tulip_refill_rx(dev);
		}

#endif /*  CONFIG_TULIP_NAPI */
		if (tulip_debug > 4)
			printk(KERN_DEBUG "%s: interrupt csr5=%#8.8x new csr5=%#8.8x\n",
			       dev->name, csr5, ioread32(ioaddr + CSR5));
		if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
			unsigned int dirty_tx;

			spin_lock(&tp->lock);

			for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
			     dirty_tx++) {
				int entry = dirty_tx % TX_RING_SIZE;
				int status = le32_to_cpu(tp->tx_ring[entry].status);
				if (status < 0)
					break;	/* It still has not been Txed */

				/* Check for Rx filter setup frames. */
				if (tp->tx_buffers[entry].skb == NULL) {
					/* test because dummy frames not mapped */
					if (tp->tx_buffers[entry].mapping)
						pci_unmap_single(tp->pdev,
								 tp->tx_buffers[entry].mapping,
								 sizeof(tp->setup_frame),
								 PCI_DMA_TODEVICE);
					continue;
				}
				if (status & 0x8000) {
					/* There was a major error, log it. */
#ifndef final_version
					if (tulip_debug > 1)
						printk(KERN_DEBUG "%s: Transmit error, Tx status %08x\n",
						       dev->name, status);
#endif
					dev->stats.tx_errors++;
					if (status & 0x4104)
						dev->stats.tx_aborted_errors++;
					if (status & 0x0C00)
						dev->stats.tx_carrier_errors++;
					if (status & 0x0200)
						dev->stats.tx_window_errors++;
					if (status & 0x0002)
						dev->stats.tx_fifo_errors++;
					if ((status & 0x0080) && tp->full_duplex == 0)
						dev->stats.tx_heartbeat_errors++;
				} else {
					dev->stats.tx_bytes +=
						tp->tx_buffers[entry].skb->len;
					dev->stats.collisions += (status >> 3) & 15;
					dev->stats.tx_packets++;
				}
				pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
						 tp->tx_buffers[entry].skb->len,
						 PCI_DMA_TODEVICE);

				/* Free the original skb. */
				dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
				tp->tx_buffers[entry].skb = NULL;
				tp->tx_buffers[entry].mapping = 0;
				tx++;
			}
#ifndef final_version
			if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
				dev_err(&dev->dev,
					"Out-of-sync dirty pointer, %d vs. %d\n",
					dirty_tx, tp->cur_tx);
				dirty_tx += TX_RING_SIZE;
			}
#endif
			if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
				netif_wake_queue(dev);

			tp->dirty_tx = dirty_tx;
			if (csr5 & TxDied) {
				if (tulip_debug > 2)
					dev_warn(&dev->dev,
						 "The transmitter stopped. CSR5 is %x, CSR6 %x, new CSR6 %x\n",
						 csr5, ioread32(ioaddr + CSR6),
						 tp->csr6);
				tulip_restart_rxtx(tp);
			}
			spin_unlock(&tp->lock);
		}
		if (csr5 & AbnormalIntr) {	/* Abnormal error summary bit. */
			if (csr5 == 0xffffffff)
				break;
			if (csr5 & TxJabber)
				dev->stats.tx_errors++;
			if (csr5 & TxFIFOUnderflow) {
				if ((tp->csr6 & 0xC000) != 0xC000)
					tp->csr6 += 0x4000;	/* Bump up the Tx threshold */
				else
					tp->csr6 |= 0x00200000;	/* Store-n-forward. */
				/* Restart the transmit process. */
				tulip_restart_rxtx(tp);
				iowrite32(0, ioaddr + CSR1);
			}
			if (csr5 & (RxDied | RxNoBuf)) {
				if (tp->flags & COMET_MAC_ADDR) {
					iowrite32(tp->mc_filter[0], ioaddr + 0xAC);
					iowrite32(tp->mc_filter[1], ioaddr + 0xB0);
				}
			}
			if (csr5 & RxDied) {	/* Missed a Rx frame. */
				dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
				dev->stats.rx_errors++;
				tulip_start_rxtx(tp);
			}
			/*
			 * NB: t21142_lnk_change() does a del_timer_sync(), so be careful if this
			 * call is ever done under the spinlock
			 */
			if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
				if (tp->link_change)
					(tp->link_change)(dev, csr5);
			}
			if (csr5 & SystemError) {
				int error = (csr5 >> 23) & 7;
				/* oops, we hit a PCI error. The code produced corresponds
				 * to the reason:
				 *  0 - parity error
				 *  1 - master abort
				 *  2 - target abort
				 * Note that on parity error, we should do a software reset
				 * of the chip to get it back into a sane state (according
				 * to the 21142/3 docs that is).
				 *   -- rmk
				 */
				dev_err(&dev->dev,
					"(%lu) System Error occurred (%d)\n",
					tp->nir, error);
			}
			/* Clear all error sources, included undocumented ones! */
			iowrite32(0x0800f7ba, ioaddr + CSR5);
			oi++;
		}
		if (csr5 & TimerInt) {
			if (tulip_debug > 2)
				dev_err(&dev->dev,
					"Re-enabling interrupts, %08x\n",
					csr5);
			iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
			tp->ttimer = 0;
			oi++;
		}
		if (tx > maxtx || rx > maxrx || oi > maxoi) {
			if (tulip_debug > 1)
				dev_warn(&dev->dev, "Too much work during an interrupt, csr5=0x%08x. (%lu) (%d,%d,%d)\n",
					 csr5, tp->nir, tx, rx, oi);

			/* Acknowledge all interrupt sources. */
			iowrite32(0x8001ffff, ioaddr + CSR5);
			if (tp->flags & HAS_INTR_MITIGATION) {
				/* Josip Loncaric at ICASE did extensive experimentation
				   to develop a good interrupt mitigation setting.*/
				iowrite32(0x8b240000, ioaddr + CSR11);
			} else if (tp->chip_id == LC82C168) {
				/* the LC82C168 doesn't have a hw timer.*/
				iowrite32(0x00, ioaddr + CSR7);
				mod_timer(&tp->timer, RUN_AT(HZ/50));
			} else {
				/* Mask all interrupting sources, set timer to
				   re-enable. */
				iowrite32(((~csr5) & 0x0001ebef) | AbnormalIntr | TimerInt, ioaddr + CSR7);
				iowrite32(0x0012, ioaddr + CSR11);
			}
			break;
		}

		work_count--;
		if (work_count == 0)
			break;
		csr5 = ioread32(ioaddr + CSR5);

#ifdef CONFIG_TULIP_NAPI
		if (rxd)
			csr5 &= ~RxPollInt;
	} while ((csr5 & (TxNoBuf |
			  TxDied |
			  TxIntr |
			  TimerInt |
			  /* Abnormal intr. */
			  RxDied |
			  TxFIFOUnderflow |
			  TxJabber |
			  TPLnkFail |
			  SystemError)) != 0);
#else
	} while ((csr5 & (NormalIntr|AbnormalIntr)) != 0);

	tulip_refill_rx(dev);
	/* check if the card is in suspend mode */
	entry = tp->dirty_rx % RX_RING_SIZE;
	if (tp->rx_buffers[entry].skb == NULL) {
		if (tulip_debug > 1)
			dev_warn(&dev->dev,
				 "in rx suspend mode: (%lu) (tp->cur_rx = %u, ttimer = %d, rx = %d) go/stay in suspend mode\n",
				 tp->nir, tp->cur_rx, tp->ttimer, rx);
		if (tp->chip_id == LC82C168) {
			iowrite32(0x00, ioaddr + CSR7);
			mod_timer(&tp->timer, RUN_AT(HZ/50));
		} else {
			if (tp->ttimer == 0 || (ioread32(ioaddr + CSR11) & 0xffff) == 0) {
				if (tulip_debug > 1)
					dev_warn(&dev->dev,
						 "in rx suspend mode: (%lu) set timer\n",
						 tp->nir);
				iowrite32(tulip_tbl[tp->chip_id].valid_intrs | TimerInt,
					ioaddr + CSR7);
				iowrite32(TimerInt, ioaddr + CSR5);
				iowrite32(12, ioaddr + CSR11);
				tp->ttimer = 1;
			}
		}
	}
#endif /* CONFIG_TULIP_NAPI */
	if ((missed = ioread32(ioaddr + CSR8) & 0x1ffff)) {
		dev->stats.rx_dropped += missed & 0x10000 ? 0x10000 : missed;
	}
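	/* Added note: CSR8 bits 15:0 hold the missed-frame count and bit 16
	 * reports that the counter overflowed, so an overflow is credited as
	 * a flat 0x10000 dropped frames instead of the wrapped low bits. */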
	if (tulip_debug > 4)
		printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#04x\n",
		       dev->name, ioread32(ioaddr + CSR5));

	return IRQ_HANDLED;
}