/*
	drivers/net/ethernet/dec/tulip/interrupt.c

	Copyright 2000,2001  The Linux Kernel Team
	Written/copyright 1994-2001 by Donald Becker.

	This software may be used and distributed according to the terms
	of the GNU General Public License, incorporated herein by reference.

	Please submit bugs to http://bugzilla.kernel.org/ .
*/
#include <linux/pci.h>
#include "tulip.h"
#include <linux/etherdevice.h>
int tulip_rx_copybreak;
unsigned int tulip_max_interrupt_work;
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
#define MIT_SIZE 15
#define MIT_TABLE 15 /* We use 0 or max */

static unsigned int mit_table[MIT_SIZE+1] =
{
	/* CSR11 21143 hardware Mitigation Control Interrupt
	   We use only RX mitigation; other techniques handle
	   TX interrupt mitigation.

	   31    Cycle Size (timer control)
	   30:27 TX timer in 16 * Cycle size
	   26:24 TX No pkts before Int.
	   23:20 RX timer in Cycle size
	   19:17 RX No pkts before Int.
	   16    Continuous Mode (CM)
	*/

	0x0,		/* IM disabled */
	0x80150000,	/* RX time = 1, RX pkts = 2, CM = 1 */
	/* intermediate settings elided; only entries 0 and MIT_TABLE
	   are ever used, so the endpoint is pinned explicitly */
	[MIT_TABLE] =
//	0x80FF0000	/* RX time = 16, RX pkts = 7, CM = 1 */
	0x80F10000	/* RX time = 16, RX pkts = 0, CM = 1 */
};
#endif
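/*
 * Worked example of the CSR11 layout above, using the 0x80150000 entry:
 * bit 31 = 1 selects the cycle size, bits 23:20 = 0x1 give an RX timer of
 * one cycle, bits 19:17 = 0x2 request an interrupt after two packets, and
 * bit 16 = 1 enables Continuous Mode; this matches the "RX time = 1,
 * RX pkts = 2, CM = 1" annotation in the table.
 */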
int tulip_refill_rx(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	int entry;
	int refilled = 0;

	/* Refill the Rx ring buffers. */
	for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
		entry = tp->dirty_rx % RX_RING_SIZE;
		if (tp->rx_buffers[entry].skb == NULL) {
			struct sk_buff *skb;
			dma_addr_t mapping;

			skb = tp->rx_buffers[entry].skb =
				netdev_alloc_skb(dev, PKT_BUF_SZ);
			if (skb == NULL)
				break;

			mapping = pci_map_single(tp->pdev, skb->data, PKT_BUF_SZ,
						 PCI_DMA_FROMDEVICE);
			if (dma_mapping_error(&tp->pdev->dev, mapping)) {
				dev_kfree_skb(skb);
				tp->rx_buffers[entry].skb = NULL;
				break;
			}

			tp->rx_buffers[entry].mapping = mapping;

			tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
			refilled++;
		}
		tp->rx_ring[entry].status = cpu_to_le32(DescOwned);
	}
	if (tp->chip_id == LC82C168) {
		if (((ioread32(tp->base_addr + CSR5)>>17)&0x07) == 4) {
			/* Rx stopped due to out of buffers,
			 * restart it
			 */
			iowrite32(0x01, tp->base_addr + CSR2);
		}
	}
	return refilled;
}
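/*
 * Ring handshake note: writing DescOwned into rx_ring[entry].status is what
 * hands a descriptor back to the chip; the chip clears the bit once it has
 * stored a frame there, which is the condition the receive loops below test
 * before touching a buffer.
 */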
#ifdef CONFIG_TULIP_NAPI

void oom_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct tulip_private *tp = netdev_priv(dev);
	napi_schedule(&tp->napi);
}
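/* oom_timer is armed from the oom: path in tulip_poll() below, one jiffy
 * out, so that polling resumes and the ring refill is retried without
 * re-enabling RX interrupts. */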
int tulip_poll(struct napi_struct *napi, int budget)
{
	struct tulip_private *tp = container_of(napi, struct tulip_private, napi);
	struct net_device *dev = tp->dev;
	int entry = tp->cur_rx % RX_RING_SIZE;
	int work_done = 0;
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
	int received = 0;
#endif

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION

	/* that one buffer is needed for mit activation; or might be a
	   bug in the ring buffer code; check later -- JHS */

	if (budget >= RX_RING_SIZE)
		budget--;
#endif

	if (tulip_debug > 4)
		netdev_dbg(dev, " In tulip_rx(), entry %d %08x\n",
			   entry, tp->rx_ring[entry].status);
	do {
		if (ioread32(tp->base_addr + CSR5) == 0xffffffff) {
			netdev_dbg(dev, " In tulip_poll(), hardware disappeared\n");
			break;
		}
		/* Acknowledge current RX interrupt sources. */
		iowrite32((RxIntr | RxNoBuf), tp->base_addr + CSR5);
		/* If we own the next entry, it is a new packet. Send it up. */
		while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
			int pkt_len;
			s32 status = le32_to_cpu(tp->rx_ring[entry].status);

			if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx)
				break;

			netdev_dbg(dev, "In tulip_rx(), entry %d %08x\n",
				   entry, status);

			if (++work_done >= budget)
				goto not_done;
			/*
			 * Omit the four octet CRC from the length.
			 * (May not be considered valid until we have
			 * checked status for RxLengthOver2047 bits)
			 */
			pkt_len = ((status >> 16) & 0x7ff) - 4;

			/*
			 * Maximum pkt_len is 1518 (1514 + vlan header)
			 * Anything higher than this is always invalid
			 * regardless of RxLengthOver2047 bits
			 */
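			/*
			 * Worked example: bits 30:16 of the status word hold
			 * the frame length including the 4-byte CRC, so a
			 * minimum-size 64-byte Ethernet frame reads back as
			 * 64 and yields pkt_len = 60; the 0x7ff mask caps
			 * the field at 2047.
			 */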
			if ((status & (RxLengthOver2047 |
				       RxDescCRCError |
				       RxDescCollisionSeen |
				       RxDescRunt |
				       RxDescDescErr |
				       RxWholePkt)) != RxWholePkt ||
			    pkt_len > 1518) {
				if ((status & (RxLengthOver2047 |
					       RxWholePkt)) != RxWholePkt) {
					/* Ignore earlier buffers. */
					if ((status & 0xffff) != 0x7fff) {
						if (tulip_debug > 1)
							dev_warn(&dev->dev,
								 "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
								 status);
						dev->stats.rx_length_errors++;
					}
				} else {
					/* There was a fatal error. */
					netdev_dbg(dev, "Receive error, Rx status %08x\n",
						   status);
					dev->stats.rx_errors++; /* end of a packet. */
					if (pkt_len > 1518 ||
					    (status & RxDescRunt))
						dev->stats.rx_length_errors++;

					if (status & 0x0004)
						dev->stats.rx_frame_errors++;
					if (status & 0x0002)
						dev->stats.rx_crc_errors++;
					if (status & 0x0001)
						dev->stats.rx_fifo_errors++;
				}
			} else {
				struct sk_buff *skb;
				/* Check if the packet is long enough to accept without copying
				   to a minimally-sized skbuff. */
				if (pkt_len < tulip_rx_copybreak &&
				    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
					skb_reserve(skb, 2);	/* 16 byte align the IP header */
					pci_dma_sync_single_for_cpu(tp->pdev,
								    tp->rx_buffers[entry].mapping,
								    pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
					skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
								pkt_len);
					skb_put(skb, pkt_len);
#else
					memcpy(skb_put(skb, pkt_len),
					       tp->rx_buffers[entry].skb->data,
					       pkt_len);
#endif
					pci_dma_sync_single_for_device(tp->pdev,
								       tp->rx_buffers[entry].mapping,
								       pkt_len, PCI_DMA_FROMDEVICE);
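					/*
					 * Copybreak rationale: frames shorter
					 * than tulip_rx_copybreak are copied
					 * into a fresh skb sized to the
					 * packet, so the full PKT_BUF_SZ
					 * buffer and its DMA mapping stay on
					 * the ring for reuse; larger frames
					 * take the else branch and hand the
					 * ring skb itself up the stack.
					 */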
				} else {	/* Pass up the skb already on the Rx ring. */
					char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
							     pkt_len);

#ifndef final_version
					if (tp->rx_buffers[entry].mapping !=
					    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
						dev_err(&dev->dev,
							"Internal fault: The skbuff addresses do not match in tulip_rx: %08x vs. %08llx %p / %p\n",
							le32_to_cpu(tp->rx_ring[entry].buffer1),
							(unsigned long long)tp->rx_buffers[entry].mapping,
							skb->head, temp);
					}
#endif

					pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
							 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

					tp->rx_buffers[entry].skb = NULL;
					tp->rx_buffers[entry].mapping = 0;
				}
				skb->protocol = eth_type_trans(skb, dev);

				netif_receive_skb(skb);

				dev->stats.rx_packets++;
				dev->stats.rx_bytes += pkt_len;
			}
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
			received++;
#endif

			entry = (++tp->cur_rx) % RX_RING_SIZE;
			if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4)
				tulip_refill_rx(dev);
		}
		/* New ack strategy... irq does not ack Rx any longer
		   hopefully this helps */

		/* Really bad things can happen here... If a new packet arrives
		 * and an irq arrives (tx or just due to an occasionally unset
		 * mask), it will be acked by the irq handler, but the new thread
		 * is not scheduled. It is a major hole in the design.
		 * No idea how to fix this if "playing with fire" fails
		 * tomorrow (night 011029). If it does not fail, we have
		 * finally won: the amount of IO did not increase at all. */
	} while ((ioread32(tp->base_addr + CSR5) & RxIntr));
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION

	/* We use this simplistic scheme for IM. It's proven by
	   real life installations. We could have IM enabled
	   continuously but this would cause unnecessary latency.
	   Unfortunately we can't use all the NET_RX_* feedback here.
	   This would turn on IM for devices that are not contributing
	   to backlog congestion with unnecessary latency.

	   We monitor the device RX-ring and have:

	   HW Interrupt Mitigation either ON or OFF.

	   ON:  More than 1 pkt received (per intr.) OR we are dropping
	   OFF: Only 1 pkt received

	   Note. We only use min and max (0, 15) settings from mit_table */

	if (tp->flags & HAS_INTR_MITIGATION) {
		if (received > 1) {
			if (!tp->mit_on) {
				tp->mit_on = 1;
				iowrite32(mit_table[MIT_TABLE], tp->base_addr + CSR11);
			}
		} else {
			if (tp->mit_on) {
				tp->mit_on = 0;
				iowrite32(0, tp->base_addr + CSR11);
			}
		}
	}

#endif /* CONFIG_TULIP_NAPI_HW_MITIGATION */
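	/* The tp->mit_on shadow avoids redundant CSR11 writes on every poll:
	 * the register is only touched when the ON/OFF decision actually
	 * changes, i.e. when traffic crosses the one-packet-per-poll
	 * threshold in either direction. */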
	tulip_refill_rx(dev);

	/* If RX ring is not full we are out of memory. */
	if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		goto oom;

	/* Remove us from polling list and enable RX intr. */
	napi_complete_done(napi, work_done);
	iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7);
	/* The last op happens after poll completion. Which means the following:
	 * 1. it can race with disabling irqs in irq handler
	 * 2. it can race with disabling/enabling irqs in other poll threads
	 * 3. if an irq is raised after the loop begins, it will be immediately
	 *    triggered here.
	 *
	 * Summarizing: the logic results in some redundant irqs both
	 * due to races in masking and due to too late acking of already
	 * processed irqs. But it must not result in losing events.
	 */

	return work_done;

not_done:
	if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 ||
	    tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		tulip_refill_rx(dev);

	if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		goto oom;

	return work_done;
oom:	/* Executed with RX ints disabled */

	/* Start timer, stop polling, but do not enable rx interrupts. */
	mod_timer(&tp->oom_timer, jiffies+1);

	/* Think: timer_pending() was an explicit signature of a bug.
	 * The timer can be pending now, but have fired and completed
	 * before we did napi_complete(). See? We would lose it. */

	/* remove ourselves from the polling list */
	napi_complete_done(napi, work_done);

	return work_done;
}
#else /* CONFIG_TULIP_NAPI */
static int tulip_rx(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	int entry = tp->cur_rx % RX_RING_SIZE;
	int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
	int received = 0;

	if (tulip_debug > 4)
		netdev_dbg(dev, "In tulip_rx(), entry %d %08x\n",
			   entry, tp->rx_ring[entry].status);
	/* If we own the next entry, it is a new packet. Send it up. */
	while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
		s32 status = le32_to_cpu(tp->rx_ring[entry].status);
		int pkt_len;

		if (tulip_debug > 5)
			netdev_dbg(dev, "In tulip_rx(), entry %d %08x\n",
				   entry, status);

		if (--rx_work_limit < 0)
			break;
		/*
		   Omit the four octet CRC from the length.
		   (May not be considered valid until we have
		   checked status for RxLengthOver2047 bits)
		*/
		pkt_len = ((status >> 16) & 0x7ff) - 4;

		/*
		   Maximum pkt_len is 1518 (1514 + vlan header)
		   Anything higher than this is always invalid
		   regardless of RxLengthOver2047 bits
		*/
		if ((status & (RxLengthOver2047 |
			       RxDescCRCError |
			       RxDescCollisionSeen |
			       RxDescRunt |
			       RxDescDescErr |
			       RxWholePkt)) != RxWholePkt ||
		    pkt_len > 1518) {
			if ((status & (RxLengthOver2047 |
				       RxWholePkt)) != RxWholePkt) {
				/* Ignore earlier buffers. */
				if ((status & 0xffff) != 0x7fff) {
					if (tulip_debug > 1)
						dev_warn(&dev->dev,
							 "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
							 status);
					dev->stats.rx_length_errors++;
				}
			} else {
				/* There was a fatal error. */
				netdev_dbg(dev, "Receive error, Rx status %08x\n",
					   status);
				dev->stats.rx_errors++; /* end of a packet. */
				if (pkt_len > 1518 ||
				    (status & RxDescRunt))
					dev->stats.rx_length_errors++;
				if (status & 0x0004)
					dev->stats.rx_frame_errors++;
				if (status & 0x0002)
					dev->stats.rx_crc_errors++;
				if (status & 0x0001)
					dev->stats.rx_fifo_errors++;
			}
		} else {
			struct sk_buff *skb;
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < tulip_rx_copybreak &&
			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single_for_cpu(tp->pdev,
							    tp->rx_buffers[entry].mapping,
							    pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
				skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
							pkt_len);
				skb_put(skb, pkt_len);
#else
				memcpy(skb_put(skb, pkt_len),
				       tp->rx_buffers[entry].skb->data,
				       pkt_len);
#endif
				pci_dma_sync_single_for_device(tp->pdev,
							       tp->rx_buffers[entry].mapping,
							       pkt_len, PCI_DMA_FROMDEVICE);
			} else {	/* Pass up the skb already on the Rx ring. */
				char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
						     pkt_len);

#ifndef final_version
				if (tp->rx_buffers[entry].mapping !=
				    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
					dev_err(&dev->dev,
						"Internal fault: The skbuff addresses do not match in tulip_rx: %08x vs. %08llx %p / %p\n",
						le32_to_cpu(tp->rx_ring[entry].buffer1),
						(unsigned long long)tp->rx_buffers[entry].mapping,
						skb->head, temp);
				}
#endif

				pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
						 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

				tp->rx_buffers[entry].skb = NULL;
				tp->rx_buffers[entry].mapping = 0;
			}
			skb->protocol = eth_type_trans(skb, dev);

			netif_rx(skb);
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += pkt_len;
		}
		received++;
		entry = (++tp->cur_rx) % RX_RING_SIZE;
	}
	return received;
}
#endif /* CONFIG_TULIP_NAPI */
static inline unsigned int phy_interrupt (struct net_device *dev)
{
#ifdef __hppa__
	struct tulip_private *tp = netdev_priv(dev);
	int csr12 = ioread32(tp->base_addr + CSR12) & 0xff;

	if (csr12 != tp->csr12_shadow) {
		/* ack interrupt */
		iowrite32(csr12 | 0x02, tp->base_addr + CSR12);
		tp->csr12_shadow = csr12;
		/* do link change stuff */
		spin_lock(&tp->lock);
		tulip_check_duplex(dev);
		spin_unlock(&tp->lock);
		/* clear irq ack bit */
		iowrite32(csr12 & ~0x02, tp->base_addr + CSR12);

		return 1;
	}
#endif

	return 0;
}
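/* Note on the CSR12 dance above (hppa builds only): the handler detects a
 * link event by comparing CSR12 against the tp->csr12_shadow copy, sets
 * bit 1 to ack it, runs the duplex check under the lock, then clears the
 * ack bit again. */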
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
irqreturn_t tulip_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;
	int csr5;
	int missed;
	int rx = 0;
	int tx = 0;
	int oi = 0;
	int maxrx = RX_RING_SIZE;
	int maxtx = TX_RING_SIZE;
	int maxoi = TX_RING_SIZE;
#ifdef CONFIG_TULIP_NAPI
	int rxd = 0;
#else
	int entry;
#endif
	unsigned int work_count = tulip_max_interrupt_work;
	unsigned int handled = 0;

	/* Let's see whether the interrupt really is for us */
	csr5 = ioread32(ioaddr + CSR5);

	if (tp->flags & HAS_PHY_IRQ)
		handled = phy_interrupt (dev);

	if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
		return IRQ_RETVAL(handled);
	tp->nir++;

	do {

#ifdef CONFIG_TULIP_NAPI

		if (!rxd && (csr5 & (RxIntr | RxNoBuf))) {
			rxd++;
			/* Mask RX intrs and add the device to poll list. */
			iowrite32(tulip_tbl[tp->chip_id].valid_intrs&~RxPollInt, ioaddr + CSR7);
			napi_schedule(&tp->napi);

			if (!(csr5&~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass)))
				break;
		}

		/* Acknowledge the interrupt sources we handle here ASAP
		   the poll function does Rx and RxNoBuf acking */

		iowrite32(csr5 & 0x0001ff3f, ioaddr + CSR5);

#else
		/* Acknowledge all of the current interrupt sources ASAP. */
		iowrite32(csr5 & 0x0001ffff, ioaddr + CSR5);

		if (csr5 & (RxIntr | RxNoBuf)) {
			rx += tulip_rx(dev);
			tulip_refill_rx(dev);
		}

#endif /*  CONFIG_TULIP_NAPI */
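		/* Ack mask note: 0x0001ff3f differs from 0x0001ffff only in
		 * bits 6 and 7, which are RxIntr and RxNoBuf on the 21143.
		 * In NAPI mode those two sources are deliberately left
		 * unacked here so that tulip_poll() can ack them itself
		 * (see the iowrite32 of RxIntr | RxNoBuf at the top of its
		 * loop). */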
		if (tulip_debug > 4)
			netdev_dbg(dev, "interrupt csr5=%#8.8x new csr5=%#8.8x\n",
				   csr5, ioread32(ioaddr + CSR5));
		if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
			unsigned int dirty_tx;

			spin_lock(&tp->lock);

			for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
			     dirty_tx++) {
				int entry = dirty_tx % TX_RING_SIZE;
				int status = le32_to_cpu(tp->tx_ring[entry].status);

				if (status < 0)
					break;	/* It still has not been Txed */

				/* Check for Rx filter setup frames. */
				if (tp->tx_buffers[entry].skb == NULL) {
					/* test because dummy frames not mapped */
					if (tp->tx_buffers[entry].mapping)
						pci_unmap_single(tp->pdev,
								 tp->tx_buffers[entry].mapping,
								 sizeof(tp->setup_frame),
								 PCI_DMA_TODEVICE);
					continue;
				}

				if (status & 0x8000) {
					/* There was a major error, log it. */
#ifndef final_version
					if (tulip_debug > 1)
						netdev_dbg(dev, "Transmit error, Tx status %08x\n",
							   status);
#endif
					dev->stats.tx_errors++;
					if (status & 0x4104)
						dev->stats.tx_aborted_errors++;
					if (status & 0x0C00)
						dev->stats.tx_carrier_errors++;
					if (status & 0x0200)
						dev->stats.tx_window_errors++;
					if (status & 0x0002)
						dev->stats.tx_fifo_errors++;
					if ((status & 0x0080) && tp->full_duplex == 0)
						dev->stats.tx_heartbeat_errors++;
				} else {
					dev->stats.tx_bytes +=
						tp->tx_buffers[entry].skb->len;
					dev->stats.collisions += (status >> 3) & 15;
					dev->stats.tx_packets++;
				}

				pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
						 tp->tx_buffers[entry].skb->len,
						 PCI_DMA_TODEVICE);

				/* Free the original skb. */
				dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
				tp->tx_buffers[entry].skb = NULL;
				tp->tx_buffers[entry].mapping = 0;
				tx++;
			}

#ifndef final_version
			if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
				dev_err(&dev->dev,
					"Out-of-sync dirty pointer, %d vs. %d\n",
					dirty_tx, tp->cur_tx);
				dirty_tx += TX_RING_SIZE;
			}
#endif

			if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
				netif_wake_queue(dev);

			tp->dirty_tx = dirty_tx;
			if (csr5 & TxDied) {
				if (tulip_debug > 2)
					dev_warn(&dev->dev,
						 "The transmitter stopped. CSR5 is %x, CSR6 %x, new CSR6 %x\n",
						 csr5, ioread32(ioaddr + CSR6),
						 tp->csr6);
				tulip_restart_rxtx(tp);
			}
			spin_unlock(&tp->lock);
		}
		/* Log errors. */
		if (csr5 & AbnormalIntr) {	/* Abnormal error summary bit. */
			if (csr5 == 0xffffffff)
				break;
			if (csr5 & TxJabber)
				dev->stats.tx_errors++;
			if (csr5 & TxFIFOUnderflow) {
				if ((tp->csr6 & 0xC000) != 0xC000)
					tp->csr6 += 0x4000;	/* Bump up the Tx threshold */
				else
					tp->csr6 |= 0x00200000;  /* Store-n-forward. */
				/* Restart the transmit process. */
				tulip_restart_rxtx(tp);
				iowrite32(0, ioaddr + CSR1);
			}
			if (csr5 & (RxDied | RxNoBuf)) {
				if (tp->flags & COMET_MAC_ADDR) {
					iowrite32(tp->mc_filter[0], ioaddr + 0xAC);
					iowrite32(tp->mc_filter[1], ioaddr + 0xB0);
				}
			}
			if (csr5 & RxDied) {	/* Missed a Rx frame. */
				dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
				dev->stats.rx_errors++;
				tulip_start_rxtx(tp);
			}
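			/* CSR6 note: bits 15:14 are the transmit threshold,
			 * so adding 0x4000 above raises it one step per
			 * underflow; once the field saturates at 0xC000 the
			 * driver sets bit 21 (store-and-forward) so a frame
			 * is fully buffered before transmission and
			 * underflow cannot recur. */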
			/*
			 * NB: t21142_lnk_change() does a del_timer_sync(), so be careful
			 * if this call is ever done under the spinlock
			 */
			if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
				if (tp->link_change)
					(tp->link_change)(dev, csr5);
			}
			if (csr5 & SystemError) {
				int error = (csr5 >> 23) & 7;
				/* oops, we hit a PCI error.  The code produced corresponds
				 * to the reason:
				 *  0 - parity error
				 *  1 - master abort
				 *  2 - target abort
				 * Note that on parity error, we should do a software reset
				 * of the chip to get it back into a sane state (according
				 * to the 21142/3 docs that is).
				 */
				dev_err(&dev->dev,
					"(%lu) System Error occurred (%d)\n",
					tp->nir, error);
			}
			/* Clear all error sources, including undocumented ones! */
			iowrite32(0x0800f7ba, ioaddr + CSR5);
			oi++;
		}
		if (csr5 & TimerInt) {

			if (tulip_debug > 2)
				dev_err(&dev->dev,
					"Re-enabling interrupts, %08x\n",
					csr5);
			iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
			tp->ttimer = 0;
			oi++;
		}
		if (tx > maxtx || rx > maxrx || oi > maxoi) {
			if (tulip_debug > 1)
				dev_warn(&dev->dev, "Too much work during an interrupt, csr5=0x%08x. (%lu) (%d,%d,%d)\n",
					 csr5, tp->nir, tx, rx, oi);

			/* Acknowledge all interrupt sources. */
			iowrite32(0x8001ffff, ioaddr + CSR5);
			if (tp->flags & HAS_INTR_MITIGATION) {
				/* Josip Loncaric at ICASE did extensive experimentation
				   to develop a good interrupt mitigation setting. */
				iowrite32(0x8b240000, ioaddr + CSR11);
			} else if (tp->chip_id == LC82C168) {
				/* the LC82C168 doesn't have a hw timer. */
				iowrite32(0x00, ioaddr + CSR7);
				mod_timer(&tp->timer, RUN_AT(HZ/50));
			} else {
				/* Mask all interrupting sources, set timer to
				   re-enable. */
				iowrite32(((~csr5) & 0x0001ebef) | AbnormalIntr | TimerInt, ioaddr + CSR7);
				iowrite32(0x0012, ioaddr + CSR11);
			}
			break;
		}

		work_count--;
		if (work_count == 0)
			break;
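		/* The 0x8b240000 written to CSR11 above decodes, per the
		 * layout documented at mit_table: bit 31 selects the cycle
		 * size, bits 30:27 and 26:24 give a TX timer of 1 with 3
		 * packets before interrupt, bits 23:20 and 19:17 give an RX
		 * timer of 2 with 2 packets before interrupt, and bit 16
		 * (Continuous Mode) is off. */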
		csr5 = ioread32(ioaddr + CSR5);

#ifdef CONFIG_TULIP_NAPI
		if (rxd)
			csr5 &= ~RxPollInt;
	} while ((csr5 & (TxNoBuf |
			  TxDied |
			  TxIntr |
			  TimerInt |
			  /* Abnormal intr. */
			  RxDied |
			  TxFIFOUnderflow |
			  TxJabber |
			  TPLnkFail |
			  SystemError)) != 0);
#else
	} while ((csr5 & (NormalIntr|AbnormalIntr)) != 0);

	tulip_refill_rx(dev);
	/* check if the card is in suspend mode */
	entry = tp->dirty_rx % RX_RING_SIZE;
	if (tp->rx_buffers[entry].skb == NULL) {
		if (tulip_debug > 1)
			dev_warn(&dev->dev,
				 "in rx suspend mode: (%lu) (tp->cur_rx = %u, ttimer = %d, rx = %d) go/stay in suspend mode\n",
				 tp->nir, tp->cur_rx, tp->ttimer, rx);
		if (tp->chip_id == LC82C168) {
			iowrite32(0x00, ioaddr + CSR7);
			mod_timer(&tp->timer, RUN_AT(HZ/50));
		} else {
			if (tp->ttimer == 0 || (ioread32(ioaddr + CSR11) & 0xffff) == 0) {
				if (tulip_debug > 1)
					dev_warn(&dev->dev,
						 "in rx suspend mode: (%lu) set timer\n",
						 tp->nir);
				iowrite32(tulip_tbl[tp->chip_id].valid_intrs | TimerInt,
					ioaddr + CSR7);
				iowrite32(TimerInt, ioaddr + CSR5);
				iowrite32(12, ioaddr + CSR11);
				tp->ttimer = 1;
			}
		}
	}
#endif /* CONFIG_TULIP_NAPI */
	if ((missed = ioread32(ioaddr + CSR8) & 0x1ffff)) {
		dev->stats.rx_dropped += missed & 0x10000 ? 0x10000 : missed;
	}

	if (tulip_debug > 4)
		netdev_dbg(dev, "exiting interrupt, csr5=%#04x\n",
			   ioread32(ioaddr + CSR5));

	return IRQ_HANDLED;
}