/*
	drivers/net/ethernet/dec/tulip/interrupt.c

	Copyright 2000,2001  The Linux Kernel Team
	Written/copyright 1994-2001 by Donald Becker.

	This software may be used and distributed according to the terms
	of the GNU General Public License, incorporated herein by reference.

	Please submit bugs to http://bugzilla.kernel.org/ .
*/
#include <linux/pci.h>
#include "tulip.h"
#include <linux/etherdevice.h>
int tulip_rx_copybreak;
unsigned int tulip_max_interrupt_work;
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
#define MIT_SIZE 15
#define MIT_TABLE 15 /* We use 0 or max */

static unsigned int mit_table[MIT_SIZE+1] =
{
	/* CSR11: 21143 hardware Interrupt Mitigation control.
	   We use only RX mitigation; other techniques are used for
	   TX interrupt mitigation.

	   31    Cycle Size (timer control)
	   30:27 TX timer in 16 * Cycle size
	   26:24 TX No pkts before Int.
	   23:20 RX timer in Cycle size
	   19:17 RX No pkts before Int.
	   16    Continuous Mode (CM)
	*/
	0x0,		/* IM disabled */
	0x80150000,	/* RX time = 1, RX pkts = 2, CM = 1 */
//	0x80FF0000	/* RX time = 16, RX pkts = 7, CM = 1 */
	0x80F10000	/* RX time = 16, RX pkts = 0, CM = 1 */
};
#endif
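
/*
 * Illustrative sketch (not part of the original driver): how the CSR11
 * field layout documented above produces the mit_table values. Taking
 * 0x80150000:
 *
 *   bit 31     = 1  -> cycle size / timer control
 *   bits 23:20 = 1  -> RX timer = 1 cycle
 *   bits 19:17 = 2  -> RX pkts  = 2
 *   bit 16     = 1  -> Continuous Mode (CM) on
 *
 * A hypothetical helper macro could assemble such a word as:
 *
 *   #define MIT_CSR11(rx_timer, rx_pkts, cm) \
 *           ((1u << 31) | ((rx_timer) << 20) | ((rx_pkts) << 17) | ((cm) << 16))
 *
 *   MIT_CSR11(1, 2, 1) == 0x80150000
 *
 * The driver itself only writes the precomputed constants from mit_table.
 */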
int tulip_refill_rx(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	int entry;
	int refilled = 0;

	/* Refill the Rx ring buffers. */
	for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
		entry = tp->dirty_rx % RX_RING_SIZE;
		if (tp->rx_buffers[entry].skb == NULL) {
			struct sk_buff *skb;
			dma_addr_t mapping;

			skb = tp->rx_buffers[entry].skb =
				netdev_alloc_skb(dev, PKT_BUF_SZ);
			if (skb == NULL)
				break;

			mapping = dma_map_single(&tp->pdev->dev, skb->data,
						 PKT_BUF_SZ, DMA_FROM_DEVICE);
			if (dma_mapping_error(&tp->pdev->dev, mapping)) {
				dev_kfree_skb(skb);
				tp->rx_buffers[entry].skb = NULL;
				break;
			}

			tp->rx_buffers[entry].mapping = mapping;

			tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
			refilled++;
		}
		tp->rx_ring[entry].status = cpu_to_le32(DescOwned);
	}
	if (tp->chip_id == LC82C168) {
		if (((ioread32(tp->base_addr + CSR5) >> 17) & 0x07) == 4) {
			/* Rx stopped due to out of buffers (CSR5 receive
			 * state, bits 19:17, reads 4: suspended); restart
			 * it with a receive poll demand.
			 */
			iowrite32(0x01, tp->base_addr + CSR2);
		}
	}
	return refilled;
}
#ifdef CONFIG_TULIP_NAPI

void oom_timer(struct timer_list *t)
{
	struct tulip_private *tp = from_timer(tp, t, oom_timer);

	napi_schedule(&tp->napi);
}
int tulip_poll(struct napi_struct *napi, int budget)
{
	struct tulip_private *tp = container_of(napi, struct tulip_private, napi);
	struct net_device *dev = tp->dev;
	int entry = tp->cur_rx % RX_RING_SIZE;
	int work_done = 0;
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
	int received = 0;
#endif
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION

	/* One buffer is needed for mitigation activation; or it might be a
	   bug in the ring buffer code; check later -- JHS */

	if (budget >= RX_RING_SIZE)
		budget--;
#endif
	netdev_dbg(dev, " In tulip_poll(), entry %d %08x\n",
		   entry, tp->rx_ring[entry].status);
	do {
		if (ioread32(tp->base_addr + CSR5) == 0xffffffff) {
			netdev_dbg(dev, " In tulip_poll(), hardware disappeared\n");
			break;
		}
		/* Acknowledge current RX interrupt sources. */
		iowrite32((RxIntr | RxNoBuf), tp->base_addr + CSR5);
		/* If we own the next entry, it is a new packet. Send it up. */
		while (!(tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
			s32 status = le32_to_cpu(tp->rx_ring[entry].status);
			short pkt_len;
			if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx)
				break;

			netdev_dbg(dev, "In tulip_poll(), entry %d %08x\n",
				   entry, status);

			if (++work_done >= budget)
				goto not_done;

			/*
			 * Omit the four octet CRC from the length.
			 * (May not be considered valid until we have
			 * checked status for RxLengthOver2047 bits)
			 */
			pkt_len = ((status >> 16) & 0x7ff) - 4;
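
			/* For example: a minimal 64-octet frame on the wire
			 * (60 octets of data + 4 octets of FCS) reports a
			 * frame-length field of 64 in the status word, so
			 * pkt_len becomes 60. */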
			/*
			 * Maximum pkt_len is 1518 (1514 + vlan header)
			 * Anything higher than this is always invalid
			 * regardless of RxLengthOver2047 bits
			 */

			if ((status & (RxLengthOver2047 |
				       RxDescCRCError |
				       RxDescCollisionSeen |
				       RxDescRunt |
				       RxDescDescErr |
				       RxWholePkt)) != RxWholePkt ||
			    pkt_len > 1518) {
				if ((status & (RxLengthOver2047 |
					       RxWholePkt)) != RxWholePkt) {
					/* Ignore earlier buffers. */
					if ((status & 0xffff) != 0x7fff) {
						dev_warn(&dev->dev,
							 "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
							 status);
						dev->stats.rx_length_errors++;
					}
				} else {
					/* There was a fatal error. */
					netdev_dbg(dev, "Receive error, Rx status %08x\n",
						   status);
					dev->stats.rx_errors++; /* end of a packet.*/
					if (pkt_len > 1518 ||
					    (status & RxDescRunt))
						dev->stats.rx_length_errors++;

					if (status & 0x0004)
						dev->stats.rx_frame_errors++;
					if (status & 0x0002)
						dev->stats.rx_crc_errors++;
					if (status & 0x0001)
						dev->stats.rx_fifo_errors++;
				}
			} else {
				struct sk_buff *skb;
				/* Check if the packet is long enough to accept without copying
				   to a minimally-sized skbuff. */
				if (pkt_len < tulip_rx_copybreak &&
				    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
					skb_reserve(skb, 2);	/* 16 byte align the IP header */
					dma_sync_single_for_cpu(&tp->pdev->dev,
								tp->rx_buffers[entry].mapping,
								pkt_len,
								DMA_FROM_DEVICE);
#if ! defined(__alpha__)
					skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
								pkt_len);
					skb_put(skb, pkt_len);
#else
					skb_put_data(skb,
						     tp->rx_buffers[entry].skb->data,
						     pkt_len);
#endif
					dma_sync_single_for_device(&tp->pdev->dev,
								   tp->rx_buffers[entry].mapping,
								   pkt_len,
								   DMA_FROM_DEVICE);
				} else {	/* Pass up the skb already on the Rx ring. */
					char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
							     pkt_len);

#ifndef final_version
					if (tp->rx_buffers[entry].mapping !=
					    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
						dev_err(&dev->dev,
							"Internal fault: The skbuff addresses do not match in tulip_rx: %08x vs. %08llx %p / %p\n",
							le32_to_cpu(tp->rx_ring[entry].buffer1),
							(unsigned long long)tp->rx_buffers[entry].mapping,
							skb->head, temp);
					}
#endif

					dma_unmap_single(&tp->pdev->dev,
							 tp->rx_buffers[entry].mapping,
							 PKT_BUF_SZ, DMA_FROM_DEVICE);

					tp->rx_buffers[entry].skb = NULL;
					tp->rx_buffers[entry].mapping = 0;
				}
				skb->protocol = eth_type_trans(skb, dev);

				netif_receive_skb(skb);

				dev->stats.rx_packets++;
				dev->stats.rx_bytes += pkt_len;
			}
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
			received++;
#endif

			entry = (++tp->cur_rx) % RX_RING_SIZE;
			if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4)
				tulip_refill_rx(dev);
		}
		/* New ack strategy... irq does not ack Rx any longer;
		   hopefully this helps */

		/* Really bad things can happen here... If a new packet arrives
		 * and an irq arrives (tx or just due to an occasionally unset
		 * mask), it will be acked by the irq handler, but the new thread
		 * is not scheduled. It is a major hole in the design.
		 * No idea how to fix this if "playing with fire" will fail
		 * tomorrow (night 011029). If it does not fail, we have
		 * finally won: the amount of IO did not increase at all. */
	} while ((ioread32(tp->base_addr + CSR5) & RxIntr));
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION

	/* We use this simplistic scheme for IM. It's proven by
	   real life installations. We can have IM enabled
	   continuously but this would cause unnecessary latency.
	   Unfortunately we can't use all the NET_RX_* feedback here.
	   This would turn on IM for devices that are not contributing
	   to backlog congestion with unnecessary latency.

	   We monitor the device RX-ring and have:

	   HW Interrupt Mitigation either ON or OFF.

	   ON:  More than 1 pkt received (per intr.) OR we are dropping
	   OFF: Only 1 pkt received

	   Note: We only use the min and max (0, 15) settings from mit_table */

	if (tp->flags & HAS_INTR_MITIGATION) {
		if (received > 1)
			iowrite32(mit_table[MIT_TABLE], tp->base_addr + CSR11);
		else
			iowrite32(0, tp->base_addr + CSR11);
	}

#endif /* CONFIG_TULIP_NAPI_HW_MITIGATION */
	tulip_refill_rx(dev);

	/* If RX ring is not full we are out of memory. */
	if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		goto oom;

	/* Remove us from polling list and enable RX intr. */
	napi_complete_done(napi, work_done);
	iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr + CSR7);
	/* The last op happens after poll completion. Which means the following:
	 * 1. it can race with disabling irqs in irq handler
	 * 2. it can race with disabling/enabling irqs in other poll threads
	 * 3. if an irq raised after the beginning of the loop, it will be
	 *    immediately triggered here.
	 *
	 * Summarizing: the logic results in some redundant irqs both
	 * due to races in masking and due to too late acking of already
	 * processed irqs. But it must not result in losing events.
	 */
	return work_done;

not_done:
	if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 ||
	    tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		tulip_refill_rx(dev);

	if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		goto oom;

	return work_done;

oom:	/* Executed with RX ints disabled */

	/* Start timer, stop polling, but do not enable rx interrupts. */
	mod_timer(&tp->oom_timer, jiffies + 1);

	/* Think: timer_pending() was an explicit signature of a bug.
	 * The timer can be pending now, but have fired and completed
	 * before we did napi_complete(). See? We would lose it. */

	/* remove ourselves from the polling list */
	napi_complete_done(napi, work_done);

	return work_done;
}
#else /* CONFIG_TULIP_NAPI */

static int tulip_rx(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	int entry = tp->cur_rx % RX_RING_SIZE;
	int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
	int received = 0;
	netdev_dbg(dev, "In tulip_rx(), entry %d %08x\n",
		   entry, tp->rx_ring[entry].status);
	/* If we own the next entry, it is a new packet. Send it up. */
	while (!(tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
		s32 status = le32_to_cpu(tp->rx_ring[entry].status);
		short pkt_len;

		netdev_dbg(dev, "In tulip_rx(), entry %d %08x\n",
			   entry, status);

		if (--rx_work_limit < 0)
			break;
		/*
		 * Omit the four octet CRC from the length.
		 * (May not be considered valid until we have
		 * checked status for RxLengthOver2047 bits)
		 */
		pkt_len = ((status >> 16) & 0x7ff) - 4;
		/*
		 * Maximum pkt_len is 1518 (1514 + vlan header)
		 * Anything higher than this is always invalid
		 * regardless of RxLengthOver2047 bits
		 */

		if ((status & (RxLengthOver2047 |
			       RxDescCRCError |
			       RxDescCollisionSeen |
			       RxDescRunt |
			       RxDescDescErr |
			       RxWholePkt)) != RxWholePkt ||
		    pkt_len > 1518) {
			if ((status & (RxLengthOver2047 |
				       RxWholePkt)) != RxWholePkt) {
				/* Ignore earlier buffers. */
				if ((status & 0xffff) != 0x7fff) {
					dev_warn(&dev->dev,
						 "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
						 status);
					dev->stats.rx_length_errors++;
				}
			} else {
				/* There was a fatal error. */
				netdev_dbg(dev, "Receive error, Rx status %08x\n",
					   status);
				dev->stats.rx_errors++; /* end of a packet.*/
				if (pkt_len > 1518 ||
				    (status & RxDescRunt))
					dev->stats.rx_length_errors++;
				if (status & 0x0004)
					dev->stats.rx_frame_errors++;
				if (status & 0x0002)
					dev->stats.rx_crc_errors++;
				if (status & 0x0001)
					dev->stats.rx_fifo_errors++;
			}
		} else {
			struct sk_buff *skb;
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < tulip_rx_copybreak &&
			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				dma_sync_single_for_cpu(&tp->pdev->dev,
							tp->rx_buffers[entry].mapping,
							pkt_len,
							DMA_FROM_DEVICE);
#if ! defined(__alpha__)
				skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
							pkt_len);
				skb_put(skb, pkt_len);
#else
				skb_put_data(skb,
					     tp->rx_buffers[entry].skb->data,
					     pkt_len);
#endif
				dma_sync_single_for_device(&tp->pdev->dev,
							   tp->rx_buffers[entry].mapping,
							   pkt_len,
							   DMA_FROM_DEVICE);
			} else {	/* Pass up the skb already on the Rx ring. */
				char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
						     pkt_len);

#ifndef final_version
				if (tp->rx_buffers[entry].mapping !=
				    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
					dev_err(&dev->dev,
						"Internal fault: The skbuff addresses do not match in tulip_rx: %08x vs. %08llx %p / %p\n",
						le32_to_cpu(tp->rx_ring[entry].buffer1),
						(unsigned long long)tp->rx_buffers[entry].mapping,
						skb->head, temp);
				}
#endif

				dma_unmap_single(&tp->pdev->dev,
						 tp->rx_buffers[entry].mapping,
						 PKT_BUF_SZ, DMA_FROM_DEVICE);

				tp->rx_buffers[entry].skb = NULL;
				tp->rx_buffers[entry].mapping = 0;
			}
			skb->protocol = eth_type_trans(skb, dev);

			netif_rx(skb);

			dev->stats.rx_packets++;
			dev->stats.rx_bytes += pkt_len;
		}
		received++;
		entry = (++tp->cur_rx) % RX_RING_SIZE;
	}
	return received;
}
#endif /* CONFIG_TULIP_NAPI */
static inline unsigned int phy_interrupt (struct net_device *dev)
{
#ifdef __hppa__
	struct tulip_private *tp = netdev_priv(dev);
	int csr12 = ioread32(tp->base_addr + CSR12) & 0xff;

	if (csr12 != tp->csr12_shadow) {
		/* ack interrupt */
		iowrite32(csr12 | 0x02, tp->base_addr + CSR12);
		tp->csr12_shadow = csr12;
		/* do link change stuff */
		spin_lock(&tp->lock);
		tulip_check_duplex(dev);
		spin_unlock(&tp->lock);
		/* clear irq ack bit */
		iowrite32(csr12 & ~0x02, tp->base_addr + CSR12);

		return 1;
	}
#endif

	return 0;
}
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
irqreturn_t tulip_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;
	int csr5;
	int missed;
	int rx = 0;
	int tx = 0;
	int oi = 0;
	int maxrx = RX_RING_SIZE;
	int maxtx = TX_RING_SIZE;
	int maxoi = TX_RING_SIZE;
#ifdef CONFIG_TULIP_NAPI
	int rxd = 0;
#else
	int entry;
#endif
	unsigned int work_count = tulip_max_interrupt_work;
	unsigned int handled = 0;
	/* Let's see whether the interrupt really is for us */
	csr5 = ioread32(ioaddr + CSR5);

	if (tp->flags & HAS_PHY_IRQ)
		handled = phy_interrupt (dev);

	if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
		return IRQ_RETVAL(handled);

	tp->nir++;

	do {
#ifdef CONFIG_TULIP_NAPI

		if (!rxd && (csr5 & (RxIntr | RxNoBuf))) {
			rxd++;
			/* Mask RX intrs and add the device to poll list. */
			iowrite32(tulip_tbl[tp->chip_id].valid_intrs&~RxPollInt, ioaddr + CSR7);
			napi_schedule(&tp->napi);

			if (!(csr5 & ~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass)))
				break;
		}

		/* Acknowledge the interrupt sources we handle here ASAP;
		   the poll function does Rx and RxNoBuf acking */

		iowrite32(csr5 & 0x0001ff3f, ioaddr + CSR5);
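
		/* Note on the mask: 0x0001ff3f is the full ack mask
		 * 0x0001ffff with CSR5 bits 6 (RxIntr) and 7 (RxNoBuf)
		 * cleared, so those two sources stay pending for
		 * tulip_poll() to acknowledge. */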
#else
		/* Acknowledge all of the current interrupt sources ASAP. */
		iowrite32(csr5 & 0x0001ffff, ioaddr + CSR5);

		if (csr5 & (RxIntr | RxNoBuf)) {
			rx += tulip_rx(dev);
			tulip_refill_rx(dev);
		}

#endif /*  CONFIG_TULIP_NAPI */
		netdev_dbg(dev, "interrupt  csr5=%#8.8x new csr5=%#8.8x\n",
			   csr5, ioread32(ioaddr + CSR5));

		if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
			unsigned int dirty_tx;

			spin_lock(&tp->lock);
			for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
			     dirty_tx++) {
				int entry = dirty_tx % TX_RING_SIZE;
				int status = le32_to_cpu(tp->tx_ring[entry].status);

				if (status < 0)
					break;	/* It still has not been Txed */
				/* Check for Rx filter setup frames. */
				if (tp->tx_buffers[entry].skb == NULL) {
					/* test because dummy frames not mapped */
					if (tp->tx_buffers[entry].mapping)
						dma_unmap_single(&tp->pdev->dev,
								 tp->tx_buffers[entry].mapping,
								 sizeof(tp->setup_frame),
								 DMA_TO_DEVICE);
					continue;
				}
				if (status & 0x8000) {
					/* There was a major error; log it. */
#ifndef final_version
					netdev_dbg(dev, "Transmit error, Tx status %08x\n",
						   status);
#endif
					dev->stats.tx_errors++;
					if (status & 0x4104)
						dev->stats.tx_aborted_errors++;
					if (status & 0x0C00)
						dev->stats.tx_carrier_errors++;
					if (status & 0x0200)
						dev->stats.tx_window_errors++;
					if (status & 0x0002)
						dev->stats.tx_fifo_errors++;
					if ((status & 0x0080) && tp->full_duplex == 0)
						dev->stats.tx_heartbeat_errors++;
				} else {
					dev->stats.tx_bytes +=
						tp->tx_buffers[entry].skb->len;
					dev->stats.collisions += (status >> 3) & 15;
					dev->stats.tx_packets++;
				}
				dma_unmap_single(&tp->pdev->dev,
						 tp->tx_buffers[entry].mapping,
						 tp->tx_buffers[entry].skb->len,
						 DMA_TO_DEVICE);

				/* Free the original skb. */
				dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
				tp->tx_buffers[entry].skb = NULL;
				tp->tx_buffers[entry].mapping = 0;
				tx++;
			}
#ifndef final_version
			if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
				dev_err(&dev->dev,
					"Out-of-sync dirty pointer, %d vs. %d\n",
					dirty_tx, tp->cur_tx);
				dirty_tx += TX_RING_SIZE;
			}
#endif
			if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
				netif_wake_queue(dev);

			tp->dirty_tx = dirty_tx;
			if (csr5 & TxDied) {
				dev_warn(&dev->dev,
					 "The transmitter stopped. CSR5 is %x, CSR6 %x, new CSR6 %x\n",
					 csr5, ioread32(ioaddr + CSR6),
					 tp->csr6);
				tulip_restart_rxtx(tp);
			}
			spin_unlock(&tp->lock);
		}
		/* Log errors. */
		if (csr5 & AbnormalIntr) {	/* Abnormal error summary bit. */
			if (csr5 == 0xffffffff)
				break;
			if (csr5 & TxJabber)
				dev->stats.tx_errors++;
			if (csr5 & TxFIFOUnderflow) {
				if ((tp->csr6 & 0xC000) != 0xC000)
					tp->csr6 += 0x4000;	/* Bump up the Tx threshold */
				else
					tp->csr6 |= 0x00200000;	/* Store-n-forward. */
				/* Restart the transmit process. */
				tulip_restart_rxtx(tp);
				iowrite32(0, ioaddr + CSR1);
			}
			if (csr5 & (RxDied | RxNoBuf)) {
				if (tp->flags & COMET_MAC_ADDR) {
					iowrite32(tp->mc_filter[0], ioaddr + 0xAC);
					iowrite32(tp->mc_filter[1], ioaddr + 0xB0);
				}
			}
			if (csr5 & RxDied) {	/* Missed a Rx frame. */
				dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
				dev->stats.rx_errors++;
				tulip_start_rxtx(tp);
			}
			/*
			 * NB: t21142_lnk_change() does a del_timer_sync(), so be careful if this
			 * call is ever done under the spinlock
			 */
			if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
				if (tp->link_change)
					(tp->link_change)(dev, csr5);
			}
			if (csr5 & SystemError) {
				int error = (csr5 >> 23) & 7;
				/* oops, we hit a PCI error. The code produced corresponds
				 * to the reason:
				 *  0 - parity error
				 *  1 - master abort
				 *  2 - target abort
				 * Note that on parity error, we should do a software reset
				 * of the chip to get it back into a sane state (according
				 * to the 21142/3 docs that is).
				 */
				dev_err(&dev->dev,
					"(%lu) System Error occurred (%d)\n",
					tp->nir, error);
			}
			/* Clear all error sources, including undocumented ones! */
			iowrite32(0x0800f7ba, ioaddr + CSR5);
			oi++;
		}
		if (csr5 & TimerInt) {

			netdev_dbg(dev, "Re-enabling interrupts, %08x\n",
				   csr5);
			iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
			tp->ttimer = 0;
			oi++;
		}
		if (tx > maxtx || rx > maxrx || oi > maxoi) {
			dev_warn(&dev->dev, "Too much work during an interrupt, csr5=0x%08x. (%lu) (%d,%d,%d)\n",
				 csr5, tp->nir, tx, rx, oi);

			/* Acknowledge all interrupt sources. */
			iowrite32(0x8001ffff, ioaddr + CSR5);
			if (tp->flags & HAS_INTR_MITIGATION) {
				/* Josip Loncaric at ICASE did extensive experimentation
				   to develop a good interrupt mitigation setting. */
				iowrite32(0x8b240000, ioaddr + CSR11);
			} else if (tp->chip_id == LC82C168) {
				/* the LC82C168 doesn't have a hw timer. */
				iowrite32(0x00, ioaddr + CSR7);
				mod_timer(&tp->timer, RUN_AT(HZ/50));
			} else {
				/* Mask all interrupting sources, set timer to
				   re-enable them. */
				iowrite32(((~csr5) & 0x0001ebef) | AbnormalIntr | TimerInt, ioaddr + CSR7);
				iowrite32(0x0012, ioaddr + CSR11);
			}
			break;
		}

		work_count--;
		if (work_count == 0)
			break;
		csr5 = ioread32(ioaddr + CSR5);

#ifdef CONFIG_TULIP_NAPI
		if (rxd)
			csr5 &= ~RxPollInt;
	} while ((csr5 & (TxNoBuf |
			  TxDied |
			  TxIntr |
			  TimerInt |
			  /* Abnormal intr. */
			  RxDied |
			  TxFIFOUnderflow |
			  TxJabber |
			  TPLnkFail |
			  SystemError)) != 0);
#else
	} while ((csr5 & (NormalIntr|AbnormalIntr)) != 0);
	tulip_refill_rx(dev);

	/* check if the card is in suspend mode */
	entry = tp->dirty_rx % RX_RING_SIZE;
	if (tp->rx_buffers[entry].skb == NULL) {
		dev_warn(&dev->dev,
			 "in rx suspend mode: (%lu) (tp->cur_rx = %u, ttimer = %d, rx = %d) go/stay in suspend mode\n",
			 tp->nir, tp->cur_rx, tp->ttimer, rx);
		if (tp->chip_id == LC82C168) {
			iowrite32(0x00, ioaddr + CSR7);
			mod_timer(&tp->timer, RUN_AT(HZ/50));
		} else {
			if (tp->ttimer == 0 || (ioread32(ioaddr + CSR11) & 0xffff) == 0) {
				dev_warn(&dev->dev,
					 "in rx suspend mode: (%lu) set timer\n",
					 tp->nir);
				iowrite32(tulip_tbl[tp->chip_id].valid_intrs | TimerInt,
					  ioaddr + CSR7);
				iowrite32(TimerInt, ioaddr + CSR5);
				iowrite32(12, ioaddr + CSR11);
				tp->ttimer = 1;
			}
		}
	}
#endif /* CONFIG_TULIP_NAPI */
	if ((missed = ioread32(ioaddr + CSR8) & 0x1ffff)) {
		dev->stats.rx_dropped += missed & 0x10000 ? 0x10000 : missed;
	}
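
	/* CSR8 is the missed-frames counter: bits 15:0 hold the count and
	 * bit 16 is the counter-overflow flag, which is why an overflowed
	 * reading is accounted as the saturated value 0x10000 above. */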
	netdev_dbg(dev, "exiting interrupt, csr5=%#04x\n",
		   ioread32(ioaddr + CSR5));

	return IRQ_HANDLED;
}