/*
	drivers/net/ethernet/dec/tulip/interrupt.c

	Copyright 2000,2001  The Linux Kernel Team
	Written/copyright 1994-2001 by Donald Becker.

	This software may be used and distributed according to the terms
	of the GNU General Public License, incorporated herein by reference.

	Please submit bugs to http://bugzilla.kernel.org/ .
*/

#include <linux/pci.h>
#include "tulip.h"
#include <linux/etherdevice.h>
int tulip_rx_copybreak;
unsigned int tulip_max_interrupt_work;
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
#define MIT_SIZE 15
#define MIT_TABLE 15 /* We use 0 or max */

static unsigned int mit_table[MIT_SIZE+1] =
{
	/* CSR11 21143 hardware Mitigation Control Interrupt.
	   We use only RX mitigation; other techniques handle
	   TX interrupt mitigation.

	   31    Cycle Size (timer control)
	   30:27 TX timer in 16 * Cycle size
	   26:24 TX No pkts before Int.
	   23:20 RX timer in Cycle size
	   19:17 RX No pkts before Int.
	   16    Continuous Mode (CM)
	*/
	0x0,             /* IM disabled */
	0x80150000,      /* RX time = 1, RX pkts = 2, CM = 1 */
	/* ... intermediate mit_table entries not preserved here ... */
//	0x80FF0000       /* RX time = 16, RX pkts = 7, CM = 1 */
	0x80F10000       /* RX time = 16, RX pkts = 0, CM = 1 */
};
#endif
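
/*
 * Illustrative decode, added for clarity (not part of the original driver):
 * applying the CSR11 bit layout documented above to the active table entry.
 */
#if 0	/* example only, never compiled */
	u32 csr11 = 0x80150000;
	unsigned int rx_timer = (csr11 >> 20) & 0xf;	/* = 1 cycle           */
	unsigned int rx_pkts  = (csr11 >> 17) & 0x7;	/* = 2 packets per int */
	unsigned int cm       = (csr11 >> 16) & 0x1;	/* = 1, continuous mode */
#endif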

int tulip_refill_rx(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	int entry;
	int refilled = 0;

	/* Refill the Rx ring buffers. */
	for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
		entry = tp->dirty_rx % RX_RING_SIZE;
		if (tp->rx_buffers[entry].skb == NULL) {
			struct sk_buff *skb;
			dma_addr_t mapping;

			skb = tp->rx_buffers[entry].skb =
				netdev_alloc_skb(dev, PKT_BUF_SZ);
			if (skb == NULL)
				break;

			mapping = pci_map_single(tp->pdev, skb->data, PKT_BUF_SZ,
						 PCI_DMA_FROMDEVICE);
			tp->rx_buffers[entry].mapping = mapping;

			tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
			refilled++;
		}
		tp->rx_ring[entry].status = cpu_to_le32(DescOwned);
	}
	if (tp->chip_id == LC82C168) {
		if (((ioread32(tp->base_addr + CSR5) >> 17) & 0x07) == 4) {
			/* Rx stopped due to out of buffers,
			 * restart it
			 */
			iowrite32(0x01, tp->base_addr + CSR2);
		}
	}
	return refilled;
}
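
/*
 * Added sketch (not in the original source): the refill loop above follows
 * the usual descriptor-ownership handshake. Names below are hypothetical.
 */
#if 0	/* example only, never compiled */
	/* The driver attaches a DMA buffer, then hands the descriptor to the
	 * chip by setting DescOwned; the chip clears DescOwned once it has
	 * stored a received frame there, returning ownership to the driver. */
	desc->buffer1 = cpu_to_le32(dma_handle);	/* 1. attach buffer   */
	desc->status  = cpu_to_le32(DescOwned);		/* 2. give to the NIC */
	if (!(le32_to_cpu(desc->status) & DescOwned))
		handle_frame(desc);			/* 3. NIC returned it */
#endif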

#ifdef CONFIG_TULIP_NAPI

void oom_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct tulip_private *tp = netdev_priv(dev);

	napi_schedule(&tp->napi);
}

int tulip_poll(struct napi_struct *napi, int budget)
{
	struct tulip_private *tp = container_of(napi, struct tulip_private, napi);
	struct net_device *dev = tp->dev;
	int entry = tp->cur_rx % RX_RING_SIZE;
	int work_done = 0;
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
	int received = 0;
#endif

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION

	/* that one buffer is needed for mit activation; or might be a
	   bug in the ring buffer code; check later -- JHS */

	if (budget >= RX_RING_SIZE)
		budget--;
#endif

	if (tulip_debug > 4)
		netdev_dbg(dev, " In tulip_rx(), entry %d %08x\n",
			   entry, tp->rx_ring[entry].status);

	do {
		if (ioread32(tp->base_addr + CSR5) == 0xffffffff) {
			netdev_dbg(dev, " In tulip_poll(), hardware disappeared\n");
			break;
		}
		/* Acknowledge current RX interrupt sources. */
		iowrite32((RxIntr | RxNoBuf), tp->base_addr + CSR5);

		/* If we own the next entry, it is a new packet. Send it up. */
		while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
			s32 status = le32_to_cpu(tp->rx_ring[entry].status);
			short pkt_len;

			if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx)
				break;

			netdev_dbg(dev, "In tulip_rx(), entry %d %08x\n",
				   entry, status);

			if (++work_done >= budget)
				goto not_done;

			/*
			 * Omit the four octet CRC from the length.
			 * (May not be considered valid until we have
			 * checked status for RxLengthOver2047 bits)
			 */
			pkt_len = ((status >> 16) & 0x7ff) - 4;
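
			/*
			 * Worked example, added for clarity (not in the original
			 * source): for status = 0x03fe0000 the frame-length field
			 * (status >> 16) & 0x7ff yields 1022 octets on the wire,
			 * so pkt_len = 1022 - 4 = 1018 once the CRC is dropped.
			 */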

			/*
			 * Maximum pkt_len is 1518 (1514 + vlan header)
			 * Anything higher than this is always invalid
			 * regardless of RxLengthOver2047 bits
			 */
			if ((status & (RxLengthOver2047 |
				       RxDescCRCError |
				       RxDescCollisionSeen |
				       RxDescRunt |
				       RxDescDescErr |
				       RxWholePkt)) != RxWholePkt ||
			    pkt_len > 1518) {
				if ((status & (RxLengthOver2047 |
					       RxWholePkt)) != RxWholePkt) {
					/* Ignore earlier buffers. */
					if ((status & 0xffff) != 0x7fff) {
						if (tulip_debug > 1)
							netdev_warn(dev,
								    "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
								    status);
						dev->stats.rx_length_errors++;
					}
				} else {
					/* There was a fatal error. */
					netdev_dbg(dev, "Receive error, Rx status %08x\n",
						   status);
					dev->stats.rx_errors++; /* end of a packet. */
					if (pkt_len > 1518 ||
					    (status & RxDescRunt))
						dev->stats.rx_length_errors++;

					if (status & 0x0004)
						dev->stats.rx_frame_errors++;
					if (status & 0x0002)
						dev->stats.rx_crc_errors++;
					if (status & 0x0001)
						dev->stats.rx_fifo_errors++;
				}
			} else {
				struct sk_buff *skb;

				/* Check if the packet is long enough to accept without copying
				   to a minimally-sized skbuff. */
				if (pkt_len < tulip_rx_copybreak &&
				    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
					skb_reserve(skb, 2);	/* 16 byte align the IP header */
					pci_dma_sync_single_for_cpu(tp->pdev,
								    tp->rx_buffers[entry].mapping,
								    pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
					skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
								pkt_len);
					skb_put(skb, pkt_len);
#else
					memcpy(skb_put(skb, pkt_len),
					       tp->rx_buffers[entry].skb->data,
					       pkt_len);
#endif
					pci_dma_sync_single_for_device(tp->pdev,
								       tp->rx_buffers[entry].mapping,
								       pkt_len, PCI_DMA_FROMDEVICE);
				} else {	/* Pass up the skb already on the Rx ring. */
					char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
							     pkt_len);

#ifndef final_version
					if (tp->rx_buffers[entry].mapping !=
					    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
						dev_err(&dev->dev,
							"Internal fault: The skbuff addresses do not match in tulip_rx: %08x vs. %08llx %p / %p\n",
							le32_to_cpu(tp->rx_ring[entry].buffer1),
							(unsigned long long)tp->rx_buffers[entry].mapping,
							skb->head, temp);
					}
#endif

					pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
							 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

					tp->rx_buffers[entry].skb = NULL;
					tp->rx_buffers[entry].mapping = 0;
				}
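
				/*
				 * Added note (not in the original source): the
				 * copybreak split above trades one memcpy for
				 * keeping the large ring buffer mapped. With
				 * tulip_rx_copybreak = 100, for example, a
				 * 60-octet ARP reply is copied into a fresh
				 * small skb while the PKT_BUF_SZ ring buffer
				 * stays in place; a 1500-octet frame is passed
				 * up in the ring skb itself instead.
				 */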
				skb->protocol = eth_type_trans(skb, dev);

				netif_receive_skb(skb);

				dev->stats.rx_packets++;
				dev->stats.rx_bytes += pkt_len;
			}
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
			received++;
#endif

			entry = (++tp->cur_rx) % RX_RING_SIZE;
			if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4)
				tulip_refill_rx(dev);
		}

		/* New ack strategy... irq does not ack Rx any longer
		   hopefully this helps */

		/* Really bad things can happen here... If a new packet arrives
		 * and an irq arrives (tx or just due to an occasionally unset
		 * mask), it will be acked by the irq handler, but the new poll
		 * thread is not scheduled. It is a major hole in the design.
		 * No idea how to fix this if "playing with fire" fails
		 * tomorrow (night 011029). If it does not fail, we have won:
		 * the amount of IO did not increase at all. */
	} while ((ioread32(tp->base_addr + CSR5) & RxIntr));

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION

	/* We use this simplistic scheme for IM. It's proven by
	   real-life installations. We could keep IM enabled
	   continuously, but this would cause unnecessary latency.
	   Unfortunately we can't use all the NET_RX_* feedback here:
	   that would turn on IM for devices that are not contributing
	   to backlog congestion, again with unnecessary latency.

	   We monitor the device RX ring and have:

	   HW Interrupt Mitigation either ON or OFF.

	   ON:  more than 1 pkt received (per intr.) OR we are dropping
	   OFF: only 1 pkt received

	   Note: we only use the min and max (0, 15) settings from mit_table */

	if (tp->flags & HAS_INTR_MITIGATION) {
		if (received > 1) {
			if (!tp->mit_on) {
				tp->mit_on = 1;
				iowrite32(mit_table[MIT_TABLE], tp->base_addr + CSR11);
			}
		} else {
			if (tp->mit_on) {
				tp->mit_on = 0;
				iowrite32(0, tp->base_addr + CSR11);
			}
		}
	}

#endif /* CONFIG_TULIP_NAPI_HW_MITIGATION */

	tulip_refill_rx(dev);

	/* If RX ring is not full we are out of memory. */
	if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		goto oom;

	/* Remove us from polling list and enable RX intr. */

	napi_complete(napi);
	iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7);

	/* The last op happens after poll completion. Which means the following:
	 * 1. it can race with disabling irqs in irq handler
	 * 2. it can race with dis-/enabling irqs in other poll threads
	 * 3. if an irq raised after the beginning of the loop, it will be
	 *    immediately triggered here.
	 *
	 * Summarizing: the logic results in some redundant irqs both
	 * due to races in masking and due to too-late acking of already
	 * processed irqs. But it must not result in losing events.
	 */

	return work_done;

 not_done:
	if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 ||
	    tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		tulip_refill_rx(dev);

	if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		goto oom;

	return work_done;

 oom:	/* Executed with RX ints disabled */

	/* Start timer, stop polling, but do not enable rx interrupts. */
	mod_timer(&tp->oom_timer, jiffies+1);

	/* Think: timer_pending() was an explicit signature of bug.
	 * Timer can be pending now but fired and completed
	 * before we did napi_complete(). See? We would lose it. */

	/* remove ourselves from the polling list */
	napi_complete(napi);

	return work_done;
}

#else /* CONFIG_TULIP_NAPI */

static int tulip_rx(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	int entry = tp->cur_rx % RX_RING_SIZE;
	int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
	int received = 0;

	if (tulip_debug > 4)
		netdev_dbg(dev, "In tulip_rx(), entry %d %08x\n",
			   entry, tp->rx_ring[entry].status);
	/* If we own the next entry, it is a new packet. Send it up. */
	while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
		s32 status = le32_to_cpu(tp->rx_ring[entry].status);
		short pkt_len;

		if (tulip_debug > 5)
			netdev_dbg(dev, "In tulip_rx(), entry %d %08x\n",
				   entry, status);
		if (--rx_work_limit < 0)
			break;

		/*
		   Omit the four octet CRC from the length.
		   (May not be considered valid until we have
		   checked status for RxLengthOver2047 bits)
		*/
		pkt_len = ((status >> 16) & 0x7ff) - 4;

		/*
		   Maximum pkt_len is 1518 (1514 + vlan header)
		   Anything higher than this is always invalid
		   regardless of RxLengthOver2047 bits
		*/
		if ((status & (RxLengthOver2047 |
			       RxDescCRCError |
			       RxDescCollisionSeen |
			       RxDescRunt |
			       RxDescDescErr |
			       RxWholePkt)) != RxWholePkt ||
		    pkt_len > 1518) {
			if ((status & (RxLengthOver2047 |
				       RxWholePkt)) != RxWholePkt) {
				/* Ignore earlier buffers. */
				if ((status & 0xffff) != 0x7fff) {
					if (tulip_debug > 1)
						netdev_warn(dev,
							    "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
							    status);
					dev->stats.rx_length_errors++;
				}
			} else {
				/* There was a fatal error. */
				netdev_dbg(dev, "Receive error, Rx status %08x\n",
					   status);
				dev->stats.rx_errors++; /* end of a packet. */
				if (pkt_len > 1518 ||
				    (status & RxDescRunt))
					dev->stats.rx_length_errors++;
				if (status & 0x0004)
					dev->stats.rx_frame_errors++;
				if (status & 0x0002)
					dev->stats.rx_crc_errors++;
				if (status & 0x0001)
					dev->stats.rx_fifo_errors++;
			}
		} else {
			struct sk_buff *skb;

			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < tulip_rx_copybreak &&
			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single_for_cpu(tp->pdev,
							    tp->rx_buffers[entry].mapping,
							    pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
				skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
							pkt_len);
				skb_put(skb, pkt_len);
#else
				memcpy(skb_put(skb, pkt_len),
				       tp->rx_buffers[entry].skb->data,
				       pkt_len);
#endif
				pci_dma_sync_single_for_device(tp->pdev,
							       tp->rx_buffers[entry].mapping,
							       pkt_len, PCI_DMA_FROMDEVICE);
			} else {	/* Pass up the skb already on the Rx ring. */
				char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
						     pkt_len);

#ifndef final_version
				if (tp->rx_buffers[entry].mapping !=
				    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
					dev_err(&dev->dev,
						"Internal fault: The skbuff addresses do not match in tulip_rx: %08x vs. %Lx %p / %p\n",
						le32_to_cpu(tp->rx_ring[entry].buffer1),
						(long long)tp->rx_buffers[entry].mapping,
						skb->head, temp);
				}
#endif

				pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
						 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

				tp->rx_buffers[entry].skb = NULL;
				tp->rx_buffers[entry].mapping = 0;
			}
			skb->protocol = eth_type_trans(skb, dev);

			netif_rx(skb);

			dev->stats.rx_packets++;
			dev->stats.rx_bytes += pkt_len;
		}
		received++;
		entry = (++tp->cur_rx) % RX_RING_SIZE;
	}
	return received;
}
#endif  /* CONFIG_TULIP_NAPI */

static inline unsigned int phy_interrupt (struct net_device *dev)
{
#ifdef __hppa__
	struct tulip_private *tp = netdev_priv(dev);
	int csr12 = ioread32(tp->base_addr + CSR12) & 0xff;

	if (csr12 != tp->csr12_shadow) {
		/* ack interrupt */
		iowrite32(csr12 | 0x02, tp->base_addr + CSR12);
		tp->csr12_shadow = csr12;
		/* do link change stuff */
		spin_lock(&tp->lock);
		tulip_check_duplex(dev);
		spin_unlock(&tp->lock);
		/* clear irq ack bit */
		iowrite32(csr12 & ~0x02, tp->base_addr + CSR12);

		return 1;
	}
#endif

	return 0;
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
irqreturn_t tulip_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;
	int csr5;
	int missed;
	int rx = 0;
	int tx = 0;
	int oi = 0;
	int maxrx = RX_RING_SIZE;
	int maxtx = TX_RING_SIZE;
	int maxoi = TX_RING_SIZE;
#ifdef CONFIG_TULIP_NAPI
	int rxd = 0;
#else
	int entry;
#endif
	unsigned int work_count = tulip_max_interrupt_work;
	unsigned int handled = 0;

	/* Let's see whether the interrupt really is for us */
	csr5 = ioread32(ioaddr + CSR5);

	if (tp->flags & HAS_PHY_IRQ)
		handled = phy_interrupt (dev);

	if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
		return IRQ_RETVAL(handled);

	tp->nir++;

	do {

#ifdef CONFIG_TULIP_NAPI

		if (!rxd && (csr5 & (RxIntr | RxNoBuf))) {
			rxd++;
			/* Mask RX intrs and add the device to poll list. */
			iowrite32(tulip_tbl[tp->chip_id].valid_intrs&~RxPollInt,
				  ioaddr + CSR7);
			napi_schedule(&tp->napi);

			if (!(csr5&~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass)))
				break;
		}

		/* Acknowledge the interrupt sources we handle here ASAP
		   the poll function does Rx and RxNoBuf acking */

		iowrite32(csr5 & 0x0001ff3f, ioaddr + CSR5);

#else
		/* Acknowledge all of the current interrupt sources ASAP. */
		iowrite32(csr5 & 0x0001ffff, ioaddr + CSR5);

		if (csr5 & (RxIntr | RxNoBuf)) {
			rx += tulip_rx(dev);
			tulip_refill_rx(dev);
		}

#endif /*  CONFIG_TULIP_NAPI */

		if (tulip_debug > 4)
			netdev_dbg(dev, "interrupt  csr5=%#8.8x new csr5=%#8.8x\n",
				   csr5, ioread32(ioaddr + CSR5));

		if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
			unsigned int dirty_tx;

			spin_lock(&tp->lock);

			for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
			     dirty_tx++) {
				int entry = dirty_tx % TX_RING_SIZE;
				int status = le32_to_cpu(tp->tx_ring[entry].status);

				if (status < 0)
					break;			/* It still has not been Txed */

				/* Check for Rx filter setup frames. */
				if (tp->tx_buffers[entry].skb == NULL) {
					/* test because dummy frames not mapped */
					if (tp->tx_buffers[entry].mapping)
						pci_unmap_single(tp->pdev,
							 tp->tx_buffers[entry].mapping,
							 sizeof(tp->setup_frame),
							 PCI_DMA_TODEVICE);
					continue;
				}

				if (status & 0x8000) {
					/* There was a major error; log it. */
#ifndef final_version
					if (tulip_debug > 1)
						netdev_dbg(dev, "Transmit error, Tx status %08x\n",
							   status);
#endif
					dev->stats.tx_errors++;
					if (status & 0x4104)
						dev->stats.tx_aborted_errors++;
					if (status & 0x0C00)
						dev->stats.tx_carrier_errors++;
					if (status & 0x0200)
						dev->stats.tx_window_errors++;
					if (status & 0x0002)
						dev->stats.tx_fifo_errors++;
					if ((status & 0x0080) && tp->full_duplex == 0)
						dev->stats.tx_heartbeat_errors++;
				} else {
					dev->stats.tx_bytes +=
						tp->tx_buffers[entry].skb->len;
					dev->stats.collisions += (status >> 3) & 15;
					dev->stats.tx_packets++;
				}

				pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
						 tp->tx_buffers[entry].skb->len,
						 PCI_DMA_TODEVICE);

				/* Free the original skb. */
				dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
				tp->tx_buffers[entry].skb = NULL;
				tp->tx_buffers[entry].mapping = 0;
				tx++;
			}

#ifndef final_version
			if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
				dev_err(&dev->dev,
					"Out-of-sync dirty pointer, %d vs. %d\n",
					dirty_tx, tp->cur_tx);
				dirty_tx += TX_RING_SIZE;
			}
#endif

			if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
				netif_wake_queue(dev);

			tp->dirty_tx = dirty_tx;
653 "The transmitter stopped. CSR5 is %x, CSR6 %x, new CSR6 %x\n",
654 csr5
, ioread32(ioaddr
+ CSR6
),
656 tulip_restart_rxtx(tp
);
658 spin_unlock(&tp
->lock
);

		/* Log errors. */
		if (csr5 & AbnormalIntr) {	/* Abnormal error summary bit. */
			if (csr5 == 0xffffffff)
				break;
			if (csr5 & TxJabber)
				dev->stats.tx_errors++;
			if (csr5 & TxFIFOUnderflow) {
				if ((tp->csr6 & 0xC000) != 0xC000)
					tp->csr6 += 0x4000;	/* Bump up the Tx threshold */
				else
					tp->csr6 |= 0x00200000;  /* Store-n-forward. */
				/* Restart the transmit process. */
				tulip_restart_rxtx(tp);
				iowrite32(0, ioaddr + CSR1);
			}
			if (csr5 & (RxDied | RxNoBuf)) {
				if (tp->flags & COMET_MAC_ADDR) {
					iowrite32(tp->mc_filter[0], ioaddr + 0xAC);
					iowrite32(tp->mc_filter[1], ioaddr + 0xB0);
				}
			}
			if (csr5 & RxDied) {		/* Missed a Rx frame. */
				dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
				dev->stats.rx_errors++;
				tulip_start_rxtx(tp);
			}

			/*
			 * NB: t21142_lnk_change() does a del_timer_sync(), so be careful if this
			 * call is ever done under the spinlock
			 */
			if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
				if (tp->link_change)
					(tp->link_change)(dev, csr5);
			}
			if (csr5 & SystemError) {
				int error = (csr5 >> 23) & 7;
				/* oops, we hit a PCI error.  The code produced corresponds
				 * to the reason:
				 *  0 - parity error
				 *  1 - master abort
				 *  2 - target abort
				 * Note that on parity error, we should do a software reset
				 * of the chip to get it back into a sane state (according
				 * to the 21142/3 docs that is).
				 *   -- rmk
				 */
				dev_err(&dev->dev,
					"(%lu) System Error occurred (%d)\n",
					tp->nir, error);
			}
			/* Clear all error sources, included undocumented ones! */
			iowrite32(0x0800f7ba, ioaddr + CSR5);
			oi++;
		}
		if (csr5 & TimerInt) {

			if (tulip_debug > 2)
				dev_err(&dev->dev,
					"Re-enabling interrupts, %08x\n",
					csr5);
			iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
			tp->ttimer = 0;
			oi++;
		}
		if (tx > maxtx || rx > maxrx || oi > maxoi) {
			if (tulip_debug > 1)
				dev_warn(&dev->dev, "Too much work during an interrupt, csr5=0x%08x. (%lu) (%d,%d,%d)\n",
					 csr5, tp->nir, tx, rx, oi);

			/* Acknowledge all interrupt sources. */
			iowrite32(0x8001ffff, ioaddr + CSR5);
			if (tp->flags & HAS_INTR_MITIGATION) {
				/* Josip Loncaric at ICASE did extensive experimentation
				   to develop a good interrupt mitigation setting.*/
				iowrite32(0x8b240000, ioaddr + CSR11);
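				/*
				 * Added decode (not in the original source), per
				 * the CSR11 bit layout documented with mit_table:
				 * 0x8b240000 sets the cycle-size bit, TX timer = 1,
				 * TX pkts = 3, RX timer = 2, RX pkts = 2, CM = 0.
				 */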
			} else if (tp->chip_id == LC82C168) {
				/* the LC82C168 doesn't have a hw timer.*/
				iowrite32(0x00, ioaddr + CSR7);
				mod_timer(&tp->timer, RUN_AT(HZ/50));
			} else {
				/* Mask all interrupting sources, set timer to
				   re-enable. */
				iowrite32(((~csr5) & 0x0001ebef) | AbnormalIntr | TimerInt,
					  ioaddr + CSR7);
				iowrite32(0x0012, ioaddr + CSR11);
			}
			break;
		}

		work_count--;
		if (work_count == 0)
			break;

		csr5 = ioread32(ioaddr + CSR5);

#ifdef CONFIG_TULIP_NAPI
		if (rxd)
			csr5 &= ~RxPollInt;
	} while ((csr5 & (TxNoBuf |
			  TxDied |
			  TxIntr |
			  TimerInt |
			  /* Abnormal intr. */
			  RxDied |
			  TxFIFOUnderflow |
			  TxJabber |
			  TPLnkFail |
			  SystemError)) != 0);
#else
	} while ((csr5 & (NormalIntr|AbnormalIntr)) != 0);

	tulip_refill_rx(dev);

	/* check if the card is in suspend mode */
	entry = tp->dirty_rx % RX_RING_SIZE;
	if (tp->rx_buffers[entry].skb == NULL) {
		if (tulip_debug > 1)
			dev_warn(&dev->dev,
				 "in rx suspend mode: (%lu) (tp->cur_rx = %u, ttimer = %d, rx = %d) go/stay in suspend mode\n",
				 tp->nir, tp->cur_rx, tp->ttimer, rx);
		if (tp->chip_id == LC82C168) {
			iowrite32(0x00, ioaddr + CSR7);
			mod_timer(&tp->timer, RUN_AT(HZ/50));
		} else {
			if (tp->ttimer == 0 || (ioread32(ioaddr + CSR11) & 0xffff) == 0) {
				if (tulip_debug > 1)
					dev_warn(&dev->dev,
						 "in rx suspend mode: (%lu) set timer\n",
						 tp->nir);
				iowrite32(tulip_tbl[tp->chip_id].valid_intrs | TimerInt,
					  ioaddr + CSR7);
				iowrite32(TimerInt, ioaddr + CSR5);
				iowrite32(12, ioaddr + CSR11);
				tp->ttimer = 1;
			}
		}
	}
#endif /* CONFIG_TULIP_NAPI */

	if ((missed = ioread32(ioaddr + CSR8) & 0x1ffff)) {
		dev->stats.rx_dropped += missed & 0x10000 ? 0x10000 : missed;
	}
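
	/*
	 * Added note (not in the original source): CSR8 is the 21143
	 * missed-frames counter; bits 15:0 hold the count and bit 16 flags
	 * counter overflow, which is why the overflow case is clamped to
	 * 0x10000 above.
	 */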

	if (tulip_debug > 4)
		netdev_dbg(dev, "exiting interrupt, csr5=%#04x\n",
			   ioread32(ioaddr + CSR5));

	return IRQ_HANDLED;
}