/*
	drivers/net/tulip/interrupt.c

	Maintained by Jeff Garzik <jgarzik@mandrakesoft.com>
	Copyright 2000  The Linux Kernel Team
	Written/copyright 1994-1999 by Donald Becker.

	This software may be used and distributed according to the terms
	of the GNU Public License, incorporated herein by reference.

	Please refer to Documentation/networking/tulip.txt for more
	information on this driver.
*/
#include "tulip.h"
#include <linux/etherdevice.h>
#include <linux/pci.h>

int tulip_rx_copybreak;
unsigned int tulip_max_interrupt_work;
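
/*
 * Tunables for the receive/interrupt path.  tulip_rx_copybreak is the size
 * below which a received frame is copied into a freshly allocated small
 * skbuff so the full-sized ring buffer can stay on the ring;
 * tulip_max_interrupt_work bounds how many passes one invocation of the
 * interrupt handler will make before giving up.  Both are presumably set up
 * elsewhere in the driver (typically from module parameters).
 */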

static int tulip_refill_rx(struct net_device *dev)
{
	struct tulip_private *tp = (struct tulip_private *)dev->priv;
	int entry;
	int refilled = 0;

	/* Refill the Rx ring buffers. */
	for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
		entry = tp->dirty_rx % RX_RING_SIZE;
		if (tp->rx_buffers[entry].skb == NULL) {
			struct sk_buff *skb;
			dma_addr_t mapping;

			skb = tp->rx_buffers[entry].skb = dev_alloc_skb(PKT_BUF_SZ);
			if (skb == NULL)
				break;

			mapping = pci_map_single(tp->pdev, skb->tail, PKT_BUF_SZ,
						 PCI_DMA_FROMDEVICE);
			tp->rx_buffers[entry].mapping = mapping;

			skb->dev = dev;	/* Mark as being used by this device. */
			tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
			refilled++;
		}
		tp->rx_ring[entry].status = cpu_to_le32(DescOwned);
	}
	return refilled;
}
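
/*
 * Receive path: walk the Rx ring starting at cur_rx and hand completed
 * frames up the stack until we hit a descriptor the chip still owns
 * (DescOwned set) or the work limit runs out.  Returns the number of frames
 * delivered so the interrupt handler can account for the work done.
 */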
static int tulip_rx(struct net_device *dev)
{
	struct tulip_private *tp = (struct tulip_private *)dev->priv;
	int entry = tp->cur_rx % RX_RING_SIZE;
	int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
	int received = 0;

	if (tulip_debug > 4)
		printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
		       tp->rx_ring[entry].status);

	/* If we own the next entry, it is a new packet. Send it up. */
	while (!(tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
		s32 status = le32_to_cpu(tp->rx_ring[entry].status);

		if (tulip_debug > 5)
			printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
			       dev->name, entry, status);
		if (--rx_work_limit < 0)
			break;
		if ((status & 0x38008300) != 0x0300) {
			if ((status & 0x38000300) != 0x0300) {
				/* Ignore earlier buffers. */
				if ((status & 0xffff) != 0x7fff) {
					if (tulip_debug > 1)
						printk(KERN_WARNING "%s: Oversized Ethernet frame "
						       "spanned multiple buffers, status %8.8x!\n",
						       dev->name, status);
					tp->stats.rx_length_errors++;
				}
			} else if (status & RxDescFatalErr) {
				/* There was a fatal error. */
				if (tulip_debug > 2)
					printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
					       dev->name, status);
				tp->stats.rx_errors++; /* end of a packet. */
				if (status & 0x0890) tp->stats.rx_length_errors++;
				if (status & 0x0004) tp->stats.rx_frame_errors++;
				if (status & 0x0002) tp->stats.rx_crc_errors++;
				if (status & 0x0001) tp->stats.rx_fifo_errors++;
			}
		} else {
			/* Omit the four octet CRC from the length. */
			short pkt_len = ((status >> 16) & 0x7ff) - 4;
			struct sk_buff *skb;

#ifndef final_version
			if (pkt_len > 1518) {
				printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
				       dev->name, pkt_len, pkt_len);
				pkt_len = 1518;
				tp->stats.rx_length_errors++;
			}
#endif
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < tulip_rx_copybreak
			    && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb->dev = dev;
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single(tp->pdev,
						    tp->rx_buffers[entry].mapping,
						    pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
				eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->tail,
						 pkt_len, 0);
				skb_put(skb, pkt_len);
#else
				memcpy(skb_put(skb, pkt_len),
				       tp->rx_buffers[entry].skb->tail,
				       pkt_len);
#endif
			} else {	/* Pass up the skb already on the Rx ring. */
				char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
						     pkt_len);

#ifndef final_version
				if (tp->rx_buffers[entry].mapping !=
				    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
					printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
					       "do not match in tulip_rx: %08x vs. %08x %p / %p.\n",
					       dev->name,
					       le32_to_cpu(tp->rx_ring[entry].buffer1),
					       tp->rx_buffers[entry].mapping,
					       skb->head, temp);
				}
#endif
				pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
						 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

				tp->rx_buffers[entry].skb = NULL;
				tp->rx_buffers[entry].mapping = 0;
			}
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			dev->last_rx = jiffies;
			tp->stats.rx_packets++;
			tp->stats.rx_bytes += pkt_len;
		}
		received++;
		entry = (++tp->cur_rx) % RX_RING_SIZE;
	}
	return received;
}
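
/*
 * Note that tulip_rx() only consumes ring buffers; replenishing them (and
 * handing the descriptors back to the chip via DescOwned) is done separately
 * by tulip_refill_rx(), which the interrupt handler below calls after each
 * receive pass and once more before returning.
 */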
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
void tulip_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct tulip_private *tp = (struct tulip_private *)dev->priv;
	long ioaddr = dev->base_addr;
	int csr5;
	int entry;
	int missed;
	int rx = 0;
	int tx = 0;
	int oi = 0;
	int maxrx = RX_RING_SIZE;
	int maxtx = TX_RING_SIZE;
	int maxoi = TX_RING_SIZE;
	unsigned int work_count = tulip_max_interrupt_work;
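
	/*
	 * Service loop: read CSR5, acknowledge the pending sources, handle Rx
	 * and Tx completions plus abnormal events, and repeat until the chip
	 * reports no further normal/abnormal interrupts or the per-interrupt
	 * work budget (work_count, maxrx/maxtx/maxoi) is exhausted.
	 */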
	/* Let's see whether the interrupt really is for us */
	csr5 = inl(ioaddr + CSR5);

	if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
		return;

	tp->nir++;

	do {
		/* Acknowledge all of the current interrupt sources ASAP. */
		outl(csr5 & 0x0001ffff, ioaddr + CSR5);

		if (tulip_debug > 4)
			printk(KERN_DEBUG "%s: interrupt csr5=%#8.8x new csr5=%#8.8x.\n",
			       dev->name, csr5, inl(dev->base_addr + CSR5));

		if (csr5 & (RxIntr | RxNoBuf)) {
			rx += tulip_rx(dev);
			tulip_refill_rx(dev);
		}

		if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
			unsigned int dirty_tx;

			spin_lock(&tp->lock);

			for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
			     dirty_tx++) {
				int entry = dirty_tx % TX_RING_SIZE;
				int status = le32_to_cpu(tp->tx_ring[entry].status);

				if (status < 0)
					break;	/* It still has not been Txed */

				/* Check for Rx filter setup frames. */
				if (tp->tx_buffers[entry].skb == NULL) {
					/* test because dummy frames not mapped */
					if (tp->tx_buffers[entry].mapping)
						pci_unmap_single(tp->pdev,
								 tp->tx_buffers[entry].mapping,
								 sizeof(tp->setup_frame),
								 PCI_DMA_TODEVICE);
					continue;
				}

				if (status & 0x8000) {
					/* There was a major error, log it. */
#ifndef final_version
					if (tulip_debug > 1)
						printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
						       dev->name, status);
#endif
					tp->stats.tx_errors++;
					if (status & 0x4104) tp->stats.tx_aborted_errors++;
					if (status & 0x0C00) tp->stats.tx_carrier_errors++;
					if (status & 0x0200) tp->stats.tx_window_errors++;
					if (status & 0x0002) tp->stats.tx_fifo_errors++;
					if ((status & 0x0080) && tp->full_duplex == 0)
						tp->stats.tx_heartbeat_errors++;
				} else {
					tp->stats.tx_bytes +=
						tp->tx_buffers[entry].skb->len;
					tp->stats.collisions += (status >> 3) & 15;
					tp->stats.tx_packets++;
				}

				pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
						 tp->tx_buffers[entry].skb->len,
						 PCI_DMA_TODEVICE);

				/* Free the original skb. */
				dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
				tp->tx_buffers[entry].skb = NULL;
				tp->tx_buffers[entry].mapping = 0;
				tx++;
			}

#ifndef final_version
			if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
				printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d.\n",
				       dev->name, dirty_tx, tp->cur_tx);
				dirty_tx += TX_RING_SIZE;
			}
#endif
			if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
				netif_wake_queue(dev);

			tp->dirty_tx = dirty_tx;
			if (csr5 & TxDied) {
				if (tulip_debug > 2)
					printk(KERN_WARNING "%s: The transmitter stopped."
					       " CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
					       dev->name, csr5, inl(ioaddr + CSR6), tp->csr6);
				tulip_restart_rxtx(tp, tp->csr6);
			}
			spin_unlock(&tp->lock);
		}
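
		/*
		 * Abnormal interrupt summary: log the error sources, raise the
		 * Tx threshold after a FIFO underflow, restart a receiver that
		 * has died, and hand link-status changes to the chip-specific
		 * link_change hook.
		 */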
		if (csr5 & AbnormalIntr) {	/* Abnormal error summary bit. */
			if (csr5 == 0xffffffff)
				break;
			if (csr5 & TxJabber) tp->stats.tx_errors++;
			if (csr5 & TxFIFOUnderflow) {
				if ((tp->csr6 & 0xC000) != 0xC000)
					tp->csr6 += 0x4000;	/* Bump up the Tx threshold */
				else
					tp->csr6 |= 0x00200000;	/* Store-n-forward. */
				/* Restart the transmit process. */
				tulip_restart_rxtx(tp, tp->csr6);
				outl(0, ioaddr + CSR1);
			}
			if (csr5 & RxDied) {	/* Missed a Rx frame. */
				tp->stats.rx_errors++;
				tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
				tulip_outl_csr(tp, tp->csr6 | csr6_st | csr6_sr, CSR6);
			}
			/*
			 * NB: t21142_lnk_change() does a del_timer_sync(), so be careful if this
			 * call is ever done under the spinlock
			 */
			if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
				if (tp->link_change)
					(tp->link_change)(dev, csr5);
			}
			if (csr5 & SytemError) {
				printk(KERN_ERR "%s: (%lu) System Error occurred\n", dev->name, tp->nir);
			}
			/* Clear all error sources, including undocumented ones! */
			outl(0x0800f7ba, ioaddr + CSR5);
			oi++;
		}
		if (csr5 & TimerInt) {
			if (tulip_debug > 2)
				printk(KERN_ERR "%s: Re-enabling interrupts, %8.8x.\n",
				       dev->name, csr5);
			outl(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
			tp->ttimer = 0;
			oi++;
		}
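
		/*
		 * Work-limit escape hatch: if a single interrupt has serviced
		 * more Rx/Tx/other events than the ring sizes allow for, stop
		 * polling CSR5 and either program the interrupt-mitigation
		 * register (CSR11) on chips that have one, or mask the noisy
		 * sources in CSR7 and arm the CSR11 general-purpose timer so
		 * they are re-enabled later.
		 */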
		if (tx > maxtx || rx > maxrx || oi > maxoi) {
			if (tulip_debug > 1)
				printk(KERN_WARNING "%s: Too much work during an interrupt, "
				       "csr5=0x%8.8x. (%lu) (%d,%d,%d)\n",
				       dev->name, csr5, tp->nir, tx, rx, oi);

			/* Acknowledge all interrupt sources. */
			outl(0x8001ffff, ioaddr + CSR5);
			if (tp->flags & HAS_INTR_MITIGATION) {
				/* Josip Loncaric at ICASE did extensive experimentation
				   to develop a good interrupt mitigation setting. */
				outl(0x8b240000, ioaddr + CSR11);
			} else {
				/* Mask all interrupting sources, set timer to
				   re-enable. */
				outl(((~csr5) & 0x0001ebef) | AbnormalIntr | TimerInt, ioaddr + CSR7);
				outl(0x0012, ioaddr + CSR11);
			}
			break;
		}

		work_count--;
		if (work_count == 0)
			break;

		csr5 = inl(ioaddr + CSR5);
	} while ((csr5 & (NormalIntr|AbnormalIntr)) != 0);
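
	/*
	 * One final refill attempt.  If the slot at dirty_rx still has no skb,
	 * the refill could not allocate a buffer for the oldest entry and the
	 * receiver is in (or heading for) suspend mode; arm the CSR11 timer so
	 * a later TimerInt can retry the refill.
	 */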
	tulip_refill_rx(dev);

	/* check if the card is in suspend mode */
	entry = tp->dirty_rx % RX_RING_SIZE;
	if (tp->rx_buffers[entry].skb == NULL) {
		if (tulip_debug > 1)
			printk(KERN_WARNING "%s: in rx suspend mode: (%lu) (tp->cur_rx = %u, ttimer = %d, rx = %d) go/stay in suspend mode\n",
			       dev->name, tp->nir, tp->cur_rx, tp->ttimer, rx);
		if (tp->ttimer == 0 || (inl(ioaddr + CSR11) & 0xffff) == 0) {
			if (tulip_debug > 1)
				printk(KERN_WARNING "%s: in rx suspend mode: (%lu) set timer\n",
				       dev->name, tp->nir);
			outl(tulip_tbl[tp->chip_id].valid_intrs | TimerInt,
			     ioaddr + CSR7);
			outl(TimerInt, ioaddr + CSR5);
			outl(12, ioaddr + CSR11);
			tp->ttimer = 1;
		}
	}

	if ((missed = inl(ioaddr + CSR8) & 0x1ffff)) {
		tp->stats.rx_dropped += missed & 0x10000 ? 0x10000 : missed;
	}

	if (tulip_debug > 4)
		printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#4.4x.\n",
		       dev->name, inl(ioaddr + CSR5));
}