/*
    drivers/net/tulip/interrupt.c

    Maintained by Jeff Garzik <jgarzik@mandrakesoft.com>
    Copyright 2000  The Linux Kernel Team
    Written/copyright 1994-1999 by Donald Becker.

    This software may be used and distributed according to the terms
    of the GNU Public License, incorporated herein by reference.

    Please refer to Documentation/networking/tulip.txt for more
    information on this driver.
*/
#include "tulip.h"
#include <linux/etherdevice.h>
#include <linux/pci.h>

int tulip_rx_copybreak;
unsigned int tulip_max_interrupt_work;
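/* Both knobs are presumably set from the rx_copybreak and
   max_interrupt_work module parameters in tulip_core.c: packets shorter
   than tulip_rx_copybreak are copied into a freshly allocated skbuff
   rather than passed up in their ring buffer, and
   tulip_max_interrupt_work bounds how many passes the interrupt handler
   makes before backing off (the work_count loop below). */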
static int tulip_refill_rx(struct net_device *dev)
{
    struct tulip_private *tp = (struct tulip_private *)dev->priv;
    int entry;
    int refilled = 0;

    /* Refill the Rx ring buffers. */
    for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
        entry = tp->dirty_rx % RX_RING_SIZE;
        if (tp->rx_buffers[entry].skb == NULL) {
            struct sk_buff *skb;
            dma_addr_t mapping;

            skb = tp->rx_buffers[entry].skb = dev_alloc_skb(PKT_BUF_SZ);
            if (skb == NULL)
                break;

            mapping = pci_map_single(tp->pdev, skb->tail, PKT_BUF_SZ,
                                     PCI_DMA_FROMDEVICE);
            tp->rx_buffers[entry].mapping = mapping;

            skb->dev = dev; /* Mark as being used by this device. */
            tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
            refilled++;
        }
        tp->rx_ring[entry].status = cpu_to_le32(DescOwned);
    }
    return refilled;
}
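
/* Each Rx descriptor's status word carries an ownership bit (DescOwned,
   bit 31).  The chip clears it on descriptors it has filled, handing
   them to the host; tulip_rx() below walks tp->cur_rx over those and
   passes the packets to the stack, while tulip_refill_rx() above
   re-arms emptied slots by setting DescOwned again. */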
static int tulip_rx(struct net_device *dev)
{
    struct tulip_private *tp = (struct tulip_private *)dev->priv;
    int entry = tp->cur_rx % RX_RING_SIZE;
    int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
    int received = 0;

    if (tulip_debug > 4)
        printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
               tp->rx_ring[entry].status);
    /* If we own the next entry, it is a new packet. Send it up. */
    while (!(tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
        s32 status = le32_to_cpu(tp->rx_ring[entry].status);

        if (tulip_debug > 5)
            printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
                   dev->name, entry, status);
        if (--rx_work_limit < 0)
            break;
        if ((status & 0x38008300) != 0x0300) {
            if ((status & 0x38000300) != 0x0300) {
                /* Ignore earlier buffers. */
                if ((status & 0xffff) != 0x7fff) {
                    if (tulip_debug > 1)
                        printk(KERN_WARNING "%s: Oversized Ethernet frame "
                               "spanned multiple buffers, status %8.8x!\n",
                               dev->name, status);
                    tp->stats.rx_length_errors++;
                }
            } else if (status & RxDescFatalErr) {
                /* There was a fatal error. */
                if (tulip_debug > 2)
                    printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
                           dev->name, status);
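                /* The masks below follow the 21x4x RDES0 error bits as
                   documented for the 21143 (other family members may
                   differ): bits 4/7/11 cover watchdog, frame-too-long
                   and runt frames, bit 2 dribble, bit 1 CRC error,
                   bit 0 FIFO overflow. */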
                tp->stats.rx_errors++; /* end of a packet. */
                if (status & 0x0890) tp->stats.rx_length_errors++;
                if (status & 0x0004) tp->stats.rx_frame_errors++;
                if (status & 0x0002) tp->stats.rx_crc_errors++;
                if (status & 0x0001) tp->stats.rx_fifo_errors++;
            }
        } else {
            /* Omit the four octet CRC from the length. */
            short pkt_len = ((status >> 16) & 0x7ff) - 4;
            struct sk_buff *skb;

#ifndef final_version
            if (pkt_len > 1518) {
                printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
                       dev->name, pkt_len, pkt_len);
                pkt_len = 1518;
                tp->stats.rx_length_errors++;
            }
#endif
            /* Check if the packet is long enough to accept without copying
               to a minimally-sized skbuff. */
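            /* Copying short frames lets the PKT_BUF_SZ-sized ring buffer
               stay mapped and be reused immediately, at the cost of one
               memcpy; longer frames skip the copy but surrender their
               ring buffer, which tulip_refill_rx() must then replace. */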
            if (pkt_len < tulip_rx_copybreak
                && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
                skb->dev = dev;
                skb_reserve(skb, 2);    /* 16 byte align the IP header */
                pci_dma_sync_single(tp->pdev,
                                    tp->rx_buffers[entry].mapping,
                                    pkt_len, PCI_DMA_FROMDEVICE);
#if !defined(__alpha__)
                eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->tail,
                                 pkt_len, 0);
                skb_put(skb, pkt_len);
#else
                memcpy(skb_put(skb, pkt_len),
                       tp->rx_buffers[entry].skb->tail,
                       pkt_len);
#endif
            } else {    /* Pass up the skb already on the Rx ring. */
                char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
                                     pkt_len);

#ifndef final_version
                if (tp->rx_buffers[entry].mapping !=
                    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
                    printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
                           "do not match in tulip_rx: %08x vs. %08x %p / %p.\n",
                           dev->name,
                           le32_to_cpu(tp->rx_ring[entry].buffer1),
                           tp->rx_buffers[entry].mapping,
                           skb->head, temp);
                }
#endif
                pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
                                 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

                tp->rx_buffers[entry].skb = NULL;
                tp->rx_buffers[entry].mapping = 0;
            }
            skb->protocol = eth_type_trans(skb, dev);
            netif_rx(skb);
            dev->last_rx = jiffies;
            tp->stats.rx_packets++;
            tp->stats.rx_bytes += pkt_len;
        }
        received++;
        entry = (++tp->cur_rx) % RX_RING_SIZE;
    }
    return received;
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
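/* The handler loops: read CSR5, acknowledge (write back) the bits just
   seen, service the Rx, Tx and error work they indicate, then re-read
   CSR5 until no interrupt-summary bits remain or the work budget runs
   out.  CSR5 status bits are write-one-to-clear on the 21x4x family,
   so the acknowledge only clears events that have already been
   observed. */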
void tulip_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
    struct net_device *dev = (struct net_device *)dev_instance;
    struct tulip_private *tp = (struct tulip_private *)dev->priv;
    long ioaddr = dev->base_addr;
    int csr5;
    int entry;
    int missed;
    int rx = 0;
    int tx = 0;
    int oi = 0;
    int maxrx = RX_RING_SIZE;
    int maxtx = TX_RING_SIZE;
    int maxoi = TX_RING_SIZE;
    unsigned int work_count = tulip_max_interrupt_work;
    /* Let's see whether the interrupt really is for us. */
    csr5 = inl(ioaddr + CSR5);

    if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
        return;

    tp->nir++;

    do {
        /* Acknowledge all of the current interrupt sources ASAP. */
        outl(csr5 & 0x0001ffff, ioaddr + CSR5);
        if (tulip_debug > 4)
            printk(KERN_DEBUG "%s: interrupt csr5=%#8.8x new csr5=%#8.8x.\n",
                   dev->name, csr5, inl(dev->base_addr + CSR5));

        if (csr5 & (RxIntr | RxNoBuf)) {
            rx += tulip_rx(dev);
            tulip_refill_rx(dev);
        }

        if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
            unsigned int dirty_tx;
            spin_lock(&tp->lock);

            for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
                 dirty_tx++) {
                int entry = dirty_tx % TX_RING_SIZE;
                int status = le32_to_cpu(tp->tx_ring[entry].status);
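                /* DescOwned is bit 31, so a negative status means the
                   chip still owns this descriptor. */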
                if (status < 0)
                    break;  /* It still has not been Txed. */

                /* Check for Rx filter setup frames. */
                if (tp->tx_buffers[entry].skb == NULL) {
                    /* Test because dummy frames are not mapped. */
                    if (tp->tx_buffers[entry].mapping)
                        pci_unmap_single(tp->pdev,
                                         tp->tx_buffers[entry].mapping,
                                         sizeof(tp->setup_frame),
                                         PCI_DMA_TODEVICE);
                    continue;
                }
                if (status & 0x8000) {
                    /* There was a major error; log it. */
#ifndef final_version
                    if (tulip_debug > 1)
                        printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
                               dev->name, status);
#endif
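                    /* Rough TDES0 bit mapping, per the 21143 manual
                       (other family members may differ): 0x4104 covers
                       jabber timeout, excessive collisions and link
                       fail; 0x0C00 is no/lost carrier; 0x0200 is late
                       collision; 0x0002 is underflow; bits 3-6 hold
                       the collision count. */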
                    tp->stats.tx_errors++;
                    if (status & 0x4104) tp->stats.tx_aborted_errors++;
                    if (status & 0x0C00) tp->stats.tx_carrier_errors++;
                    if (status & 0x0200) tp->stats.tx_window_errors++;
                    if (status & 0x0002) tp->stats.tx_fifo_errors++;
                    if ((status & 0x0080) && tp->full_duplex == 0)
                        tp->stats.tx_heartbeat_errors++;
                } else {
                    tp->stats.tx_bytes +=
                        tp->tx_buffers[entry].skb->len;
                    tp->stats.collisions += (status >> 3) & 15;
                    tp->stats.tx_packets++;
                }
                pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
                                 tp->tx_buffers[entry].skb->len,
                                 PCI_DMA_TODEVICE);

                /* Free the original skb. */
                dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
                tp->tx_buffers[entry].skb = NULL;
                tp->tx_buffers[entry].mapping = 0;
                tx++;
            }
#ifndef final_version
            if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
                printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d.\n",
                       dev->name, dirty_tx, tp->cur_tx);
                dirty_tx += TX_RING_SIZE;
            }
#endif

            if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
                netif_wake_queue(dev);

            tp->dirty_tx = dirty_tx;
            if (csr5 & TxDied) {
                if (tulip_debug > 2)
                    printk(KERN_WARNING "%s: The transmitter stopped."
                           " CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
                           dev->name, csr5, inl(ioaddr + CSR6), tp->csr6);
                tulip_restart_rxtx(tp, tp->csr6);
            }
            spin_unlock(&tp->lock);
        }
        /* Log errors. */
        if (csr5 & AbnormalIntr) {  /* Abnormal error summary bit. */
            if (csr5 == 0xffffffff)
                break;
            if (csr5 & TxJabber) tp->stats.tx_errors++;
            if (csr5 & TxFIFOUnderflow) {
                if ((tp->csr6 & 0xC000) != 0xC000)
                    tp->csr6 += 0x4000;  /* Bump up the Tx threshold. */
                else
                    tp->csr6 |= 0x00200000;  /* Store-and-forward. */
                /* Restart the transmit process. */
                tulip_restart_rxtx(tp, tp->csr6);
                outl(0, ioaddr + CSR1);
            }
            if (csr5 & RxDied) {  /* Missed a Rx frame. */
                tp->stats.rx_errors++;
                tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
                tulip_outl_csr(tp, tp->csr6 | csr6_st | csr6_sr, CSR6);
            }
            /*
             * NB: t21142_lnk_change() does a del_timer_sync(), so be careful
             * if this call is ever done under the spinlock.
             */
            if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
                if (tp->link_change)
                    (tp->link_change)(dev, csr5);
            }
            if (csr5 & SytemError) {
                printk(KERN_ERR "%s: (%lu) System Error occurred\n",
                       dev->name, tp->nir);
            }
            /* Clear all error sources, including undocumented ones! */
            outl(0x0800f7ba, ioaddr + CSR5);
            oi++;
        }
        if (csr5 & TimerInt) {
            if (tulip_debug > 2)
                printk(KERN_ERR "%s: Re-enabling interrupts, %8.8x.\n",
                       dev->name, csr5);
            outl(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
            tp->ttimer = 0;
            oi++;
        }
        if (tx > maxtx || rx > maxrx || oi > maxoi) {
            if (tulip_debug > 1)
                printk(KERN_WARNING "%s: Too much work during an interrupt, "
                       "csr5=0x%8.8x. (%lu) (%d,%d,%d)\n",
                       dev->name, csr5, tp->nir, tx, rx, oi);

            /* Acknowledge all interrupt sources. */
            outl(0x8001ffff, ioaddr + CSR5);
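            /* Two ways out of interrupt overload: chips with hardware
               mitigation stay enabled but are paced through CSR11, while
               older chips mask everything except the abnormal-summary
               and timer interrupts and rely on the general-purpose timer
               (armed below) to re-enable the rest via the TimerInt
               branch above. */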
            if (tp->flags & HAS_INTR_MITIGATION) {
                /* Josip Loncaric at ICASE did extensive experimentation
                   to develop a good interrupt mitigation setting. */
                outl(0x8b240000, ioaddr + CSR11);
            } else {
                /* Mask all interrupting sources, set timer to
                   re-enable. */
                outl(((~csr5) & 0x0001ebef) | AbnormalIntr | TimerInt,
                     ioaddr + CSR7);
                outl(0x0012, ioaddr + CSR11);
            }
            break;
        }
        work_count--;
        if (work_count == 0)
            break;

        csr5 = inl(ioaddr + CSR5);
    } while ((csr5 & (NormalIntr|AbnormalIntr)) != 0);
    tulip_refill_rx(dev);
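    /* If the refill above could not allocate skbuffs, the chip may have
       run out of host-owned descriptors and suspended its receive
       process; arm the general-purpose timer so a later TimerInt retries
       the refill instead of leaving reception stalled. */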
    /* Check if the card is in suspend mode. */
    entry = tp->dirty_rx % RX_RING_SIZE;
    if (tp->rx_buffers[entry].skb == NULL) {
        if (tulip_debug > 1)
            printk(KERN_WARNING "%s: in rx suspend mode: (%lu) (tp->cur_rx = %u, ttimer = %d, rx = %d) go/stay in suspend mode\n",
                   dev->name, tp->nir, tp->cur_rx, tp->ttimer, rx);
        if (tp->ttimer == 0 || (inl(ioaddr + CSR11) & 0xffff) == 0) {
            if (tulip_debug > 1)
                printk(KERN_WARNING "%s: in rx suspend mode: (%lu) set timer\n",
                       dev->name, tp->nir);
            outl(tulip_tbl[tp->chip_id].valid_intrs | TimerInt,
                 ioaddr + CSR7);
            outl(TimerInt, ioaddr + CSR5);
            outl(12, ioaddr + CSR11);
            tp->ttimer = 1;
        }
    }
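    /* CSR8 is the missed-frames counter: the low 16 bits count frames
       dropped for lack of receive descriptors and bit 16 flags a counter
       overflow (hence the saturation at 0x10000 below); reading the
       register clears it, per the 21x4x manuals. */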
    if ((missed = inl(ioaddr + CSR8) & 0x1ffff)) {
        tp->stats.rx_dropped += missed & 0x10000 ? 0x10000 : missed;
    }
    if (tulip_debug > 4)
        printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#4.4x.\n",
               dev->name, inl(ioaddr + CSR5));
}