/*
 * Agere Systems Inc.
 * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
 *
 * Copyright © 2005 Agere Systems Inc.
 * All rights reserved.
 *
 *------------------------------------------------------------------------------
 *
 * et1310_tx.c - Routines used to perform data transmission.
 *
 *------------------------------------------------------------------------------
 *
 * This software is provided subject to the following terms and conditions,
 * which you should read carefully before using the software. Using this
 * software indicates your acceptance of these terms and conditions. If you do
 * not agree with these terms and conditions, do not use the software.
 *
 * Copyright © 2005 Agere Systems Inc.
 * All rights reserved.
 *
 * Redistribution and use in source or binary forms, with or without
 * modifications, are permitted provided that the following conditions are met:
 *
 * . Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following Disclaimer as comments in the code as
 *    well as in the documentation and/or other materials provided with the
 *    distribution.
 *
 * . Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following Disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * . Neither the name of Agere Systems Inc. nor the names of the contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * Disclaimer
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
 * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
 * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 */
#include "et131x_version.h"
#include "et131x_defs.h"

#include <linux/pci.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>

#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <asm/system.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/ioport.h>

#include "et1310_phy.h"
#include "et131x_adapter.h"
#include "et1310_tx.h"
static inline void et131x_free_send_packet(struct et131x_adapter *etdev,
					   struct tcb *tcb);
static int et131x_send_packet(struct sk_buff *skb,
			      struct et131x_adapter *etdev);
static int nic_send_packet(struct et131x_adapter *etdev, struct tcb *tcb);
/**
 * et131x_tx_dma_memory_alloc
 * @adapter: pointer to our private adapter structure
 *
 * Returns 0 on success and errno on failure (as defined in errno.h).
 *
 * Allocates memory that will be visible both to the device and to the CPU.
 * The OS will pass us packets, pointers to which we will insert in the Tx
 * Descriptor queue. The device will read this queue to find the packets in
 * memory. The device will update the "status" in memory each time it xmits a
 * packet.
 */
int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
{
	int desc_size = 0;
	struct tx_ring *tx_ring = &adapter->tx_ring;

	/* Allocate memory for the TCB's (Transmit Control Block) */
	adapter->tx_ring.tcb_ring =
		kcalloc(NUM_TCB, sizeof(struct tcb), GFP_ATOMIC | GFP_DMA);
	if (!adapter->tx_ring.tcb_ring) {
		dev_err(&adapter->pdev->dev, "Cannot alloc memory for TCBs\n");
		return -ENOMEM;
	}

	/* Allocate enough memory for the Tx descriptor ring, and allocate
	 * some extra so that the ring can be aligned on a 4k boundary.
	 */
	desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX) + 4096 - 1;
	tx_ring->tx_desc_ring =
	    (struct tx_desc *) pci_alloc_consistent(adapter->pdev, desc_size,
						    &tx_ring->tx_desc_ring_pa);
	if (!adapter->tx_ring.tx_desc_ring) {
		dev_err(&adapter->pdev->dev,
			"Cannot alloc memory for Tx Ring\n");
		return -ENOMEM;
	}

	/* Save physical address
	 *
	 * NOTE: pci_alloc_consistent(), used above to alloc DMA regions,
	 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
	 * are ever returned, make sure the high part is retrieved here before
	 * storing the adjusted address.
	 */
	/* Allocate memory for the Tx status block */
	tx_ring->tx_status = pci_alloc_consistent(adapter->pdev,
						  sizeof(u32),
						  &tx_ring->tx_status_pa);
	if (!adapter->tx_ring.tx_status_pa) {
		dev_err(&adapter->pdev->dev,
			"Cannot alloc memory for Tx status block\n");
		return -ENOMEM;
	}
	return 0;
}
/**
 * et131x_tx_dma_memory_free - Free all memory allocated within this module
 * @adapter: pointer to our private adapter structure
 */
void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
{
	int desc_size = 0;

	if (adapter->tx_ring.tx_desc_ring) {
		/* Free memory relating to Tx rings here */
		desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX)
								+ 4096 - 1;
		pci_free_consistent(adapter->pdev,
				    desc_size,
				    adapter->tx_ring.tx_desc_ring,
				    adapter->tx_ring.tx_desc_ring_pa);
		adapter->tx_ring.tx_desc_ring = NULL;
	}

	/* Free memory for the Tx status block */
	if (adapter->tx_ring.tx_status) {
		pci_free_consistent(adapter->pdev,
				    sizeof(u32),
				    adapter->tx_ring.tx_status,
				    adapter->tx_ring.tx_status_pa);
		adapter->tx_ring.tx_status = NULL;
	}

	/* Free the memory for the tcb structures */
	kfree(adapter->tx_ring.tcb_ring);
}
/**
 * ConfigTxDmaRegs - Set up the tx dma section of the JAGCore.
 * @etdev: pointer to our private adapter structure
 *
 * Configure the transmit engine with the ring buffers we have created
 * and prepare it for use.
 */
void ConfigTxDmaRegs(struct et131x_adapter *etdev)
{
	struct txdma_regs __iomem *txdma = &etdev->regs->txdma;

	/* Load the hardware with the start of the transmit descriptor ring. */
	writel((u32) ((u64)etdev->tx_ring.tx_desc_ring_pa >> 32),
	       &txdma->pr_base_hi);
	writel((u32) etdev->tx_ring.tx_desc_ring_pa,
	       &txdma->pr_base_lo);

	/* Initialise the transmit DMA engine */
	writel(NUM_DESC_PER_RING_TX - 1, &txdma->pr_num_des);

	/* Load the completion writeback physical address */
	writel((u32)((u64)etdev->tx_ring.tx_status_pa >> 32),
	       &txdma->dma_wb_base_hi);
	writel((u32)etdev->tx_ring.tx_status_pa, &txdma->dma_wb_base_lo);

	*etdev->tx_ring.tx_status = 0;

	writel(0, &txdma->service_request);
	etdev->tx_ring.send_idx = 0;
}
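
/*
 * Illustrative sketch, not part of the original driver: ConfigTxDmaRegs()
 * above splits each 64-bit bus address into two 32-bit register writes by
 * hand.  The kernel's upper_32_bits()/lower_32_bits() helpers express the
 * same split; the wrapper below is hypothetical and simply shows the pattern
 * for an arbitrary hi/lo register pair.
 */
static inline void et131x_example_write_dma_addr(dma_addr_t pa,
						 void __iomem *hi,
						 void __iomem *lo)
{
	writel(upper_32_bits((u64)pa), hi);	/* high 32 bits (0 for SAC) */
	writel(lower_32_bits((u64)pa), lo);	/* low 32 bits */
}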
/**
 * et131x_tx_dma_disable - Stop Tx DMA on the ET1310
 * @etdev: pointer to our adapter structure
 */
void et131x_tx_dma_disable(struct et131x_adapter *etdev)
{
	/* Setup the transmit dma configuration register */
	writel(ET_TXDMA_CSR_HALT|ET_TXDMA_SNGL_EPKT,
					&etdev->regs->txdma.csr);
}
/**
 * et131x_tx_dma_enable - Re-start Tx DMA on the ET1310.
 * @etdev: pointer to our adapter structure
 *
 * Mainly used after a return to the D0 (full-power) state from a lower state.
 */
void et131x_tx_dma_enable(struct et131x_adapter *etdev)
{
	/* Setup the transmit dma configuration register for normal
	 * operation
	 */
	writel(ET_TXDMA_SNGL_EPKT|(PARM_DMA_CACHE_DEF << ET_TXDMA_CACHE_SHIFT),
					&etdev->regs->txdma.csr);
}
/**
 * et131x_init_send - Initialize send data structures
 * @adapter: pointer to our private adapter structure
 */
void et131x_init_send(struct et131x_adapter *adapter)
{
	struct tcb *tcb;
	u32 ct;
	struct tx_ring *tx_ring;

	/* Setup some convenience pointers */
	tx_ring = &adapter->tx_ring;
	tcb = adapter->tx_ring.tcb_ring;

	tx_ring->tcb_qhead = tcb;

	memset(tcb, 0, sizeof(struct tcb) * NUM_TCB);

	/* Go through and set up each TCB */
	for (ct = 0; ct++ < NUM_TCB; tcb++)
		/* Set the link pointer in HW TCB to the next TCB in the
		 * chain
		 */
		tcb->next = tcb + 1;

	/* Set the tail pointer */
	tcb--;
	tx_ring->tcb_qtail = tcb;
	tcb->next = NULL;
	/* Curr send queue should now be empty */
	tx_ring->send_head = NULL;
	tx_ring->send_tail = NULL;
}
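
/*
 * Illustrative sketch, not part of the original driver: et131x_init_send()
 * above chains the TCBs into a singly linked "ready" list.  The send path
 * (et131x_send_packet()) pops from tcb_qhead and the completion path
 * (et131x_free_send_packet()) appends at tcb_qtail.  The hypothetical helpers
 * below model that head/tail discipline; locking is omitted for brevity,
 * whereas the driver holds TCBReadyQLock around both operations.
 */
struct example_node {
	struct example_node *next;
};

static inline struct example_node *
example_pop_head(struct example_node **head, struct example_node **tail)
{
	struct example_node *n = *head;

	if (n) {
		*head = n->next;
		if (*head == NULL)	/* list is now empty */
			*tail = NULL;
		n->next = NULL;
	}
	return n;
}

static inline void
example_push_tail(struct example_node **head, struct example_node **tail,
		  struct example_node *n)
{
	n->next = NULL;
	if (*tail)
		(*tail)->next = n;	/* append after current tail */
	else
		*head = n;		/* list was empty */
	*tail = n;
}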
/**
 * et131x_send_packets - This function is called by the OS to send packets
 * @skb: the packet(s) to send
 * @netdev: device on which to TX the above packet(s)
 *
 * Return 0 in almost all cases; non-zero value in extreme hard failure only
 */
int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
{
	int status = 0;
	struct et131x_adapter *etdev = NULL;

	etdev = netdev_priv(netdev);

	/* Send these packets
	 *
	 * NOTE: The Linux Tx entry point is only given one packet at a time
	 * to Tx, so the PacketCount and its associated array are not used
	 * here.
	 */

	/* TCB is not available */
	if (etdev->tx_ring.used >= NUM_TCB) {
		/* NOTE: If there's an error on send, no need to queue the
		 * packet under Linux; if we just send an error up to the
		 * netif layer, it will resend the skb to us.
		 */
		status = -ENOMEM;
	} else {
		/* We need to see if the link is up; if it's not, make the
		 * netif layer think we're good and drop the packet
		 */
		if ((etdev->flags & fMP_ADAPTER_FAIL_SEND_MASK) ||
		    !netif_carrier_ok(netdev)) {
			dev_kfree_skb_any(skb);
			skb = NULL;

			etdev->net_stats.tx_dropped++;
		} else {
			status = et131x_send_packet(skb, etdev);
			if (status != 0 && status != -ENOMEM) {
				/* On any other error, make netif think we're
				 * OK and drop the packet
				 */
				dev_kfree_skb_any(skb);
				skb = NULL;
				etdev->net_stats.tx_dropped++;
			}
		}
	}
	return status;
}
/**
 * et131x_send_packet - Do the work to send a packet
 * @skb: the packet(s) to send
 * @etdev: a pointer to the device's private adapter structure
 *
 * Return 0 in almost all cases; non-zero value in extreme hard failure only.
 *
 * Assumption: Send spinlock has been acquired
 */
static int et131x_send_packet(struct sk_buff *skb,
			      struct et131x_adapter *etdev)
{
	int status;
	struct tcb *tcb = NULL;
	u16 *shbufva;
	unsigned long flags;

	/* All packets must have at least a MAC address and a protocol type */
	if (skb->len < ETH_HLEN)
		return -EIO;

	/* Get a TCB for this packet */
	spin_lock_irqsave(&etdev->TCBReadyQLock, flags);

	tcb = etdev->tx_ring.tcb_qhead;

	if (tcb == NULL) {
		spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
		return -ENOMEM;
	}

	etdev->tx_ring.tcb_qhead = tcb->next;

	if (etdev->tx_ring.tcb_qhead == NULL)
		etdev->tx_ring.tcb_qtail = NULL;

	spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);

	tcb->skb = skb;

	if (skb->data != NULL && skb->len - skb->data_len >= 6) {
		shbufva = (u16 *) skb->data;

		if ((shbufva[0] == 0xffff) &&
		    (shbufva[1] == 0xffff) && (shbufva[2] == 0xffff)) {
			tcb->flags |= fMP_DEST_BROAD;
		} else if ((shbufva[0] & 0x3) == 0x0001) {
			tcb->flags |= fMP_DEST_MULTI;
		}
	}

	tcb->next = NULL;

	/* Call the NIC specific send handler. */
	status = nic_send_packet(etdev, tcb);

	if (status != 0) {
		spin_lock_irqsave(&etdev->TCBReadyQLock, flags);

		if (etdev->tx_ring.tcb_qtail)
			etdev->tx_ring.tcb_qtail->next = tcb;
		else
			/* Apparently ready Q is empty. */
			etdev->tx_ring.tcb_qhead = tcb;

		etdev->tx_ring.tcb_qtail = tcb;
		spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
		return status;
	}
	WARN_ON(etdev->tx_ring.used > NUM_TCB);
	return 0;
}
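
/*
 * Illustrative sketch, not part of the original driver: the shbufva[] test
 * in et131x_send_packet() classifies the frame by its destination MAC, which
 * occupies the first six bytes of skb->data: all-ones is broadcast, and a set
 * I/G bit in the first octet is multicast.  Since <linux/etherdevice.h> is
 * already included, the same classification can be written with the standard
 * helpers; the function below is a hypothetical equivalent.
 */
static inline u32 et131x_example_classify_dest(const struct sk_buff *skb)
{
	/* caller must ensure at least ETH_ALEN linear bytes, as the
	 * driver's length check above does
	 */
	const u8 *dest = skb->data;	/* destination MAC address */

	if (is_broadcast_ether_addr(dest))
		return fMP_DEST_BROAD;
	if (is_multicast_ether_addr(dest))
		return fMP_DEST_MULTI;
	return 0;			/* unicast */
}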
/**
 * nic_send_packet - NIC specific send handler for version B silicon.
 * @etdev: pointer to our adapter
 * @tcb: pointer to struct tcb
 *
 * Returns 0 or errno.
 */
static int nic_send_packet(struct et131x_adapter *etdev, struct tcb *tcb)
{
	u32 i;
	struct tx_desc desc[24];	/* 24 x 16 byte */
	u32 frag = 0;
	u32 thiscopy, remainder;
	struct sk_buff *skb = tcb->skb;
	u32 nr_frags = skb_shinfo(skb)->nr_frags + 1;
	struct skb_frag_struct *frags = &skb_shinfo(skb)->frags[0];
	unsigned long flags;

	/* Part of the optimization of this send routine restricts us to
	 * sending 24 fragments in one pass.  In practice we should never see
	 * more than 5 fragments.
	 *
	 * NOTE: The older version of this function (below) can handle any
	 * number of fragments. If needed, we can call this function,
	 * although it is less efficient.
	 */

	memset(desc, 0, sizeof(struct tx_desc) * (nr_frags + 1));

	for (i = 0; i < nr_frags; i++) {
		/* If there is something in this element, let's get a
		 * descriptor from the ring and gather the necessary data
		 */
		if (i == 0) {
			/* If the fragments are smaller than a standard MTU,
			 * then map them to a single descriptor in the Tx
			 * Desc ring. However, if they're larger, as is
			 * possible with support for jumbo packets, then
			 * split them each across 2 descriptors.
			 *
			 * This will work until we determine why the hardware
			 * doesn't seem to like large fragments.
			 */
			if ((skb->len - skb->data_len) <= 1514) {
				desc[frag].addr_hi = 0;
				/* Low 16bits are length, high is vlan and
				   unused currently so zero */
				desc[frag].len_vlan =
					skb->len - skb->data_len;

				/* NOTE: Here, the dma_addr_t returned from
				 * pci_map_single() is implicitly cast as a
				 * u32. Although dma_addr_t can be
				 * 64-bit, the address returned by
				 * pci_map_single() is always 32-bit
				 * addressable (as defined by the pci/dma
				 * subsystem)
				 */
				desc[frag++].addr_lo =
					pci_map_single(etdev->pdev,
						       skb->data,
						       skb->len -
						       skb->data_len,
						       PCI_DMA_TODEVICE);
			} else {
				desc[frag].addr_hi = 0;
				desc[frag].len_vlan =
					(skb->len - skb->data_len) / 2;

				/* NOTE: Here, the dma_addr_t returned from
				 * pci_map_single() is implicitly cast as a
				 * u32. Although dma_addr_t can be
				 * 64-bit, the address returned by
				 * pci_map_single() is always 32-bit
				 * addressable (as defined by the pci/dma
				 * subsystem)
				 */
				desc[frag++].addr_lo =
					pci_map_single(etdev->pdev,
						       skb->data,
						       (skb->len -
							skb->data_len) / 2,
						       PCI_DMA_TODEVICE);
				desc[frag].addr_hi = 0;

				desc[frag].len_vlan =
					(skb->len - skb->data_len) / 2;

				/* NOTE: Here, the dma_addr_t returned from
				 * pci_map_single() is implicitly cast as a
				 * u32. Although dma_addr_t can be
				 * 64-bit, the address returned by
				 * pci_map_single() is always 32-bit
				 * addressable (as defined by the pci/dma
				 * subsystem)
				 */
				desc[frag++].addr_lo =
					pci_map_single(etdev->pdev,
						       skb->data +
						       (skb->len -
							skb->data_len) / 2,
						       (skb->len -
							skb->data_len) / 2,
						       PCI_DMA_TODEVICE);
			}
		} else {
			desc[frag].addr_hi = 0;
			desc[frag].len_vlan = frags[i - 1].size;

			/* NOTE: Here, the dma_addr_t returned from
			 * pci_map_page() is implicitly cast as a u32.
			 * Although dma_addr_t can be 64-bit, the address
			 * returned by pci_map_page() is always 32-bit
			 * addressable (as defined by the pci/dma subsystem)
			 */
			desc[frag++].addr_lo =
				pci_map_page(etdev->pdev,
					     frags[i - 1].page,
					     frags[i - 1].page_offset,
					     frags[i - 1].size,
					     PCI_DMA_TODEVICE);
		}
	}

	if (etdev->linkspeed == TRUEPHY_SPEED_1000MBPS) {
		if (++etdev->tx_ring.since_irq == PARM_TX_NUM_BUFS_DEF) {
			/* Last element & Interrupt flag */
			desc[frag - 1].flags = 0x5;
			etdev->tx_ring.since_irq = 0;
		} else { /* Last element */
			desc[frag - 1].flags = 0x1;
		}
	} else
		desc[frag - 1].flags = 0x5;

	desc[0].flags |= 2;	/* First element flag */

	tcb->index_start = etdev->tx_ring.send_idx;

	spin_lock_irqsave(&etdev->send_hw_lock, flags);

	thiscopy = NUM_DESC_PER_RING_TX -
				INDEX10(etdev->tx_ring.send_idx);

	if (thiscopy >= frag) {
		remainder = 0;
		thiscopy = frag;
	} else {
		remainder = frag - thiscopy;
	}

	memcpy(etdev->tx_ring.tx_desc_ring +
	       INDEX10(etdev->tx_ring.send_idx), desc,
	       sizeof(struct tx_desc) * thiscopy);

	add_10bit(&etdev->tx_ring.send_idx, thiscopy);

	if (INDEX10(etdev->tx_ring.send_idx) == 0 ||
	    INDEX10(etdev->tx_ring.send_idx) == NUM_DESC_PER_RING_TX) {
		etdev->tx_ring.send_idx &= ~ET_DMA10_MASK;
		etdev->tx_ring.send_idx ^= ET_DMA10_WRAP;
	}

	if (remainder) {
		memcpy(etdev->tx_ring.tx_desc_ring,
		       desc + thiscopy,
		       sizeof(struct tx_desc) * remainder);

		add_10bit(&etdev->tx_ring.send_idx, remainder);
	}

	if (INDEX10(etdev->tx_ring.send_idx) == 0) {
		if (etdev->tx_ring.send_idx)
			tcb->index = NUM_DESC_PER_RING_TX - 1;
		else
			tcb->index = ET_DMA10_WRAP|(NUM_DESC_PER_RING_TX - 1);
	} else
		tcb->index = etdev->tx_ring.send_idx - 1;

	spin_lock(&etdev->TCBSendQLock);

	if (etdev->tx_ring.send_tail)
		etdev->tx_ring.send_tail->next = tcb;
	else
		etdev->tx_ring.send_head = tcb;

	etdev->tx_ring.send_tail = tcb;

	WARN_ON(tcb->next != NULL);

	etdev->tx_ring.used++;

	spin_unlock(&etdev->TCBSendQLock);

	/* Write the new write pointer back to the device. */
	writel(etdev->tx_ring.send_idx,
	       &etdev->regs->txdma.service_request);

	/* For Gig only, we use Tx Interrupt coalescing.  Enable the software
	 * timer to wake us up if this packet isn't followed by N more.
	 */
	if (etdev->linkspeed == TRUEPHY_SPEED_1000MBPS) {
		writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
		       &etdev->regs->global.watchdog_timer);
	}
	spin_unlock_irqrestore(&etdev->send_hw_lock, flags);

	return 0;
}
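
/*
 * Illustrative sketch, not part of the original driver: nic_send_packet()
 * above keeps send_idx, tcb->index and tcb->index_start in a "10-bit index
 * plus wrap bit" format: the low bits select a descriptor slot and one extra
 * bit toggles on every wrap, so producer and consumer can distinguish a full
 * ring from an empty one.  (The descriptor flags are separate: bit 0 = last
 * element, bit 1 = first element, bit 2 = interrupt on completion, hence the
 * 0x5 and 0x2 values above.)  The constants and helpers below are
 * hypothetical and mirror what ET_DMA10_MASK/ET_DMA10_WRAP, INDEX10() and
 * add_10bit() are assumed to encode.
 */
#define EXAMPLE_DMA10_MASK	0x3ff	/* assumed: low 10 bits = index  */
#define EXAMPLE_DMA10_WRAP	0x400	/* assumed: bit 10 = wrap toggle */

static inline u32 example_index10(u32 v)
{
	return v & EXAMPLE_DMA10_MASK;	/* strip the wrap bit */
}

static inline void example_add_10bit(u32 *v, int n)
{
	/* advance the index while preserving the current wrap bit */
	*v = example_index10(*v + n) | (*v & EXAMPLE_DMA10_WRAP);
}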
/**
 * et131x_free_send_packet - Recycle a struct tcb
 * @etdev: pointer to our adapter
 * @tcb: pointer to struct tcb
 *
 * Complete the packet if necessary
 * Assumption - Send spinlock has been acquired
 */
inline void et131x_free_send_packet(struct et131x_adapter *etdev,
				    struct tcb *tcb)
{
	unsigned long flags;
	struct tx_desc *desc = NULL;
	struct net_device_stats *stats = &etdev->net_stats;

	if (tcb->flags & fMP_DEST_BROAD)
		atomic_inc(&etdev->stats.brdcstxmt);
	else if (tcb->flags & fMP_DEST_MULTI)
		atomic_inc(&etdev->stats.multixmt);
	else
		atomic_inc(&etdev->stats.unixmt);

	if (tcb->skb) {
		stats->tx_bytes += tcb->skb->len;

		/* Iterate through the TX descriptors on the ring
		 * corresponding to this packet and unmap the fragments
		 * they point to
		 */
		do {
			desc = (struct tx_desc *)(etdev->tx_ring.tx_desc_ring +
						  INDEX10(tcb->index_start));

			pci_unmap_single(etdev->pdev,
					 desc->addr_lo,
					 desc->len_vlan, PCI_DMA_TODEVICE);

			add_10bit(&tcb->index_start, 1);
			if (INDEX10(tcb->index_start) >=
			    NUM_DESC_PER_RING_TX) {
				tcb->index_start &= ~ET_DMA10_MASK;
				tcb->index_start ^= ET_DMA10_WRAP;
			}
		} while (desc != (etdev->tx_ring.tx_desc_ring +
				  INDEX10(tcb->index)));

		dev_kfree_skb_any(tcb->skb);
	}

	memset(tcb, 0, sizeof(struct tcb));

	/* Add the TCB to the Ready Q */
	spin_lock_irqsave(&etdev->TCBReadyQLock, flags);

	etdev->net_stats.tx_packets++;

	if (etdev->tx_ring.tcb_qtail)
		etdev->tx_ring.tcb_qtail->next = tcb;
	else
		/* Apparently ready Q is empty. */
		etdev->tx_ring.tcb_qhead = tcb;

	etdev->tx_ring.tcb_qtail = tcb;

	spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
	WARN_ON(etdev->tx_ring.used < 0);
}
/**
 * et131x_free_busy_send_packets - Free and complete the stopped active sends
 * @etdev: pointer to our adapter
 *
 * Assumption - Send spinlock has been acquired
 */
void et131x_free_busy_send_packets(struct et131x_adapter *etdev)
{
	struct tcb *tcb;
	unsigned long flags;
	u32 freed = 0;

	/* Any packets being sent? Check the first TCB on the send list */
	spin_lock_irqsave(&etdev->TCBSendQLock, flags);

	tcb = etdev->tx_ring.send_head;

	while (tcb != NULL && freed < NUM_TCB) {
		struct tcb *next = tcb->next;

		etdev->tx_ring.send_head = next;

		if (next == NULL)
			etdev->tx_ring.send_tail = NULL;

		etdev->tx_ring.used--;

		spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);

		freed++;
		et131x_free_send_packet(etdev, tcb);

		spin_lock_irqsave(&etdev->TCBSendQLock, flags);

		tcb = etdev->tx_ring.send_head;
	}

	WARN_ON(freed == NUM_TCB);

	spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);

	etdev->tx_ring.used = 0;
}
/**
 * et131x_handle_send_interrupt - Interrupt handler for send processing
 * @etdev: pointer to our adapter
 *
 * Re-claim the send resources, complete sends and get more to send from
 * the send wait queue.
 *
 * Assumption - Send spinlock has been acquired
 */
void et131x_handle_send_interrupt(struct et131x_adapter *etdev)
{
	unsigned long flags;
	u32 serviced;
	struct tcb *tcb;
	u32 index;

	serviced = readl(&etdev->regs->txdma.new_service_complete);
	index = INDEX10(serviced);

	/* Has the ring wrapped?  Process any descriptors that do not have
	 * the same "wrap" indicator as the current completion indicator
	 */
	spin_lock_irqsave(&etdev->TCBSendQLock, flags);

	tcb = etdev->tx_ring.send_head;

	while (tcb &&
	       ((serviced ^ tcb->index) & ET_DMA10_WRAP) &&
	       index < INDEX10(tcb->index)) {
		etdev->tx_ring.used--;
		etdev->tx_ring.send_head = tcb->next;
		if (tcb->next == NULL)
			etdev->tx_ring.send_tail = NULL;

		spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
		et131x_free_send_packet(etdev, tcb);
		spin_lock_irqsave(&etdev->TCBSendQLock, flags);

		/* Goto the next packet */
		tcb = etdev->tx_ring.send_head;
	}
	while (tcb &&
	       !((serviced ^ tcb->index) & ET_DMA10_WRAP)
	       && index > (tcb->index & ET_DMA10_MASK)) {
		etdev->tx_ring.used--;
		etdev->tx_ring.send_head = tcb->next;
		if (tcb->next == NULL)
			etdev->tx_ring.send_tail = NULL;

		spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
		et131x_free_send_packet(etdev, tcb);
		spin_lock_irqsave(&etdev->TCBSendQLock, flags);

		/* Goto the next packet */
		tcb = etdev->tx_ring.send_head;
	}

	/* Wake up the queue when we hit a low-water mark */
	if (etdev->tx_ring.used <= NUM_TCB / 3)
		netif_wake_queue(etdev->netdev);

	spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
}
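
/*
 * Illustrative sketch, not part of the original driver: the two while loops
 * in et131x_handle_send_interrupt() decide whether a TCB's descriptors have
 * been consumed by comparing the hardware's "serviced" value with tcb->index,
 * both in the 10-bit-plus-wrap format.  When the wrap bits differ, the
 * completion pointer is a lap ahead, so any entry whose in-ring index is
 * above the serviced index is done; when they match, any entry strictly
 * below the serviced index is done.  The predicate below is hypothetical and
 * reuses the assumed mask values from the earlier sketch.
 */
static inline int example_tcb_complete(u32 serviced, u32 tcb_index)
{
	u32 sv_idx = serviced & EXAMPLE_DMA10_MASK;
	u32 tcb_idx = tcb_index & EXAMPLE_DMA10_MASK;

	if ((serviced ^ tcb_index) & EXAMPLE_DMA10_WRAP)
		/* different laps: completion pointer has already wrapped */
		return sv_idx < tcb_idx;

	/* same lap: complete once the pointer has moved past this index */
	return sv_idx > tcb_idx;
}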