/*
 * Agere Systems Inc.
 * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
 *
 * Copyright © 2005 Agere Systems Inc.
 * All rights reserved.
 *
 * Copyright (c) 2011 Mark Einon <mark.einon@gmail.com>
 *
 *------------------------------------------------------------------------------
 *
 * et1310_tx.c - Routines used to perform data transmission.
 *
 *------------------------------------------------------------------------------
 *
 * SOFTWARE LICENSE
 *
 * This software is provided subject to the following terms and conditions,
 * which you should read carefully before using the software. Using this
 * software indicates your acceptance of these terms and conditions. If you do
 * not agree with these terms and conditions, do not use the software.
 *
 * Copyright © 2005 Agere Systems Inc.
 * All rights reserved.
 *
 * Redistribution and use in source or binary forms, with or without
 * modifications, are permitted provided that the following conditions are met:
 *
 * . Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following Disclaimer as comments in the
 *   code as well as in the documentation and/or other materials provided with
 *   the distribution.
 *
 * . Redistributions in binary form must reproduce the above copyright notice,
 *   this list of conditions and the following Disclaimer in the documentation
 *   and/or other materials provided with the distribution.
 *
 * . Neither the name of Agere Systems Inc. nor the names of the contributors
 *   may be used to endorse or promote products derived from this software
 *   without specific prior written permission.
 *
 * Disclaimer
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
 * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
 * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */
#include "et131x_version.h"
#include "et131x_defs.h"

#include <linux/pci.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>

#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <asm/system.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/ioport.h>
#include <linux/phy.h>

#include "et1310_phy.h"
#include "et131x_adapter.h"
#include "et1310_tx.h"
/**
 * et131x_tx_dma_memory_alloc
 * @adapter: pointer to our private adapter structure
 *
 * Returns 0 on success and errno on failure (as defined in errno.h).
 *
 * Allocates memory that will be visible both to the device and to the CPU.
 * The OS will pass us packets, pointers to which we will insert in the Tx
 * Descriptor queue. The device will read this queue to find the packets in
 * memory. The device will update the "status" in memory each time it xmits a
 * packet.
 */
int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
{
	int desc_size = 0;
	struct tx_ring *tx_ring = &adapter->tx_ring;

	/* Allocate memory for the TCBs (Transmit Control Blocks) */
	adapter->tx_ring.tcb_ring =
		kcalloc(NUM_TCB, sizeof(struct tcb), GFP_ATOMIC | GFP_DMA);
	if (!adapter->tx_ring.tcb_ring) {
		dev_err(&adapter->pdev->dev, "Cannot alloc memory for TCBs\n");
		return -ENOMEM;
	}

	/* Allocate enough memory for the Tx descriptor ring, and allocate
	 * some extra so that the ring can be aligned on a 4k boundary.
	 */
	desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX) + 4096 - 1;
	tx_ring->tx_desc_ring =
	    (struct tx_desc *) pci_alloc_consistent(adapter->pdev, desc_size,
						    &tx_ring->tx_desc_ring_pa);
	if (!adapter->tx_ring.tx_desc_ring) {
		dev_err(&adapter->pdev->dev,
					"Cannot alloc memory for Tx Ring\n");
		return -ENOMEM;
	}

	/* Save physical address
	 *
	 * NOTE: pci_alloc_consistent(), used above to alloc DMA regions,
	 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
	 * are ever returned, make sure the high part is retrieved here before
	 * storing the adjusted address.
	 */

	/* Allocate memory for the Tx status block */
	tx_ring->tx_status = pci_alloc_consistent(adapter->pdev,
						  sizeof(u32),
						  &tx_ring->tx_status_pa);
	if (!adapter->tx_ring.tx_status_pa) {
		dev_err(&adapter->pdev->dev,
				"Cannot alloc memory for Tx status block\n");
		return -ENOMEM;
	}
	return 0;
}
/**
 * et131x_tx_dma_memory_free - Free all memory allocated within this module
 * @adapter: pointer to our private adapter structure
 */
void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
{
	int desc_size = 0;

	if (adapter->tx_ring.tx_desc_ring) {
		/* Free memory relating to Tx rings here */
		desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX)
								+ 4096 - 1;
		pci_free_consistent(adapter->pdev,
				    desc_size,
				    adapter->tx_ring.tx_desc_ring,
				    adapter->tx_ring.tx_desc_ring_pa);
		adapter->tx_ring.tx_desc_ring = NULL;
	}

	/* Free memory for the Tx status block */
	if (adapter->tx_ring.tx_status) {
		pci_free_consistent(adapter->pdev,
				    sizeof(u32),
				    adapter->tx_ring.tx_status,
				    adapter->tx_ring.tx_status_pa);
		adapter->tx_ring.tx_status = NULL;
	}

	/* Free the memory for the tcb structures */
	kfree(adapter->tx_ring.tcb_ring);
}
/**
 * et131x_config_tx_dma_regs - Set up the tx dma section of the JAGCore.
 * @adapter: pointer to our private adapter structure
 *
 * Configure the transmit engine with the ring buffers we have created
 * and prepare it for use.
 */
void et131x_config_tx_dma_regs(struct et131x_adapter *adapter)
{
	struct txdma_regs __iomem *txdma = &adapter->regs->txdma;

	/* Load the hardware with the start of the transmit descriptor ring. */
	writel((u32) ((u64)adapter->tx_ring.tx_desc_ring_pa >> 32),
	       &txdma->pr_base_hi);
	writel((u32) adapter->tx_ring.tx_desc_ring_pa,
	       &txdma->pr_base_lo);

	/* Initialise the transmit DMA engine */
	writel(NUM_DESC_PER_RING_TX - 1, &txdma->pr_num_des);

	/* Load the completion writeback physical address */
	writel((u32)((u64)adapter->tx_ring.tx_status_pa >> 32),
	       &txdma->dma_wb_base_hi);
	writel((u32)adapter->tx_ring.tx_status_pa, &txdma->dma_wb_base_lo);

	*adapter->tx_ring.tx_status = 0;

	writel(0, &txdma->service_request);
	adapter->tx_ring.send_idx = 0;
}
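
/* The paired writes above split one bus address across "hi" and "lo" 32-bit
 * registers.  A minimal sketch of that split (local names are illustrative,
 * not driver fields); the kernel's upper_32_bits()/lower_32_bits() helpers
 * express the same thing:
 *
 *	dma_addr_t pa = adapter->tx_ring.tx_desc_ring_pa;
 *	u32 lo = lower_32_bits(pa);
 *	u32 hi = upper_32_bits(pa);
 *
 * With the SAC-only allocations made in et131x_tx_dma_memory_alloc(), "hi" is
 * currently always zero, but writing it keeps the register setup correct if
 * 64-bit bus addresses ever appear.
 */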
/**
 * et131x_tx_dma_disable - Stop Tx_DMA on the ET1310
 * @adapter: pointer to our adapter structure
 */
void et131x_tx_dma_disable(struct et131x_adapter *adapter)
{
	/* Setup the transmit dma configuration register */
	writel(ET_TXDMA_CSR_HALT | ET_TXDMA_SNGL_EPKT,
	       &adapter->regs->txdma.csr);
}
/**
 * et131x_tx_dma_enable - Re-start Tx_DMA on the ET1310.
 * @adapter: pointer to our adapter structure
 *
 * Mainly used after a return to the D0 (full-power) state from a lower state.
 */
void et131x_tx_dma_enable(struct et131x_adapter *adapter)
{
	/* Setup the transmit dma configuration register for normal
	 * operation
	 */
	writel(ET_TXDMA_SNGL_EPKT | (PARM_DMA_CACHE_DEF << ET_TXDMA_CACHE_SHIFT),
	       &adapter->regs->txdma.csr);
}
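
/* A sketch of how the helpers above fit together on the resume path
 * (illustrative ordering only; the actual power-management callback lives
 * outside this file):
 *
 *	et131x_tx_dma_disable(adapter);		halt the engine
 *	et131x_config_tx_dma_regs(adapter);	reload ring/writeback addresses
 *	et131x_init_send(adapter);		reset the TCB lists
 *	et131x_tx_dma_enable(adapter);		resume normal operation
 */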
/**
 * et131x_init_send - Initialize send data structures
 * @adapter: pointer to our private adapter structure
 */
void et131x_init_send(struct et131x_adapter *adapter)
{
	struct tcb *tcb;
	u32 ct;
	struct tx_ring *tx_ring;

	/* Setup some convenience pointers */
	tx_ring = &adapter->tx_ring;
	tcb = adapter->tx_ring.tcb_ring;

	tx_ring->tcb_qhead = tcb;

	memset(tcb, 0, sizeof(struct tcb) * NUM_TCB);

	/* Go through and set up each TCB */
	for (ct = 0; ct++ < NUM_TCB; tcb++)
		/* Set the link pointer in HW TCB to the next TCB in the
		 * chain
		 */
		tcb->next = tcb + 1;

	/* Set the tail pointer */
	tcb--;
	tx_ring->tcb_qtail = tcb;
	tcb->next = NULL;
	/* Curr send queue should now be empty */
	tx_ring->send_head = NULL;
	tx_ring->send_tail = NULL;
}
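
/* After et131x_init_send() the TCB array doubles as a singly linked "ready"
 * list (shown here with NUM_TCB == 64, an illustrative value):
 *
 *	tcb_qhead -> tcb_ring[0] -> tcb_ring[1] -> ... -> tcb_ring[63] -> NULL
 *	                                                  ^-- tcb_qtail
 *
 * send_packet() pops TCBs from tcb_qhead and free_send_packet() pushes
 * recycled ones back at tcb_qtail, both under tcb_ready_qlock.
 */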
/**
 * nic_send_packet - NIC specific send handler for version B silicon.
 * @adapter: pointer to our adapter
 * @tcb: pointer to struct tcb
 *
 * Returns 0 or errno.
 */
static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
{
	u32 i;
	struct tx_desc desc[24];	/* 24 x 16 byte */
	u32 frag = 0;
	u32 thiscopy, remainder;
	struct sk_buff *skb = tcb->skb;
	u32 nr_frags = skb_shinfo(skb)->nr_frags + 1;
	struct skb_frag_struct *frags = &skb_shinfo(skb)->frags[0];
	unsigned long flags;
	struct phy_device *phydev = adapter->phydev;

	/* Part of the optimizations of this send routine restrict us to
	 * sending 24 fragments at a pass. In practice we should never see
	 * more than 5 fragments.
	 *
	 * NOTE: The older version of this function (below) can handle any
	 * number of fragments. If needed, we can call this function,
	 * although it is less efficient.
	 */
	if (nr_frags > 23)
		return -EIO;

	memset(desc, 0, sizeof(struct tx_desc) * (nr_frags + 1));

	for (i = 0; i < nr_frags; i++) {
		/* If there is something in this element, lets get a
		 * descriptor from the ring and get the necessary data
		 */
		if (i == 0) {
			/* If the fragments are smaller than a standard MTU,
			 * then map them to a single descriptor in the Tx
			 * Desc ring. However, if they're larger, as is
			 * possible with support for jumbo packets, then
			 * split them each across 2 descriptors.
			 *
			 * This will work until we determine why the hardware
			 * doesn't seem to like large fragments.
			 *
			 * NOTE: Here, the dma_addr_t returned from
			 * pci_map_single()/pci_map_page() is implicitly cast
			 * as a u32. Although dma_addr_t can be 64-bit, the
			 * address returned by these routines is always 32-bit
			 * addressable (as defined by the pci/dma subsystem).
			 */
			if ((skb->len - skb->data_len) <= 1514) {
				desc[frag].addr_hi = 0;
				/* Low 16bits are length, high is vlan and
				 * unused currently so zero
				 */
				desc[frag].len_vlan =
					skb->len - skb->data_len;
				desc[frag++].addr_lo =
					pci_map_single(adapter->pdev,
						       skb->data,
						       skb->len -
						       skb->data_len,
						       PCI_DMA_TODEVICE);
			} else {
				desc[frag].addr_hi = 0;
				desc[frag].len_vlan =
					(skb->len - skb->data_len) / 2;
				desc[frag++].addr_lo =
					pci_map_single(adapter->pdev,
						       skb->data,
						       (skb->len -
							skb->data_len) / 2,
						       PCI_DMA_TODEVICE);

				desc[frag].addr_hi = 0;
				desc[frag].len_vlan =
					(skb->len - skb->data_len) / 2;
				desc[frag++].addr_lo =
					pci_map_single(adapter->pdev,
						       skb->data +
						       (skb->len -
							skb->data_len) / 2,
						       (skb->len -
							skb->data_len) / 2,
						       PCI_DMA_TODEVICE);
			}
		} else {
			desc[frag].addr_hi = 0;
			desc[frag].len_vlan = frags[i - 1].size;
			desc[frag++].addr_lo =
				pci_map_page(adapter->pdev,
					     frags[i - 1].page,
					     frags[i - 1].page_offset,
					     frags[i - 1].size,
					     PCI_DMA_TODEVICE);
		}
	}

	if (phydev && phydev->speed == SPEED_1000) {
		if (++adapter->tx_ring.since_irq == PARM_TX_NUM_BUFS_DEF) {
			/* Last element & Interrupt flag */
			desc[frag - 1].flags = 0x5;
			adapter->tx_ring.since_irq = 0;
		} else { /* Last element */
			desc[frag - 1].flags = 0x1;
		}
	} else
		desc[frag - 1].flags = 0x5;

	desc[0].flags |= 2;	/* First element flag */

	tcb->index_start = adapter->tx_ring.send_idx;
	tcb->stale = 0;

	spin_lock_irqsave(&adapter->send_hw_lock, flags);

	thiscopy = NUM_DESC_PER_RING_TX -
				INDEX10(adapter->tx_ring.send_idx);

	if (thiscopy >= frag) {
		remainder = 0;
		thiscopy = frag;
	} else {
		remainder = frag - thiscopy;
	}

	memcpy(adapter->tx_ring.tx_desc_ring +
	       INDEX10(adapter->tx_ring.send_idx), desc,
	       sizeof(struct tx_desc) * thiscopy);

	add_10bit(&adapter->tx_ring.send_idx, thiscopy);

	if (INDEX10(adapter->tx_ring.send_idx) == 0 ||
	    INDEX10(adapter->tx_ring.send_idx) == NUM_DESC_PER_RING_TX) {
		adapter->tx_ring.send_idx &= ~ET_DMA10_MASK;
		adapter->tx_ring.send_idx ^= ET_DMA10_WRAP;
	}

	if (remainder) {
		memcpy(adapter->tx_ring.tx_desc_ring,
		       desc + thiscopy,
		       sizeof(struct tx_desc) * remainder);

		add_10bit(&adapter->tx_ring.send_idx, remainder);
	}

	if (INDEX10(adapter->tx_ring.send_idx) == 0) {
		if (adapter->tx_ring.send_idx)
			tcb->index = NUM_DESC_PER_RING_TX - 1;
		else
			tcb->index = ET_DMA10_WRAP |
					(NUM_DESC_PER_RING_TX - 1);
	} else
		tcb->index = adapter->tx_ring.send_idx - 1;

	spin_lock(&adapter->tcb_send_qlock);

	if (adapter->tx_ring.send_tail)
		adapter->tx_ring.send_tail->next = tcb;
	else
		adapter->tx_ring.send_head = tcb;

	adapter->tx_ring.send_tail = tcb;

	WARN_ON(tcb->next != NULL);

	adapter->tx_ring.used++;

	spin_unlock(&adapter->tcb_send_qlock);

	/* Write the new write pointer back to the device. */
	writel(adapter->tx_ring.send_idx,
	       &adapter->regs->txdma.service_request);

	/* For Gig only, we use Tx Interrupt coalescing. Enable the software
	 * timer to wake us up if this packet isn't followed by N more.
	 */
	if (phydev && phydev->speed == SPEED_1000) {
		writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
		       &adapter->regs->global.watchdog_timer);
	}
	spin_unlock_irqrestore(&adapter->send_hw_lock, flags);

	return 0;
}
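
/* send_idx and tcb->index above are 10-bit ring slots with an extra "wrap"
 * bit, handled through INDEX10()/add_10bit() and the ET_DMA10_* masks defined
 * in et1310_tx.h.  A minimal sketch of the pattern used throughout this file
 * (values are illustrative):
 *
 *	u32 idx = ET_DMA10_WRAP | 2;			wrap=1, slot 2
 *
 *	add_10bit(&idx, 1);				wrap=1, slot 3
 *	if (INDEX10(idx) >= NUM_DESC_PER_RING_TX) {
 *		idx &= ~ET_DMA10_MASK;			back to slot 0 ...
 *		idx ^= ET_DMA10_WRAP;			... wrap bit flipped
 *	}
 *
 * Comparing the wrap bits of two such indices, as et131x_handle_send_interrupt()
 * does, tells us whether they are on the same lap of the ring.
 */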
/**
 * send_packet - Do the work to send a packet
 * @skb: the packet(s) to send
 * @adapter: a pointer to the device's private adapter structure
 *
 * Return 0 in almost all cases; non-zero value in extreme hard failure only.
 *
 * Assumption: Send spinlock has been acquired
 */
static int send_packet(struct sk_buff *skb, struct et131x_adapter *adapter)
{
	int status;
	struct tcb *tcb = NULL;
	u16 *shbufva;
	unsigned long flags;

	/* All packets must have at least a MAC address and a protocol type */
	if (skb->len < ETH_HLEN)
		return -EIO;

	/* Get a TCB for this packet */
	spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);

	tcb = adapter->tx_ring.tcb_qhead;

	if (tcb == NULL) {
		spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
		return -ENOMEM;
	}

	adapter->tx_ring.tcb_qhead = tcb->next;

	if (adapter->tx_ring.tcb_qhead == NULL)
		adapter->tx_ring.tcb_qtail = NULL;

	spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);

	tcb->skb = skb;

	if (skb->data != NULL && skb->len - skb->data_len >= 6) {
		shbufva = (u16 *) skb->data;

		if ((shbufva[0] == 0xffff) &&
		    (shbufva[1] == 0xffff) && (shbufva[2] == 0xffff)) {
			tcb->flags |= fMP_DEST_BROAD;
		} else if ((shbufva[0] & 0x3) == 0x0001) {
			tcb->flags |= fMP_DEST_MULTI;
		}
	}

	tcb->next = NULL;

	/* Call the NIC specific send handler. */
	status = nic_send_packet(adapter, tcb);

	if (status != 0) {
		spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);

		if (adapter->tx_ring.tcb_qtail)
			adapter->tx_ring.tcb_qtail->next = tcb;
		else
			/* Apparently ready Q is empty. */
			adapter->tx_ring.tcb_qhead = tcb;

		adapter->tx_ring.tcb_qtail = tcb;
		spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
		return status;
	}
	WARN_ON(adapter->tx_ring.used > NUM_TCB);
	return 0;
}
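
/* The classification above looks only at the destination MAC in the first six
 * bytes of skb->data, read as three 16-bit words in host byte order (the
 * examples below assume a little-endian host; addresses are illustrative):
 *
 *	ff:ff:ff:ff:ff:ff  ->  shbufva[0..2] all 0xffff  ->  fMP_DEST_BROAD
 *	01:00:5e:00:00:01  ->  shbufva[0] == 0x0001      ->  fMP_DEST_MULTI
 *	00:0a:0b:0c:0d:0e  ->  neither flag; counted as a unicast xmit later
 *
 * i.e. the multicast test requires the I/G (group) bit set and the U/L bit
 * clear in the first octet.
 */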
/**
 * et131x_send_packets - This function is called by the OS to send packets
 * @skb: the packet(s) to send
 * @netdev: device on which to TX the above packet(s)
 *
 * Return 0 in almost all cases; non-zero value in extreme hard failure only
 */
int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
{
	int status = 0;
	struct et131x_adapter *adapter = NULL;

	adapter = netdev_priv(netdev);

	/* Send these packets
	 *
	 * NOTE: The Linux Tx entry point is only given one packet at a time
	 * to Tx, so the PacketCount and its associated array make no sense
	 * here
	 */

	/* TCB is not available */
	if (adapter->tx_ring.used >= NUM_TCB) {
		/* NOTE: If there's an error on send, no need to queue the
		 * packet under Linux; if we just send an error up to the
		 * netif layer, it will resend the skb to us.
		 */
		status = -ENOMEM;
	} else {
		/* We need to see if the link is up; if it's not, make the
		 * netif layer think we're good and drop the packet
		 */
		if ((adapter->flags & fMP_ADAPTER_FAIL_SEND_MASK) ||
		    !netif_carrier_ok(netdev)) {
			dev_kfree_skb_any(skb);
			skb = NULL;

			adapter->net_stats.tx_dropped++;
		} else {
			status = send_packet(skb, adapter);
			if (status != 0 && status != -ENOMEM) {
				/* On any other error, make netif think we're
				 * OK and drop the packet
				 */
				dev_kfree_skb_any(skb);
				skb = NULL;

				adapter->net_stats.tx_dropped++;
			}
		}
	}
	return status;
}
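
/* How the return value is consumed is up to the net_device_ops wrapper, which
 * lives outside this file.  A minimal, hypothetical sketch of such a wrapper
 * (the name example_start_xmit is not part of this driver), mapping -ENOMEM
 * ("ring full, no TCB") to NETDEV_TX_BUSY so the stack requeues the skb, and
 * everything else to NETDEV_TX_OK:
 *
 *	static netdev_tx_t example_start_xmit(struct sk_buff *skb,
 *					      struct net_device *netdev)
 *	{
 *		if (et131x_send_packets(skb, netdev) == -ENOMEM) {
 *			netif_stop_queue(netdev);
 *			return NETDEV_TX_BUSY;
 *		}
 *		return NETDEV_TX_OK;
 *	}
 *
 * et131x_handle_send_interrupt() wakes the queue again once enough TCBs have
 * been recycled (the NUM_TCB / 3 low-water mark further down).
 */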
/**
 * free_send_packet - Recycle a struct tcb
 * @adapter: pointer to our adapter
 * @tcb: pointer to struct tcb
 *
 * Complete the packet if necessary
 * Assumption - Send spinlock has been acquired
 */
static inline void free_send_packet(struct et131x_adapter *adapter,
						struct tcb *tcb)
{
	unsigned long flags;
	struct tx_desc *desc = NULL;
	struct net_device_stats *stats = &adapter->net_stats;

	if (tcb->flags & fMP_DEST_BROAD)
		atomic_inc(&adapter->stats.broadcast_pkts_xmtd);
	else if (tcb->flags & fMP_DEST_MULTI)
		atomic_inc(&adapter->stats.multicast_pkts_xmtd);
	else
		atomic_inc(&adapter->stats.unicast_pkts_xmtd);

	if (tcb->skb) {
		stats->tx_bytes += tcb->skb->len;

		/* Iterate through the TX descriptors on the ring
		 * corresponding to this packet and unmap the fragments
		 * they point to
		 */
		do {
			desc = (struct tx_desc *)
				(adapter->tx_ring.tx_desc_ring +
					INDEX10(tcb->index_start));

			pci_unmap_single(adapter->pdev,
					 desc->addr_lo,
					 desc->len_vlan, PCI_DMA_TODEVICE);

			add_10bit(&tcb->index_start, 1);
			if (INDEX10(tcb->index_start) >=
							NUM_DESC_PER_RING_TX) {
				tcb->index_start &= ~ET_DMA10_MASK;
				tcb->index_start ^= ET_DMA10_WRAP;
			}
		} while (desc != (adapter->tx_ring.tx_desc_ring +
				INDEX10(tcb->index)));

		dev_kfree_skb_any(tcb->skb);
	}

	memset(tcb, 0, sizeof(struct tcb));

	/* Add the TCB to the Ready Q */
	spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);

	adapter->net_stats.tx_packets++;

	if (adapter->tx_ring.tcb_qtail)
		adapter->tx_ring.tcb_qtail->next = tcb;
	else
		/* Apparently ready Q is empty. */
		adapter->tx_ring.tcb_qhead = tcb;

	adapter->tx_ring.tcb_qtail = tcb;

	spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
	WARN_ON(adapter->tx_ring.used < 0);
}
/**
 * et131x_free_busy_send_packets - Free and complete the stopped active sends
 * @adapter: pointer to our adapter
 *
 * Assumption - Send spinlock has been acquired
 */
void et131x_free_busy_send_packets(struct et131x_adapter *adapter)
{
	struct tcb *tcb;
	unsigned long flags;
	u32 freed = 0;

	/* Any packets being sent? Check the first TCB on the send list */
	spin_lock_irqsave(&adapter->tcb_send_qlock, flags);

	tcb = adapter->tx_ring.send_head;

	while (tcb != NULL && freed < NUM_TCB) {
		struct tcb *next = tcb->next;

		adapter->tx_ring.send_head = next;

		if (next == NULL)
			adapter->tx_ring.send_tail = NULL;

		adapter->tx_ring.used--;

		spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);

		freed++;
		free_send_packet(adapter, tcb);

		spin_lock_irqsave(&adapter->tcb_send_qlock, flags);

		tcb = adapter->tx_ring.send_head;
	}

	WARN_ON(freed == NUM_TCB);

	spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);

	adapter->tx_ring.used = 0;
}
/**
 * et131x_handle_send_interrupt - Interrupt handler for sending processing
 * @adapter: pointer to our adapter
 *
 * Reclaim the send resources, complete sends and get more to send from
 * the send wait queue.
 *
 * Assumption - Send spinlock has been acquired
 */
void et131x_handle_send_interrupt(struct et131x_adapter *adapter)
{
	unsigned long flags;
	u32 serviced;
	struct tcb *tcb;
	u32 index;

	serviced = readl(&adapter->regs->txdma.new_service_complete);
	index = INDEX10(serviced);

	/* Has the ring wrapped? Process any descriptors that do not have
	 * the same "wrap" indicator as the current completion indicator
	 */
	spin_lock_irqsave(&adapter->tcb_send_qlock, flags);

	tcb = adapter->tx_ring.send_head;

	while (tcb &&
	       ((serviced ^ tcb->index) & ET_DMA10_WRAP) &&
	       index < INDEX10(tcb->index)) {
		adapter->tx_ring.used--;
		adapter->tx_ring.send_head = tcb->next;
		if (tcb->next == NULL)
			adapter->tx_ring.send_tail = NULL;

		spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
		free_send_packet(adapter, tcb);
		spin_lock_irqsave(&adapter->tcb_send_qlock, flags);

		/* Goto the next packet */
		tcb = adapter->tx_ring.send_head;
	}
	while (tcb &&
	       !((serviced ^ tcb->index) & ET_DMA10_WRAP) &&
	       index > (tcb->index & ET_DMA10_MASK)) {
		adapter->tx_ring.used--;
		adapter->tx_ring.send_head = tcb->next;
		if (tcb->next == NULL)
			adapter->tx_ring.send_tail = NULL;

		spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
		free_send_packet(adapter, tcb);
		spin_lock_irqsave(&adapter->tcb_send_qlock, flags);

		/* Goto the next packet */
		tcb = adapter->tx_ring.send_head;
	}

	/* Wake up the queue when we hit a low-water mark */
	if (adapter->tx_ring.used <= NUM_TCB / 3)
		netif_wake_queue(adapter->netdev);

	spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
}
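
/* Why the two loops and the wrap test: "serviced" and tcb->index both carry a
 * 10-bit slot plus the wrap bit, so XORing them and masking with ET_DMA10_WRAP
 * says whether the hardware's completion point is on the same lap of the ring
 * as the descriptor that ended the packet.  A worked example (illustrative
 * numbers):
 *
 *	serviced:   wrap=1, slot=5
 *
 *	tcb->index: wrap=0, slot=500  ->  different lap and 5 < 500:
 *		reclaimed by the first loop (hardware has passed the wrap)
 *	tcb->index: wrap=1, slot=3    ->  same lap and 5 > 3:
 *		reclaimed by the second loop
 *	tcb->index: wrap=1, slot=9    ->  same lap but 5 < 9:
 *		still owned by the hardware, left on the send list
 */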