/*
 * Agere Systems Inc.
 * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
 *
 * Copyright © 2005 Agere Systems Inc.
 * All rights reserved.
 *   http://www.agere.com
 *
 *------------------------------------------------------------------------------
 *
 * et1310_tx.c - Routines used to perform data transmission.
 *
 *------------------------------------------------------------------------------
 *
 * SOFTWARE LICENSE
 *
 * This software is provided subject to the following terms and conditions,
 * which you should read carefully before using the software. Using this
 * software indicates your acceptance of these terms and conditions. If you do
 * not agree with these terms and conditions, do not use the software.
 *
 * Copyright © 2005 Agere Systems Inc.
 * All rights reserved.
 *
 * Redistribution and use in source or binary forms, with or without
 * modifications, are permitted provided that the following conditions are met:
 *
 * . Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following Disclaimer as comments in the code as
 *    well as in the documentation and/or other materials provided with the
 *    distribution.
 *
 * . Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following Disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * . Neither the name of Agere Systems Inc. nor the names of the contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * Disclaimer
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
 * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
 * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 */
58 #include "et131x_version.h"
59 #include "et131x_defs.h"
61 #include <linux/pci.h>
62 #include <linux/init.h>
63 #include <linux/module.h>
64 #include <linux/types.h>
65 #include <linux/kernel.h>
67 #include <linux/sched.h>
68 #include <linux/ptrace.h>
69 #include <linux/slab.h>
70 #include <linux/ctype.h>
71 #include <linux/string.h>
72 #include <linux/timer.h>
73 #include <linux/interrupt.h>
75 #include <linux/delay.h>
77 #include <linux/bitops.h>
78 #include <asm/system.h>
80 #include <linux/netdevice.h>
81 #include <linux/etherdevice.h>
82 #include <linux/skbuff.h>
83 #include <linux/if_arp.h>
84 #include <linux/ioport.h>
86 #include "et1310_phy.h"
87 #include "et1310_pm.h"
88 #include "et1310_jagcore.h"
90 #include "et131x_adapter.h"
91 #include "et131x_initpci.h"
92 #include "et131x_isr.h"
94 #include "et1310_tx.h"

static void et131x_update_tcb_list(struct et131x_adapter *etdev);
static void et131x_check_send_wait_list(struct et131x_adapter *etdev);
static inline void et131x_free_send_packet(struct et131x_adapter *etdev,
					   PMP_TCB pMpTcb);
static int et131x_send_packet(struct sk_buff *skb,
			      struct et131x_adapter *etdev);
static int nic_send_packet(struct et131x_adapter *etdev, PMP_TCB pMpTcb);
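
/*
 * TCB (Transmit Control Block) lifecycle, as implemented below:
 *
 *   TCBReadyQueueHead/Tail - free TCBs; one is taken in et131x_send_packet()
 *                            for each outgoing skb.
 *   CurrSendHead/Tail      - TCBs whose descriptors have been handed to the
 *                            TX DMA engine; they are completed and returned
 *                            to the ready queue by et131x_free_send_packet().
 *   SendWaitQueue          - list used when no TCB is available; drained by
 *                            et131x_check_send_wait_list().
 */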

/**
 * et131x_tx_dma_memory_alloc
 * @adapter: pointer to our private adapter structure
 *
 * Returns 0 on success and errno on failure (as defined in errno.h).
 *
 * Allocates memory that will be visible both to the device and to the CPU.
 * The OS will pass us packets, pointers to which we will insert in the Tx
 * Descriptor queue. The device will read this queue to find the packets in
 * memory. The device will update the "status" in memory each time it xmits a
 * packet.
 */
int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
{
	int desc_size = 0;
	TX_RING_t *tx_ring = &adapter->TxRing;

	/* Allocate memory for the TCB's (Transmit Control Block) */
	adapter->TxRing.MpTcbMem = (MP_TCB *)kcalloc(NUM_TCB, sizeof(MP_TCB),
						     GFP_ATOMIC | GFP_DMA);
	if (!adapter->TxRing.MpTcbMem) {
		dev_err(&adapter->pdev->dev, "Cannot alloc memory for TCBs\n");
		return -ENOMEM;
	}

	/* Allocate enough memory for the Tx descriptor ring, and allocate
	 * some extra so that the ring can be aligned on a 4k boundary.
	 */
	desc_size = (sizeof(TX_DESC_ENTRY_t) * NUM_DESC_PER_RING_TX) + 4096 - 1;
	tx_ring->pTxDescRingVa =
	    (PTX_DESC_ENTRY_t) pci_alloc_consistent(adapter->pdev, desc_size,
						    &tx_ring->pTxDescRingPa);
	if (!adapter->TxRing.pTxDescRingVa) {
		dev_err(&adapter->pdev->dev, "Cannot alloc memory for Tx Ring\n");
		return -ENOMEM;
	}

	/* Save physical address
	 *
	 * NOTE: pci_alloc_consistent(), used above to alloc DMA regions,
	 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
	 * are ever returned, make sure the high part is retrieved here before
	 * storing the adjusted address.
	 */
	tx_ring->pTxDescRingAdjustedPa = tx_ring->pTxDescRingPa;

	/* Align Tx Descriptor Ring on a 4k (0x1000) byte boundary */
	et131x_align_allocated_memory(adapter,
				      &tx_ring->pTxDescRingAdjustedPa,
				      &tx_ring->TxDescOffset, 0x0FFF);

	tx_ring->pTxDescRingVa += tx_ring->TxDescOffset;

	/* Allocate memory for the Tx status block */
	tx_ring->pTxStatusVa = pci_alloc_consistent(adapter->pdev,
						    sizeof(TX_STATUS_BLOCK_t),
						    &tx_ring->pTxStatusPa);
	if (!adapter->TxRing.pTxStatusPa) {
		dev_err(&adapter->pdev->dev,
			"Cannot alloc memory for Tx status block\n");
		return -ENOMEM;
	}

	/* Allocate memory for a dummy buffer */
	tx_ring->pTxDummyBlkVa = pci_alloc_consistent(adapter->pdev,
						      NIC_MIN_PACKET_SIZE,
						      &tx_ring->pTxDummyBlkPa);
	if (!adapter->TxRing.pTxDummyBlkPa) {
		dev_err(&adapter->pdev->dev,
			"Cannot alloc memory for Tx dummy buffer\n");
		return -ENOMEM;
	}

	return 0;
}

/**
 * et131x_tx_dma_memory_free - Free all memory allocated within this module
 * @adapter: pointer to our private adapter structure
 */
void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
{
	int desc_size = 0;

	if (adapter->TxRing.pTxDescRingVa) {
		/* Free memory relating to Tx rings here */
		adapter->TxRing.pTxDescRingVa -= adapter->TxRing.TxDescOffset;

		desc_size =
		    (sizeof(TX_DESC_ENTRY_t) * NUM_DESC_PER_RING_TX) + 4096 - 1;

		pci_free_consistent(adapter->pdev,
				    desc_size,
				    adapter->TxRing.pTxDescRingVa,
				    adapter->TxRing.pTxDescRingPa);

		adapter->TxRing.pTxDescRingVa = NULL;
	}

	/* Free memory for the Tx status block */
	if (adapter->TxRing.pTxStatusVa) {
		pci_free_consistent(adapter->pdev,
				    sizeof(TX_STATUS_BLOCK_t),
				    adapter->TxRing.pTxStatusVa,
				    adapter->TxRing.pTxStatusPa);

		adapter->TxRing.pTxStatusVa = NULL;
	}

	/* Free memory for the dummy buffer */
	if (adapter->TxRing.pTxDummyBlkVa) {
		pci_free_consistent(adapter->pdev,
				    NIC_MIN_PACKET_SIZE,
				    adapter->TxRing.pTxDummyBlkVa,
				    adapter->TxRing.pTxDummyBlkPa);

		adapter->TxRing.pTxDummyBlkVa = NULL;
	}

	/* Free the memory for MP_TCB structures */
	kfree(adapter->TxRing.MpTcbMem);
}

/**
 * ConfigTxDmaRegs - Set up the tx dma section of the JAGCore.
 * @etdev: pointer to our private adapter structure
 */
void ConfigTxDmaRegs(struct et131x_adapter *etdev)
{
	struct _TXDMA_t __iomem *txdma = &etdev->regs->txdma;

	/* Load the hardware with the start of the transmit descriptor ring. */
	writel((uint32_t) (etdev->TxRing.pTxDescRingAdjustedPa >> 32),
	       &txdma->pr_base_hi);
	writel((uint32_t) etdev->TxRing.pTxDescRingAdjustedPa,
	       &txdma->pr_base_lo);

	/* Initialise the transmit DMA engine */
	writel(NUM_DESC_PER_RING_TX - 1, &txdma->pr_num_des.value);

	/* Load the completion writeback physical address
	 *
	 * NOTE: pci_alloc_consistent(), used above to alloc DMA regions,
	 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
	 * are ever returned, make sure the high part is retrieved here before
	 * storing the adjusted address.
	 */
	writel(0, &txdma->dma_wb_base_hi);
	writel(etdev->TxRing.pTxStatusPa, &txdma->dma_wb_base_lo);

	memset(etdev->TxRing.pTxStatusVa, 0, sizeof(TX_STATUS_BLOCK_t));

	writel(0, &txdma->service_request);
	etdev->TxRing.txDmaReadyToSend = 0;
}
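
/*
 * ConfigTxDmaRegs() above gives the JAGCore TX DMA engine everything it needs
 * to fetch descriptors on its own: the 4k-aligned base of the descriptor
 * ring, the number of entries, and the physical address of the status block
 * used for completion write-back.  txDmaReadyToSend shadows the
 * service_request register and acts as the driver-side producer index; it is
 * zeroed here and advanced in nic_send_packet().
 */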

/**
 * et131x_tx_dma_disable - Stop Tx_DMA on the ET1310
 * @etdev: pointer to our adapter structure
 */
void et131x_tx_dma_disable(struct et131x_adapter *etdev)
{
	/* Setup the transmit dma configuration register */
	writel(ET_TXDMA_CSR_HALT | ET_TXDMA_SNGL_EPKT,
	       &etdev->regs->txdma.csr);
}

/**
 * et131x_tx_dma_enable - re-start Tx_DMA on the ET1310.
 * @etdev: pointer to our adapter structure
 *
 * Mainly used after a return to the D0 (full-power) state from a lower state.
 */
void et131x_tx_dma_enable(struct et131x_adapter *etdev)
{
	u32 csr = ET_TXDMA_SNGL_EPKT;

	if (etdev->RegistryPhyLoopbk)
		/* TxDMA is disabled for loopback operation. */
		csr |= ET_TXDMA_CSR_HALT;
	else
		/* Setup the transmit dma configuration register for normal
		 * operation
		 */
		csr |= PARM_DMA_CACHE_DEF << ET_TXDMA_CACHE_SHIFT;

	writel(csr, &etdev->regs->txdma.csr);
}

/**
 * et131x_init_send - Initialize send data structures
 * @adapter: pointer to our private adapter structure
 */
void et131x_init_send(struct et131x_adapter *adapter)
{
	PMP_TCB pMpTcb;
	uint32_t TcbCount;
	TX_RING_t *tx_ring;

	/* Setup some convenience pointers */
	tx_ring = &adapter->TxRing;
	pMpTcb = adapter->TxRing.MpTcbMem;

	tx_ring->TCBReadyQueueHead = pMpTcb;

	/* Go through and set up each TCB */
	for (TcbCount = 0; TcbCount < NUM_TCB; TcbCount++) {
		memset(pMpTcb, 0, sizeof(MP_TCB));

		/* Set the link pointer in HW TCB to the next TCB in the
		 * chain. If this is the last TCB in the chain, also set the
		 * tail pointer.
		 */
		if (TcbCount < NUM_TCB - 1) {
			pMpTcb->Next = pMpTcb + 1;
		} else {
			tx_ring->TCBReadyQueueTail = pMpTcb;
			pMpTcb->Next = (PMP_TCB) NULL;
		}

		pMpTcb++;
	}

	/* Curr send queue should now be empty */
	tx_ring->CurrSendHead = (PMP_TCB) NULL;
	tx_ring->CurrSendTail = (PMP_TCB) NULL;

	INIT_LIST_HEAD(&adapter->TxRing.SendWaitQueue);
}

/**
 * et131x_send_packets - This function is called by the OS to send packets
 * @skb: the packet(s) to send
 * @netdev: device on which to TX the above packet(s)
 *
 * Return 0 in almost all cases; non-zero value in extreme hard failure only
 */
int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
{
	int status = 0;
	struct et131x_adapter *etdev = NULL;

	etdev = netdev_priv(netdev);

	/* Send these packets
	 *
	 * NOTE: The Linux Tx entry point is only given one packet at a time
	 * to Tx, so the PacketCount and its array make no sense here
	 */

	/* Queue is not empty or TCB is not available */
	if (!list_empty(&etdev->TxRing.SendWaitQueue) ||
	    MP_TCB_RESOURCES_NOT_AVAILABLE(etdev)) {
		/* NOTE: If there's an error on send, no need to queue the
		 * packet under Linux; if we just send an error up to the
		 * netif layer, it will resend the skb to us.
		 */
		status = -ENOMEM;
	} else {
		/* We need to see if the link is up; if it's not, make the
		 * netif layer think we're good and drop the packet
		 */
		/*
		 * if( MP_SHOULD_FAIL_SEND( etdev ) ||
		 *  etdev->DriverNoPhyAccess )
		 */
		if (MP_SHOULD_FAIL_SEND(etdev) || etdev->DriverNoPhyAccess
		    || !netif_carrier_ok(netdev)) {
			dev_kfree_skb_any(skb);
			skb = NULL;

			etdev->net_stats.tx_dropped++;
		} else {
			status = et131x_send_packet(skb, etdev);

			if (status == -ENOMEM) {
				/* NOTE: If there's an error on send, no need
				 * to queue the packet under Linux; if we just
				 * send an error up to the netif layer, it
				 * will resend the skb to us.
				 */
			} else if (status != 0) {
				/* On any other error, make netif think we're
				 * OK and drop the packet
				 */
				dev_kfree_skb_any(skb);
				skb = NULL;

				etdev->net_stats.tx_dropped++;
			}
		}
	}

	return status;
}

/**
 * et131x_send_packet - Do the work to send a packet
 * @skb: the packet(s) to send
 * @etdev: a pointer to the device's private adapter structure
 *
 * Return 0 in almost all cases; non-zero value in extreme hard failure only.
 *
 * Assumption: Send spinlock has been acquired
 */
static int et131x_send_packet(struct sk_buff *skb,
			      struct et131x_adapter *etdev)
{
	int status = 0;
	PMP_TCB pMpTcb = NULL;
	uint16_t *shbufva;
	unsigned long flags;

	/* All packets must have at least a MAC address and a protocol type */
	if (skb->len < ETH_HLEN) {
		return -EIO;
	}

	/* Get a TCB for this packet */
	spin_lock_irqsave(&etdev->TCBReadyQLock, flags);

	pMpTcb = etdev->TxRing.TCBReadyQueueHead;

	if (pMpTcb == NULL) {
		spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
		return -ENOMEM;
	}

	etdev->TxRing.TCBReadyQueueHead = pMpTcb->Next;

	if (etdev->TxRing.TCBReadyQueueHead == NULL)
		etdev->TxRing.TCBReadyQueueTail = NULL;

	spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);

	pMpTcb->PacketLength = skb->len;
	pMpTcb->Packet = skb;

	if ((skb->data != NULL) && ((skb->len - skb->data_len) >= 6)) {
		shbufva = (uint16_t *) skb->data;

		if ((shbufva[0] == 0xffff) &&
		    (shbufva[1] == 0xffff) && (shbufva[2] == 0xffff)) {
			pMpTcb->Flags |= fMP_DEST_BROAD;
		} else if ((shbufva[0] & 0x3) == 0x0001) {
			pMpTcb->Flags |= fMP_DEST_MULTI;
		}
	}

	pMpTcb->Next = NULL;
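
	/*
	 * The classification above peeks at the first three 16-bit words of
	 * the destination MAC address in the linear skb data: all-ones means
	 * broadcast, otherwise the low bits of the first word are tested for
	 * the multicast (group) address bit.  Frames matching neither case
	 * are counted as unicast when the TCB is completed in
	 * et131x_free_send_packet().
	 */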

	/* Call the NIC specific send handler. */
	status = nic_send_packet(etdev, pMpTcb);

	if (status != 0) {
		spin_lock_irqsave(&etdev->TCBReadyQLock, flags);

		if (etdev->TxRing.TCBReadyQueueTail) {
			etdev->TxRing.TCBReadyQueueTail->Next = pMpTcb;
		} else {
			/* Apparently ready Q is empty. */
			etdev->TxRing.TCBReadyQueueHead = pMpTcb;
		}

		etdev->TxRing.TCBReadyQueueTail = pMpTcb;
		spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);

		return status;
	}

	WARN_ON(etdev->TxRing.nBusySend > NUM_TCB);
	return 0;
}

/**
 * nic_send_packet - NIC specific send handler for version B silicon.
 * @etdev: pointer to our adapter
 * @pMpTcb: pointer to MP_TCB
 *
 * Returns 0 or errno.
 */
static int nic_send_packet(struct et131x_adapter *etdev, PMP_TCB pMpTcb)
{
	uint32_t loopIndex;
	TX_DESC_ENTRY_t CurDesc[24];
	uint32_t FragmentNumber = 0;
	uint32_t thiscopy, remainder;
	struct sk_buff *pPacket = pMpTcb->Packet;
	uint32_t FragListCount = skb_shinfo(pPacket)->nr_frags + 1;
	struct skb_frag_struct *pFragList = &skb_shinfo(pPacket)->frags[0];
	unsigned long flags;

	/* Part of the optimizations of this send routine restrict us to
	 * sending 24 fragments at a pass. In practice we should never see
	 * more than 5 fragments.
	 *
	 * NOTE: The older version of this function (below) can handle any
	 * number of fragments. If needed, we can call this function,
	 * although it is less efficient.
	 */
	if (FragListCount > 23) {
		return -EIO;
	}

	memset(CurDesc, 0, sizeof(TX_DESC_ENTRY_t) * (FragListCount + 1));

	for (loopIndex = 0; loopIndex < FragListCount; loopIndex++) {
		/* If there is something in this element, lets get a
		 * descriptor from the ring and get the necessary data
		 */
		if (loopIndex == 0) {
			/* If the fragments are smaller than a standard MTU,
			 * then map them to a single descriptor in the Tx
			 * Desc ring. However, if they're larger, as is
			 * possible with support for jumbo packets, then
			 * split them each across 2 descriptors.
			 *
			 * This will work until we determine why the hardware
			 * doesn't seem to like large fragments.
			 */
			if ((pPacket->len - pPacket->data_len) <= 1514) {
				CurDesc[FragmentNumber].DataBufferPtrHigh = 0;
				CurDesc[FragmentNumber].word2.bits.
				    length_in_bytes =
				    pPacket->len - pPacket->data_len;

				/* NOTE: Here, the dma_addr_t returned from
				 * pci_map_single() is implicitly cast as a
				 * uint32_t. Although dma_addr_t can be
				 * 64-bit, the address returned by
				 * pci_map_single() is always 32-bit
				 * addressable (as defined by the pci/dma
				 * subsystem)
				 */
				CurDesc[FragmentNumber++].DataBufferPtrLow =
				    pci_map_single(etdev->pdev,
						   pPacket->data,
						   pPacket->len -
						   pPacket->data_len,
						   PCI_DMA_TODEVICE);
			} else {
				CurDesc[FragmentNumber].DataBufferPtrHigh = 0;
				CurDesc[FragmentNumber].word2.bits.
				    length_in_bytes =
				    ((pPacket->len - pPacket->data_len) / 2);

				/* NOTE: Here, the dma_addr_t returned from
				 * pci_map_single() is implicitly cast as a
				 * uint32_t. Although dma_addr_t can be
				 * 64-bit, the address returned by
				 * pci_map_single() is always 32-bit
				 * addressable (as defined by the pci/dma
				 * subsystem)
				 */
				CurDesc[FragmentNumber++].DataBufferPtrLow =
				    pci_map_single(etdev->pdev,
						   pPacket->data,
						   ((pPacket->len -
						     pPacket->data_len) / 2),
						   PCI_DMA_TODEVICE);

				CurDesc[FragmentNumber].DataBufferPtrHigh = 0;
				CurDesc[FragmentNumber].word2.bits.
				    length_in_bytes =
				    ((pPacket->len - pPacket->data_len) / 2);

				/* NOTE: Here, the dma_addr_t returned from
				 * pci_map_single() is implicitly cast as a
				 * uint32_t. Although dma_addr_t can be
				 * 64-bit, the address returned by
				 * pci_map_single() is always 32-bit
				 * addressable (as defined by the pci/dma
				 * subsystem)
				 */
				CurDesc[FragmentNumber++].DataBufferPtrLow =
				    pci_map_single(etdev->pdev,
						   pPacket->data +
						   ((pPacket->len -
						     pPacket->data_len) / 2),
						   ((pPacket->len -
						     pPacket->data_len) / 2),
						   PCI_DMA_TODEVICE);
			}
		} else {
			CurDesc[FragmentNumber].DataBufferPtrHigh = 0;
			CurDesc[FragmentNumber].word2.bits.length_in_bytes =
			    pFragList[loopIndex - 1].size;

			/* NOTE: Here, the dma_addr_t returned from
			 * pci_map_page() is implicitly cast as a uint32_t.
			 * Although dma_addr_t can be 64-bit, the address
			 * returned by pci_map_page() is always 32-bit
			 * addressable (as defined by the pci/dma subsystem)
			 */
			CurDesc[FragmentNumber++].DataBufferPtrLow =
			    pci_map_page(etdev->pdev,
					 pFragList[loopIndex - 1].page,
					 pFragList[loopIndex - 1].page_offset,
					 pFragList[loopIndex - 1].size,
					 PCI_DMA_TODEVICE);
		}
	}

	if (FragmentNumber == 0)
		return -EIO;

	if (etdev->linkspeed == TRUEPHY_SPEED_1000MBPS) {
		if (++etdev->TxRing.TxPacketsSinceLastinterrupt ==
		    PARM_TX_NUM_BUFS_DEF) {
			CurDesc[FragmentNumber - 1].word3.value = 0x5;
			etdev->TxRing.TxPacketsSinceLastinterrupt = 0;
		} else {
			CurDesc[FragmentNumber - 1].word3.value = 0x1;
		}
	} else {
		CurDesc[FragmentNumber - 1].word3.value = 0x5;
	}
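
	/*
	 * word3 of a packet's final descriptor carries its end-of-packet
	 * flags: 0x1 terminates the frame, while 0x5 additionally requests a
	 * completion interrupt (judging from the coalescing logic above; the
	 * exact bit layout is defined by TX_DESC_ENTRY_t).  At gigabit speed
	 * an interrupt is only requested every PARM_TX_NUM_BUFS_DEF packets,
	 * with the watchdog timer armed at the bottom of this routine as a
	 * backstop; at lower speeds every packet requests one.  The 'f' bit
	 * set just below marks descriptor 0 as the first fragment.
	 */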

	CurDesc[0].word3.bits.f = 1;

	pMpTcb->WrIndexStart = etdev->TxRing.txDmaReadyToSend;
	pMpTcb->PacketStaleCount = 0;

	spin_lock_irqsave(&etdev->SendHWLock, flags);

	thiscopy = NUM_DESC_PER_RING_TX -
				INDEX10(etdev->TxRing.txDmaReadyToSend);

	if (thiscopy >= FragmentNumber) {
		remainder = 0;
		thiscopy = FragmentNumber;
	} else {
		remainder = FragmentNumber - thiscopy;
	}

	memcpy(etdev->TxRing.pTxDescRingVa +
	       INDEX10(etdev->TxRing.txDmaReadyToSend), CurDesc,
	       sizeof(TX_DESC_ENTRY_t) * thiscopy);

	add_10bit(&etdev->TxRing.txDmaReadyToSend, thiscopy);

	if (INDEX10(etdev->TxRing.txDmaReadyToSend) == 0 ||
	    INDEX10(etdev->TxRing.txDmaReadyToSend) == NUM_DESC_PER_RING_TX) {
		etdev->TxRing.txDmaReadyToSend &= ~ET_DMA10_MASK;
		etdev->TxRing.txDmaReadyToSend ^= ET_DMA10_WRAP;
	}
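
	/*
	 * txDmaReadyToSend is a 10-bit descriptor index with an extra wrap
	 * bit above it: INDEX10() masks the index out with ET_DMA10_MASK,
	 * add_10bit() advances it, and the wrap bit (ET_DMA10_WRAP) is
	 * toggled each time the index passes the end of the ring, as done
	 * just above.  The wrap bit lets producer and consumer positions be
	 * compared unambiguously even when the raw indices are equal.
	 * Roughly:
	 *
	 *	slot = value & ET_DMA10_MASK;	/+ descriptor slot in the ring +/
	 *	lap  = value & ET_DMA10_WRAP;	/+ flips once per full lap +/
	 */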

	if (remainder) {
		memcpy(etdev->TxRing.pTxDescRingVa,
		       CurDesc + thiscopy,
		       sizeof(TX_DESC_ENTRY_t) * remainder);

		add_10bit(&etdev->TxRing.txDmaReadyToSend, remainder);
	}

	if (INDEX10(etdev->TxRing.txDmaReadyToSend) == 0) {
		if (etdev->TxRing.txDmaReadyToSend)
			pMpTcb->WrIndex = NUM_DESC_PER_RING_TX - 1;
		else
			pMpTcb->WrIndex =
			    ET_DMA10_WRAP | (NUM_DESC_PER_RING_TX - 1);
	} else
		pMpTcb->WrIndex = etdev->TxRing.txDmaReadyToSend - 1;

	spin_lock(&etdev->TCBSendQLock);

	if (etdev->TxRing.CurrSendTail)
		etdev->TxRing.CurrSendTail->Next = pMpTcb;
	else
		etdev->TxRing.CurrSendHead = pMpTcb;

	etdev->TxRing.CurrSendTail = pMpTcb;

	WARN_ON(pMpTcb->Next != NULL);

	etdev->TxRing.nBusySend++;

	spin_unlock(&etdev->TCBSendQLock);

	/* Write the new write pointer back to the device. */
	writel(etdev->TxRing.txDmaReadyToSend,
	       &etdev->regs->txdma.service_request);

	/* For Gig only, we use Tx Interrupt coalescing. Enable the software
	 * timer to wake us up if this packet isn't followed by N more.
	 */
	if (etdev->linkspeed == TRUEPHY_SPEED_1000MBPS) {
		writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
		       &etdev->regs->global.watchdog_timer);
	}

	spin_unlock_irqrestore(&etdev->SendHWLock, flags);

	return 0;
}

/**
 * et131x_free_send_packet - Recycle a MP_TCB, complete the packet if necessary
 * @etdev: pointer to our adapter
 * @pMpTcb: pointer to MP_TCB
 *
 * Assumption - Send spinlock has been acquired
 */
inline void et131x_free_send_packet(struct et131x_adapter *etdev,
				    PMP_TCB pMpTcb)
{
	unsigned long flags;
	TX_DESC_ENTRY_t *desc = NULL;
	struct net_device_stats *stats = &etdev->net_stats;

	if (pMpTcb->Flags & fMP_DEST_BROAD)
		atomic_inc(&etdev->Stats.brdcstxmt);
	else if (pMpTcb->Flags & fMP_DEST_MULTI)
		atomic_inc(&etdev->Stats.multixmt);
	else
		atomic_inc(&etdev->Stats.unixmt);

	if (pMpTcb->Packet) {
		stats->tx_bytes += pMpTcb->Packet->len;

		/* Iterate through the TX descriptors on the ring
		 * corresponding to this packet and unmap the fragments
		 * they point to
		 */
		do {
			desc =
			    (TX_DESC_ENTRY_t *) (etdev->TxRing.pTxDescRingVa +
						 INDEX10(pMpTcb->WrIndexStart));

			pci_unmap_single(etdev->pdev,
					 desc->DataBufferPtrLow,
					 desc->word2.value, PCI_DMA_TODEVICE);

			add_10bit(&pMpTcb->WrIndexStart, 1);
			if (INDEX10(pMpTcb->WrIndexStart) >=
			    NUM_DESC_PER_RING_TX) {
				pMpTcb->WrIndexStart &= ~ET_DMA10_MASK;
				pMpTcb->WrIndexStart ^= ET_DMA10_WRAP;
			}
		} while (desc != (etdev->TxRing.pTxDescRingVa +
				  INDEX10(pMpTcb->WrIndex)));

		dev_kfree_skb_any(pMpTcb->Packet);
	}

	memset(pMpTcb, 0, sizeof(MP_TCB));

	/* Add the TCB to the Ready Q */
	spin_lock_irqsave(&etdev->TCBReadyQLock, flags);

	etdev->Stats.opackets++;

	if (etdev->TxRing.TCBReadyQueueTail) {
		etdev->TxRing.TCBReadyQueueTail->Next = pMpTcb;
	} else {
		/* Apparently ready Q is empty. */
		etdev->TxRing.TCBReadyQueueHead = pMpTcb;
	}

	etdev->TxRing.TCBReadyQueueTail = pMpTcb;

	spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
	WARN_ON(etdev->TxRing.nBusySend < 0);
}
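
/*
 * Note that the unmap loop above walks the descriptor ring from the TCB's
 * WrIndexStart through WrIndex inclusive, using the same 10-bit
 * index-plus-wrap-bit arithmetic as nic_send_packet(), so every DMA mapping
 * created for the packet is released before its skb is freed.
 */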

/**
 * et131x_free_busy_send_packets - Free and complete the stopped active sends
 * @etdev: pointer to our adapter
 *
 * Assumption - Send spinlock has been acquired
 */
void et131x_free_busy_send_packets(struct et131x_adapter *etdev)
{
	PMP_TCB pMpTcb;
	struct list_head *entry;
	unsigned long flags;
	uint32_t FreeCounter = 0;

	while (!list_empty(&etdev->TxRing.SendWaitQueue)) {
		spin_lock_irqsave(&etdev->SendWaitLock, flags);

		etdev->TxRing.nWaitSend--;
		spin_unlock_irqrestore(&etdev->SendWaitLock, flags);

		entry = etdev->TxRing.SendWaitQueue.next;
	}

	etdev->TxRing.nWaitSend = 0;

	/* Any packets being sent? Check the first TCB on the send list */
	spin_lock_irqsave(&etdev->TCBSendQLock, flags);

	pMpTcb = etdev->TxRing.CurrSendHead;

	while ((pMpTcb != NULL) && (FreeCounter < NUM_TCB)) {
		PMP_TCB pNext = pMpTcb->Next;

		etdev->TxRing.CurrSendHead = pNext;

		if (pNext == NULL)
			etdev->TxRing.CurrSendTail = NULL;

		etdev->TxRing.nBusySend--;

		spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);

		FreeCounter++;
		et131x_free_send_packet(etdev, pMpTcb);

		spin_lock_irqsave(&etdev->TCBSendQLock, flags);

		pMpTcb = etdev->TxRing.CurrSendHead;
	}

	WARN_ON(FreeCounter == NUM_TCB);

	spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);

	etdev->TxRing.nBusySend = 0;
}

/**
 * et131x_handle_send_interrupt - Interrupt handler for send processing
 * @etdev: pointer to our adapter
 *
 * Re-claim the send resources, complete sends and get more to send from
 * the send wait queue.
 *
 * Assumption - Send spinlock has been acquired
 */
void et131x_handle_send_interrupt(struct et131x_adapter *etdev)
{
	/* Mark as completed any packets which have been sent by the device. */
	et131x_update_tcb_list(etdev);

	/* If we queued any transmits because we didn't have any TCBs earlier,
	 * dequeue and send those packets now, as long as we have free TCBs.
	 */
	et131x_check_send_wait_list(etdev);
}

/**
 * et131x_update_tcb_list - Helper routine for Send Interrupt handler
 * @etdev: pointer to our adapter
 *
 * Re-claims the send resources and completes sends. Can also be called as
 * part of the NIC send routine when the "ServiceComplete" indication has
 * wrapped.
 */
static void et131x_update_tcb_list(struct et131x_adapter *etdev)
{
	unsigned long flags;
	u32 ServiceComplete;
	PMP_TCB pMpTcb;
	u32 index;

	ServiceComplete = readl(&etdev->regs->txdma.NewServiceComplete);
	index = INDEX10(ServiceComplete);

	/* Has the ring wrapped? Process any descriptors that do not have
	 * the same "wrap" indicator as the current completion indicator
	 */
	spin_lock_irqsave(&etdev->TCBSendQLock, flags);

	pMpTcb = etdev->TxRing.CurrSendHead;

	while (pMpTcb &&
	       ((ServiceComplete ^ pMpTcb->WrIndex) & ET_DMA10_WRAP) &&
	       index < INDEX10(pMpTcb->WrIndex)) {
		etdev->TxRing.nBusySend--;
		etdev->TxRing.CurrSendHead = pMpTcb->Next;
		if (pMpTcb->Next == NULL)
			etdev->TxRing.CurrSendTail = NULL;

		spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
		et131x_free_send_packet(etdev, pMpTcb);
		spin_lock_irqsave(&etdev->TCBSendQLock, flags);

		/* Go to the next packet */
		pMpTcb = etdev->TxRing.CurrSendHead;
	}
	while (pMpTcb &&
	       !((ServiceComplete ^ pMpTcb->WrIndex) & ET_DMA10_WRAP)
	       && index > (pMpTcb->WrIndex & ET_DMA10_MASK)) {
		etdev->TxRing.nBusySend--;
		etdev->TxRing.CurrSendHead = pMpTcb->Next;
		if (pMpTcb->Next == NULL)
			etdev->TxRing.CurrSendTail = NULL;

		spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
		et131x_free_send_packet(etdev, pMpTcb);
		spin_lock_irqsave(&etdev->TCBSendQLock, flags);

		/* Go to the next packet */
		pMpTcb = etdev->TxRing.CurrSendHead;
	}

	/* Wake up the queue when we hit a low-water mark */
	if (etdev->TxRing.nBusySend <= (NUM_TCB / 3))
		netif_wake_queue(etdev->netdev);

	spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
}
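
/*
 * The two reap loops above differ only in how they compare a TCB's WrIndex
 * against the completion index read from the hardware: the first handles
 * TCBs posted on the previous lap of the ring (wrap bits differ), the second
 * handles TCBs on the current lap whose last descriptor lies below the
 * completion index.  In both cases the TCB is only handed to
 * et131x_free_send_packet() once the DMA engine has consumed all of its
 * descriptors.
 */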

/**
 * et131x_check_send_wait_list - Helper routine for the interrupt handler
 * @etdev: pointer to our adapter
 *
 * Takes packets from the send wait queue and posts them to the device (if
 * TCBs are available).
 */
static void et131x_check_send_wait_list(struct et131x_adapter *etdev)
{
	unsigned long flags;

	spin_lock_irqsave(&etdev->SendWaitLock, flags);

	while (!list_empty(&etdev->TxRing.SendWaitQueue) &&
	       MP_TCB_RESOURCES_AVAILABLE(etdev)) {
		struct list_head *entry;

		entry = etdev->TxRing.SendWaitQueue.next;

		etdev->TxRing.nWaitSend--;
	}

	spin_unlock_irqrestore(&etdev->SendWaitLock, flags);
}