/*
 * Agere Systems Inc.
 * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
 *
 * Copyright © 2005 Agere Systems Inc.
 * All rights reserved.
 *   http://www.agere.com
 *
 *------------------------------------------------------------------------------
 *
 * et1310_rx.c - Routines used to perform data reception
 *
 *------------------------------------------------------------------------------
 *
 * SOFTWARE LICENSE
 *
 * This software is provided subject to the following terms and conditions,
 * which you should read carefully before using the software. Using this
 * software indicates your acceptance of these terms and conditions. If you do
 * not agree with these terms and conditions, do not use the software.
 *
 * Copyright © 2005 Agere Systems Inc.
 * All rights reserved.
 *
 * Redistribution and use in source or binary forms, with or without
 * modifications, are permitted provided that the following conditions are met:
 *
 * . Redistributions of source code must retain the above copyright notice, this
 *   list of conditions and the following Disclaimer as comments in the code as
 *   well as in the documentation and/or other materials provided with the
 *   distribution.
 *
 * . Redistributions in binary form must reproduce the above copyright notice,
 *   this list of conditions and the following Disclaimer in the documentation
 *   and/or other materials provided with the distribution.
 *
 * . Neither the name of Agere Systems Inc. nor the names of the contributors
 *   may be used to endorse or promote products derived from this software
 *   without specific prior written permission.
 *
 * Disclaimer
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
 * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
 * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include "et131x_version.h"
#include "et131x_debug.h"
#include "et131x_defs.h"

#include <linux/pci.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>

#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/delay.h>
#include <asm/io.h>
#include <asm/system.h>
#include <asm/bitops.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/ioport.h>

#include "et1310_phy.h"
#include "et1310_pm.h"
#include "et1310_jagcore.h"

#include "et131x_adapter.h"
#include "et131x_initpci.h"

#include "et1310_rx.h"

/* Data for debugging facilities */
#ifdef CONFIG_ET131X_DEBUG
extern dbg_info_t *et131x_dbginfo;
#endif /* CONFIG_ET131X_DEBUG */

void nic_return_rfd(struct et131x_adapter *pAdapter, PMP_RFD pMpRfd);

/**
 * et131x_rx_dma_memory_alloc
 * @adapter: pointer to our private adapter structure
 *
 * Returns 0 on success and errno on failure (as defined in errno.h)
 *
 * Allocates Free buffer ring 1 for sure, free buffer ring 0 if required,
 * and the Packet Status Ring.
 */
int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
{
	uint32_t OuterLoop, InnerLoop;
	uint32_t bufsize;
	uint32_t pktStatRingSize, FBRChunkSize;
	RX_RING_t *rx_ring;

	DBG_ENTER(et131x_dbginfo);

	/* Setup some convenience pointers */
	rx_ring = (RX_RING_t *) &adapter->RxRing;

	/* Alloc memory for the lookup table */
#ifdef USE_FBR0
	rx_ring->Fbr[0] = kmalloc(sizeof(FBRLOOKUPTABLE), GFP_KERNEL);
#endif

	rx_ring->Fbr[1] = kmalloc(sizeof(FBRLOOKUPTABLE), GFP_KERNEL);

	/* The first thing we will do is configure the sizes of the buffer
	 * rings. These will change based on jumbo packet support. Larger
	 * jumbo packets increase the size of each entry in FBR0, and the
	 * number of entries in FBR0, while at the same time decreasing the
	 * number of entries in FBR1.
	 *
	 * FBR1 holds "large" frames, FBR0 holds "small" frames. If FBR1
	 * entries are huge in order to accommodate a "jumbo" frame, then it
	 * will have fewer entries. Conversely, FBR0 will now be relied upon
	 * to carry more "normal" frames, thus its entry size also increases
	 * and the number of entries goes up too (since it now carries
	 * "small" + "regular" packets).
	 *
	 * In this scheme, we try to maintain 512 entries between the two
	 * rings. Also, FBR1 remains a constant size - when its size doubles
	 * the number of entries halves. FBR0 increases in size, however.
	 */

	if (adapter->RegistryJumboPacket < 2048) {
#ifdef USE_FBR0
		rx_ring->Fbr0BufferSize = 256;
		rx_ring->Fbr0NumEntries = 512;
#endif
		rx_ring->Fbr1BufferSize = 2048;
		rx_ring->Fbr1NumEntries = 512;
	} else if (adapter->RegistryJumboPacket < 4096) {
#ifdef USE_FBR0
		rx_ring->Fbr0BufferSize = 512;
		rx_ring->Fbr0NumEntries = 1024;
#endif
		rx_ring->Fbr1BufferSize = 4096;
		rx_ring->Fbr1NumEntries = 512;
	} else {
#ifdef USE_FBR0
		rx_ring->Fbr0BufferSize = 1024;
		rx_ring->Fbr0NumEntries = 768;
#endif
		rx_ring->Fbr1BufferSize = 16384;
		rx_ring->Fbr1NumEntries = 128;
	}

#ifdef USE_FBR0
	adapter->RxRing.PsrNumEntries = adapter->RxRing.Fbr0NumEntries +
	    adapter->RxRing.Fbr1NumEntries;
#else
	adapter->RxRing.PsrNumEntries = adapter->RxRing.Fbr1NumEntries;
#endif
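
	/* A quick worked example with the default (non-jumbo) settings above:
	 * FBR0 contributes 512 entries and FBR1 contributes 512 entries, so
	 * the Packet Status Ring is sized for 1024 outstanding descriptors,
	 * i.e. one PSR entry for every free buffer the hardware can hand back.
	 */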

	/* Allocate an area of memory for Free Buffer Ring 1 */
	bufsize = (sizeof(FBR_DESC_t) * rx_ring->Fbr1NumEntries) + 0xfff;
	rx_ring->pFbr1RingVa = pci_alloc_consistent(adapter->pdev,
						    bufsize,
						    &rx_ring->pFbr1RingPa);
	if (!rx_ring->pFbr1RingVa) {
		DBG_ERROR(et131x_dbginfo,
			  "Cannot alloc memory for Free Buffer Ring 1\n");
		DBG_LEAVE(et131x_dbginfo);
		return -ENOMEM;
	}

	/* Save physical address
	 *
	 * NOTE: pci_alloc_consistent(), used above to alloc DMA regions,
	 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
	 * are ever returned, make sure the high part is retrieved here
	 * before storing the adjusted address.
	 */
	rx_ring->Fbr1Realpa = rx_ring->pFbr1RingPa;

	/* Align Free Buffer Ring 1 on a 4K boundary */
	et131x_align_allocated_memory(adapter,
				      &rx_ring->Fbr1Realpa,
				      &rx_ring->Fbr1offset, 0x0FFF);

	rx_ring->pFbr1RingVa = (void *)((uint8_t *) rx_ring->pFbr1RingVa +
					rx_ring->Fbr1offset);
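
	/* The ring was over-allocated by 0xfff bytes above; the call to
	 * et131x_align_allocated_memory() bumps the saved physical address up
	 * to the next 4 KB boundary and reports the adjustment in Fbr1offset,
	 * which is then added to the virtual address as well so both views of
	 * the ring stay in step. (Description inferred from the usage here;
	 * the helper itself lives elsewhere in the driver.)
	 */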

#ifdef USE_FBR0
	/* Allocate an area of memory for Free Buffer Ring 0 */
	bufsize = (sizeof(FBR_DESC_t) * rx_ring->Fbr0NumEntries) + 0xfff;
	rx_ring->pFbr0RingVa = pci_alloc_consistent(adapter->pdev,
						    bufsize,
						    &rx_ring->pFbr0RingPa);
	if (!rx_ring->pFbr0RingVa) {
		DBG_ERROR(et131x_dbginfo,
			  "Cannot alloc memory for Free Buffer Ring 0\n");
		DBG_LEAVE(et131x_dbginfo);
		return -ENOMEM;
	}

	/* Save physical address
	 *
	 * NOTE: pci_alloc_consistent(), used above to alloc DMA regions,
	 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
	 * are ever returned, make sure the high part is retrieved here before
	 * storing the adjusted address.
	 */
	rx_ring->Fbr0Realpa = rx_ring->pFbr0RingPa;

	/* Align Free Buffer Ring 0 on a 4K boundary */
	et131x_align_allocated_memory(adapter,
				      &rx_ring->Fbr0Realpa,
				      &rx_ring->Fbr0offset, 0x0FFF);

	rx_ring->pFbr0RingVa = (void *)((uint8_t *) rx_ring->pFbr0RingVa +
					rx_ring->Fbr0offset);
#endif

	for (OuterLoop = 0; OuterLoop < (rx_ring->Fbr1NumEntries / FBR_CHUNKS);
	     OuterLoop++) {
		uint64_t Fbr1Offset;
		uint64_t Fbr1TempPa;
		uint32_t Fbr1Align;

		/* This code allocates an area of memory big enough for N
		 * free buffers + (buffer_size - 1) so that the buffers can
		 * be aligned on 4k boundaries. If each buffer were aligned
		 * to a buffer_size boundary, the effect would be to double
		 * the size of FBR0. By allocating N buffers at once, we
		 * reduce this overhead.
		 */
		if (rx_ring->Fbr1BufferSize > 4096) {
			Fbr1Align = 4096;
		} else {
			Fbr1Align = rx_ring->Fbr1BufferSize;
		}

		FBRChunkSize =
		    (FBR_CHUNKS * rx_ring->Fbr1BufferSize) + Fbr1Align - 1;
		rx_ring->Fbr1MemVa[OuterLoop] =
		    pci_alloc_consistent(adapter->pdev, FBRChunkSize,
					 &rx_ring->Fbr1MemPa[OuterLoop]);

		if (!rx_ring->Fbr1MemVa[OuterLoop]) {
			DBG_ERROR(et131x_dbginfo, "Could not alloc memory\n");
			DBG_LEAVE(et131x_dbginfo);
			return -ENOMEM;
		}

		/* See NOTE in "Save Physical Address" comment above */
		Fbr1TempPa = rx_ring->Fbr1MemPa[OuterLoop];

		et131x_align_allocated_memory(adapter,
					      &Fbr1TempPa,
					      &Fbr1Offset, (Fbr1Align - 1));

		for (InnerLoop = 0; InnerLoop < FBR_CHUNKS; InnerLoop++) {
			uint32_t index = (OuterLoop * FBR_CHUNKS) + InnerLoop;

			/* Save the Virtual address of this index for quick
			 * access later
			 */
			rx_ring->Fbr[1]->Va[index] =
			    (uint8_t *) rx_ring->Fbr1MemVa[OuterLoop] +
			    (InnerLoop * rx_ring->Fbr1BufferSize) + Fbr1Offset;

			/* now store the physical address in the descriptor
			 * so the device can access it
			 */
			rx_ring->Fbr[1]->PAHigh[index] =
			    (uint32_t) (Fbr1TempPa >> 32);
			rx_ring->Fbr[1]->PALow[index] = (uint32_t) Fbr1TempPa;

			Fbr1TempPa += rx_ring->Fbr1BufferSize;

			rx_ring->Fbr[1]->Buffer1[index] =
			    rx_ring->Fbr[1]->Va[index];
			rx_ring->Fbr[1]->Buffer2[index] =
			    rx_ring->Fbr[1]->Va[index] - 4;
		}
	}
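
	/* At this point each FBR1 buffer is reachable two ways: through the
	 * Fbr[1]->Va[] table for quick CPU access, and through the
	 * PAHigh[]/PALow[] pair, which splits the 64-bit bus address into the
	 * two 32-bit halves the free buffer descriptors expect.
	 */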

#ifdef USE_FBR0
	/* Same for FBR0 (if in use) */
	for (OuterLoop = 0; OuterLoop < (rx_ring->Fbr0NumEntries / FBR_CHUNKS);
	     OuterLoop++) {
		uint64_t Fbr0Offset;
		uint64_t Fbr0TempPa;

		FBRChunkSize = ((FBR_CHUNKS + 1) * rx_ring->Fbr0BufferSize) - 1;
		rx_ring->Fbr0MemVa[OuterLoop] =
		    pci_alloc_consistent(adapter->pdev, FBRChunkSize,
					 &rx_ring->Fbr0MemPa[OuterLoop]);

		if (!rx_ring->Fbr0MemVa[OuterLoop]) {
			DBG_ERROR(et131x_dbginfo, "Could not alloc memory\n");
			DBG_LEAVE(et131x_dbginfo);
			return -ENOMEM;
		}

		/* See NOTE in "Save Physical Address" comment above */
		Fbr0TempPa = rx_ring->Fbr0MemPa[OuterLoop];

		et131x_align_allocated_memory(adapter,
					      &Fbr0TempPa,
					      &Fbr0Offset,
					      rx_ring->Fbr0BufferSize - 1);

		for (InnerLoop = 0; InnerLoop < FBR_CHUNKS; InnerLoop++) {
			uint32_t index = (OuterLoop * FBR_CHUNKS) + InnerLoop;

			rx_ring->Fbr[0]->Va[index] =
			    (uint8_t *) rx_ring->Fbr0MemVa[OuterLoop] +
			    (InnerLoop * rx_ring->Fbr0BufferSize) + Fbr0Offset;

			rx_ring->Fbr[0]->PAHigh[index] =
			    (uint32_t) (Fbr0TempPa >> 32);
			rx_ring->Fbr[0]->PALow[index] = (uint32_t) Fbr0TempPa;

			Fbr0TempPa += rx_ring->Fbr0BufferSize;

			rx_ring->Fbr[0]->Buffer1[index] =
			    rx_ring->Fbr[0]->Va[index];
			rx_ring->Fbr[0]->Buffer2[index] =
			    rx_ring->Fbr[0]->Va[index] - 4;
		}
	}
#endif

	/* Allocate an area of memory for FIFO of Packet Status ring entries */
	pktStatRingSize =
	    sizeof(PKT_STAT_DESC_t) * adapter->RxRing.PsrNumEntries;

	rx_ring->pPSRingVa = pci_alloc_consistent(adapter->pdev,
						  pktStatRingSize + 0x0fff,
						  &rx_ring->pPSRingPa);

	if (!rx_ring->pPSRingVa) {
		DBG_ERROR(et131x_dbginfo,
			  "Cannot alloc memory for Packet Status Ring\n");
		DBG_LEAVE(et131x_dbginfo);
		return -ENOMEM;
	}

	/* Save physical address
	 *
	 * NOTE : pci_alloc_consistent(), used above to alloc DMA regions,
	 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
	 * are ever returned, make sure the high part is retrieved here before
	 * storing the adjusted address.
	 */
	rx_ring->pPSRingRealPa = rx_ring->pPSRingPa;

	/* Align Packet Status Ring on a 4K boundary */
	et131x_align_allocated_memory(adapter,
				      &rx_ring->pPSRingRealPa,
				      &rx_ring->pPSRingOffset, 0x0FFF);

	rx_ring->pPSRingVa = (void *)((uint8_t *) rx_ring->pPSRingVa +
				      rx_ring->pPSRingOffset);

	/* Allocate an area of memory for writeback of status information */
	rx_ring->pRxStatusVa = pci_alloc_consistent(adapter->pdev,
						    sizeof(RX_STATUS_BLOCK_t) +
						    0x7, &rx_ring->pRxStatusPa);
	if (!rx_ring->pRxStatusVa) {
		DBG_ERROR(et131x_dbginfo,
			  "Cannot alloc memory for Status Block\n");
		DBG_LEAVE(et131x_dbginfo);
		return -ENOMEM;
	}

	/* Save physical address */
	rx_ring->RxStatusRealPA = rx_ring->pRxStatusPa;

	/* Align write back on an 8 byte boundary */
	et131x_align_allocated_memory(adapter,
				      &rx_ring->RxStatusRealPA,
				      &rx_ring->RxStatusOffset, 0x07);

	rx_ring->pRxStatusVa = (void *)((uint8_t *) rx_ring->pRxStatusVa +
					rx_ring->RxStatusOffset);
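
	/* The status block is where the DMA engine writes back the current
	 * PSR/FBR positions before raising an interrupt (see nic_rx_pkts()
	 * below), so only a modest 7-byte pad is needed here to guarantee the
	 * 8-byte alignment applied above.
	 */
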
	rx_ring->NumRfd = NIC_DEFAULT_NUM_RFD;

	/* Recv
	 * kmem_cache_create() initializes a lookaside list. After successful
	 * creation, nonpaged fixed-size blocks can be allocated from and
	 * freed to the lookaside list.
	 * RFDs will be allocated from this pool.
	 */
	rx_ring->RecvLookaside = kmem_cache_create(adapter->netdev->name,
						   sizeof(MP_RFD),
						   0,
						   SLAB_CACHE_DMA |
						   SLAB_HWCACHE_ALIGN,
						   NULL);

	MP_SET_FLAG(adapter, fMP_ADAPTER_RECV_LOOKASIDE);

	/* The RFDs are going to be put on lists later on, so initialize the
	 * lists now.
	 */
	INIT_LIST_HEAD(&rx_ring->RecvList);
	INIT_LIST_HEAD(&rx_ring->RecvPendingList);

	DBG_LEAVE(et131x_dbginfo);
	return 0;
}

/**
 * et131x_rx_dma_memory_free - Free all memory allocated within this module.
 * @adapter: pointer to our private adapter structure
 */
void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
{
	uint32_t index;
	uint32_t bufsize;
	uint32_t pktStatRingSize;
	PMP_RFD pMpRfd;
	RX_RING_t *rx_ring;

	DBG_ENTER(et131x_dbginfo);

	/* Setup some convenience pointers */
	rx_ring = (RX_RING_t *) &adapter->RxRing;

	/* Free RFDs and associated packet descriptors */
	DBG_ASSERT(rx_ring->nReadyRecv == rx_ring->NumRfd);

	while (!list_empty(&rx_ring->RecvList)) {
		pMpRfd = (MP_RFD *) list_entry(rx_ring->RecvList.next,
					       MP_RFD, list_node);

		list_del(&pMpRfd->list_node);
		et131x_rfd_resources_free(adapter, pMpRfd);
	}

	while (!list_empty(&rx_ring->RecvPendingList)) {
		pMpRfd = (MP_RFD *) list_entry(rx_ring->RecvPendingList.next,
					       MP_RFD, list_node);
		list_del(&pMpRfd->list_node);
		et131x_rfd_resources_free(adapter, pMpRfd);
	}

	/* Free Free Buffer Ring 1 */
	if (rx_ring->pFbr1RingVa) {
		/* First the packet memory */
		for (index = 0; index <
		     (rx_ring->Fbr1NumEntries / FBR_CHUNKS); index++) {
			if (rx_ring->Fbr1MemVa[index]) {
				uint32_t Fbr1Align;

				if (rx_ring->Fbr1BufferSize > 4096) {
					Fbr1Align = 4096;
				} else {
					Fbr1Align = rx_ring->Fbr1BufferSize;
				}

				bufsize =
				    (rx_ring->Fbr1BufferSize * FBR_CHUNKS) +
				    Fbr1Align - 1;

				pci_free_consistent(adapter->pdev,
						    bufsize,
						    rx_ring->Fbr1MemVa[index],
						    rx_ring->Fbr1MemPa[index]);

				rx_ring->Fbr1MemVa[index] = NULL;
			}
		}

		/* Now the FIFO itself */
		rx_ring->pFbr1RingVa = (void *)((uint8_t *) rx_ring->pFbr1RingVa -
						rx_ring->Fbr1offset);

		bufsize =
		    (sizeof(FBR_DESC_t) * rx_ring->Fbr1NumEntries) + 0xfff;

		pci_free_consistent(adapter->pdev,
				    bufsize,
				    rx_ring->pFbr1RingVa, rx_ring->pFbr1RingPa);

		rx_ring->pFbr1RingVa = NULL;
	}

#ifdef USE_FBR0
	/* Now the same for Free Buffer Ring 0 */
	if (rx_ring->pFbr0RingVa) {
		/* First the packet memory */
		for (index = 0; index <
		     (rx_ring->Fbr0NumEntries / FBR_CHUNKS); index++) {
			if (rx_ring->Fbr0MemVa[index]) {
				bufsize =
				    (rx_ring->Fbr0BufferSize *
				     (FBR_CHUNKS + 1)) - 1;

				pci_free_consistent(adapter->pdev,
						    bufsize,
						    rx_ring->Fbr0MemVa[index],
						    rx_ring->Fbr0MemPa[index]);

				rx_ring->Fbr0MemVa[index] = NULL;
			}
		}

		/* Now the FIFO itself */
		rx_ring->pFbr0RingVa = (void *)((uint8_t *) rx_ring->pFbr0RingVa -
						rx_ring->Fbr0offset);

		bufsize =
		    (sizeof(FBR_DESC_t) * rx_ring->Fbr0NumEntries) + 0xfff;

		pci_free_consistent(adapter->pdev,
				    bufsize,
				    rx_ring->pFbr0RingVa, rx_ring->pFbr0RingPa);

		rx_ring->pFbr0RingVa = NULL;
	}
#endif

	/* Free Packet Status Ring */
	if (rx_ring->pPSRingVa) {
		rx_ring->pPSRingVa = (void *)((uint8_t *) rx_ring->pPSRingVa -
					      rx_ring->pPSRingOffset);

		pktStatRingSize =
		    sizeof(PKT_STAT_DESC_t) * adapter->RxRing.PsrNumEntries;

		pci_free_consistent(adapter->pdev,
				    pktStatRingSize + 0x0fff,
				    rx_ring->pPSRingVa, rx_ring->pPSRingPa);

		rx_ring->pPSRingVa = NULL;
	}

	/* Free area of memory for the writeback of status information */
	if (rx_ring->pRxStatusVa) {
		rx_ring->pRxStatusVa = (void *)((uint8_t *) rx_ring->pRxStatusVa -
						rx_ring->RxStatusOffset);

		pci_free_consistent(adapter->pdev,
				    sizeof(RX_STATUS_BLOCK_t) + 0x7,
				    rx_ring->pRxStatusVa, rx_ring->pRxStatusPa);

		rx_ring->pRxStatusVa = NULL;
	}

	/* Free receive buffer pool */

	/* Free receive packet pool */

	/* Destroy the lookaside (RFD) pool */
	if (MP_TEST_FLAG(adapter, fMP_ADAPTER_RECV_LOOKASIDE)) {
		kmem_cache_destroy(rx_ring->RecvLookaside);
		MP_CLEAR_FLAG(adapter, fMP_ADAPTER_RECV_LOOKASIDE);
	}

	/* Free the FBR Lookup Table */
#ifdef USE_FBR0
	kfree(rx_ring->Fbr[0]);
#endif

	kfree(rx_ring->Fbr[1]);

	/* Reset Counters */
	rx_ring->nReadyRecv = 0;

	DBG_LEAVE(et131x_dbginfo);
}

/**
 * et131x_init_recv - Initialize receive data structures.
 * @adapter: pointer to our private adapter structure
 *
 * Returns 0 on success and errno on failure (as defined in errno.h)
 */
int et131x_init_recv(struct et131x_adapter *adapter)
{
	int status = -ENOMEM;
	PMP_RFD pMpRfd = NULL;
	uint32_t RfdCount;
	uint32_t TotalNumRfd = 0;
	RX_RING_t *rx_ring = NULL;

	DBG_ENTER(et131x_dbginfo);

	/* Setup some convenience pointers */
	rx_ring = (RX_RING_t *) &adapter->RxRing;

	/* Setup each RFD */
	for (RfdCount = 0; RfdCount < rx_ring->NumRfd; RfdCount++) {
		pMpRfd = (MP_RFD *) kmem_cache_alloc(rx_ring->RecvLookaside,
						     GFP_ATOMIC | GFP_DMA);

		if (!pMpRfd) {
			DBG_ERROR(et131x_dbginfo,
				  "Couldn't alloc RFD out of kmem_cache\n");
			status = -ENOMEM;
			continue;
		}

		status = et131x_rfd_resources_alloc(adapter, pMpRfd);
		if (status != 0) {
			DBG_ERROR(et131x_dbginfo,
				  "Couldn't alloc packet for RFD\n");
			kmem_cache_free(rx_ring->RecvLookaside, pMpRfd);
			continue;
		}

		/* Add this RFD to the RecvList */
		list_add_tail(&pMpRfd->list_node, &rx_ring->RecvList);

		/* Increment both the available RFD's, and the total RFD's. */
		rx_ring->nReadyRecv++;
		TotalNumRfd++;
	}

	if (TotalNumRfd > NIC_MIN_NUM_RFD) {
		status = 0;
	}

	rx_ring->NumRfd = TotalNumRfd;

	if (status != 0) {
		kmem_cache_free(rx_ring->RecvLookaside, pMpRfd);
		DBG_ERROR(et131x_dbginfo,
			  "Allocation problems in et131x_init_recv\n");
	}

	DBG_LEAVE(et131x_dbginfo);
	return status;
}

/**
 * et131x_rfd_resources_alloc
 * @adapter: pointer to our private adapter structure
 * @pMpRfd: pointer to a RFD
 *
 * Returns 0 on success and errno on failure (as defined in errno.h)
 */
int et131x_rfd_resources_alloc(struct et131x_adapter *adapter, MP_RFD *pMpRfd)
{
	pMpRfd->Packet = NULL;

	return 0;
}

/**
 * et131x_rfd_resources_free - Free the packet allocated for the given RFD
 * @adapter: pointer to our private adapter structure
 * @pMpRfd: pointer to a RFD
 */
void et131x_rfd_resources_free(struct et131x_adapter *adapter, MP_RFD *pMpRfd)
{
	pMpRfd->Packet = NULL;
	kmem_cache_free(adapter->RxRing.RecvLookaside, pMpRfd);
}

/**
 * ConfigRxDmaRegs - Start of Rx_DMA init sequence
 * @pAdapter: pointer to our adapter structure
 */
void ConfigRxDmaRegs(struct et131x_adapter *pAdapter)
{
	struct _RXDMA_t __iomem *pRxDma = &pAdapter->CSRAddress->rxdma;
	struct _rx_ring_t *pRxLocal = &pAdapter->RxRing;
	PFBR_DESC_t pFbrEntry;
	uint32_t iEntry;
	RXDMA_PSR_NUM_DES_t psr_num_des;
	unsigned long lockflags;

	DBG_ENTER(et131x_dbginfo);

	/* Halt RXDMA to perform the reconfigure. */
	et131x_rx_dma_disable(pAdapter);

	/* Load the completion writeback physical address
	 *
	 * NOTE : pci_alloc_consistent(), used above to alloc DMA regions,
	 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
	 * are ever returned, make sure the high part is retrieved here
	 * before storing the adjusted address.
	 */
	writel((uint32_t) (pRxLocal->RxStatusRealPA >> 32),
	       &pRxDma->dma_wb_base_hi);
	writel((uint32_t) pRxLocal->RxStatusRealPA, &pRxDma->dma_wb_base_lo);

	memset(pRxLocal->pRxStatusVa, 0, sizeof(RX_STATUS_BLOCK_t));

	/* Set the address and parameters of the packet status ring into the
	 * 1310's registers
	 */
	writel((uint32_t) (pRxLocal->pPSRingRealPa >> 32),
	       &pRxDma->psr_base_hi);
	writel((uint32_t) pRxLocal->pPSRingRealPa, &pRxDma->psr_base_lo);
	writel(pRxLocal->PsrNumEntries - 1, &pRxDma->psr_num_des.value);
	writel(0, &pRxDma->psr_full_offset.value);

	psr_num_des.value = readl(&pRxDma->psr_num_des.value);
	writel((psr_num_des.bits.psr_ndes * LO_MARK_PERCENT_FOR_PSR) / 100,
	       &pRxDma->psr_min_des.value);

	spin_lock_irqsave(&pAdapter->RcvLock, lockflags);

	/* These local variables track the PSR in the adapter structure */
	pRxLocal->local_psr_full.bits.psr_full = 0;
	pRxLocal->local_psr_full.bits.psr_full_wrap = 0;

	/* Now's the best time to initialize FBR1 contents */
	pFbrEntry = (PFBR_DESC_t) pRxLocal->pFbr1RingVa;
	for (iEntry = 0; iEntry < pRxLocal->Fbr1NumEntries; iEntry++) {
		pFbrEntry->addr_hi = pRxLocal->Fbr[1]->PAHigh[iEntry];
		pFbrEntry->addr_lo = pRxLocal->Fbr[1]->PALow[iEntry];
		pFbrEntry->word2.bits.bi = iEntry;
		pFbrEntry++;
	}

	/* Set the address and parameters of Free buffer ring 1 (and 0 if
	 * required) into the 1310's registers
	 */
	writel((uint32_t) (pRxLocal->Fbr1Realpa >> 32), &pRxDma->fbr1_base_hi);
	writel((uint32_t) pRxLocal->Fbr1Realpa, &pRxDma->fbr1_base_lo);
	writel(pRxLocal->Fbr1NumEntries - 1, &pRxDma->fbr1_num_des.value);

	{
		DMA10W_t fbr1_full = { 0 };

		fbr1_full.bits.val = 0;
		fbr1_full.bits.wrap = 1;
		writel(fbr1_full.value, &pRxDma->fbr1_full_offset.value);
	}

	/* This variable tracks the free buffer ring 1 full position, so it
	 * has to match the above.
	 */
	pRxLocal->local_Fbr1_full.bits.val = 0;
	pRxLocal->local_Fbr1_full.bits.wrap = 1;
	writel(((pRxLocal->Fbr1NumEntries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
	       &pRxDma->fbr1_min_des.value);
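
	/* fbr1_min_des is programmed to LO_MARK_PERCENT_FOR_RX percent of the
	 * ring size (minus one, the register being zero-based); judging by
	 * the names, this is the low-water threshold at which the hardware
	 * considers the free buffer ring to be running short, mirroring the
	 * psr_min_des setting above.
	 */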

#ifdef USE_FBR0
	/* Now's the best time to initialize FBR0 contents */
	pFbrEntry = (PFBR_DESC_t) pRxLocal->pFbr0RingVa;
	for (iEntry = 0; iEntry < pRxLocal->Fbr0NumEntries; iEntry++) {
		pFbrEntry->addr_hi = pRxLocal->Fbr[0]->PAHigh[iEntry];
		pFbrEntry->addr_lo = pRxLocal->Fbr[0]->PALow[iEntry];
		pFbrEntry->word2.bits.bi = iEntry;
		pFbrEntry++;
	}

	writel((uint32_t) (pRxLocal->Fbr0Realpa >> 32), &pRxDma->fbr0_base_hi);
	writel((uint32_t) pRxLocal->Fbr0Realpa, &pRxDma->fbr0_base_lo);
	writel(pRxLocal->Fbr0NumEntries - 1, &pRxDma->fbr0_num_des.value);

	{
		DMA10W_t fbr0_full = { 0 };

		fbr0_full.bits.val = 0;
		fbr0_full.bits.wrap = 1;
		writel(fbr0_full.value, &pRxDma->fbr0_full_offset.value);
	}

	/* This variable tracks the free buffer ring 0 full position, so it
	 * has to match the above.
	 */
	pRxLocal->local_Fbr0_full.bits.val = 0;
	pRxLocal->local_Fbr0_full.bits.wrap = 1;
	writel(((pRxLocal->Fbr0NumEntries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
	       &pRxDma->fbr0_min_des.value);
#endif

	/* Program the number of packets we will receive before generating an
	 * interrupt.
	 * For version B silicon, this value gets updated once autoneg is
	 * complete.
	 */
	writel(pAdapter->RegistryRxNumBuffers, &pRxDma->num_pkt_done.value);

	/* The "time_done" is not working correctly to coalesce interrupts
	 * after a given time period, but rather is giving us an interrupt
	 * regardless of whether we have received packets.
	 * This value gets updated once autoneg is complete.
	 */
	writel(pAdapter->RegistryRxTimeInterval, &pRxDma->max_pkt_time.value);

	spin_unlock_irqrestore(&pAdapter->RcvLock, lockflags);

	DBG_LEAVE(et131x_dbginfo);
}

/**
 * SetRxDmaTimer - Set the heartbeat timer according to line rate.
 * @pAdapter: pointer to our adapter structure
 */
void SetRxDmaTimer(struct et131x_adapter *pAdapter)
{
	/* For version B silicon, we do not use the RxDMA timer for 10 and 100
	 * Mbits/s line rates, nor do we enable RxDMA interrupt coalescing.
	 */
	if ((pAdapter->uiLinkSpeed == TRUEPHY_SPEED_100MBPS) ||
	    (pAdapter->uiLinkSpeed == TRUEPHY_SPEED_10MBPS)) {
		writel(0, &pAdapter->CSRAddress->rxdma.max_pkt_time.value);
		writel(1, &pAdapter->CSRAddress->rxdma.num_pkt_done.value);
	}
}

/**
 * et131x_rx_dma_disable - Stop of Rx_DMA on the ET1310
 * @pAdapter: pointer to our adapter structure
 */
void et131x_rx_dma_disable(struct et131x_adapter *pAdapter)
{
	RXDMA_CSR_t csr;

	DBG_ENTER(et131x_dbginfo);

	/* Setup the receive dma configuration register */
	writel(0x00002001, &pAdapter->CSRAddress->rxdma.csr.value);
	csr.value = readl(&pAdapter->CSRAddress->rxdma.csr.value);
	if (csr.bits.halt_status != 1) {
		udelay(5);
		csr.value = readl(&pAdapter->CSRAddress->rxdma.csr.value);
		if (csr.bits.halt_status != 1) {
			DBG_ERROR(et131x_dbginfo,
				  "RX Dma failed to enter halt state. CSR 0x%08x\n",
				  csr.value);
		}
	}

	DBG_LEAVE(et131x_dbginfo);
}

/**
 * et131x_rx_dma_enable - re-start of Rx_DMA on the ET1310.
 * @pAdapter: pointer to our adapter structure
 */
void et131x_rx_dma_enable(struct et131x_adapter *pAdapter)
{
	DBG_RX_ENTER(et131x_dbginfo);

	if (pAdapter->RegistryPhyLoopbk) {
		/* RxDMA is disabled for loopback operation. */
		writel(0x1, &pAdapter->CSRAddress->rxdma.csr.value);
	} else {
		/* Setup the receive dma configuration register for normal operation */
		RXDMA_CSR_t csr = { 0 };

		csr.bits.fbr1_enable = 1;
		if (pAdapter->RxRing.Fbr1BufferSize == 4096) {
			csr.bits.fbr1_size = 1;
		} else if (pAdapter->RxRing.Fbr1BufferSize == 8192) {
			csr.bits.fbr1_size = 2;
		} else if (pAdapter->RxRing.Fbr1BufferSize == 16384) {
			csr.bits.fbr1_size = 3;
		}
#ifdef USE_FBR0
		csr.bits.fbr0_enable = 1;
		if (pAdapter->RxRing.Fbr0BufferSize == 256) {
			csr.bits.fbr0_size = 1;
		} else if (pAdapter->RxRing.Fbr0BufferSize == 512) {
			csr.bits.fbr0_size = 2;
		} else if (pAdapter->RxRing.Fbr0BufferSize == 1024) {
			csr.bits.fbr0_size = 3;
		}
#endif
		writel(csr.value, &pAdapter->CSRAddress->rxdma.csr.value);

		csr.value = readl(&pAdapter->CSRAddress->rxdma.csr.value);
		if (csr.bits.halt_status != 0) {
			udelay(5);
			csr.value = readl(&pAdapter->CSRAddress->rxdma.csr.value);
			if (csr.bits.halt_status != 0) {
				DBG_ERROR(et131x_dbginfo,
					  "RX Dma failed to exit halt state. CSR 0x%08x\n",
					  csr.value);
			}
		}
	}

	DBG_RX_LEAVE(et131x_dbginfo);
}

/**
 * nic_rx_pkts - Checks the hardware for available packets
 * @pAdapter: pointer to our adapter
 *
 * Returns pMpRfd, a pointer to our MPRFD.
 *
 * Checks the hardware for available packets, using completion ring
 * If packets are available, it gets an RFD from the RecvList, attaches
 * the packet to it, puts the RFD in the RecvPendList, and also returns
 * the pointer to the RFD.
 */
PMP_RFD nic_rx_pkts(struct et131x_adapter *pAdapter)
{
	struct _rx_ring_t *pRxLocal = &pAdapter->RxRing;
	PRX_STATUS_BLOCK_t pRxStatusBlock;
	PPKT_STAT_DESC_t pPSREntry;
	PMP_RFD pMpRfd;
	uint32_t nIndex;
	uint8_t *pBufVa;
	unsigned long lockflags;
	struct list_head *element;
	uint8_t ringIndex;
	uint16_t bufferIndex;
	uint32_t localLen;
	PKT_STAT_DESC_WORD0_t Word0;

	DBG_RX_ENTER(et131x_dbginfo);

	/* RX Status block is written by the DMA engine prior to every
	 * interrupt. It contains the next to be used entry in the Packet
	 * Status Ring, and also the two Free Buffer rings.
	 */
	pRxStatusBlock = (PRX_STATUS_BLOCK_t) pRxLocal->pRxStatusVa;

	if (pRxStatusBlock->Word1.bits.PSRoffset ==
	    pRxLocal->local_psr_full.bits.psr_full &&
	    pRxStatusBlock->Word1.bits.PSRwrap ==
	    pRxLocal->local_psr_full.bits.psr_full_wrap) {
		/* Looks like this ring is not updated yet */
		DBG_RX(et131x_dbginfo, "(0)\n");
		DBG_RX_LEAVE(et131x_dbginfo);
		return NULL;
	}

	/* The packet status ring indicates that data is available. */
	pPSREntry = (PPKT_STAT_DESC_t) (pRxLocal->pPSRingVa) +
	    pRxLocal->local_psr_full.bits.psr_full;

	/* Grab any information that is required once the PSR is
	 * advanced, since we can no longer rely on the memory being
	 * accurate
	 */
	localLen = pPSREntry->word1.bits.length;
	ringIndex = (uint8_t) pPSREntry->word1.bits.ri;
	bufferIndex = (uint16_t) pPSREntry->word1.bits.bi;
	Word0 = pPSREntry->word0;

	DBG_RX(et131x_dbginfo, "RX PACKET STATUS\n");
	DBG_RX(et131x_dbginfo, "\tlength : %d\n", localLen);
	DBG_RX(et131x_dbginfo, "\tringIndex : %d\n", ringIndex);
	DBG_RX(et131x_dbginfo, "\tbufferIndex : %d\n", bufferIndex);
	DBG_RX(et131x_dbginfo, "\tword0 : 0x%08x\n", Word0.value);

#if 0
	/* Check the Status Word that the MAC has appended to the PSR
	 * entry in case the MAC has detected errors.
	 */
	if (Word0.value & ALCATEL_BAD_STATUS) {
		DBG_ERROR(et131x_dbginfo,
			  "NICRxPkts >> Alcatel Status Word error."
			  "Value 0x%08x\n", pPSREntry->word0.value);
	}
#endif

	/* Indicate that we have used this PSR entry. */
	if (++pRxLocal->local_psr_full.bits.psr_full >
	    pRxLocal->PsrNumEntries - 1) {
		pRxLocal->local_psr_full.bits.psr_full = 0;
		pRxLocal->local_psr_full.bits.psr_full_wrap ^= 1;
	}

	writel(pRxLocal->local_psr_full.value,
	       &pAdapter->CSRAddress->rxdma.psr_full_offset.value);
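
	/* local_psr_full mirrors how far software has consumed the PSR: the
	 * index wraps back to zero at PsrNumEntries and the wrap bit is
	 * toggled each time, which is presumably how the comparison at the
	 * top of this function tells "nothing new" apart from "the ring
	 * wrapped all the way around".
	 */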

#ifndef USE_FBR0
	if (ringIndex != 1) {
		DBG_ERROR(et131x_dbginfo,
			  "NICRxPkts PSR Entry %d indicates "
			  "Buffer Ring 0 in use\n",
			  pRxLocal->local_psr_full.bits.psr_full);
		DBG_RX_LEAVE(et131x_dbginfo);
		return NULL;
	}
#endif

#ifdef USE_FBR0
	if (ringIndex > 1 ||
	    (ringIndex == 0 &&
	     bufferIndex > pRxLocal->Fbr0NumEntries - 1) ||
	    (ringIndex == 1 &&
	     bufferIndex > pRxLocal->Fbr1NumEntries - 1))
#else
	if (ringIndex != 1 ||
	    bufferIndex > pRxLocal->Fbr1NumEntries - 1)
#endif
	{
		/* Illegal buffer or ring index cannot be used by S/W */
		DBG_ERROR(et131x_dbginfo,
			  "NICRxPkts PSR Entry %d indicates "
			  "length of %d and/or bad bi(%d)\n",
			  pRxLocal->local_psr_full.bits.psr_full,
			  localLen, bufferIndex);
		DBG_RX_LEAVE(et131x_dbginfo);
		return NULL;
	}

	/* Get and fill the RFD. */
	spin_lock_irqsave(&pAdapter->RcvLock, lockflags);

	pMpRfd = NULL;
	element = pRxLocal->RecvList.next;
	pMpRfd = (PMP_RFD) list_entry(element, MP_RFD, list_node);

	if (pMpRfd == NULL) {
		DBG_RX(et131x_dbginfo,
		       "NULL RFD returned from RecvList via list_entry()\n");
		DBG_RX_LEAVE(et131x_dbginfo);
		spin_unlock_irqrestore(&pAdapter->RcvLock, lockflags);
		return NULL;
	}

	list_del(&pMpRfd->list_node);
	pRxLocal->nReadyRecv--;

	spin_unlock_irqrestore(&pAdapter->RcvLock, lockflags);

	pMpRfd->iBufferIndex = bufferIndex;
	pMpRfd->iRingIndex = ringIndex;

	/* In V1 silicon, there is a bug which screws up filtering of
	 * runt packets. Therefore runt packet filtering is disabled
	 * in the MAC and the packets are dropped here. They are
	 * also counted here.
	 */
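	/* The cut-off below is NIC_MIN_PACKET_SIZE plus 4 bytes, the extra 4
	 * presumably accounting for the Ethernet FCS that is still attached
	 * to the frame at this point.
	 */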
	if (localLen < (NIC_MIN_PACKET_SIZE + 4)) {
		pAdapter->Stats.other_errors++;
		localLen = 0;
	}

	if (localLen) {
		if (pAdapter->ReplicaPhyLoopbk == 1) {
			pBufVa = pRxLocal->Fbr[ringIndex]->Va[bufferIndex];

			if (memcmp(&pBufVa[6], &pAdapter->CurrentAddress[0],
				   ETH_ALEN) == 0) {
				if (memcmp(&pBufVa[42], "Replica packet",
					   ETH_HLEN)) {
					pAdapter->ReplicaPhyLoopbkPF = 1;
				}
			}

			DBG_WARNING(et131x_dbginfo,
				    "pBufVa:\t%02x:%02x:%02x:%02x:%02x:%02x\n",
				    pBufVa[6], pBufVa[7], pBufVa[8],
				    pBufVa[9], pBufVa[10], pBufVa[11]);

			DBG_WARNING(et131x_dbginfo,
				    "CurrentAddr:\t%02x:%02x:%02x:%02x:%02x:%02x\n",
				    pAdapter->CurrentAddress[0],
				    pAdapter->CurrentAddress[1],
				    pAdapter->CurrentAddress[2],
				    pAdapter->CurrentAddress[3],
				    pAdapter->CurrentAddress[4],
				    pAdapter->CurrentAddress[5]);
		}

		/* Determine if this is a multicast packet coming in */
		if ((Word0.value & ALCATEL_MULTICAST_PKT) &&
		    !(Word0.value & ALCATEL_BROADCAST_PKT)) {
			/* Promiscuous mode and Multicast mode are
			 * not mutually exclusive as was first
			 * thought. I guess Promiscuous is just
			 * considered a super-set of the other
			 * filters. Generally filter is 0x2b when in
			 * promiscuous mode.
			 */
			if ((pAdapter->PacketFilter & ET131X_PACKET_TYPE_MULTICAST)
			    && !(pAdapter->PacketFilter & ET131X_PACKET_TYPE_PROMISCUOUS)
			    && !(pAdapter->PacketFilter & ET131X_PACKET_TYPE_ALL_MULTICAST)) {
				pBufVa = pRxLocal->Fbr[ringIndex]->
				    Va[bufferIndex];

				/* Loop through our list to see if the
				 * destination address of this packet
				 * matches one in our list.
				 */
				for (nIndex = 0;
				     nIndex < pAdapter->MCAddressCount;
				     nIndex++) {
					if (pBufVa[0] ==
					    pAdapter->MCList[nIndex][0]
					    && pBufVa[1] ==
					    pAdapter->MCList[nIndex][1]
					    && pBufVa[2] ==
					    pAdapter->MCList[nIndex][2]
					    && pBufVa[3] ==
					    pAdapter->MCList[nIndex][3]
					    && pBufVa[4] ==
					    pAdapter->MCList[nIndex][4]
					    && pBufVa[5] ==
					    pAdapter->MCList[nIndex][5]) {
						break;
					}
				}

				/* If our index is equal to the number
				 * of Multicast address we have, then
				 * this means we did not find this
				 * packet's matching address in our
				 * list. Set the PacketSize to zero,
				 * so we free our RFD when we return
				 * from this function.
				 */
				if (nIndex == pAdapter->MCAddressCount) {
					localLen = 0;
				}
			}

			if (localLen > 0) {
				pAdapter->Stats.multircv++;
			}
		} else if (Word0.value & ALCATEL_BROADCAST_PKT) {
			pAdapter->Stats.brdcstrcv++;
		} else {
			/* Not sure what this counter measures in
			 * promiscuous mode. Perhaps we should check
			 * the MAC address to see if it is directed
			 * to us in promiscuous mode.
			 */
			pAdapter->Stats.unircv++;
		}
	}

	if (localLen > 0) {
		struct sk_buff *skb = NULL;

		//pMpRfd->PacketSize = localLen - 4;
		pMpRfd->PacketSize = localLen;

		skb = dev_alloc_skb(pMpRfd->PacketSize + 2);
		if (!skb) {
			DBG_ERROR(et131x_dbginfo,
				  "Couldn't alloc an SKB for Rx\n");
			DBG_RX_LEAVE(et131x_dbginfo);
			return NULL;
		}

		pAdapter->net_stats.rx_bytes += pMpRfd->PacketSize;

		memcpy(skb_put(skb, pMpRfd->PacketSize),
		       pRxLocal->Fbr[ringIndex]->Va[bufferIndex],
		       pMpRfd->PacketSize);

		skb->dev = pAdapter->netdev;
		skb->protocol = eth_type_trans(skb, pAdapter->netdev);
		skb->ip_summed = CHECKSUM_NONE;

		netif_rx(skb);
	} else {
		pMpRfd->PacketSize = 0;
	}

	nic_return_rfd(pAdapter, pMpRfd);

	DBG_RX(et131x_dbginfo, "(1)\n");
	DBG_RX_LEAVE(et131x_dbginfo);
	return pMpRfd;
}

/**
 * et131x_reset_recv - Reset the receive list
 * @pAdapter: pointer to our adapter
 *
 * Assumption, Rcv spinlock has been acquired.
 */
void et131x_reset_recv(struct et131x_adapter *pAdapter)
{
	PMP_RFD pMpRfd;
	struct list_head *element;

	DBG_ENTER(et131x_dbginfo);

	DBG_ASSERT(!list_empty(&pAdapter->RxRing.RecvList));

	/* Take all the RFD's from the pending list, and stick them on the
	 * RecvList.
	 */
	while (!list_empty(&pAdapter->RxRing.RecvPendingList)) {
		element = pAdapter->RxRing.RecvPendingList.next;

		pMpRfd = (PMP_RFD) list_entry(element, MP_RFD, list_node);

		list_move_tail(&pMpRfd->list_node, &pAdapter->RxRing.RecvList);
	}

	DBG_LEAVE(et131x_dbginfo);
}

/**
 * et131x_handle_recv_interrupt - Interrupt handler for receive processing
 * @pAdapter: pointer to our adapter
 *
 * Assumption, Rcv spinlock has been acquired.
 */
void et131x_handle_recv_interrupt(struct et131x_adapter *pAdapter)
{
	PMP_RFD pMpRfd = NULL;
	struct sk_buff *PacketArray[NUM_PACKETS_HANDLED];
	PMP_RFD RFDFreeArray[NUM_PACKETS_HANDLED];
	uint32_t PacketArrayCount = 0;
	uint32_t PacketsToHandle;
	uint32_t PacketFreeCount = 0;
	bool TempUnfinishedRec = false;

	DBG_RX_ENTER(et131x_dbginfo);

	PacketsToHandle = NUM_PACKETS_HANDLED;

	/* Process up to available RFD's */
	while (PacketArrayCount < PacketsToHandle) {
		if (list_empty(&pAdapter->RxRing.RecvList)) {
			DBG_ASSERT(pAdapter->RxRing.nReadyRecv == 0);
			DBG_ERROR(et131x_dbginfo, "NO RFD's !!!!!!!!!!!!!\n");
			TempUnfinishedRec = true;
			break;
		}

		pMpRfd = nic_rx_pkts(pAdapter);

		if (pMpRfd == NULL) {
			break;
		}

		/* Do not receive any packets until a filter has been set.
		 * Do not receive any packets until we are at D0.
		 * Do not receive any packets until we have link.
		 * If length is zero, return the RFD in order to advance the
		 * Free buffer ring.
		 */
		if ((!pAdapter->PacketFilter) ||
		    (pAdapter->PoMgmt.PowerState != NdisDeviceStateD0) ||
		    (!MP_LINK_DETECTED(pAdapter)) ||
		    (pMpRfd->PacketSize == 0)) {
			continue;
		}

		/* Increment the number of packets we received */
		pAdapter->Stats.ipackets++;

		/* Set the status on the packet, either resources or success */
		if (pAdapter->RxRing.nReadyRecv >= RFD_LOW_WATER_MARK) {
			/* Put this RFD on the pending list
			 *
			 * NOTE: nic_rx_pkts() above is already returning the
			 * RFD to the RecvList, so don't additionally do that
			 * here.
			 * Besides, we don't really need (at this point) the
			 * pending list anyway.
			 */
			//spin_lock_irqsave( &pAdapter->RcvPendLock, lockflags );
			//list_add_tail( &pMpRfd->list_node, &pAdapter->RxRing.RecvPendingList );
			//spin_unlock_irqrestore( &pAdapter->RcvPendLock, lockflags );

			/* Update the number of outstanding Recvs */
			//MP_INC_RCV_REF( pAdapter );
		} else {
			RFDFreeArray[PacketFreeCount] = pMpRfd;
			PacketFreeCount++;

			DBG_WARNING(et131x_dbginfo,
				    "RFD's are running out !!!!!!!!!!!!!\n");
		}

		PacketArray[PacketArrayCount] = pMpRfd->Packet;
		PacketArrayCount++;
	}
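
	/* If the batch limit was hit, or we ran out of RFDs, flag the work as
	 * unfinished and re-arm the watchdog timer so the remaining packets
	 * are picked up on a later pass; otherwise clear the flag and let the
	 * watchdog disable itself.
	 */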
	if ((PacketArrayCount == NUM_PACKETS_HANDLED) || TempUnfinishedRec) {
		pAdapter->RxRing.UnfinishedReceives = true;
		writel(pAdapter->RegistryTxTimeInterval * NANO_IN_A_MICRO,
		       &pAdapter->CSRAddress->global.watchdog_timer);
	} else {
		/* Watchdog timer will disable itself if appropriate. */
		pAdapter->RxRing.UnfinishedReceives = false;
	}

	DBG_RX_LEAVE(et131x_dbginfo);
}

/**
 * nic_return_rfd - Recycle a RFD and put it back onto the receive list
 * @pAdapter: pointer to our adapter
 * @pMpRfd: pointer to the RFD
 */
void nic_return_rfd(struct et131x_adapter *pAdapter, PMP_RFD pMpRfd)
{
	struct _rx_ring_t *pRxLocal = &pAdapter->RxRing;
	struct _RXDMA_t __iomem *pRxDma = &pAdapter->CSRAddress->rxdma;
	uint16_t bi = pMpRfd->iBufferIndex;
	uint8_t ri = pMpRfd->iRingIndex;
	unsigned long lockflags;

	DBG_RX_ENTER(et131x_dbginfo);

	/* We don't use any of the OOB data besides status. Otherwise, we
	 * need to clean up OOB data
	 */
	if (
#ifdef USE_FBR0
	    (ri == 0 && bi < pRxLocal->Fbr0NumEntries) ||
#endif
	    (ri == 1 && bi < pRxLocal->Fbr1NumEntries)) {
		spin_lock_irqsave(&pAdapter->FbrLock, lockflags);

		if (ri == 1) {
			PFBR_DESC_t pNextDesc =
			    (PFBR_DESC_t) (pRxLocal->pFbr1RingVa) +
			    pRxLocal->local_Fbr1_full.bits.val;

			/* Handle the Free Buffer Ring advancement here. Write
			 * the PA / Buffer Index for the returned buffer into
			 * the oldest (next to be freed) FBR entry
			 */
			pNextDesc->addr_hi = pRxLocal->Fbr[1]->PAHigh[bi];
			pNextDesc->addr_lo = pRxLocal->Fbr[1]->PALow[bi];
			pNextDesc->word2.value = bi;

			if (++pRxLocal->local_Fbr1_full.bits.val >
			    (pRxLocal->Fbr1NumEntries - 1)) {
				pRxLocal->local_Fbr1_full.bits.val = 0;
				pRxLocal->local_Fbr1_full.bits.wrap ^= 1;
			}

			writel(pRxLocal->local_Fbr1_full.value,
			       &pRxDma->fbr1_full_offset.value);
		}
#ifdef USE_FBR0
		else {
			PFBR_DESC_t pNextDesc =
			    (PFBR_DESC_t) pRxLocal->pFbr0RingVa +
			    pRxLocal->local_Fbr0_full.bits.val;

			/* Handle the Free Buffer Ring advancement here. Write
			 * the PA / Buffer Index for the returned buffer into
			 * the oldest (next to be freed) FBR entry
			 */
			pNextDesc->addr_hi = pRxLocal->Fbr[0]->PAHigh[bi];
			pNextDesc->addr_lo = pRxLocal->Fbr[0]->PALow[bi];
			pNextDesc->word2.value = bi;

			if (++pRxLocal->local_Fbr0_full.bits.val >
			    (pRxLocal->Fbr0NumEntries - 1)) {
				pRxLocal->local_Fbr0_full.bits.val = 0;
				pRxLocal->local_Fbr0_full.bits.wrap ^= 1;
			}

			writel(pRxLocal->local_Fbr0_full.value,
			       &pRxDma->fbr0_full_offset.value);
		}
#endif
		spin_unlock_irqrestore(&pAdapter->FbrLock, lockflags);
	} else {
		DBG_ERROR(et131x_dbginfo,
			  "NICReturnRFD illegal Buffer Index returned\n");
	}

	/* The processing on this RFD is done, so put it back on the tail of
	 * our list
	 */
	spin_lock_irqsave(&pAdapter->RcvLock, lockflags);
	list_add_tail(&pMpRfd->list_node, &pRxLocal->RecvList);
	pRxLocal->nReadyRecv++;
	spin_unlock_irqrestore(&pAdapter->RcvLock, lockflags);

	DBG_ASSERT(pRxLocal->nReadyRecv <= pRxLocal->NumRfd);
	DBG_RX_LEAVE(et131x_dbginfo);
}