Staging: sxg: fix napi interface build
[linux/fpc-iii.git] / drivers / staging / sxg / sxg.c
blob5d31e1bd9718de58b490bf45e314e10fe8b8cb20
1 /**************************************************************************
3 * Copyright (C) 2000-2008 Alacritech, Inc. All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above
12 * copyright notice, this list of conditions and the following
13 * disclaimer in the documentation and/or other materials provided
14 * with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY ALACRITECH, INC. ``AS IS'' AND ANY
17 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ALACRITECH, INC. OR
20 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
23 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
24 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
25 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
26 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
29 * The views and conclusions contained in the software and documentation
30 * are those of the authors and should not be interpreted as representing
31 * official policies, either expressed or implied, of Alacritech, Inc.
33 * Parts developed by LinSysSoft Sahara team
35 **************************************************************************/
38 * FILENAME: sxg.c
40 * The SXG driver for Alacritech's 10Gbe products.
42 * NOTE: This is the standard, non-accelerated version of Alacritech's
43 * IS-NIC driver.
46 #include <linux/kernel.h>
47 #include <linux/string.h>
48 #include <linux/errno.h>
49 #include <linux/module.h>
50 #include <linux/moduleparam.h>
51 #include <linux/ioport.h>
52 #include <linux/slab.h>
53 #include <linux/interrupt.h>
54 #include <linux/timer.h>
55 #include <linux/pci.h>
56 #include <linux/spinlock.h>
57 #include <linux/init.h>
58 #include <linux/netdevice.h>
59 #include <linux/etherdevice.h>
60 #include <linux/ethtool.h>
61 #include <linux/skbuff.h>
62 #include <linux/delay.h>
63 #include <linux/types.h>
64 #include <linux/dma-mapping.h>
65 #include <linux/mii.h>
66 #include <linux/ip.h>
67 #include <linux/in.h>
68 #include <linux/tcp.h>
69 #include <linux/ipv6.h>
71 #define SLIC_GET_STATS_ENABLED 0
72 #define LINUX_FREES_ADAPTER_RESOURCES 1
73 #define SXG_OFFLOAD_IP_CHECKSUM 0
74 #define SXG_POWER_MANAGEMENT_ENABLED 0
75 #define VPCI 0
76 #define ATK_DEBUG 1
78 #include "sxg_os.h"
79 #include "sxghw.h"
80 #include "sxghif.h"
81 #include "sxg.h"
82 #include "sxgdbg.h"
84 #include "sxgphycode.h"
85 #define SXG_UCODE_DBG 0 /* Turn on for debugging */
86 #ifdef SXG_UCODE_DBG
87 #include "saharadbgdownload.c"
88 #include "saharadbgdownloadB.c"
89 #else
90 #include "saharadownload.c"
91 #include "saharadownloadB.c"
92 #endif
94 static int sxg_allocate_buffer_memory(struct adapter_t *adapter, u32 Size,
95 enum sxg_buffer_type BufferType);
96 static int sxg_allocate_rcvblock_complete(struct adapter_t *adapter,
97 void *RcvBlock,
98 dma_addr_t PhysicalAddress,
99 u32 Length);
100 static void sxg_allocate_sgl_buffer_complete(struct adapter_t *adapter,
101 struct sxg_scatter_gather *SxgSgl,
102 dma_addr_t PhysicalAddress,
103 u32 Length);
105 static void sxg_mcast_init_crc32(void);
106 static int sxg_entry_open(struct net_device *dev);
107 static int sxg_second_open(struct net_device * dev);
108 static int sxg_entry_halt(struct net_device *dev);
109 static int sxg_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
110 static int sxg_send_packets(struct sk_buff *skb, struct net_device *dev);
111 static int sxg_transmit_packet(struct adapter_t *adapter, struct sk_buff *skb);
112 static int sxg_dumb_sgl(struct sxg_x64_sgl *pSgl,
113 struct sxg_scatter_gather *SxgSgl);
115 static void sxg_handle_interrupt(struct adapter_t *adapter, int *work_done,
116 int budget);
117 static void sxg_interrupt(struct adapter_t *adapter);
118 static int sxg_poll(struct napi_struct *napi, int budget);
119 static int sxg_process_isr(struct adapter_t *adapter, u32 MessageId);
120 static u32 sxg_process_event_queue(struct adapter_t *adapter, u32 RssId,
121 int *sxg_napi_continue, int *work_done, int budget);
122 static void sxg_complete_slow_send(struct adapter_t *adapter);
123 static struct sk_buff *sxg_slow_receive(struct adapter_t *adapter,
124 struct sxg_event *Event);
125 static void sxg_process_rcv_error(struct adapter_t *adapter, u32 ErrorStatus);
126 static bool sxg_mac_filter(struct adapter_t *adapter,
127 struct ether_header *EtherHdr, ushort length);
128 static struct net_device_stats *sxg_get_stats(struct net_device * dev);
129 void sxg_free_resources(struct adapter_t *adapter);
130 void sxg_free_rcvblocks(struct adapter_t *adapter);
131 void sxg_free_sgl_buffers(struct adapter_t *adapter);
132 void sxg_unmap_resources(struct adapter_t *adapter);
133 void sxg_free_mcast_addrs(struct adapter_t *adapter);
134 void sxg_collect_statistics(struct adapter_t *adapter);
135 static int sxg_register_interrupt(struct adapter_t *adapter);
136 static void sxg_remove_isr(struct adapter_t *adapter);
137 static irqreturn_t sxg_isr(int irq, void *dev_id);
139 #define XXXTODO 0
141 #if XXXTODO
142 static int sxg_mac_set_address(struct net_device *dev, void *ptr);
143 #endif
144 static void sxg_mcast_set_list(struct net_device *dev);
146 static int sxg_adapter_set_hwaddr(struct adapter_t *adapter);
148 static int sxg_initialize_adapter(struct adapter_t *adapter);
149 static void sxg_stock_rcv_buffers(struct adapter_t *adapter);
150 static void sxg_complete_descriptor_blocks(struct adapter_t *adapter,
151 unsigned char Index);
152 int sxg_change_mtu (struct net_device *netdev, int new_mtu);
153 static int sxg_initialize_link(struct adapter_t *adapter);
154 static int sxg_phy_init(struct adapter_t *adapter);
155 static void sxg_link_event(struct adapter_t *adapter);
156 static enum SXG_LINK_STATE sxg_get_link_state(struct adapter_t *adapter);
157 static void sxg_link_state(struct adapter_t *adapter,
158 enum SXG_LINK_STATE LinkState);
159 static int sxg_write_mdio_reg(struct adapter_t *adapter,
160 u32 DevAddr, u32 RegAddr, u32 Value);
161 static int sxg_read_mdio_reg(struct adapter_t *adapter,
162 u32 DevAddr, u32 RegAddr, u32 *pValue);
163 static void sxg_set_mcast_addr(struct adapter_t *adapter);
165 static unsigned int sxg_first_init = 1;
166 static char *sxg_banner =
167 "Alacritech SLIC Technology(tm) Server and Storage \
168 10Gbe Accelerator (Non-Accelerated)\n";
170 static int sxg_debug = 1;
171 static int debug = -1;
172 static struct net_device *head_netdevice = NULL;
174 static struct sxgbase_driver sxg_global = {
175 .dynamic_intagg = 1,
177 static int intagg_delay = 100;
178 static u32 dynamic_intagg = 0;
180 char sxg_driver_name[] = "sxg_nic";
181 #define DRV_AUTHOR "Alacritech, Inc. Engineering"
182 #define DRV_DESCRIPTION \
183 "Alacritech SLIC Techonology(tm) Non-Accelerated 10Gbe Driver"
184 #define DRV_COPYRIGHT \
185 "Copyright 2000-2008 Alacritech, Inc. All rights reserved."
187 MODULE_AUTHOR(DRV_AUTHOR);
188 MODULE_DESCRIPTION(DRV_DESCRIPTION);
189 MODULE_LICENSE("GPL");
191 module_param(dynamic_intagg, int, 0);
192 MODULE_PARM_DESC(dynamic_intagg, "Dynamic Interrupt Aggregation Setting");
193 module_param(intagg_delay, int, 0);
194 MODULE_PARM_DESC(intagg_delay, "uSec Interrupt Aggregation Delay");
196 static struct pci_device_id sxg_pci_tbl[] __devinitdata = {
197 {PCI_DEVICE(SXG_VENDOR_ID, SXG_DEVICE_ID)},
198 {0,}
201 MODULE_DEVICE_TABLE(pci, sxg_pci_tbl);
203 static inline void sxg_reg32_write(void __iomem *reg, u32 value, bool flush)
205 writel(value, reg);
206 if (flush)
207 mb();
210 static inline void sxg_reg64_write(struct adapter_t *adapter, void __iomem *reg,
211 u64 value, u32 cpu)
213 u32 value_high = (u32) (value >> 32);
214 u32 value_low = (u32) (value & 0x00000000FFFFFFFF);
215 unsigned long flags;
217 spin_lock_irqsave(&adapter->Bit64RegLock, flags);
218 writel(value_high, (void __iomem *)(&adapter->UcodeRegs[cpu].Upper));
219 writel(value_low, reg);
220 spin_unlock_irqrestore(&adapter->Bit64RegLock, flags);
223 static void sxg_init_driver(void)
225 if (sxg_first_init) {
226 DBG_ERROR("sxg: %s sxg_first_init set jiffies[%lx]\n",
227 __func__, jiffies);
228 sxg_first_init = 0;
229 spin_lock_init(&sxg_global.driver_lock);
233 static void sxg_dbg_macaddrs(struct adapter_t *adapter)
235 DBG_ERROR(" (%s) curr %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
236 adapter->netdev->name, adapter->currmacaddr[0],
237 adapter->currmacaddr[1], adapter->currmacaddr[2],
238 adapter->currmacaddr[3], adapter->currmacaddr[4],
239 adapter->currmacaddr[5]);
240 DBG_ERROR(" (%s) mac %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
241 adapter->netdev->name, adapter->macaddr[0],
242 adapter->macaddr[1], adapter->macaddr[2],
243 adapter->macaddr[3], adapter->macaddr[4],
244 adapter->macaddr[5]);
245 return;
248 /* SXG Globals */
249 static struct sxg_driver SxgDriver;
251 #ifdef ATKDBG
252 static struct sxg_trace_buffer LSxgTraceBuffer;
253 #endif /* ATKDBG */
254 static struct sxg_trace_buffer *SxgTraceBuffer = NULL;
257 * MSI Related API's
259 int sxg_register_intr(struct adapter_t *adapter);
260 int sxg_enable_msi_x(struct adapter_t *adapter);
261 int sxg_add_msi_isr(struct adapter_t *adapter);
262 void sxg_remove_msix_isr(struct adapter_t *adapter);
263 int sxg_set_interrupt_capability(struct adapter_t *adapter);
265 int sxg_set_interrupt_capability(struct adapter_t *adapter)
267 int ret;
269 ret = sxg_enable_msi_x(adapter);
270 if (ret != STATUS_SUCCESS) {
271 adapter->msi_enabled = FALSE;
272 DBG_ERROR("sxg_set_interrupt_capability MSI-X Disable\n");
273 } else {
274 adapter->msi_enabled = TRUE;
275 DBG_ERROR("sxg_set_interrupt_capability MSI-X Enable\n");
277 return ret;
280 int sxg_register_intr(struct adapter_t *adapter)
282 int ret = 0;
284 if (adapter->msi_enabled) {
285 ret = sxg_add_msi_isr(adapter);
287 else {
288 DBG_ERROR("MSI-X Enable Failed. Using Pin INT\n");
289 ret = sxg_register_interrupt(adapter);
290 if (ret != STATUS_SUCCESS) {
291 DBG_ERROR("sxg_register_interrupt Failed\n");
294 return ret;
297 int sxg_enable_msi_x(struct adapter_t *adapter)
299 int ret;
301 adapter->nr_msix_entries = 1;
302 adapter->msi_entries = kmalloc(adapter->nr_msix_entries *
303 sizeof(struct msix_entry),GFP_KERNEL);
304 if (!adapter->msi_entries) {
305 DBG_ERROR("%s:MSI Entries memory allocation Failed\n",__func__);
306 return -ENOMEM;
308 memset(adapter->msi_entries, 0, adapter->nr_msix_entries *
309 sizeof(struct msix_entry));
311 ret = pci_enable_msix(adapter->pcidev, adapter->msi_entries,
312 adapter->nr_msix_entries);
313 if (ret) {
314 DBG_ERROR("Enabling MSI-X with %d vectors failed\n",
315 adapter->nr_msix_entries);
316 /*Should try with less vector returned.*/
317 kfree(adapter->msi_entries);
318 return STATUS_FAILURE; /*MSI-X Enable failed.*/
320 return (STATUS_SUCCESS);
323 int sxg_add_msi_isr(struct adapter_t *adapter)
325 int ret,i;
327 if (!adapter->intrregistered) {
328 for (i=0; i<adapter->nr_msix_entries; i++) {
329 ret = request_irq (adapter->msi_entries[i].vector,
330 sxg_isr,
331 IRQF_SHARED,
332 adapter->netdev->name,
333 adapter->netdev);
334 if (ret) {
335 DBG_ERROR("sxg: MSI-X request_irq (%s) "
336 "FAILED [%x]\n", adapter->netdev->name,
337 ret);
338 return (ret);
342 adapter->msi_enabled = TRUE;
343 adapter->intrregistered = 1;
344 adapter->IntRegistered = TRUE;
345 return (STATUS_SUCCESS);
348 void sxg_remove_msix_isr(struct adapter_t *adapter)
350 int i,vector;
351 struct net_device *netdev = adapter->netdev;
353 for(i=0; i< adapter->nr_msix_entries;i++)
355 vector = adapter->msi_entries[i].vector;
356 DBG_ERROR("%s : Freeing IRQ vector#%d\n",__FUNCTION__,vector);
357 free_irq(vector,netdev);
362 static void sxg_remove_isr(struct adapter_t *adapter)
364 struct net_device *netdev = adapter->netdev;
365 if (adapter->msi_enabled)
366 sxg_remove_msix_isr(adapter);
367 else
368 free_irq(adapter->netdev->irq, netdev);
371 void sxg_reset_interrupt_capability(struct adapter_t *adapter)
373 if (adapter->msi_enabled) {
374 pci_disable_msix(adapter->pcidev);
375 kfree(adapter->msi_entries);
376 adapter->msi_entries = NULL;
378 return;
382 * sxg_download_microcode
384 * Download Microcode to Sahara adapter
386 * Arguments -
387 * adapter - A pointer to our adapter structure
388 * UcodeSel - microcode file selection
390 * Return
391 * int
393 static bool sxg_download_microcode(struct adapter_t *adapter,
394 enum SXG_UCODE_SEL UcodeSel)
396 struct sxg_hw_regs *HwRegs = adapter->HwRegs;
397 u32 Section;
398 u32 ThisSectionSize;
399 u32 *Instruction = NULL;
400 u32 BaseAddress, AddressOffset, Address;
401 /* u32 Failure; */
402 u32 ValueRead;
403 u32 i;
404 u32 numSections = 0;
405 u32 sectionSize[16];
406 u32 sectionStart[16];
408 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DnldUcod",
409 adapter, 0, 0, 0);
410 DBG_ERROR("sxg: %s ENTER\n", __func__);
412 switch (UcodeSel) {
413 case SXG_UCODE_SAHARA: /* Sahara operational ucode */
414 numSections = SNumSections;
415 for (i = 0; i < numSections; i++) {
416 sectionSize[i] = SSectionSize[i];
417 sectionStart[i] = SSectionStart[i];
419 break;
420 default:
421 printk(KERN_ERR KBUILD_MODNAME
422 ": Woah, big error with the microcode!\n");
423 break;
426 DBG_ERROR("sxg: RESET THE CARD\n");
427 /* First, reset the card */
428 WRITE_REG(HwRegs->Reset, 0xDEAD, FLUSH);
431 * Download each section of the microcode as specified in
432 * its download file. The *download.c file is generated using
433 * the saharaobjtoc facility which converts the metastep .obj
434 * file to a .c file which contains a two dimentional array.
436 for (Section = 0; Section < numSections; Section++) {
437 DBG_ERROR("sxg: SECTION # %d\n", Section);
438 switch (UcodeSel) {
439 case SXG_UCODE_SAHARA:
440 Instruction = (u32 *) & SaharaUCode[Section][0];
441 break;
442 default:
443 ASSERT(0);
444 break;
446 BaseAddress = sectionStart[Section];
447 /* Size in instructions */
448 ThisSectionSize = sectionSize[Section] / 12;
449 for (AddressOffset = 0; AddressOffset < ThisSectionSize;
450 AddressOffset++) {
451 Address = BaseAddress + AddressOffset;
452 ASSERT((Address & ~MICROCODE_ADDRESS_MASK) == 0);
453 /* Write instruction bits 31 - 0 */
454 WRITE_REG(HwRegs->UcodeDataLow, *Instruction, FLUSH);
455 /* Write instruction bits 63-32 */
456 WRITE_REG(HwRegs->UcodeDataMiddle, *(Instruction + 1),
457 FLUSH);
458 /* Write instruction bits 95-64 */
459 WRITE_REG(HwRegs->UcodeDataHigh, *(Instruction + 2),
460 FLUSH);
461 /* Write instruction address with the WRITE bit set */
462 WRITE_REG(HwRegs->UcodeAddr,
463 (Address | MICROCODE_ADDRESS_WRITE), FLUSH);
465 * Sahara bug in the ucode download logic - the write to DataLow
466 * for the next instruction could get corrupted. To avoid this,
467 * write to DataLow again for this instruction (which may get
468 * corrupted, but it doesn't matter), then increment the address
469 * and write the data for the next instruction to DataLow. That
470 * write should succeed.
472 WRITE_REG(HwRegs->UcodeDataLow, *Instruction, TRUE);
473 /* Advance 3 u32S to start of next instruction */
474 Instruction += 3;
478 * Now repeat the entire operation reading the instruction back and
479 * checking for parity errors
481 for (Section = 0; Section < numSections; Section++) {
482 DBG_ERROR("sxg: check SECTION # %d\n", Section);
483 switch (UcodeSel) {
484 case SXG_UCODE_SAHARA:
485 Instruction = (u32 *) & SaharaUCode[Section][0];
486 break;
487 default:
488 ASSERT(0);
489 break;
491 BaseAddress = sectionStart[Section];
492 /* Size in instructions */
493 ThisSectionSize = sectionSize[Section] / 12;
494 for (AddressOffset = 0; AddressOffset < ThisSectionSize;
495 AddressOffset++) {
496 Address = BaseAddress + AddressOffset;
497 /* Write the address with the READ bit set */
498 WRITE_REG(HwRegs->UcodeAddr,
499 (Address | MICROCODE_ADDRESS_READ), FLUSH);
500 /* Read it back and check parity bit. */
501 READ_REG(HwRegs->UcodeAddr, ValueRead);
502 if (ValueRead & MICROCODE_ADDRESS_PARITY) {
503 DBG_ERROR("sxg: %s PARITY ERROR\n",
504 __func__);
506 return FALSE; /* Parity error */
508 ASSERT((ValueRead & MICROCODE_ADDRESS_MASK) == Address);
509 /* Read the instruction back and compare */
510 READ_REG(HwRegs->UcodeDataLow, ValueRead);
511 if (ValueRead != *Instruction) {
512 DBG_ERROR("sxg: %s MISCOMPARE LOW\n",
513 __func__);
514 return FALSE; /* Miscompare */
516 READ_REG(HwRegs->UcodeDataMiddle, ValueRead);
517 if (ValueRead != *(Instruction + 1)) {
518 DBG_ERROR("sxg: %s MISCOMPARE MIDDLE\n",
519 __func__);
520 return FALSE; /* Miscompare */
522 READ_REG(HwRegs->UcodeDataHigh, ValueRead);
523 if (ValueRead != *(Instruction + 2)) {
524 DBG_ERROR("sxg: %s MISCOMPARE HIGH\n",
525 __func__);
526 return FALSE; /* Miscompare */
528 /* Advance 3 u32S to start of next instruction */
529 Instruction += 3;
533 /* Everything OK, Go. */
534 WRITE_REG(HwRegs->UcodeAddr, MICROCODE_ADDRESS_GO, FLUSH);
537 * Poll the CardUp register to wait for microcode to initialize
538 * Give up after 10,000 attemps (500ms).
540 for (i = 0; i < 10000; i++) {
541 udelay(50);
542 READ_REG(adapter->UcodeRegs[0].CardUp, ValueRead);
543 if (ValueRead == 0xCAFE) {
544 DBG_ERROR("sxg: %s BOO YA 0xCAFE\n", __func__);
545 break;
548 if (i == 10000) {
549 DBG_ERROR("sxg: %s TIMEOUT\n", __func__);
551 return FALSE; /* Timeout */
554 * Now write the LoadSync register. This is used to
555 * synchronize with the card so it can scribble on the memory
556 * that contained 0xCAFE from the "CardUp" step above
558 if (UcodeSel == SXG_UCODE_SAHARA) {
559 WRITE_REG(adapter->UcodeRegs[0].LoadSync, 0, FLUSH);
562 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDnldUcd",
563 adapter, 0, 0, 0);
564 DBG_ERROR("sxg: %s EXIT\n", __func__);
566 return (TRUE);
570 * sxg_allocate_resources - Allocate memory and locks
572 * Arguments -
573 * adapter - A pointer to our adapter structure
575 * Return - int
577 static int sxg_allocate_resources(struct adapter_t *adapter)
579 int status;
580 u32 i;
581 u32 RssIds, IsrCount;
582 /* struct sxg_xmt_ring *XmtRing; */
583 /* struct sxg_rcv_ring *RcvRing; */
585 DBG_ERROR("%s ENTER\n", __func__);
587 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AllocRes",
588 adapter, 0, 0, 0);
590 /* Windows tells us how many CPUs it plans to use for */
591 /* RSS */
592 RssIds = SXG_RSS_CPU_COUNT(adapter);
593 IsrCount = adapter->msi_enabled ? RssIds : 1;
595 DBG_ERROR("%s Setup the spinlocks\n", __func__);
597 /* Allocate spinlocks and initialize listheads first. */
598 spin_lock_init(&adapter->RcvQLock);
599 spin_lock_init(&adapter->SglQLock);
600 spin_lock_init(&adapter->XmtZeroLock);
601 spin_lock_init(&adapter->Bit64RegLock);
602 spin_lock_init(&adapter->AdapterLock);
603 atomic_set(&adapter->pending_allocations, 0);
605 DBG_ERROR("%s Setup the lists\n", __func__);
607 InitializeListHead(&adapter->FreeRcvBuffers);
608 InitializeListHead(&adapter->FreeRcvBlocks);
609 InitializeListHead(&adapter->AllRcvBlocks);
610 InitializeListHead(&adapter->FreeSglBuffers);
611 InitializeListHead(&adapter->AllSglBuffers);
614 * Mark these basic allocations done. This flags essentially
615 * tells the SxgFreeResources routine that it can grab spinlocks
616 * and reference listheads.
618 adapter->BasicAllocations = TRUE;
620 * Main allocation loop. Start with the maximum supported by
621 * the microcode and back off if memory allocation
622 * fails. If we hit a minimum, fail.
625 for (;;) {
626 DBG_ERROR("%s Allocate XmtRings size[%x]\n", __func__,
627 (unsigned int)(sizeof(struct sxg_xmt_ring) * 1));
630 * Start with big items first - receive and transmit rings.
631 * At the moment I'm going to keep the ring size fixed and
632 * adjust the TCBs if we fail. Later we might
633 * consider reducing the ring size as well..
635 adapter->XmtRings = pci_alloc_consistent(adapter->pcidev,
636 sizeof(struct sxg_xmt_ring) *
638 &adapter->PXmtRings);
639 DBG_ERROR("%s XmtRings[%p]\n", __func__, adapter->XmtRings);
641 if (!adapter->XmtRings) {
642 goto per_tcb_allocation_failed;
644 memset(adapter->XmtRings, 0, sizeof(struct sxg_xmt_ring) * 1);
646 DBG_ERROR("%s Allocate RcvRings size[%x]\n", __func__,
647 (unsigned int)(sizeof(struct sxg_rcv_ring) * 1));
648 adapter->RcvRings =
649 pci_alloc_consistent(adapter->pcidev,
650 sizeof(struct sxg_rcv_ring) * 1,
651 &adapter->PRcvRings);
652 DBG_ERROR("%s RcvRings[%p]\n", __func__, adapter->RcvRings);
653 if (!adapter->RcvRings) {
654 goto per_tcb_allocation_failed;
656 memset(adapter->RcvRings, 0, sizeof(struct sxg_rcv_ring) * 1);
657 adapter->ucode_stats = kzalloc(sizeof(struct sxg_ucode_stats), GFP_ATOMIC);
658 adapter->pucode_stats = pci_map_single(adapter->pcidev,
659 adapter->ucode_stats,
660 sizeof(struct sxg_ucode_stats),
661 PCI_DMA_FROMDEVICE);
662 // memset(adapter->ucode_stats, 0, sizeof(struct sxg_ucode_stats));
663 break;
665 per_tcb_allocation_failed:
666 /* an allocation failed. Free any successful allocations. */
667 if (adapter->XmtRings) {
668 pci_free_consistent(adapter->pcidev,
669 sizeof(struct sxg_xmt_ring) * 1,
670 adapter->XmtRings,
671 adapter->PXmtRings);
672 adapter->XmtRings = NULL;
674 if (adapter->RcvRings) {
675 pci_free_consistent(adapter->pcidev,
676 sizeof(struct sxg_rcv_ring) * 1,
677 adapter->RcvRings,
678 adapter->PRcvRings);
679 adapter->RcvRings = NULL;
681 /* Loop around and try again.... */
682 if (adapter->ucode_stats) {
683 pci_unmap_single(adapter->pcidev,
684 sizeof(struct sxg_ucode_stats),
685 adapter->pucode_stats, PCI_DMA_FROMDEVICE);
686 adapter->ucode_stats = NULL;
691 DBG_ERROR("%s Initialize RCV ZERO and XMT ZERO rings\n", __func__);
692 /* Initialize rcv zero and xmt zero rings */
693 SXG_INITIALIZE_RING(adapter->RcvRingZeroInfo, SXG_RCV_RING_SIZE);
694 SXG_INITIALIZE_RING(adapter->XmtRingZeroInfo, SXG_XMT_RING_SIZE);
696 /* Sanity check receive data structure format */
697 /* ASSERT((adapter->ReceiveBufferSize == SXG_RCV_DATA_BUFFER_SIZE) ||
698 (adapter->ReceiveBufferSize == SXG_RCV_JUMBO_BUFFER_SIZE)); */
699 ASSERT(sizeof(struct sxg_rcv_descriptor_block) ==
700 SXG_RCV_DESCRIPTOR_BLOCK_SIZE);
703 * Allocate receive data buffers. We allocate a block of buffers and
704 * a corresponding descriptor block at once. See sxghw.h:SXG_RCV_BLOCK
706 for (i = 0; i < SXG_INITIAL_RCV_DATA_BUFFERS;
707 i += SXG_RCV_DESCRIPTORS_PER_BLOCK) {
708 status = sxg_allocate_buffer_memory(adapter,
709 SXG_RCV_BLOCK_SIZE(SXG_RCV_DATA_HDR_SIZE),
710 SXG_BUFFER_TYPE_RCV);
711 if (status != STATUS_SUCCESS)
712 return status;
715 * NBL resource allocation can fail in the 'AllocateComplete' routine,
716 * which doesn't return status. Make sure we got the number of buffers
717 * we requested
719 if (adapter->FreeRcvBufferCount < SXG_INITIAL_RCV_DATA_BUFFERS) {
720 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF6",
721 adapter, adapter->FreeRcvBufferCount, SXG_MAX_ENTRIES,
723 return (STATUS_RESOURCES);
726 DBG_ERROR("%s Allocate EventRings size[%x]\n", __func__,
727 (unsigned int)(sizeof(struct sxg_event_ring) * RssIds));
729 /* Allocate event queues. */
730 adapter->EventRings = pci_alloc_consistent(adapter->pcidev,
731 sizeof(struct sxg_event_ring) *
732 RssIds,
733 &adapter->PEventRings);
735 if (!adapter->EventRings) {
736 /* Caller will call SxgFreeAdapter to clean up above
737 * allocations */
738 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF8",
739 adapter, SXG_MAX_ENTRIES, 0, 0);
740 status = STATUS_RESOURCES;
741 goto per_tcb_allocation_failed;
743 memset(adapter->EventRings, 0, sizeof(struct sxg_event_ring) * RssIds);
745 DBG_ERROR("%s Allocate ISR size[%x]\n", __func__, IsrCount);
746 /* Allocate ISR */
747 adapter->Isr = pci_alloc_consistent(adapter->pcidev,
748 IsrCount, &adapter->PIsr);
749 if (!adapter->Isr) {
750 /* Caller will call SxgFreeAdapter to clean up above
751 * allocations */
752 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF9",
753 adapter, SXG_MAX_ENTRIES, 0, 0);
754 status = STATUS_RESOURCES;
755 goto per_tcb_allocation_failed;
757 memset(adapter->Isr, 0, sizeof(u32) * IsrCount);
759 DBG_ERROR("%s Allocate shared XMT ring zero index location size[%x]\n",
760 __func__, (unsigned int)sizeof(u32));
762 /* Allocate shared XMT ring zero index location */
763 adapter->XmtRingZeroIndex = pci_alloc_consistent(adapter->pcidev,
764 sizeof(u32),
765 &adapter->
766 PXmtRingZeroIndex);
767 if (!adapter->XmtRingZeroIndex) {
768 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF10",
769 adapter, SXG_MAX_ENTRIES, 0, 0);
770 status = STATUS_RESOURCES;
771 goto per_tcb_allocation_failed;
773 memset(adapter->XmtRingZeroIndex, 0, sizeof(u32));
775 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlcResS",
776 adapter, SXG_MAX_ENTRIES, 0, 0);
778 return status;
782 * sxg_config_pci -
784 * Set up PCI Configuration space
786 * Arguments -
787 * pcidev - A pointer to our adapter structure
789 static void sxg_config_pci(struct pci_dev *pcidev)
791 u16 pci_command;
792 u16 new_command;
794 pci_read_config_word(pcidev, PCI_COMMAND, &pci_command);
795 DBG_ERROR("sxg: %s PCI command[%4.4x]\n", __func__, pci_command);
796 /* Set the command register */
797 new_command = pci_command | (
798 /* Memory Space Enable */
799 PCI_COMMAND_MEMORY |
800 /* Bus master enable */
801 PCI_COMMAND_MASTER |
802 /* Memory write and invalidate */
803 PCI_COMMAND_INVALIDATE |
804 /* Parity error response */
805 PCI_COMMAND_PARITY |
806 /* System ERR */
807 PCI_COMMAND_SERR |
808 /* Fast back-to-back */
809 PCI_COMMAND_FAST_BACK);
810 if (pci_command != new_command) {
811 DBG_ERROR("%s -- Updating PCI COMMAND register %4.4x->%4.4x.\n",
812 __func__, pci_command, new_command);
813 pci_write_config_word(pcidev, PCI_COMMAND, new_command);
818 * sxg_read_config
819 * @adapter : Pointer to the adapter structure for the card
820 * This function will read the configuration data from EEPROM/FLASH
822 static inline int sxg_read_config(struct adapter_t *adapter)
824 /* struct sxg_config data; */
825 struct sw_cfg_data *data;
826 dma_addr_t p_addr;
827 unsigned long status;
828 unsigned long i;
830 data = pci_alloc_consistent(adapter->pcidev,
831 sizeof(struct sw_cfg_data), &p_addr);
832 if(!data) {
834 * We cant get even this much memory. Raise a hell
835 * Get out of here
837 printk(KERN_ERR"%s : Could not allocate memory for reading \
838 EEPROM\n", __FUNCTION__);
839 return -ENOMEM;
842 WRITE_REG(adapter->UcodeRegs[0].ConfigStat, SXG_CFG_TIMEOUT, TRUE);
844 WRITE_REG64(adapter, adapter->UcodeRegs[0].Config, p_addr, 0);
845 for(i=0; i<1000; i++) {
846 READ_REG(adapter->UcodeRegs[0].ConfigStat, status);
847 if (status != SXG_CFG_TIMEOUT)
848 break;
849 mdelay(1); /* Do we really need this */
852 switch(status) {
853 /* Config read from EEPROM succeeded */
854 case SXG_CFG_LOAD_EEPROM:
855 /* Config read from Flash succeeded */
856 case SXG_CFG_LOAD_FLASH:
857 /* Copy the MAC address to adapter structure */
858 /* TODO: We are not doing the remaining part : FRU,
859 * etc
861 memcpy(adapter->macaddr, data->MacAddr[0].MacAddr,
862 sizeof(struct sxg_config_mac));
863 break;
864 case SXG_CFG_TIMEOUT:
865 case SXG_CFG_LOAD_INVALID:
866 case SXG_CFG_LOAD_ERROR:
867 default: /* Fix default handler later */
868 printk(KERN_WARNING"%s : We could not read the config \
869 word. Status = %ld\n", __FUNCTION__, status);
870 break;
872 pci_free_consistent(adapter->pcidev, sizeof(struct sw_cfg_data), data,
873 p_addr);
874 if (adapter->netdev) {
875 memcpy(adapter->netdev->dev_addr, adapter->currmacaddr, 6);
876 memcpy(adapter->netdev->perm_addr, adapter->currmacaddr, 6);
878 sxg_dbg_macaddrs(adapter);
880 return status;
883 static int sxg_entry_probe(struct pci_dev *pcidev,
884 const struct pci_device_id *pci_tbl_entry)
886 static int did_version = 0;
887 int err;
888 struct net_device *netdev;
889 struct adapter_t *adapter;
890 void __iomem *memmapped_ioaddr;
891 u32 status = 0;
892 ulong mmio_start = 0;
893 ulong mmio_len = 0;
895 DBG_ERROR("sxg: %s 2.6 VERSION ENTER jiffies[%lx] cpu %d\n",
896 __func__, jiffies, smp_processor_id());
898 /* Initialize trace buffer */
899 #ifdef ATKDBG
900 SxgTraceBuffer = &LSxgTraceBuffer;
901 SXG_TRACE_INIT(SxgTraceBuffer, TRACE_NOISY);
902 #endif
904 sxg_global.dynamic_intagg = dynamic_intagg;
906 err = pci_enable_device(pcidev);
908 DBG_ERROR("Call pci_enable_device(%p) status[%x]\n", pcidev, err);
909 if (err) {
910 return err;
913 if (sxg_debug > 0 && did_version++ == 0) {
914 printk(KERN_INFO "%s\n", sxg_banner);
915 printk(KERN_INFO "%s\n", SXG_DRV_VERSION);
918 if (!(err = pci_set_dma_mask(pcidev, DMA_64BIT_MASK))) {
919 DBG_ERROR("pci_set_dma_mask(DMA_64BIT_MASK) successful\n");
920 } else {
921 if ((err = pci_set_dma_mask(pcidev, DMA_32BIT_MASK))) {
922 DBG_ERROR
923 ("No usable DMA configuration, aborting err[%x]\n",
924 err);
925 return err;
927 DBG_ERROR("pci_set_dma_mask(DMA_32BIT_MASK) successful\n");
930 DBG_ERROR("Call pci_request_regions\n");
932 err = pci_request_regions(pcidev, sxg_driver_name);
933 if (err) {
934 DBG_ERROR("pci_request_regions FAILED err[%x]\n", err);
935 return err;
938 DBG_ERROR("call pci_set_master\n");
939 pci_set_master(pcidev);
941 DBG_ERROR("call alloc_etherdev\n");
942 netdev = alloc_etherdev(sizeof(struct adapter_t));
943 if (!netdev) {
944 err = -ENOMEM;
945 goto err_out_exit_sxg_probe;
947 DBG_ERROR("alloc_etherdev for slic netdev[%p]\n", netdev);
949 SET_NETDEV_DEV(netdev, &pcidev->dev);
951 pci_set_drvdata(pcidev, netdev);
952 adapter = netdev_priv(netdev);
953 adapter->netdev = netdev;
954 adapter->pcidev = pcidev;
956 mmio_start = pci_resource_start(pcidev, 0);
957 mmio_len = pci_resource_len(pcidev, 0);
959 DBG_ERROR("sxg: call ioremap(mmio_start[%lx], mmio_len[%lx])\n",
960 mmio_start, mmio_len);
962 memmapped_ioaddr = ioremap(mmio_start, mmio_len);
963 DBG_ERROR("sxg: %s MEMMAPPED_IOADDR [%p]\n", __func__,
964 memmapped_ioaddr);
965 if (!memmapped_ioaddr) {
966 DBG_ERROR("%s cannot remap MMIO region %lx @ %lx\n",
967 __func__, mmio_len, mmio_start);
968 goto err_out_free_mmio_region_0;
971 DBG_ERROR("sxg: %s found Alacritech SXG PCI, MMIO at %p, start[%lx] \
972 len[%lx], IRQ %d.\n", __func__, memmapped_ioaddr, mmio_start,
973 mmio_len, pcidev->irq);
975 adapter->HwRegs = (void *)memmapped_ioaddr;
976 adapter->base_addr = memmapped_ioaddr;
978 mmio_start = pci_resource_start(pcidev, 2);
979 mmio_len = pci_resource_len(pcidev, 2);
981 DBG_ERROR("sxg: call ioremap(mmio_start[%lx], mmio_len[%lx])\n",
982 mmio_start, mmio_len);
984 memmapped_ioaddr = ioremap(mmio_start, mmio_len);
985 DBG_ERROR("sxg: %s MEMMAPPED_IOADDR [%p]\n", __func__,
986 memmapped_ioaddr);
987 if (!memmapped_ioaddr) {
988 DBG_ERROR("%s cannot remap MMIO region %lx @ %lx\n",
989 __func__, mmio_len, mmio_start);
990 goto err_out_free_mmio_region_2;
993 DBG_ERROR("sxg: %s found Alacritech SXG PCI, MMIO at %p, "
994 "start[%lx] len[%lx], IRQ %d.\n", __func__,
995 memmapped_ioaddr, mmio_start, mmio_len, pcidev->irq);
997 adapter->UcodeRegs = (void *)memmapped_ioaddr;
999 adapter->State = SXG_STATE_INITIALIZING;
1001 * Maintain a list of all adapters anchored by
1002 * the global SxgDriver structure.
1004 adapter->Next = SxgDriver.Adapters;
1005 SxgDriver.Adapters = adapter;
1006 adapter->AdapterID = ++SxgDriver.AdapterID;
1008 /* Initialize CRC table used to determine multicast hash */
1009 sxg_mcast_init_crc32();
1011 adapter->JumboEnabled = FALSE;
1012 adapter->RssEnabled = FALSE;
1013 if (adapter->JumboEnabled) {
1014 adapter->FrameSize = JUMBOMAXFRAME;
1015 adapter->ReceiveBufferSize = SXG_RCV_JUMBO_BUFFER_SIZE;
1016 } else {
1017 adapter->FrameSize = ETHERMAXFRAME;
1018 adapter->ReceiveBufferSize = SXG_RCV_DATA_BUFFER_SIZE;
1022 * status = SXG_READ_EEPROM(adapter);
1023 * if (!status) {
1024 * goto sxg_init_bad;
1028 DBG_ERROR("sxg: %s ENTER sxg_config_pci\n", __func__);
1029 sxg_config_pci(pcidev);
1030 DBG_ERROR("sxg: %s EXIT sxg_config_pci\n", __func__);
1032 DBG_ERROR("sxg: %s ENTER sxg_init_driver\n", __func__);
1033 sxg_init_driver();
1034 DBG_ERROR("sxg: %s EXIT sxg_init_driver\n", __func__);
1036 adapter->vendid = pci_tbl_entry->vendor;
1037 adapter->devid = pci_tbl_entry->device;
1038 adapter->subsysid = pci_tbl_entry->subdevice;
1039 adapter->slotnumber = ((pcidev->devfn >> 3) & 0x1F);
1040 adapter->functionnumber = (pcidev->devfn & 0x7);
1041 adapter->memorylength = pci_resource_len(pcidev, 0);
1042 adapter->irq = pcidev->irq;
1043 adapter->next_netdevice = head_netdevice;
1044 head_netdevice = netdev;
1045 adapter->port = 0; /*adapter->functionnumber; */
1047 /* Allocate memory and other resources */
1048 DBG_ERROR("sxg: %s ENTER sxg_allocate_resources\n", __func__);
1049 status = sxg_allocate_resources(adapter);
1050 DBG_ERROR("sxg: %s EXIT sxg_allocate_resources status %x\n",
1051 __func__, status);
1052 if (status != STATUS_SUCCESS) {
1053 goto err_out_unmap;
1056 DBG_ERROR("sxg: %s ENTER sxg_download_microcode\n", __func__);
1057 if (sxg_download_microcode(adapter, SXG_UCODE_SAHARA)) {
1058 DBG_ERROR("sxg: %s ENTER sxg_adapter_set_hwaddr\n",
1059 __func__);
1060 sxg_read_config(adapter);
1061 status = sxg_adapter_set_hwaddr(adapter);
1062 } else {
1063 adapter->state = ADAPT_FAIL;
1064 adapter->linkstate = LINK_DOWN;
1065 DBG_ERROR("sxg_download_microcode FAILED status[%x]\n", status);
1068 netdev->base_addr = (unsigned long)adapter->base_addr;
1069 netdev->irq = adapter->irq;
1070 netdev->open = sxg_entry_open;
1071 netdev->stop = sxg_entry_halt;
1072 netdev->hard_start_xmit = sxg_send_packets;
1073 netdev->do_ioctl = sxg_ioctl;
1074 netdev->change_mtu = sxg_change_mtu;
1075 #if XXXTODO
1076 netdev->set_mac_address = sxg_mac_set_address;
1077 #endif
1078 netdev->get_stats = sxg_get_stats;
1079 netdev->set_multicast_list = sxg_mcast_set_list;
1080 SET_ETHTOOL_OPS(netdev, &sxg_nic_ethtool_ops);
1081 err = sxg_set_interrupt_capability(adapter);
1082 if (err != STATUS_SUCCESS)
1083 DBG_ERROR("Cannot enable MSI-X capability\n");
1085 strcpy(netdev->name, "eth%d");
1086 /* strcpy(netdev->name, pci_name(pcidev)); */
1087 if ((err = register_netdev(netdev))) {
1088 DBG_ERROR("Cannot register net device, aborting. %s\n",
1089 netdev->name);
1090 goto err_out_unmap;
1093 netif_napi_add(netdev, &adapter->napi,
1094 sxg_poll, SXG_NETDEV_WEIGHT);
1095 DBG_ERROR
1096 ("sxg: %s addr 0x%lx, irq %d, MAC addr \
1097 %02X:%02X:%02X:%02X:%02X:%02X\n",
1098 netdev->name, netdev->base_addr, pcidev->irq, netdev->dev_addr[0],
1099 netdev->dev_addr[1], netdev->dev_addr[2], netdev->dev_addr[3],
1100 netdev->dev_addr[4], netdev->dev_addr[5]);
1102 /* sxg_init_bad: */
1103 ASSERT(status == FALSE);
1104 /* sxg_free_adapter(adapter); */
1106 DBG_ERROR("sxg: %s EXIT status[%x] jiffies[%lx] cpu %d\n", __func__,
1107 status, jiffies, smp_processor_id());
1108 return status;
1110 err_out_unmap:
1111 sxg_free_resources(adapter);
1113 err_out_free_mmio_region_2:
1115 mmio_start = pci_resource_start(pcidev, 2);
1116 mmio_len = pci_resource_len(pcidev, 2);
1117 release_mem_region(mmio_start, mmio_len);
1119 err_out_free_mmio_region_0:
1121 mmio_start = pci_resource_start(pcidev, 0);
1122 mmio_len = pci_resource_len(pcidev, 0);
1124 release_mem_region(mmio_start, mmio_len);
1126 err_out_exit_sxg_probe:
1128 DBG_ERROR("%s EXIT jiffies[%lx] cpu %d\n", __func__, jiffies,
1129 smp_processor_id());
1131 pci_disable_device(pcidev);
1132 DBG_ERROR("sxg: %s deallocate device\n", __FUNCTION__);
1133 kfree(netdev);
1134 printk("Exit %s, Sxg driver loading failed..\n", __FUNCTION__);
1136 return -ENODEV;
1140 * LINE BASE Interrupt routines..
1142 * sxg_disable_interrupt
1144 * DisableInterrupt Handler
1146 * Arguments:
1148 * adapter: Our adapter structure
1150 * Return Value:
1151 * None.
1153 static void sxg_disable_interrupt(struct adapter_t *adapter)
1155 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DisIntr",
1156 adapter, adapter->InterruptsEnabled, 0, 0);
1157 /* For now, RSS is disabled with line based interrupts */
1158 ASSERT(adapter->RssEnabled == FALSE);
1159 /* Turn off interrupts by writing to the icr register. */
1160 WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_DISABLE), TRUE);
1162 adapter->InterruptsEnabled = 0;
1164 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDisIntr",
1165 adapter, adapter->InterruptsEnabled, 0, 0);
1169 * sxg_enable_interrupt
1171 * EnableInterrupt Handler
1173 * Arguments:
1175 * adapter: Our adapter structure
1177 * Return Value:
1178 * None.
1180 static void sxg_enable_interrupt(struct adapter_t *adapter)
1182 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "EnIntr",
1183 adapter, adapter->InterruptsEnabled, 0, 0);
1184 /* For now, RSS is disabled with line based interrupts */
1185 ASSERT(adapter->RssEnabled == FALSE);
1186 /* Turn on interrupts by writing to the icr register. */
1187 WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_ENABLE), TRUE);
1189 adapter->InterruptsEnabled = 1;
1191 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XEnIntr",
1192 adapter, 0, 0, 0);
1196 * sxg_isr - Process an line-based interrupt
1198 * Arguments:
1199 * Context - Our adapter structure
1200 * QueueDefault - Output parameter to queue to default CPU
1201 * TargetCpus - Output bitmap to schedule DPC's
1203 * Return Value: TRUE if our interrupt
1205 static irqreturn_t sxg_isr(int irq, void *dev_id)
1207 struct net_device *dev = (struct net_device *) dev_id;
1208 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
1210 if(adapter->state != ADAPT_UP)
1211 return IRQ_NONE;
1212 adapter->Stats.NumInts++;
1213 if (adapter->Isr[0] == 0) {
1215 * The SLIC driver used to experience a number of spurious
1216 * interrupts due to the delay associated with the masking of
1217 * the interrupt (we'd bounce back in here). If we see that
1218 * again with Sahara,add a READ_REG of the Icr register after
1219 * the WRITE_REG below.
1221 adapter->Stats.FalseInts++;
1222 return IRQ_NONE;
1225 * Move the Isr contents and clear the value in
1226 * shared memory, and mask interrupts
1228 /* ASSERT(adapter->IsrDpcsPending == 0); */
1229 #if XXXTODO /* RSS Stuff */
1231 * If RSS is enabled and the ISR specifies SXG_ISR_EVENT, then
1232 * schedule DPC's based on event queues.
1234 if (adapter->RssEnabled && (adapter->IsrCopy[0] & SXG_ISR_EVENT)) {
1235 for (i = 0;
1236 i < adapter->RssSystemInfo->ProcessorInfo.RssCpuCount;
1237 i++) {
1238 struct sxg_event_ring *EventRing =
1239 &adapter->EventRings[i];
1240 struct sxg_event *Event =
1241 &EventRing->Ring[adapter->NextEvent[i]];
1242 unsigned char Cpu =
1243 adapter->RssSystemInfo->RssIdToCpu[i];
1244 if (Event->Status & EVENT_STATUS_VALID) {
1245 adapter->IsrDpcsPending++;
1246 CpuMask |= (1 << Cpu);
1251 * Now, either schedule the CPUs specified by the CpuMask,
1252 * or queue default
1254 if (CpuMask) {
1255 *QueueDefault = FALSE;
1256 } else {
1257 adapter->IsrDpcsPending = 1;
1258 *QueueDefault = TRUE;
1260 *TargetCpus = CpuMask;
1261 #endif
1262 sxg_interrupt(adapter);
1264 return IRQ_HANDLED;
1267 static void sxg_interrupt(struct adapter_t *adapter)
1269 WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_MASK), TRUE);
1271 if (napi_schedule_prep(&adapter->napi)) {
1272 __napi_schedule(&adapter->napi);
1276 static void sxg_handle_interrupt(struct adapter_t *adapter, int *work_done,
1277 int budget)
1279 /* unsigned char RssId = 0; */
1280 u32 NewIsr;
1281 int sxg_napi_continue = 1;
1282 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "HndlIntr",
1283 adapter, adapter->IsrCopy[0], 0, 0);
1284 /* For now, RSS is disabled with line based interrupts */
1285 ASSERT(adapter->RssEnabled == FALSE);
1287 adapter->IsrCopy[0] = adapter->Isr[0];
1288 adapter->Isr[0] = 0;
1290 /* Always process the event queue. */
1291 while (sxg_napi_continue)
1293 sxg_process_event_queue(adapter,
1294 (adapter->RssEnabled ? /*RssId */ 0 : 0),
1295 &sxg_napi_continue, work_done, budget);
1298 #if XXXTODO /* RSS stuff */
1299 if (--adapter->IsrDpcsPending) {
1300 /* We're done. */
1301 ASSERT(adapter->RssEnabled);
1302 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DPCsPend",
1303 adapter, 0, 0, 0);
1304 return;
1306 #endif
1307 /* Last (or only) DPC processes the ISR and clears the interrupt. */
1308 NewIsr = sxg_process_isr(adapter, 0);
1309 /* Reenable interrupts */
1310 adapter->IsrCopy[0] = 0;
1311 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "ClearIsr",
1312 adapter, NewIsr, 0, 0);
1314 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XHndlInt",
1315 adapter, 0, 0, 0);
1317 static int sxg_poll(struct napi_struct *napi, int budget)
1319 struct adapter_t *adapter = container_of(napi, struct adapter_t, napi);
1320 int work_done = 0;
1322 sxg_handle_interrupt(adapter, &work_done, budget);
1324 if (work_done < budget) {
1325 napi_complete(napi);
1326 WRITE_REG(adapter->UcodeRegs[0].Isr, 0, TRUE);
1328 return work_done;
1332 * sxg_process_isr - Process an interrupt. Called from the line-based and
1333 * message based interrupt DPC routines
1335 * Arguments:
1336 * adapter - Our adapter structure
1337 * Queue - The ISR that needs processing
1339 * Return Value:
1340 * None
1342 static int sxg_process_isr(struct adapter_t *adapter, u32 MessageId)
1344 u32 Isr = adapter->IsrCopy[MessageId];
1345 u32 NewIsr = 0;
1347 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "ProcIsr",
1348 adapter, Isr, 0, 0);
1350 /* Error */
1351 if (Isr & SXG_ISR_ERR) {
1352 if (Isr & SXG_ISR_PDQF) {
1353 adapter->Stats.PdqFull++;
1354 DBG_ERROR("%s: SXG_ISR_ERR PDQF!!\n", __func__);
1356 /* No host buffer */
1357 if (Isr & SXG_ISR_RMISS) {
1359 * There is a bunch of code in the SLIC driver which
1360 * attempts to process more receive events per DPC
1361 * if we start to fall behind. We'll probablyd
1362 * need to do something similar here, but hold
1363 * off for now. I don't want to make the code more
1364 * complicated than strictly needed.
1366 adapter->stats.rx_missed_errors++;
1367 if (adapter->stats.rx_missed_errors< 5) {
1368 DBG_ERROR("%s: SXG_ISR_ERR RMISS!!\n",
1369 __func__);
1372 /* Card crash */
1373 if (Isr & SXG_ISR_DEAD) {
1375 * Set aside the crash info and set the adapter state
1376 * to RESET
1378 adapter->CrashCpu = (unsigned char)
1379 ((Isr & SXG_ISR_CPU) >> SXG_ISR_CPU_SHIFT);
1380 adapter->CrashLocation = (ushort) (Isr & SXG_ISR_CRASH);
1381 adapter->Dead = TRUE;
1382 DBG_ERROR("%s: ISR_DEAD %x, CPU: %d\n", __func__,
1383 adapter->CrashLocation, adapter->CrashCpu);
1385 /* Event ring full */
1386 if (Isr & SXG_ISR_ERFULL) {
1388 * Same issue as RMISS, really. This means the
1389 * host is falling behind the card. Need to increase
1390 * event ring size, process more events per interrupt,
1391 * and/or reduce/remove interrupt aggregation.
1393 adapter->Stats.EventRingFull++;
1394 DBG_ERROR("%s: SXG_ISR_ERR EVENT RING FULL!!\n",
1395 __func__);
1397 /* Transmit drop - no DRAM buffers or XMT error */
1398 if (Isr & SXG_ISR_XDROP) {
1399 DBG_ERROR("%s: SXG_ISR_ERR XDROP!!\n", __func__);
1402 /* Slowpath send completions */
1403 if (Isr & SXG_ISR_SPSEND) {
1404 sxg_complete_slow_send(adapter);
1406 /* Dump */
1407 if (Isr & SXG_ISR_UPC) {
1408 /* Maybe change when debug is added.. */
1409 // ASSERT(adapter->DumpCmdRunning);
1410 adapter->DumpCmdRunning = FALSE;
1412 /* Link event */
1413 if (Isr & SXG_ISR_LINK) {
1414 sxg_link_event(adapter);
1416 /* Debug - breakpoint hit */
1417 if (Isr & SXG_ISR_BREAK) {
1419 * At the moment AGDB isn't written to support interactive
1420 * debug sessions. When it is, this interrupt will be used to
1421 * signal AGDB that it has hit a breakpoint. For now, ASSERT.
1423 ASSERT(0);
1425 /* Heartbeat response */
1426 if (Isr & SXG_ISR_PING) {
1427 adapter->PingOutstanding = FALSE;
1429 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XProcIsr",
1430 adapter, Isr, NewIsr, 0);
1432 return (NewIsr);
1436 * sxg_process_event_queue - Process our event queue
1438 * Arguments:
1439 * - adapter - Adapter structure
1440 * - RssId - The event queue requiring processing
1442 * Return Value:
1443 * None.
1445 static u32 sxg_process_event_queue(struct adapter_t *adapter, u32 RssId,
1446 int *sxg_napi_continue, int *work_done, int budget)
1448 struct sxg_event_ring *EventRing = &adapter->EventRings[RssId];
1449 struct sxg_event *Event = &EventRing->Ring[adapter->NextEvent[RssId]];
1450 u32 EventsProcessed = 0, Batches = 0;
1451 struct sk_buff *skb;
1452 #ifdef LINUX_HANDLES_RCV_INDICATION_LISTS
1453 struct sk_buff *prev_skb = NULL;
1454 struct sk_buff *IndicationList[SXG_RCV_ARRAYSIZE];
1455 u32 Index;
1456 struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr;
1457 #endif
1458 u32 ReturnStatus = 0;
1459 int sxg_rcv_data_buffers = SXG_RCV_DATA_BUFFERS;
1461 ASSERT((adapter->State == SXG_STATE_RUNNING) ||
1462 (adapter->State == SXG_STATE_PAUSING) ||
1463 (adapter->State == SXG_STATE_PAUSED) ||
1464 (adapter->State == SXG_STATE_HALTING));
1466 * We may still have unprocessed events on the queue if
1467 * the card crashed. Don't process them.
1469 if (adapter->Dead) {
1470 return (0);
1473 * In theory there should only be a single processor that
1474 * accesses this queue, and only at interrupt-DPC time. So/
1475 * we shouldn't need a lock for any of this.
1477 while (Event->Status & EVENT_STATUS_VALID) {
1478 (*sxg_napi_continue) = 1;
1479 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "Event",
1480 Event, Event->Code, Event->Status,
1481 adapter->NextEvent);
1482 switch (Event->Code) {
1483 case EVENT_CODE_BUFFERS:
1484 /* struct sxg_ring_info Head & Tail == unsigned char */
1485 ASSERT(!(Event->CommandIndex & 0xFF00));
1486 sxg_complete_descriptor_blocks(adapter,
1487 Event->CommandIndex);
1488 break;
1489 case EVENT_CODE_SLOWRCV:
1490 (*work_done)++;
1491 --adapter->RcvBuffersOnCard;
1492 if ((skb = sxg_slow_receive(adapter, Event))) {
1493 u32 rx_bytes;
1494 #ifdef LINUX_HANDLES_RCV_INDICATION_LISTS
1495 /* Add it to our indication list */
1496 SXG_ADD_RCV_PACKET(adapter, skb, prev_skb,
1497 IndicationList, num_skbs);
1499 * Linux, we just pass up each skb to the
1500 * protocol above at this point, there is no
1501 * capability of an indication list.
1503 #else
1504 /* CHECK skb_pull(skb, INIC_RCVBUF_HEADSIZE); */
1505 /* (rcvbuf->length & IRHDDR_FLEN_MSK); */
1506 rx_bytes = Event->Length;
1507 adapter->stats.rx_packets++;
1508 adapter->stats.rx_bytes += rx_bytes;
1509 #if SXG_OFFLOAD_IP_CHECKSUM
1510 skb->ip_summed = CHECKSUM_UNNECESSARY;
1511 #endif
1512 skb->dev = adapter->netdev;
1513 netif_receive_skb(skb);
1514 #endif
1516 break;
1517 default:
1518 DBG_ERROR("%s: ERROR Invalid EventCode %d\n",
1519 __func__, Event->Code);
1520 /* ASSERT(0); */
1523 * See if we need to restock card receive buffers.
1524 * There are two things to note here:
1525 * First - This test is not SMP safe. The
1526 * adapter->BuffersOnCard field is protected via atomic
1527 * interlocked calls, but we do not protect it with respect
1528 * to these tests. The only way to do that is with a lock,
1529 * and I don't want to grab a lock every time we adjust the
1530 * BuffersOnCard count. Instead, we allow the buffer
1531 * replenishment to be off once in a while. The worst that
1532 * can happen is the card is given on more-or-less descriptor
1533 * block than the arbitrary value we've chosen. No big deal
1534 * In short DO NOT ADD A LOCK HERE, OR WHERE RcvBuffersOnCard
1535 * is adjusted.
1536 * Second - We expect this test to rarely
1537 * evaluate to true. We attempt to refill descriptor blocks
1538 * as they are returned to us (sxg_complete_descriptor_blocks)
1539 * so The only time this should evaluate to true is when
1540 * sxg_complete_descriptor_blocks failed to allocate
1541 * receive buffers.
1543 if (adapter->JumboEnabled)
1544 sxg_rcv_data_buffers = SXG_JUMBO_RCV_DATA_BUFFERS;
1546 if (adapter->RcvBuffersOnCard < sxg_rcv_data_buffers) {
1547 sxg_stock_rcv_buffers(adapter);
1550 * It's more efficient to just set this to zero.
1551 * But clearing the top bit saves potential debug info...
1553 Event->Status &= ~EVENT_STATUS_VALID;
1554 /* Advance to the next event */
1555 SXG_ADVANCE_INDEX(adapter->NextEvent[RssId], EVENT_RING_SIZE);
1556 Event = &EventRing->Ring[adapter->NextEvent[RssId]];
1557 EventsProcessed++;
1558 if (EventsProcessed == EVENT_RING_BATCH) {
1559 /* Release a batch of events back to the card */
1560 WRITE_REG(adapter->UcodeRegs[RssId].EventRelease,
1561 EVENT_RING_BATCH, FALSE);
1562 EventsProcessed = 0;
1564 * If we've processed our batch limit, break out of the
1565 * loop and return SXG_ISR_EVENT to arrange for us to
1566 * be called again
1568 if (Batches++ == EVENT_BATCH_LIMIT) {
1569 SXG_TRACE(TRACE_SXG, SxgTraceBuffer,
1570 TRACE_NOISY, "EvtLimit", Batches,
1571 adapter->NextEvent, 0, 0);
1572 ReturnStatus = SXG_ISR_EVENT;
1573 break;
1576 if (*work_done >= budget) {
1577 WRITE_REG(adapter->UcodeRegs[RssId].EventRelease,
1578 EventsProcessed, FALSE);
1579 EventsProcessed = 0;
1580 (*sxg_napi_continue) = 0;
1581 break;
1584 if (!(Event->Status & EVENT_STATUS_VALID))
1585 (*sxg_napi_continue) = 0;
1587 #ifdef LINUX_HANDLES_RCV_INDICATION_LISTS
1588 /* Indicate any received dumb-nic frames */
1589 SXG_INDICATE_PACKETS(adapter, IndicationList, num_skbs);
1590 #endif
1591 /* Release events back to the card. */
1592 if (EventsProcessed) {
1593 WRITE_REG(adapter->UcodeRegs[RssId].EventRelease,
1594 EventsProcessed, FALSE);
1596 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XPrcEvnt",
1597 Batches, EventsProcessed, adapter->NextEvent, num_skbs);
1599 return (ReturnStatus);
1603 * sxg_complete_slow_send - Complete slowpath or dumb-nic sends
1605 * Arguments -
1606 * adapter - A pointer to our adapter structure
1607 * Return
1608 * None
1610 static void sxg_complete_slow_send(struct adapter_t *adapter)
1612 struct sxg_xmt_ring *XmtRing = &adapter->XmtRings[0];
1613 struct sxg_ring_info *XmtRingInfo = &adapter->XmtRingZeroInfo;
1614 u32 *ContextType;
1615 struct sxg_cmd *XmtCmd;
1616 unsigned long flags = 0;
1617 unsigned long sgl_flags = 0;
1618 unsigned int processed_count = 0;
1621 * NOTE - This lock is dropped and regrabbed in this loop.
1622 * This means two different processors can both be running/
1623 * through this loop. Be *very* careful.
1625 spin_lock_irqsave(&adapter->XmtZeroLock, flags);
1627 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnds",
1628 adapter, XmtRingInfo->Head, XmtRingInfo->Tail, 0);
1630 while ((XmtRingInfo->Tail != *adapter->XmtRingZeroIndex)
1631 && processed_count++ < SXG_COMPLETE_SLOW_SEND_LIMIT) {
1633 * Locate the current Cmd (ring descriptor entry), and
1634 * associated SGL, and advance the tail
1636 SXG_RETURN_CMD(XmtRing, XmtRingInfo, XmtCmd, ContextType);
1637 ASSERT(ContextType);
1638 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnd",
1639 XmtRingInfo->Head, XmtRingInfo->Tail, XmtCmd, 0);
1640 /* Clear the SGL field. */
1641 XmtCmd->Sgl = 0;
1643 switch (*ContextType) {
1644 case SXG_SGL_DUMB:
1646 struct sk_buff *skb;
1647 struct sxg_scatter_gather *SxgSgl =
1648 (struct sxg_scatter_gather *)ContextType;
1649 dma64_addr_t FirstSgeAddress;
1650 u32 FirstSgeLength;
1652 /* Dumb-nic send. Command context is the dumb-nic SGL */
1653 skb = (struct sk_buff *)ContextType;
1654 skb = SxgSgl->DumbPacket;
1655 FirstSgeAddress = XmtCmd->Buffer.FirstSgeAddress;
1656 FirstSgeLength = XmtCmd->Buffer.FirstSgeLength;
1657 /* Complete the send */
1658 SXG_TRACE(TRACE_SXG, SxgTraceBuffer,
1659 TRACE_IMPORTANT, "DmSndCmp", skb, 0,
1660 0, 0);
1661 ASSERT(adapter->Stats.XmtQLen);
1663 * Now drop the lock and complete the send
1664 * back to Microsoft. We need to drop the lock
1665 * because Microsoft can come back with a
1666 * chimney send, which results in a double trip
1667 * in SxgTcpOuput
1669 spin_unlock_irqrestore(
1670 &adapter->XmtZeroLock, flags);
1672 SxgSgl->DumbPacket = NULL;
1673 SXG_COMPLETE_DUMB_SEND(adapter, skb,
1674 FirstSgeAddress,
1675 FirstSgeLength);
1676 SXG_FREE_SGL_BUFFER(adapter, SxgSgl, NULL);
1677 /* and reacquire.. */
1678 spin_lock_irqsave(&adapter->XmtZeroLock, flags);
1680 break;
1681 default:
1682 ASSERT(0);
1685 spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
1686 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnd",
1687 adapter, XmtRingInfo->Head, XmtRingInfo->Tail, 0);
1691 * sxg_slow_receive
1693 * Arguments -
1694 * adapter - A pointer to our adapter structure
1695 * Event - Receive event
1697 * Return - skb
1699 static struct sk_buff *sxg_slow_receive(struct adapter_t *adapter,
1700 struct sxg_event *Event)
1702 u32 BufferSize = adapter->ReceiveBufferSize;
1703 struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr;
1704 struct sk_buff *Packet;
1705 static int read_counter = 0;
1707 RcvDataBufferHdr = (struct sxg_rcv_data_buffer_hdr *) Event->HostHandle;
1708 if(read_counter++ & 0x100)
1710 sxg_collect_statistics(adapter);
1711 read_counter = 0;
1713 ASSERT(RcvDataBufferHdr);
1714 ASSERT(RcvDataBufferHdr->State == SXG_BUFFER_ONCARD);
1715 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "SlowRcv", Event,
1716 RcvDataBufferHdr, RcvDataBufferHdr->State,
1717 /*RcvDataBufferHdr->VirtualAddress*/ 0);
1718 /* Drop rcv frames in non-running state */
1719 switch (adapter->State) {
1720 case SXG_STATE_RUNNING:
1721 break;
1722 case SXG_STATE_PAUSING:
1723 case SXG_STATE_PAUSED:
1724 case SXG_STATE_HALTING:
1725 goto drop;
1726 default:
1727 ASSERT(0);
1728 goto drop;
1732 * memcpy(SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr),
1733 * RcvDataBufferHdr->VirtualAddress, Event->Length);
1736 /* Change buffer state to UPSTREAM */
1737 RcvDataBufferHdr->State = SXG_BUFFER_UPSTREAM;
1738 if (Event->Status & EVENT_STATUS_RCVERR) {
1739 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "RcvError",
1740 Event, Event->Status, Event->HostHandle, 0);
1741 /* XXXTODO - Remove this print later */
1742 DBG_ERROR("SXG: Receive error %x\n", *(u32 *)
1743 SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr));
1744 sxg_process_rcv_error(adapter, *(u32 *)
1745 SXG_RECEIVE_DATA_LOCATION
1746 (RcvDataBufferHdr));
1747 goto drop;
1749 #if XXXTODO /* VLAN stuff */
1750 /* If there's a VLAN tag, extract it and validate it */
1751 if (((struct ether_header *)
1752 (SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr)))->EtherType
1753 == ETHERTYPE_VLAN) {
1754 if (SxgExtractVlanHeader(adapter, RcvDataBufferHdr, Event) !=
1755 STATUS_SUCCESS) {
1756 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY,
1757 "BadVlan", Event,
1758 SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr),
1759 Event->Length, 0);
1760 goto drop;
1763 #endif
1764 /* Dumb-nic frame. See if it passes our mac filter and update stats */
1766 if (!sxg_mac_filter(adapter,
1767 (struct ether_header *)(SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr)),
1768 Event->Length)) {
1769 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "RcvFiltr",
1770 Event, SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr),
1771 Event->Length, 0);
1772 goto drop;
1775 Packet = RcvDataBufferHdr->SxgDumbRcvPacket;
1776 SXG_ADJUST_RCV_PACKET(Packet, RcvDataBufferHdr, Event);
1777 Packet->protocol = eth_type_trans(Packet, adapter->netdev);
1779 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "DumbRcv",
1780 RcvDataBufferHdr, Packet, Event->Length, 0);
1781 /* Lastly adjust the receive packet length. */
1782 RcvDataBufferHdr->SxgDumbRcvPacket = NULL;
1783 RcvDataBufferHdr->PhysicalAddress = (dma_addr_t)NULL;
1784 SXG_ALLOCATE_RCV_PACKET(adapter, RcvDataBufferHdr, BufferSize);
1785 if (RcvDataBufferHdr->skb)
1787 spin_lock(&adapter->RcvQLock);
1788 SXG_FREE_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr);
1789 // adapter->RcvBuffersOnCard ++;
1790 spin_unlock(&adapter->RcvQLock);
1792 return (Packet);
1794 drop:
1795 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DropRcv",
1796 RcvDataBufferHdr, Event->Length, 0, 0);
1797 adapter->stats.rx_dropped++;
1798 // adapter->Stats.RcvDiscards++;
1799 spin_lock(&adapter->RcvQLock);
1800 SXG_FREE_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr);
1801 spin_unlock(&adapter->RcvQLock);
1802 return (NULL);
1806 * sxg_process_rcv_error - process receive error and update
1807 * stats
1809 * Arguments:
1810 * adapter - Adapter structure
1811 * ErrorStatus - 4-byte receive error status
1813 * Return Value : None
1815 static void sxg_process_rcv_error(struct adapter_t *adapter, u32 ErrorStatus)
1817 u32 Error;
1819 adapter->stats.rx_errors++;
1821 if (ErrorStatus & SXG_RCV_STATUS_TRANSPORT_ERROR) {
1822 Error = ErrorStatus & SXG_RCV_STATUS_TRANSPORT_MASK;
1823 switch (Error) {
1824 case SXG_RCV_STATUS_TRANSPORT_CSUM:
1825 adapter->Stats.TransportCsum++;
1826 break;
1827 case SXG_RCV_STATUS_TRANSPORT_UFLOW:
1828 adapter->Stats.TransportUflow++;
1829 break;
1830 case SXG_RCV_STATUS_TRANSPORT_HDRLEN:
1831 adapter->Stats.TransportHdrLen++;
1832 break;
1835 if (ErrorStatus & SXG_RCV_STATUS_NETWORK_ERROR) {
1836 Error = ErrorStatus & SXG_RCV_STATUS_NETWORK_MASK;
1837 switch (Error) {
1838 case SXG_RCV_STATUS_NETWORK_CSUM:
1839 adapter->Stats.NetworkCsum++;
1840 break;
1841 case SXG_RCV_STATUS_NETWORK_UFLOW:
1842 adapter->Stats.NetworkUflow++;
1843 break;
1844 case SXG_RCV_STATUS_NETWORK_HDRLEN:
1845 adapter->Stats.NetworkHdrLen++;
1846 break;
1849 if (ErrorStatus & SXG_RCV_STATUS_PARITY) {
1850 adapter->Stats.Parity++;
1852 if (ErrorStatus & SXG_RCV_STATUS_LINK_ERROR) {
1853 Error = ErrorStatus & SXG_RCV_STATUS_LINK_MASK;
1854 switch (Error) {
1855 case SXG_RCV_STATUS_LINK_PARITY:
1856 adapter->Stats.LinkParity++;
1857 break;
1858 case SXG_RCV_STATUS_LINK_EARLY:
1859 adapter->Stats.LinkEarly++;
1860 break;
1861 case SXG_RCV_STATUS_LINK_BUFOFLOW:
1862 adapter->Stats.LinkBufOflow++;
1863 break;
1864 case SXG_RCV_STATUS_LINK_CODE:
1865 adapter->Stats.LinkCode++;
1866 break;
1867 case SXG_RCV_STATUS_LINK_DRIBBLE:
1868 adapter->Stats.LinkDribble++;
1869 break;
1870 case SXG_RCV_STATUS_LINK_CRC:
1871 adapter->Stats.LinkCrc++;
1872 break;
1873 case SXG_RCV_STATUS_LINK_OFLOW:
1874 adapter->Stats.LinkOflow++;
1875 break;
1876 case SXG_RCV_STATUS_LINK_UFLOW:
1877 adapter->Stats.LinkUflow++;
1878 break;
1884 * sxg_mac_filter
1886 * Arguments:
1887 * adapter - Adapter structure
1888 * pether - Ethernet header
1889 * length - Frame length
1891 * Return Value : TRUE if the frame is to be allowed
1893 static bool sxg_mac_filter(struct adapter_t *adapter,
1894 struct ether_header *EtherHdr, ushort length)
1896 bool EqualAddr;
1897 struct net_device *dev = adapter->netdev;
1899 if (SXG_MULTICAST_PACKET(EtherHdr)) {
1900 if (SXG_BROADCAST_PACKET(EtherHdr)) {
1901 /* broadcast */
1902 if (adapter->MacFilter & MAC_BCAST) {
1903 adapter->Stats.DumbRcvBcastPkts++;
1904 adapter->Stats.DumbRcvBcastBytes += length;
1905 return (TRUE);
1907 } else {
1908 /* multicast */
1909 if (adapter->MacFilter & MAC_ALLMCAST) {
1910 adapter->Stats.DumbRcvMcastPkts++;
1911 adapter->Stats.DumbRcvMcastBytes += length;
1912 return (TRUE);
1914 if (adapter->MacFilter & MAC_MCAST) {
1915 struct dev_mc_list *mclist = dev->mc_list;
1916 while (mclist) {
1917 ETHER_EQ_ADDR(mclist->da_addr,
1918 EtherHdr->ether_dhost,
1919 EqualAddr);
1920 if (EqualAddr) {
1921 adapter->Stats.
1922 DumbRcvMcastPkts++;
1923 adapter->Stats.
1924 DumbRcvMcastBytes += length;
1925 return (TRUE);
1927 mclist = mclist->next;
1931 } else if (adapter->MacFilter & MAC_DIRECTED) {
1933 * Not broadcast or multicast. Must be directed at us or
1934 * the card is in promiscuous mode. Either way, consider it
1935 * ours if MAC_DIRECTED is set
1937 adapter->Stats.DumbRcvUcastPkts++;
1938 adapter->Stats.DumbRcvUcastBytes += length;
1939 return (TRUE);
1941 if (adapter->MacFilter & MAC_PROMISC) {
1942 /* Whatever it is, keep it. */
1943 return (TRUE);
1945 return (FALSE);
1948 static int sxg_register_interrupt(struct adapter_t *adapter)
1950 if (!adapter->intrregistered) {
1951 int retval;
1953 DBG_ERROR
1954 ("sxg: %s AllocAdaptRsrcs adapter[%p] dev->irq[%x] %x\n",
1955 __func__, adapter, adapter->netdev->irq, NR_IRQS);
1957 spin_unlock_irqrestore(&sxg_global.driver_lock,
1958 sxg_global.flags);
1960 retval = request_irq(adapter->netdev->irq,
1961 &sxg_isr,
1962 IRQF_SHARED,
1963 adapter->netdev->name, adapter->netdev);
1965 spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags);
1967 if (retval) {
1968 DBG_ERROR("sxg: request_irq (%s) FAILED [%x]\n",
1969 adapter->netdev->name, retval);
1970 return (retval);
1972 adapter->intrregistered = 1;
1973 adapter->IntRegistered = TRUE;
1974 /* Disable RSS with line-based interrupts */
1975 adapter->RssEnabled = FALSE;
1976 DBG_ERROR("sxg: %s AllocAdaptRsrcs adapter[%p] dev->irq[%x]\n",
1977 __func__, adapter, adapter->netdev->irq);
1979 return (STATUS_SUCCESS);
1982 static void sxg_deregister_interrupt(struct adapter_t *adapter)
1984 DBG_ERROR("sxg: %s ENTER adapter[%p]\n", __func__, adapter);
1985 #if XXXTODO
1986 slic_init_cleanup(adapter);
1987 #endif
1988 memset(&adapter->stats, 0, sizeof(struct net_device_stats));
1989 adapter->error_interrupts = 0;
1990 adapter->rcv_interrupts = 0;
1991 adapter->xmit_interrupts = 0;
1992 adapter->linkevent_interrupts = 0;
1993 adapter->upr_interrupts = 0;
1994 adapter->num_isrs = 0;
1995 adapter->xmit_completes = 0;
1996 adapter->rcv_broadcasts = 0;
1997 adapter->rcv_multicasts = 0;
1998 adapter->rcv_unicasts = 0;
1999 DBG_ERROR("sxg: %s EXIT\n", __func__);
2003 * sxg_if_init
2005 * Perform initialization of our slic interface.
2008 static int sxg_if_init(struct adapter_t *adapter)
2010 struct net_device *dev = adapter->netdev;
2011 int status = 0;
2013 DBG_ERROR("sxg: %s (%s) ENTER states[%d:%d] flags[%x]\n",
2014 __func__, adapter->netdev->name,
2015 adapter->state,
2016 adapter->linkstate, dev->flags);
2018 /* adapter should be down at this point */
2019 if (adapter->state != ADAPT_DOWN) {
2020 DBG_ERROR("sxg_if_init adapter->state != ADAPT_DOWN\n");
2021 return (-EIO);
2023 ASSERT(adapter->linkstate == LINK_DOWN);
2025 adapter->devflags_prev = dev->flags;
2026 adapter->MacFilter = MAC_DIRECTED;
2027 if (dev->flags) {
2028 DBG_ERROR("sxg: %s (%s) Set MAC options: ", __func__,
2029 adapter->netdev->name);
2030 if (dev->flags & IFF_BROADCAST) {
2031 adapter->MacFilter |= MAC_BCAST;
2032 DBG_ERROR("BCAST ");
2034 if (dev->flags & IFF_PROMISC) {
2035 adapter->MacFilter |= MAC_PROMISC;
2036 DBG_ERROR("PROMISC ");
2038 if (dev->flags & IFF_ALLMULTI) {
2039 adapter->MacFilter |= MAC_ALLMCAST;
2040 DBG_ERROR("ALL_MCAST ");
2042 if (dev->flags & IFF_MULTICAST) {
2043 adapter->MacFilter |= MAC_MCAST;
2044 DBG_ERROR("MCAST ");
2046 DBG_ERROR("\n");
2048 status = sxg_register_intr(adapter);
2049 if (status != STATUS_SUCCESS) {
2050 DBG_ERROR("sxg_if_init: sxg_register_intr FAILED %x\n",
2051 status);
2052 sxg_deregister_interrupt(adapter);
2053 return (status);
2056 adapter->state = ADAPT_UP;
2058 /* clear any pending events, then enable interrupts */
2059 DBG_ERROR("sxg: %s ENABLE interrupts(slic)\n", __func__);
2061 return (STATUS_SUCCESS);
2064 void sxg_set_interrupt_aggregation(struct adapter_t *adapter)
2067 * Top bit disables aggregation on xmt (SXG_AGG_XMT_DISABLE).
2068 * Make sure Max is less than 0x8000.
2070 adapter->max_aggregation = SXG_MAX_AGG_DEFAULT;
2071 adapter->min_aggregation = SXG_MIN_AGG_DEFAULT;
2072 WRITE_REG(adapter->UcodeRegs[0].Aggregation,
2073 ((adapter->max_aggregation << SXG_MAX_AGG_SHIFT) |
2074 adapter->min_aggregation),
2075 TRUE);
2078 static int sxg_entry_open(struct net_device *dev)
2080 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
2081 int status;
2082 static int turn;
2083 int sxg_initial_rcv_data_buffers = SXG_INITIAL_RCV_DATA_BUFFERS;
2084 int i;
2086 if (adapter->JumboEnabled == TRUE) {
2087 sxg_initial_rcv_data_buffers =
2088 SXG_INITIAL_JUMBO_RCV_DATA_BUFFERS;
2089 SXG_INITIALIZE_RING(adapter->RcvRingZeroInfo,
2090 SXG_JUMBO_RCV_RING_SIZE);
2094 * Allocate receive data buffers. We allocate a block of buffers and
2095 * a corresponding descriptor block at once. See sxghw.h:SXG_RCV_BLOCK
2098 for (i = 0; i < sxg_initial_rcv_data_buffers;
2099 i += SXG_RCV_DESCRIPTORS_PER_BLOCK)
2101 status = sxg_allocate_buffer_memory(adapter,
2102 SXG_RCV_BLOCK_SIZE(SXG_RCV_DATA_HDR_SIZE),
2103 SXG_BUFFER_TYPE_RCV);
2104 if (status != STATUS_SUCCESS)
2105 return status;
2108 * NBL resource allocation can fail in the 'AllocateComplete' routine,
2109 * which doesn't return status. Make sure we got the number of buffers
2110 * we requested
2113 if (adapter->FreeRcvBufferCount < sxg_initial_rcv_data_buffers) {
2114 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF6",
2115 adapter, adapter->FreeRcvBufferCount, SXG_MAX_ENTRIES,
2117 return (STATUS_RESOURCES);
2120 * The microcode expects it to be downloaded on every open.
2122 DBG_ERROR("sxg: %s ENTER sxg_download_microcode\n", __FUNCTION__);
2123 if (sxg_download_microcode(adapter, SXG_UCODE_SAHARA)) {
2124 DBG_ERROR("sxg: %s ENTER sxg_adapter_set_hwaddr\n",
2125 __FUNCTION__);
2126 sxg_read_config(adapter);
2127 } else {
2128 adapter->state = ADAPT_FAIL;
2129 adapter->linkstate = LINK_DOWN;
2130 DBG_ERROR("sxg_download_microcode FAILED status[%x]\n",
2131 status);
2133 msleep(5);
2135 if (turn) {
2136 sxg_second_open(adapter->netdev);
2138 return STATUS_SUCCESS;
2141 turn++;
2143 ASSERT(adapter);
2144 DBG_ERROR("sxg: %s adapter->activated[%d]\n", __func__,
2145 adapter->activated);
2146 DBG_ERROR
2147 ("sxg: %s (%s): [jiffies[%lx] cpu %d] dev[%p] adapt[%p] port[%d]\n",
2148 __func__, adapter->netdev->name, jiffies, smp_processor_id(),
2149 adapter->netdev, adapter, adapter->port);
2151 netif_stop_queue(adapter->netdev);
2153 spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags);
2154 if (!adapter->activated) {
2155 sxg_global.num_sxg_ports_active++;
2156 adapter->activated = 1;
2158 /* Initialize the adapter */
2159 DBG_ERROR("sxg: %s ENTER sxg_initialize_adapter\n", __func__);
2160 status = sxg_initialize_adapter(adapter);
2161 DBG_ERROR("sxg: %s EXIT sxg_initialize_adapter status[%x]\n",
2162 __func__, status);
2164 if (status == STATUS_SUCCESS) {
2165 DBG_ERROR("sxg: %s ENTER sxg_if_init\n", __func__);
2166 status = sxg_if_init(adapter);
2167 DBG_ERROR("sxg: %s EXIT sxg_if_init status[%x]\n", __func__,
2168 status);
2171 if (status != STATUS_SUCCESS) {
2172 if (adapter->activated) {
2173 sxg_global.num_sxg_ports_active--;
2174 adapter->activated = 0;
2176 spin_unlock_irqrestore(&sxg_global.driver_lock,
2177 sxg_global.flags);
2178 return (status);
2180 DBG_ERROR("sxg: %s ENABLE ALL INTERRUPTS\n", __func__);
2181 sxg_set_interrupt_aggregation(adapter);
2182 napi_enable(&adapter->napi);
2184 /* Enable interrupts */
2185 SXG_ENABLE_ALL_INTERRUPTS(adapter);
2187 DBG_ERROR("sxg: %s EXIT\n", __func__);
2189 spin_unlock_irqrestore(&sxg_global.driver_lock, sxg_global.flags);
2190 return STATUS_SUCCESS;
2193 int sxg_second_open(struct net_device * dev)
2195 struct adapter_t *adapter = (struct adapter_t*) netdev_priv(dev);
2196 int status = 0;
2198 spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags);
2199 netif_start_queue(adapter->netdev);
2200 adapter->state = ADAPT_UP;
2201 adapter->linkstate = LINK_UP;
2203 status = sxg_initialize_adapter(adapter);
2204 sxg_set_interrupt_aggregation(adapter);
2205 napi_enable(&adapter->napi);
2206 /* Re-enable interrupts */
2207 SXG_ENABLE_ALL_INTERRUPTS(adapter);
2209 netif_carrier_on(dev);
2210 sxg_register_interrupt(adapter);
2211 spin_unlock_irqrestore(&sxg_global.driver_lock, sxg_global.flags);
2212 return (STATUS_SUCCESS);
2216 static void __devexit sxg_entry_remove(struct pci_dev *pcidev)
2218 u32 mmio_start = 0;
2219 u32 mmio_len = 0;
2221 struct net_device *dev = pci_get_drvdata(pcidev);
2222 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
2224 flush_scheduled_work();
2226 /* Deallocate Resources */
2227 unregister_netdev(dev);
2228 sxg_reset_interrupt_capability(adapter);
2229 sxg_free_resources(adapter);
2231 ASSERT(adapter);
2233 mmio_start = pci_resource_start(pcidev, 0);
2234 mmio_len = pci_resource_len(pcidev, 0);
2236 DBG_ERROR("sxg: %s rel_region(0) start[%x] len[%x]\n", __FUNCTION__,
2237 mmio_start, mmio_len);
2238 release_mem_region(mmio_start, mmio_len);
2240 mmio_start = pci_resource_start(pcidev, 2);
2241 mmio_len = pci_resource_len(pcidev, 2);
2243 DBG_ERROR("sxg: %s rel_region(2) start[%x] len[%x]\n", __FUNCTION__,
2244 mmio_start, mmio_len);
2245 release_mem_region(mmio_start, mmio_len);
2247 pci_disable_device(pcidev);
2249 DBG_ERROR("sxg: %s deallocate device\n", __func__);
2250 kfree(dev);
2251 DBG_ERROR("sxg: %s EXIT\n", __func__);
2254 static int sxg_entry_halt(struct net_device *dev)
2256 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
2257 struct sxg_hw_regs *HwRegs = adapter->HwRegs;
2258 int i;
2259 u32 RssIds, IsrCount;
2260 unsigned long flags;
2262 RssIds = SXG_RSS_CPU_COUNT(adapter);
2263 IsrCount = adapter->msi_enabled ? RssIds : 1;
2265 napi_disable(&adapter->napi);
2266 spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags);
2267 DBG_ERROR("sxg: %s (%s) ENTER\n", __func__, dev->name);
2269 WRITE_REG(adapter->UcodeRegs[0].RcvCmd, 0, true);
2270 netif_stop_queue(adapter->netdev);
2271 adapter->state = ADAPT_DOWN;
2272 adapter->linkstate = LINK_DOWN;
2273 adapter->devflags_prev = 0;
2274 DBG_ERROR("sxg: %s (%s) set adapter[%p] state to ADAPT_DOWN(%d)\n",
2275 __func__, dev->name, adapter, adapter->state);
2277 /* Disable interrupts */
2278 SXG_DISABLE_ALL_INTERRUPTS(adapter);
2280 netif_carrier_off(dev);
2281 spin_unlock_irqrestore(&sxg_global.driver_lock, sxg_global.flags);
2283 sxg_deregister_interrupt(adapter);
2284 WRITE_REG(HwRegs->Reset, 0xDEAD, FLUSH);
2285 mdelay(5000);
2286 spin_lock(&adapter->RcvQLock);
2287 /* Free all the blocks and the buffers, moved from remove() routine */
2288 if (!(IsListEmpty(&adapter->AllRcvBlocks))) {
2289 sxg_free_rcvblocks(adapter);
2293 InitializeListHead(&adapter->FreeRcvBuffers);
2294 InitializeListHead(&adapter->FreeRcvBlocks);
2295 InitializeListHead(&adapter->AllRcvBlocks);
2296 InitializeListHead(&adapter->FreeSglBuffers);
2297 InitializeListHead(&adapter->AllSglBuffers);
2299 adapter->FreeRcvBufferCount = 0;
2300 adapter->FreeRcvBlockCount = 0;
2301 adapter->AllRcvBlockCount = 0;
2302 adapter->RcvBuffersOnCard = 0;
2303 adapter->PendingRcvCount = 0;
2305 memset(adapter->RcvRings, 0, sizeof(struct sxg_rcv_ring) * 1);
2306 memset(adapter->EventRings, 0, sizeof(struct sxg_event_ring) * RssIds);
2307 memset(adapter->Isr, 0, sizeof(u32) * IsrCount);
2308 for (i = 0; i < SXG_MAX_RING_SIZE; i++)
2309 adapter->RcvRingZeroInfo.Context[i] = NULL;
2310 SXG_INITIALIZE_RING(adapter->RcvRingZeroInfo, SXG_RCV_RING_SIZE);
2311 SXG_INITIALIZE_RING(adapter->XmtRingZeroInfo, SXG_XMT_RING_SIZE);
2313 spin_unlock(&adapter->RcvQLock);
2315 spin_lock_irqsave(&adapter->XmtZeroLock, flags);
2316 adapter->AllSglBufferCount = 0;
2317 adapter->FreeSglBufferCount = 0;
2318 adapter->PendingXmtCount = 0;
2319 memset(adapter->XmtRings, 0, sizeof(struct sxg_xmt_ring) * 1);
2320 memset(adapter->XmtRingZeroIndex, 0, sizeof(u32));
2321 spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
2323 for (i = 0; i < SXG_MAX_RSS; i++) {
2324 adapter->NextEvent[i] = 0;
2326 atomic_set(&adapter->pending_allocations, 0);
2327 adapter->intrregistered = 0;
2328 sxg_remove_isr(adapter);
2329 DBG_ERROR("sxg: %s (%s) EXIT\n", __FUNCTION__, dev->name);
2330 return (STATUS_SUCCESS);
2333 static int sxg_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2335 ASSERT(rq);
2336 /* DBG_ERROR("sxg: %s cmd[%x] rq[%p] dev[%p]\n", __func__, cmd, rq, dev);*/
2337 switch (cmd) {
2338 case SIOCSLICSETINTAGG:
2340 /* struct adapter_t *adapter = (struct adapter_t *)
2341 * netdev_priv(dev);
2343 u32 data[7];
2344 u32 intagg;
2346 if (copy_from_user(data, rq->ifr_data, 28)) {
2347 DBG_ERROR("copy_from_user FAILED getting \
2348 initial params\n");
2349 return -EFAULT;
2351 intagg = data[0];
2352 printk(KERN_EMERG
2353 "%s: set interrupt aggregation to %d\n",
2354 __func__, intagg);
2355 return 0;
2358 default:
2359 /* DBG_ERROR("sxg: %s UNSUPPORTED[%x]\n", __func__, cmd); */
2360 return -EOPNOTSUPP;
2362 return 0;
2365 #define NORMAL_ETHFRAME 0
2368 * sxg_send_packets - Send a skb packet
2370 * Arguments:
2371 * skb - The packet to send
2372 * dev - Our linux net device that refs our adapter
2374 * Return:
2375 * 0 regardless of outcome XXXTODO refer to e1000 driver
2377 static int sxg_send_packets(struct sk_buff *skb, struct net_device *dev)
2379 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
2380 u32 status = STATUS_SUCCESS;
2383 * DBG_ERROR("sxg: %s ENTER sxg_send_packets skb[%p]\n", __FUNCTION__,
2384 * skb);
2387 /* Check the adapter state */
2388 switch (adapter->State) {
2389 case SXG_STATE_INITIALIZING:
2390 case SXG_STATE_HALTED:
2391 case SXG_STATE_SHUTDOWN:
2392 ASSERT(0); /* unexpected */
2393 /* fall through */
2394 case SXG_STATE_RESETTING:
2395 case SXG_STATE_SLEEP:
2396 case SXG_STATE_BOOTDIAG:
2397 case SXG_STATE_DIAG:
2398 case SXG_STATE_HALTING:
2399 status = STATUS_FAILURE;
2400 break;
2401 case SXG_STATE_RUNNING:
2402 if (adapter->LinkState != SXG_LINK_UP) {
2403 status = STATUS_FAILURE;
2405 break;
2406 default:
2407 ASSERT(0);
2408 status = STATUS_FAILURE;
2410 if (status != STATUS_SUCCESS) {
2411 goto xmit_fail;
2413 /* send a packet */
2414 status = sxg_transmit_packet(adapter, skb);
2415 if (status == STATUS_SUCCESS) {
2416 goto xmit_done;
2419 xmit_fail:
2420 /* reject & complete all the packets if they cant be sent */
2421 if (status != STATUS_SUCCESS) {
2422 #if XXXTODO
2423 /* sxg_send_packets_fail(adapter, skb, status); */
2424 #else
2425 SXG_DROP_DUMB_SEND(adapter, skb);
2426 adapter->stats.tx_dropped++;
2427 return NETDEV_TX_BUSY;
2428 #endif
2430 DBG_ERROR("sxg: %s EXIT sxg_send_packets status[%x]\n", __func__,
2431 status);
2433 xmit_done:
2434 return NETDEV_TX_OK;
2438 * sxg_transmit_packet
2440 * This function transmits a single packet.
2442 * Arguments -
2443 * adapter - Pointer to our adapter structure
2444 * skb - The packet to be sent
2446 * Return - STATUS of send
2448 static int sxg_transmit_packet(struct adapter_t *adapter, struct sk_buff *skb)
2450 struct sxg_x64_sgl *pSgl;
2451 struct sxg_scatter_gather *SxgSgl;
2452 unsigned long sgl_flags;
2453 /* void *SglBuffer; */
2454 /* u32 SglBufferLength; */
2457 * The vast majority of work is done in the shared
2458 * sxg_dumb_sgl routine.
2460 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbSend",
2461 adapter, skb, 0, 0);
2463 /* Allocate a SGL buffer */
2464 SXG_GET_SGL_BUFFER(adapter, SxgSgl, 0);
2465 if (!SxgSgl) {
2466 adapter->Stats.NoSglBuf++;
2467 adapter->stats.tx_errors++;
2468 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "SndPktF1",
2469 adapter, skb, 0, 0);
2470 return (STATUS_RESOURCES);
2472 ASSERT(SxgSgl->adapter == adapter);
2473 /*SglBuffer = SXG_SGL_BUFFER(SxgSgl);
2474 SglBufferLength = SXG_SGL_BUF_SIZE; */
2475 SxgSgl->VlanTag.VlanTci = 0;
2476 SxgSgl->VlanTag.VlanTpid = 0;
2477 SxgSgl->Type = SXG_SGL_DUMB;
2478 SxgSgl->DumbPacket = skb;
2479 pSgl = NULL;
2481 /* Call the common sxg_dumb_sgl routine to complete the send. */
2482 return (sxg_dumb_sgl(pSgl, SxgSgl));
2486 * sxg_dumb_sgl
2488 * Arguments:
2489 * pSgl -
2490 * SxgSgl - struct sxg_scatter_gather
2492 * Return Value:
2493 * Status of send operation.
2495 static int sxg_dumb_sgl(struct sxg_x64_sgl *pSgl,
2496 struct sxg_scatter_gather *SxgSgl)
2498 struct adapter_t *adapter = SxgSgl->adapter;
2499 struct sk_buff *skb = SxgSgl->DumbPacket;
2500 /* For now, all dumb-nic sends go on RSS queue zero */
2501 struct sxg_xmt_ring *XmtRing = &adapter->XmtRings[0];
2502 struct sxg_ring_info *XmtRingInfo = &adapter->XmtRingZeroInfo;
2503 struct sxg_cmd *XmtCmd = NULL;
2504 /* u32 Index = 0; */
2505 u32 DataLength = skb->len;
2506 /* unsigned int BufLen; */
2507 /* u32 SglOffset; */
2508 u64 phys_addr;
2509 unsigned long flags;
2510 unsigned long queue_id=0;
2512 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbSgl",
2513 pSgl, SxgSgl, 0, 0);
2515 /* Set aside a pointer to the sgl */
2516 SxgSgl->pSgl = pSgl;
2518 /* Sanity check that our SGL format is as we expect. */
2519 ASSERT(sizeof(struct sxg_x64_sge) == sizeof(struct sxg_x64_sge));
2520 /* Shouldn't be a vlan tag on this frame */
2521 ASSERT(SxgSgl->VlanTag.VlanTci == 0);
2522 ASSERT(SxgSgl->VlanTag.VlanTpid == 0);
2525 * From here below we work with the SGL placed in our
2526 * buffer.
2529 SxgSgl->Sgl.NumberOfElements = 1;
2531 * Set ucode Queue ID based on bottom bits of destination TCP port.
2532 * This Queue ID splits slowpath/dumb-nic packet processing across
2533 * multiple threads on the card to improve performance. It is split
2534 * using the TCP port to avoid out-of-order packets that can result
2535 * from multithreaded processing. We use the destination port because
2536 * we expect to be run on a server, so in nearly all cases the local
2537 * port is likely to be constant (well-known server port) and the
2538 * remote port is likely to be random. The exception to this is iSCSI,
2539 * in which case we use the sport instead. Note
2540 * that original attempt at XOR'ing source and dest port resulted in
2541 * poor balance on NTTTCP/iometer applications since they tend to
2542 * line up (even-even, odd-odd..).
2545 if (skb->protocol == htons(ETH_P_IP)) {
2546 struct iphdr *ip;
2548 ip = ip_hdr(skb);
2549 if ((ip->protocol == IPPROTO_TCP)&&(DataLength >= sizeof(
2550 struct tcphdr))){
2551 queue_id = ((ntohs(tcp_hdr(skb)->dest) == ISCSI_PORT) ?
2552 (ntohs (tcp_hdr(skb)->source) &
2553 SXG_LARGE_SEND_QUEUE_MASK):
2554 (ntohs(tcp_hdr(skb)->dest) &
2555 SXG_LARGE_SEND_QUEUE_MASK));
2557 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2558 if ( (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) && (DataLength >=
2559 sizeof(struct tcphdr)) ) {
2560 queue_id = ((ntohs(tcp_hdr(skb)->dest) == ISCSI_PORT) ?
2561 (ntohs (tcp_hdr(skb)->source) &
2562 SXG_LARGE_SEND_QUEUE_MASK):
2563 (ntohs(tcp_hdr(skb)->dest) &
2564 SXG_LARGE_SEND_QUEUE_MASK));
2568 /* Grab the spinlock and acquire a command */
2569 spin_lock_irqsave(&adapter->XmtZeroLock, flags);
2570 SXG_GET_CMD(XmtRing, XmtRingInfo, XmtCmd, SxgSgl);
2571 if (XmtCmd == NULL) {
2573 * Call sxg_complete_slow_send to see if we can
2574 * free up any XmtRingZero entries and then try again
2577 spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
2578 sxg_complete_slow_send(adapter);
2579 spin_lock_irqsave(&adapter->XmtZeroLock, flags);
2580 SXG_GET_CMD(XmtRing, XmtRingInfo, XmtCmd, SxgSgl);
2581 if (XmtCmd == NULL) {
2582 adapter->Stats.XmtZeroFull++;
2583 goto abortcmd;
2586 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbCmd",
2587 XmtCmd, XmtRingInfo->Head, XmtRingInfo->Tail, 0);
2588 /* Update stats */
2589 adapter->stats.tx_packets++;
2590 adapter->stats.tx_bytes += DataLength;
2591 #if XXXTODO /* Stats stuff */
2592 if (SXG_MULTICAST_PACKET(EtherHdr)) {
2593 if (SXG_BROADCAST_PACKET(EtherHdr)) {
2594 adapter->Stats.DumbXmtBcastPkts++;
2595 adapter->Stats.DumbXmtBcastBytes += DataLength;
2596 } else {
2597 adapter->Stats.DumbXmtMcastPkts++;
2598 adapter->Stats.DumbXmtMcastBytes += DataLength;
2600 } else {
2601 adapter->Stats.DumbXmtUcastPkts++;
2602 adapter->Stats.DumbXmtUcastBytes += DataLength;
2604 #endif
2606 * Fill in the command
2607 * Copy out the first SGE to the command and adjust for offset
2609 phys_addr = pci_map_single(adapter->pcidev, skb->data, skb->len,
2610 PCI_DMA_TODEVICE);
2613 * SAHARA SGL WORKAROUND
2614 * See if the SGL straddles a 64k boundary. If so, skip to
2615 * the start of the next 64k boundary and continue
2618 if (SXG_INVALID_SGL(phys_addr,skb->data_len))
2620 spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
2621 /* Silently drop this packet */
2622 printk(KERN_EMERG"Dropped a packet for 64k boundary problem\n");
2623 return STATUS_SUCCESS;
2625 memset(XmtCmd, '\0', sizeof(*XmtCmd));
2626 XmtCmd->Buffer.FirstSgeAddress = phys_addr;
2627 XmtCmd->Buffer.FirstSgeLength = DataLength;
2628 XmtCmd->Buffer.SgeOffset = 0;
2629 XmtCmd->Buffer.TotalLength = DataLength;
2630 XmtCmd->SgEntries = 1;
2631 XmtCmd->Flags = 0;
2633 * Advance transmit cmd descripter by 1.
2634 * NOTE - See comments in SxgTcpOutput where we write
2635 * to the XmtCmd register regarding CPU ID values and/or
2636 * multiple commands.
2637 * Top 16 bits specify queue_id. See comments about queue_id above
2639 /* Four queues at the moment */
2640 ASSERT((queue_id & ~SXG_LARGE_SEND_QUEUE_MASK) == 0);
2641 WRITE_REG(adapter->UcodeRegs[0].XmtCmd, ((queue_id << 16) | 1), TRUE);
2642 adapter->Stats.XmtQLen++; /* Stats within lock */
2643 spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
2644 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDumSgl2",
2645 XmtCmd, pSgl, SxgSgl, 0);
2646 return STATUS_SUCCESS;
2648 abortcmd:
2650 * NOTE - Only jump to this label AFTER grabbing the
2651 * XmtZeroLock, and DO NOT DROP IT between the
2652 * command allocation and the following abort.
2654 if (XmtCmd) {
2655 SXG_ABORT_CMD(XmtRingInfo);
2657 spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
2660 * failsgl:
2661 * Jump to this label if failure occurs before the
2662 * XmtZeroLock is grabbed
2664 adapter->stats.tx_errors++;
2665 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "DumSGFal",
2666 pSgl, SxgSgl, XmtRingInfo->Head, XmtRingInfo->Tail);
2667 /* SxgSgl->DumbPacket is the skb */
2668 // SXG_COMPLETE_DUMB_SEND(adapter, SxgSgl->DumbPacket);
2670 return STATUS_FAILURE;
2674 * Link management functions
2676 * sxg_initialize_link - Initialize the link stuff
2678 * Arguments -
2679 * adapter - A pointer to our adapter structure
2681 * Return
2682 * status
2684 static int sxg_initialize_link(struct adapter_t *adapter)
2686 struct sxg_hw_regs *HwRegs = adapter->HwRegs;
2687 u32 Value;
2688 u32 ConfigData;
2689 u32 MaxFrame;
2690 int status;
2692 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "InitLink",
2693 adapter, 0, 0, 0);
2695 /* Reset PHY and XGXS module */
2696 WRITE_REG(HwRegs->LinkStatus, LS_SERDES_POWER_DOWN, TRUE);
2698 /* Reset transmit configuration register */
2699 WRITE_REG(HwRegs->XmtConfig, XMT_CONFIG_RESET, TRUE);
2701 /* Reset receive configuration register */
2702 WRITE_REG(HwRegs->RcvConfig, RCV_CONFIG_RESET, TRUE);
2704 /* Reset all MAC modules */
2705 WRITE_REG(HwRegs->MacConfig0, AXGMAC_CFG0_SUB_RESET, TRUE);
2708 * Link address 0
2709 * XXXTODO - This assumes the MAC address (0a:0b:0c:0d:0e:0f)
2710 * is stored with the first nibble (0a) in the byte 0
2711 * of the Mac address. Possibly reverse?
2713 Value = *(u32 *) adapter->macaddr;
2714 WRITE_REG(HwRegs->LinkAddress0Low, Value, TRUE);
2715 /* also write the MAC address to the MAC. Endian is reversed. */
2716 WRITE_REG(HwRegs->MacAddressLow, ntohl(Value), TRUE);
2717 Value = (*(u16 *) & adapter->macaddr[4] & 0x0000FFFF);
2718 WRITE_REG(HwRegs->LinkAddress0High, Value | LINK_ADDRESS_ENABLE, TRUE);
2719 /* endian swap for the MAC (put high bytes in bits [31:16], swapped) */
2720 Value = ntohl(Value);
2721 WRITE_REG(HwRegs->MacAddressHigh, Value, TRUE);
2722 /* Link address 1 */
2723 WRITE_REG(HwRegs->LinkAddress1Low, 0, TRUE);
2724 WRITE_REG(HwRegs->LinkAddress1High, 0, TRUE);
2725 /* Link address 2 */
2726 WRITE_REG(HwRegs->LinkAddress2Low, 0, TRUE);
2727 WRITE_REG(HwRegs->LinkAddress2High, 0, TRUE);
2728 /* Link address 3 */
2729 WRITE_REG(HwRegs->LinkAddress3Low, 0, TRUE);
2730 WRITE_REG(HwRegs->LinkAddress3High, 0, TRUE);
2732 /* Enable MAC modules */
2733 WRITE_REG(HwRegs->MacConfig0, 0, TRUE);
2735 /* Configure MAC */
2736 WRITE_REG(HwRegs->MacConfig1, (
2737 /* Allow sending of pause */
2738 AXGMAC_CFG1_XMT_PAUSE |
2739 /* Enable XMT */
2740 AXGMAC_CFG1_XMT_EN |
2741 /* Enable detection of pause */
2742 AXGMAC_CFG1_RCV_PAUSE |
2743 /* Enable receive */
2744 AXGMAC_CFG1_RCV_EN |
2745 /* short frame detection */
2746 AXGMAC_CFG1_SHORT_ASSERT |
2747 /* Verify frame length */
2748 AXGMAC_CFG1_CHECK_LEN |
2749 /* Generate FCS */
2750 AXGMAC_CFG1_GEN_FCS |
2751 /* Pad frames to 64 bytes */
2752 AXGMAC_CFG1_PAD_64),
2753 TRUE);
2755 /* Set AXGMAC max frame length if jumbo. Not needed for standard MTU */
2756 if (adapter->JumboEnabled) {
2757 WRITE_REG(HwRegs->MacMaxFrameLen, AXGMAC_MAXFRAME_JUMBO, TRUE);
2760 * AMIIM Configuration Register -
2761 * The value placed in the AXGMAC_AMIIM_CFG_HALF_CLOCK portion
2762 * (bottom bits) of this register is used to determine the MDC frequency
2763 * as specified in the A-XGMAC Design Document. This value must not be
2764 * zero. The following value (62 or 0x3E) is based on our MAC transmit
2765 * clock frequency (MTCLK) of 312.5 MHz. Given a maximum MDIO clock
2766 * frequency of 2.5 MHz (see the PHY spec), we get:
2767 * 312.5/(2*(X+1)) < 2.5 ==> X = 62.
2768 * This value happens to be the default value for this register, so we
2769 * really don't have to do this.
2771 WRITE_REG(HwRegs->MacAmiimConfig, 0x0000003E, TRUE);
2773 /* Power up and enable PHY and XAUI/XGXS/Serdes logic */
2774 WRITE_REG(HwRegs->LinkStatus,
2775 (LS_PHY_CLR_RESET |
2776 LS_XGXS_ENABLE |
2777 LS_XGXS_CTL | LS_PHY_CLK_EN | LS_ATTN_ALARM), TRUE);
2778 DBG_ERROR("After Power Up and enable PHY in sxg_initialize_link\n");
2781 * Per information given by Aeluros, wait 100 ms after removing reset.
2782 * It's not enough to wait for the self-clearing reset bit in reg 0 to
2783 * clear.
2785 mdelay(100);
2787 /* Verify the PHY has come up by checking that the Reset bit has
2788 * cleared.
2790 status = sxg_read_mdio_reg(adapter,
2791 MIIM_DEV_PHY_PMA, /* PHY PMA/PMD module */
2792 PHY_PMA_CONTROL1, /* PMA/PMD control register */
2793 &Value);
2794 DBG_ERROR("After sxg_read_mdio_reg Value[%x] fail=%x\n", Value,
2795 (Value & PMA_CONTROL1_RESET));
2796 if (status != STATUS_SUCCESS)
2797 return (STATUS_FAILURE);
2798 if (Value & PMA_CONTROL1_RESET) /* reset complete if bit is 0 */
2799 return (STATUS_FAILURE);
2801 /* The SERDES should be initialized by now - confirm */
2802 READ_REG(HwRegs->LinkStatus, Value);
2803 if (Value & LS_SERDES_DOWN) /* verify SERDES is initialized */
2804 return (STATUS_FAILURE);
2806 /* The XAUI link should also be up - confirm */
2807 if (!(Value & LS_XAUI_LINK_UP)) /* verify XAUI link is up */
2808 return (STATUS_FAILURE);
2810 /* Initialize the PHY */
2811 status = sxg_phy_init(adapter);
2812 if (status != STATUS_SUCCESS)
2813 return (STATUS_FAILURE);
2815 /* Enable the Link Alarm */
2817 /* MIIM_DEV_PHY_PMA - PHY PMA/PMD module
2818 * LASI_CONTROL - LASI control register
2819 * LASI_CTL_LS_ALARM_ENABLE - enable link alarm bit
2821 status = sxg_write_mdio_reg(adapter, MIIM_DEV_PHY_PMA,
2822 LASI_CONTROL,
2823 LASI_CTL_LS_ALARM_ENABLE);
2824 if (status != STATUS_SUCCESS)
2825 return (STATUS_FAILURE);
2827 /* XXXTODO - temporary - verify bit is set */
2829 /* MIIM_DEV_PHY_PMA - PHY PMA/PMD module
2830 * LASI_CONTROL - LASI control register
2832 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA,
2833 LASI_CONTROL,
2834 &Value);
2836 if (status != STATUS_SUCCESS)
2837 return (STATUS_FAILURE);
2838 if (!(Value & LASI_CTL_LS_ALARM_ENABLE)) {
2839 DBG_ERROR("Error! LASI Control Alarm Enable bit not set!\n");
2841 /* Enable receive */
2842 MaxFrame = adapter->JumboEnabled ? JUMBOMAXFRAME : ETHERMAXFRAME;
2843 ConfigData = (RCV_CONFIG_ENABLE |
2844 RCV_CONFIG_ENPARSE |
2845 RCV_CONFIG_RCVBAD |
2846 RCV_CONFIG_RCVPAUSE |
2847 RCV_CONFIG_TZIPV6 |
2848 RCV_CONFIG_TZIPV4 |
2849 RCV_CONFIG_HASH_16 |
2850 RCV_CONFIG_SOCKET | RCV_CONFIG_BUFSIZE(MaxFrame));
2851 WRITE_REG(HwRegs->RcvConfig, ConfigData, TRUE);
2853 WRITE_REG(HwRegs->XmtConfig, XMT_CONFIG_ENABLE, TRUE);
2855 /* Mark the link as down. We'll get a link event when it comes up. */
2856 sxg_link_state(adapter, SXG_LINK_DOWN);
2858 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XInitLnk",
2859 adapter, 0, 0, 0);
2860 return (STATUS_SUCCESS);
2864 * sxg_phy_init - Initialize the PHY
2866 * Arguments -
2867 * adapter - A pointer to our adapter structure
2869 * Return
2870 * status
2872 static int sxg_phy_init(struct adapter_t *adapter)
2874 u32 Value;
2875 struct phy_ucode *p;
2876 int status;
2878 DBG_ERROR("ENTER %s\n", __func__);
2880 /* MIIM_DEV_PHY_PMA - PHY PMA/PMD module
2881 * 0xC205 - PHY ID register (?)
2882 * &Value - XXXTODO - add def
2884 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA,
2885 0xC205,
2886 &Value);
2887 if (status != STATUS_SUCCESS)
2888 return (STATUS_FAILURE);
2890 if (Value == 0x0012) {
2891 /* 0x0012 == AEL2005C PHY(?) - XXXTODO - add def */
2892 DBG_ERROR("AEL2005C PHY detected. Downloading PHY \
2893 microcode.\n");
2895 /* Initialize AEL2005C PHY and download PHY microcode */
2896 for (p = PhyUcode; p->Addr != 0xFFFF; p++) {
2897 if (p->Addr == 0) {
2898 /* if address == 0, data == sleep time in ms */
2899 mdelay(p->Data);
2900 } else {
2901 /* write the given data to the specified address */
2902 status = sxg_write_mdio_reg(adapter,
2903 MIIM_DEV_PHY_PMA,
2904 /* PHY address */
2905 p->Addr,
2906 /* PHY data */
2907 p->Data);
2908 if (status != STATUS_SUCCESS)
2909 return (STATUS_FAILURE);
2913 DBG_ERROR("EXIT %s\n", __func__);
2915 return (STATUS_SUCCESS);
2919 * sxg_link_event - Process a link event notification from the card
2921 * Arguments -
2922 * adapter - A pointer to our adapter structure
2924 * Return
2925 * None
2927 static void sxg_link_event(struct adapter_t *adapter)
2929 struct sxg_hw_regs *HwRegs = adapter->HwRegs;
2930 struct net_device *netdev = adapter->netdev;
2931 enum SXG_LINK_STATE LinkState;
2932 int status;
2933 u32 Value;
2935 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "LinkEvnt",
2936 adapter, 0, 0, 0);
2937 DBG_ERROR("ENTER %s\n", __func__);
2939 /* Check the Link Status register. We should have a Link Alarm. */
2940 READ_REG(HwRegs->LinkStatus, Value);
2941 if (Value & LS_LINK_ALARM) {
2943 * We got a Link Status alarm. First, pause to let the
2944 * link state settle (it can bounce a number of times)
2946 mdelay(10);
2948 /* Now clear the alarm by reading the LASI status register. */
2949 /* MIIM_DEV_PHY_PMA - PHY PMA/PMD module */
2950 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA,
2951 /* LASI status register */
2952 LASI_STATUS,
2953 &Value);
2954 if (status != STATUS_SUCCESS) {
2955 DBG_ERROR("Error reading LASI Status MDIO register!\n");
2956 sxg_link_state(adapter, SXG_LINK_DOWN);
2957 /* ASSERT(0); */
2959 ASSERT(Value & LASI_STATUS_LS_ALARM);
2961 /* Now get and set the link state */
2962 LinkState = sxg_get_link_state(adapter);
2963 sxg_link_state(adapter, LinkState);
2964 DBG_ERROR("SXG: Link Alarm occurred. Link is %s\n",
2965 ((LinkState == SXG_LINK_UP) ? "UP" : "DOWN"));
2966 if (LinkState == SXG_LINK_UP)
2967 netif_carrier_on(netdev);
2968 else
2969 netif_carrier_off(netdev);
2970 } else {
2972 * XXXTODO - Assuming Link Attention is only being generated
2973 * for the Link Alarm pin (and not for a XAUI Link Status change)
2974 * , then it's impossible to get here. Yet we've gotten here
2975 * twice (under extreme conditions - bouncing the link up and
2976 * down many times a second). Needs further investigation.
2978 DBG_ERROR("SXG: sxg_link_event: Can't get here!\n");
2979 DBG_ERROR("SXG: Link Status == 0x%08X.\n", Value);
2980 /* ASSERT(0); */
2982 DBG_ERROR("EXIT %s\n", __func__);
2987 * sxg_get_link_state - Determine if the link is up or down
2989 * Arguments -
2990 * adapter - A pointer to our adapter structure
2992 * Return
2993 * Link State
2995 static enum SXG_LINK_STATE sxg_get_link_state(struct adapter_t *adapter)
2997 int status;
2998 u32 Value;
3000 DBG_ERROR("ENTER %s\n", __func__);
3002 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "GetLink",
3003 adapter, 0, 0, 0);
3006 * Per the Xenpak spec (and the IEEE 10Gb spec?), the link is up if
3007 * the following 3 bits (from 3 different MDIO registers) are all true.
3010 /* MIIM_DEV_PHY_PMA - PHY PMA/PMD module */
3011 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA,
3012 /* PMA/PMD Receive Signal Detect register */
3013 PHY_PMA_RCV_DET,
3014 &Value);
3015 if (status != STATUS_SUCCESS)
3016 goto bad;
3018 /* If PMA/PMD receive signal detect is 0, then the link is down */
3019 if (!(Value & PMA_RCV_DETECT))
3020 return (SXG_LINK_DOWN);
3022 /* MIIM_DEV_PHY_PCS - PHY PCS module */
3023 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PCS,
3024 /* PCS 10GBASE-R Status 1 register */
3025 PHY_PCS_10G_STATUS1,
3026 &Value);
3027 if (status != STATUS_SUCCESS)
3028 goto bad;
3030 /* If PCS is not locked to receive blocks, then the link is down */
3031 if (!(Value & PCS_10B_BLOCK_LOCK))
3032 return (SXG_LINK_DOWN);
3034 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_XS,/* PHY XS module */
3035 /* XS Lane Status register */
3036 PHY_XS_LANE_STATUS,
3037 &Value);
3038 if (status != STATUS_SUCCESS)
3039 goto bad;
3041 /* If XS transmit lanes are not aligned, then the link is down */
3042 if (!(Value & XS_LANE_ALIGN))
3043 return (SXG_LINK_DOWN);
3045 /* All 3 bits are true, so the link is up */
3046 DBG_ERROR("EXIT %s\n", __func__);
3048 return (SXG_LINK_UP);
3050 bad:
3051 /* An error occurred reading an MDIO register. This shouldn't happen. */
3052 DBG_ERROR("Error reading an MDIO register!\n");
3053 ASSERT(0);
3054 return (SXG_LINK_DOWN);
3057 static void sxg_indicate_link_state(struct adapter_t *adapter,
3058 enum SXG_LINK_STATE LinkState)
3060 if (adapter->LinkState == SXG_LINK_UP) {
3061 DBG_ERROR("%s: LINK now UP, call netif_start_queue\n",
3062 __func__);
3063 netif_start_queue(adapter->netdev);
3064 } else {
3065 DBG_ERROR("%s: LINK now DOWN, call netif_stop_queue\n",
3066 __func__);
3067 netif_stop_queue(adapter->netdev);
3072 * sxg_change_mtu - Change the Maximum Transfer Unit
3073 * * @returns 0 on success, negative on failure
3075 int sxg_change_mtu (struct net_device *netdev, int new_mtu)
3077 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(netdev);
3079 if (!((new_mtu == SXG_DEFAULT_MTU) || (new_mtu == SXG_JUMBO_MTU)))
3080 return -EINVAL;
3082 if(new_mtu == netdev->mtu)
3083 return 0;
3085 netdev->mtu = new_mtu;
3087 if (new_mtu == SXG_JUMBO_MTU) {
3088 adapter->JumboEnabled = TRUE;
3089 adapter->FrameSize = JUMBOMAXFRAME;
3090 adapter->ReceiveBufferSize = SXG_RCV_JUMBO_BUFFER_SIZE;
3091 } else {
3092 adapter->JumboEnabled = FALSE;
3093 adapter->FrameSize = ETHERMAXFRAME;
3094 adapter->ReceiveBufferSize = SXG_RCV_DATA_BUFFER_SIZE;
3097 sxg_entry_halt(netdev);
3098 sxg_entry_open(netdev);
3099 return 0;
3103 * sxg_link_state - Set the link state and if necessary, indicate.
3104 * This routine the central point of processing for all link state changes.
3105 * Nothing else in the driver should alter the link state or perform
3106 * link state indications
3108 * Arguments -
3109 * adapter - A pointer to our adapter structure
3110 * LinkState - The link state
3112 * Return
3113 * None
3115 static void sxg_link_state(struct adapter_t *adapter,
3116 enum SXG_LINK_STATE LinkState)
3118 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "LnkINDCT",
3119 adapter, LinkState, adapter->LinkState, adapter->State);
3121 DBG_ERROR("ENTER %s\n", __func__);
3124 * Hold the adapter lock during this routine. Maybe move
3125 * the lock to the caller.
3127 /* IMP TODO : Check if we can survive without taking this lock */
3128 // spin_lock(&adapter->AdapterLock);
3129 if (LinkState == adapter->LinkState) {
3130 /* Nothing changed.. */
3131 // spin_unlock(&adapter->AdapterLock);
3132 DBG_ERROR("EXIT #0 %s. Link status = %d\n",
3133 __func__, LinkState);
3134 return;
3136 /* Save the adapter state */
3137 adapter->LinkState = LinkState;
3139 /* Drop the lock and indicate link state */
3140 // spin_unlock(&adapter->AdapterLock);
3141 DBG_ERROR("EXIT #1 %s\n", __func__);
3143 sxg_indicate_link_state(adapter, LinkState);
3147 * sxg_write_mdio_reg - Write to a register on the MDIO bus
3149 * Arguments -
3150 * adapter - A pointer to our adapter structure
3151 * DevAddr - MDIO device number being addressed
3152 * RegAddr - register address for the specified MDIO device
3153 * Value - value to write to the MDIO register
3155 * Return
3156 * status
3158 static int sxg_write_mdio_reg(struct adapter_t *adapter,
3159 u32 DevAddr, u32 RegAddr, u32 Value)
3161 struct sxg_hw_regs *HwRegs = adapter->HwRegs;
3162 /* Address operation (written to MIIM field reg) */
3163 u32 AddrOp;
3164 /* Write operation (written to MIIM field reg) */
3165 u32 WriteOp;
3166 u32 Cmd;/* Command (written to MIIM command reg) */
3167 u32 ValueRead;
3168 u32 Timeout;
3170 /* DBG_ERROR("ENTER %s\n", __func__); */
3172 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "WrtMDIO",
3173 adapter, 0, 0, 0);
3175 /* Ensure values don't exceed field width */
3176 DevAddr &= 0x001F; /* 5-bit field */
3177 RegAddr &= 0xFFFF; /* 16-bit field */
3178 Value &= 0xFFFF; /* 16-bit field */
3180 /* Set MIIM field register bits for an MIIM address operation */
3181 AddrOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
3182 (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
3183 (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
3184 (MIIM_OP_ADDR << AXGMAC_AMIIM_FIELD_OP_SHIFT) | RegAddr;
3186 /* Set MIIM field register bits for an MIIM write operation */
3187 WriteOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
3188 (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
3189 (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
3190 (MIIM_OP_WRITE << AXGMAC_AMIIM_FIELD_OP_SHIFT) | Value;
3192 /* Set MIIM command register bits to execute an MIIM command */
3193 Cmd = AXGMAC_AMIIM_CMD_START | AXGMAC_AMIIM_CMD_10G_OPERATION;
3195 /* Reset the command register command bit (in case it's not 0) */
3196 WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
3198 /* MIIM write to set the address of the specified MDIO register */
3199 WRITE_REG(HwRegs->MacAmiimField, AddrOp, TRUE);
3201 /* Write to MIIM Command Register to execute to address operation */
3202 WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
3204 /* Poll AMIIM Indicator register to wait for completion */
3205 Timeout = SXG_LINK_TIMEOUT;
3206 do {
3207 udelay(100); /* Timeout in 100us units */
3208 READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
3209 if (--Timeout == 0) {
3210 return (STATUS_FAILURE);
3212 } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
3214 /* Reset the command register command bit */
3215 WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
3217 /* MIIM write to set up an MDIO write operation */
3218 WRITE_REG(HwRegs->MacAmiimField, WriteOp, TRUE);
3220 /* Write to MIIM Command Register to execute the write operation */
3221 WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
3223 /* Poll AMIIM Indicator register to wait for completion */
3224 Timeout = SXG_LINK_TIMEOUT;
3225 do {
3226 udelay(100); /* Timeout in 100us units */
3227 READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
3228 if (--Timeout == 0) {
3229 return (STATUS_FAILURE);
3231 } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
3233 /* DBG_ERROR("EXIT %s\n", __func__); */
3235 return (STATUS_SUCCESS);
3239 * sxg_read_mdio_reg - Read a register on the MDIO bus
3241 * Arguments -
3242 * adapter - A pointer to our adapter structure
3243 * DevAddr - MDIO device number being addressed
3244 * RegAddr - register address for the specified MDIO device
3245 * pValue - pointer to where to put data read from the MDIO register
3247 * Return
3248 * status
3250 static int sxg_read_mdio_reg(struct adapter_t *adapter,
3251 u32 DevAddr, u32 RegAddr, u32 *pValue)
3253 struct sxg_hw_regs *HwRegs = adapter->HwRegs;
3254 u32 AddrOp; /* Address operation (written to MIIM field reg) */
3255 u32 ReadOp; /* Read operation (written to MIIM field reg) */
3256 u32 Cmd; /* Command (written to MIIM command reg) */
3257 u32 ValueRead;
3258 u32 Timeout;
3260 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "WrtMDIO",
3261 adapter, 0, 0, 0);
3262 DBG_ERROR("ENTER %s\n", __FUNCTION__);
3264 /* Ensure values don't exceed field width */
3265 DevAddr &= 0x001F; /* 5-bit field */
3266 RegAddr &= 0xFFFF; /* 16-bit field */
3268 /* Set MIIM field register bits for an MIIM address operation */
3269 AddrOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
3270 (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
3271 (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
3272 (MIIM_OP_ADDR << AXGMAC_AMIIM_FIELD_OP_SHIFT) | RegAddr;
3274 /* Set MIIM field register bits for an MIIM read operation */
3275 ReadOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
3276 (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
3277 (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
3278 (MIIM_OP_READ << AXGMAC_AMIIM_FIELD_OP_SHIFT);
3280 /* Set MIIM command register bits to execute an MIIM command */
3281 Cmd = AXGMAC_AMIIM_CMD_START | AXGMAC_AMIIM_CMD_10G_OPERATION;
3283 /* Reset the command register command bit (in case it's not 0) */
3284 WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
3286 /* MIIM write to set the address of the specified MDIO register */
3287 WRITE_REG(HwRegs->MacAmiimField, AddrOp, TRUE);
3289 /* Write to MIIM Command Register to execute to address operation */
3290 WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
3292 /* Poll AMIIM Indicator register to wait for completion */
3293 Timeout = SXG_LINK_TIMEOUT;
3294 do {
3295 udelay(100); /* Timeout in 100us units */
3296 READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
3297 if (--Timeout == 0) {
3298 DBG_ERROR("EXIT %s with STATUS_FAILURE 1\n", __FUNCTION__);
3300 return (STATUS_FAILURE);
3302 } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
3304 /* Reset the command register command bit */
3305 WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
3307 /* MIIM write to set up an MDIO register read operation */
3308 WRITE_REG(HwRegs->MacAmiimField, ReadOp, TRUE);
3310 /* Write to MIIM Command Register to execute the read operation */
3311 WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
3313 /* Poll AMIIM Indicator register to wait for completion */
3314 Timeout = SXG_LINK_TIMEOUT;
3315 do {
3316 udelay(100); /* Timeout in 100us units */
3317 READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
3318 if (--Timeout == 0) {
3319 DBG_ERROR("EXIT %s with STATUS_FAILURE 2\n", __FUNCTION__);
3321 return (STATUS_FAILURE);
3323 } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
3325 /* Read the MDIO register data back from the field register */
3326 READ_REG(HwRegs->MacAmiimField, *pValue);
3327 *pValue &= 0xFFFF; /* data is in the lower 16 bits */
3329 DBG_ERROR("EXIT %s\n", __FUNCTION__);
3331 return (STATUS_SUCCESS);
3335 * Functions to obtain the CRC corresponding to the destination mac address.
3336 * This is a standard ethernet CRC in that it is a 32-bit, reflected CRC using
3337 * the polynomial:
3338 * x^32 + x^26 + x^23 + x^22 + x^16 + x^12 + x^11 + x^10 + x^8 + x^7 + x^5
3339 * + x^4 + x^2 + x^1.
3341 * After the CRC for the 6 bytes is generated (but before the value is
3342 * complemented), we must then transpose the value and return bits 30-23.
3344 static u32 sxg_crc_table[256];/* Table of CRC's for all possible byte values */
3345 static u32 sxg_crc_init; /* Is table initialized */
3347 /* Contruct the CRC32 table */
3348 static void sxg_mcast_init_crc32(void)
3350 u32 c; /* CRC shit reg */
3351 u32 e = 0; /* Poly X-or pattern */
3352 int i; /* counter */
3353 int k; /* byte being shifted into crc */
3355 static int p[] = { 0, 1, 2, 4, 5, 7, 8, 10, 11, 12, 16, 22, 23, 26 };
3357 for (i = 0; i < sizeof(p) / sizeof(int); i++) {
3358 e |= 1L << (31 - p[i]);
3361 for (i = 1; i < 256; i++) {
3362 c = i;
3363 for (k = 8; k; k--) {
3364 c = c & 1 ? (c >> 1) ^ e : c >> 1;
3366 sxg_crc_table[i] = c;
3371 * Return the MAC hast as described above.
3373 static unsigned char sxg_mcast_get_mac_hash(char *macaddr)
3375 u32 crc;
3376 char *p;
3377 int i;
3378 unsigned char machash = 0;
3380 if (!sxg_crc_init) {
3381 sxg_mcast_init_crc32();
3382 sxg_crc_init = 1;
3385 crc = 0xFFFFFFFF; /* Preload shift register, per crc-32 spec */
3386 for (i = 0, p = macaddr; i < 6; ++p, ++i) {
3387 crc = (crc >> 8) ^ sxg_crc_table[(crc ^ *p) & 0xFF];
3390 /* Return bits 1-8, transposed */
3391 for (i = 1; i < 9; i++) {
3392 machash |= (((crc >> i) & 1) << (8 - i));
3395 return (machash);
3398 static void sxg_mcast_set_mask(struct adapter_t *adapter)
3400 struct sxg_ucode_regs *sxg_regs = adapter->UcodeRegs;
3402 DBG_ERROR("%s ENTER (%s) MacFilter[%x] mask[%llx]\n", __FUNCTION__,
3403 adapter->netdev->name, (unsigned int)adapter->MacFilter,
3404 adapter->MulticastMask);
3406 if (adapter->MacFilter & (MAC_ALLMCAST | MAC_PROMISC)) {
3408 * Turn on all multicast addresses. We have to do this for
3409 * promiscuous mode as well as ALLMCAST mode. It saves the
3410 * Microcode from having keep state about the MAC configuration
3412 /* DBG_ERROR("sxg: %s MacFilter = MAC_ALLMCAST | MAC_PROMISC\n \
3413 * SLUT MODE!!!\n",__func__);
3415 WRITE_REG(sxg_regs->McastLow, 0xFFFFFFFF, FLUSH);
3416 WRITE_REG(sxg_regs->McastHigh, 0xFFFFFFFF, FLUSH);
3417 /* DBG_ERROR("%s (%s) WRITE to slic_regs slic_mcastlow&high \
3418 * 0xFFFFFFFF\n",__func__, adapter->netdev->name);
3421 } else {
3423 * Commit our multicast mast to the SLIC by writing to the
3424 * multicast address mask registers
3426 DBG_ERROR("%s (%s) WRITE mcastlow[%lx] mcasthigh[%lx]\n",
3427 __func__, adapter->netdev->name,
3428 ((ulong) (adapter->MulticastMask & 0xFFFFFFFF)),
3429 ((ulong)
3430 ((adapter->MulticastMask >> 32) & 0xFFFFFFFF)));
3432 WRITE_REG(sxg_regs->McastLow,
3433 (u32) (adapter->MulticastMask & 0xFFFFFFFF), FLUSH);
3434 WRITE_REG(sxg_regs->McastHigh,
3435 (u32) ((adapter->
3436 MulticastMask >> 32) & 0xFFFFFFFF), FLUSH);
3440 static void sxg_mcast_set_bit(struct adapter_t *adapter, char *address)
3442 unsigned char crcpoly;
3444 /* Get the CRC polynomial for the mac address */
3445 crcpoly = sxg_mcast_get_mac_hash(address);
3448 * We only have space on the SLIC for 64 entries. Lop
3449 * off the top two bits. (2^6 = 64)
3451 crcpoly &= 0x3F;
3453 /* OR in the new bit into our 64 bit mask. */
3454 adapter->MulticastMask |= (u64) 1 << crcpoly;
3458 * Function takes MAC addresses from dev_mc_list and generates the Mask
3461 static void sxg_set_mcast_addr(struct adapter_t *adapter)
3463 struct dev_mc_list *mclist;
3464 struct net_device *dev = adapter->netdev;
3465 int i;
3467 if (adapter->MacFilter & (MAC_ALLMCAST | MAC_MCAST)) {
3468 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
3469 i++, mclist = mclist->next) {
3470 sxg_mcast_set_bit(adapter,mclist->da_addr);
3473 sxg_mcast_set_mask(adapter);
3476 static void sxg_mcast_set_list(struct net_device *dev)
3478 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
3480 ASSERT(adapter);
3481 if (dev->flags & IFF_PROMISC)
3482 adapter->MacFilter |= MAC_PROMISC;
3483 if (dev->flags & IFF_MULTICAST)
3484 adapter->MacFilter |= MAC_MCAST;
3485 if (dev->flags & IFF_ALLMULTI)
3486 adapter->MacFilter |= MAC_ALLMCAST;
3488 //XXX handle other flags as well
3489 sxg_set_mcast_addr(adapter);
3492 void sxg_free_sgl_buffers(struct adapter_t *adapter)
3494 struct list_entry *ple;
3495 struct sxg_scatter_gather *Sgl;
3497 while(!(IsListEmpty(&adapter->AllSglBuffers))) {
3498 ple = RemoveHeadList(&adapter->AllSglBuffers);
3499 Sgl = container_of(ple, struct sxg_scatter_gather, AllList);
3500 kfree(Sgl);
3501 adapter->AllSglBufferCount--;
3505 void sxg_free_rcvblocks(struct adapter_t *adapter)
3507 u32 i;
3508 void *temp_RcvBlock;
3509 struct list_entry *ple;
3510 struct sxg_rcv_block_hdr *RcvBlockHdr;
3511 struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr;
3512 ASSERT((adapter->state == SXG_STATE_INITIALIZING) ||
3513 (adapter->state == SXG_STATE_HALTING));
3514 while(!(IsListEmpty(&adapter->AllRcvBlocks))) {
3516 ple = RemoveHeadList(&adapter->AllRcvBlocks);
3517 RcvBlockHdr = container_of(ple, struct sxg_rcv_block_hdr, AllList);
3519 if(RcvBlockHdr->VirtualAddress) {
3520 temp_RcvBlock = RcvBlockHdr->VirtualAddress;
3522 for(i=0; i< SXG_RCV_DESCRIPTORS_PER_BLOCK;
3523 i++, temp_RcvBlock += SXG_RCV_DATA_HDR_SIZE) {
3524 RcvDataBufferHdr =
3525 (struct sxg_rcv_data_buffer_hdr *)temp_RcvBlock;
3526 SXG_FREE_RCV_PACKET(RcvDataBufferHdr);
3530 pci_free_consistent(adapter->pcidev,
3531 SXG_RCV_BLOCK_SIZE(SXG_RCV_DATA_HDR_SIZE),
3532 RcvBlockHdr->VirtualAddress,
3533 RcvBlockHdr->PhysicalAddress);
3534 adapter->AllRcvBlockCount--;
3536 ASSERT(adapter->AllRcvBlockCount == 0);
3537 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XFrRBlk",
3538 adapter, 0, 0, 0);
3540 void sxg_free_mcast_addrs(struct adapter_t *adapter)
3542 struct sxg_multicast_address *address;
3543 while(adapter->MulticastAddrs) {
3544 address = adapter->MulticastAddrs;
3545 adapter->MulticastAddrs = address->Next;
3546 kfree(address);
3549 adapter->MulticastMask= 0;
3552 void sxg_unmap_resources(struct adapter_t *adapter)
3554 if(adapter->HwRegs) {
3555 iounmap((void *)adapter->HwRegs);
3557 if(adapter->UcodeRegs) {
3558 iounmap((void *)adapter->UcodeRegs);
3561 ASSERT(adapter->AllRcvBlockCount == 0);
3562 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XFrRBlk",
3563 adapter, 0, 0, 0);
3569 * sxg_free_resources - Free everything allocated in SxgAllocateResources
3571 * Arguments -
3572 * adapter - A pointer to our adapter structure
3574 * Return
3575 * none
3577 void sxg_free_resources(struct adapter_t *adapter)
3579 u32 RssIds, IsrCount;
3580 RssIds = SXG_RSS_CPU_COUNT(adapter);
3581 IsrCount = adapter->msi_enabled ? RssIds : 1;
3583 if (adapter->BasicAllocations == FALSE) {
3585 * No allocations have been made, including spinlocks,
3586 * or listhead initializations. Return.
3588 return;
3591 if (!(IsListEmpty(&adapter->AllRcvBlocks))) {
3592 sxg_free_rcvblocks(adapter);
3594 if (!(IsListEmpty(&adapter->AllSglBuffers))) {
3595 sxg_free_sgl_buffers(adapter);
3598 if (adapter->XmtRingZeroIndex) {
3599 pci_free_consistent(adapter->pcidev,
3600 sizeof(u32),
3601 adapter->XmtRingZeroIndex,
3602 adapter->PXmtRingZeroIndex);
3604 if (adapter->Isr) {
3605 pci_free_consistent(adapter->pcidev,
3606 sizeof(u32) * IsrCount,
3607 adapter->Isr, adapter->PIsr);
3610 if (adapter->EventRings) {
3611 pci_free_consistent(adapter->pcidev,
3612 sizeof(struct sxg_event_ring) * RssIds,
3613 adapter->EventRings, adapter->PEventRings);
3615 if (adapter->RcvRings) {
3616 pci_free_consistent(adapter->pcidev,
3617 sizeof(struct sxg_rcv_ring) * 1,
3618 adapter->RcvRings,
3619 adapter->PRcvRings);
3620 adapter->RcvRings = NULL;
3623 if(adapter->XmtRings) {
3624 pci_free_consistent(adapter->pcidev,
3625 sizeof(struct sxg_xmt_ring) * 1,
3626 adapter->XmtRings,
3627 adapter->PXmtRings);
3628 adapter->XmtRings = NULL;
3631 if (adapter->ucode_stats) {
3632 pci_unmap_single(adapter->pcidev,
3633 sizeof(struct sxg_ucode_stats),
3634 adapter->pucode_stats, PCI_DMA_FROMDEVICE);
3635 adapter->ucode_stats = NULL;
3639 /* Unmap register spaces */
3640 sxg_unmap_resources(adapter);
3642 sxg_free_mcast_addrs(adapter);
3644 adapter->BasicAllocations = FALSE;
3649 * sxg_allocate_complete -
3651 * This routine is called when a memory allocation has completed.
3653 * Arguments -
3654 * struct adapter_t * - Our adapter structure
3655 * VirtualAddress - Memory virtual address
3656 * PhysicalAddress - Memory physical address
3657 * Length - Length of memory allocated (or 0)
3658 * Context - The type of buffer allocated
3660 * Return
3661 * None.
3663 static int sxg_allocate_complete(struct adapter_t *adapter,
3664 void *VirtualAddress,
3665 dma_addr_t PhysicalAddress,
3666 u32 Length, enum sxg_buffer_type Context)
3668 int status = 0;
3669 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AllocCmp",
3670 adapter, VirtualAddress, Length, Context);
3671 ASSERT(atomic_read(&adapter->pending_allocations));
3672 atomic_dec(&adapter->pending_allocations);
3674 switch (Context) {
3676 case SXG_BUFFER_TYPE_RCV:
3677 status = sxg_allocate_rcvblock_complete(adapter,
3678 VirtualAddress,
3679 PhysicalAddress, Length);
3680 break;
3681 case SXG_BUFFER_TYPE_SGL:
3682 sxg_allocate_sgl_buffer_complete(adapter, (struct sxg_scatter_gather *)
3683 VirtualAddress,
3684 PhysicalAddress, Length);
3685 break;
3687 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlocCmp",
3688 adapter, VirtualAddress, Length, Context);
3690 return status;
3694 * sxg_allocate_buffer_memory - Shared memory allocation routine used for
3695 * synchronous and asynchronous buffer allocations
3697 * Arguments -
3698 * adapter - A pointer to our adapter structure
3699 * Size - block size to allocate
3700 * BufferType - Type of buffer to allocate
3702 * Return
3703 * int
3705 static int sxg_allocate_buffer_memory(struct adapter_t *adapter,
3706 u32 Size, enum sxg_buffer_type BufferType)
3708 int status;
3709 void *Buffer;
3710 dma_addr_t pBuffer;
3712 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AllocMem",
3713 adapter, Size, BufferType, 0);
3715 * Grab the adapter lock and check the state. If we're in anything other
3716 * than INITIALIZING or RUNNING state, fail. This is to prevent
3717 * allocations in an improper driver state
3720 atomic_inc(&adapter->pending_allocations);
3722 if(BufferType != SXG_BUFFER_TYPE_SGL)
3723 Buffer = pci_alloc_consistent(adapter->pcidev, Size, &pBuffer);
3724 else {
3725 Buffer = kzalloc(Size, GFP_ATOMIC);
3726 pBuffer = (dma_addr_t)NULL;
3728 if (Buffer == NULL) {
3730 * Decrement the AllocationsPending count while holding
3731 * the lock. Pause processing relies on this
3733 atomic_dec(&adapter->pending_allocations);
3734 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AlcMemF1",
3735 adapter, Size, BufferType, 0);
3736 return (STATUS_RESOURCES);
3738 status = sxg_allocate_complete(adapter, Buffer, pBuffer, Size, BufferType);
3740 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlocMem",
3741 adapter, Size, BufferType, status);
3742 return status;
3746 * sxg_allocate_rcvblock_complete - Complete a receive descriptor
3747 * block allocation
3749 * Arguments -
3750 * adapter - A pointer to our adapter structure
3751 * RcvBlock - receive block virtual address
3752 * PhysicalAddress - Physical address
3753 * Length - Memory length
3755 * Return
3757 static int sxg_allocate_rcvblock_complete(struct adapter_t *adapter,
3758 void *RcvBlock,
3759 dma_addr_t PhysicalAddress,
3760 u32 Length)
3762 u32 i;
3763 u32 BufferSize = adapter->ReceiveBufferSize;
3764 u64 Paddr;
3765 void *temp_RcvBlock;
3766 struct sxg_rcv_block_hdr *RcvBlockHdr;
3767 struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr;
3768 struct sxg_rcv_descriptor_block *RcvDescriptorBlock;
3769 struct sxg_rcv_descriptor_block_hdr *RcvDescriptorBlockHdr;
3771 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AlRcvBlk",
3772 adapter, RcvBlock, Length, 0);
3773 if (RcvBlock == NULL) {
3774 goto fail;
3776 memset(RcvBlock, 0, Length);
3777 ASSERT((BufferSize == SXG_RCV_DATA_BUFFER_SIZE) ||
3778 (BufferSize == SXG_RCV_JUMBO_BUFFER_SIZE));
3779 ASSERT(Length == SXG_RCV_BLOCK_SIZE(SXG_RCV_DATA_HDR_SIZE));
3781 * First, initialize the contained pool of receive data buffers.
3782 * This initialization requires NBL/NB/MDL allocations, if any of them
3783 * fail, free the block and return without queueing the shared memory
3785 //RcvDataBuffer = RcvBlock;
3786 temp_RcvBlock = RcvBlock;
3787 for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK;
3788 i++, temp_RcvBlock += SXG_RCV_DATA_HDR_SIZE) {
3789 RcvDataBufferHdr = (struct sxg_rcv_data_buffer_hdr *)
3790 temp_RcvBlock;
3791 /* For FREE macro assertion */
3792 RcvDataBufferHdr->State = SXG_BUFFER_UPSTREAM;
3793 SXG_ALLOCATE_RCV_PACKET(adapter, RcvDataBufferHdr, BufferSize);
3794 if (RcvDataBufferHdr->SxgDumbRcvPacket == NULL)
3795 goto fail;
3800 * Place this entire block of memory on the AllRcvBlocks queue so it
3801 * can be free later
3804 RcvBlockHdr = (struct sxg_rcv_block_hdr *) ((unsigned char *)RcvBlock +
3805 SXG_RCV_BLOCK_HDR_OFFSET(SXG_RCV_DATA_HDR_SIZE));
3806 RcvBlockHdr->VirtualAddress = RcvBlock;
3807 RcvBlockHdr->PhysicalAddress = PhysicalAddress;
3808 spin_lock(&adapter->RcvQLock);
3809 adapter->AllRcvBlockCount++;
3810 InsertTailList(&adapter->AllRcvBlocks, &RcvBlockHdr->AllList);
3811 spin_unlock(&adapter->RcvQLock);
3813 /* Now free the contained receive data buffers that we
3814 * initialized above */
3815 temp_RcvBlock = RcvBlock;
3816 for (i = 0, Paddr = PhysicalAddress;
3817 i < SXG_RCV_DESCRIPTORS_PER_BLOCK;
3818 i++, Paddr += SXG_RCV_DATA_HDR_SIZE,
3819 temp_RcvBlock += SXG_RCV_DATA_HDR_SIZE) {
3820 RcvDataBufferHdr =
3821 (struct sxg_rcv_data_buffer_hdr *)temp_RcvBlock;
3822 spin_lock(&adapter->RcvQLock);
3823 SXG_FREE_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr);
3824 spin_unlock(&adapter->RcvQLock);
3827 /* Locate the descriptor block and put it on a separate free queue */
3828 RcvDescriptorBlock =
3829 (struct sxg_rcv_descriptor_block *) ((unsigned char *)RcvBlock +
3830 SXG_RCV_DESCRIPTOR_BLOCK_OFFSET
3831 (SXG_RCV_DATA_HDR_SIZE));
3832 RcvDescriptorBlockHdr =
3833 (struct sxg_rcv_descriptor_block_hdr *) ((unsigned char *)RcvBlock +
3834 SXG_RCV_DESCRIPTOR_BLOCK_HDR_OFFSET
3835 (SXG_RCV_DATA_HDR_SIZE));
3836 RcvDescriptorBlockHdr->VirtualAddress = RcvDescriptorBlock;
3837 RcvDescriptorBlockHdr->PhysicalAddress = Paddr;
3838 spin_lock(&adapter->RcvQLock);
3839 SXG_FREE_RCV_DESCRIPTOR_BLOCK(adapter, RcvDescriptorBlockHdr);
3840 spin_unlock(&adapter->RcvQLock);
3841 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlRBlk",
3842 adapter, RcvBlock, Length, 0);
3843 return STATUS_SUCCESS;
3844 fail:
3845 /* Free any allocated resources */
3846 if (RcvBlock) {
3847 temp_RcvBlock = RcvBlock;
3848 for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK;
3849 i++, temp_RcvBlock += SXG_RCV_DATA_HDR_SIZE) {
3850 RcvDataBufferHdr =
3851 (struct sxg_rcv_data_buffer_hdr *)temp_RcvBlock;
3852 SXG_FREE_RCV_PACKET(RcvDataBufferHdr);
3854 pci_free_consistent(adapter->pcidev,
3855 Length, RcvBlock, PhysicalAddress);
3857 DBG_ERROR("%s: OUT OF RESOURCES\n", __func__);
3858 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "RcvAFail",
3859 adapter, adapter->FreeRcvBufferCount,
3860 adapter->FreeRcvBlockCount, adapter->AllRcvBlockCount);
3861 adapter->Stats.NoMem++;
3862 /* As allocation failed, free all previously allocated blocks..*/
3863 //sxg_free_rcvblocks(adapter);
3865 return STATUS_RESOURCES;
3869 * sxg_allocate_sgl_buffer_complete - Complete a SGL buffer allocation
3871 * Arguments -
3872 * adapter - A pointer to our adapter structure
3873 * SxgSgl - struct sxg_scatter_gather buffer
3874 * PhysicalAddress - Physical address
3875 * Length - Memory length
3877 * Return
3879 static void sxg_allocate_sgl_buffer_complete(struct adapter_t *adapter,
3880 struct sxg_scatter_gather *SxgSgl,
3881 dma_addr_t PhysicalAddress,
3882 u32 Length)
3884 unsigned long sgl_flags;
3885 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AlSglCmp",
3886 adapter, SxgSgl, Length, 0);
3887 spin_lock_irqsave(&adapter->SglQLock, sgl_flags);
3888 adapter->AllSglBufferCount++;
3889 /* PhysicalAddress; */
3890 SxgSgl->PhysicalAddress = PhysicalAddress;
3891 /* Initialize backpointer once */
3892 SxgSgl->adapter = adapter;
3893 InsertTailList(&adapter->AllSglBuffers, &SxgSgl->AllList);
3894 spin_unlock_irqrestore(&adapter->SglQLock, sgl_flags);
3895 SxgSgl->State = SXG_BUFFER_BUSY;
3896 SXG_FREE_SGL_BUFFER(adapter, SxgSgl, NULL);
3897 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlSgl",
3898 adapter, SxgSgl, Length, 0);
3902 static int sxg_adapter_set_hwaddr(struct adapter_t *adapter)
3905 * DBG_ERROR ("%s ENTER card->config_set[%x] port[%d] physport[%d] \
3906 * funct#[%d]\n", __func__, card->config_set,
3907 * adapter->port, adapter->physport, adapter->functionnumber);
3909 * sxg_dbg_macaddrs(adapter);
3911 /* DBG_ERROR ("%s AFTER copying from config.macinfo into currmacaddr\n",
3912 * __FUNCTION__);
3915 /* sxg_dbg_macaddrs(adapter); */
3917 struct net_device * dev = adapter->netdev;
3918 if(!dev)
3920 printk("sxg: Dev is Null\n");
3923 DBG_ERROR("%s ENTER (%s)\n", __FUNCTION__, adapter->netdev->name);
3925 if (netif_running(dev)) {
3926 return -EBUSY;
3928 if (!adapter) {
3929 return -EBUSY;
3932 if (!(adapter->currmacaddr[0] ||
3933 adapter->currmacaddr[1] ||
3934 adapter->currmacaddr[2] ||
3935 adapter->currmacaddr[3] ||
3936 adapter->currmacaddr[4] || adapter->currmacaddr[5])) {
3937 memcpy(adapter->currmacaddr, adapter->macaddr, 6);
3939 if (adapter->netdev) {
3940 memcpy(adapter->netdev->dev_addr, adapter->currmacaddr, 6);
3941 memcpy(adapter->netdev->perm_addr, adapter->currmacaddr, 6);
3943 /* DBG_ERROR ("%s EXIT port %d\n", __func__, adapter->port); */
3944 sxg_dbg_macaddrs(adapter);
3946 return 0;
3949 #if XXXTODO
3950 static int sxg_mac_set_address(struct net_device *dev, void *ptr)
3952 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
3953 struct sockaddr *addr = ptr;
3955 DBG_ERROR("%s ENTER (%s)\n", __func__, adapter->netdev->name);
3957 if (netif_running(dev)) {
3958 return -EBUSY;
3960 if (!adapter) {
3961 return -EBUSY;
3963 DBG_ERROR("sxg: %s (%s) curr %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
3964 __func__, adapter->netdev->name, adapter->currmacaddr[0],
3965 adapter->currmacaddr[1], adapter->currmacaddr[2],
3966 adapter->currmacaddr[3], adapter->currmacaddr[4],
3967 adapter->currmacaddr[5]);
3968 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3969 memcpy(adapter->currmacaddr, addr->sa_data, dev->addr_len);
3970 DBG_ERROR("sxg: %s (%s) new %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
3971 __func__, adapter->netdev->name, adapter->currmacaddr[0],
3972 adapter->currmacaddr[1], adapter->currmacaddr[2],
3973 adapter->currmacaddr[3], adapter->currmacaddr[4],
3974 adapter->currmacaddr[5]);
3976 sxg_config_set(adapter, TRUE);
3977 return 0;
3979 #endif
3982 * SXG DRIVER FUNCTIONS (below)
3984 * sxg_initialize_adapter - Initialize adapter
3986 * Arguments -
3987 * adapter - A pointer to our adapter structure
3989 * Return - int
3991 static int sxg_initialize_adapter(struct adapter_t *adapter)
3993 u32 RssIds, IsrCount;
3994 u32 i;
3995 int status;
3996 int sxg_rcv_ring_size = SXG_RCV_RING_SIZE;
3998 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "InitAdpt",
3999 adapter, 0, 0, 0);
4001 RssIds = 1; /* XXXTODO SXG_RSS_CPU_COUNT(adapter); */
4002 IsrCount = adapter->msi_enabled ? RssIds : 1;
4005 * Sanity check SXG_UCODE_REGS structure definition to
4006 * make sure the length is correct
4008 ASSERT(sizeof(struct sxg_ucode_regs) == SXG_REGISTER_SIZE_PER_CPU);
4010 /* Disable interrupts */
4011 SXG_DISABLE_ALL_INTERRUPTS(adapter);
4013 /* Set MTU */
4014 ASSERT((adapter->FrameSize == ETHERMAXFRAME) ||
4015 (adapter->FrameSize == JUMBOMAXFRAME));
4016 WRITE_REG(adapter->UcodeRegs[0].LinkMtu, adapter->FrameSize, TRUE);
4018 /* Set event ring base address and size */
4019 WRITE_REG64(adapter,
4020 adapter->UcodeRegs[0].EventBase, adapter->PEventRings, 0);
4021 WRITE_REG(adapter->UcodeRegs[0].EventSize, EVENT_RING_SIZE, TRUE);
4023 /* Per-ISR initialization */
4024 for (i = 0; i < IsrCount; i++) {
4025 u64 Addr;
4026 /* Set interrupt status pointer */
4027 Addr = adapter->PIsr + (i * sizeof(u32));
4028 WRITE_REG64(adapter, adapter->UcodeRegs[i].Isp, Addr, i);
4031 /* XMT ring zero index */
4032 WRITE_REG64(adapter,
4033 adapter->UcodeRegs[0].SPSendIndex,
4034 adapter->PXmtRingZeroIndex, 0);
4036 /* Per-RSS initialization */
4037 for (i = 0; i < RssIds; i++) {
4038 /* Release all event ring entries to the Microcode */
4039 WRITE_REG(adapter->UcodeRegs[i].EventRelease, EVENT_RING_SIZE,
4040 TRUE);
4043 /* Transmit ring base and size */
4044 WRITE_REG64(adapter,
4045 adapter->UcodeRegs[0].XmtBase, adapter->PXmtRings, 0);
4046 WRITE_REG(adapter->UcodeRegs[0].XmtSize, SXG_XMT_RING_SIZE, TRUE);
4048 /* Receive ring base and size */
4049 WRITE_REG64(adapter,
4050 adapter->UcodeRegs[0].RcvBase, adapter->PRcvRings, 0);
4051 if (adapter->JumboEnabled == TRUE)
4052 sxg_rcv_ring_size = SXG_JUMBO_RCV_RING_SIZE;
4053 WRITE_REG(adapter->UcodeRegs[0].RcvSize, sxg_rcv_ring_size, TRUE);
4055 /* Populate the card with receive buffers */
4056 sxg_stock_rcv_buffers(adapter);
4059 * Initialize checksum offload capabilities. At the moment we always
4060 * enable IP and TCP receive checksums on the card. Depending on the
4061 * checksum configuration specified by the user, we can choose to
4062 * report or ignore the checksum information provided by the card.
4064 WRITE_REG(adapter->UcodeRegs[0].ReceiveChecksum,
4065 SXG_RCV_TCP_CSUM_ENABLED | SXG_RCV_IP_CSUM_ENABLED, TRUE);
4067 /* Initialize the MAC, XAUI */
4068 DBG_ERROR("sxg: %s ENTER sxg_initialize_link\n", __func__);
4069 status = sxg_initialize_link(adapter);
4070 DBG_ERROR("sxg: %s EXIT sxg_initialize_link status[%x]\n", __func__,
4071 status);
4072 if (status != STATUS_SUCCESS) {
4073 return (status);
4076 * Initialize Dead to FALSE.
4077 * SlicCheckForHang or SlicDumpThread will take it from here.
4079 adapter->Dead = FALSE;
4080 adapter->PingOutstanding = FALSE;
4081 adapter->State = SXG_STATE_RUNNING;
4083 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XInit",
4084 adapter, 0, 0, 0);
4085 return (STATUS_SUCCESS);
4089 * sxg_fill_descriptor_block - Populate a descriptor block and give it to
4090 * the card. The caller should hold the RcvQLock
4092 * Arguments -
4093 * adapter - A pointer to our adapter structure
4094 * RcvDescriptorBlockHdr - Descriptor block to fill
4096 * Return
4097 * status
4099 static int sxg_fill_descriptor_block(struct adapter_t *adapter,
4100 struct sxg_rcv_descriptor_block_hdr *RcvDescriptorBlockHdr)
4102 u32 i;
4103 struct sxg_ring_info *RcvRingInfo = &adapter->RcvRingZeroInfo;
4104 struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr;
4105 struct sxg_rcv_descriptor_block *RcvDescriptorBlock;
4106 struct sxg_cmd *RingDescriptorCmd;
4107 struct sxg_rcv_ring *RingZero = &adapter->RcvRings[0];
4109 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "FilBlk",
4110 adapter, adapter->RcvBuffersOnCard,
4111 adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount);
4113 ASSERT(RcvDescriptorBlockHdr);
4116 * If we don't have the resources to fill the descriptor block,
4117 * return failure
4119 if ((adapter->FreeRcvBufferCount < SXG_RCV_DESCRIPTORS_PER_BLOCK) ||
4120 SXG_RING_FULL(RcvRingInfo)) {
4121 adapter->Stats.NoMem++;
4122 return (STATUS_FAILURE);
4124 /* Get a ring descriptor command */
4125 SXG_GET_CMD(RingZero,
4126 RcvRingInfo, RingDescriptorCmd, RcvDescriptorBlockHdr);
4127 ASSERT(RingDescriptorCmd);
4128 RcvDescriptorBlockHdr->State = SXG_BUFFER_ONCARD;
4129 RcvDescriptorBlock = (struct sxg_rcv_descriptor_block *)
4130 RcvDescriptorBlockHdr->VirtualAddress;
4132 /* Fill in the descriptor block */
4133 for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK; i++) {
4134 SXG_GET_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr);
4135 ASSERT(RcvDataBufferHdr);
4136 // ASSERT(RcvDataBufferHdr->SxgDumbRcvPacket);
4137 if (!RcvDataBufferHdr->SxgDumbRcvPacket) {
4138 SXG_ALLOCATE_RCV_PACKET(adapter, RcvDataBufferHdr,
4139 adapter->ReceiveBufferSize);
4140 if(RcvDataBufferHdr->skb)
4141 RcvDataBufferHdr->SxgDumbRcvPacket =
4142 RcvDataBufferHdr->skb;
4143 else
4144 goto no_memory;
4146 SXG_REINIATIALIZE_PACKET(RcvDataBufferHdr->SxgDumbRcvPacket);
4147 RcvDataBufferHdr->State = SXG_BUFFER_ONCARD;
4148 RcvDescriptorBlock->Descriptors[i].VirtualAddress =
4149 (void *)RcvDataBufferHdr;
4151 RcvDescriptorBlock->Descriptors[i].PhysicalAddress =
4152 RcvDataBufferHdr->PhysicalAddress;
4154 /* Add the descriptor block to receive descriptor ring 0 */
4155 RingDescriptorCmd->Sgl = RcvDescriptorBlockHdr->PhysicalAddress;
4158 * RcvBuffersOnCard is not protected via the receive lock (see
4159 * sxg_process_event_queue) We don't want to grap a lock every time a
4160 * buffer is returned to us, so we use atomic interlocked functions
4161 * instead.
4163 adapter->RcvBuffersOnCard += SXG_RCV_DESCRIPTORS_PER_BLOCK;
4165 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DscBlk",
4166 RcvDescriptorBlockHdr,
4167 RingDescriptorCmd, RcvRingInfo->Head, RcvRingInfo->Tail);
4169 WRITE_REG(adapter->UcodeRegs[0].RcvCmd, 1, true);
4170 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XFilBlk",
4171 adapter, adapter->RcvBuffersOnCard,
4172 adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount);
4173 return (STATUS_SUCCESS);
4174 no_memory:
4175 return (-ENOMEM);
4179 * sxg_stock_rcv_buffers - Stock the card with receive buffers
4181 * Arguments -
4182 * adapter - A pointer to our adapter structure
4184 * Return
4185 * None
4187 static void sxg_stock_rcv_buffers(struct adapter_t *adapter)
4189 struct sxg_rcv_descriptor_block_hdr *RcvDescriptorBlockHdr;
4190 int sxg_rcv_data_buffers = SXG_RCV_DATA_BUFFERS;
4191 int sxg_min_rcv_data_buffers = SXG_MIN_RCV_DATA_BUFFERS;
4193 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "StockBuf",
4194 adapter, adapter->RcvBuffersOnCard,
4195 adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount);
4197 * First, see if we've got less than our minimum threshold of
4198 * receive buffers, there isn't an allocation in progress, and
4199 * we haven't exceeded our maximum.. get another block of buffers
4200 * None of this needs to be SMP safe. It's round numbers.
4202 if (adapter->JumboEnabled == TRUE)
4203 sxg_min_rcv_data_buffers = SXG_MIN_JUMBO_RCV_DATA_BUFFERS;
4204 if ((adapter->FreeRcvBufferCount < sxg_min_rcv_data_buffers) &&
4205 (adapter->AllRcvBlockCount < SXG_MAX_RCV_BLOCKS) &&
4206 (atomic_read(&adapter->pending_allocations) == 0)) {
4207 sxg_allocate_buffer_memory(adapter,
4208 SXG_RCV_BLOCK_SIZE
4209 (SXG_RCV_DATA_HDR_SIZE),
4210 SXG_BUFFER_TYPE_RCV);
4212 /* Now grab the RcvQLock lock and proceed */
4213 spin_lock(&adapter->RcvQLock);
4214 if (adapter->JumboEnabled)
4215 sxg_rcv_data_buffers = SXG_JUMBO_RCV_DATA_BUFFERS;
4216 while (adapter->RcvBuffersOnCard < sxg_rcv_data_buffers) {
4217 struct list_entry *_ple;
4219 /* Get a descriptor block */
4220 RcvDescriptorBlockHdr = NULL;
4221 if (adapter->FreeRcvBlockCount) {
4222 _ple = RemoveHeadList(&adapter->FreeRcvBlocks);
4223 RcvDescriptorBlockHdr =
4224 container_of(_ple, struct sxg_rcv_descriptor_block_hdr,
4225 FreeList);
4226 adapter->FreeRcvBlockCount--;
4227 RcvDescriptorBlockHdr->State = SXG_BUFFER_BUSY;
4230 if (RcvDescriptorBlockHdr == NULL) {
4231 /* Bail out.. */
4232 adapter->Stats.NoMem++;
4233 break;
4235 /* Fill in the descriptor block and give it to the card */
4236 if (sxg_fill_descriptor_block(adapter, RcvDescriptorBlockHdr) ==
4237 STATUS_FAILURE) {
4238 /* Free the descriptor block */
4239 SXG_FREE_RCV_DESCRIPTOR_BLOCK(adapter,
4240 RcvDescriptorBlockHdr);
4241 break;
4244 spin_unlock(&adapter->RcvQLock);
4245 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XFilBlks",
4246 adapter, adapter->RcvBuffersOnCard,
4247 adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount);
4251 * sxg_complete_descriptor_blocks - Return descriptor blocks that have been
4252 * completed by the microcode
4254 * Arguments -
4255 * adapter - A pointer to our adapter structure
4256 * Index - Where the microcode is up to
4258 * Return
4259 * None
4261 static void sxg_complete_descriptor_blocks(struct adapter_t *adapter,
4262 unsigned char Index)
4264 struct sxg_rcv_ring *RingZero = &adapter->RcvRings[0];
4265 struct sxg_ring_info *RcvRingInfo = &adapter->RcvRingZeroInfo;
4266 struct sxg_rcv_descriptor_block_hdr *RcvDescriptorBlockHdr;
4267 struct sxg_cmd *RingDescriptorCmd;
4269 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpRBlks",
4270 adapter, Index, RcvRingInfo->Head, RcvRingInfo->Tail);
4272 /* Now grab the RcvQLock lock and proceed */
4273 spin_lock(&adapter->RcvQLock);
4274 ASSERT(Index != RcvRingInfo->Tail);
4275 while (sxg_ring_get_forward_diff(RcvRingInfo, Index,
4276 RcvRingInfo->Tail) > 3) {
4278 * Locate the current Cmd (ring descriptor entry), and
4279 * associated receive descriptor block, and advance
4280 * the tail
4282 SXG_RETURN_CMD(RingZero,
4283 RcvRingInfo,
4284 RingDescriptorCmd, RcvDescriptorBlockHdr);
4285 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpRBlk",
4286 RcvRingInfo->Head, RcvRingInfo->Tail,
4287 RingDescriptorCmd, RcvDescriptorBlockHdr);
4289 /* Clear the SGL field */
4290 RingDescriptorCmd->Sgl = 0;
4292 * Attempt to refill it and hand it right back to the
4293 * card. If we fail to refill it, free the descriptor block
4294 * header. The card will be restocked later via the
4295 * RcvBuffersOnCard test
4297 if (sxg_fill_descriptor_block(adapter,
4298 RcvDescriptorBlockHdr) == STATUS_FAILURE)
4299 SXG_FREE_RCV_DESCRIPTOR_BLOCK(adapter,
4300 RcvDescriptorBlockHdr);
4302 spin_unlock(&adapter->RcvQLock);
4303 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XCRBlks",
4304 adapter, Index, RcvRingInfo->Head, RcvRingInfo->Tail);
4308 * Read the statistics which the card has been maintaining.
4310 void sxg_collect_statistics(struct adapter_t *adapter)
4312 if(adapter->ucode_stats)
4313 WRITE_REG64(adapter, adapter->UcodeRegs[0].GetUcodeStats,
4314 adapter->pucode_stats, 0);
4315 adapter->stats.rx_fifo_errors = adapter->ucode_stats->ERDrops;
4316 adapter->stats.rx_over_errors = adapter->ucode_stats->NBDrops;
4317 adapter->stats.tx_fifo_errors = adapter->ucode_stats->XDrops;
4320 static struct net_device_stats *sxg_get_stats(struct net_device * dev)
4322 struct adapter_t *adapter = netdev_priv(dev);
4324 sxg_collect_statistics(adapter);
4325 return (&adapter->stats);
4328 static struct pci_driver sxg_driver = {
4329 .name = sxg_driver_name,
4330 .id_table = sxg_pci_tbl,
4331 .probe = sxg_entry_probe,
4332 .remove = sxg_entry_remove,
4333 #if SXG_POWER_MANAGEMENT_ENABLED
4334 .suspend = sxgpm_suspend,
4335 .resume = sxgpm_resume,
4336 #endif
4337 /* .shutdown = slic_shutdown, MOOK_INVESTIGATE */
4340 static int __init sxg_module_init(void)
4342 sxg_init_driver();
4344 if (debug >= 0)
4345 sxg_debug = debug;
4347 return pci_register_driver(&sxg_driver);
4350 static void __exit sxg_module_cleanup(void)
4352 pci_unregister_driver(&sxg_driver);
4355 module_init(sxg_module_init);
4356 module_exit(sxg_module_cleanup);