/**************************************************************************
 *
 * Copyright (C) 2000-2008 Alacritech, Inc.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials provided
 *    with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY ALACRITECH, INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ALACRITECH, INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation
 * are those of the authors and should not be interpreted as representing
 * official policies, either expressed or implied, of Alacritech, Inc.
 *
 * Parts developed by LinSysSoft Sahara team
 *
 **************************************************************************/
/*
 * FILE: sxg.c
 *
 * The SXG driver for Alacritech's 10Gbe products.
 *
 * NOTE: This is the standard, non-accelerated version of Alacritech's
 *       driver.
 */
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/firmware.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/mii.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#define SLIC_GET_STATS_ENABLED		0
#define LINUX_FREES_ADAPTER_RESOURCES	1
#define SXG_OFFLOAD_IP_CHECKSUM		0
#define SXG_POWER_MANAGEMENT_ENABLED	0
#define SXG_UCODE_DEBUG			0

#include "sxg_os.h"
#include "sxghw.h"
#include "sxghif.h"
#include "sxg.h"
#include "sxgdbg.h"
#include "sxgphycode-1.2.h"
static int sxg_allocate_buffer_memory(struct adapter_t *adapter, u32 Size,
				      enum sxg_buffer_type BufferType);
static int sxg_allocate_rcvblock_complete(struct adapter_t *adapter,
					  void *RcvBlock,
					  dma_addr_t PhysicalAddress,
					  u32 Length);
static void sxg_allocate_sgl_buffer_complete(struct adapter_t *adapter,
					     struct sxg_scatter_gather *SxgSgl,
					     dma_addr_t PhysicalAddress,
					     u32 Length);

static void sxg_mcast_init_crc32(void);
static int sxg_entry_open(struct net_device *dev);
static int sxg_second_open(struct net_device *dev);
static int sxg_entry_halt(struct net_device *dev);
static int sxg_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int sxg_send_packets(struct sk_buff *skb, struct net_device *dev);
static int sxg_transmit_packet(struct adapter_t *adapter, struct sk_buff *skb);
static int sxg_dumb_sgl(struct sxg_x64_sgl *pSgl,
			struct sxg_scatter_gather *SxgSgl);

static void sxg_handle_interrupt(struct adapter_t *adapter, int *work_done,
				 int budget);
static void sxg_interrupt(struct adapter_t *adapter);
static int sxg_poll(struct napi_struct *napi, int budget);
static int sxg_process_isr(struct adapter_t *adapter, u32 MessageId);
static u32 sxg_process_event_queue(struct adapter_t *adapter, u32 RssId,
				   int *sxg_napi_continue, int *work_done,
				   int budget);
static void sxg_complete_slow_send(struct adapter_t *adapter);
static struct sk_buff *sxg_slow_receive(struct adapter_t *adapter,
					struct sxg_event *Event);
static void sxg_process_rcv_error(struct adapter_t *adapter, u32 ErrorStatus);
static bool sxg_mac_filter(struct adapter_t *adapter,
			   struct ether_header *EtherHdr, ushort length);
static struct net_device_stats *sxg_get_stats(struct net_device *dev);
void sxg_free_resources(struct adapter_t *adapter);
void sxg_free_rcvblocks(struct adapter_t *adapter);
void sxg_free_sgl_buffers(struct adapter_t *adapter);
void sxg_unmap_resources(struct adapter_t *adapter);
void sxg_free_mcast_addrs(struct adapter_t *adapter);
void sxg_collect_statistics(struct adapter_t *adapter);
static int sxg_register_interrupt(struct adapter_t *adapter);
static void sxg_remove_isr(struct adapter_t *adapter);
static irqreturn_t sxg_isr(int irq, void *dev_id);
static void sxg_watchdog(unsigned long data);
static void sxg_update_link_status(struct work_struct *work);
static int sxg_mac_set_address(struct net_device *dev, void *ptr);
static void sxg_mcast_set_list(struct net_device *dev);
static int sxg_adapter_set_hwaddr(struct adapter_t *adapter);
static int sxg_initialize_adapter(struct adapter_t *adapter);
static void sxg_stock_rcv_buffers(struct adapter_t *adapter);
static void sxg_complete_descriptor_blocks(struct adapter_t *adapter,
					   unsigned char Index);
int sxg_change_mtu(struct net_device *netdev, int new_mtu);
static int sxg_initialize_link(struct adapter_t *adapter);
static int sxg_phy_init(struct adapter_t *adapter);
static void sxg_link_event(struct adapter_t *adapter);
static enum SXG_LINK_STATE sxg_get_link_state(struct adapter_t *adapter);
static void sxg_link_state(struct adapter_t *adapter,
			   enum SXG_LINK_STATE LinkState);
static int sxg_write_mdio_reg(struct adapter_t *adapter,
			      u32 DevAddr, u32 RegAddr, u32 Value);
static int sxg_read_mdio_reg(struct adapter_t *adapter,
			     u32 DevAddr, u32 RegAddr, u32 *pValue);
static void sxg_set_mcast_addr(struct adapter_t *adapter);
static unsigned int sxg_first_init = 1;
static char *sxg_banner =
    "Alacritech SLIC Technology(tm) Server and Storage \
     10Gbe Accelerator (Non-Accelerated)\n";

static int sxg_debug = 1;
static int debug = -1;
static struct net_device *head_netdevice = NULL;

static struct sxgbase_driver sxg_global = {
	.dynamic_intagg = 1,
};
static int intagg_delay = 100;
static u32 dynamic_intagg = 0;

char sxg_driver_name[] = "sxg_nic";
#define DRV_AUTHOR	"Alacritech, Inc. Engineering"
#define DRV_DESCRIPTION	\
	"Alacritech SLIC Technology(tm) Non-Accelerated 10Gbe Driver"
#define DRV_COPYRIGHT	\
	"Copyright 2000-2008 Alacritech, Inc.  All rights reserved."

MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_LICENSE("GPL");

module_param(dynamic_intagg, int, 0);
MODULE_PARM_DESC(dynamic_intagg, "Dynamic Interrupt Aggregation Setting");
module_param(intagg_delay, int, 0);
MODULE_PARM_DESC(intagg_delay, "uSec Interrupt Aggregation Delay");

static struct pci_device_id sxg_pci_tbl[] __devinitdata = {
	{PCI_DEVICE(SXG_VENDOR_ID, SXG_DEVICE_ID)},
	{0,}
};
MODULE_DEVICE_TABLE(pci, sxg_pci_tbl);
static inline void sxg_reg32_write(void __iomem *reg, u32 value, bool flush)
{
	writel(value, reg);
	if (flush)
		mb();
}

static inline void sxg_reg64_write(struct adapter_t *adapter, void __iomem *reg,
				   u64 value, u32 cpu)
{
	u32 value_high = (u32) (value >> 32);
	u32 value_low = (u32) (value & 0x00000000FFFFFFFF);
	unsigned long flags;

	spin_lock_irqsave(&adapter->Bit64RegLock, flags);
	writel(value_high, (void __iomem *)(&adapter->UcodeRegs[cpu].Upper));
	writel(value_low, reg);
	spin_unlock_irqrestore(&adapter->Bit64RegLock, flags);
}
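/*
 * Note that sxg_reg64_write() issues two 32-bit MMIO writes: the high half
 * goes to the per-CPU "Upper" register and the low half to the target
 * register itself, presumably committing the full 64-bit value on the
 * second write.  Bit64RegLock keeps the two halves paired; without it a
 * concurrent writer could interleave and pair the wrong halves.
 */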
static void sxg_init_driver(void)
{
	if (sxg_first_init) {
		DBG_ERROR("sxg: %s sxg_first_init set jiffies[%lx]\n",
			  __func__, jiffies);
		sxg_first_init = 0;
		spin_lock_init(&sxg_global.driver_lock);
	}
}
static void sxg_dbg_macaddrs(struct adapter_t *adapter)
{
	DBG_ERROR("  (%s) curr %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
		  adapter->netdev->name, adapter->currmacaddr[0],
		  adapter->currmacaddr[1], adapter->currmacaddr[2],
		  adapter->currmacaddr[3], adapter->currmacaddr[4],
		  adapter->currmacaddr[5]);
	DBG_ERROR("  (%s) mac  %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
		  adapter->netdev->name, adapter->macaddr[0],
		  adapter->macaddr[1], adapter->macaddr[2],
		  adapter->macaddr[3], adapter->macaddr[4],
		  adapter->macaddr[5]);
	return;
}
/* SXG Globals */
static struct sxg_driver SxgDriver;

static struct sxg_trace_buffer LSxgTraceBuffer;
static struct sxg_trace_buffer *SxgTraceBuffer = NULL;
/*
 * MSI Related API's
 */
int sxg_register_intr(struct adapter_t *adapter);
int sxg_enable_msi_x(struct adapter_t *adapter);
int sxg_add_msi_isr(struct adapter_t *adapter);
void sxg_remove_msix_isr(struct adapter_t *adapter);
int sxg_set_interrupt_capability(struct adapter_t *adapter);

int sxg_set_interrupt_capability(struct adapter_t *adapter)
{
	int ret;

	ret = sxg_enable_msi_x(adapter);
	if (ret != STATUS_SUCCESS) {
		adapter->msi_enabled = FALSE;
		DBG_ERROR("sxg_set_interrupt_capability MSI-X Disable\n");
	} else {
		adapter->msi_enabled = TRUE;
		DBG_ERROR("sxg_set_interrupt_capability MSI-X Enable\n");
	}
	return ret;
}
int sxg_register_intr(struct adapter_t *adapter)
{
	int ret = 0;

	if (adapter->msi_enabled) {
		ret = sxg_add_msi_isr(adapter);
	} else {
		DBG_ERROR("MSI-X Enable Failed. Using Pin INT\n");
		ret = sxg_register_interrupt(adapter);
		if (ret != STATUS_SUCCESS) {
			DBG_ERROR("sxg_register_interrupt Failed\n");
		}
	}
	return ret;
}
int sxg_enable_msi_x(struct adapter_t *adapter)
{
	int ret;

	adapter->nr_msix_entries = 1;
	adapter->msi_entries = kmalloc(adapter->nr_msix_entries *
				       sizeof(struct msix_entry), GFP_KERNEL);
	if (!adapter->msi_entries) {
		DBG_ERROR("%s:MSI Entries memory allocation Failed\n", __func__);
		return -ENOMEM;
	}
	memset(adapter->msi_entries, 0, adapter->nr_msix_entries *
	       sizeof(struct msix_entry));

	ret = pci_enable_msix(adapter->pcidev, adapter->msi_entries,
			      adapter->nr_msix_entries);
	if (ret) {
		DBG_ERROR("Enabling MSI-X with %d vectors failed\n",
			  adapter->nr_msix_entries);
		/* Should try with less vectors returned. */
		kfree(adapter->msi_entries);
		return STATUS_FAILURE;	/* MSI-X Enable failed. */
	}
	return (STATUS_SUCCESS);
}
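/*
 * The "try with less vectors" retry mentioned above is not implemented.
 * A sketch of what it could look like (hypothetical helper, not part of
 * the original driver): the old pci_enable_msix() returns a positive count
 * when fewer vectors are available, so the request can be shrunk and
 * retried.  It assumes msi_entries has been allocated large enough for
 * the initial nvec request.
 */
#if 0
static int sxg_enable_msix_retry(struct adapter_t *adapter, int nvec)
{
	int ret;

	while (nvec > 0) {
		ret = pci_enable_msix(adapter->pcidev,
				      adapter->msi_entries, nvec);
		if (ret == 0)
			return nvec;	/* got all nvec vectors */
		if (ret < 0)
			return ret;	/* hard failure */
		nvec = ret;		/* retry with what is available */
	}
	return -ENOSPC;
}
#endif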
int sxg_add_msi_isr(struct adapter_t *adapter)
{
	int ret, i;

	if (!adapter->intrregistered) {
		spin_unlock_irqrestore(&sxg_global.driver_lock,
				       sxg_global.flags);
		for (i = 0; i < adapter->nr_msix_entries; i++) {
			ret = request_irq(adapter->msi_entries[i].vector,
					  sxg_isr,
					  IRQF_SHARED,
					  adapter->netdev->name,
					  adapter->netdev);
			if (ret) {
				spin_lock_irqsave(&sxg_global.driver_lock,
						  sxg_global.flags);
				DBG_ERROR("sxg: MSI-X request_irq (%s) "
					  "FAILED [%x]\n", adapter->netdev->name,
					  ret);
				return (ret);
			}
		}
	}
	spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags);
	adapter->msi_enabled = TRUE;
	adapter->intrregistered = 1;
	adapter->IntRegistered = TRUE;
	return (STATUS_SUCCESS);
}
void sxg_remove_msix_isr(struct adapter_t *adapter)
{
	int i, vector;
	struct net_device *netdev = adapter->netdev;

	for (i = 0; i < adapter->nr_msix_entries; i++) {
		vector = adapter->msi_entries[i].vector;
		DBG_ERROR("%s : Freeing IRQ vector#%d\n", __func__, vector);
		free_irq(vector, netdev);
	}
}
static void sxg_remove_isr(struct adapter_t *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->msi_enabled)
		sxg_remove_msix_isr(adapter);
	else
		free_irq(adapter->netdev->irq, netdev);
}
void sxg_reset_interrupt_capability(struct adapter_t *adapter)
{
	if (adapter->msi_enabled) {
		pci_disable_msix(adapter->pcidev);
		kfree(adapter->msi_entries);
		adapter->msi_entries = NULL;
	}
	return;
}
/*
 * sxg_download_microcode
 *
 * Download Microcode to Sahara adapter using the Linux
 * Firmware module to get the ucode.sys file.
 *
 * Arguments -
 *	adapter		- A pointer to our adapter structure
 *	UcodeSel	- microcode file selection
 *
 * Return
 *	status
 */
static bool sxg_download_microcode(struct adapter_t *adapter,
				   enum SXG_UCODE_SEL UcodeSel)
{
	const struct firmware *fw;
	const char *file = "";
	struct sxg_hw_regs *HwRegs = adapter->HwRegs;
	int ret;
	int i;
	u32 Section;
	u32 ThisSectionSize;
	u32 instruction = 0;
	u32 BaseAddress, AddressOffset, Address;
	u32 ValueRead;
	u32 index = 0;
	u32 num_sections = 0;
	u32 sectionSize[16];
	u32 sectionStart[16];

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DnldUcod",
		  adapter, 0, 0, 0);
	/*
	 *  This routine is only implemented to download the microcode
	 *  for the Revision B Sahara chip.  Rev A and Diagnostic
	 *  microcode is not supported at this time.  If Rev A or
	 *  diagnostic ucode is required, this routine will obviously
	 *  need to change.  Also, eventually need to add support for
	 *  Rev B checked version of ucode.  That's easy enough once
	 *  the free version of Rev B works.
	 */
	ASSERT(UcodeSel == SXG_UCODE_SYSTEM);
	ASSERT(adapter->asictype == SAHARA_REV_B);
#if SXG_UCODE_DEBUG
	file = "sxg/saharadbgdownloadB.sys";
#else
	file = "sxg/saharadownloadB.sys";
#endif
	ret = request_firmware(&fw, file, &adapter->pcidev->dev);
	if (ret) {
		DBG_ERROR("%s SXG_NIC: Failed to load firmware %s\n", __func__, file);
		return FALSE;
	}
	/*
	 *  The microcode .sys file starts with a 4 byte word containing
	 *  the number of sections.  That is followed by "num_sections" 4 byte
	 *  words containing each "section" size.  That is followed by
	 *  num_sections 4 byte words containing each section "start" address.
	 *
	 *  Following the above header, the .sys file contains num_sections
	 *  of newline delineated 12 byte microcode instructions, each
	 *  section sized as specified above.
	 */
	num_sections = *(u32 *)(fw->data + index);
	index += 4;
	ASSERT(num_sections <= 3);
	for (i = 0; i < num_sections; i++) {
		sectionSize[i] = *(u32 *)(fw->data + index);
		index += 4;
	}
	for (i = 0; i < num_sections; i++) {
		sectionStart[i] = *(u32 *)(fw->data + index);
		index += 4;
	}
	/* First, reset the card */
	WRITE_REG(HwRegs->Reset, 0xDEAD, FLUSH);
	HwRegs = adapter->HwRegs;
	/*
	 * Download each section of the microcode as specified in
	 * sectionSize[index] to sectionStart[index] address.  As
	 * described above, the .sys file contains 12 byte word
	 * microcode instructions.  The *download.sys file is generated
	 * using the objtosys.exe utility that was built for Sahara
	 * microcode.
	 */
	/* See usage of this below when we read back for parity */
	instruction = *(u32 *)(fw->data + index);
	index += 4;
	for (Section = 0; Section < num_sections; Section++) {
		BaseAddress = sectionStart[Section];
		/* Size in instructions */
		ThisSectionSize = sectionSize[Section] / 12;
		for (AddressOffset = 0; AddressOffset < ThisSectionSize;
		     AddressOffset++) {
			u32 first_instr = 0;	/* See comment below */

			Address = BaseAddress + AddressOffset;
			ASSERT((Address & ~MICROCODE_ADDRESS_MASK) == 0);
			/* Write instruction bits 31 - 0 (low) */
			first_instr = instruction;
			WRITE_REG(HwRegs->UcodeDataLow, instruction, FLUSH);
			instruction = *(u32 *)(fw->data + index);
			index += 4;	/* Advance to the "next" instruction */

			/* Write instruction bits 63-32 (middle) */
			WRITE_REG(HwRegs->UcodeDataMiddle, instruction, FLUSH);
			instruction = *(u32 *)(fw->data + index);
			index += 4;	/* Advance to the "next" instruction */

			/* Write instruction bits 95-64 (high) */
			WRITE_REG(HwRegs->UcodeDataHigh, instruction, FLUSH);
			instruction = *(u32 *)(fw->data + index);
			index += 4;	/* Advance to the "next" instruction */

			/* Write instruction address with the WRITE bit set */
			WRITE_REG(HwRegs->UcodeAddr,
				  (Address | MICROCODE_ADDRESS_WRITE), FLUSH);
			/*
			 * Sahara bug in the ucode download logic - the write to DataLow
			 * for the next instruction could get corrupted.  To avoid this,
			 * write to DataLow again for this instruction (which may get
			 * corrupted, but it doesn't matter), then increment the address
			 * and write the data for the next instruction to DataLow.  That
			 * write should succeed.
			 */
			WRITE_REG(HwRegs->UcodeDataLow, first_instr, FLUSH);
		}
	}
	/*
	 * Now repeat the entire operation reading the instruction back and
	 * checking for parity errors
	 */
	for (Section = 0; Section < num_sections; Section++) {
		BaseAddress = sectionStart[Section];
		/* Size in instructions */
		ThisSectionSize = sectionSize[Section] / 12;
		for (AddressOffset = 0; AddressOffset < ThisSectionSize;
		     AddressOffset++) {
			Address = BaseAddress + AddressOffset;
			/* Write the address with the READ bit set */
			WRITE_REG(HwRegs->UcodeAddr,
				  (Address | MICROCODE_ADDRESS_READ), FLUSH);
			/* Read it back and check parity bit. */
			READ_REG(HwRegs->UcodeAddr, ValueRead);
			if (ValueRead & MICROCODE_ADDRESS_PARITY) {
				DBG_ERROR("sxg: %s PARITY ERROR\n",
					  __func__);

				return FALSE;	/* Parity error */
			}
			ASSERT((ValueRead & MICROCODE_ADDRESS_MASK) == Address);
			/* Read the instruction back and compare */
			/* First instruction */
			instruction = *(u32 *)(fw->data + index);
			index += 4;
			READ_REG(HwRegs->UcodeDataLow, ValueRead);
			if (ValueRead != instruction) {
				DBG_ERROR("sxg: %s MISCOMPARE LOW\n",
					  __func__);
				return FALSE;	/* Miscompare */
			}
			instruction = *(u32 *)(fw->data + index);
			index += 4;
			READ_REG(HwRegs->UcodeDataMiddle, ValueRead);
			if (ValueRead != instruction) {
				DBG_ERROR("sxg: %s MISCOMPARE MIDDLE\n",
					  __func__);
				return FALSE;	/* Miscompare */
			}
			instruction = *(u32 *)(fw->data + index);
			index += 4;
			READ_REG(HwRegs->UcodeDataHigh, ValueRead);
			if (ValueRead != instruction) {
				DBG_ERROR("sxg: %s MISCOMPARE HIGH\n",
					  __func__);
				return FALSE;	/* Miscompare */
			}
		}
	}
	/* download finished */
	release_firmware(fw);
	/* Everything OK, Go. */
	WRITE_REG(HwRegs->UcodeAddr, MICROCODE_ADDRESS_GO, FLUSH);
	/*
	 * Poll the CardUp register to wait for microcode to initialize
	 * Give up after 10,000 attempts (500ms).
	 */
	for (i = 0; i < 10000; i++) {
		udelay(50);
		READ_REG(adapter->UcodeRegs[0].CardUp, ValueRead);
		if (ValueRead == 0xCAFE) {
			break;
		}
	}
	if (i == 10000) {
		DBG_ERROR("sxg: %s TIMEOUT bringing up card - verify MICROCODE\n", __func__);

		return FALSE;	/* Timeout */
	}
	/*
	 * Now write the LoadSync register.  This is used to
	 * synchronize with the card so it can scribble on the memory
	 * that contained 0xCAFE from the "CardUp" step above
	 */
	if (UcodeSel == SXG_UCODE_SYSTEM) {
		WRITE_REG(adapter->UcodeRegs[0].LoadSync, 0, FLUSH);
	}

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDnldUcd",
		  adapter, 0, 0, 0);
	return (TRUE);
}
/*
 * sxg_allocate_resources - Allocate memory and locks
 *
 * Arguments -
 *	adapter		- A pointer to our adapter structure
 *
 * Return - int
 */
static int sxg_allocate_resources(struct adapter_t *adapter)
{
	int status = STATUS_SUCCESS;
	u32 RssIds, IsrCount;
	/* struct sxg_xmt_ring	*XmtRing; */
	/* struct sxg_rcv_ring	*RcvRing; */

	DBG_ERROR("%s ENTER\n", __func__);

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AllocRes",
		  adapter, 0, 0, 0);

	/* Windows tells us how many CPUs it plans to use for RSS */
	RssIds = SXG_RSS_CPU_COUNT(adapter);
	IsrCount = adapter->msi_enabled ? RssIds : 1;

	DBG_ERROR("%s Setup the spinlocks\n", __func__);

	/* Allocate spinlocks and initialize listheads first. */
	spin_lock_init(&adapter->RcvQLock);
	spin_lock_init(&adapter->SglQLock);
	spin_lock_init(&adapter->XmtZeroLock);
	spin_lock_init(&adapter->Bit64RegLock);
	spin_lock_init(&adapter->AdapterLock);
	atomic_set(&adapter->pending_allocations, 0);

	DBG_ERROR("%s Setup the lists\n", __func__);

	InitializeListHead(&adapter->FreeRcvBuffers);
	InitializeListHead(&adapter->FreeRcvBlocks);
	InitializeListHead(&adapter->AllRcvBlocks);
	InitializeListHead(&adapter->FreeSglBuffers);
	InitializeListHead(&adapter->AllSglBuffers);

	/*
	 * Mark these basic allocations done.  This flag essentially
	 * tells the SxgFreeResources routine that it can grab spinlocks
	 * and reference listheads.
	 */
	adapter->BasicAllocations = TRUE;
	/*
	 * Main allocation loop.  Start with the maximum supported by
	 * the microcode and back off if memory allocation
	 * fails.  If we hit a minimum, fail.
	 */
666 DBG_ERROR("%s Allocate XmtRings size[%x]\n", __func__
,
667 (unsigned int)(sizeof(struct sxg_xmt_ring
) * 1));
670 * Start with big items first - receive and transmit rings.
671 * At the moment I'm going to keep the ring size fixed and
672 * adjust the TCBs if we fail. Later we might
673 * consider reducing the ring size as well..
675 adapter
->XmtRings
= pci_alloc_consistent(adapter
->pcidev
,
676 sizeof(struct sxg_xmt_ring
) *
678 &adapter
->PXmtRings
);
679 DBG_ERROR("%s XmtRings[%p]\n", __func__
, adapter
->XmtRings
);
681 if (!adapter
->XmtRings
) {
682 goto per_tcb_allocation_failed
;
684 memset(adapter
->XmtRings
, 0, sizeof(struct sxg_xmt_ring
) * 1);
686 DBG_ERROR("%s Allocate RcvRings size[%x]\n", __func__
,
687 (unsigned int)(sizeof(struct sxg_rcv_ring
) * 1));
689 pci_alloc_consistent(adapter
->pcidev
,
690 sizeof(struct sxg_rcv_ring
) * 1,
691 &adapter
->PRcvRings
);
692 DBG_ERROR("%s RcvRings[%p]\n", __func__
, adapter
->RcvRings
);
693 if (!adapter
->RcvRings
) {
694 goto per_tcb_allocation_failed
;
696 memset(adapter
->RcvRings
, 0, sizeof(struct sxg_rcv_ring
) * 1);
697 adapter
->ucode_stats
= kzalloc(sizeof(struct sxg_ucode_stats
), GFP_ATOMIC
);
698 adapter
->pucode_stats
= pci_map_single(adapter
->pcidev
,
699 adapter
->ucode_stats
,
700 sizeof(struct sxg_ucode_stats
),
702 // memset(adapter->ucode_stats, 0, sizeof(struct sxg_ucode_stats));
705 per_tcb_allocation_failed
:
706 /* an allocation failed. Free any successful allocations. */
707 if (adapter
->XmtRings
) {
708 pci_free_consistent(adapter
->pcidev
,
709 sizeof(struct sxg_xmt_ring
) * 1,
712 adapter
->XmtRings
= NULL
;
714 if (adapter
->RcvRings
) {
715 pci_free_consistent(adapter
->pcidev
,
716 sizeof(struct sxg_rcv_ring
) * 1,
719 adapter
->RcvRings
= NULL
;
721 /* Loop around and try again.... */
722 if (adapter
->ucode_stats
) {
723 pci_unmap_single(adapter
->pcidev
,
724 sizeof(struct sxg_ucode_stats
),
725 adapter
->pucode_stats
, PCI_DMA_FROMDEVICE
);
726 adapter
->ucode_stats
= NULL
;
731 DBG_ERROR("%s Initialize RCV ZERO and XMT ZERO rings\n", __func__
);
732 /* Initialize rcv zero and xmt zero rings */
733 SXG_INITIALIZE_RING(adapter
->RcvRingZeroInfo
, SXG_RCV_RING_SIZE
);
734 SXG_INITIALIZE_RING(adapter
->XmtRingZeroInfo
, SXG_XMT_RING_SIZE
);
736 /* Sanity check receive data structure format */
737 /* ASSERT((adapter->ReceiveBufferSize == SXG_RCV_DATA_BUFFER_SIZE) ||
738 (adapter->ReceiveBufferSize == SXG_RCV_JUMBO_BUFFER_SIZE)); */
739 ASSERT(sizeof(struct sxg_rcv_descriptor_block
) ==
740 SXG_RCV_DESCRIPTOR_BLOCK_SIZE
);
742 DBG_ERROR("%s Allocate EventRings size[%x]\n", __func__
,
743 (unsigned int)(sizeof(struct sxg_event_ring
) * RssIds
));
745 /* Allocate event queues. */
746 adapter
->EventRings
= pci_alloc_consistent(adapter
->pcidev
,
747 sizeof(struct sxg_event_ring
) *
749 &adapter
->PEventRings
);
751 if (!adapter
->EventRings
) {
752 /* Caller will call SxgFreeAdapter to clean up above
754 SXG_TRACE(TRACE_SXG
, SxgTraceBuffer
, TRACE_NOISY
, "XAResF8",
755 adapter
, SXG_MAX_ENTRIES
, 0, 0);
756 status
= STATUS_RESOURCES
;
757 goto per_tcb_allocation_failed
;
759 memset(adapter
->EventRings
, 0, sizeof(struct sxg_event_ring
) * RssIds
);
761 DBG_ERROR("%s Allocate ISR size[%x]\n", __func__
, IsrCount
);
763 adapter
->Isr
= pci_alloc_consistent(adapter
->pcidev
,
764 IsrCount
, &adapter
->PIsr
);
766 /* Caller will call SxgFreeAdapter to clean up above
768 SXG_TRACE(TRACE_SXG
, SxgTraceBuffer
, TRACE_NOISY
, "XAResF9",
769 adapter
, SXG_MAX_ENTRIES
, 0, 0);
770 status
= STATUS_RESOURCES
;
771 goto per_tcb_allocation_failed
;
773 memset(adapter
->Isr
, 0, sizeof(u32
) * IsrCount
);
775 DBG_ERROR("%s Allocate shared XMT ring zero index location size[%x]\n",
776 __func__
, (unsigned int)sizeof(u32
));
778 /* Allocate shared XMT ring zero index location */
779 adapter
->XmtRingZeroIndex
= pci_alloc_consistent(adapter
->pcidev
,
783 if (!adapter
->XmtRingZeroIndex
) {
784 SXG_TRACE(TRACE_SXG
, SxgTraceBuffer
, TRACE_NOISY
, "XAResF10",
785 adapter
, SXG_MAX_ENTRIES
, 0, 0);
786 status
= STATUS_RESOURCES
;
787 goto per_tcb_allocation_failed
;
789 memset(adapter
->XmtRingZeroIndex
, 0, sizeof(u32
));
791 SXG_TRACE(TRACE_SXG
, SxgTraceBuffer
, TRACE_NOISY
, "XAlcResS",
792 adapter
, SXG_MAX_ENTRIES
, 0, 0);
/*
 * Set up PCI Configuration space
 *
 * Arguments -
 *	pcidev			- A pointer to our adapter structure
 */
static void sxg_config_pci(struct pci_dev *pcidev)
{
	u16 pci_command;
	u16 new_command;

	pci_read_config_word(pcidev, PCI_COMMAND, &pci_command);
	DBG_ERROR("sxg: %s  PCI command[%4.4x]\n", __func__, pci_command);
	/* Set the command register */
	new_command = pci_command | (
				     /* Memory Space Enable */
				     PCI_COMMAND_MEMORY |
				     /* Bus master enable */
				     PCI_COMMAND_MASTER |
				     /* Memory write and invalidate */
				     PCI_COMMAND_INVALIDATE |
				     /* Parity error response */
				     PCI_COMMAND_PARITY |
				     /* System ERR */
				     PCI_COMMAND_SERR |
				     /* Fast back-to-back */
				     PCI_COMMAND_FAST_BACK);
	if (pci_command != new_command) {
		DBG_ERROR("%s -- Updating PCI COMMAND register %4.4x->%4.4x.\n",
			  __func__, pci_command, new_command);
		pci_write_config_word(pcidev, PCI_COMMAND, new_command);
	}
}
/*
 * sxg_read_config
 * @adapter : Pointer to the adapter structure for the card
 * This function will read the configuration data from EEPROM/FLASH
 */
static inline int sxg_read_config(struct adapter_t *adapter)
{
	/* struct sxg_config	data; */
	struct sxg_config *config;
	struct sw_cfg_data *data;
	dma_addr_t p_addr;
	int i;
	unsigned long status;

	config = pci_alloc_consistent(adapter->pcidev,
				      sizeof(struct sxg_config), &p_addr);

	if (!config) {
		/*
		 * We can't get even this much memory.  Raise hell and
		 * get out of here
		 */
		printk(KERN_ERR "%s : Could not allocate memory for reading \
				 EEPROM\n", __func__);
		return -ENOMEM;
	}

	data = &config->SwCfg;

	/* Initialize (reflective memory) status register */
	WRITE_REG(adapter->UcodeRegs[0].ConfigStat, SXG_CFG_TIMEOUT, TRUE);

	/* Send request to fetch configuration data */
	WRITE_REG64(adapter, adapter->UcodeRegs[0].Config, p_addr, 0);
	for (i = 0; i < 1000; i++) {
		READ_REG(adapter->UcodeRegs[0].ConfigStat, status);
		if (status != SXG_CFG_TIMEOUT)
			break;
		mdelay(1);	/* Do we really need this */
	}
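	/*
	 * The ucode overwrites the ConfigStat sentinel written above once
	 * the DMA of the config block completes; if status still reads
	 * SXG_CFG_TIMEOUT after the loop, the card never answered the
	 * request.
	 */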
	switch (status) {
	/* Config read from EEPROM succeeded */
	case SXG_CFG_LOAD_EEPROM:
	/* Config read from Flash succeeded */
	case SXG_CFG_LOAD_FLASH:
		/*
		 * Copy the MAC address to adapter structure
		 * TODO: We are not doing the remaining part : FRU, etc
		 */
		memcpy(adapter->macaddr, data->MacAddr[0].MacAddr,
		       sizeof(struct sxg_config_mac));
		break;
	case SXG_CFG_TIMEOUT:
	case SXG_CFG_LOAD_INVALID:
	case SXG_CFG_LOAD_ERROR:
	default:	/* Fix default handler later */
		printk(KERN_WARNING "%s  : We could not read the config \
			word. Status = %ld\n", __func__, status);
		break;
	}
	pci_free_consistent(adapter->pcidev, sizeof(struct sw_cfg_data), data,
			    p_addr);
	if (adapter->netdev) {
		memcpy(adapter->netdev->dev_addr, adapter->currmacaddr, 6);
		memcpy(adapter->netdev->perm_addr, adapter->currmacaddr, 6);
	}
	sxg_dbg_macaddrs(adapter);

	return status;
}
static const struct net_device_ops sxg_netdev_ops = {
	.ndo_open		= sxg_entry_open,
	.ndo_stop		= sxg_entry_halt,
	.ndo_start_xmit		= sxg_send_packets,
	.ndo_do_ioctl		= sxg_ioctl,
	.ndo_change_mtu		= sxg_change_mtu,
	.ndo_get_stats		= sxg_get_stats,
	.ndo_set_multicast_list	= sxg_mcast_set_list,
	.ndo_validate_addr	= eth_validate_addr,
#if XXXTODO
	.ndo_set_mac_address	= sxg_mac_set_address,
#else
	.ndo_set_mac_address	= eth_mac_addr,
#endif
};
static int sxg_entry_probe(struct pci_dev *pcidev,
			   const struct pci_device_id *pci_tbl_entry)
{
	static int did_version = 0;
	int err;
	struct net_device *netdev;
	struct adapter_t *adapter;
	void __iomem *memmapped_ioaddr;
	u32 status = 0;
	ulong mmio_start = 0;
	ulong mmio_len = 0;
	unsigned char revision_id;

	DBG_ERROR("sxg: %s 2.6 VERSION ENTER jiffies[%lx] cpu %d\n",
		  __func__, jiffies, smp_processor_id());

	/* Initialize trace buffer */
	SxgTraceBuffer = &LSxgTraceBuffer;
	SXG_TRACE_INIT(SxgTraceBuffer, TRACE_NOISY);

	sxg_global.dynamic_intagg = dynamic_intagg;

	err = pci_enable_device(pcidev);

	DBG_ERROR("Call pci_enable_device(%p)  status[%x]\n", pcidev, err);
	if (err) {
		return err;
	}

	if (sxg_debug > 0 && did_version++ == 0) {
		printk(KERN_INFO "%s\n", sxg_banner);
		printk(KERN_INFO "%s\n", SXG_DRV_VERSION);
	}

	pci_read_config_byte(pcidev, PCI_REVISION_ID, &revision_id);

	if (!(err = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64)))) {
		DBG_ERROR("pci_set_dma_mask(DMA_BIT_MASK(64)) successful\n");
	} else {
		if ((err = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32)))) {
			DBG_ERROR
			    ("No usable DMA configuration, aborting  err[%x]\n",
			     err);
			return err;
		}
		DBG_ERROR("pci_set_dma_mask(DMA_BIT_MASK(32)) successful\n");
	}

	DBG_ERROR("Call pci_request_regions\n");

	err = pci_request_regions(pcidev, sxg_driver_name);
	if (err) {
		DBG_ERROR("pci_request_regions FAILED err[%x]\n", err);
		return err;
	}

	DBG_ERROR("call pci_set_master\n");
	pci_set_master(pcidev);

	DBG_ERROR("call alloc_etherdev\n");
	netdev = alloc_etherdev(sizeof(struct adapter_t));
	if (!netdev) {
		err = -ENOMEM;
		goto err_out_exit_sxg_probe;
	}
	DBG_ERROR("alloc_etherdev for slic netdev[%p]\n", netdev);

	SET_NETDEV_DEV(netdev, &pcidev->dev);

	pci_set_drvdata(pcidev, netdev);
	adapter = netdev_priv(netdev);
	if (revision_id == 1) {
		adapter->asictype = SAHARA_REV_A;
	} else if (revision_id == 2) {
		adapter->asictype = SAHARA_REV_B;
	} else {
		ASSERT(0);
		DBG_ERROR("%s Unexpected revision ID %x\n", __func__, revision_id);
		goto err_out_exit_sxg_probe;
	}
	adapter->netdev = netdev;
	adapter->pcidev = pcidev;

	mmio_start = pci_resource_start(pcidev, 0);
	mmio_len = pci_resource_len(pcidev, 0);

	DBG_ERROR("sxg: call ioremap(mmio_start[%lx], mmio_len[%lx])\n",
		  mmio_start, mmio_len);

	memmapped_ioaddr = ioremap(mmio_start, mmio_len);
	DBG_ERROR("sxg: %s MEMMAPPED_IOADDR [%p]\n", __func__,
		  memmapped_ioaddr);
	if (!memmapped_ioaddr) {
		DBG_ERROR("%s cannot remap MMIO region %lx @ %lx\n",
			  __func__, mmio_len, mmio_start);
		goto err_out_free_mmio_region_0;
	}

	DBG_ERROR("sxg: %s found Alacritech SXG PCI, MMIO at %p, start[%lx] \
	  len[%lx], IRQ %d.\n", __func__, memmapped_ioaddr, mmio_start,
		  mmio_len, pcidev->irq);

	adapter->HwRegs = (void *)memmapped_ioaddr;
	adapter->base_addr = memmapped_ioaddr;

	mmio_start = pci_resource_start(pcidev, 2);
	mmio_len = pci_resource_len(pcidev, 2);

	DBG_ERROR("sxg: call ioremap(mmio_start[%lx], mmio_len[%lx])\n",
		  mmio_start, mmio_len);

	memmapped_ioaddr = ioremap(mmio_start, mmio_len);
	DBG_ERROR("sxg: %s MEMMAPPED_IOADDR [%p]\n", __func__,
		  memmapped_ioaddr);
	if (!memmapped_ioaddr) {
		DBG_ERROR("%s cannot remap MMIO region %lx @ %lx\n",
			  __func__, mmio_len, mmio_start);
		goto err_out_free_mmio_region_2;
	}

	DBG_ERROR("sxg: %s found Alacritech SXG PCI, MMIO at %p, "
		  "start[%lx] len[%lx], IRQ %d.\n", __func__,
		  memmapped_ioaddr, mmio_start, mmio_len, pcidev->irq);

	adapter->UcodeRegs = (void *)memmapped_ioaddr;

	adapter->State = SXG_STATE_INITIALIZING;
	/*
	 * Maintain a list of all adapters anchored by
	 * the global SxgDriver structure.
	 */
	adapter->Next = SxgDriver.Adapters;
	SxgDriver.Adapters = adapter;
	adapter->AdapterID = ++SxgDriver.AdapterID;

	/* Initialize CRC table used to determine multicast hash */
	sxg_mcast_init_crc32();

	adapter->JumboEnabled = FALSE;
	adapter->RssEnabled = FALSE;
	if (adapter->JumboEnabled) {
		adapter->FrameSize = JUMBOMAXFRAME;
		adapter->ReceiveBufferSize = SXG_RCV_JUMBO_BUFFER_SIZE;
	} else {
		adapter->FrameSize = ETHERMAXFRAME;
		adapter->ReceiveBufferSize = SXG_RCV_DATA_BUFFER_SIZE;
	}

	/*
	 *    status = SXG_READ_EEPROM(adapter);
	 *    if (!status) {
	 *        goto sxg_init_bad;
	 *    }
	 */

	DBG_ERROR("sxg: %s ENTER sxg_config_pci\n", __func__);
	sxg_config_pci(pcidev);
	DBG_ERROR("sxg: %s EXIT sxg_config_pci\n", __func__);

	DBG_ERROR("sxg: %s ENTER sxg_init_driver\n", __func__);
	sxg_init_driver();
	DBG_ERROR("sxg: %s EXIT sxg_init_driver\n", __func__);

	adapter->vendid = pci_tbl_entry->vendor;
	adapter->devid = pci_tbl_entry->device;
	adapter->subsysid = pci_tbl_entry->subdevice;
	adapter->slotnumber = ((pcidev->devfn >> 3) & 0x1F);
	adapter->functionnumber = (pcidev->devfn & 0x7);
	adapter->memorylength = pci_resource_len(pcidev, 0);
	adapter->irq = pcidev->irq;
	adapter->next_netdevice = head_netdevice;
	head_netdevice = netdev;
	adapter->port = 0;	/* adapter->functionnumber; */

	/* Allocate memory and other resources */
	DBG_ERROR("sxg: %s ENTER sxg_allocate_resources\n", __func__);
	status = sxg_allocate_resources(adapter);
	DBG_ERROR("sxg: %s EXIT sxg_allocate_resources status %x\n",
		  __func__, status);
	if (status != STATUS_SUCCESS) {
		goto err_out_unmap;
	}

	DBG_ERROR("sxg: %s ENTER sxg_download_microcode\n", __func__);
	if (sxg_download_microcode(adapter, SXG_UCODE_SYSTEM)) {
		DBG_ERROR("sxg: %s ENTER sxg_adapter_set_hwaddr\n",
			  __func__);
		sxg_read_config(adapter);
		status = sxg_adapter_set_hwaddr(adapter);
	} else {
		adapter->state = ADAPT_FAIL;
		adapter->linkstate = LINK_DOWN;
		DBG_ERROR("sxg_download_microcode FAILED status[%x]\n", status);
	}

	netdev->base_addr = (unsigned long)adapter->base_addr;
	netdev->irq = adapter->irq;
	netdev->netdev_ops = &sxg_netdev_ops;
	SET_ETHTOOL_OPS(netdev, &sxg_nic_ethtool_ops);
	netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	err = sxg_set_interrupt_capability(adapter);
	if (err != STATUS_SUCCESS)
		DBG_ERROR("Cannot enable MSI-X capability\n");

	strcpy(netdev->name, "eth%d");
	/* strcpy(netdev->name, pci_name(pcidev)); */
	if ((err = register_netdev(netdev))) {
		DBG_ERROR("Cannot register net device, aborting. %s\n",
			  netdev->name);
		goto err_out_unmap;
	}

	netif_napi_add(netdev, &adapter->napi,
		       sxg_poll, SXG_NETDEV_WEIGHT);
	netdev->watchdog_timeo = 2 * HZ;
	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = &sxg_watchdog;
	adapter->watchdog_timer.data = (unsigned long)adapter;
	INIT_WORK(&adapter->update_link_status, sxg_update_link_status);

	DBG_ERROR
	    ("sxg: %s addr 0x%lx, irq %d, MAC addr \
	      %02X:%02X:%02X:%02X:%02X:%02X\n",
	     netdev->name, netdev->base_addr, pcidev->irq, netdev->dev_addr[0],
	     netdev->dev_addr[1], netdev->dev_addr[2], netdev->dev_addr[3],
	     netdev->dev_addr[4], netdev->dev_addr[5]);

	ASSERT(status == FALSE);
	/* sxg_free_adapter(adapter); */

	DBG_ERROR("sxg: %s EXIT status[%x] jiffies[%lx] cpu %d\n", __func__,
		  status, jiffies, smp_processor_id());
	return status;

 err_out_unmap:
	sxg_free_resources(adapter);

 err_out_free_mmio_region_2:

	mmio_start = pci_resource_start(pcidev, 2);
	mmio_len = pci_resource_len(pcidev, 2);
	release_mem_region(mmio_start, mmio_len);

 err_out_free_mmio_region_0:

	mmio_start = pci_resource_start(pcidev, 0);
	mmio_len = pci_resource_len(pcidev, 0);

	release_mem_region(mmio_start, mmio_len);

 err_out_exit_sxg_probe:

	DBG_ERROR("%s EXIT jiffies[%lx] cpu %d\n", __func__, jiffies,
		  smp_processor_id());

	pci_disable_device(pcidev);
	DBG_ERROR("sxg: %s deallocate device\n", __func__);
	printk("Exit %s, Sxg driver loading failed..\n", __func__);

	return -ENODEV;
}
/***********************************************************************
 * LINE BASE Interrupt routines..
 *
 * sxg_disable_interrupt
 *
 * DisableInterrupt Handler
 *
 * Arguments:
 *
 *   adapter:	Our adapter structure
 *
 * Return Value:
 *	None.
 ***********************************************************************/
static void sxg_disable_interrupt(struct adapter_t *adapter)
{
	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DisIntr",
		  adapter, adapter->InterruptsEnabled, 0, 0);
	/* For now, RSS is disabled with line based interrupts */
	ASSERT(adapter->RssEnabled == FALSE);
	/* Turn off interrupts by writing to the icr register. */
	WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_DISABLE), TRUE);

	adapter->InterruptsEnabled = 0;

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDisIntr",
		  adapter, adapter->InterruptsEnabled, 0, 0);
}
/***********************************************************************
 * sxg_enable_interrupt
 *
 * EnableInterrupt Handler
 *
 * Arguments:
 *
 *   adapter:	Our adapter structure
 *
 * Return Value:
 *	None.
 ***********************************************************************/
static void sxg_enable_interrupt(struct adapter_t *adapter)
{
	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "EnIntr",
		  adapter, adapter->InterruptsEnabled, 0, 0);
	/* For now, RSS is disabled with line based interrupts */
	ASSERT(adapter->RssEnabled == FALSE);
	/* Turn on interrupts by writing to the icr register. */
	WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_ENABLE), TRUE);

	adapter->InterruptsEnabled = 1;

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XEnIntr",
		  adapter, adapter->InterruptsEnabled, 0, 0);
}
/***********************************************************************
 * sxg_isr - Process a line-based interrupt
 *
 * Arguments:
 *	Context		- Our adapter structure
 *	QueueDefault	- Output parameter to queue to default CPU
 *	TargetCpus	- Output bitmap to schedule DPC's
 *
 * Return Value: TRUE if our interrupt
 ***********************************************************************/
static irqreturn_t sxg_isr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);

	if (adapter->state != ADAPT_UP)
		return IRQ_NONE;
	adapter->Stats.NumInts++;
	if (adapter->Isr[0] == 0) {
		/*
		 * The SLIC driver used to experience a number of spurious
		 * interrupts due to the delay associated with the masking of
		 * the interrupt (we'd bounce back in here).  If we see that
		 * again with Sahara, add a READ_REG of the Icr register after
		 * the WRITE_REG below.
		 */
		adapter->Stats.FalseInts++;
		return IRQ_NONE;
	}
	/*
	 * Move the Isr contents and clear the value in
	 * shared memory, and mask interrupts
	 */
	/* ASSERT(adapter->IsrDpcsPending == 0); */
#if XXXTODO			/* RSS Stuff */
	/*
	 * If RSS is enabled and the ISR specifies SXG_ISR_EVENT, then
	 * schedule DPC's based on event queues.
	 */
	if (adapter->RssEnabled && (adapter->IsrCopy[0] & SXG_ISR_EVENT)) {
		for (i = 0;
		     i < adapter->RssSystemInfo->ProcessorInfo.RssCpuCount;
		     i++) {
			struct sxg_event_ring *EventRing =
			    &adapter->EventRings[i];
			struct sxg_event *Event =
			    &EventRing->Ring[adapter->NextEvent[i]];
			unsigned char Cpu =
			    adapter->RssSystemInfo->RssIdToCpu[i];
			if (Event->Status & EVENT_STATUS_VALID) {
				adapter->IsrDpcsPending++;
				CpuMask |= (1 << Cpu);
			}
		}
	}
	/*
	 * Now, either schedule the CPUs specified by the CpuMask,
	 * or queue default
	 */
	if (CpuMask) {
		*QueueDefault = FALSE;
	} else {
		adapter->IsrDpcsPending = 1;
		*QueueDefault = TRUE;
	}
	*TargetCpus = CpuMask;
#endif
	sxg_interrupt(adapter);

	return IRQ_HANDLED;
}
static void sxg_interrupt(struct adapter_t *adapter)
{
	WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_MASK), TRUE);

	if (napi_schedule_prep(&adapter->napi)) {
		__napi_schedule(&adapter->napi);
	}
}
static void sxg_handle_interrupt(struct adapter_t *adapter, int *work_done,
				 int budget)
{
	/* unsigned char RssId = 0; */
	u32 NewIsr;
	int sxg_napi_continue = 1;

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "HndlIntr",
		  adapter, adapter->IsrCopy[0], 0, 0);
	/* For now, RSS is disabled with line based interrupts */
	ASSERT(adapter->RssEnabled == FALSE);

	adapter->IsrCopy[0] = adapter->Isr[0];
	adapter->Isr[0] = 0;

	/* Always process the event queue. */
	while (sxg_napi_continue) {
		sxg_process_event_queue(adapter,
					(adapter->RssEnabled ? /*RssId */ 0 : 0),
					&sxg_napi_continue, work_done, budget);
	}

#if XXXTODO			/* RSS stuff */
	if (--adapter->IsrDpcsPending) {
		/* We're done. */
		ASSERT(adapter->RssEnabled);
		SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DPCsPend",
			  adapter, 0, 0, 0);
		return;
	}
#endif
	/* Last (or only) DPC processes the ISR and clears the interrupt. */
	NewIsr = sxg_process_isr(adapter, 0);
	/* Reenable interrupts */
	adapter->IsrCopy[0] = 0;
	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "ClearIsr",
		  adapter, NewIsr, 0, 0);

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XHndlInt",
		  adapter, 0, 0, 0);
}
static int sxg_poll(struct napi_struct *napi, int budget)
{
	struct adapter_t *adapter = container_of(napi, struct adapter_t, napi);
	int work_done = 0;

	sxg_handle_interrupt(adapter, &work_done, budget);

	if (work_done < budget) {
		napi_complete(napi);
		WRITE_REG(adapter->UcodeRegs[0].Isr, 0, TRUE);
	}
	return work_done;
}
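/*
 * Per the NAPI contract, napi_complete() may only be called once fewer
 * than "budget" packets were consumed in this poll.  The subsequent write
 * of 0 to the shared Isr location appears to re-arm interrupt delivery:
 * sxg_isr() above treats a zero Isr word as a spurious interrupt.
 */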
/*
 * sxg_process_isr - Process an interrupt.  Called from the line-based and
 *			message based interrupt DPC routines
 *
 * Arguments:
 *	adapter		- Our adapter structure
 *	Queue		- The ISR that needs processing
 *
 * Return Value:
 *	None
 */
static int sxg_process_isr(struct adapter_t *adapter, u32 MessageId)
{
	u32 Isr = adapter->IsrCopy[MessageId];
	u32 NewIsr = 0;

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "ProcIsr",
		  adapter, Isr, 0, 0);

	/* Error */
	if (Isr & SXG_ISR_ERR) {
		if (Isr & SXG_ISR_PDQF) {
			adapter->Stats.PdqFull++;
			DBG_ERROR("%s: SXG_ISR_ERR  PDQF!!\n", __func__);
		}
		/* No host buffer */
		if (Isr & SXG_ISR_RMISS) {
			/*
			 * There is a bunch of code in the SLIC driver which
			 * attempts to process more receive events per DPC
			 * if we start to fall behind.  We'll probably
			 * need to do something similar here, but hold
			 * off for now.  I don't want to make the code more
			 * complicated than strictly needed.
			 */
			adapter->stats.rx_missed_errors++;
			if (adapter->stats.rx_missed_errors < 5) {
				DBG_ERROR("%s: SXG_ISR_ERR  RMISS!!\n",
					  __func__);
			}
		}
		/* Card crash */
		if (Isr & SXG_ISR_DEAD) {
			/*
			 * Set aside the crash info and set the adapter state
			 * to RESET
			 */
			adapter->CrashCpu = (unsigned char)
			    ((Isr & SXG_ISR_CPU) >> SXG_ISR_CPU_SHIFT);
			adapter->CrashLocation = (ushort) (Isr & SXG_ISR_CRASH);
			adapter->Dead = TRUE;
			DBG_ERROR("%s: ISR_DEAD %x, CPU: %d\n", __func__,
				  adapter->CrashLocation, adapter->CrashCpu);
		}
		/* Event ring full */
		if (Isr & SXG_ISR_ERFULL) {
			/*
			 * Same issue as RMISS, really.  This means the
			 * host is falling behind the card.  Need to increase
			 * event ring size, process more events per interrupt,
			 * and/or reduce/remove interrupt aggregation.
			 */
			adapter->Stats.EventRingFull++;
			DBG_ERROR("%s: SXG_ISR_ERR  EVENT RING FULL!!\n",
				  __func__);
		}
		/* Transmit drop - no DRAM buffers or XMT error */
		if (Isr & SXG_ISR_XDROP) {
			DBG_ERROR("%s: SXG_ISR_ERR  XDROP!!\n", __func__);
		}
	}
	/* Slowpath send completions */
	if (Isr & SXG_ISR_SPSEND) {
		sxg_complete_slow_send(adapter);
	}
	/* Dump */
	if (Isr & SXG_ISR_UPC) {
		/* Maybe change when debug is added.. */
		/* ASSERT(adapter->DumpCmdRunning); */
		adapter->DumpCmdRunning = FALSE;
	}
	/* Link event */
	if (Isr & SXG_ISR_LINK) {
		if (adapter->state != ADAPT_DOWN) {
			adapter->link_status_changed = 1;
			schedule_work(&adapter->update_link_status);
		}
	}
	/* Debug - breakpoint hit */
	if (Isr & SXG_ISR_BREAK) {
		/*
		 * At the moment AGDB isn't written to support interactive
		 * debug sessions.  When it is, this interrupt will be used to
		 * signal AGDB that it has hit a breakpoint.  For now, ASSERT.
		 */
		ASSERT(0);
	}
	/* Heartbeat response */
	if (Isr & SXG_ISR_PING) {
		adapter->PingOutstanding = FALSE;
	}
	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XProcIsr",
		  adapter, Isr, NewIsr, 0);

	return (NewIsr);
}
/*
 * sxg_rcv_checksum - Set the checksum for received packet
 *
 * Arguments:
 *	@adapter	- Adapter structure on which packet is received
 *	@skb		- Packet which is received
 *	@Event		- Event read from hardware
 */
void sxg_rcv_checksum(struct adapter_t *adapter, struct sk_buff *skb,
		      struct sxg_event *Event)
{
	skb->ip_summed = CHECKSUM_NONE;
	if (likely(adapter->flags & SXG_RCV_IP_CSUM_ENABLED)) {
		if (likely(adapter->flags & SXG_RCV_TCP_CSUM_ENABLED)
		    && (Event->Status & EVENT_STATUS_TCPIP)) {
			if (!(Event->Status & EVENT_STATUS_TCPBAD))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			if (!(Event->Status & EVENT_STATUS_IPBAD))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if (Event->Status & EVENT_STATUS_IPONLY) {
			if (!(Event->Status & EVENT_STATUS_IPBAD))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}
	}
}
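/*
 * CHECKSUM_UNNECESSARY tells the stack that the hardware already verified
 * the checksum, so the software check is skipped; CHECKSUM_NONE (the
 * default set above) forces software verification.
 */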
/*
 * sxg_process_event_queue - Process our event queue
 *
 * Arguments:
 *	- adapter	- Adapter structure
 *	- RssId		- The event queue requiring processing
 *
 * Return Value:
 *	None.
 */
static u32 sxg_process_event_queue(struct adapter_t *adapter, u32 RssId,
				   int *sxg_napi_continue, int *work_done,
				   int budget)
{
	struct sxg_event_ring *EventRing = &adapter->EventRings[RssId];
	struct sxg_event *Event = &EventRing->Ring[adapter->NextEvent[RssId]];
	u32 EventsProcessed = 0, Batches = 0;
	struct sk_buff *skb;
#ifdef LINUX_HANDLES_RCV_INDICATION_LISTS
	struct sk_buff *prev_skb = NULL;
	struct sk_buff *IndicationList[SXG_RCV_ARRAYSIZE];
	struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr;
#endif
	u32 ReturnStatus = 0;
	int sxg_rcv_data_buffers = SXG_RCV_DATA_BUFFERS;

	ASSERT((adapter->State == SXG_STATE_RUNNING) ||
	       (adapter->State == SXG_STATE_PAUSING) ||
	       (adapter->State == SXG_STATE_PAUSED) ||
	       (adapter->State == SXG_STATE_HALTING));
	/*
	 * We may still have unprocessed events on the queue if
	 * the card crashed.  Don't process them.
	 */
	if (adapter->Dead) {
		return 0;
	}
	/*
	 * In theory there should only be a single processor that
	 * accesses this queue, and only at interrupt-DPC time.  So
	 * we shouldn't need a lock for any of this.
	 */
	while (Event->Status & EVENT_STATUS_VALID) {
		(*sxg_napi_continue) = 1;
		SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "Event",
			  Event, Event->Code, Event->Status,
			  adapter->NextEvent);
		switch (Event->Code) {
		case EVENT_CODE_BUFFERS:
			/* struct sxg_ring_info Head & Tail == unsigned char */
			ASSERT(!(Event->CommandIndex & 0xFF00));
			sxg_complete_descriptor_blocks(adapter,
						       Event->CommandIndex);
			break;
		case EVENT_CODE_SLOWRCV:
			--adapter->RcvBuffersOnCard;
			if ((skb = sxg_slow_receive(adapter, Event))) {
				u32 rx_bytes;
#ifdef LINUX_HANDLES_RCV_INDICATION_LISTS
				/* Add it to our indication list */
				SXG_ADD_RCV_PACKET(adapter, skb, prev_skb,
						   IndicationList, num_skbs);
#else
				/*
				 * Linux, we just pass up each skb to the
				 * protocol above at this point, there is no
				 * capability of an indication list.
				 */
				/* CHECK skb_pull(skb, INIC_RCVBUF_HEADSIZE); */
				/* (rcvbuf->length & IRHDDR_FLEN_MSK); */
				rx_bytes = Event->Length;
				adapter->stats.rx_packets++;
				adapter->stats.rx_bytes += rx_bytes;
				sxg_rcv_checksum(adapter, skb, Event);
				skb->dev = adapter->netdev;
				netif_receive_skb(skb);
#endif
			}
			break;
		default:
			DBG_ERROR("%s: ERROR  Invalid EventCode %d\n",
				  __func__, Event->Code);
			break;
		}
		/*
		 * See if we need to restock card receive buffers.
		 * There are two things to note here:
		 *	First - This test is not SMP safe.  The
		 *		adapter->BuffersOnCard field is protected via atomic
		 *		interlocked calls, but we do not protect it with respect
		 *		to these tests.  The only way to do that is with a lock,
		 *		and I don't want to grab a lock every time we adjust the
		 *		BuffersOnCard count.  Instead, we allow the buffer
		 *		replenishment to be off once in a while.  The worst that
		 *		can happen is the card is given one more-or-less descriptor
		 *		block than the arbitrary value we've chosen.  No big deal.
		 *		In short DO NOT ADD A LOCK HERE, OR WHERE RcvBuffersOnCard
		 *		is adjusted.
		 *	Second - We expect this test to rarely
		 *		evaluate to true.  We attempt to refill descriptor blocks
		 *		as they are returned to us (sxg_complete_descriptor_blocks)
		 *		so the only time this should evaluate to true is when
		 *		sxg_complete_descriptor_blocks failed to allocate
		 *		receive buffers.
		 */
		if (adapter->JumboEnabled)
			sxg_rcv_data_buffers = SXG_JUMBO_RCV_DATA_BUFFERS;

		if (adapter->RcvBuffersOnCard < sxg_rcv_data_buffers) {
			sxg_stock_rcv_buffers(adapter);
		}
		/*
		 * It's more efficient to just set this to zero.
		 * But clearing the top bit saves potential debug info...
		 */
		Event->Status &= ~EVENT_STATUS_VALID;
		/* Advance to the next event */
		SXG_ADVANCE_INDEX(adapter->NextEvent[RssId], EVENT_RING_SIZE);
		Event = &EventRing->Ring[adapter->NextEvent[RssId]];
		EventsProcessed++;
		if (EventsProcessed == EVENT_RING_BATCH) {
			/* Release a batch of events back to the card */
			WRITE_REG(adapter->UcodeRegs[RssId].EventRelease,
				  EVENT_RING_BATCH, FALSE);
			EventsProcessed = 0;
			/*
			 * If we've processed our batch limit, break out of the
			 * loop and return SXG_ISR_EVENT to arrange for us to
			 * be called again
			 */
			if (Batches++ == EVENT_BATCH_LIMIT) {
				SXG_TRACE(TRACE_SXG, SxgTraceBuffer,
					  TRACE_NOISY, "EvtLimit", Batches,
					  adapter->NextEvent, 0, 0);
				ReturnStatus = SXG_ISR_EVENT;
				break;
			}
		}
		if (*work_done >= budget) {
			WRITE_REG(adapter->UcodeRegs[RssId].EventRelease,
				  EventsProcessed, FALSE);
			EventsProcessed = 0;
			(*sxg_napi_continue) = 0;
			break;
		}
	}
	if (!(Event->Status & EVENT_STATUS_VALID))
		(*sxg_napi_continue) = 0;

#ifdef LINUX_HANDLES_RCV_INDICATION_LISTS
	/* Indicate any received dumb-nic frames */
	SXG_INDICATE_PACKETS(adapter, IndicationList, num_skbs);
#endif
	/* Release events back to the card. */
	if (EventsProcessed) {
		WRITE_REG(adapter->UcodeRegs[RssId].EventRelease,
			  EventsProcessed, FALSE);
	}
	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XPrcEvnt",
		  Batches, EventsProcessed, adapter->NextEvent, num_skbs);

	return (ReturnStatus);
}
/*
 * sxg_complete_slow_send - Complete slowpath or dumb-nic sends
 *
 * Arguments -
 *	adapter		- A pointer to our adapter structure
 * Return
 *	None
 */
static void sxg_complete_slow_send(struct adapter_t *adapter)
{
	struct sxg_xmt_ring *XmtRing = &adapter->XmtRings[0];
	struct sxg_ring_info *XmtRingInfo = &adapter->XmtRingZeroInfo;
	u32 *ContextType;
	struct sxg_cmd *XmtCmd;
	unsigned long flags = 0;
	unsigned long sgl_flags = 0;
	unsigned int processed_count = 0;

	/*
	 * NOTE - This lock is dropped and regrabbed in this loop.
	 * This means two different processors can both be running
	 * through this loop. Be *very* careful.
	 */
	spin_lock_irqsave(&adapter->XmtZeroLock, flags);

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnds",
		  adapter, XmtRingInfo->Head, XmtRingInfo->Tail, 0);

	while ((XmtRingInfo->Tail != *adapter->XmtRingZeroIndex)
	       && processed_count++ < SXG_COMPLETE_SLOW_SEND_LIMIT) {
		/*
		 * Locate the current Cmd (ring descriptor entry), and
		 * associated SGL, and advance the tail
		 */
		SXG_RETURN_CMD(XmtRing, XmtRingInfo, XmtCmd, ContextType);
		ASSERT(ContextType);
		SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnd",
			  XmtRingInfo->Head, XmtRingInfo->Tail, XmtCmd, 0);
		/* Clear the SGL field. */
		XmtCmd->Sgl = 0;

		switch (*ContextType) {
		case SXG_SGL_DUMB:
			{
				struct sk_buff *skb;
				struct sxg_scatter_gather *SxgSgl =
				    (struct sxg_scatter_gather *)ContextType;
				dma64_addr_t FirstSgeAddress;
				u32 FirstSgeLength;

				/* Dumb-nic send.  Command context is the dumb-nic SGL */
				skb = (struct sk_buff *)ContextType;
				skb = SxgSgl->DumbPacket;
				FirstSgeAddress = XmtCmd->Buffer.FirstSgeAddress;
				FirstSgeLength = XmtCmd->Buffer.FirstSgeLength;
				/* Complete the send */
				SXG_TRACE(TRACE_SXG, SxgTraceBuffer,
					  TRACE_IMPORTANT, "DmSndCmp", skb, 0,
					  0, 0);
				ASSERT(adapter->Stats.XmtQLen);
				/*
				 * Now drop the lock and complete the send
				 * back to Microsoft.  We need to drop the lock
				 * because Microsoft can come back with a
				 * chimney send, which results in a double trip
				 * in SxgTcpOutput
				 */
				spin_unlock_irqrestore(
					&adapter->XmtZeroLock, flags);

				SxgSgl->DumbPacket = NULL;
				SXG_COMPLETE_DUMB_SEND(adapter, skb,
						       FirstSgeAddress,
						       FirstSgeLength);
				SXG_FREE_SGL_BUFFER(adapter, SxgSgl, NULL);
				/* and reacquire.. */
				spin_lock_irqsave(&adapter->XmtZeroLock, flags);
			}
			break;
		default:
			ASSERT(0);
		}
	}
	spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnd",
		  adapter, XmtRingInfo->Head, XmtRingInfo->Tail, 0);
}
/*
 * sxg_slow_receive
 *
 * Arguments -
 *	adapter		- A pointer to our adapter structure
 *	Event		- Receive event
 *
 * Return - skb
 */
static struct sk_buff *sxg_slow_receive(struct adapter_t *adapter,
					struct sxg_event *Event)
{
	u32 BufferSize = adapter->ReceiveBufferSize;
	struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr;
	struct sk_buff *Packet;
	static int read_counter = 0;

	RcvDataBufferHdr = (struct sxg_rcv_data_buffer_hdr *) Event->HostHandle;
	if (read_counter++ & 0x100) {
		sxg_collect_statistics(adapter);
		read_counter = 0;
	}
	ASSERT(RcvDataBufferHdr);
	ASSERT(RcvDataBufferHdr->State == SXG_BUFFER_ONCARD);
	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "SlowRcv", Event,
		  RcvDataBufferHdr, RcvDataBufferHdr->State,
		  /*RcvDataBufferHdr->VirtualAddress */ 0);
	/* Drop rcv frames in non-running state */
	switch (adapter->State) {
	case SXG_STATE_RUNNING:
		break;
	case SXG_STATE_PAUSING:
	case SXG_STATE_PAUSED:
	case SXG_STATE_HALTING:
		goto drop;
	default:
		ASSERT(0);
		goto drop;
	}

	/*
	 * memcpy(SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr),
	 *		RcvDataBufferHdr->VirtualAddress, Event->Length);
	 */

	/* Change buffer state to UPSTREAM */
	RcvDataBufferHdr->State = SXG_BUFFER_UPSTREAM;
	if (Event->Status & EVENT_STATUS_RCVERR) {
		SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "RcvError",
			  Event, Event->Status, Event->HostHandle, 0);
		sxg_process_rcv_error(adapter, *(u32 *)
				      SXG_RECEIVE_DATA_LOCATION
				      (RcvDataBufferHdr));
		goto drop;
	}
#if XXXTODO			/* VLAN stuff */
	/* If there's a VLAN tag, extract it and validate it */
	if (((struct ether_header *)
	     (SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr)))->EtherType
	    == ETHERTYPE_VLAN) {
		if (SxgExtractVlanHeader(adapter, RcvDataBufferHdr, Event) !=
		    STATUS_SUCCESS) {
			SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY,
				  "BadVlan", Event,
				  SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr),
				  Event->Length, 0);
			goto drop;
		}
	}
#endif
	/* Dumb-nic frame.  See if it passes our mac filter and update stats */

	if (!sxg_mac_filter(adapter,
			    (struct ether_header *)(SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr)),
			    Event->Length)) {
		SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "RcvFiltr",
			  Event, SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr),
			  Event->Length, 0);
		goto drop;
	}

	Packet = RcvDataBufferHdr->SxgDumbRcvPacket;
	SXG_ADJUST_RCV_PACKET(Packet, RcvDataBufferHdr, Event);
	Packet->protocol = eth_type_trans(Packet, adapter->netdev);

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "DumbRcv",
		  RcvDataBufferHdr, Packet, Event->Length, 0);
	/* Lastly adjust the receive packet length. */
	RcvDataBufferHdr->SxgDumbRcvPacket = NULL;
	RcvDataBufferHdr->PhysicalAddress = (dma_addr_t)NULL;
	SXG_ALLOCATE_RCV_PACKET(adapter, RcvDataBufferHdr, BufferSize);
	if (RcvDataBufferHdr->skb) {
		spin_lock(&adapter->RcvQLock);
		SXG_FREE_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr);
		/* adapter->RcvBuffersOnCard++; */
		spin_unlock(&adapter->RcvQLock);
	}
	return (Packet);

 drop:
	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DropRcv",
		  RcvDataBufferHdr, Event->Length, 0, 0);
	adapter->stats.rx_dropped++;
	/* adapter->Stats.RcvDiscards++; */
	spin_lock(&adapter->RcvQLock);
	SXG_FREE_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr);
	spin_unlock(&adapter->RcvQLock);
	return (NULL);
}
/*
 * sxg_process_rcv_error - process receive error and update
 * stats
 *
 * Arguments:
 *	adapter		- Adapter structure
 *	ErrorStatus	- 4-byte receive error status
 *
 * Return Value		: None
 */
static void sxg_process_rcv_error(struct adapter_t *adapter, u32 ErrorStatus)
{
	u32 Error;
	adapter->stats.rx_errors++;

	if (ErrorStatus & SXG_RCV_STATUS_TRANSPORT_ERROR) {
		Error = ErrorStatus & SXG_RCV_STATUS_TRANSPORT_MASK;
		switch (Error) {
		case SXG_RCV_STATUS_TRANSPORT_CSUM:
			adapter->Stats.TransportCsum++;
			break;
		case SXG_RCV_STATUS_TRANSPORT_UFLOW:
			adapter->Stats.TransportUflow++;
			break;
		case SXG_RCV_STATUS_TRANSPORT_HDRLEN:
			adapter->Stats.TransportHdrLen++;
			break;
		}
	}
	if (ErrorStatus & SXG_RCV_STATUS_NETWORK_ERROR) {
		Error = ErrorStatus & SXG_RCV_STATUS_NETWORK_MASK;
		switch (Error) {
		case SXG_RCV_STATUS_NETWORK_CSUM:
			adapter->Stats.NetworkCsum++;
			break;
		case SXG_RCV_STATUS_NETWORK_UFLOW:
			adapter->Stats.NetworkUflow++;
			break;
		case SXG_RCV_STATUS_NETWORK_HDRLEN:
			adapter->Stats.NetworkHdrLen++;
			break;
		}
	}
	if (ErrorStatus & SXG_RCV_STATUS_PARITY) {
		adapter->Stats.Parity++;
	}
	if (ErrorStatus & SXG_RCV_STATUS_LINK_ERROR) {
		Error = ErrorStatus & SXG_RCV_STATUS_LINK_MASK;
		switch (Error) {
		case SXG_RCV_STATUS_LINK_PARITY:
			adapter->Stats.LinkParity++;
			break;
		case SXG_RCV_STATUS_LINK_EARLY:
			adapter->Stats.LinkEarly++;
			break;
		case SXG_RCV_STATUS_LINK_BUFOFLOW:
			adapter->Stats.LinkBufOflow++;
			break;
		case SXG_RCV_STATUS_LINK_CODE:
			adapter->Stats.LinkCode++;
			break;
		case SXG_RCV_STATUS_LINK_DRIBBLE:
			adapter->Stats.LinkDribble++;
			break;
		case SXG_RCV_STATUS_LINK_CRC:
			adapter->Stats.LinkCrc++;
			break;
		case SXG_RCV_STATUS_LINK_OFLOW:
			adapter->Stats.LinkOflow++;
			break;
		case SXG_RCV_STATUS_LINK_UFLOW:
			adapter->Stats.LinkUflow++;
			break;
		}
	}
}
/*
 * sxg_mac_filter - Check a receive frame against our MAC filter
 *
 * Arguments:
 *	adapter		- Adapter structure
 *	pether		- Ethernet header
 *	length		- Frame length
 *
 * Return Value : TRUE if the frame is to be allowed
 */
static bool sxg_mac_filter(struct adapter_t *adapter,
			   struct ether_header *EtherHdr, ushort length)
{
	bool EqualAddr;
	struct net_device *dev = adapter->netdev;

	if (SXG_MULTICAST_PACKET(EtherHdr)) {
		if (SXG_BROADCAST_PACKET(EtherHdr)) {
			/* broadcast */
			if (adapter->MacFilter & MAC_BCAST) {
				adapter->Stats.DumbRcvBcastPkts++;
				adapter->Stats.DumbRcvBcastBytes += length;
				return (TRUE);
			}
		} else {
			/* multicast */
			if (adapter->MacFilter & MAC_ALLMCAST) {
				adapter->Stats.DumbRcvMcastPkts++;
				adapter->Stats.DumbRcvMcastBytes += length;
				return (TRUE);
			}
			if (adapter->MacFilter & MAC_MCAST) {
				struct dev_mc_list *mclist = dev->mc_list;

				while (mclist) {
					ETHER_EQ_ADDR(mclist->da_addr,
						      EtherHdr->ether_dhost,
						      EqualAddr);
					if (EqualAddr) {
						adapter->Stats.
						    DumbRcvMcastPkts++;
						adapter->Stats.
						    DumbRcvMcastBytes += length;
						return (TRUE);
					}
					mclist = mclist->next;
				}
			}
		}
	} else if (adapter->MacFilter & MAC_DIRECTED) {
		/*
		 * Not broadcast or multicast.  Must be directed at us or
		 * the card is in promiscuous mode.  Either way, consider it
		 * ours if MAC_DIRECTED is set
		 */
		adapter->Stats.DumbRcvUcastPkts++;
		adapter->Stats.DumbRcvUcastBytes += length;
		return (TRUE);
	}
	if (adapter->MacFilter & MAC_PROMISC) {
		/* Whatever it is, keep it. */
		return (TRUE);
	}
	return (FALSE);
}
static int sxg_register_interrupt(struct adapter_t *adapter)
{
	if (!adapter->intrregistered) {
		int retval;

		DBG_ERROR
		    ("sxg: %s AllocAdaptRsrcs adapter[%p] dev->irq[%x] %x\n",
		     __func__, adapter, adapter->netdev->irq, NR_IRQS);

		spin_unlock_irqrestore(&sxg_global.driver_lock,
				       sxg_global.flags);

		retval = request_irq(adapter->netdev->irq,
				     &sxg_isr,
				     IRQF_SHARED,
				     adapter->netdev->name, adapter->netdev);

		spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags);

		if (retval) {
			DBG_ERROR("sxg: request_irq (%s) FAILED [%x]\n",
				  adapter->netdev->name, retval);
			return (retval);
		}
		adapter->intrregistered = 1;
		adapter->IntRegistered = TRUE;
		/* Disable RSS with line-based interrupts */
		adapter->RssEnabled = FALSE;
		DBG_ERROR("sxg: %s AllocAdaptRsrcs adapter[%p] dev->irq[%x]\n",
			  __func__, adapter, adapter->netdev->irq);
	}
	return (STATUS_SUCCESS);
}
static void sxg_deregister_interrupt(struct adapter_t *adapter)
{
	DBG_ERROR("sxg: %s ENTER adapter[%p]\n", __func__, adapter);
	slic_init_cleanup(adapter);
	memset(&adapter->stats, 0, sizeof(struct net_device_stats));
	adapter->error_interrupts = 0;
	adapter->rcv_interrupts = 0;
	adapter->xmit_interrupts = 0;
	adapter->linkevent_interrupts = 0;
	adapter->upr_interrupts = 0;
	adapter->num_isrs = 0;
	adapter->xmit_completes = 0;
	adapter->rcv_broadcasts = 0;
	adapter->rcv_multicasts = 0;
	adapter->rcv_unicasts = 0;
	DBG_ERROR("sxg: %s EXIT\n", __func__);
}
/*
 * sxg_if_init - Perform initialization of our slic interface.
 */
static int sxg_if_init(struct adapter_t *adapter)
{
	struct net_device *dev = adapter->netdev;
	int status;

	DBG_ERROR("sxg: %s (%s) ENTER states[%d:%d] flags[%x]\n",
		  __func__, adapter->netdev->name,
		  adapter->state,
		  adapter->linkstate, dev->flags);

	/* adapter should be down at this point */
	if (adapter->state != ADAPT_DOWN) {
		DBG_ERROR("sxg_if_init adapter->state != ADAPT_DOWN\n");
		return (-EIO);
	}
	ASSERT(adapter->linkstate == LINK_DOWN);

	adapter->devflags_prev = dev->flags;
	adapter->MacFilter = MAC_DIRECTED;
	if (dev->flags) {
		DBG_ERROR("sxg: %s (%s) Set MAC options: ", __func__,
			  adapter->netdev->name);
		if (dev->flags & IFF_BROADCAST) {
			adapter->MacFilter |= MAC_BCAST;
			DBG_ERROR("BCAST ");
		}
		if (dev->flags & IFF_PROMISC) {
			adapter->MacFilter |= MAC_PROMISC;
			DBG_ERROR("PROMISC ");
		}
		if (dev->flags & IFF_ALLMULTI) {
			adapter->MacFilter |= MAC_ALLMCAST;
			DBG_ERROR("ALL_MCAST ");
		}
		if (dev->flags & IFF_MULTICAST) {
			adapter->MacFilter |= MAC_MCAST;
			DBG_ERROR("MCAST ");
		}
		DBG_ERROR("\n");
	}
	status = sxg_register_intr(adapter);
	if (status != STATUS_SUCCESS) {
		DBG_ERROR("sxg_if_init: sxg_register_intr FAILED %x\n",
			  status);
		sxg_deregister_interrupt(adapter);
		return (status);
	}

	adapter->state = ADAPT_UP;

	/* clear any pending events, then enable interrupts */
	DBG_ERROR("sxg: %s ENABLE interrupts(slic)\n", __func__);

	return (STATUS_SUCCESS);
}
void sxg_set_interrupt_aggregation(struct adapter_t *adapter)
{
	/*
	 * Top bit disables aggregation on xmt (SXG_AGG_XMT_DISABLE).
	 * Make sure Max is less than 0x8000.
	 */
	adapter->max_aggregation = SXG_MAX_AGG_DEFAULT;
	adapter->min_aggregation = SXG_MIN_AGG_DEFAULT;
	WRITE_REG(adapter->UcodeRegs[0].Aggregation,
		  ((adapter->max_aggregation << SXG_MAX_AGG_SHIFT) |
		   adapter->min_aggregation),
		  TRUE);
}
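/*
 * Worked example for the register write above (illustrative only; the
 * real shift value lives in the sxg headers): if SXG_MAX_AGG_SHIFT were
 * 16, then max = 0x0040 and min = 0x0008 would combine to 0x00400008 in
 * the Aggregation register, and keeping max below 0x8000 leaves the
 * SXG_AGG_XMT_DISABLE top bit clear.
 */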
static int sxg_entry_open(struct net_device *dev)
{
	struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
	int status;
	static int turn;
	int sxg_initial_rcv_data_buffers = SXG_INITIAL_RCV_DATA_BUFFERS;
	int i;

	if (adapter->JumboEnabled == TRUE) {
		sxg_initial_rcv_data_buffers =
					SXG_INITIAL_JUMBO_RCV_DATA_BUFFERS;
		SXG_INITIALIZE_RING(adapter->RcvRingZeroInfo,
				    SXG_JUMBO_RCV_RING_SIZE);
	}

	/*
	 * Allocate receive data buffers.  We allocate a block of buffers and
	 * a corresponding descriptor block at once.  See sxghw.h:SXG_RCV_BLOCK
	 */
	for (i = 0; i < sxg_initial_rcv_data_buffers;
	     i += SXG_RCV_DESCRIPTORS_PER_BLOCK) {
		status = sxg_allocate_buffer_memory(adapter,
				SXG_RCV_BLOCK_SIZE(SXG_RCV_DATA_HDR_SIZE),
				SXG_BUFFER_TYPE_RCV);
		if (status != STATUS_SUCCESS)
			break;
	}
	/*
	 * NBL resource allocation can fail in the 'AllocateComplete' routine,
	 * which doesn't return status.  Make sure we got the number of buffers
	 * we requested
	 */
	if (adapter->FreeRcvBufferCount < sxg_initial_rcv_data_buffers) {
		SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF6",
			  adapter, adapter->FreeRcvBufferCount, SXG_MAX_ENTRIES,
			  0);
		return (STATUS_RESOURCES);
	}
	/*
	 * The microcode expects it to be downloaded on every open.
	 */
	DBG_ERROR("sxg: %s ENTER sxg_download_microcode\n", __func__);
	if (sxg_download_microcode(adapter, SXG_UCODE_SYSTEM)) {
		DBG_ERROR("sxg: %s ENTER sxg_adapter_set_hwaddr\n",
			  __func__);
		sxg_read_config(adapter);
		status = sxg_adapter_set_hwaddr(adapter);
	} else {
		adapter->state = ADAPT_FAIL;
		adapter->linkstate = LINK_DOWN;
		DBG_ERROR("sxg_download_microcode FAILED status[%x]\n",
			  status);
	}

	if (turn) {
		sxg_second_open(adapter->netdev);

		return STATUS_SUCCESS;
	}

	turn++;

	DBG_ERROR("sxg: %s adapter->activated[%d]\n", __func__,
		  adapter->activated);
	DBG_ERROR
	    ("sxg: %s (%s): [jiffies[%lx] cpu %d] dev[%p] adapt[%p] port[%d]\n",
	     __func__, adapter->netdev->name, jiffies, smp_processor_id(),
	     adapter->netdev, adapter, adapter->port);

	netif_stop_queue(adapter->netdev);

	spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags);
	if (!adapter->activated) {
		sxg_global.num_sxg_ports_active++;
		adapter->activated = 1;
	}
	/* Initialize the adapter */
	DBG_ERROR("sxg: %s ENTER sxg_initialize_adapter\n", __func__);
	status = sxg_initialize_adapter(adapter);
	DBG_ERROR("sxg: %s EXIT sxg_initialize_adapter status[%x]\n",
		  __func__, status);

	if (status == STATUS_SUCCESS) {
		DBG_ERROR("sxg: %s ENTER sxg_if_init\n", __func__);
		status = sxg_if_init(adapter);
		DBG_ERROR("sxg: %s EXIT sxg_if_init status[%x]\n", __func__,
			  status);
	}

	if (status != STATUS_SUCCESS) {
		if (adapter->activated) {
			sxg_global.num_sxg_ports_active--;
			adapter->activated = 0;
		}
		spin_unlock_irqrestore(&sxg_global.driver_lock,
				       sxg_global.flags);
		return (status);
	}
	DBG_ERROR("sxg: %s ENABLE ALL INTERRUPTS\n", __func__);
	sxg_set_interrupt_aggregation(adapter);
	napi_enable(&adapter->napi);

	/* Enable interrupts */
	SXG_ENABLE_ALL_INTERRUPTS(adapter);

	DBG_ERROR("sxg: %s EXIT\n", __func__);

	spin_unlock_irqrestore(&sxg_global.driver_lock, sxg_global.flags);
	mod_timer(&adapter->watchdog_timer, jiffies);

	return STATUS_SUCCESS;
}
int sxg_second_open(struct net_device *dev)
{
	struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
	int status;

	spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags);
	netif_start_queue(adapter->netdev);
	adapter->state = ADAPT_UP;
	adapter->linkstate = LINK_UP;

	status = sxg_initialize_adapter(adapter);
	sxg_set_interrupt_aggregation(adapter);
	napi_enable(&adapter->napi);
	/* Re-enable interrupts */
	SXG_ENABLE_ALL_INTERRUPTS(adapter);

	sxg_register_intr(adapter);
	spin_unlock_irqrestore(&sxg_global.driver_lock, sxg_global.flags);
	mod_timer(&adapter->watchdog_timer, jiffies);
	return (STATUS_SUCCESS);
}
static void __devexit sxg_entry_remove(struct pci_dev *pcidev)
{
	u32 mmio_start = 0;
	u32 mmio_len = 0;
	struct net_device *dev = pci_get_drvdata(pcidev);
	struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);

	flush_scheduled_work();

	/* Deallocate Resources */
	unregister_netdev(dev);
	sxg_reset_interrupt_capability(adapter);
	sxg_free_resources(adapter);

	ASSERT(adapter);

	mmio_start = pci_resource_start(pcidev, 0);
	mmio_len = pci_resource_len(pcidev, 0);

	DBG_ERROR("sxg: %s rel_region(0) start[%x] len[%x]\n", __func__,
		  mmio_start, mmio_len);
	release_mem_region(mmio_start, mmio_len);

	mmio_start = pci_resource_start(pcidev, 2);
	mmio_len = pci_resource_len(pcidev, 2);

	DBG_ERROR("sxg: %s rel_region(2) start[%x] len[%x]\n", __func__,
		  mmio_start, mmio_len);
	release_mem_region(mmio_start, mmio_len);

	pci_disable_device(pcidev);

	DBG_ERROR("sxg: %s deallocate device\n", __func__);
	DBG_ERROR("sxg: %s EXIT\n", __func__);
}
static int sxg_entry_halt(struct net_device *dev)
{
	struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
	struct sxg_hw_regs *HwRegs = adapter->HwRegs;
	int i;
	u32 RssIds, IsrCount;
	unsigned long flags;

	RssIds = SXG_RSS_CPU_COUNT(adapter);
	IsrCount = adapter->msi_enabled ? RssIds : 1;
	/* Disable interrupts */
	spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags);
	SXG_DISABLE_ALL_INTERRUPTS(adapter);
	adapter->state = ADAPT_DOWN;
	adapter->linkstate = LINK_DOWN;

	spin_unlock_irqrestore(&sxg_global.driver_lock, sxg_global.flags);
	sxg_deregister_interrupt(adapter);
	WRITE_REG(HwRegs->Reset, 0xDEAD, FLUSH);

	del_timer_sync(&adapter->watchdog_timer);
	netif_stop_queue(dev);
	netif_carrier_off(dev);

	napi_disable(&adapter->napi);

	WRITE_REG(adapter->UcodeRegs[0].RcvCmd, 0, true);
	adapter->devflags_prev = 0;
	DBG_ERROR("sxg: %s (%s) set adapter[%p] state to ADAPT_DOWN(%d)\n",
		  __func__, dev->name, adapter, adapter->state);

	spin_lock(&adapter->RcvQLock);
	/* Free all the blocks and the buffers, moved from remove() routine */
	if (!(IsListEmpty(&adapter->AllRcvBlocks))) {
		sxg_free_rcvblocks(adapter);
	}

	InitializeListHead(&adapter->FreeRcvBuffers);
	InitializeListHead(&adapter->FreeRcvBlocks);
	InitializeListHead(&adapter->AllRcvBlocks);
	InitializeListHead(&adapter->FreeSglBuffers);
	InitializeListHead(&adapter->AllSglBuffers);

	adapter->FreeRcvBufferCount = 0;
	adapter->FreeRcvBlockCount = 0;
	adapter->AllRcvBlockCount = 0;
	adapter->RcvBuffersOnCard = 0;
	adapter->PendingRcvCount = 0;

	memset(adapter->RcvRings, 0, sizeof(struct sxg_rcv_ring) * 1);
	memset(adapter->EventRings, 0, sizeof(struct sxg_event_ring) * RssIds);
	memset(adapter->Isr, 0, sizeof(u32) * IsrCount);
	for (i = 0; i < SXG_MAX_RING_SIZE; i++)
		adapter->RcvRingZeroInfo.Context[i] = NULL;
	SXG_INITIALIZE_RING(adapter->RcvRingZeroInfo, SXG_RCV_RING_SIZE);
	SXG_INITIALIZE_RING(adapter->XmtRingZeroInfo, SXG_XMT_RING_SIZE);

	spin_unlock(&adapter->RcvQLock);

	spin_lock_irqsave(&adapter->XmtZeroLock, flags);
	adapter->AllSglBufferCount = 0;
	adapter->FreeSglBufferCount = 0;
	adapter->PendingXmtCount = 0;
	memset(adapter->XmtRings, 0, sizeof(struct sxg_xmt_ring) * 1);
	memset(adapter->XmtRingZeroIndex, 0, sizeof(u32));
	spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);

	for (i = 0; i < SXG_MAX_RSS; i++) {
		adapter->NextEvent[i] = 0;
	}
	atomic_set(&adapter->pending_allocations, 0);
	adapter->intrregistered = 0;
	sxg_remove_isr(adapter);
	DBG_ERROR("sxg: %s (%s) EXIT\n", __func__, dev->name);
	return (STATUS_SUCCESS);
}
static int sxg_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	/* DBG_ERROR("sxg: %s cmd[%x] rq[%p] dev[%p]\n", __func__, cmd, rq, dev); */
	switch (cmd) {
	case SIOCSLICSETINTAGG:
		{
			/* struct adapter_t *adapter = (struct adapter_t *)
			 * netdev_priv(dev);
			 */
			u32 data[7];
			u32 intagg;

			if (copy_from_user(data, rq->ifr_data, 28)) {
				DBG_ERROR("copy_from_user FAILED getting \
					 initial params\n");
				return -EFAULT;
			}
			intagg = data[0];
			printk(KERN_EMERG
			       "%s: set interrupt aggregation to %d\n",
			       __func__, intagg);
			return 0;
		}
	default:
		/* DBG_ERROR("sxg: %s UNSUPPORTED[%x]\n", __func__, cmd); */
		return -EOPNOTSUPP;
	}
}
#define NORMAL_ETHFRAME		0

/*
 * sxg_send_packets - Send a skb packet
 *
 * Arguments:
 *	skb	- The packet to send
 *	dev	- Our linux net device that refs our adapter
 *
 * Return:
 *	0   regardless of outcome    XXXTODO refer to e1000 driver
 */
static int sxg_send_packets(struct sk_buff *skb, struct net_device *dev)
{
	struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
	u32 status = STATUS_SUCCESS;

	/*
	 * DBG_ERROR("sxg: %s ENTER sxg_send_packets skb[%p]\n", __func__,
	 *	  skb);
	 */

	/* Check the adapter state */
	switch (adapter->State) {
	case SXG_STATE_INITIALIZING:
	case SXG_STATE_HALTED:
	case SXG_STATE_SHUTDOWN:
		ASSERT(0);	/* unexpected */
		/* fall through */
	case SXG_STATE_RESETTING:
	case SXG_STATE_SLEEP:
	case SXG_STATE_BOOTDIAG:
	case SXG_STATE_DIAG:
	case SXG_STATE_HALTING:
		status = STATUS_FAILURE;
		break;
	case SXG_STATE_RUNNING:
		if (adapter->LinkState != SXG_LINK_UP) {
			status = STATUS_FAILURE;
		}
		break;
	default:
		status = STATUS_FAILURE;
	}
	if (status != STATUS_SUCCESS) {
		goto xmit_fail;
	}
	/* send a packet */
	status = sxg_transmit_packet(adapter, skb);
	if (status == STATUS_SUCCESS) {
		goto xmit_done;
	}

 xmit_fail:
	/* reject & complete all the packets if they can't be sent */
	if (status != STATUS_SUCCESS) {
		/* sxg_send_packets_fail(adapter, skb, status); */
		SXG_DROP_DUMB_SEND(adapter, skb);
		adapter->stats.tx_dropped++;
		return NETDEV_TX_BUSY;
	}
	DBG_ERROR("sxg: %s EXIT sxg_send_packets status[%x]\n", __func__,
		  status);

 xmit_done:
	return NETDEV_TX_OK;
}
/*
 * sxg_transmit_packet
 *
 * This function transmits a single packet.
 *
 * Arguments:
 *	adapter		- Pointer to our adapter structure
 *	skb		- The packet to be sent
 *
 * Return - STATUS of send
 */
static int sxg_transmit_packet(struct adapter_t *adapter, struct sk_buff *skb)
{
	struct sxg_x64_sgl *pSgl;
	struct sxg_scatter_gather *SxgSgl;
	unsigned long sgl_flags;
	/* void *SglBuffer; */
	/* u32 SglBufferLength; */

	/*
	 * The vast majority of work is done in the shared
	 * sxg_dumb_sgl routine.
	 */
	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbSend",
		  adapter, skb, 0, 0);

	/* Allocate a SGL buffer */
	SXG_GET_SGL_BUFFER(adapter, SxgSgl, 0);
	if (!SxgSgl) {
		adapter->Stats.NoSglBuf++;
		adapter->stats.tx_errors++;
		SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "SndPktF1",
			  adapter, skb, 0, 0);
		return (STATUS_RESOURCES);
	}
	ASSERT(SxgSgl->adapter == adapter);
	/* SglBuffer = SXG_SGL_BUFFER(SxgSgl);
	   SglBufferLength = SXG_SGL_BUF_SIZE; */
	SxgSgl->VlanTag.VlanTci = 0;
	SxgSgl->VlanTag.VlanTpid = 0;
	SxgSgl->Type = SXG_SGL_DUMB;
	SxgSgl->DumbPacket = skb;
	pSgl = NULL;

	/* Call the common sxg_dumb_sgl routine to complete the send. */
	return (sxg_dumb_sgl(pSgl, SxgSgl));
}
/*
 * sxg_dumb_sgl
 *
 * Arguments:
 *	pSgl	-
 *	SxgSgl	- struct sxg_scatter_gather buffer
 *
 * Return Value:
 *	Status of send operation.
 */
static int sxg_dumb_sgl(struct sxg_x64_sgl *pSgl,
			struct sxg_scatter_gather *SxgSgl)
{
	struct adapter_t *adapter = SxgSgl->adapter;
	struct sk_buff *skb = SxgSgl->DumbPacket;
	/* For now, all dumb-nic sends go on RSS queue zero */
	struct sxg_xmt_ring *XmtRing = &adapter->XmtRings[0];
	struct sxg_ring_info *XmtRingInfo = &adapter->XmtRingZeroInfo;
	struct sxg_cmd *XmtCmd = NULL;
	/* u32 Index = 0; */
	u32 DataLength = skb->len;
	/* unsigned int BufLen; */
	/* u32 SglOffset; */
	dma_addr_t phys_addr;
	unsigned long flags;
	unsigned long queue_id = 0;
	int offload_cksum = 0;

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbSgl",
		  pSgl, SxgSgl, 0, 0);

	/* Set aside a pointer to the sgl */
	SxgSgl->pSgl = pSgl;

	/* Sanity check that our SGL format is as we expect. */
	ASSERT(sizeof(struct sxg_x64_sge) == sizeof(struct sxg_x64_sge));
	/* Shouldn't be a vlan tag on this frame */
	ASSERT(SxgSgl->VlanTag.VlanTci == 0);
	ASSERT(SxgSgl->VlanTag.VlanTpid == 0);

	/*
	 * From here below we work with the SGL placed in our
	 * buffer.
	 */
	SxgSgl->Sgl.NumberOfElements = 1;
	/*
	 * Set ucode Queue ID based on bottom bits of destination TCP port.
	 * This Queue ID splits slowpath/dumb-nic packet processing across
	 * multiple threads on the card to improve performance.  It is split
	 * using the TCP port to avoid out-of-order packets that can result
	 * from multithreaded processing.  We use the destination port because
	 * we expect to be run on a server, so in nearly all cases the local
	 * port is likely to be constant (well-known server port) and the
	 * remote port is likely to be random.  The exception to this is iSCSI,
	 * in which case we use the sport instead.  Note
	 * that original attempt at XOR'ing source and dest port resulted in
	 * poor balance on NTTTCP/iometer applications since they tend to
	 * line up (even-even, odd-odd..).
	 */

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *ip;

		ip = ip_hdr(skb);
		if (ip->protocol == IPPROTO_TCP)
			offload_cksum = 1;
		if (!offload_cksum || !tcp_hdr(skb))
			queue_id = 0;
		else if (offload_cksum && (DataLength >= sizeof(
						struct tcphdr))) {
			queue_id = ((ntohs(tcp_hdr(skb)->dest) == ISCSI_PORT) ?
					(ntohs(tcp_hdr(skb)->source) &
						SXG_LARGE_SEND_QUEUE_MASK) :
					(ntohs(tcp_hdr(skb)->dest) &
						SXG_LARGE_SEND_QUEUE_MASK));
		}
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			offload_cksum = 1;
		if (!offload_cksum || !tcp_hdr(skb))
			queue_id = 0;
		else if (offload_cksum && (DataLength >= sizeof(struct tcphdr))) {
			queue_id = ((ntohs(tcp_hdr(skb)->dest) == ISCSI_PORT) ?
					(ntohs(tcp_hdr(skb)->source) &
						SXG_LARGE_SEND_QUEUE_MASK) :
					(ntohs(tcp_hdr(skb)->dest) &
						SXG_LARGE_SEND_QUEUE_MASK));
		}
	}
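	/*
	 * Worked example for the hash above (illustrative only): an HTTP
	 * flow with destination port 80 lands on queue
	 * (80 & SXG_LARGE_SEND_QUEUE_MASK), while an iSCSI flow (dest port
	 * ISCSI_PORT) hashes on its random source port instead.  Either way,
	 * a given TCP flow always maps to one card thread, so its packets
	 * cannot be reordered by multithreaded processing.
	 */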
	/* Grab the spinlock and acquire a command */
	spin_lock_irqsave(&adapter->XmtZeroLock, flags);
	SXG_GET_CMD(XmtRing, XmtRingInfo, XmtCmd, SxgSgl);
	if (XmtCmd == NULL) {
		/*
		 * Call sxg_complete_slow_send to see if we can
		 * free up any XmtRingZero entries and then try again
		 */
		spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
		sxg_complete_slow_send(adapter);
		spin_lock_irqsave(&adapter->XmtZeroLock, flags);
		SXG_GET_CMD(XmtRing, XmtRingInfo, XmtCmd, SxgSgl);
		if (XmtCmd == NULL) {
			adapter->Stats.XmtZeroFull++;
			goto abortcmd;
		}
	}
	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbCmd",
		  XmtCmd, XmtRingInfo->Head, XmtRingInfo->Tail, 0);
	memset(XmtCmd, '\0', sizeof(*XmtCmd));
	XmtCmd->SgEntries = 1;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/*
		 * We need to set the Checksum in IP header to 0. This is
		 * required by hardware.
		 */
		if (offload_cksum) {
			ip_hdr(skb)->check = 0x0;
			XmtCmd->CsumFlags.Flags |= SXG_SLOWCMD_CSUM_IP;
			XmtCmd->CsumFlags.Flags |= SXG_SLOWCMD_CSUM_TCP;
			/*
			 * Don't know if length will require a change in
			 * case of VLAN
			 */
			XmtCmd->CsumFlags.MacLen = ETH_HLEN;
			XmtCmd->CsumFlags.IpHl = skb_network_header_len(skb) >>
							SXG_NW_HDR_LEN_SHIFT;
		} else {
			if (skb_checksum_help(skb)) {
				printk(KERN_EMERG "Dropped UDP packet for"
				       " incorrect checksum calculation\n");
				SXG_ABORT_CMD(XmtRingInfo);
				spin_unlock_irqrestore(&adapter->XmtZeroLock,
						       flags);
				return STATUS_SUCCESS;
			}
		}
	}
	/*
	 * Fill in the command
	 * Copy out the first SGE to the command and adjust for offset
	 */
	phys_addr = pci_map_single(adapter->pcidev, skb->data, skb->len,
				   PCI_DMA_TODEVICE);

	/*
	 * SAHARA SGL WORKAROUND
	 * See if the SGL straddles a 64k boundary.  If so, skip to
	 * the start of the next 64k boundary and continue
	 */
	if ((adapter->asictype == SAHARA_REV_A) &&
	    (SXG_INVALID_SGL(phys_addr, skb->data_len))) {
		spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
		SXG_ABORT_CMD(XmtRingInfo);
		/* Silently drop this packet */
		printk(KERN_EMERG "Dropped a packet for 64k boundary problem\n");
		return STATUS_SUCCESS;
	}
	XmtCmd->Buffer.FirstSgeAddress = phys_addr;
	XmtCmd->Buffer.FirstSgeLength = DataLength;
	XmtCmd->Buffer.SgeOffset = 0;
	XmtCmd->Buffer.TotalLength = DataLength;

	/*
	 * Advance transmit cmd descriptor by 1.
	 * NOTE - See comments in SxgTcpOutput where we write
	 * to the XmtCmd register regarding CPU ID values and/or
	 * multiple commands.
	 * Top 16 bits specify queue_id.  See comments about queue_id above
	 */
	/* Four queues at the moment */
	ASSERT((queue_id & ~SXG_LARGE_SEND_QUEUE_MASK) == 0);
	WRITE_REG(adapter->UcodeRegs[0].XmtCmd, ((queue_id << 16) | 1), TRUE);
	adapter->Stats.XmtQLen++;	/* Stats within lock */
	adapter->stats.tx_packets++;
	adapter->stats.tx_bytes += DataLength;
#if XXXTODO			/* Stats stuff */
	if (SXG_MULTICAST_PACKET(EtherHdr)) {
		if (SXG_BROADCAST_PACKET(EtherHdr)) {
			adapter->Stats.DumbXmtBcastPkts++;
			adapter->Stats.DumbXmtBcastBytes += DataLength;
		} else {
			adapter->Stats.DumbXmtMcastPkts++;
			adapter->Stats.DumbXmtMcastBytes += DataLength;
		}
	} else {
		adapter->Stats.DumbXmtUcastPkts++;
		adapter->Stats.DumbXmtUcastBytes += DataLength;
	}
#endif
	spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDumSgl2",
		  XmtCmd, pSgl, SxgSgl, 0);
	return STATUS_SUCCESS;

 abortcmd:
	/*
	 * NOTE - Only jump to this label AFTER grabbing the
	 * XmtZeroLock, and DO NOT DROP IT between the
	 * command allocation and the following abort.
	 */
	if (XmtCmd) {
		SXG_ABORT_CMD(XmtRingInfo);
	}
	spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);

	/*
	 * Jump to this label if failure occurs before the
	 * XmtZeroLock is grabbed
	 */
	adapter->stats.tx_errors++;
	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "DumSGFal",
		  pSgl, SxgSgl, XmtRingInfo->Head, XmtRingInfo->Tail);
	/* SxgSgl->DumbPacket is the skb */
	/* SXG_COMPLETE_DUMB_SEND(adapter, SxgSgl->DumbPacket); */

	return STATUS_FAILURE;
}
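/*
 * Illustrative sketch, not part of the original driver: the Sahara Rev A
 * workaround in sxg_dumb_sgl() drops any buffer whose DMA mapping would
 * straddle a 64 KB boundary.  Assuming SXG_INVALID_SGL tests exactly that
 * condition, an equivalent check would be:
 */
static inline int sxg_example_crosses_64k(dma_addr_t addr, unsigned int len)
{
	/* A zero-length buffer cannot straddle anything */
	if (len == 0)
		return 0;
	/* Compare the 64 KB "page" number of the first and last byte */
	return (addr >> 16) != ((addr + len - 1) >> 16);
}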
/*
 * Link management functions
 *
 * sxg_initialize_link - Initialize the link stuff
 *
 * Arguments -
 *	adapter		- A pointer to our adapter structure
 *
 * Return
 *	status
 */
static int sxg_initialize_link(struct adapter_t *adapter)
{
	struct sxg_hw_regs *HwRegs = adapter->HwRegs;
	u32 Value;
	u32 ConfigData;
	u32 MaxFrame;
	u32 AxgMacReg1;
	int status;

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "InitLink",
		  adapter, 0, 0, 0);

	/* Reset PHY and XGXS module */
	WRITE_REG(HwRegs->LinkStatus, LS_SERDES_POWER_DOWN, TRUE);

	/* Reset transmit configuration register */
	WRITE_REG(HwRegs->XmtConfig, XMT_CONFIG_RESET, TRUE);

	/* Reset receive configuration register */
	WRITE_REG(HwRegs->RcvConfig, RCV_CONFIG_RESET, TRUE);

	/* Reset all MAC modules */
	WRITE_REG(HwRegs->MacConfig0, AXGMAC_CFG0_SUB_RESET, TRUE);

	/*
	 * Link address 0
	 * XXXTODO - This assumes the MAC address (0a:0b:0c:0d:0e:0f)
	 * is stored with the first nibble (0a) in the byte 0
	 * of the Mac address.  Possibly reverse?
	 */
	Value = *(u32 *) adapter->macaddr;
	WRITE_REG(HwRegs->LinkAddress0Low, Value, TRUE);
	/* also write the MAC address to the MAC.  Endian is reversed. */
	WRITE_REG(HwRegs->MacAddressLow, ntohl(Value), TRUE);
	Value = (*(u16 *) &adapter->macaddr[4] & 0x0000FFFF);
	WRITE_REG(HwRegs->LinkAddress0High, Value | LINK_ADDRESS_ENABLE, TRUE);
	/* endian swap for the MAC (put high bytes in bits [31:16], swapped) */
	Value = ntohl(Value);
	WRITE_REG(HwRegs->MacAddressHigh, Value, TRUE);
	/* Link address 1 */
	WRITE_REG(HwRegs->LinkAddress1Low, 0, TRUE);
	WRITE_REG(HwRegs->LinkAddress1High, 0, TRUE);
	/* Link address 2 */
	WRITE_REG(HwRegs->LinkAddress2Low, 0, TRUE);
	WRITE_REG(HwRegs->LinkAddress2High, 0, TRUE);
	/* Link address 3 */
	WRITE_REG(HwRegs->LinkAddress3Low, 0, TRUE);
	WRITE_REG(HwRegs->LinkAddress3High, 0, TRUE);

	/* Enable MAC modules */
	WRITE_REG(HwRegs->MacConfig0, 0, TRUE);
	/* Configure MAC */
	AxgMacReg1 = (	/* Enable XMT */
		      AXGMAC_CFG1_XMT_EN |
		      /* Enable receive */
		      AXGMAC_CFG1_RCV_EN |
		      /* short frame detection */
		      AXGMAC_CFG1_SHORT_ASSERT |
		      /* Verify frame length */
		      AXGMAC_CFG1_CHECK_LEN |
		      /* Generate FCS */
		      AXGMAC_CFG1_GEN_FCS |
		      /* Pad frames to 64 bytes */
		      AXGMAC_CFG1_PAD_64);

	if (adapter->XmtFcEnabled) {
		AxgMacReg1 |= AXGMAC_CFG1_XMT_PAUSE;	/* Allow sending of pause */
	}
	if (adapter->RcvFcEnabled) {
		AxgMacReg1 |= AXGMAC_CFG1_RCV_PAUSE;	/* Enable detection of pause */
	}

	WRITE_REG(HwRegs->MacConfig1, AxgMacReg1, TRUE);

	/* Set AXGMAC max frame length if jumbo.  Not needed for standard MTU */
	if (adapter->JumboEnabled) {
		WRITE_REG(HwRegs->MacMaxFrameLen, AXGMAC_MAXFRAME_JUMBO, TRUE);
	}
	/*
	 * AMIIM Configuration Register -
	 * The value placed in the AXGMAC_AMIIM_CFG_HALF_CLOCK portion
	 * (bottom bits) of this register is used to determine the MDC frequency
	 * as specified in the A-XGMAC Design Document. This value must not be
	 * zero.  The following value (62 or 0x3E) is based on our MAC transmit
	 * clock frequency (MTCLK) of 312.5 MHz. Given a maximum MDIO clock
	 * frequency of 2.5 MHz (see the PHY spec), we get:
	 *	312.5/(2*(X+1)) < 2.5  ==> X = 62.
	 * This value happens to be the default value for this register, so we
	 * really don't have to do this.
	 */
	if (adapter->asictype == SAHARA_REV_B) {
		WRITE_REG(HwRegs->MacAmiimConfig, 0x0000001F, TRUE);
	} else {
		WRITE_REG(HwRegs->MacAmiimConfig, 0x0000003E, TRUE);
	}
	/* Power up and enable PHY and XAUI/XGXS/Serdes logic */
	WRITE_REG(HwRegs->LinkStatus,
		  (LS_PHY_CLR_RESET |
		   LS_XGXS_ENABLE |
		   LS_XGXS_CTL | LS_PHY_CLK_EN | LS_ATTN_ALARM), TRUE);
	DBG_ERROR("After Power Up and enable PHY in sxg_initialize_link\n");
	/*
	 * Per information given by Aeluros, wait 100 ms after removing reset.
	 * It's not enough to wait for the self-clearing reset bit in reg 0 to
	 * clear.
	 */
	mdelay(100);

	/* Verify the PHY has come up by checking that the Reset bit has
	 * cleared. */
	status = sxg_read_mdio_reg(adapter,
				   MIIM_DEV_PHY_PMA, /* PHY PMA/PMD module */
				   PHY_PMA_CONTROL1, /* PMA/PMD control register */
				   &Value);
	DBG_ERROR("After sxg_read_mdio_reg Value[%x] fail=%x\n", Value,
		  (Value & PMA_CONTROL1_RESET));
	if (status != STATUS_SUCCESS)
		return (STATUS_FAILURE);
	if (Value & PMA_CONTROL1_RESET)	/* reset complete if bit is 0 */
		return (STATUS_FAILURE);

	/* The SERDES should be initialized by now - confirm */
	READ_REG(HwRegs->LinkStatus, Value);
	if (Value & LS_SERDES_DOWN)	/* verify SERDES is initialized */
		return (STATUS_FAILURE);

	/* The XAUI link should also be up - confirm */
	if (!(Value & LS_XAUI_LINK_UP))	/* verify XAUI link is up */
		return (STATUS_FAILURE);

	/* Initialize the PHY */
	status = sxg_phy_init(adapter);
	if (status != STATUS_SUCCESS)
		return (STATUS_FAILURE);

	/* Enable the Link Alarm */

	/* MIIM_DEV_PHY_PMA		- PHY PMA/PMD module
	 * LASI_CONTROL			- LASI control register
	 * LASI_CTL_LS_ALARM_ENABLE	- enable link alarm bit
	 */
	status = sxg_write_mdio_reg(adapter, MIIM_DEV_PHY_PMA,
				    LASI_CONTROL,
				    LASI_CTL_LS_ALARM_ENABLE);
	if (status != STATUS_SUCCESS)
		return (STATUS_FAILURE);

	/* XXXTODO - temporary - verify bit is set */

	/* MIIM_DEV_PHY_PMA		- PHY PMA/PMD module
	 * LASI_CONTROL			- LASI control register
	 */
	status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA,
				   LASI_CONTROL,
				   &Value);
	if (status != STATUS_SUCCESS)
		return (STATUS_FAILURE);
	if (!(Value & LASI_CTL_LS_ALARM_ENABLE)) {
		DBG_ERROR("Error!  LASI Control Alarm Enable bit not set!\n");
	}
	/* Enable receive */
	MaxFrame = adapter->JumboEnabled ? JUMBOMAXFRAME : ETHERMAXFRAME;
	ConfigData = (RCV_CONFIG_ENABLE |
		      RCV_CONFIG_ENPARSE |
		      RCV_CONFIG_RCVPAUSE |
		      RCV_CONFIG_HASH_16 |
		      RCV_CONFIG_SOCKET | RCV_CONFIG_BUFSIZE(MaxFrame));

	if (adapter->asictype == SAHARA_REV_B) {
		ConfigData |= (RCV_CONFIG_HIPRICTL |
			       RCV_CONFIG_NEWSTATUSFMT);
	}
	WRITE_REG(HwRegs->RcvConfig, ConfigData, TRUE);

	WRITE_REG(HwRegs->XmtConfig, XMT_CONFIG_ENABLE, TRUE);

	/* Mark the link as down.  We'll get a link event when it comes up. */
	sxg_link_state(adapter, SXG_LINK_DOWN);

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XInitLnk",
		  adapter, 0, 0, 0);
	return (STATUS_SUCCESS);
}
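/*
 * Worked example for the MAC address writes in sxg_initialize_link()
 * (illustrative, assuming a little-endian host): for the address
 * 0a:0b:0c:0d:0e:0f, *(u32 *)macaddr loads bytes 0a 0b 0c 0d as the
 * value 0x0d0c0b0a.  That raw value is written to LinkAddress0Low,
 * while ntohl() swaps it to 0x0a0b0c0d for the byte order the MAC
 * expects in MacAddressLow.
 */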
/*
 * sxg_phy_init - Initialize the PHY
 *
 * Arguments -
 *	adapter		- A pointer to our adapter structure
 *
 * Return
 *	status
 */
static int sxg_phy_init(struct adapter_t *adapter)
{
	u32 Value;
	struct phy_ucode *p;
	int status;

	DBG_ERROR("ENTER %s\n", __func__);

	/* MIIM_DEV_PHY_PMA - PHY PMA/PMD module
	 * 0xC205 - PHY ID register (?)
	 * &Value - XXXTODO - add def
	 */
	status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA,
				   0xC205,
				   &Value);
	if (status != STATUS_SUCCESS)
		return (STATUS_FAILURE);

	if (Value == 0x0012) {
		/* 0x0012 == AEL2005C PHY(?) - XXXTODO - add def */
		DBG_ERROR("AEL2005C PHY detected.  Downloading PHY \
				 microcode.\n");

		/* Initialize AEL2005C PHY and download PHY microcode */
		for (p = PhyUcode; p->Addr != 0xFFFF; p++) {
			if (p->Addr == 0) {
				/* if address == 0, data == sleep time in ms */
				mdelay(p->Data);
			} else {
				/* write the given data to the specified address */
				status = sxg_write_mdio_reg(adapter,
							    MIIM_DEV_PHY_PMA,
							    p->Addr,
							    p->Data);
				if (status != STATUS_SUCCESS)
					return (STATUS_FAILURE);
			}
		}
	}
	DBG_ERROR("EXIT %s\n", __func__);

	return (STATUS_SUCCESS);
}
/*
 * sxg_link_event - Process a link event notification from the card
 *
 * Arguments -
 *	adapter		- A pointer to our adapter structure
 *
 * Return
 *	None
 */
static void sxg_link_event(struct adapter_t *adapter)
{
	struct sxg_hw_regs *HwRegs = adapter->HwRegs;
	struct net_device *netdev = adapter->netdev;
	enum SXG_LINK_STATE LinkState;
	int status;
	u32 Value;

	if (adapter->state == ADAPT_DOWN)
		return;
	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "LinkEvnt",
		  adapter, 0, 0, 0);
	DBG_ERROR("ENTER %s\n", __func__);

	/* Check the Link Status register.  We should have a Link Alarm. */
	READ_REG(HwRegs->LinkStatus, Value);
	if (Value & LS_LINK_ALARM) {
		/*
		 * We got a Link Status alarm.  First, pause to let the
		 * link state settle (it can bounce a number of times)
		 */
		mdelay(10);

		/* Now clear the alarm by reading the LASI status register. */
		/* MIIM_DEV_PHY_PMA - PHY PMA/PMD module */
		status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA,
					   /* LASI status register */
					   LASI_STATUS,
					   &Value);
		if (status != STATUS_SUCCESS) {
			DBG_ERROR("Error reading LASI Status MDIO register!\n");
			sxg_link_state(adapter, SXG_LINK_DOWN);
		}
		/*
		 * We used to assert that the LASI_LS_ALARM bit was set, as
		 * it should be.  But there appears to be cases during
		 * initialization (when the PHY is reset and re-initialized)
		 * when we get a link alarm, but the status bit is 0 when we
		 * read it.  Rather than trying to assure this never happens
		 * (and never being certain), just ignore it.
		 *
		 * ASSERT(Value & LASI_STATUS_LS_ALARM);
		 */

		/* Now get and set the link state */
		LinkState = sxg_get_link_state(adapter);
		sxg_link_state(adapter, LinkState);
		DBG_ERROR("SXG: Link Alarm occurred.  Link is %s\n",
			  ((LinkState == SXG_LINK_UP) ? "UP" : "DOWN"));
		if (LinkState == SXG_LINK_UP) {
			netif_carrier_on(netdev);
			netif_tx_start_all_queues(netdev);
		} else {
			netif_tx_stop_all_queues(netdev);
			netif_carrier_off(netdev);
		}
	} else {
		/*
		 * XXXTODO - Assuming Link Attention is only being generated
		 * for the Link Alarm pin (and not for a XAUI Link Status change)
		 * , then it's impossible to get here.  Yet we've gotten here
		 * twice (under extreme conditions - bouncing the link up and
		 * down many times a second).  Needs further investigation.
		 */
		DBG_ERROR("SXG: sxg_link_event: Can't get here!\n");
		DBG_ERROR("SXG: Link Status == 0x%08X.\n", Value);
	}
	DBG_ERROR("EXIT %s\n", __func__);
}
/*
 * sxg_get_link_state - Determine if the link is up or down
 *
 * Arguments -
 *	adapter		- A pointer to our adapter structure
 *
 * Return
 *	Link State
 */
static enum SXG_LINK_STATE sxg_get_link_state(struct adapter_t *adapter)
{
	int status;
	u32 Value;

	DBG_ERROR("ENTER %s\n", __func__);

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "GetLink",
		  adapter, 0, 0, 0);

	/*
	 * Per the Xenpak spec (and the IEEE 10Gb spec?), the link is up if
	 * the following 3 bits (from 3 different MDIO registers) are all true.
	 */

	/* MIIM_DEV_PHY_PMA - PHY PMA/PMD module */
	status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA,
				   /* PMA/PMD Receive Signal Detect register */
				   PHY_PMA_RCV_DET,
				   &Value);
	if (status != STATUS_SUCCESS)
		goto bad;

	/* If PMA/PMD receive signal detect is 0, then the link is down */
	if (!(Value & PMA_RCV_DETECT))
		return (SXG_LINK_DOWN);

	/* MIIM_DEV_PHY_PCS - PHY PCS module */
	status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PCS,
				   /* PCS 10GBASE-R Status 1 register */
				   PHY_PCS_10G_STATUS1,
				   &Value);
	if (status != STATUS_SUCCESS)
		goto bad;

	/* If PCS is not locked to receive blocks, then the link is down */
	if (!(Value & PCS_10B_BLOCK_LOCK))
		return (SXG_LINK_DOWN);

	status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_XS,/* PHY XS module */
				   /* XS Lane Status register */
				   PHY_XS_LANE_STATUS,
				   &Value);
	if (status != STATUS_SUCCESS)
		goto bad;

	/* If XS transmit lanes are not aligned, then the link is down */
	if (!(Value & XS_LANE_ALIGN))
		return (SXG_LINK_DOWN);

	/* All 3 bits are true, so the link is up */
	DBG_ERROR("EXIT %s\n", __func__);

	return (SXG_LINK_UP);

 bad:
	/* An error occurred reading an MDIO register. This shouldn't happen. */
	DBG_ERROR("Error reading an MDIO register!\n");
	return (SXG_LINK_DOWN);
}
static void sxg_indicate_link_state(struct adapter_t *adapter,
				    enum SXG_LINK_STATE LinkState)
{
	if (adapter->LinkState == SXG_LINK_UP) {
		DBG_ERROR("%s: LINK now UP, call netif_start_queue\n",
			  __func__);
		netif_start_queue(adapter->netdev);
	} else {
		DBG_ERROR("%s: LINK now DOWN, call netif_stop_queue\n",
			  __func__);
		netif_stop_queue(adapter->netdev);
	}
}
/*
 * sxg_change_mtu - Change the Maximum Transfer Unit
 *
 * @returns 0 on success, negative on failure
 */
int sxg_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct adapter_t *adapter = (struct adapter_t *) netdev_priv(netdev);

	if (!((new_mtu == SXG_DEFAULT_MTU) || (new_mtu == SXG_JUMBO_MTU)))
		return -EINVAL;

	if (new_mtu == netdev->mtu)
		return 0;

	netdev->mtu = new_mtu;

	if (new_mtu == SXG_JUMBO_MTU) {
		adapter->JumboEnabled = TRUE;
		adapter->FrameSize = JUMBOMAXFRAME;
		adapter->ReceiveBufferSize = SXG_RCV_JUMBO_BUFFER_SIZE;
	} else {
		adapter->JumboEnabled = FALSE;
		adapter->FrameSize = ETHERMAXFRAME;
		adapter->ReceiveBufferSize = SXG_RCV_DATA_BUFFER_SIZE;
	}

	sxg_entry_halt(netdev);
	sxg_entry_open(netdev);
	return 0;
}
/*
 * sxg_link_state - Set the link state and if necessary, indicate.
 *	This routine is the central point of processing for all link state
 *	changes.  Nothing else in the driver should alter the link state or
 *	perform link state indications
 *
 * Arguments -
 *	adapter		- A pointer to our adapter structure
 *	LinkState	- The link state
 *
 * Return
 *	None
 */
static void sxg_link_state(struct adapter_t *adapter,
			   enum SXG_LINK_STATE LinkState)
{
	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "LnkINDCT",
		  adapter, LinkState, adapter->LinkState, adapter->State);

	DBG_ERROR("ENTER %s\n", __func__);

	/*
	 * Hold the adapter lock during this routine.  Maybe move
	 * the lock to the caller.
	 */
	/* IMP TODO : Check if we can survive without taking this lock */
	/* spin_lock(&adapter->AdapterLock); */
	if (LinkState == adapter->LinkState) {
		/* Nothing changed.. */
		/* spin_unlock(&adapter->AdapterLock); */
		DBG_ERROR("EXIT #0 %s. Link status = %d\n",
			  __func__, LinkState);
		return;
	}
	/* Save the adapter state */
	adapter->LinkState = LinkState;

	/* Drop the lock and indicate link state */
	/* spin_unlock(&adapter->AdapterLock); */
	DBG_ERROR("EXIT #1 %s\n", __func__);

	sxg_indicate_link_state(adapter, LinkState);
}
/*
 * sxg_write_mdio_reg - Write to a register on the MDIO bus
 *
 * Arguments -
 *	adapter		- A pointer to our adapter structure
 *	DevAddr		- MDIO device number being addressed
 *	RegAddr		- register address for the specified MDIO device
 *	Value		- value to write to the MDIO register
 *
 * Return
 *	status
 */
static int sxg_write_mdio_reg(struct adapter_t *adapter,
			      u32 DevAddr, u32 RegAddr, u32 Value)
{
	struct sxg_hw_regs *HwRegs = adapter->HwRegs;
	/* Address operation (written to MIIM field reg) */
	u32 AddrOp;
	/* Write operation (written to MIIM field reg) */
	u32 WriteOp;
	u32 Cmd;	/* Command (written to MIIM command reg) */
	u32 ValueRead;
	u32 Timeout;

	/* DBG_ERROR("ENTER %s\n", __func__); */

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "WrtMDIO",
		  adapter, 0, 0, 0);

	/* Ensure values don't exceed field width */
	DevAddr &= 0x001F;	/* 5-bit field */
	RegAddr &= 0xFFFF;	/* 16-bit field */
	Value &= 0xFFFF;	/* 16-bit field */

	/* Set MIIM field register bits for an MIIM address operation */
	AddrOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
	    (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
	    (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
	    (MIIM_OP_ADDR << AXGMAC_AMIIM_FIELD_OP_SHIFT) | RegAddr;

	/* Set MIIM field register bits for an MIIM write operation */
	WriteOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
	    (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
	    (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
	    (MIIM_OP_WRITE << AXGMAC_AMIIM_FIELD_OP_SHIFT) | Value;

	/* Set MIIM command register bits to execute an MIIM command */
	Cmd = AXGMAC_AMIIM_CMD_START | AXGMAC_AMIIM_CMD_10G_OPERATION;

	/* Reset the command register command bit (in case it's not 0) */
	WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);

	/* MIIM write to set the address of the specified MDIO register */
	WRITE_REG(HwRegs->MacAmiimField, AddrOp, TRUE);

	/* Write to MIIM Command Register to execute to address operation */
	WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);

	/* Poll AMIIM Indicator register to wait for completion */
	Timeout = SXG_LINK_TIMEOUT;
	do {
		udelay(100);	/* Timeout in 100us units */
		READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
		if (--Timeout == 0) {
			return (STATUS_FAILURE);
		}
	} while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);

	/* Reset the command register command bit */
	WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);

	/* MIIM write to set up an MDIO write operation */
	WRITE_REG(HwRegs->MacAmiimField, WriteOp, TRUE);

	/* Write to MIIM Command Register to execute the write operation */
	WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);

	/* Poll AMIIM Indicator register to wait for completion */
	Timeout = SXG_LINK_TIMEOUT;
	do {
		udelay(100);	/* Timeout in 100us units */
		READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
		if (--Timeout == 0) {
			return (STATUS_FAILURE);
		}
	} while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);

	/* DBG_ERROR("EXIT %s\n", __func__); */

	return (STATUS_SUCCESS);
}
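/*
 * Illustrative sketch, not part of the original driver: both MDIO
 * routines poll the AMIIM indicator with the same 100us-per-tick
 * timeout loop.  Factored out under those assumptions, the pattern
 * looks like this:
 */
static inline int sxg_example_wait_amiim_idle(struct sxg_hw_regs *HwRegs)
{
	u32 ValueRead;
	u32 Timeout = SXG_LINK_TIMEOUT;

	do {
		udelay(100);	/* Timeout is in 100us units */
		READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
		if (--Timeout == 0)
			return (STATUS_FAILURE);
	} while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
	return (STATUS_SUCCESS);
}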
/*
 * sxg_read_mdio_reg - Read a register on the MDIO bus
 *
 * Arguments -
 *	adapter		- A pointer to our adapter structure
 *	DevAddr		- MDIO device number being addressed
 *	RegAddr		- register address for the specified MDIO device
 *	pValue		- pointer to where to put data read from the MDIO register
 *
 * Return
 *	status
 */
static int sxg_read_mdio_reg(struct adapter_t *adapter,
			     u32 DevAddr, u32 RegAddr, u32 *pValue)
{
	struct sxg_hw_regs *HwRegs = adapter->HwRegs;
	u32 AddrOp;	/* Address operation (written to MIIM field reg) */
	u32 ReadOp;	/* Read operation (written to MIIM field reg) */
	u32 Cmd;	/* Command (written to MIIM command reg) */
	u32 ValueRead;
	u32 Timeout;

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "WrtMDIO",
		  adapter, 0, 0, 0);
	DBG_ERROR("ENTER %s\n", __func__);

	/* Ensure values don't exceed field width */
	DevAddr &= 0x001F;	/* 5-bit field */
	RegAddr &= 0xFFFF;	/* 16-bit field */

	/* Set MIIM field register bits for an MIIM address operation */
	AddrOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
	    (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
	    (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
	    (MIIM_OP_ADDR << AXGMAC_AMIIM_FIELD_OP_SHIFT) | RegAddr;

	/* Set MIIM field register bits for an MIIM read operation */
	ReadOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
	    (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
	    (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
	    (MIIM_OP_READ << AXGMAC_AMIIM_FIELD_OP_SHIFT);

	/* Set MIIM command register bits to execute an MIIM command */
	Cmd = AXGMAC_AMIIM_CMD_START | AXGMAC_AMIIM_CMD_10G_OPERATION;

	/* Reset the command register command bit (in case it's not 0) */
	WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);

	/* MIIM write to set the address of the specified MDIO register */
	WRITE_REG(HwRegs->MacAmiimField, AddrOp, TRUE);

	/* Write to MIIM Command Register to execute to address operation */
	WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);

	/* Poll AMIIM Indicator register to wait for completion */
	Timeout = SXG_LINK_TIMEOUT;
	do {
		udelay(100);	/* Timeout in 100us units */
		READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
		if (--Timeout == 0) {
			DBG_ERROR("EXIT %s with STATUS_FAILURE 1\n", __func__);
			return (STATUS_FAILURE);
		}
	} while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);

	/* Reset the command register command bit */
	WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);

	/* MIIM write to set up an MDIO register read operation */
	WRITE_REG(HwRegs->MacAmiimField, ReadOp, TRUE);

	/* Write to MIIM Command Register to execute the read operation */
	WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);

	/* Poll AMIIM Indicator register to wait for completion */
	Timeout = SXG_LINK_TIMEOUT;
	do {
		udelay(100);	/* Timeout in 100us units */
		READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
		if (--Timeout == 0) {
			DBG_ERROR("EXIT %s with STATUS_FAILURE 2\n", __func__);
			return (STATUS_FAILURE);
		}
	} while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);

	/* Read the MDIO register data back from the field register */
	READ_REG(HwRegs->MacAmiimField, *pValue);
	*pValue &= 0xFFFF;	/* data is in the lower 16 bits */

	DBG_ERROR("EXIT %s\n", __func__);

	return (STATUS_SUCCESS);
}
/*
 * Functions to obtain the CRC corresponding to the destination mac address.
 * This is a standard ethernet CRC in that it is a 32-bit, reflected CRC using
 * the polynomial:
 *   x^32 + x^26 + x^23 + x^22 + x^16 + x^12 + x^11 + x^10 + x^8 + x^7 + x^5
 *   + x^4 + x^2 + x + 1.
 *
 * After the CRC for the 6 bytes is generated (but before the value is
 * complemented), we must then transpose the value and return bits 30-23.
 */
static u32 sxg_crc_table[256];	/* Table of CRC's for all possible byte values */
static u32 sxg_crc_init;	/* Is table initialized */

/* Construct the CRC32 table */
static void sxg_mcast_init_crc32(void)
{
	u32 c;			/* CRC shift reg */
	u32 e = 0;		/* Poly X-or pattern */
	int i;			/* counter */
	int k;			/* byte being shifted into crc */

	static int p[] = { 0, 1, 2, 4, 5, 7, 8, 10, 11, 12, 16, 22, 23, 26 };

	for (i = 0; i < sizeof(p) / sizeof(int); i++) {
		e |= 1L << (31 - p[i]);
	}

	for (i = 1; i < 256; i++) {
		c = i;
		for (k = 8; k; k--) {
			c = c & 1 ? (c >> 1) ^ e : c >> 1;
		}
		sxg_crc_table[i] = c;
	}
}
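/*
 * Note (illustrative): the bit positions in p[] above OR together to
 * e == 0xEDB88320, the reflected form of the standard Ethernet CRC-32
 * polynomial 0x04C11DB7, so sxg_crc_table ends up identical to the
 * usual table-driven CRC-32 tables.
 */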
/*
 * Return the MAC hash as described above.
 */
static unsigned char sxg_mcast_get_mac_hash(char *macaddr)
{
	u32 crc;
	char *p;
	int i;
	unsigned char machash = 0;

	if (!sxg_crc_init) {
		sxg_mcast_init_crc32();
		sxg_crc_init = 1;
	}

	crc = 0xFFFFFFFF;	/* Preload shift register, per crc-32 spec */
	for (i = 0, p = macaddr; i < 6; ++p, ++i) {
		crc = (crc >> 8) ^ sxg_crc_table[(crc ^ *p) & 0xFF];
	}

	/* Return bits 1-8, transposed */
	for (i = 1; i < 9; i++) {
		machash |= (((crc >> i) & 1) << (8 - i));
	}

	return (machash);
}
static void sxg_mcast_set_mask(struct adapter_t *adapter)
{
	struct sxg_ucode_regs *sxg_regs = adapter->UcodeRegs;

	DBG_ERROR("%s ENTER (%s) MacFilter[%x] mask[%llx]\n", __func__,
		  adapter->netdev->name, (unsigned int)adapter->MacFilter,
		  adapter->MulticastMask);

	if (adapter->MacFilter & (MAC_ALLMCAST | MAC_PROMISC)) {
		/*
		 * Turn on all multicast addresses.  We have to do this for
		 * promiscuous mode as well as ALLMCAST mode.  It saves the
		 * Microcode from having to keep state about the MAC configuration
		 */
		/* DBG_ERROR("sxg: %s MacFilter = MAC_ALLMCAST | MAC_PROMISC\n \
		 *	     SLUT MODE!!!\n",__func__);
		 */
		WRITE_REG(sxg_regs->McastLow, 0xFFFFFFFF, FLUSH);
		WRITE_REG(sxg_regs->McastHigh, 0xFFFFFFFF, FLUSH);
		/* DBG_ERROR("%s (%s) WRITE to slic_regs slic_mcastlow&high \
		 * 0xFFFFFFFF\n",__func__, adapter->netdev->name);
		 */
	} else {
		/*
		 * Commit our multicast mask to the SLIC by writing to the
		 * multicast address mask registers
		 */
		DBG_ERROR("%s (%s) WRITE mcastlow[%lx] mcasthigh[%lx]\n",
			  __func__, adapter->netdev->name,
			  ((ulong) (adapter->MulticastMask & 0xFFFFFFFF)),
			  ((ulong)
			   ((adapter->MulticastMask >> 32) & 0xFFFFFFFF)));

		WRITE_REG(sxg_regs->McastLow,
			  (u32) (adapter->MulticastMask & 0xFFFFFFFF), FLUSH);
		WRITE_REG(sxg_regs->McastHigh,
			  (u32) ((adapter->
				  MulticastMask >> 32) & 0xFFFFFFFF), FLUSH);
	}
}
static void sxg_mcast_set_bit(struct adapter_t *adapter, char *address)
{
	unsigned char crcpoly;

	/* Get the CRC polynomial for the mac address */
	crcpoly = sxg_mcast_get_mac_hash(address);

	/*
	 * We only have space on the SLIC for 64 entries.  Lop
	 * off the top two bits. (2^6 = 64)
	 */
	crcpoly &= 0x3F;

	/* OR in the new bit into our 64 bit mask. */
	adapter->MulticastMask |= (u64) 1 << crcpoly;
}
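/*
 * Worked example (illustrative): if the hash for a multicast address
 * comes back as 0x7A, masking to six bits gives 0x3A, so the routine
 * above sets bit 58 of the 64-bit MulticastMask.
 */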
/*
 * Function takes MAC addresses from dev_mc_list and generates the Mask
 */
static void sxg_set_mcast_addr(struct adapter_t *adapter)
{
	struct dev_mc_list *mclist;
	struct net_device *dev = adapter->netdev;
	int i;

	if (adapter->MacFilter & (MAC_ALLMCAST | MAC_MCAST)) {
		for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
		     i++, mclist = mclist->next) {
			sxg_mcast_set_bit(adapter, mclist->da_addr);
		}
	}
	sxg_mcast_set_mask(adapter);
}
static void sxg_mcast_set_list(struct net_device *dev)
{
	struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);

	if (dev->flags & IFF_PROMISC)
		adapter->MacFilter |= MAC_PROMISC;
	if (dev->flags & IFF_MULTICAST)
		adapter->MacFilter |= MAC_MCAST;
	if (dev->flags & IFF_ALLMULTI)
		adapter->MacFilter |= MAC_ALLMCAST;

	/* XXX handle other flags as well */
	sxg_set_mcast_addr(adapter);
}
void sxg_free_sgl_buffers(struct adapter_t *adapter)
{
	struct list_entry *ple;
	struct sxg_scatter_gather *Sgl;

	while (!(IsListEmpty(&adapter->AllSglBuffers))) {
		ple = RemoveHeadList(&adapter->AllSglBuffers);
		Sgl = container_of(ple, struct sxg_scatter_gather, AllList);
		kfree(Sgl);
		adapter->AllSglBufferCount--;
	}
}
void sxg_free_rcvblocks(struct adapter_t *adapter)
{
	u32 i;
	void *temp_RcvBlock;
	struct list_entry *ple;
	struct sxg_rcv_block_hdr *RcvBlockHdr;
	struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr;

	ASSERT((adapter->state == SXG_STATE_INITIALIZING) ||
	       (adapter->state == SXG_STATE_HALTING));
	while (!(IsListEmpty(&adapter->AllRcvBlocks))) {

		ple = RemoveHeadList(&adapter->AllRcvBlocks);
		RcvBlockHdr = container_of(ple, struct sxg_rcv_block_hdr, AllList);

		if (RcvBlockHdr->VirtualAddress) {
			temp_RcvBlock = RcvBlockHdr->VirtualAddress;

			for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK;
			     i++, temp_RcvBlock += SXG_RCV_DATA_HDR_SIZE) {
				RcvDataBufferHdr =
					(struct sxg_rcv_data_buffer_hdr *)temp_RcvBlock;
				SXG_FREE_RCV_PACKET(RcvDataBufferHdr);
			}
		}

		pci_free_consistent(adapter->pcidev,
				    SXG_RCV_BLOCK_SIZE(SXG_RCV_DATA_HDR_SIZE),
				    RcvBlockHdr->VirtualAddress,
				    RcvBlockHdr->PhysicalAddress);
		adapter->AllRcvBlockCount--;
	}
	ASSERT(adapter->AllRcvBlockCount == 0);
	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XFrRBlk",
		  adapter, 0, 0, 0);
}
void sxg_free_mcast_addrs(struct adapter_t *adapter)
{
	struct sxg_multicast_address *address;

	while (adapter->MulticastAddrs) {
		address = adapter->MulticastAddrs;
		adapter->MulticastAddrs = address->Next;
		kfree(address);
	}

	adapter->MulticastMask = 0;
}
void sxg_unmap_resources(struct adapter_t *adapter)
{
	if (adapter->HwRegs) {
		iounmap((void *)adapter->HwRegs);
	}
	if (adapter->UcodeRegs) {
		iounmap((void *)adapter->UcodeRegs);
	}

	ASSERT(adapter->AllRcvBlockCount == 0);
	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XFrRBlk",
		  adapter, 0, 0, 0);
}
/*
 * sxg_free_resources - Free everything allocated in SxgAllocateResources
 *
 * Arguments -
 *	adapter		- A pointer to our adapter structure
 *
 * Return
 *	none
 */
void sxg_free_resources(struct adapter_t *adapter)
{
	u32 RssIds, IsrCount;

	RssIds = SXG_RSS_CPU_COUNT(adapter);
	IsrCount = adapter->msi_enabled ? RssIds : 1;

	if (adapter->BasicAllocations == FALSE) {
		/*
		 * No allocations have been made, including spinlocks,
		 * or listhead initializations.  Return.
		 */
		return;
	}

	if (!(IsListEmpty(&adapter->AllRcvBlocks))) {
		sxg_free_rcvblocks(adapter);
	}
	if (!(IsListEmpty(&adapter->AllSglBuffers))) {
		sxg_free_sgl_buffers(adapter);
	}

	if (adapter->XmtRingZeroIndex) {
		pci_free_consistent(adapter->pcidev,
				    sizeof(u32),
				    adapter->XmtRingZeroIndex,
				    adapter->PXmtRingZeroIndex);
	}
	if (adapter->Isr) {
		pci_free_consistent(adapter->pcidev,
				    sizeof(u32) * IsrCount,
				    adapter->Isr, adapter->PIsr);
	}

	if (adapter->EventRings) {
		pci_free_consistent(adapter->pcidev,
				    sizeof(struct sxg_event_ring) * RssIds,
				    adapter->EventRings, adapter->PEventRings);
	}
	if (adapter->RcvRings) {
		pci_free_consistent(adapter->pcidev,
				    sizeof(struct sxg_rcv_ring) * 1,
				    adapter->RcvRings,
				    adapter->PRcvRings);
		adapter->RcvRings = NULL;
	}

	if (adapter->XmtRings) {
		pci_free_consistent(adapter->pcidev,
				    sizeof(struct sxg_xmt_ring) * 1,
				    adapter->XmtRings,
				    adapter->PXmtRings);
		adapter->XmtRings = NULL;
	}

	if (adapter->ucode_stats) {
		pci_unmap_single(adapter->pcidev,
				 sizeof(struct sxg_ucode_stats),
				 adapter->pucode_stats, PCI_DMA_FROMDEVICE);
		adapter->ucode_stats = NULL;
	}

	/* Unmap register spaces */
	sxg_unmap_resources(adapter);

	sxg_free_mcast_addrs(adapter);

	adapter->BasicAllocations = FALSE;
}
/*
 * sxg_allocate_complete -
 *
 * This routine is called when a memory allocation has completed.
 *
 * Arguments -
 *	struct adapter_t *	- Our adapter structure
 *	VirtualAddress		- Memory virtual address
 *	PhysicalAddress		- Memory physical address
 *	Length			- Length of memory allocated (or 0)
 *	Context			- The type of buffer allocated
 *
 * Return
 *	status
 */
static int sxg_allocate_complete(struct adapter_t *adapter,
				 void *VirtualAddress,
				 dma_addr_t PhysicalAddress,
				 u32 Length, enum sxg_buffer_type Context)
{
	int status = 0;

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AllocCmp",
		  adapter, VirtualAddress, Length, Context);
	ASSERT(atomic_read(&adapter->pending_allocations));
	atomic_dec(&adapter->pending_allocations);

	switch (Context) {
	case SXG_BUFFER_TYPE_RCV:
		status = sxg_allocate_rcvblock_complete(adapter,
							VirtualAddress,
							PhysicalAddress, Length);
		break;
	case SXG_BUFFER_TYPE_SGL:
		sxg_allocate_sgl_buffer_complete(adapter, (struct sxg_scatter_gather *)
						 VirtualAddress,
						 PhysicalAddress, Length);
		break;
	}
	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlocCmp",
		  adapter, VirtualAddress, Length, Context);

	return status;
}
/*
 * sxg_allocate_buffer_memory - Shared memory allocation routine used for
 *		synchronous and asynchronous buffer allocations
 *
 * Arguments -
 *	adapter		- A pointer to our adapter structure
 *	Size		- block size to allocate
 *	BufferType	- Type of buffer to allocate
 *
 * Return
 *	status
 */
static int sxg_allocate_buffer_memory(struct adapter_t *adapter,
				      u32 Size, enum sxg_buffer_type BufferType)
{
	int status;
	void *Buffer;
	dma_addr_t pBuffer;

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AllocMem",
		  adapter, Size, BufferType, 0);
	/*
	 * Grab the adapter lock and check the state. If we're in anything other
	 * than INITIALIZING or RUNNING state, fail.  This is to prevent
	 * allocations in an improper driver state
	 */

	atomic_inc(&adapter->pending_allocations);

	if (BufferType != SXG_BUFFER_TYPE_SGL)
		Buffer = pci_alloc_consistent(adapter->pcidev, Size, &pBuffer);
	else {
		Buffer = kzalloc(Size, GFP_ATOMIC);
		pBuffer = (dma_addr_t)NULL;
	}

	if (Buffer == NULL) {
		/*
		 * Decrement the AllocationsPending count while holding
		 * the lock.  Pause processing relies on this
		 */
		atomic_dec(&adapter->pending_allocations);
		SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AlcMemF1",
			  adapter, Size, BufferType, 0);
		return (STATUS_RESOURCES);
	}
	status = sxg_allocate_complete(adapter, Buffer, pBuffer, Size, BufferType);

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlocMem",
		  adapter, Size, BufferType, status);

	return status;
}
/*
 * sxg_allocate_rcvblock_complete - Complete a receive descriptor
 *				    block allocation
 *
 * Arguments -
 *	adapter		- A pointer to our adapter structure
 *	RcvBlock	- receive block virtual address
 *	PhysicalAddress	- Physical address
 *	Length		- Memory length
 *
 * Return - status
 */
static int sxg_allocate_rcvblock_complete(struct adapter_t *adapter,
					  void *RcvBlock,
					  dma_addr_t PhysicalAddress,
					  u32 Length)
{
	u32 i;
	u32 BufferSize = adapter->ReceiveBufferSize;
	u64 Paddr;
	void *temp_RcvBlock;
	struct sxg_rcv_block_hdr *RcvBlockHdr;
	struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr;
	struct sxg_rcv_descriptor_block *RcvDescriptorBlock;
	struct sxg_rcv_descriptor_block_hdr *RcvDescriptorBlockHdr;

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AlRcvBlk",
		  adapter, RcvBlock, Length, 0);
	if (RcvBlock == NULL)
		goto fail;

	memset(RcvBlock, 0, Length);
	ASSERT((BufferSize == SXG_RCV_DATA_BUFFER_SIZE) ||
	       (BufferSize == SXG_RCV_JUMBO_BUFFER_SIZE));
	ASSERT(Length == SXG_RCV_BLOCK_SIZE(SXG_RCV_DATA_HDR_SIZE));
	/*
	 * First, initialize the contained pool of receive data buffers.
	 * This initialization requires NBL/NB/MDL allocations; if any of
	 * them fail, free the block and return without queueing the shared
	 * memory
	 */
	/* RcvDataBuffer = RcvBlock; */
	temp_RcvBlock = RcvBlock;
	for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK;
	     i++, temp_RcvBlock += SXG_RCV_DATA_HDR_SIZE) {
		RcvDataBufferHdr = (struct sxg_rcv_data_buffer_hdr *)
					temp_RcvBlock;
		/* For FREE macro assertion */
		RcvDataBufferHdr->State = SXG_BUFFER_UPSTREAM;
		SXG_ALLOCATE_RCV_PACKET(adapter, RcvDataBufferHdr, BufferSize);
		if (RcvDataBufferHdr->SxgDumbRcvPacket == NULL)
			goto fail;
	}

	/*
	 * Place this entire block of memory on the AllRcvBlocks queue so it
	 * can be freed later
	 */
	RcvBlockHdr = (struct sxg_rcv_block_hdr *) ((unsigned char *)RcvBlock +
			SXG_RCV_BLOCK_HDR_OFFSET(SXG_RCV_DATA_HDR_SIZE));
	RcvBlockHdr->VirtualAddress = RcvBlock;
	RcvBlockHdr->PhysicalAddress = PhysicalAddress;
	spin_lock(&adapter->RcvQLock);
	adapter->AllRcvBlockCount++;
	InsertTailList(&adapter->AllRcvBlocks, &RcvBlockHdr->AllList);
	spin_unlock(&adapter->RcvQLock);

	/*
	 * Now free the contained receive data buffers that we
	 * initialized above
	 */
	temp_RcvBlock = RcvBlock;
	for (i = 0, Paddr = PhysicalAddress;
	     i < SXG_RCV_DESCRIPTORS_PER_BLOCK;
	     i++, Paddr += SXG_RCV_DATA_HDR_SIZE,
	     temp_RcvBlock += SXG_RCV_DATA_HDR_SIZE) {
		RcvDataBufferHdr =
			(struct sxg_rcv_data_buffer_hdr *)temp_RcvBlock;
		spin_lock(&adapter->RcvQLock);
		SXG_FREE_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr);
		spin_unlock(&adapter->RcvQLock);
	}

	/* Locate the descriptor block and put it on a separate free queue */
	RcvDescriptorBlock =
	    (struct sxg_rcv_descriptor_block *) ((unsigned char *)RcvBlock +
				SXG_RCV_DESCRIPTOR_BLOCK_OFFSET
				(SXG_RCV_DATA_HDR_SIZE));
	RcvDescriptorBlockHdr =
	    (struct sxg_rcv_descriptor_block_hdr *) ((unsigned char *)RcvBlock +
				SXG_RCV_DESCRIPTOR_BLOCK_HDR_OFFSET
				(SXG_RCV_DATA_HDR_SIZE));
	RcvDescriptorBlockHdr->VirtualAddress = RcvDescriptorBlock;
	RcvDescriptorBlockHdr->PhysicalAddress = Paddr;
	spin_lock(&adapter->RcvQLock);
	SXG_FREE_RCV_DESCRIPTOR_BLOCK(adapter, RcvDescriptorBlockHdr);
	spin_unlock(&adapter->RcvQLock);
	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlRBlk",
		  adapter, RcvBlock, Length, 0);
	return STATUS_SUCCESS;

fail:
	/* Free any allocated resources */
	if (RcvBlock) {
		temp_RcvBlock = RcvBlock;
		for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK;
		     i++, temp_RcvBlock += SXG_RCV_DATA_HDR_SIZE) {
			RcvDataBufferHdr =
			    (struct sxg_rcv_data_buffer_hdr *)temp_RcvBlock;
			SXG_FREE_RCV_PACKET(RcvDataBufferHdr);
		}
		pci_free_consistent(adapter->pcidev,
				    Length, RcvBlock, PhysicalAddress);
	}
	DBG_ERROR("%s: OUT OF RESOURCES\n", __func__);
	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "RcvAFail",
		  adapter, adapter->FreeRcvBufferCount,
		  adapter->FreeRcvBlockCount, adapter->AllRcvBlockCount);
	adapter->Stats.NoMem++;
	/* As allocation failed, free all previously allocated blocks.. */
	/* sxg_free_rcvblocks(adapter); */

	return STATUS_RESOURCES;
}

/*
 * sxg_allocate_sgl_buffer_complete - Complete a SGL buffer allocation
 *
 * Arguments -
 *	adapter		- A pointer to our adapter structure
 *	SxgSgl		- struct sxg_scatter_gather buffer
 *	PhysicalAddress	- Physical address
 *	Length		- Memory length
 *
 * Return - None
 */
static void sxg_allocate_sgl_buffer_complete(struct adapter_t *adapter,
					     struct sxg_scatter_gather *SxgSgl,
					     dma_addr_t PhysicalAddress,
					     u32 Length)
{
	unsigned long sgl_flags;

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AlSglCmp",
		  adapter, SxgSgl, Length, 0);
	spin_lock_irqsave(&adapter->SglQLock, sgl_flags);
	adapter->AllSglBufferCount++;
	/* PhysicalAddress; */
	SxgSgl->PhysicalAddress = PhysicalAddress;
	/* Initialize backpointer once */
	SxgSgl->adapter = adapter;
	InsertTailList(&adapter->AllSglBuffers, &SxgSgl->AllList);
	spin_unlock_irqrestore(&adapter->SglQLock, sgl_flags);
	SxgSgl->State = SXG_BUFFER_BUSY;
	SXG_FREE_SGL_BUFFER(adapter, SxgSgl, NULL);
	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlSgl",
		  adapter, SxgSgl, Length, 0);
}
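
/*
 * The SXG_BUFFER_BUSY assignment immediately before SXG_FREE_SGL_BUFFER is
 * presumably there to satisfy the state bookkeeping inside the FREE macro;
 * the net effect of this routine is simply to record the new SGL buffer on
 * AllSglBuffers and then place it on the free list, ready for use.
 */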

static int sxg_adapter_set_hwaddr(struct adapter_t *adapter)
{
	/*
	 * DBG_ERROR ("%s ENTER card->config_set[%x] port[%d] physport[%d] \
	 * funct#[%d]\n", __func__, card->config_set,
	 * adapter->port, adapter->physport, adapter->functionnumber);
	 *
	 * sxg_dbg_macaddrs(adapter);
	 */
	/*
	 * DBG_ERROR ("%s AFTER copying from config.macinfo into currmacaddr\n",
	 *	      __func__);
	 */
	/* sxg_dbg_macaddrs(adapter); */

	struct net_device *dev = adapter->netdev;

	if (!dev)
		printk("sxg: Dev is Null\n");

	DBG_ERROR("%s ENTER (%s)\n", __func__, adapter->netdev->name);

	if (netif_running(dev))
		return -EBUSY;
	if (!adapter)
		return -EBUSY;

	if (!(adapter->currmacaddr[0] ||
	      adapter->currmacaddr[1] ||
	      adapter->currmacaddr[2] ||
	      adapter->currmacaddr[3] ||
	      adapter->currmacaddr[4] || adapter->currmacaddr[5])) {
		memcpy(adapter->currmacaddr, adapter->macaddr, 6);
	}
	if (adapter->netdev) {
		memcpy(adapter->netdev->dev_addr, adapter->currmacaddr, 6);
		memcpy(adapter->netdev->perm_addr, adapter->currmacaddr, 6);
	}
	/* DBG_ERROR ("%s EXIT port %d\n", __func__, adapter->port); */
	sxg_dbg_macaddrs(adapter);

	return 0;
}

static int sxg_mac_set_address(struct net_device *dev, void *ptr)
{
	struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
	struct sockaddr *addr = ptr;

	DBG_ERROR("%s ENTER (%s)\n", __func__, adapter->netdev->name);

	if (netif_running(dev))
		return -EBUSY;
	if (!adapter)
		return -EBUSY;

	DBG_ERROR("sxg: %s (%s) curr %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
		  __func__, adapter->netdev->name, adapter->currmacaddr[0],
		  adapter->currmacaddr[1], adapter->currmacaddr[2],
		  adapter->currmacaddr[3], adapter->currmacaddr[4],
		  adapter->currmacaddr[5]);
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	memcpy(adapter->currmacaddr, addr->sa_data, dev->addr_len);
	DBG_ERROR("sxg: %s (%s) new %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
		  __func__, adapter->netdev->name, adapter->currmacaddr[0],
		  adapter->currmacaddr[1], adapter->currmacaddr[2],
		  adapter->currmacaddr[3], adapter->currmacaddr[4],
		  adapter->currmacaddr[5]);

	sxg_config_set(adapter, TRUE);
	return 0;
}

/*
 * SXG DRIVER FUNCTIONS (below)
 *
 * sxg_initialize_adapter - Initialize adapter
 *
 * Arguments -
 *	adapter		- A pointer to our adapter structure
 *
 * Return - int
 */
static int sxg_initialize_adapter(struct adapter_t *adapter)
{
	u32 RssIds, IsrCount;
	u32 i;
	int status;
	int sxg_rcv_ring_size = SXG_RCV_RING_SIZE;

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "InitAdpt",
		  adapter, 0, 0, 0);

	RssIds = 1;		/* XXXTODO SXG_RSS_CPU_COUNT(adapter); */
	IsrCount = adapter->msi_enabled ? RssIds : 1;

	/*
	 * Sanity check SXG_UCODE_REGS structure definition to
	 * make sure the length is correct
	 */
	ASSERT(sizeof(struct sxg_ucode_regs) == SXG_REGISTER_SIZE_PER_CPU);

	/* Disable interrupts */
	SXG_DISABLE_ALL_INTERRUPTS(adapter);

	/* Set MTU */
	ASSERT((adapter->FrameSize == ETHERMAXFRAME) ||
	       (adapter->FrameSize == JUMBOMAXFRAME));
	WRITE_REG(adapter->UcodeRegs[0].LinkMtu, adapter->FrameSize, TRUE);

	/* Set event ring base address and size */
	WRITE_REG64(adapter,
		    adapter->UcodeRegs[0].EventBase, adapter->PEventRings, 0);
	WRITE_REG(adapter->UcodeRegs[0].EventSize, EVENT_RING_SIZE, TRUE);

	/* Per-ISR initialization */
	for (i = 0; i < IsrCount; i++) {
		u64 Addr;

		/* Set interrupt status pointer */
		Addr = adapter->PIsr + (i * sizeof(u32));
		WRITE_REG64(adapter, adapter->UcodeRegs[i].Isp, Addr, i);
	}

	/* XMT ring zero index */
	WRITE_REG64(adapter,
		    adapter->UcodeRegs[0].SPSendIndex,
		    adapter->PXmtRingZeroIndex, 0);

	/* Per-RSS initialization */
	for (i = 0; i < RssIds; i++) {
		/* Release all event ring entries to the Microcode */
		WRITE_REG(adapter->UcodeRegs[i].EventRelease, EVENT_RING_SIZE,
			  TRUE);
	}

	/* Transmit ring base and size */
	WRITE_REG64(adapter,
		    adapter->UcodeRegs[0].XmtBase, adapter->PXmtRings, 0);
	WRITE_REG(adapter->UcodeRegs[0].XmtSize, SXG_XMT_RING_SIZE, TRUE);

	/* Receive ring base and size */
	WRITE_REG64(adapter,
		    adapter->UcodeRegs[0].RcvBase, adapter->PRcvRings, 0);
	if (adapter->JumboEnabled == TRUE)
		sxg_rcv_ring_size = SXG_JUMBO_RCV_RING_SIZE;
	WRITE_REG(adapter->UcodeRegs[0].RcvSize, sxg_rcv_ring_size, TRUE);

	/* Populate the card with receive buffers */
	sxg_stock_rcv_buffers(adapter);

	/*
	 * Initialize checksum offload capabilities.  At the moment we always
	 * enable IP and TCP receive checksums on the card. Depending on the
	 * checksum configuration specified by the user, we can choose to
	 * report or ignore the checksum information provided by the card.
	 */
	WRITE_REG(adapter->UcodeRegs[0].ReceiveChecksum,
		  SXG_RCV_TCP_CSUM_ENABLED | SXG_RCV_IP_CSUM_ENABLED, TRUE);

	adapter->flags |= (SXG_RCV_TCP_CSUM_ENABLED | SXG_RCV_IP_CSUM_ENABLED);

	/* Initialize the MAC, XAUI */
	DBG_ERROR("sxg: %s ENTER sxg_initialize_link\n", __func__);
	status = sxg_initialize_link(adapter);
	DBG_ERROR("sxg: %s EXIT sxg_initialize_link status[%x]\n", __func__,
		  status);
	if (status != STATUS_SUCCESS)
		return (status);

	/*
	 * Initialize Dead to FALSE.
	 * SlicCheckForHang or SlicDumpThread will take it from here.
	 */
	adapter->Dead = FALSE;
	adapter->PingOutstanding = FALSE;
	adapter->XmtFcEnabled = TRUE;
	adapter->RcvFcEnabled = TRUE;

	adapter->State = SXG_STATE_RUNNING;

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XInit",
		  adapter, 0, 0, 0);
	return (STATUS_SUCCESS);
}
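
/*
 * Bring-up ordering above is deliberate: interrupts are masked before any
 * ring state is written; the event, transmit, and receive rings plus the
 * per-ISR status pointers are programmed before the card is stocked with
 * receive buffers; and the link (MAC/XAUI) comes up last, after which the
 * adapter is marked SXG_STATE_RUNNING.
 */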

/*
 * sxg_fill_descriptor_block - Populate a descriptor block and give it to
 * the card.  The caller should hold the RcvQLock
 *
 * Arguments -
 *	adapter			- A pointer to our adapter structure
 *	RcvDescriptorBlockHdr	- Descriptor block to fill
 *
 * Return - status
 */
static int sxg_fill_descriptor_block(struct adapter_t *adapter,
		struct sxg_rcv_descriptor_block_hdr *RcvDescriptorBlockHdr)
{
	/* Signed on purpose: the error unwind below counts down through 0 */
	int i;
	struct sxg_ring_info *RcvRingInfo = &adapter->RcvRingZeroInfo;
	struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr;
	struct sxg_rcv_descriptor_block *RcvDescriptorBlock;
	struct sxg_cmd *RingDescriptorCmd;
	struct sxg_rcv_ring *RingZero = &adapter->RcvRings[0];

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "FilBlk",
		  adapter, adapter->RcvBuffersOnCard,
		  adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount);

	ASSERT(RcvDescriptorBlockHdr);

	/*
	 * If we don't have the resources to fill the descriptor block,
	 * return failure
	 */
	if ((adapter->FreeRcvBufferCount < SXG_RCV_DESCRIPTORS_PER_BLOCK) ||
	    SXG_RING_FULL(RcvRingInfo)) {
		adapter->Stats.NoMem++;
		return (STATUS_FAILURE);
	}
	/* Get a ring descriptor command */
	SXG_GET_CMD(RingZero,
		    RcvRingInfo, RingDescriptorCmd, RcvDescriptorBlockHdr);
	ASSERT(RingDescriptorCmd);
	RcvDescriptorBlockHdr->State = SXG_BUFFER_ONCARD;
	RcvDescriptorBlock = (struct sxg_rcv_descriptor_block *)
				RcvDescriptorBlockHdr->VirtualAddress;

	/* Fill in the descriptor block */
	for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK; i++) {
		SXG_GET_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr);
		ASSERT(RcvDataBufferHdr);
		/* ASSERT(RcvDataBufferHdr->SxgDumbRcvPacket); */
		if (!RcvDataBufferHdr->SxgDumbRcvPacket) {
			SXG_ALLOCATE_RCV_PACKET(adapter, RcvDataBufferHdr,
						adapter->ReceiveBufferSize);
			if (RcvDataBufferHdr->skb)
				RcvDataBufferHdr->SxgDumbRcvPacket =
							RcvDataBufferHdr->skb;
			else
				goto no_memory;
		}
		SXG_REINIATIALIZE_PACKET(RcvDataBufferHdr->SxgDumbRcvPacket);
		RcvDataBufferHdr->State = SXG_BUFFER_ONCARD;
		RcvDescriptorBlock->Descriptors[i].VirtualAddress =
						(void *)RcvDataBufferHdr;
		RcvDescriptorBlock->Descriptors[i].PhysicalAddress =
					RcvDataBufferHdr->PhysicalAddress;
	}
	/* Add the descriptor block to receive descriptor ring 0 */
	RingDescriptorCmd->Sgl = RcvDescriptorBlockHdr->PhysicalAddress;

	/*
	 * RcvBuffersOnCard is not protected via the receive lock (see
	 * sxg_process_event_queue).  We don't want to grab a lock every time
	 * a buffer is returned to us, so we use atomic interlocked functions
	 * instead.
	 */
	adapter->RcvBuffersOnCard += SXG_RCV_DESCRIPTORS_PER_BLOCK;

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DscBlk",
		  RcvDescriptorBlockHdr,
		  RingDescriptorCmd, RcvRingInfo->Head, RcvRingInfo->Tail);

	WRITE_REG(adapter->UcodeRegs[0].RcvCmd, 1, true);
	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XFilBlk",
		  adapter, adapter->RcvBuffersOnCard,
		  adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount);
	return (STATUS_SUCCESS);

no_memory:
	for (; i >= 0; i--) {
		if (RcvDescriptorBlock->Descriptors[i].VirtualAddress) {
			RcvDataBufferHdr = (struct sxg_rcv_data_buffer_hdr *)
					RcvDescriptorBlock->Descriptors[i].
							VirtualAddress;
			RcvDescriptorBlock->Descriptors[i].PhysicalAddress =
							(dma_addr_t)NULL;
			RcvDescriptorBlock->Descriptors[i].VirtualAddress =
							NULL;
		}
		SXG_FREE_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr);
	}
	RcvDescriptorBlockHdr->State = SXG_BUFFER_FREE;
	SXG_RETURN_CMD(RingZero, RcvRingInfo, RingDescriptorCmd,
		       RcvDescriptorBlockHdr);

	return (STATUS_FAILURE);
}
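
/*
 * The no_memory unwind above walks the partially filled descriptor block
 * backwards from the failing index, detaching each descriptor's data buffer
 * back onto the free pool, then marks the descriptor block header free and
 * returns the ring command slot.  This is why 'i' must be signed: the
 * "i >= 0" termination test never fails for an unsigned type.
 */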

/*
 * sxg_stock_rcv_buffers - Stock the card with receive buffers
 *
 * Arguments -
 *	adapter		- A pointer to our adapter structure
 *
 * Return - None
 */
static void sxg_stock_rcv_buffers(struct adapter_t *adapter)
{
	struct sxg_rcv_descriptor_block_hdr *RcvDescriptorBlockHdr;
	int sxg_rcv_data_buffers = SXG_RCV_DATA_BUFFERS;
	int sxg_min_rcv_data_buffers = SXG_MIN_RCV_DATA_BUFFERS;

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "StockBuf",
		  adapter, adapter->RcvBuffersOnCard,
		  adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount);
	/*
	 * First, see if we've got less than our minimum threshold of
	 * receive buffers, there isn't an allocation in progress, and
	 * we haven't exceeded our maximum.. get another block of buffers
	 * None of this needs to be SMP safe.  It's round numbers.
	 */
	if (adapter->JumboEnabled == TRUE)
		sxg_min_rcv_data_buffers = SXG_MIN_JUMBO_RCV_DATA_BUFFERS;
	if ((adapter->FreeRcvBufferCount < sxg_min_rcv_data_buffers) &&
	    (adapter->AllRcvBlockCount < SXG_MAX_RCV_BLOCKS) &&
	    (atomic_read(&adapter->pending_allocations) == 0)) {
		sxg_allocate_buffer_memory(adapter,
					   SXG_RCV_BLOCK_SIZE
					   (SXG_RCV_DATA_HDR_SIZE),
					   SXG_BUFFER_TYPE_RCV);
	}
	/* Now grab the RcvQLock lock and proceed */
	spin_lock(&adapter->RcvQLock);
	if (adapter->JumboEnabled)
		sxg_rcv_data_buffers = SXG_JUMBO_RCV_DATA_BUFFERS;
	while (adapter->RcvBuffersOnCard < sxg_rcv_data_buffers) {
		struct list_entry *_ple;

		/* Get a descriptor block */
		RcvDescriptorBlockHdr = NULL;
		if (adapter->FreeRcvBlockCount) {
			_ple = RemoveHeadList(&adapter->FreeRcvBlocks);
			RcvDescriptorBlockHdr =
			    container_of(_ple,
					 struct sxg_rcv_descriptor_block_hdr,
					 FreeList);
			adapter->FreeRcvBlockCount--;
			RcvDescriptorBlockHdr->State = SXG_BUFFER_BUSY;
		}

		if (RcvDescriptorBlockHdr == NULL) {
			/* Bail out.. */
			adapter->Stats.NoMem++;
			break;
		}
		/* Fill in the descriptor block and give it to the card */
		if (sxg_fill_descriptor_block(adapter, RcvDescriptorBlockHdr)
		    == STATUS_FAILURE) {
			/* Free the descriptor block */
			SXG_FREE_RCV_DESCRIPTOR_BLOCK(adapter,
						      RcvDescriptorBlockHdr);
			break;
		}
	}
	spin_unlock(&adapter->RcvQLock);
	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XFilBlks",
		  adapter, adapter->RcvBuffersOnCard,
		  adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount);
}
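
/*
 * The restocking policy in brief: a new block allocation is requested only
 * when the free-buffer count dips below the (jumbo-aware) minimum, no
 * allocation is already pending, and the block ceiling has not been reached.
 * Independently of that, the card itself is topped up from already-free
 * descriptor blocks until RcvBuffersOnCard reaches its target.
 */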

/*
 * sxg_complete_descriptor_blocks - Return descriptor blocks that have been
 * completed by the microcode
 *
 * Arguments -
 *	adapter		- A pointer to our adapter structure
 *	Index		- Where the microcode is up to
 *
 * Return - None
 */
static void sxg_complete_descriptor_blocks(struct adapter_t *adapter,
					   unsigned char Index)
{
	struct sxg_rcv_ring *RingZero = &adapter->RcvRings[0];
	struct sxg_ring_info *RcvRingInfo = &adapter->RcvRingZeroInfo;
	struct sxg_rcv_descriptor_block_hdr *RcvDescriptorBlockHdr;
	struct sxg_cmd *RingDescriptorCmd;

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpRBlks",
		  adapter, Index, RcvRingInfo->Head, RcvRingInfo->Tail);

	/* Now grab the RcvQLock lock and proceed */
	spin_lock(&adapter->RcvQLock);
	ASSERT(Index != RcvRingInfo->Tail);
	while (sxg_ring_get_forward_diff(RcvRingInfo, Index,
					 RcvRingInfo->Tail) > 3) {
		/*
		 * Locate the current Cmd (ring descriptor entry), and
		 * associated receive descriptor block, and advance
		 * the tail
		 */
		SXG_RETURN_CMD(RingZero, RcvRingInfo,
			       RingDescriptorCmd, RcvDescriptorBlockHdr);
		SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpRBlk",
			  RcvRingInfo->Head, RcvRingInfo->Tail,
			  RingDescriptorCmd, RcvDescriptorBlockHdr);

		/* Clear the SGL field */
		RingDescriptorCmd->Sgl = 0;
		/*
		 * Attempt to refill it and hand it right back to the
		 * card.  If we fail to refill it, free the descriptor block
		 * header.  The card will be restocked later via the
		 * RcvBuffersOnCard test
		 */
		if (sxg_fill_descriptor_block(adapter,
				RcvDescriptorBlockHdr) == STATUS_FAILURE)
			SXG_FREE_RCV_DESCRIPTOR_BLOCK(adapter,
						      RcvDescriptorBlockHdr);
	}
	spin_unlock(&adapter->RcvQLock);
	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XCRBlks",
		  adapter, Index, RcvRingInfo->Head, RcvRingInfo->Tail);
}
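
/*
 * Note that the loop above stops once the microcode's index is within three
 * entries of the ring tail, leaving a small gap so the tail never runs
 * right up against the entry the card is currently working on.
 */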

/*
 * Read the statistics which the card has been maintaining.
 */
void sxg_collect_statistics(struct adapter_t *adapter)
{
	if (adapter->ucode_stats) {
		WRITE_REG64(adapter, adapter->UcodeRegs[0].GetUcodeStats,
			    adapter->pucode_stats, 0);
		adapter->stats.rx_fifo_errors = adapter->ucode_stats->ERDrops;
		adapter->stats.rx_over_errors = adapter->ucode_stats->NBDrops;
		adapter->stats.tx_fifo_errors = adapter->ucode_stats->XDrops;
	}
}
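
/*
 * Writing the stats buffer's DMA address to GetUcodeStats acts as a doorbell
 * asking the microcode to dump its counters into that buffer, so the values
 * copied above may reflect the previous dump rather than the request just
 * issued.
 */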

static struct net_device_stats *sxg_get_stats(struct net_device *dev)
{
	struct adapter_t *adapter = netdev_priv(dev);

	sxg_collect_statistics(adapter);
	return (&adapter->stats);
}

static void sxg_watchdog(unsigned long data)
{
	struct adapter_t *adapter = (struct adapter_t *) data;

	if (adapter->state != ADAPT_DOWN) {
		sxg_link_event(adapter);
		/* Reset the timer */
		mod_timer(&adapter->watchdog_timer,
			  round_jiffies(jiffies + 2 * HZ));
	}
}
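
/*
 * The watchdog re-arms itself every two seconds for as long as the adapter
 * is up.  The first arming would happen at open time, along these lines
 * (names assumed, illustrative only):
 *
 *	setup_timer(&adapter->watchdog_timer, sxg_watchdog,
 *		    (unsigned long)adapter);
 *	mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
 */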

static void sxg_update_link_status(struct work_struct *work)
{
	struct adapter_t *adapter = (struct adapter_t *)container_of
				(work, struct adapter_t, update_link_status);

	if (likely(adapter->link_status_changed)) {
		sxg_link_event(adapter);
		adapter->link_status_changed = 0;
	}
}

static struct pci_driver sxg_driver = {
	.name = sxg_driver_name,
	.id_table = sxg_pci_tbl,
	.probe = sxg_entry_probe,
	.remove = __devexit_p(sxg_entry_remove),
#if SXG_POWER_MANAGEMENT_ENABLED
	.suspend = sxgpm_suspend,
	.resume = sxgpm_resume,
#endif
	/* .shutdown = slic_shutdown,  MOOK_INVESTIGATE */
};

static int __init sxg_module_init(void)
{
	sxg_init_driver();

	if (debug >= 0)
		sxg_debug = debug;

	return pci_register_driver(&sxg_driver);
}

static void __exit sxg_module_cleanup(void)
{
	pci_unregister_driver(&sxg_driver);
}

module_init(sxg_module_init);
module_exit(sxg_module_cleanup);