/************************************************************************
 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
 * Copyright(c) 2002-2005 Neterion Inc.
 *
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by reference.
 * Drivers based on or derived from this code fall under the GPL and must
 * retain the authorship, copyright and license notice. This file is not
 * a complete program and may only be used when the entire operating
 * system is licensed under the GPL.
 * See the file COPYING in this distribution for more information.
 *
 * Credits:
 * Jeff Garzik		: For pointing out the improper error condition
 *			  check in the s2io_xmit routine and also some
 *			  issues in the Tx watch dog function. Also for
 *			  patiently answering all those innumerable
 *			  questions regarding the 2.6 porting issues.
 * Stephen Hemminger	: Providing proper 2.6 porting mechanism for some
 *			  macros available only in 2.6 Kernel.
 * Francois Romieu	: For pointing out all code parts that were
 *			  deprecated and also styling related comments.
 * Grant Grundler	: For helping me get rid of some Architecture
 *			  dependent code.
 * Christopher Hellwig	: Some more 2.6 specific issues in the driver.
 *
 * The module loadable parameters that are supported by the driver and a brief
 * explanation of all the variables.
 * rx_ring_num : This can be used to program the number of receive rings used
 *		 in the driver.
 * rx_ring_sz: This defines the number of descriptors each ring can have. This
 *		is also an array of size 8.
 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
 * tx_fifo_len: This too is an array of 8. Each element defines the number of
 *		Tx descriptors that can be associated with each corresponding FIFO.
 ************************************************************************/
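
/*
 * Usage note (illustrative, not part of the original sources): the parameters
 * described above are passed at module load time, for example
 *
 *	modprobe s2io tx_fifo_num=2 rx_ring_num=2
 *
 * Any parameter that is left out keeps the default value assigned further
 * below in this file.
 */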
#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/stddef.h>
#include <linux/ioctl.h>
#include <linux/timex.h>
#include <linux/sched.h>
#include <linux/ethtool.h>
#include <linux/version.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>

/* local include */
#include "s2io.h"
#include "s2io-regs.h"

#define DRV_VERSION "Version 2.0.9.1"
/* S2io Driver name & version. */
static char s2io_driver_name[] = "Neterion";
static char s2io_driver_version[] = DRV_VERSION;
static inline int RXD_IS_UP2DT(RxD_t *rxdp)
{
	int ret;

	ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
	       (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));

	return ret;
}
/*
 * Cards with following subsystem_id have a link state indication
 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
 * macro below identifies these cards given the subsystem_id.
 */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
	(dev_type == XFRAME_I_DEVICE) ?			\
		((((subid >= 0x600B) && (subid <= 0x600D)) || \
		  ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0

#define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
				      ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
#define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
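
/*
 * Example (illustrative): for an Xframe I adapter with subsystem id 0x600C,
 * CARDS_WITH_FAULTY_LINK_INDICATORS(XFRAME_I_DEVICE, 0x600C) evaluates to 1,
 * telling the driver it cannot trust the link state that card reports; for
 * any Xframe II device the macro always evaluates to 0.
 */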
#define PANIC	1
#define LOW	2

static inline int rx_buffer_level(nic_t *sp, int rxb_size, int ring)
{
	int level = 0;
	mac_info_t *mac_control;

	mac_control = &sp->mac_control;
	if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16) {
		/* Running low on receive buffers for this ring. */
		level = LOW;
		if (rxb_size <= MAX_RXDS_PER_BLOCK) {
			/* Critically low, the ring is about to run dry. */
			level = PANIC;
		}
	}

	return level;
}
/* Ethtool related variables and Macros. */
static char s2io_gstrings[][ETH_GSTRING_LEN] = {
	"Register test\t(offline)",
	"Eeprom test\t(offline)",
	"Link test\t(online)",
	"RLDRAM test\t(offline)",
	"BIST Test\t(offline)"
};
static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
	{"tmac_data_octets"},
	{"tmac_pause_ctrl_frms"},
	{"tmac_any_err_frms"},
	{"tmac_vld_ip_octets"},
	{"rmac_data_octets"},
	{"rmac_fcs_err_frms"},
	{"rmac_vld_mcst_frms"},
	{"rmac_vld_bcst_frms"},
	{"rmac_in_rng_len_err_frms"},
	{"rmac_pause_ctrl_frms"},
	{"rmac_discarded_frms"},
	{"rmac_usized_frms"},
	{"rmac_osized_frms"},
	{"rmac_jabber_frms"},
	{"rmac_err_drp_udp"},
	{"rmac_accepted_ip"},
	{"\n DRIVER STATISTICS"},
	{"single_bit_ecc_errs"},
	{"double_bit_ecc_errs"},
};
#define S2IO_STAT_LEN sizeof(ethtool_stats_keys) / ETH_GSTRING_LEN
#define S2IO_STAT_STRINGS_LEN S2IO_STAT_LEN * ETH_GSTRING_LEN

#define S2IO_TEST_LEN sizeof(s2io_gstrings) / ETH_GSTRING_LEN
#define S2IO_STRINGS_LEN S2IO_TEST_LEN * ETH_GSTRING_LEN
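
/*
 * Worked example (illustrative): s2io_gstrings holds 5 test names, so
 * S2IO_TEST_LEN is 5 and, with the standard ETH_GSTRING_LEN of 32 bytes,
 * S2IO_STRINGS_LEN is 5 * 32 = 160 bytes of string storage handed to ethtool.
 */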
#define S2IO_TIMER_CONF(timer, handle, arg, exp)		\
			init_timer(&timer);			\
			timer.function = handle;		\
			timer.data = (unsigned long) arg;	\
			mod_timer(&timer, (jiffies + exp))	\
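
/*
 * Usage sketch (illustrative; the timer and handler names below are only
 * placeholders, not necessarily the ones used elsewhere in this driver):
 *
 *	S2IO_TIMER_CONF(sp->alarm_timer, alarm_intr_handler, sp, (HZ / 2));
 *
 * expands to an init_timer()/mod_timer() pair that arms the timer to fire
 * half a second from now with the device pointer as its argument.
 */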
static void s2io_vlan_rx_register(struct net_device *dev,
				  struct vlan_group *grp)
{
	nic_t *nic = dev->priv;
	unsigned long flags;

	spin_lock_irqsave(&nic->tx_lock, flags);
	nic->vlgrp = grp;
	spin_unlock_irqrestore(&nic->tx_lock, flags);
}
/* Unregister the vlan */
static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
{
	nic_t *nic = dev->priv;
	unsigned long flags;

	spin_lock_irqsave(&nic->tx_lock, flags);
	nic->vlgrp->vlan_devices[vid] = NULL;
	spin_unlock_irqrestore(&nic->tx_lock, flags);
}
/*
 * Constants to be programmed into the Xena's registers, to configure
 * the XAUI.
 */

#define	SWITCH_SIGN	0xA5A5A5A5A5A5A5A5ULL
#define	END_SIGN	0x0
static u64 herc_act_dtx_cfg[] = {
	0x8000051536750000ULL, 0x80000515367500E0ULL,
	0x8000051536750004ULL, 0x80000515367500E4ULL,
	0x80010515003F0000ULL, 0x80010515003F00E0ULL,
	0x80010515003F0004ULL, 0x80010515003F00E4ULL,
	0x801205150D440000ULL, 0x801205150D4400E0ULL,
	0x801205150D440004ULL, 0x801205150D4400E4ULL,
	0x80020515F2100000ULL, 0x80020515F21000E0ULL,
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
	END_SIGN
};
static u64 xena_mdio_cfg[] = {
	/* Reset PMA PLL */
	0xC001010000000000ULL, 0xC0010100000000E0ULL,
	0xC0010100008000E4ULL,
	/* Remove Reset from PMA PLL */
	0xC001010000000000ULL, 0xC0010100000000E0ULL,
	0xC0010100000000E4ULL,
	END_SIGN
};
static u64 xena_dtx_cfg[] = {
	0x8000051500000000ULL, 0x80000515000000E0ULL,
	0x80000515D93500E4ULL, 0x8001051500000000ULL,
	0x80010515000000E0ULL, 0x80010515001E00E4ULL,
	0x8002051500000000ULL, 0x80020515000000E0ULL,
	0x80020515F21000E4ULL,
	/* Set PADLOOPBACKN */
	0x8002051500000000ULL, 0x80020515000000E0ULL,
	0x80020515B20000E4ULL, 0x8003051500000000ULL,
	0x80030515000000E0ULL, 0x80030515B20000E4ULL,
	0x8004051500000000ULL, 0x80040515000000E0ULL,
	0x80040515B20000E4ULL, 0x8005051500000000ULL,
	0x80050515000000E0ULL, 0x80050515B20000E4ULL,
	SWITCH_SIGN,
	/* Remove PADLOOPBACKN */
	0x8002051500000000ULL, 0x80020515000000E0ULL,
	0x80020515F20000E4ULL, 0x8003051500000000ULL,
	0x80030515000000E0ULL, 0x80030515F20000E4ULL,
	0x8004051500000000ULL, 0x80040515000000E0ULL,
	0x80040515F20000E4ULL, 0x8005051500000000ULL,
	0x80050515000000E0ULL, 0x80050515F20000E4ULL,
	END_SIGN
};
/*
 * Constants for Fixing the MacAddress problem seen mostly on
 * Alpha platforms.
 */
static u64 fix_mac[] = {
	0x0060000000000000ULL, 0x0060600000000000ULL,
	0x0040600000000000ULL, 0x0000600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0000600000000000ULL,
	0x0040600000000000ULL, 0x0060600000000000ULL,
	END_SIGN
};
/* Module Loadable parameters. */
static unsigned int tx_fifo_num = 1;
static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
    {[0 ...(MAX_TX_FIFOS - 1)] = 0 };
static unsigned int rx_ring_num = 1;
static unsigned int rx_ring_sz[MAX_RX_RINGS] =
    {[0 ...(MAX_RX_RINGS - 1)] = 0 };
static unsigned int rts_frm_len[MAX_RX_RINGS] =
    {[0 ...(MAX_RX_RINGS - 1)] = 0 };
static unsigned int use_continuous_tx_intrs = 1;
static unsigned int rmac_pause_time = 65535;
static unsigned int mc_pause_threshold_q0q3 = 187;
static unsigned int mc_pause_threshold_q4q7 = 187;
static unsigned int shared_splits;
static unsigned int tmac_util_period = 5;
static unsigned int rmac_util_period = 5;
static unsigned int bimodal = 0;
#ifndef CONFIG_S2IO_NAPI
static unsigned int indicate_max_pkts;
#endif
/* Frequency of Rx desc syncs expressed as power of 2 */
static unsigned int rxsync_frequency = 3;
/* Interrupt type. Values can be 0(INTA), 1(MSI), 2(MSI_X) */
static unsigned int intr_type = 0;
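
/*
 * Illustrative sketch (not part of the original file): variables such as the
 * ones above are normally exported as loadable parameters with declarations
 * of the following form, placed near the bottom of the driver:
 *
 *	module_param(tx_fifo_num, int, 0);
 *	module_param(rx_ring_num, int, 0);
 *	module_param_array(rx_ring_sz, uint, NULL, 0);
 *
 * The exact set of declarations and permission bits used by s2io may differ.
 */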
/*
 * This table lists all the devices that this driver supports.
 */
static struct pci_device_id s2io_tbl[] __devinitdata = {
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
	{0,}
};

MODULE_DEVICE_TABLE(pci, s2io_tbl);
static struct pci_driver s2io_driver = {
      .id_table = s2io_tbl,
      .probe = s2io_init_nic,
      .remove = __devexit_p(s2io_rem_nic),
};
/* A simplifier macro used both by init and free shared_mem Fns(). */
#define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each)
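
/*
 * Worked example (illustrative, with assumed numbers): if a FIFO holds 100
 * TxDLs and lst_per_page works out to 32 lists per page, then
 * TXD_MEM_PAGE_CNT(100, 32) = (100 + 32 - 1) / 32 = 4, i.e. the division is
 * rounded up so the final, partially used page is still counted.
 */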
/**
 * init_shared_mem - Allocation and Initialization of Memory
 * @nic: Device private variable.
 * Description: The function allocates all the memory areas shared
 * between the NIC and the driver. This includes Tx descriptors,
 * Rx descriptors and the statistics block.
 */
static int init_shared_mem(struct s2io_nic *nic)
{
	u32 size = 0;
	void *tmp_v_addr, *tmp_v_addr_next;
	dma_addr_t tmp_p_addr, tmp_p_addr_next;
	RxD_block_t *pre_rxd_blk = NULL;
	int i, j, blk_cnt, rx_sz, tx_sz;
	int lst_size, lst_per_page;
	struct net_device *dev = nic->dev;
#ifdef CONFIG_2BUFF_MODE
	unsigned long tmp;
	buffAdd_t *ba;
#endif
	mac_info_t *mac_control;
	struct config_param *config;

	mac_control = &nic->mac_control;
	config = &nic->config;
	/* Allocation and initialization of TXDLs in FIFOs */
	size = 0;
	for (i = 0; i < config->tx_fifo_num; i++) {
		size += config->tx_cfg[i].fifo_len;
	}
	if (size > MAX_AVAILABLE_TXDS) {
		DBG_PRINT(ERR_DBG, "%s: Requested TxDs too high, ",
			  dev->name);
		DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n",
			  size);
		return FAILURE;
	}

	lst_size = (sizeof(TxD_t) * config->max_txds);
	tx_sz = lst_size * size;
	lst_per_page = PAGE_SIZE / lst_size;
	for (i = 0; i < config->tx_fifo_num; i++) {
		int fifo_len = config->tx_cfg[i].fifo_len;
		int list_holder_size = fifo_len * sizeof(list_info_hold_t);
		mac_control->fifos[i].list_info = kmalloc(list_holder_size,
							  GFP_KERNEL);
		if (!mac_control->fifos[i].list_info) {
			DBG_PRINT(ERR_DBG,
				  "Malloc failed for list_info\n");
			return -ENOMEM;
		}
		memset(mac_control->fifos[i].list_info, 0, list_holder_size);
	}
	/* Allocate TxDL pages for each FIFO and populate its list_info. */
	for (i = 0; i < config->tx_fifo_num; i++) {
		int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
						lst_per_page);
		mac_control->fifos[i].tx_curr_put_info.offset = 0;
		mac_control->fifos[i].tx_curr_put_info.fifo_len =
		    config->tx_cfg[i].fifo_len - 1;
		mac_control->fifos[i].tx_curr_get_info.offset = 0;
		mac_control->fifos[i].tx_curr_get_info.fifo_len =
		    config->tx_cfg[i].fifo_len - 1;
		mac_control->fifos[i].fifo_no = i;
		mac_control->fifos[i].nic = nic;
		mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 1;

		for (j = 0; j < page_num; j++) {
			int k = 0;
			dma_addr_t tmp_p;
			void *tmp_v;
			tmp_v = pci_alloc_consistent(nic->pdev,
						     PAGE_SIZE, &tmp_p);
			if (!tmp_v) {
				DBG_PRINT(ERR_DBG,
					  "pci_alloc_consistent ");
				DBG_PRINT(ERR_DBG, "failed for TxDL\n");
				return -ENOMEM;
			}
			/* If we got a zero DMA address(can happen on
			 * certain platforms like PPC), reallocate.
			 * Store virtual address of page we don't want,
			 * to be freed later.
			 */
			if (!tmp_p) {
				mac_control->zerodma_virt_addr = tmp_v;
				DBG_PRINT(INIT_DBG,
					  "%s: Zero DMA address for TxDL. ",
					  dev->name);
				DBG_PRINT(INIT_DBG,
					  "Virtual address %p\n", tmp_v);
				tmp_v = pci_alloc_consistent(nic->pdev,
							     PAGE_SIZE, &tmp_p);
				if (!tmp_v) {
					DBG_PRINT(ERR_DBG,
						  "pci_alloc_consistent ");
					DBG_PRINT(ERR_DBG,
						  "failed for TxDL\n");
					return -ENOMEM;
				}
			}
			while (k < lst_per_page) {
				int l = (j * lst_per_page) + k;
				if (l == config->tx_cfg[i].fifo_len)
					break;
				mac_control->fifos[i].list_info[l].list_virt_addr =
				    tmp_v + (k * lst_size);
				mac_control->fifos[i].list_info[l].list_phy_addr =
				    tmp_p + (k * lst_size);
				k++;
			}
		}
	}
	/* Allocation and initialization of RXDs in Rings */
	size = 0;
	for (i = 0; i < config->rx_ring_num; i++) {
		if (config->rx_cfg[i].num_rxd % (MAX_RXDS_PER_BLOCK + 1)) {
			DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
			DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
				  i);
			DBG_PRINT(ERR_DBG, "RxDs per Block");
			return FAILURE;
		}
		size += config->rx_cfg[i].num_rxd;
		mac_control->rings[i].block_count =
		    config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
		mac_control->rings[i].pkt_cnt =
		    config->rx_cfg[i].num_rxd - mac_control->rings[i].block_count;
	}
	size = (size * (sizeof(RxD_t)));
	rx_sz = size;
	for (i = 0; i < config->rx_ring_num; i++) {
		mac_control->rings[i].rx_curr_get_info.block_index = 0;
		mac_control->rings[i].rx_curr_get_info.offset = 0;
		mac_control->rings[i].rx_curr_get_info.ring_len =
		    config->rx_cfg[i].num_rxd - 1;
		mac_control->rings[i].rx_curr_put_info.block_index = 0;
		mac_control->rings[i].rx_curr_put_info.offset = 0;
		mac_control->rings[i].rx_curr_put_info.ring_len =
		    config->rx_cfg[i].num_rxd - 1;
		mac_control->rings[i].nic = nic;
		mac_control->rings[i].ring_no = i;

		blk_cnt =
		    config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
		/* Allocating all the Rx blocks */
		for (j = 0; j < blk_cnt; j++) {
#ifndef CONFIG_2BUFF_MODE
			size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
#else
			size = SIZE_OF_BLOCK;
#endif
			tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
							  &tmp_p_addr);
			if (tmp_v_addr == NULL) {
				/*
				 * In case of failure, free_shared_mem()
				 * is called, which should free any
				 * memory that was alloced till the
				 * failure happened.
				 */
				mac_control->rings[i].rx_blocks[j].block_virt_addr =
				    tmp_v_addr;
				return -ENOMEM;
			}
			memset(tmp_v_addr, 0, size);
			mac_control->rings[i].rx_blocks[j].block_virt_addr =
			    tmp_v_addr;
			mac_control->rings[i].rx_blocks[j].block_dma_addr =
			    tmp_p_addr;
		}
		/* Interlinking all Rx Blocks */
		for (j = 0; j < blk_cnt; j++) {
			tmp_v_addr =
			    mac_control->rings[i].rx_blocks[j].block_virt_addr;
			tmp_v_addr_next =
			    mac_control->rings[i].rx_blocks[(j + 1) %
					    blk_cnt].block_virt_addr;
			tmp_p_addr =
			    mac_control->rings[i].rx_blocks[j].block_dma_addr;
			tmp_p_addr_next =
			    mac_control->rings[i].rx_blocks[(j + 1) %
					    blk_cnt].block_dma_addr;

			pre_rxd_blk = (RxD_block_t *) tmp_v_addr;
			pre_rxd_blk->reserved_1 = END_OF_BLOCK;	/* last RxD
								 * in this blk */
#ifndef CONFIG_2BUFF_MODE
			pre_rxd_blk->reserved_2_pNext_RxD_block =
			    (unsigned long) tmp_v_addr_next;
#endif
			pre_rxd_blk->pNext_RxD_Blk_physical =
			    (u64) tmp_p_addr_next;
		}
	}
#ifdef CONFIG_2BUFF_MODE
	/*
	 * Allocation of Storages for buffer addresses in 2BUFF mode
	 * and the buffers as well.
	 */
	for (i = 0; i < config->rx_ring_num; i++) {
		blk_cnt =
		    config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
		mac_control->rings[i].ba = kmalloc((sizeof(buffAdd_t *) * blk_cnt),
						   GFP_KERNEL);
		if (!mac_control->rings[i].ba)
			return -ENOMEM;
		for (j = 0; j < blk_cnt; j++) {
			int k = 0;
			mac_control->rings[i].ba[j] = kmalloc((sizeof(buffAdd_t) *
						(MAX_RXDS_PER_BLOCK + 1)),
						GFP_KERNEL);
			if (!mac_control->rings[i].ba[j])
				return -ENOMEM;
			while (k != MAX_RXDS_PER_BLOCK) {
				ba = &mac_control->rings[i].ba[j][k];

				ba->ba_0_org = (void *) kmalloc
				    (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
				if (!ba->ba_0_org)
					return -ENOMEM;
				tmp = (unsigned long) ba->ba_0_org;
				tmp += ALIGN_SIZE;
				tmp &= ~((unsigned long) ALIGN_SIZE);
				ba->ba_0 = (void *) tmp;

				ba->ba_1_org = (void *) kmalloc
				    (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
				if (!ba->ba_1_org)
					return -ENOMEM;
				tmp = (unsigned long) ba->ba_1_org;
				tmp += ALIGN_SIZE;
				tmp &= ~((unsigned long) ALIGN_SIZE);
				ba->ba_1 = (void *) tmp;
				k++;
			}
		}
	}
#endif
	/* Allocation and initialization of Statistics block */
	size = sizeof(StatInfo_t);
	mac_control->stats_mem = pci_alloc_consistent
	    (nic->pdev, size, &mac_control->stats_mem_phy);

	if (!mac_control->stats_mem) {
		/*
		 * In case of failure, free_shared_mem() is called, which
		 * should free any memory that was alloced till the
		 * failure happened.
		 */
		return -ENOMEM;
	}
	mac_control->stats_mem_sz = size;

	tmp_v_addr = mac_control->stats_mem;
	mac_control->stats_info = (StatInfo_t *) tmp_v_addr;
	memset(tmp_v_addr, 0, size);
	DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
		  (unsigned long long) tmp_p_addr);

	return SUCCESS;
}
/**
 * free_shared_mem - Free the allocated Memory
 * @nic: Device private variable.
 * Description: This function is to free all memory locations allocated by
 * the init_shared_mem() function and return it to the kernel.
 */

static void free_shared_mem(struct s2io_nic *nic)
{
	int i, j, blk_cnt, size;
	void *tmp_v_addr;
	dma_addr_t tmp_p_addr;
	mac_info_t *mac_control;
	struct config_param *config;
	int lst_size, lst_per_page;
	struct net_device *dev = nic->dev;

	mac_control = &nic->mac_control;
	config = &nic->config;

	lst_size = (sizeof(TxD_t) * config->max_txds);
	lst_per_page = PAGE_SIZE / lst_size;
	for (i = 0; i < config->tx_fifo_num; i++) {
		int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
						lst_per_page);
		for (j = 0; j < page_num; j++) {
			int mem_blks = (j * lst_per_page);
			if (!mac_control->fifos[i].list_info)
				return;
			if (!mac_control->fifos[i].list_info[mem_blks].
			    list_virt_addr)
				break;
			pci_free_consistent(nic->pdev, PAGE_SIZE,
					    mac_control->fifos[i].
					    list_info[mem_blks].list_virt_addr,
					    mac_control->fifos[i].
					    list_info[mem_blks].list_phy_addr);
		}
		/* If we got a zero DMA address during allocation,
		 * free the page now.
		 */
		if (mac_control->zerodma_virt_addr) {
			pci_free_consistent(nic->pdev, PAGE_SIZE,
					    mac_control->zerodma_virt_addr,
					    (dma_addr_t)0);
			DBG_PRINT(INIT_DBG,
				  "%s: Freeing TxDL with zero DMA addr. ",
				  dev->name);
			DBG_PRINT(INIT_DBG, "Virtual address %p\n",
				  mac_control->zerodma_virt_addr);
		}
		kfree(mac_control->fifos[i].list_info);
	}
#ifndef CONFIG_2BUFF_MODE
	size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
#else
	size = SIZE_OF_BLOCK;
#endif
	for (i = 0; i < config->rx_ring_num; i++) {
		blk_cnt = mac_control->rings[i].block_count;
		for (j = 0; j < blk_cnt; j++) {
			tmp_v_addr = mac_control->rings[i].rx_blocks[j].
			    block_virt_addr;
			tmp_p_addr = mac_control->rings[i].rx_blocks[j].
			    block_dma_addr;
			if (tmp_v_addr == NULL)
				break;
			pci_free_consistent(nic->pdev, size,
					    tmp_v_addr, tmp_p_addr);
		}
	}
#ifdef CONFIG_2BUFF_MODE
	/* Freeing buffer storage addresses in 2BUFF mode. */
	for (i = 0; i < config->rx_ring_num; i++) {
		blk_cnt =
		    config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
		for (j = 0; j < blk_cnt; j++) {
			int k = 0;
			if (!mac_control->rings[i].ba[j])
				continue;
			while (k != MAX_RXDS_PER_BLOCK) {
				buffAdd_t *ba = &mac_control->rings[i].ba[j][k];
				kfree(ba->ba_0_org);
				kfree(ba->ba_1_org);
				k++;
			}
			kfree(mac_control->rings[i].ba[j]);
		}
		if (mac_control->rings[i].ba)
			kfree(mac_control->rings[i].ba);
	}
#endif
	if (mac_control->stats_mem) {
		pci_free_consistent(nic->pdev,
				    mac_control->stats_mem_sz,
				    mac_control->stats_mem,
				    mac_control->stats_mem_phy);
	}
}
/**
 * s2io_verify_pci_mode -
 */

static int s2io_verify_pci_mode(nic_t *nic)
{
	XENA_dev_config_t __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	int mode;

	val64 = readq(&bar0->pci_mode);
	mode = (u8)GET_PCI_MODE(val64);

	if (val64 & PCI_MODE_UNKNOWN_MODE)
		return -1;	/* Unknown PCI mode */
	return mode;
}
/**
 * s2io_print_pci_mode -
 */
static int s2io_print_pci_mode(nic_t *nic)
{
	XENA_dev_config_t __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	int mode;
	struct config_param *config = &nic->config;

	val64 = readq(&bar0->pci_mode);
	mode = (u8)GET_PCI_MODE(val64);

	if (val64 & PCI_MODE_UNKNOWN_MODE)
		return -1;	/* Unknown PCI mode */

	if (val64 & PCI_MODE_32_BITS) {
		DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
	} else {
		DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
	}

	switch (mode) {
	case PCI_MODE_PCI_33:
		DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
		config->bus_speed = 33;
		break;
	case PCI_MODE_PCI_66:
		DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
		config->bus_speed = 133;
		break;
	case PCI_MODE_PCIX_M1_66:
		DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
		config->bus_speed = 133;	/* Herc doubles the clock rate */
		break;
	case PCI_MODE_PCIX_M1_100:
		DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
		config->bus_speed = 200;
		break;
	case PCI_MODE_PCIX_M1_133:
		DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
		config->bus_speed = 266;
		break;
	case PCI_MODE_PCIX_M2_66:
		DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
		config->bus_speed = 133;
		break;
	case PCI_MODE_PCIX_M2_100:
		DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
		config->bus_speed = 200;
		break;
	case PCI_MODE_PCIX_M2_133:
		DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
		config->bus_speed = 266;
		break;
	default:
		return -1;	/* Unsupported bus speed */
	}

	return mode;
}
/**
 * init_nic - Initialization of hardware
 * @nic: device private variable
 * Description: The function sequentially configures every block
 * of the H/W from their reset values.
 * Return Value: SUCCESS on success and
 * '-1' on failure (endian settings incorrect).
 */

static int init_nic(struct s2io_nic *nic)
{
	XENA_dev_config_t __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	void __iomem *add;
	u32 time;
	int i, j;
	mac_info_t *mac_control;
	struct config_param *config;
	int mdio_cnt = 0, dtx_cnt = 0;
	unsigned long long mem_share;
	int mem_size;

	mac_control = &nic->mac_control;
	config = &nic->config;
	/* to set the swapper control on the card */
	if (s2io_set_swapper(nic)) {
		DBG_PRINT(ERR_DBG, "ERROR: Setting Swapper failed\n");
		return -1;
	}

	/*
	 * Herc requires EOI to be removed from reset before XGXS, so..
	 */
	if (nic->device_type & XFRAME_II_DEVICE) {
		val64 = 0xA500000000ULL;
		writeq(val64, &bar0->sw_reset);
		val64 = readq(&bar0->sw_reset);
	}

	/* Remove XGXS from reset state */
	val64 = 0;
	writeq(val64, &bar0->sw_reset);
	val64 = readq(&bar0->sw_reset);

	/* Enable Receiving broadcasts */
	add = &bar0->mac_cfg;
	val64 = readq(&bar0->mac_cfg);
	val64 |= MAC_RMAC_BCAST_ENABLE;
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) val64, add);
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) (val64 >> 32), (add + 4));
	/* Read registers in all blocks */
	val64 = readq(&bar0->mac_int_mask);
	val64 = readq(&bar0->mc_int_mask);
	val64 = readq(&bar0->xgxs_int_mask);

	/* Set MTU */
	val64 = dev->mtu;
	writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
	/*
	 * Configuring the XAUI Interface of Xena.
	 * ***************************************
	 * To Configure the Xena's XAUI, one has to write a series
	 * of 64 bit values into two registers in a particular
	 * sequence. Hence a macro 'SWITCH_SIGN' has been defined
	 * which will be defined in the array of configuration values
	 * (xena_dtx_cfg & xena_mdio_cfg) at appropriate places
	 * to switch writing from one register to another. We continue
	 * writing these values until we encounter the 'END_SIGN' macro.
	 * For example, After making a series of 21 writes into
	 * dtx_control register the 'SWITCH_SIGN' appears and hence we
	 * start writing into mdio_control until we encounter END_SIGN.
	 */
	if (nic->device_type & XFRAME_II_DEVICE) {
		while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
			SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
					  &bar0->dtx_control, UF);
			dtx_cnt++;
			msleep(1);	/* Necessary!! */
		}
	} else {
		while (1) {
		      dtx_cfg:
			while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
				if (xena_dtx_cfg[dtx_cnt] == SWITCH_SIGN) {
					dtx_cnt++;
					goto mdio_cfg;
				}
				SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
						  &bar0->dtx_control, UF);
				val64 = readq(&bar0->dtx_control);
				dtx_cnt++;
			}
		      mdio_cfg:
			while (xena_mdio_cfg[mdio_cnt] != END_SIGN) {
				if (xena_mdio_cfg[mdio_cnt] == SWITCH_SIGN) {
					mdio_cnt++;
					goto dtx_cfg;
				}
				SPECIAL_REG_WRITE(xena_mdio_cfg[mdio_cnt],
						  &bar0->mdio_control, UF);
				val64 = readq(&bar0->mdio_control);
				mdio_cnt++;
			}
			if ((xena_dtx_cfg[dtx_cnt] == END_SIGN) &&
			    (xena_mdio_cfg[mdio_cnt] == END_SIGN)) {
				break;
			} else {
				goto dtx_cfg;
			}
		}
	}
	/* Tx DMA Initialization */
	val64 = 0;
	writeq(val64, &bar0->tx_fifo_partition_0);
	writeq(val64, &bar0->tx_fifo_partition_1);
	writeq(val64, &bar0->tx_fifo_partition_2);
	writeq(val64, &bar0->tx_fifo_partition_3);

	for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
		val64 |=
		    vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
			 13) | vBIT(config->tx_cfg[i].fifo_priority,
				    ((i * 32) + 5), 3);

		if (i == (config->tx_fifo_num - 1)) {
			if (i % 2 == 0)
				i++;
		}

		switch (i) {
		case 1:
			writeq(val64, &bar0->tx_fifo_partition_0);
			val64 = 0;
			break;
		case 3:
			writeq(val64, &bar0->tx_fifo_partition_1);
			val64 = 0;
			break;
		case 5:
			writeq(val64, &bar0->tx_fifo_partition_2);
			val64 = 0;
			break;
		case 7:
			writeq(val64, &bar0->tx_fifo_partition_3);
			break;
		}
	}
	/* Enable Tx FIFO partition 0. */
	val64 = readq(&bar0->tx_fifo_partition_0);
	val64 |= BIT(0);	/* To enable the FIFO partition. */
	writeq(val64, &bar0->tx_fifo_partition_0);

	/*
	 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
	 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
	 */
	if ((nic->device_type == XFRAME_I_DEVICE) &&
	    (get_xena_rev_id(nic->pdev) < 4))
		writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);

	val64 = readq(&bar0->tx_fifo_partition_0);
	DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
		  &bar0->tx_fifo_partition_0, (unsigned long long) val64);

	/*
	 * Initialization of Tx_PA_CONFIG register to ignore packet
	 * integrity checking.
	 */
	val64 = readq(&bar0->tx_pa_cfg);
	val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
	    TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
	writeq(val64, &bar0->tx_pa_cfg);
	/* Rx DMA initialization. */
	val64 = 0;
	for (i = 0; i < config->rx_ring_num; i++) {
		val64 |=
		    vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
			 3);
	}
	writeq(val64, &bar0->rx_queue_priority);
	/*
	 * Allocating equal share of memory to all the
	 * configured Rx rings.
	 */
	if (nic->device_type & XFRAME_II_DEVICE)
		mem_size = 32;
	else
		mem_size = 64;

	for (i = 0; i < config->rx_ring_num; i++) {
		switch (i) {
		case 0:
			mem_share = (mem_size / config->rx_ring_num +
				     mem_size % config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
			continue;
		case 1:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
			continue;
		case 2:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
			continue;
		case 3:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
			continue;
		case 4:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
			continue;
		case 5:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
			continue;
		case 6:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
			continue;
		case 7:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
			continue;
		}
	}
	writeq(val64, &bar0->rx_queue_cfg);
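
	/*
	 * Worked example (illustrative, assuming a 64-unit memory pool split
	 * across 3 rings): queue 0 is given 64/3 + 64%3 = 21 + 1 = 22 units,
	 * while queues 1 and 2 each get 64/3 = 21, so the remainder always
	 * lands in queue 0 and the whole pool is accounted for.
	 */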
	/*
	 * Filling Tx round robin registers
	 * as per the number of FIFOs
	 */
	switch (config->tx_fifo_num) {
	case 1:
		val64 = 0x0000000000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		writeq(val64, &bar0->tx_w_round_robin_1);
		writeq(val64, &bar0->tx_w_round_robin_2);
		writeq(val64, &bar0->tx_w_round_robin_3);
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 2:
		val64 = 0x0000010000010000ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0100000100000100ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0001000001000001ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0000010000010000ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0100000000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 3:
		val64 = 0x0001000102000001ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0001020000010001ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0200000100010200ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0001000102000001ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001020000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 4:
		val64 = 0x0001020300010200ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0100000102030001ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0200010000010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0001020001000001ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0203000100000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 5:
		val64 = 0x0001000203000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0001020001030004ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0001000203000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0001020001030004ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001000000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 6:
		val64 = 0x0001020304000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0304050001020001ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0203000100000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0304000102030405ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001000200000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 7:
		val64 = 0x0001020001020300ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0102030400010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0405060001020001ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0304050000010200ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0102030000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 8:
		val64 = 0x0001020300040105ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0200030106000204ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0103000502010007ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0304010002060500ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0103020400000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	}
	/* Filling the Rx round robin registers as per the
	 * number of Rings and steering based on QoS.
	 */
	switch (config->rx_ring_num) {
	case 1:
		val64 = 0x8080808080808080ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 2:
		val64 = 0x0000010000010000ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0100000100000100ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0001000001000001ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0000010000010000ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0100000000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080808040404040ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 3:
		val64 = 0x0001000102000001ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0001020000010001ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0200000100010200ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0001000102000001ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001020000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080804040402020ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 4:
		val64 = 0x0001020300010200ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0100000102030001ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0200010000010203ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0001020001000001ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0203000100000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080404020201010ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 5:
		val64 = 0x0001000203000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0001020001030004ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0001000203000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0001020001030004ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001000000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080404020201008ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 6:
		val64 = 0x0001020304000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0304050001020001ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0203000100000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0304000102030405ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001000200000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080404020100804ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 7:
		val64 = 0x0001020001020300ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0102030400010203ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0405060001020001ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0304050000010200ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0102030000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080402010080402ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 8:
		val64 = 0x0001020300040105ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0200030106000204ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0103000502010007ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0304010002060500ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0103020400000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8040201008040201ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	}
	/* Clear the RTS frame length registers first */
	val64 = 0;
	for (i = 0; i < 8; i++)
		writeq(val64, &bar0->rts_frm_len_n[i]);

	/* Set the default rts frame length for the rings configured */
	val64 = MAC_RTS_FRM_LEN_SET(dev->mtu + 22);
	for (i = 0 ; i < config->rx_ring_num ; i++)
		writeq(val64, &bar0->rts_frm_len_n[i]);

	/* Set the frame length for the configured rings
	 * desired by the user
	 */
	for (i = 0; i < config->rx_ring_num; i++) {
		/* If rts_frm_len[i] == 0 then it is assumed that the user has
		 * not specified frame length steering.
		 * If the user provides the frame length then program
		 * the rts_frm_len register for those values or else
		 * leave it as it is.
		 */
		if (rts_frm_len[i] != 0) {
			writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
			       &bar0->rts_frm_len_n[i]);
		}
	}
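
	/*
	 * Worked example (illustrative): with the default MTU of 1500 the
	 * value programmed above is MAC_RTS_FRM_LEN_SET(1522), i.e. the
	 * payload plus the 14-byte Ethernet header, an optional 4-byte VLAN
	 * tag and the 4-byte FCS.
	 */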
	/* Program statistics memory */
	writeq(mac_control->stats_mem_phy, &bar0->stat_addr);

	if (nic->device_type == XFRAME_II_DEVICE) {
		val64 = STAT_BC(0x320);
		writeq(val64, &bar0->stat_byte_cnt);
	}
	/*
	 * Initializing the sampling rate for the device to calculate the
	 * bandwidth utilization.
	 */
	val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
	    MAC_RX_LINK_UTIL_VAL(rmac_util_period);
	writeq(val64, &bar0->mac_link_util);
	/*
	 * Initializing the Transmit and Receive Traffic Interrupt
	 * Scheme.
	 */
	/*
	 * TTI Initialization. Default Tx timer gets us about
	 * 250 interrupts per sec. Continuous interrupts are enabled
	 * by default.
	 */
	if (nic->device_type == XFRAME_II_DEVICE) {
		int count = (nic->config.bus_speed * 125)/2;
		val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
	} else {
		val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
	}
	val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
	    TTI_DATA1_MEM_TX_URNG_B(0x10) |
	    TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
	if (use_continuous_tx_intrs)
		val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
	writeq(val64, &bar0->tti_data1_mem);

	val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
	    TTI_DATA2_MEM_TX_UFC_B(0x20) |
	    TTI_DATA2_MEM_TX_UFC_C(0x70) | TTI_DATA2_MEM_TX_UFC_D(0x80);
	writeq(val64, &bar0->tti_data2_mem);

	val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
	writeq(val64, &bar0->tti_command_mem);

	/*
	 * Once the operation completes, the Strobe bit of the command
	 * register will be reset. We poll for this particular condition.
	 * We wait for a maximum of 500ms for the operation to complete,
	 * if it's not complete by then we return error.
	 */
	time = 0;
	while (1) {
		val64 = readq(&bar0->tti_command_mem);
		if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
			break;
		}
		if (time > 10) {
			DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
				  dev->name);
			return -1;
		}
		time++;
		msleep(50);
	}

	if (nic->config.bimodal) {
		int k;
		for (k = 0; k < config->rx_ring_num; k++) {
			val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
			val64 |= TTI_CMD_MEM_OFFSET(0x38 + k);
			writeq(val64, &bar0->tti_command_mem);

			/*
			 * Once the operation completes, the Strobe bit of the
			 * command register will be reset. We poll for this
			 * particular condition. We wait for a maximum of 500ms
			 * for the operation to complete, if it's not complete
			 * by then we return error.
			 */
			time = 0;
			while (1) {
				val64 = readq(&bar0->tti_command_mem);
				if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
					break;
				}
				if (time > 10) {
					DBG_PRINT(ERR_DBG,
						  "%s: TTI init Failed\n",
						  dev->name);
					return -1;
				}
				time++;
				msleep(50);
			}
		}
	}
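
	/*
	 * Worked example (illustrative, assuming config.bus_speed is 133):
	 * the Xframe II branch above computes count = (133 * 125) / 2 = 8312,
	 * which is 0x2078, the same constant the Xena branch programs
	 * directly, i.e. roughly 250 Tx interrupts per second either way.
	 */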
	/* RTI Initialization */
	if (nic->device_type == XFRAME_II_DEVICE) {
		/*
		 * Programmed to generate Approx 500 Intrs per
		 * second
		 */
		int count = (nic->config.bus_speed * 125)/4;
		val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
	} else {
		val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
	}
	val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
	    RTI_DATA1_MEM_RX_URNG_B(0x10) |
	    RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;

	writeq(val64, &bar0->rti_data1_mem);

	val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
	    RTI_DATA2_MEM_RX_UFC_B(0x2);
	if (nic->intr_type == MSI_X)
		val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \
				RTI_DATA2_MEM_RX_UFC_D(0x40));
	else
		val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \
				RTI_DATA2_MEM_RX_UFC_D(0x80));
	writeq(val64, &bar0->rti_data2_mem);

	for (i = 0; i < config->rx_ring_num; i++) {
		val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
		    | RTI_CMD_MEM_OFFSET(i);
		writeq(val64, &bar0->rti_command_mem);

		/*
		 * Once the operation completes, the Strobe bit of the
		 * command register will be reset. We poll for this
		 * particular condition. We wait for a maximum of 500ms
		 * for the operation to complete, if it's not complete
		 * by then we return error.
		 */
		time = 0;
		while (1) {
			val64 = readq(&bar0->rti_command_mem);
			if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
				break;
			}
			if (time > 10) {
				DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
					  dev->name);
				return -1;
			}
			time++;
			msleep(50);
		}
	}
	/*
	 * Initializing proper values as Pause threshold into all
	 * the 8 Queues on Rx side.
	 */
	writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
	writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);

	/* Disable RMAC PAD STRIPPING */
	add = &bar0->mac_cfg;
	val64 = readq(&bar0->mac_cfg);
	val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) (val64), add);
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) (val64 >> 32), (add + 4));
	val64 = readq(&bar0->mac_cfg);
	/*
	 * Set the time value to be inserted in the pause frame
	 * generated by xena.
	 */
	val64 = readq(&bar0->rmac_pause_cfg);
	val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
	val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
	writeq(val64, &bar0->rmac_pause_cfg);
	/*
	 * Set the Threshold Limit for Generating the pause frame.
	 * If the amount of data in any Queue exceeds the ratio of
	 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256,
	 * a pause frame is generated.
	 */
	val64 = 0;
	for (i = 0; i < 4; i++) {
		val64 |=
		    (((u64) 0xFF00 | nic->mac_control.
		      mc_pause_threshold_q0q3)
		     << (i * 2 * 8));
	}
	writeq(val64, &bar0->mc_pause_thresh_q0q3);

	val64 = 0;
	for (i = 0; i < 4; i++) {
		val64 |=
		    (((u64) 0xFF00 | nic->mac_control.
		      mc_pause_threshold_q4q7)
		     << (i * 2 * 8));
	}
	writeq(val64, &bar0->mc_pause_thresh_q4q7);
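
	/*
	 * Worked example (illustrative): with the default module parameter
	 * value of 187 the per-queue threshold ratio is 187/256, so a pause
	 * frame is generated once a receive queue is roughly 73% full.
	 */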
	/*
	 * TxDMA will stop Read request if the number of read split has
	 * exceeded the limit pointed by shared_splits
	 */
	val64 = readq(&bar0->pic_control);
	val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
	writeq(val64, &bar0->pic_control);

	/*
	 * Programming the Herc to split every write transaction
	 * that does not start on an ADB to reduce disconnects.
	 */
	if (nic->device_type == XFRAME_II_DEVICE) {
		val64 = WREQ_SPLIT_MASK_SET_MASK(255);
		writeq(val64, &bar0->wreq_split_mask);
	}

	/* Setting Link stability period to 64 ms */
	if (nic->device_type == XFRAME_II_DEVICE) {
		val64 = MISC_LINK_STABILITY_PRD(3);
		writeq(val64, &bar0->misc_control);
	}

	return SUCCESS;
}
#define LINK_UP_DOWN_INTERRUPT		1
#define MAC_RMAC_ERR_TIMER		2
int s2io_link_fault_indication(nic_t *nic)
{
	if (nic->intr_type != INTA)
		return MAC_RMAC_ERR_TIMER;
	if (nic->device_type == XFRAME_II_DEVICE)
		return LINK_UP_DOWN_INTERRUPT;
	else
		return MAC_RMAC_ERR_TIMER;
}
/**
 * en_dis_able_nic_intrs - Enable or Disable the interrupts
 * @nic: device private variable,
 * @mask: A mask indicating which Intr block must be modified and,
 * @flag: A flag indicating whether to enable or disable the Intrs.
 * Description: This function will either disable or enable the interrupts
 * depending on the flag argument. The mask argument can be used to
 * enable/disable any Intr block.
 * Return Value: NONE.
 */

static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
{
	XENA_dev_config_t __iomem *bar0 = nic->bar0;
	register u64 val64 = 0, temp64 = 0;
	/* Top level interrupt classification */
	/* PIC Interrupts */
	if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
		/* Enable PIC Intrs in the general intr mask register */
		val64 = TXPIC_INT_M | PIC_RX_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * If it is a Hercules adapter enable GPIO, otherwise
			 * disable all PCIX, Flash, MDIO, IIC and GPIO
			 * interrupts for now.
			 */
			if (s2io_link_fault_indication(nic) ==
			    LINK_UP_DOWN_INTERRUPT) {
				temp64 = readq(&bar0->pic_int_mask);
				temp64 &= ~((u64) PIC_INT_GPIO);
				writeq(temp64, &bar0->pic_int_mask);
				temp64 = readq(&bar0->gpio_int_mask);
				temp64 &= ~((u64) GPIO_INT_MASK_LINK_UP);
				writeq(temp64, &bar0->gpio_int_mask);
			} else {
				writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
			}
			/*
			 * No MSI Support is available presently, so TTI and
			 * RTI interrupts are also disabled.
			 */
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable PIC Intrs in the general
			 * intr mask register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}
	/* DMA Interrupts */
	/* Enabling/Disabling Tx DMA interrupts */
	if (mask & TX_DMA_INTR) {
		/* Enable TxDMA Intrs in the general intr mask register */
		val64 = TXDMA_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * Keep all interrupts other than PFC interrupt
			 * and PCC interrupt disabled in DMA level.
			 */
			val64 = DISABLE_ALL_INTRS & ~(TXDMA_PFC_INT_M |
						      TXDMA_PCC_INT_M);
			writeq(val64, &bar0->txdma_int_mask);
			/*
			 * Enable only the MISC error 1 interrupt in PFC block
			 */
			val64 = DISABLE_ALL_INTRS & (~PFC_MISC_ERR_1);
			writeq(val64, &bar0->pfc_err_mask);
			/*
			 * Enable only the FB_ECC error interrupt in PCC block
			 */
			val64 = DISABLE_ALL_INTRS & (~PCC_FB_ECC_ERR);
			writeq(val64, &bar0->pcc_err_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable TxDMA Intrs in the general intr mask
			 * register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->txdma_int_mask);
			writeq(DISABLE_ALL_INTRS, &bar0->pfc_err_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}
	/* Enabling/Disabling Rx DMA interrupts */
	if (mask & RX_DMA_INTR) {
		/* Enable RxDMA Intrs in the general intr mask register */
		val64 = RXDMA_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * All RxDMA block interrupts are disabled for now
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable RxDMA Intrs in the general intr mask
			 * register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}
	/* MAC Interrupts */
	/* Enabling/Disabling MAC interrupts */
	if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
		val64 = TXMAC_INT_M | RXMAC_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * All MAC block error interrupts are disabled for now
			 */
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable MAC Intrs in the general intr mask register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
			writeq(DISABLE_ALL_INTRS,
			       &bar0->mac_rmac_err_mask);

			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}
	/* XGXS Interrupts */
	if (mask & (TX_XGXS_INTR | RX_XGXS_INTR)) {
		val64 = TXXGXS_INT_M | RXXGXS_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * All XGXS block error interrupts are disabled for now
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable XGXS Intrs in the general intr mask register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}
	/* Memory Controller(MC) interrupts */
	if (mask & MC_INTR) {
		val64 = MC_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * Enable all MC Intrs.
			 */
			writeq(0x0, &bar0->mc_int_mask);
			writeq(0x0, &bar0->mc_err_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable MC Intrs in the general intr mask register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->mc_int_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}
	/* Tx traffic interrupts */
	if (mask & TX_TRAFFIC_INTR) {
		val64 = TXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * Enable all the Tx side interrupts
			 * writing 0 Enables all 64 TX interrupt levels
			 */
			writeq(0x0, &bar0->tx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Tx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}
	/* Rx traffic interrupts */
	if (mask & RX_TRAFFIC_INTR) {
		val64 = RXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/* writing 0 Enables all 8 RX interrupt levels */
			writeq(0x0, &bar0->rx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Rx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}
}
static int check_prc_pcc_state(u64 val64, int flag, int rev_id, int herc)
{
	int ret = 0;

	if (flag == FALSE) {
		if ((!herc && (rev_id >= 4)) || herc) {
			if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) &&
			    ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
			     ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
				ret = 1;
			}
		} else {
			if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
			    ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
			     ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
				ret = 1;
			}
		}
	} else {
		if ((!herc && (rev_id >= 4)) || herc) {
			if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
			     ADAPTER_STATUS_RMAC_PCC_IDLE) &&
			    (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
			     ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
			      ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
				ret = 1;
			}
		} else {
			if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
			     ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
			    (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
			     ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
			      ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
				ret = 1;
			}
		}
	}

	return ret;
}
/**
 * verify_xena_quiescence - Checks whether the H/W is ready
 * @val64 : Value read from adapter status register.
 * @flag : indicates if the adapter enable bit was ever written once
 * before.
 * Description: Returns whether the H/W is ready to go or not. Depending
 * on whether adapter enable bit was written or not the comparison
 * differs and the calling function passes the input argument flag to
 * indicate this.
 * Return: 1 if Xena is quiescent
 *         0 if Xena is not quiescent
 */

static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag)
{
	int ret = 0, herc;
	u64 tmp64 = ~((u64) val64);
	int rev_id = get_xena_rev_id(sp->pdev);

	herc = (sp->device_type == XFRAME_II_DEVICE);
	if (!
	    (tmp64 &
	     (ADAPTER_STATUS_TDMA_READY | ADAPTER_STATUS_RDMA_READY |
	      ADAPTER_STATUS_PFC_READY | ADAPTER_STATUS_TMAC_BUF_EMPTY |
	      ADAPTER_STATUS_PIC_QUIESCENT | ADAPTER_STATUS_MC_DRAM_READY |
	      ADAPTER_STATUS_MC_QUEUES_READY | ADAPTER_STATUS_M_PLL_LOCK |
	      ADAPTER_STATUS_P_PLL_LOCK))) {
		ret = check_prc_pcc_state(val64, flag, rev_id, herc);
	}

	return ret;
}
/**
 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
 * @sp: Pointer to device specific structure
 * Description :
 * New procedure to clear mac address reading problems on Alpha platforms
 *
 */

void fix_mac_address(nic_t * sp)
{
	XENA_dev_config_t __iomem *bar0 = sp->bar0;
	u64 val64;
	int i = 0;

	while (fix_mac[i] != END_SIGN) {
		writeq(fix_mac[i++], &bar0->gpio_control);
		/* dummy read to flush the previous write */
		val64 = readq(&bar0->gpio_control);
	}
}
/**
 * start_nic - Turns the device on
 * @nic : device private variable.
 * Description:
 * This function actually turns the device on. Before this function is
 * called, all Registers are configured from their reset states
 * and shared memory is allocated but the NIC is still quiescent. On
 * calling this function, the device interrupts are cleared and the NIC is
 * literally switched on by writing into the adapter control register.
 * Return Value:
 * SUCCESS on success and -1 on failure.
 */

static int start_nic(struct s2io_nic *nic)
{
	XENA_dev_config_t __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	u16 interruptible;
	u16 subid, i;
	mac_info_t *mac_control;
	struct config_param *config;

	mac_control = &nic->mac_control;
	config = &nic->config;
	/* PRC Initialization and configuration */
	for (i = 0; i < config->rx_ring_num; i++) {
		writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
		       &bar0->prc_rxd0_n[i]);

		val64 = readq(&bar0->prc_ctrl_n[i]);
		if (nic->config.bimodal)
			val64 |= PRC_CTRL_BIMODAL_INTERRUPT;
#ifndef CONFIG_2BUFF_MODE
		val64 |= PRC_CTRL_RC_ENABLED;
#else
		val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
#endif
		writeq(val64, &bar0->prc_ctrl_n[i]);
	}

#ifdef CONFIG_2BUFF_MODE
	/* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
	val64 = readq(&bar0->rx_pa_cfg);
	val64 |= RX_PA_CFG_IGNORE_L2_ERR;
	writeq(val64, &bar0->rx_pa_cfg);
#endif
	/*
	 * Enabling MC-RLDRAM. After enabling the device, we timeout
	 * for around 100ms, which is approximately the time required
	 * for the device to be ready for operation.
	 */
	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
	val64 = readq(&bar0->mc_rldram_mrs);

	msleep(100);	/* Delay by around 100 ms. */

	/* Enabling ECC Protection. */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/*
	 * Clearing any possible Link state change interrupts that
	 * could have popped up just before Enabling the card.
	 */
	val64 = readq(&bar0->mac_rmac_err_reg);
	if (val64)
		writeq(val64, &bar0->mac_rmac_err_reg);

	/*
	 * Verify if the device is ready to be enabled, if so enable
	 * it.
	 */
	val64 = readq(&bar0->adapter_status);
	if (!verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
		DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
		DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
			  (unsigned long long) val64);
		return FAILURE;
	}
	/* Enable select interrupts */
	if (nic->intr_type != INTA)
		en_dis_able_nic_intrs(nic, ENA_ALL_INTRS, DISABLE_INTRS);
	else {
		interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
		interruptible |= TX_PIC_INTR | RX_PIC_INTR;
		interruptible |= TX_MAC_INTR | RX_MAC_INTR;
		en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS);
	}

	/*
	 * With some switches, link might be already up at this point.
	 * Because of this weird behavior, when we enable laser,
	 * we may not get link. We need to handle this. We cannot
	 * figure out which switch is misbehaving. So we are forced to
	 * make a global change.
	 */

	/* Enabling Laser. */
	val64 = readq(&bar0->adapter_control);
	val64 |= ADAPTER_EOI_TX_ON;
	writeq(val64, &bar0->adapter_control);

	/* SXE-002: Initialize link and activity LED */
	subid = nic->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (nic->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	/*
	 * Don't see link state interrupts on certain switches, so
	 * directly scheduling a link state task from here.
	 */
	schedule_work(&nic->set_link_task);

	return SUCCESS;
}
/**
 * free_tx_buffers - Free all queued Tx buffers
 * @nic : device private variable.
 * Description:
 * Free all queued Tx buffers.
 * Return Value: void
 */

static void free_tx_buffers(struct s2io_nic *nic)
{
	struct net_device *dev = nic->dev;
	struct sk_buff *skb;
	TxD_t *txdp;
	int i, j;
	mac_info_t *mac_control;
	struct config_param *config;
	int cnt = 0, frg_cnt;

	mac_control = &nic->mac_control;
	config = &nic->config;

	for (i = 0; i < config->tx_fifo_num; i++) {
		for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
			txdp = (TxD_t *) mac_control->fifos[i].list_info[j].
			    list_virt_addr;
			skb =
			    (struct sk_buff *) ((unsigned long) txdp->
						Host_Control);
			if (skb == NULL) {
				memset(txdp, 0, sizeof(TxD_t) *
				       config->max_txds);
				continue;
			}
			frg_cnt = skb_shinfo(skb)->nr_frags;
			pci_unmap_single(nic->pdev, (dma_addr_t)
					 txdp->Buffer_Pointer,
					 skb->len - skb->data_len,
					 PCI_DMA_TODEVICE);
			for (j = 0; j < frg_cnt; j++, txdp++) {
				skb_frag_t *frag =
				    &skb_shinfo(skb)->frags[j];
				pci_unmap_page(nic->pdev,
					       (dma_addr_t)
					       txdp->Buffer_Pointer,
					       frag->size,
					       PCI_DMA_TODEVICE);
			}
			dev_kfree_skb(skb);
			memset(txdp, 0, sizeof(TxD_t) * config->max_txds);
			cnt++;
		}
		DBG_PRINT(INTR_DBG,
			  "%s:forcibly freeing %d skbs on FIFO%d\n",
			  dev->name, cnt, i);
		mac_control->fifos[i].tx_curr_get_info.offset = 0;
		mac_control->fifos[i].tx_curr_put_info.offset = 0;
	}
}
/**
 * stop_nic - To stop the nic
 * @nic : device private variable.
 * Description:
 * This function does exactly the opposite of what the start_nic()
 * function does. This function is called to stop the device.
 * Return Value: void
 */

static void stop_nic(struct s2io_nic *nic)
{
	XENA_dev_config_t __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	u16 interruptible, i;
	mac_info_t *mac_control;
	struct config_param *config;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/* Disable all interrupts */
	interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
	interruptible |= TX_PIC_INTR | RX_PIC_INTR;
	interruptible |= TX_MAC_INTR | RX_MAC_INTR;
	en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);

	/* Disable the PRCs */
	for (i = 0; i < config->rx_ring_num; i++) {
		val64 = readq(&bar0->prc_ctrl_n[i]);
		val64 &= ~((u64) PRC_CTRL_RC_ENABLED);
		writeq(val64, &bar0->prc_ctrl_n[i]);
	}
}
/**
 *  fill_rx_buffers - Allocates the Rx side skbs
 *  @nic:  device private variable
 *  @ring_no: ring number
 *  Description:
 *  The function allocates Rx side skbs and puts the physical
 *  address of these buffers into the RxD buffer pointers, so that the NIC
 *  can DMA the received frame into these locations.
 *  The NIC supports 3 receive modes, viz
 *  1. single buffer,
 *  2. three buffer and
 *  3. Five buffer modes.
 *  Each mode defines how many fragments the received frame will be split
 *  up into by the NIC. The frame is split into L3 header, L4 Header,
 *  L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
 *  is split into 3 fragments. As of now only single buffer mode is
 *  supported.
 *  Return Value:
 *  SUCCESS on success or an appropriate -ve value on failure.
 */
2115 int fill_rx_buffers(struct s2io_nic
*nic
, int ring_no
)
2117 struct net_device
*dev
= nic
->dev
;
2118 struct sk_buff
*skb
;
2120 int off
, off1
, size
, block_no
, block_no1
;
2121 int offset
, offset1
;
2124 mac_info_t
*mac_control
;
2125 struct config_param
*config
;
2126 #ifdef CONFIG_2BUFF_MODE
2131 dma_addr_t rxdpphys
;
2133 #ifndef CONFIG_S2IO_NAPI
2134 unsigned long flags
;
2136 RxD_t
*first_rxdp
= NULL
;
2138 mac_control
= &nic
->mac_control
;
2139 config
= &nic
->config
;
2140 alloc_cnt
= mac_control
->rings
[ring_no
].pkt_cnt
-
2141 atomic_read(&nic
->rx_bufs_left
[ring_no
]);
2142 size
= dev
->mtu
+ HEADER_ETHERNET_II_802_3_SIZE
+
2143 HEADER_802_2_SIZE
+ HEADER_SNAP_SIZE
;
2145 while (alloc_tab
< alloc_cnt
) {
2146 block_no
= mac_control
->rings
[ring_no
].rx_curr_put_info
.
2148 block_no1
= mac_control
->rings
[ring_no
].rx_curr_get_info
.
2150 off
= mac_control
->rings
[ring_no
].rx_curr_put_info
.offset
;
2151 off1
= mac_control
->rings
[ring_no
].rx_curr_get_info
.offset
;
2152 #ifndef CONFIG_2BUFF_MODE
2153 offset
= block_no
* (MAX_RXDS_PER_BLOCK
+ 1) + off
;
2154 offset1
= block_no1
* (MAX_RXDS_PER_BLOCK
+ 1) + off1
;
2156 offset
= block_no
* (MAX_RXDS_PER_BLOCK
) + off
;
2157 offset1
= block_no1
* (MAX_RXDS_PER_BLOCK
) + off1
;
2160 rxdp
= mac_control
->rings
[ring_no
].rx_blocks
[block_no
].
2161 block_virt_addr
+ off
;
2162 if ((offset
== offset1
) && (rxdp
->Host_Control
)) {
2163 DBG_PRINT(INTR_DBG
, "%s: Get and Put", dev
->name
);
2164 DBG_PRINT(INTR_DBG
, " info equated\n");
2167 #ifndef CONFIG_2BUFF_MODE
2168 if (rxdp
->Control_1
== END_OF_BLOCK
) {
2169 mac_control
->rings
[ring_no
].rx_curr_put_info
.
2171 mac_control
->rings
[ring_no
].rx_curr_put_info
.
2172 block_index
%= mac_control
->rings
[ring_no
].block_count
;
2173 block_no
= mac_control
->rings
[ring_no
].rx_curr_put_info
.
2176 off
%= (MAX_RXDS_PER_BLOCK
+ 1);
2177 mac_control
->rings
[ring_no
].rx_curr_put_info
.offset
=
2179 rxdp
= (RxD_t
*) ((unsigned long) rxdp
->Control_2
);
2180 DBG_PRINT(INTR_DBG
, "%s: Next block at: %p\n",
2183 #ifndef CONFIG_S2IO_NAPI
2184 spin_lock_irqsave(&nic
->put_lock
, flags
);
2185 mac_control
->rings
[ring_no
].put_pos
=
2186 (block_no
* (MAX_RXDS_PER_BLOCK
+ 1)) + off
;
2187 spin_unlock_irqrestore(&nic
->put_lock
, flags
);
2190 if (rxdp
->Host_Control
== END_OF_BLOCK
) {
2191 mac_control
->rings
[ring_no
].rx_curr_put_info
.
2193 mac_control
->rings
[ring_no
].rx_curr_put_info
.block_index
2194 %= mac_control
->rings
[ring_no
].block_count
;
2195 block_no
= mac_control
->rings
[ring_no
].rx_curr_put_info
2198 DBG_PRINT(INTR_DBG
, "%s: block%d at: 0x%llx\n",
2199 dev
->name
, block_no
,
2200 (unsigned long long) rxdp
->Control_1
);
2201 mac_control
->rings
[ring_no
].rx_curr_put_info
.offset
=
2203 rxdp
= mac_control
->rings
[ring_no
].rx_blocks
[block_no
].
2206 #ifndef CONFIG_S2IO_NAPI
2207 spin_lock_irqsave(&nic
->put_lock
, flags
);
2208 mac_control
->rings
[ring_no
].put_pos
= (block_no
*
2209 (MAX_RXDS_PER_BLOCK
+ 1)) + off
;
2210 spin_unlock_irqrestore(&nic
->put_lock
, flags
);
2214 #ifndef CONFIG_2BUFF_MODE
2215 if (rxdp
->Control_1
& RXD_OWN_XENA
)
2217 if (rxdp
->Control_2
& BIT(0))
2220 mac_control
->rings
[ring_no
].rx_curr_put_info
.
#ifdef CONFIG_2BUFF_MODE
		/*
		 * RxDs spanning cache lines will be replenished only
		 * if the succeeding RxD is also owned by Host. It
		 * will always be the ((8*i)+3) and ((8*i)+6)
		 * descriptors for the 48 byte descriptor. The offending
		 * descriptor is of course the 3rd descriptor.
		 */
2232 rxdpphys
= mac_control
->rings
[ring_no
].rx_blocks
[block_no
].
2233 block_dma_addr
+ (off
* sizeof(RxD_t
));
2234 if (((u64
) (rxdpphys
)) % 128 > 80) {
2235 rxdpnext
= mac_control
->rings
[ring_no
].rx_blocks
[block_no
].
2236 block_virt_addr
+ (off
+ 1);
2237 if (rxdpnext
->Host_Control
== END_OF_BLOCK
) {
2238 nextblk
= (block_no
+ 1) %
2239 (mac_control
->rings
[ring_no
].block_count
);
2240 rxdpnext
= mac_control
->rings
[ring_no
].rx_blocks
2241 [nextblk
].block_virt_addr
;
2243 if (rxdpnext
->Control_2
& BIT(0))
2248 #ifndef CONFIG_2BUFF_MODE
2249 skb
= dev_alloc_skb(size
+ NET_IP_ALIGN
);
2251 skb
= dev_alloc_skb(dev
->mtu
+ ALIGN_SIZE
+ BUF0_LEN
+ 4);
2254 DBG_PRINT(ERR_DBG
, "%s: Out of ", dev
->name
);
2255 DBG_PRINT(ERR_DBG
, "memory to allocate SKBs\n");
2258 first_rxdp
->Control_1
|= RXD_OWN_XENA
;
2262 #ifndef CONFIG_2BUFF_MODE
2263 skb_reserve(skb
, NET_IP_ALIGN
);
2264 memset(rxdp
, 0, sizeof(RxD_t
));
2265 rxdp
->Buffer0_ptr
= pci_map_single
2266 (nic
->pdev
, skb
->data
, size
, PCI_DMA_FROMDEVICE
);
2267 rxdp
->Control_2
&= (~MASK_BUFFER0_SIZE
);
2268 rxdp
->Control_2
|= SET_BUFFER0_SIZE(size
);
2269 rxdp
->Host_Control
= (unsigned long) (skb
);
2270 if (alloc_tab
& ((1 << rxsync_frequency
) - 1))
2271 rxdp
->Control_1
|= RXD_OWN_XENA
;
2273 off
%= (MAX_RXDS_PER_BLOCK
+ 1);
2274 mac_control
->rings
[ring_no
].rx_curr_put_info
.offset
= off
;
2276 ba
= &mac_control
->rings
[ring_no
].ba
[block_no
][off
];
2277 skb_reserve(skb
, BUF0_LEN
);
2278 tmp
= ((unsigned long) skb
->data
& ALIGN_SIZE
);
2280 skb_reserve(skb
, (ALIGN_SIZE
+ 1) - tmp
);
2282 memset(rxdp
, 0, sizeof(RxD_t
));
2283 rxdp
->Buffer2_ptr
= pci_map_single
2284 (nic
->pdev
, skb
->data
, dev
->mtu
+ BUF0_LEN
+ 4,
2285 PCI_DMA_FROMDEVICE
);
2287 pci_map_single(nic
->pdev
, ba
->ba_0
, BUF0_LEN
,
2288 PCI_DMA_FROMDEVICE
);
2290 pci_map_single(nic
->pdev
, ba
->ba_1
, BUF1_LEN
,
2291 PCI_DMA_FROMDEVICE
);
2293 rxdp
->Control_2
= SET_BUFFER2_SIZE(dev
->mtu
+ 4);
2294 rxdp
->Control_2
|= SET_BUFFER0_SIZE(BUF0_LEN
);
2295 rxdp
->Control_2
|= SET_BUFFER1_SIZE(1); /* dummy. */
2296 rxdp
->Control_2
|= BIT(0); /* Set Buffer_Empty bit. */
2297 rxdp
->Host_Control
= (u64
) ((unsigned long) (skb
));
2298 if (alloc_tab
& ((1 << rxsync_frequency
) - 1))
2299 rxdp
->Control_1
|= RXD_OWN_XENA
;
2301 mac_control
->rings
[ring_no
].rx_curr_put_info
.offset
= off
;
2303 rxdp
->Control_2
|= SET_RXD_MARKER
;
2305 if (!(alloc_tab
& ((1 << rxsync_frequency
) - 1))) {
2308 first_rxdp
->Control_1
|= RXD_OWN_XENA
;
2312 atomic_inc(&nic
->rx_bufs_left
[ring_no
]);
	/* Transfer ownership of first descriptor to adapter just before
	 * exiting. Before that, use memory barrier so that ownership
	 * and other fields are seen by adapter correctly.
	 */
	if (first_rxdp) {
		wmb();
		first_rxdp->Control_1 |= RXD_OWN_XENA;
	}

	return SUCCESS;
}
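
/*
 * Illustrative sketch (not part of the driver): in one-buffer mode the code
 * above flattens a (block, offset) pair into a single descriptor index as
 * block_no * (MAX_RXDS_PER_BLOCK + 1) + off, and wraps the offset with a
 * modulo when a block boundary is crossed.  The constant below is a
 * placeholder chosen only to show the arithmetic.
 */
#if 0	/* example only, never compiled */
#include <stdio.h>

#define EX_RXDS_PER_BLOCK	127	/* stands in for MAX_RXDS_PER_BLOCK */

int main(void)
{
	int block_no = 2, off = 130;

	off %= (EX_RXDS_PER_BLOCK + 1);		/* wrap inside the block */
	printf("linear index = %d\n",
	       block_no * (EX_RXDS_PER_BLOCK + 1) + off);
	return 0;
}
#endif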
/**
 *  free_rx_buffers - Frees all Rx buffers
 *  @sp: device private variable.
 *  Description:
 *  This function will free all Rx buffers allocated by host.
 *  Return Value:
 *  NONE.
 */
2338 static void free_rx_buffers(struct s2io_nic
*sp
)
2340 struct net_device
*dev
= sp
->dev
;
2341 int i
, j
, blk
= 0, off
, buf_cnt
= 0;
2343 struct sk_buff
*skb
;
2344 mac_info_t
*mac_control
;
2345 struct config_param
*config
;
2346 #ifdef CONFIG_2BUFF_MODE
2350 mac_control
= &sp
->mac_control
;
2351 config
= &sp
->config
;
2353 for (i
= 0; i
< config
->rx_ring_num
; i
++) {
2354 for (j
= 0, blk
= 0; j
< config
->rx_cfg
[i
].num_rxd
; j
++) {
2355 off
= j
% (MAX_RXDS_PER_BLOCK
+ 1);
2356 rxdp
= mac_control
->rings
[i
].rx_blocks
[blk
].
2357 block_virt_addr
+ off
;
2359 #ifndef CONFIG_2BUFF_MODE
2360 if (rxdp
->Control_1
== END_OF_BLOCK
) {
2362 (RxD_t
*) ((unsigned long) rxdp
->
2368 if (rxdp
->Host_Control
== END_OF_BLOCK
) {
2374 if (!(rxdp
->Control_1
& RXD_OWN_XENA
)) {
2375 memset(rxdp
, 0, sizeof(RxD_t
));
2380 (struct sk_buff
*) ((unsigned long) rxdp
->
2383 #ifndef CONFIG_2BUFF_MODE
2384 pci_unmap_single(sp
->pdev
, (dma_addr_t
)
2387 HEADER_ETHERNET_II_802_3_SIZE
2388 + HEADER_802_2_SIZE
+
2390 PCI_DMA_FROMDEVICE
);
2392 ba
= &mac_control
->rings
[i
].ba
[blk
][off
];
2393 pci_unmap_single(sp
->pdev
, (dma_addr_t
)
2396 PCI_DMA_FROMDEVICE
);
2397 pci_unmap_single(sp
->pdev
, (dma_addr_t
)
2400 PCI_DMA_FROMDEVICE
);
2401 pci_unmap_single(sp
->pdev
, (dma_addr_t
)
2403 dev
->mtu
+ BUF0_LEN
+ 4,
2404 PCI_DMA_FROMDEVICE
);
2407 atomic_dec(&sp
->rx_bufs_left
[i
]);
2410 memset(rxdp
, 0, sizeof(RxD_t
));
2412 mac_control
->rings
[i
].rx_curr_put_info
.block_index
= 0;
2413 mac_control
->rings
[i
].rx_curr_get_info
.block_index
= 0;
2414 mac_control
->rings
[i
].rx_curr_put_info
.offset
= 0;
2415 mac_control
->rings
[i
].rx_curr_get_info
.offset
= 0;
2416 atomic_set(&sp
->rx_bufs_left
[i
], 0);
2417 DBG_PRINT(INIT_DBG
, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2418 dev
->name
, buf_cnt
, i
);
/**
 * s2io_poll - Rx interrupt handler for NAPI support
 * @dev : pointer to the device structure.
 * @budget : The number of packets that were budgeted to be processed
 * during one pass through the 'Poll' function.
 * Description:
 * Comes into picture only if NAPI support has been incorporated. It does
 * the same thing that rx_intr_handler does, but not in an interrupt context.
 * Also it will process only a given number of packets.
 * Return value:
 * 0 on success and 1 if there are no Rx packets to be processed.
 */
2435 #if defined(CONFIG_S2IO_NAPI)
2436 static int s2io_poll(struct net_device
*dev
, int *budget
)
2438 nic_t
*nic
= dev
->priv
;
2439 int pkt_cnt
= 0, org_pkts_to_process
;
2440 mac_info_t
*mac_control
;
2441 struct config_param
*config
;
2442 XENA_dev_config_t __iomem
*bar0
= nic
->bar0
;
2446 atomic_inc(&nic
->isr_cnt
);
2447 mac_control
= &nic
->mac_control
;
2448 config
= &nic
->config
;
2450 nic
->pkts_to_process
= *budget
;
2451 if (nic
->pkts_to_process
> dev
->quota
)
2452 nic
->pkts_to_process
= dev
->quota
;
2453 org_pkts_to_process
= nic
->pkts_to_process
;
2455 val64
= readq(&bar0
->rx_traffic_int
);
2456 writeq(val64
, &bar0
->rx_traffic_int
);
2458 for (i
= 0; i
< config
->rx_ring_num
; i
++) {
2459 rx_intr_handler(&mac_control
->rings
[i
]);
2460 pkt_cnt
= org_pkts_to_process
- nic
->pkts_to_process
;
2461 if (!nic
->pkts_to_process
) {
2462 /* Quota for the current iteration has been met */
2469 dev
->quota
-= pkt_cnt
;
2471 netif_rx_complete(dev
);
2473 for (i
= 0; i
< config
->rx_ring_num
; i
++) {
2474 if (fill_rx_buffers(nic
, i
) == -ENOMEM
) {
2475 DBG_PRINT(ERR_DBG
, "%s:Out of memory", dev
->name
);
2476 DBG_PRINT(ERR_DBG
, " in Rx Poll!!\n");
2480 /* Re enable the Rx interrupts. */
2481 en_dis_able_nic_intrs(nic
, RX_TRAFFIC_INTR
, ENABLE_INTRS
);
2482 atomic_dec(&nic
->isr_cnt
);
	dev->quota -= pkt_cnt;
	*budget -= pkt_cnt;

	for (i = 0; i < config->rx_ring_num; i++) {
		if (fill_rx_buffers(nic, i) == -ENOMEM) {
			DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
			DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
			break;
		}
	}
	atomic_dec(&nic->isr_cnt);
	return 1;
}
#endif
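
/*
 * Illustrative sketch (not part of the driver): the poll routine above clamps
 * its work to min(*budget, quota), counts what it actually processed and
 * subtracts that from both counters before deciding whether to stop polling.
 * The snippet below shows only that bookkeeping; process_one_packet() is a
 * made-up placeholder, not a kernel or driver function.
 */
#if 0	/* example only, never compiled */
static int poll_budget_sketch(int *budget, int quota, int backlog)
{
	int to_process = (*budget < quota) ? *budget : quota;
	int done = 0;

	while (done < to_process && backlog > 0) {
		/* process_one_packet(); */
		backlog--;
		done++;
	}

	*budget -= done;
	quota -= done;

	return (backlog == 0) ? 0 : 1;	/* 0: done, 1: poll me again */
}
#endif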
/**
 *  rx_intr_handler - Rx interrupt handler
 *  @nic: device private variable.
 *  Description:
 *  If the interrupt is because of a received frame or if the
 *  receive ring contains fresh as yet un-processed frames, this function is
 *  called. It picks out the RxD at which place the last Rx processing had
 *  stopped and sends the skb to the OSM's Rx handler and then increments
 *  the offset.
 *  Return Value:
 *  NONE.
 */
2513 static void rx_intr_handler(ring_info_t
*ring_data
)
2515 nic_t
*nic
= ring_data
->nic
;
2516 struct net_device
*dev
= (struct net_device
*) nic
->dev
;
2517 int get_block
, get_offset
, put_block
, put_offset
, ring_bufs
;
2518 rx_curr_get_info_t get_info
, put_info
;
2520 struct sk_buff
*skb
;
2521 #ifndef CONFIG_S2IO_NAPI
2524 spin_lock(&nic
->rx_lock
);
2525 if (atomic_read(&nic
->card_state
) == CARD_DOWN
) {
2526 DBG_PRINT(INTR_DBG
, "%s: %s going down for reset\n",
2527 __FUNCTION__
, dev
->name
);
2528 spin_unlock(&nic
->rx_lock
);
2532 get_info
= ring_data
->rx_curr_get_info
;
2533 get_block
= get_info
.block_index
;
2534 put_info
= ring_data
->rx_curr_put_info
;
2535 put_block
= put_info
.block_index
;
2536 ring_bufs
= get_info
.ring_len
+1;
2537 rxdp
= ring_data
->rx_blocks
[get_block
].block_virt_addr
+
2539 get_offset
= (get_block
* (MAX_RXDS_PER_BLOCK
+ 1)) +
2541 #ifndef CONFIG_S2IO_NAPI
2542 spin_lock(&nic
->put_lock
);
2543 put_offset
= ring_data
->put_pos
;
2544 spin_unlock(&nic
->put_lock
);
2546 put_offset
= (put_block
* (MAX_RXDS_PER_BLOCK
+ 1)) +
2549 while (RXD_IS_UP2DT(rxdp
) &&
2550 (((get_offset
+ 1) % ring_bufs
) != put_offset
)) {
2551 skb
= (struct sk_buff
*) ((unsigned long)rxdp
->Host_Control
);
2553 DBG_PRINT(ERR_DBG
, "%s: The skb is ",
2555 DBG_PRINT(ERR_DBG
, "Null in Rx Intr\n");
2556 spin_unlock(&nic
->rx_lock
);
2559 #ifndef CONFIG_2BUFF_MODE
2560 pci_unmap_single(nic
->pdev
, (dma_addr_t
)
2563 HEADER_ETHERNET_II_802_3_SIZE
+
2566 PCI_DMA_FROMDEVICE
);
2568 pci_unmap_single(nic
->pdev
, (dma_addr_t
)
2570 BUF0_LEN
, PCI_DMA_FROMDEVICE
);
2571 pci_unmap_single(nic
->pdev
, (dma_addr_t
)
2573 BUF1_LEN
, PCI_DMA_FROMDEVICE
);
2574 pci_unmap_single(nic
->pdev
, (dma_addr_t
)
2576 dev
->mtu
+ BUF0_LEN
+ 4,
2577 PCI_DMA_FROMDEVICE
);
2579 rx_osm_handler(ring_data
, rxdp
);
2581 ring_data
->rx_curr_get_info
.offset
=
2583 rxdp
= ring_data
->rx_blocks
[get_block
].block_virt_addr
+
2585 if (get_info
.offset
&&
2586 (!(get_info
.offset
% MAX_RXDS_PER_BLOCK
))) {
2587 get_info
.offset
= 0;
2588 ring_data
->rx_curr_get_info
.offset
2591 get_block
%= ring_data
->block_count
;
2592 ring_data
->rx_curr_get_info
.block_index
2594 rxdp
= ring_data
->rx_blocks
[get_block
].block_virt_addr
;
2597 get_offset
= (get_block
* (MAX_RXDS_PER_BLOCK
+ 1)) +
2599 #ifdef CONFIG_S2IO_NAPI
2600 nic
->pkts_to_process
-= 1;
2601 if (!nic
->pkts_to_process
)
2605 if ((indicate_max_pkts
) && (pkt_cnt
> indicate_max_pkts
))
2609 spin_unlock(&nic
->rx_lock
);
/**
 *  tx_intr_handler - Transmit interrupt handler
 *  @nic : device private variable
 *  Description:
 *  If an interrupt was raised to indicate DMA complete of the
 *  Tx packet, this function is called. It identifies the last TxD
 *  whose buffer was freed and frees all skbs whose data have already
 *  been DMA'ed into the NIC's internal memory.
 *  Return Value:
 *  NONE.
 */
2624 static void tx_intr_handler(fifo_info_t
*fifo_data
)
2626 nic_t
*nic
= fifo_data
->nic
;
2627 struct net_device
*dev
= (struct net_device
*) nic
->dev
;
2628 tx_curr_get_info_t get_info
, put_info
;
2629 struct sk_buff
*skb
;
2633 get_info
= fifo_data
->tx_curr_get_info
;
2634 put_info
= fifo_data
->tx_curr_put_info
;
2635 txdlp
= (TxD_t
*) fifo_data
->list_info
[get_info
.offset
].
2637 while ((!(txdlp
->Control_1
& TXD_LIST_OWN_XENA
)) &&
2638 (get_info
.offset
!= put_info
.offset
) &&
2639 (txdlp
->Host_Control
)) {
2640 /* Check for TxD errors */
2641 if (txdlp
->Control_1
& TXD_T_CODE
) {
2642 unsigned long long err
;
2643 err
= txdlp
->Control_1
& TXD_T_CODE
;
2644 if ((err
>> 48) == 0xA) {
2645 DBG_PRINT(TX_DBG
, "TxD returned due \
2646 to loss of link\n");
2649 DBG_PRINT(ERR_DBG
, "***TxD error \
2654 skb
= (struct sk_buff
*) ((unsigned long)
2655 txdlp
->Host_Control
);
2657 DBG_PRINT(ERR_DBG
, "%s: Null skb ",
2659 DBG_PRINT(ERR_DBG
, "in Tx Free Intr\n");
2663 frg_cnt
= skb_shinfo(skb
)->nr_frags
;
2664 nic
->tx_pkt_count
++;
2666 pci_unmap_single(nic
->pdev
, (dma_addr_t
)
2667 txdlp
->Buffer_Pointer
,
2668 skb
->len
- skb
->data_len
,
2674 for (j
= 0; j
< frg_cnt
; j
++, txdlp
++) {
2676 &skb_shinfo(skb
)->frags
[j
];
2677 if (!txdlp
->Buffer_Pointer
)
2679 pci_unmap_page(nic
->pdev
,
2689 (sizeof(TxD_t
) * fifo_data
->max_txds
));
2691 /* Updating the statistics block */
2692 nic
->stats
.tx_bytes
+= skb
->len
;
2693 dev_kfree_skb_irq(skb
);
2696 get_info
.offset
%= get_info
.fifo_len
+ 1;
2697 txdlp
= (TxD_t
*) fifo_data
->list_info
2698 [get_info
.offset
].list_virt_addr
;
2699 fifo_data
->tx_curr_get_info
.offset
=
	spin_lock(&nic->tx_lock);
	if (netif_queue_stopped(dev))
		netif_wake_queue(dev);
	spin_unlock(&nic->tx_lock);
}
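
/*
 * Illustrative sketch (not part of the driver): the Tx completion path above
 * walks the descriptor list from the "get" index, reclaiming entries the NIC
 * no longer owns until it meets the "put" index, then advances "get" modulo
 * the list length.  Everything below is a simplified stand-in; "owned_by_nic"
 * replaces the real TXD_LIST_OWN_XENA test.
 */
#if 0	/* example only, never compiled */
struct ex_txd {
	int owned_by_nic;	/* stands in for Control_1 & TXD_LIST_OWN_XENA */
	void *skb;		/* stands in for Host_Control */
};

static int reclaim_sketch(struct ex_txd *list, int len, int get, int put)
{
	while (!list[get].owned_by_nic && get != put && list[get].skb) {
		list[get].skb = 0;		/* free the buffer */
		get = (get + 1) % len;		/* advance modulo list size */
	}
	return get;				/* new tx_curr_get_info.offset */
}
#endif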
/**
 *  alarm_intr_handler - Alarm Interrupt handler
 *  @nic: device private variable
 *  Description: If the interrupt was neither because of Rx packet or Tx
 *  complete, this function is called. If the interrupt was to indicate
 *  a loss of link, the OSM link status handler is invoked. For any other
 *  alarm interrupt, the block that raised the interrupt is displayed
 *  and a H/W reset is issued.
 *  Return Value:
 *  NONE
 */
2721 static void alarm_intr_handler(struct s2io_nic
*nic
)
2723 struct net_device
*dev
= (struct net_device
*) nic
->dev
;
2724 XENA_dev_config_t __iomem
*bar0
= nic
->bar0
;
2725 register u64 val64
= 0, err_reg
= 0;
2727 /* Handling link status change error Intr */
2728 if (s2io_link_fault_indication(nic
) == MAC_RMAC_ERR_TIMER
) {
2729 err_reg
= readq(&bar0
->mac_rmac_err_reg
);
2730 writeq(err_reg
, &bar0
->mac_rmac_err_reg
);
2731 if (err_reg
& RMAC_LINK_STATE_CHANGE_INT
) {
2732 schedule_work(&nic
->set_link_task
);
2736 /* Handling Ecc errors */
2737 val64
= readq(&bar0
->mc_err_reg
);
2738 writeq(val64
, &bar0
->mc_err_reg
);
2739 if (val64
& (MC_ERR_REG_ECC_ALL_SNG
| MC_ERR_REG_ECC_ALL_DBL
)) {
2740 if (val64
& MC_ERR_REG_ECC_ALL_DBL
) {
2741 nic
->mac_control
.stats_info
->sw_stat
.
2743 DBG_PRINT(INIT_DBG
, "%s: Device indicates ",
2745 DBG_PRINT(INIT_DBG
, "double ECC error!!\n");
2746 if (nic
->device_type
!= XFRAME_II_DEVICE
) {
2747 /* Reset XframeI only if critical error */
2748 if (val64
& (MC_ERR_REG_MIRI_ECC_DB_ERR_0
|
2749 MC_ERR_REG_MIRI_ECC_DB_ERR_1
)) {
2750 netif_stop_queue(dev
);
2751 schedule_work(&nic
->rst_timer_task
);
2755 nic
->mac_control
.stats_info
->sw_stat
.
2760 /* In case of a serious error, the device will be Reset. */
2761 val64
= readq(&bar0
->serr_source
);
2762 if (val64
& SERR_SOURCE_ANY
) {
2763 DBG_PRINT(ERR_DBG
, "%s: Device indicates ", dev
->name
);
2764 DBG_PRINT(ERR_DBG
, "serious error %llx!!\n",
2765 (unsigned long long)val64
);
2766 netif_stop_queue(dev
);
2767 schedule_work(&nic
->rst_timer_task
);
2771 * Also as mentioned in the latest Errata sheets if the PCC_FB_ECC
2772 * Error occurs, the adapter will be recycled by disabling the
2773 * adapter enable bit and enabling it again after the device
2774 * becomes Quiescent.
2776 val64
= readq(&bar0
->pcc_err_reg
);
2777 writeq(val64
, &bar0
->pcc_err_reg
);
2778 if (val64
& PCC_FB_ECC_DB_ERR
) {
2779 u64 ac
= readq(&bar0
->adapter_control
);
2780 ac
&= ~(ADAPTER_CNTL_EN
);
2781 writeq(ac
, &bar0
->adapter_control
);
2782 ac
= readq(&bar0
->adapter_control
);
2783 schedule_work(&nic
->set_link_task
);
2786 /* Other type of interrupts are not being handled now, TODO */
/**
 *  wait_for_cmd_complete - waits for a command to complete.
 *  @sp : private member of the device structure, which is a pointer to the
 *  s2io_nic structure.
 *  Description: Function that waits for a command to Write into RMAC
 *  ADDR DATA registers to be completed and returns either success or
 *  error depending on whether the command was complete or not.
 *  Return value:
 *  SUCCESS on success and FAILURE on failure.
 */

int wait_for_cmd_complete(nic_t * sp)
{
	XENA_dev_config_t __iomem *bar0 = sp->bar0;
	int ret = FAILURE, cnt = 0;
	u64 val64;

	while (TRUE) {
		val64 = readq(&bar0->rmac_addr_cmd_mem);
		if (!(val64 & RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
			ret = SUCCESS;
			break;
		}
		msleep(50);
		if (cnt++ > 10)
			break;
	}

	return ret;
}
/**
 *  s2io_reset - Resets the card.
 *  @sp : private member of the device structure.
 *  Description: Function to Reset the card. This function then also
 *  restores the previously saved PCI configuration space registers as
 *  the card reset also resets the configuration space.
 *  Return value:
 *  void.
 */
2830 void s2io_reset(nic_t
* sp
)
2832 XENA_dev_config_t __iomem
*bar0
= sp
->bar0
;
2836 /* Back up the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
2837 pci_read_config_word(sp
->pdev
, PCIX_COMMAND_REGISTER
, &(pci_cmd
));
2839 val64
= SW_RESET_ALL
;
2840 writeq(val64
, &bar0
->sw_reset
);
2843 * At this stage, if the PCI write is indeed completed, the
2844 * card is reset and so is the PCI Config space of the device.
2845 * So a read cannot be issued at this stage on any of the
2846 * registers to ensure the write into "sw_reset" register
2848 * Question: Is there any system call that will explicitly force
2849 * all the write commands still pending on the bus to be pushed
2851 * As of now I'am just giving a 250ms delay and hoping that the
2852 * PCI write to sw_reset register is done by this time.
2856 /* Restore the PCI state saved during initialization. */
2857 pci_restore_state(sp
->pdev
);
2858 pci_write_config_word(sp
->pdev
, PCIX_COMMAND_REGISTER
,
2864 /* Set swapper to enable I/O register access */
2865 s2io_set_swapper(sp
);
2867 /* Restore the MSIX table entries from local variables */
2868 restore_xmsi_data(sp
);
2870 /* Clear certain PCI/PCI-X fields after reset */
2871 if (sp
->device_type
== XFRAME_II_DEVICE
) {
2872 /* Clear parity err detect bit */
2873 pci_write_config_word(sp
->pdev
, PCI_STATUS
, 0x8000);
2875 /* Clearing PCIX Ecc status register */
2876 pci_write_config_dword(sp
->pdev
, 0x68, 0x7C);
2878 /* Clearing PCI_STATUS error reflected here */
2879 writeq(BIT(62), &bar0
->txpic_int_reg
);
2882 /* Reset device statistics maintained by OS */
2883 memset(&sp
->stats
, 0, sizeof (struct net_device_stats
));
2885 /* SXE-002: Configure link and activity LED to turn it off */
2886 subid
= sp
->pdev
->subsystem_device
;
2887 if (((subid
& 0xFF) >= 0x07) &&
2888 (sp
->device_type
== XFRAME_I_DEVICE
)) {
2889 val64
= readq(&bar0
->gpio_control
);
2890 val64
|= 0x0000800000000000ULL
;
2891 writeq(val64
, &bar0
->gpio_control
);
2892 val64
= 0x0411040400000000ULL
;
2893 writeq(val64
, (void __iomem
*)bar0
+ 0x2700);
2897 * Clear spurious ECC interrupts that would have occured on
2898 * XFRAME II cards after reset.
2900 if (sp
->device_type
== XFRAME_II_DEVICE
) {
2901 val64
= readq(&bar0
->pcc_err_reg
);
2902 writeq(val64
, &bar0
->pcc_err_reg
);
2905 sp
->device_enabled_once
= FALSE
;
/**
 *  s2io_set_swapper - to set the swapper control on the card
 *  @sp : private member of the device structure,
 *  pointer to the s2io_nic structure.
 *  Description: Function to set the swapper control on the card
 *  correctly depending on the 'endianness' of the system.
 *  Return value:
 *  SUCCESS on success and FAILURE on failure.
 */
2918 int s2io_set_swapper(nic_t
* sp
)
2920 struct net_device
*dev
= sp
->dev
;
2921 XENA_dev_config_t __iomem
*bar0
= sp
->bar0
;
2922 u64 val64
, valt
, valr
;
2925 * Set proper endian settings and verify the same by reading
2926 * the PIF Feed-back register.
2929 val64
= readq(&bar0
->pif_rd_swapper_fb
);
2930 if (val64
!= 0x0123456789ABCDEFULL
) {
2932 u64 value
[] = { 0xC30000C3C30000C3ULL
, /* FE=1, SE=1 */
2933 0x8100008181000081ULL
, /* FE=1, SE=0 */
2934 0x4200004242000042ULL
, /* FE=0, SE=1 */
2935 0}; /* FE=0, SE=0 */
2938 writeq(value
[i
], &bar0
->swapper_ctrl
);
2939 val64
= readq(&bar0
->pif_rd_swapper_fb
);
2940 if (val64
== 0x0123456789ABCDEFULL
)
2945 DBG_PRINT(ERR_DBG
, "%s: Endian settings are wrong, ",
2947 DBG_PRINT(ERR_DBG
, "feedback read %llx\n",
2948 (unsigned long long) val64
);
2953 valr
= readq(&bar0
->swapper_ctrl
);
2956 valt
= 0x0123456789ABCDEFULL
;
2957 writeq(valt
, &bar0
->xmsi_address
);
2958 val64
= readq(&bar0
->xmsi_address
);
2962 u64 value
[] = { 0x00C3C30000C3C300ULL
, /* FE=1, SE=1 */
2963 0x0081810000818100ULL
, /* FE=1, SE=0 */
2964 0x0042420000424200ULL
, /* FE=0, SE=1 */
2965 0}; /* FE=0, SE=0 */
2968 writeq((value
[i
] | valr
), &bar0
->swapper_ctrl
);
2969 writeq(valt
, &bar0
->xmsi_address
);
2970 val64
= readq(&bar0
->xmsi_address
);
2976 unsigned long long x
= val64
;
2977 DBG_PRINT(ERR_DBG
, "Write failed, Xmsi_addr ");
2978 DBG_PRINT(ERR_DBG
, "reads:0x%llx\n", x
);
2982 val64
= readq(&bar0
->swapper_ctrl
);
2983 val64
&= 0xFFFF000000000000ULL
;
2987 * The device by default set to a big endian format, so a
2988 * big endian driver need not set anything.
2990 val64
|= (SWAPPER_CTRL_TXP_FE
|
2991 SWAPPER_CTRL_TXP_SE
|
2992 SWAPPER_CTRL_TXD_R_FE
|
2993 SWAPPER_CTRL_TXD_W_FE
|
2994 SWAPPER_CTRL_TXF_R_FE
|
2995 SWAPPER_CTRL_RXD_R_FE
|
2996 SWAPPER_CTRL_RXD_W_FE
|
2997 SWAPPER_CTRL_RXF_W_FE
|
2998 SWAPPER_CTRL_XMSI_FE
|
2999 SWAPPER_CTRL_STATS_FE
| SWAPPER_CTRL_STATS_SE
);
3000 if (sp
->intr_type
== INTA
)
3001 val64
|= SWAPPER_CTRL_XMSI_SE
;
3002 writeq(val64
, &bar0
->swapper_ctrl
);
3005 * Initially we enable all bits to make it accessible by the
3006 * driver, then we selectively enable only those bits that
3009 val64
|= (SWAPPER_CTRL_TXP_FE
|
3010 SWAPPER_CTRL_TXP_SE
|
3011 SWAPPER_CTRL_TXD_R_FE
|
3012 SWAPPER_CTRL_TXD_R_SE
|
3013 SWAPPER_CTRL_TXD_W_FE
|
3014 SWAPPER_CTRL_TXD_W_SE
|
3015 SWAPPER_CTRL_TXF_R_FE
|
3016 SWAPPER_CTRL_RXD_R_FE
|
3017 SWAPPER_CTRL_RXD_R_SE
|
3018 SWAPPER_CTRL_RXD_W_FE
|
3019 SWAPPER_CTRL_RXD_W_SE
|
3020 SWAPPER_CTRL_RXF_W_FE
|
3021 SWAPPER_CTRL_XMSI_FE
|
3022 SWAPPER_CTRL_STATS_FE
| SWAPPER_CTRL_STATS_SE
);
3023 if (sp
->intr_type
== INTA
)
3024 val64
|= SWAPPER_CTRL_XMSI_SE
;
3025 writeq(val64
, &bar0
->swapper_ctrl
);
3027 val64
= readq(&bar0
->swapper_ctrl
);
	/*
	 * Verifying if endian settings are accurate by reading a
	 * feedback register.
	 */
	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x0123456789ABCDEFULL) {
		/* Endian settings are incorrect, calls for another dekko. */
		DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
			  dev->name);
		DBG_PRINT(ERR_DBG, "feedback read %llx\n",
			  (unsigned long long) val64);
		return FAILURE;
	}

	return SUCCESS;
}
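
/*
 * Illustrative sketch (not part of the driver): the swapper logic above checks
 * its settings by reading back a register that is hard-wired to the pattern
 * 0x0123456789ABCDEF; if the byte lanes are swapped, the value read differs
 * from the pattern.  The host-side snippet below shows the same idea of
 * inferring byte order from a known 64-bit constant.
 */
#if 0	/* example only, never compiled */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t pattern = 0x0123456789ABCDEFULL;
	unsigned char first = *(unsigned char *) &pattern;

	/* 0xEF stored first => little endian host, 0x01 => big endian */
	printf("host is %s-endian\n", (first == 0xEF) ? "little" : "big");
	return 0;
}
#endif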
int wait_for_msix_trans(nic_t *nic, int i)
{
	XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
	u64 val64;
	int ret = 0, cnt = 0;

	do {
		val64 = readq(&bar0->xmsi_access);
		if (!(val64 & BIT(15)))
			break;
		mdelay(1);
		cnt++;
	} while(cnt < 5);
	if (cnt == 5) {
		ret = 1;
		DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
	}

	return ret;
}
void restore_xmsi_data(nic_t *nic)
{
	XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
	u64 val64;
	int i;

	for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
		writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
		writeq(nic->msix_info[i].data, &bar0->xmsi_data);
		val64 = (BIT(7) | BIT(15) | vBIT(i, 26, 6));
		writeq(val64, &bar0->xmsi_access);
		if (wait_for_msix_trans(nic, i)) {
			DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
			continue;
		}
	}
}
void store_xmsi_data(nic_t *nic)
{
	XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
	u64 val64, addr, data;
	int i;

	/* Store and display */
	for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
		val64 = (BIT(15) | vBIT(i, 26, 6));
		writeq(val64, &bar0->xmsi_access);
		if (wait_for_msix_trans(nic, i)) {
			DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
			continue;
		}
		addr = readq(&bar0->xmsi_address);
		data = readq(&bar0->xmsi_data);
		if (addr && data) {
			nic->msix_info[i].addr = addr;
			nic->msix_info[i].data = data;
		}
	}
}
3108 int s2io_enable_msi(nic_t
*nic
)
3110 XENA_dev_config_t
*bar0
= (XENA_dev_config_t
*) nic
->bar0
;
3111 u16 msi_ctrl
, msg_val
;
3112 struct config_param
*config
= &nic
->config
;
3113 struct net_device
*dev
= nic
->dev
;
3114 u64 val64
, tx_mat
, rx_mat
;
3117 val64
= readq(&bar0
->pic_control
);
3119 writeq(val64
, &bar0
->pic_control
);
3121 err
= pci_enable_msi(nic
->pdev
);
3123 DBG_PRINT(ERR_DBG
, "%s: enabling MSI failed\n",
3129 * Enable MSI and use MSI-1 in stead of the standard MSI-0
3130 * for interrupt handling.
3132 pci_read_config_word(nic
->pdev
, 0x4c, &msg_val
);
3134 pci_write_config_word(nic
->pdev
, 0x4c, msg_val
);
3135 pci_read_config_word(nic
->pdev
, 0x4c, &msg_val
);
3137 pci_read_config_word(nic
->pdev
, 0x42, &msi_ctrl
);
3139 pci_write_config_word(nic
->pdev
, 0x42, msi_ctrl
);
3141 /* program MSI-1 into all usable Tx_Mat and Rx_Mat fields */
3142 tx_mat
= readq(&bar0
->tx_mat0_n
[0]);
3143 for (i
=0; i
<config
->tx_fifo_num
; i
++) {
3144 tx_mat
|= TX_MAT_SET(i
, 1);
3146 writeq(tx_mat
, &bar0
->tx_mat0_n
[0]);
3148 rx_mat
= readq(&bar0
->rx_mat
);
3149 for (i
=0; i
<config
->rx_ring_num
; i
++) {
3150 rx_mat
|= RX_MAT_SET(i
, 1);
3152 writeq(rx_mat
, &bar0
->rx_mat
);
3154 dev
->irq
= nic
->pdev
->irq
;
3158 int s2io_enable_msi_x(nic_t
*nic
)
3160 XENA_dev_config_t
*bar0
= (XENA_dev_config_t
*) nic
->bar0
;
3162 u16 msi_control
; /* Temp variable */
3163 int ret
, i
, j
, msix_indx
= 1;
3165 nic
->entries
= kmalloc(MAX_REQUESTED_MSI_X
* sizeof(struct msix_entry
),
3167 if (nic
->entries
== NULL
) {
3168 DBG_PRINT(ERR_DBG
, "%s: Memory allocation failed\n", __FUNCTION__
);
3171 memset(nic
->entries
, 0, MAX_REQUESTED_MSI_X
* sizeof(struct msix_entry
));
3174 kmalloc(MAX_REQUESTED_MSI_X
* sizeof(struct s2io_msix_entry
),
3176 if (nic
->s2io_entries
== NULL
) {
3177 DBG_PRINT(ERR_DBG
, "%s: Memory allocation failed\n", __FUNCTION__
);
3178 kfree(nic
->entries
);
3181 memset(nic
->s2io_entries
, 0,
3182 MAX_REQUESTED_MSI_X
* sizeof(struct s2io_msix_entry
));
3184 for (i
=0; i
< MAX_REQUESTED_MSI_X
; i
++) {
3185 nic
->entries
[i
].entry
= i
;
3186 nic
->s2io_entries
[i
].entry
= i
;
3187 nic
->s2io_entries
[i
].arg
= NULL
;
3188 nic
->s2io_entries
[i
].in_use
= 0;
3191 tx_mat
= readq(&bar0
->tx_mat0_n
[0]);
3192 for (i
=0; i
<nic
->config
.tx_fifo_num
; i
++, msix_indx
++) {
3193 tx_mat
|= TX_MAT_SET(i
, msix_indx
);
3194 nic
->s2io_entries
[msix_indx
].arg
= &nic
->mac_control
.fifos
[i
];
3195 nic
->s2io_entries
[msix_indx
].type
= MSIX_FIFO_TYPE
;
3196 nic
->s2io_entries
[msix_indx
].in_use
= MSIX_FLG
;
3198 writeq(tx_mat
, &bar0
->tx_mat0_n
[0]);
3200 if (!nic
->config
.bimodal
) {
3201 rx_mat
= readq(&bar0
->rx_mat
);
3202 for (j
=0; j
<nic
->config
.rx_ring_num
; j
++, msix_indx
++) {
3203 rx_mat
|= RX_MAT_SET(j
, msix_indx
);
3204 nic
->s2io_entries
[msix_indx
].arg
= &nic
->mac_control
.rings
[j
];
3205 nic
->s2io_entries
[msix_indx
].type
= MSIX_RING_TYPE
;
3206 nic
->s2io_entries
[msix_indx
].in_use
= MSIX_FLG
;
3208 writeq(rx_mat
, &bar0
->rx_mat
);
3210 tx_mat
= readq(&bar0
->tx_mat0_n
[7]);
3211 for (j
=0; j
<nic
->config
.rx_ring_num
; j
++, msix_indx
++) {
3212 tx_mat
|= TX_MAT_SET(i
, msix_indx
);
3213 nic
->s2io_entries
[msix_indx
].arg
= &nic
->mac_control
.rings
[j
];
3214 nic
->s2io_entries
[msix_indx
].type
= MSIX_RING_TYPE
;
3215 nic
->s2io_entries
[msix_indx
].in_use
= MSIX_FLG
;
3217 writeq(tx_mat
, &bar0
->tx_mat0_n
[7]);
3220 ret
= pci_enable_msix(nic
->pdev
, nic
->entries
, MAX_REQUESTED_MSI_X
);
3222 DBG_PRINT(ERR_DBG
, "%s: Enabling MSIX failed\n", nic
->dev
->name
);
3223 kfree(nic
->entries
);
3224 kfree(nic
->s2io_entries
);
3225 nic
->entries
= NULL
;
3226 nic
->s2io_entries
= NULL
;
3231 * To enable MSI-X, MSI also needs to be enabled, due to a bug
3232 * in the herc NIC. (Temp change, needs to be removed later)
3234 pci_read_config_word(nic
->pdev
, 0x42, &msi_control
);
3235 msi_control
|= 0x1; /* Enable MSI */
3236 pci_write_config_word(nic
->pdev
, 0x42, msi_control
);
3241 /* ********************************************************* *
3242 * Functions defined below concern the OS part of the driver *
3243 * ********************************************************* */
/**
 *  s2io_open - open entry point of the driver
 *  @dev : pointer to the device structure.
 *  Description:
 *  This function is the open entry point of the driver. It mainly calls a
 *  function to allocate Rx buffers and inserts them into the buffer
 *  descriptors and then enables the Rx part of the NIC.
 *  Return value:
 *  0 on success and an appropriate (-)ve integer as defined in errno.h
 *  file on failure.
 */
3257 int s2io_open(struct net_device
*dev
)
3259 nic_t
*sp
= dev
->priv
;
3262 u16 msi_control
; /* Temp variable */
3265 * Make sure you have link off by default every time
3266 * Nic is initialized
3268 netif_carrier_off(dev
);
3269 sp
->last_link_state
= 0;
3271 /* Initialize H/W and enable interrupts */
3272 if (s2io_card_up(sp
)) {
3273 DBG_PRINT(ERR_DBG
, "%s: H/W initialization failed\n",
3276 goto hw_init_failed
;
3279 /* Store the values of the MSIX table in the nic_t structure */
3280 store_xmsi_data(sp
);
3282 /* After proper initialization of H/W, register ISR */
3283 if (sp
->intr_type
== MSI
) {
3284 err
= request_irq((int) sp
->pdev
->irq
, s2io_msi_handle
,
3285 SA_SHIRQ
, sp
->name
, dev
);
3287 DBG_PRINT(ERR_DBG
, "%s: MSI registration \
3288 failed\n", dev
->name
);
3289 goto isr_registration_failed
;
3292 if (sp
->intr_type
== MSI_X
) {
3293 for (i
=1; (sp
->s2io_entries
[i
].in_use
== MSIX_FLG
); i
++) {
3294 if (sp
->s2io_entries
[i
].type
== MSIX_FIFO_TYPE
) {
3295 sprintf(sp
->desc1
, "%s:MSI-X-%d-TX",
3297 err
= request_irq(sp
->entries
[i
].vector
,
3298 s2io_msix_fifo_handle
, 0, sp
->desc1
,
3299 sp
->s2io_entries
[i
].arg
);
3300 DBG_PRINT(ERR_DBG
, "%s @ 0x%llx\n", sp
->desc1
,
3301 sp
->msix_info
[i
].addr
);
3303 sprintf(sp
->desc2
, "%s:MSI-X-%d-RX",
3305 err
= request_irq(sp
->entries
[i
].vector
,
3306 s2io_msix_ring_handle
, 0, sp
->desc2
,
3307 sp
->s2io_entries
[i
].arg
);
3308 DBG_PRINT(ERR_DBG
, "%s @ 0x%llx\n", sp
->desc2
,
3309 sp
->msix_info
[i
].addr
);
3312 DBG_PRINT(ERR_DBG
, "%s: MSI-X-%d registration \
3313 failed\n", dev
->name
, i
);
3314 DBG_PRINT(ERR_DBG
, "Returned: %d\n", err
);
3315 goto isr_registration_failed
;
3317 sp
->s2io_entries
[i
].in_use
= MSIX_REGISTERED_SUCCESS
;
3320 if (sp
->intr_type
== INTA
) {
3321 err
= request_irq((int) sp
->pdev
->irq
, s2io_isr
, SA_SHIRQ
,
3324 DBG_PRINT(ERR_DBG
, "%s: ISR registration failed\n",
3326 goto isr_registration_failed
;
3330 if (s2io_set_mac_addr(dev
, dev
->dev_addr
) == FAILURE
) {
3331 DBG_PRINT(ERR_DBG
, "Set Mac Address Failed\n");
3333 goto setting_mac_address_failed
;
3336 netif_start_queue(dev
);
3339 setting_mac_address_failed
:
3340 if (sp
->intr_type
!= MSI_X
)
3341 free_irq(sp
->pdev
->irq
, dev
);
3342 isr_registration_failed
:
3343 del_timer_sync(&sp
->alarm_timer
);
3344 if (sp
->intr_type
== MSI_X
) {
3345 if (sp
->device_type
== XFRAME_II_DEVICE
) {
3346 for (i
=1; (sp
->s2io_entries
[i
].in_use
==
3347 MSIX_REGISTERED_SUCCESS
); i
++) {
3348 int vector
= sp
->entries
[i
].vector
;
3349 void *arg
= sp
->s2io_entries
[i
].arg
;
3351 free_irq(vector
, arg
);
3353 pci_disable_msix(sp
->pdev
);
3356 pci_read_config_word(sp
->pdev
, 0x42, &msi_control
);
3357 msi_control
&= 0xFFFE; /* Disable MSI */
3358 pci_write_config_word(sp
->pdev
, 0x42, msi_control
);
3361 else if (sp
->intr_type
== MSI
)
3362 pci_disable_msi(sp
->pdev
);
3365 if (sp
->intr_type
== MSI_X
) {
3368 if (sp
->s2io_entries
)
3369 kfree(sp
->s2io_entries
);
/**
 *  s2io_close - close entry point of the driver
 *  @dev : device pointer.
 *  Description:
 *  This is the stop entry point of the driver. It needs to undo exactly
 *  whatever was done by the open entry point, thus it's usually referred to
 *  as the close function. Among other things this function mainly stops the
 *  Rx side of the NIC and frees all the Rx buffers in the Rx rings.
 *  Return value:
 *  0 on success and an appropriate (-)ve integer as defined in errno.h
 *  file on failure.
 */
3387 int s2io_close(struct net_device
*dev
)
3389 nic_t
*sp
= dev
->priv
;
3393 flush_scheduled_work();
3394 netif_stop_queue(dev
);
3395 /* Reset card, kill tasklet and free Tx and Rx buffers. */
3398 if (sp
->intr_type
== MSI_X
) {
3399 if (sp
->device_type
== XFRAME_II_DEVICE
) {
3400 for (i
=1; (sp
->s2io_entries
[i
].in_use
==
3401 MSIX_REGISTERED_SUCCESS
); i
++) {
3402 int vector
= sp
->entries
[i
].vector
;
3403 void *arg
= sp
->s2io_entries
[i
].arg
;
3405 free_irq(vector
, arg
);
3407 pci_read_config_word(sp
->pdev
, 0x42, &msi_control
);
3408 msi_control
&= 0xFFFE; /* Disable MSI */
3409 pci_write_config_word(sp
->pdev
, 0x42, msi_control
);
3411 pci_disable_msix(sp
->pdev
);
3415 free_irq(sp
->pdev
->irq
, dev
);
3416 if (sp
->intr_type
== MSI
)
3417 pci_disable_msi(sp
->pdev
);
3419 sp
->device_close_flag
= TRUE
; /* Device is shut down. */
/**
 *  s2io_xmit - Tx entry point of the driver
 *  @skb : the socket buffer containing the Tx data.
 *  @dev : device pointer.
 *  Description :
 *  This function is the Tx entry point of the driver. S2IO NIC supports
 *  certain protocol assist features on Tx side, namely CSO, S/G, LSO.
 *  NOTE: when the device cannot queue the pkt, just the trans_start variable
 *  will not be updated.
 *  Return value:
 *  0 on success & 1 on failure.
 */
3436 int s2io_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
3438 nic_t
*sp
= dev
->priv
;
3439 u16 frg_cnt
, frg_len
, i
, queue
, queue_len
, put_off
, get_off
;
3442 TxFIFO_element_t __iomem
*tx_fifo
;
3443 unsigned long flags
;
3448 int vlan_priority
= 0;
3449 mac_info_t
*mac_control
;
3450 struct config_param
*config
;
3452 mac_control
= &sp
->mac_control
;
3453 config
= &sp
->config
;
3455 DBG_PRINT(TX_DBG
, "%s: In Neterion Tx routine\n", dev
->name
);
3456 spin_lock_irqsave(&sp
->tx_lock
, flags
);
3457 if (atomic_read(&sp
->card_state
) == CARD_DOWN
) {
3458 DBG_PRINT(TX_DBG
, "%s: Card going down for reset\n",
3460 spin_unlock_irqrestore(&sp
->tx_lock
, flags
);
3467 /* Get Fifo number to Transmit based on vlan priority */
3468 if (sp
->vlgrp
&& vlan_tx_tag_present(skb
)) {
3469 vlan_tag
= vlan_tx_tag_get(skb
);
3470 vlan_priority
= vlan_tag
>> 13;
3471 queue
= config
->fifo_mapping
[vlan_priority
];
3474 put_off
= (u16
) mac_control
->fifos
[queue
].tx_curr_put_info
.offset
;
3475 get_off
= (u16
) mac_control
->fifos
[queue
].tx_curr_get_info
.offset
;
3476 txdp
= (TxD_t
*) mac_control
->fifos
[queue
].list_info
[put_off
].
3479 queue_len
= mac_control
->fifos
[queue
].tx_curr_put_info
.fifo_len
+ 1;
3480 /* Avoid "put" pointer going beyond "get" pointer */
3481 if (txdp
->Host_Control
|| (((put_off
+ 1) % queue_len
) == get_off
)) {
3482 DBG_PRINT(TX_DBG
, "Error in xmit, No free TXDs.\n");
3483 netif_stop_queue(dev
);
3485 spin_unlock_irqrestore(&sp
->tx_lock
, flags
);
3489 /* A buffer with no data will be dropped */
3491 DBG_PRINT(TX_DBG
, "%s:Buffer has no data..\n", dev
->name
);
3493 spin_unlock_irqrestore(&sp
->tx_lock
, flags
);
3498 mss
= skb_shinfo(skb
)->tso_size
;
3500 txdp
->Control_1
|= TXD_TCP_LSO_EN
;
3501 txdp
->Control_1
|= TXD_TCP_LSO_MSS(mss
);
3505 frg_cnt
= skb_shinfo(skb
)->nr_frags
;
3506 frg_len
= skb
->len
- skb
->data_len
;
3508 txdp
->Buffer_Pointer
= pci_map_single
3509 (sp
->pdev
, skb
->data
, frg_len
, PCI_DMA_TODEVICE
);
3510 txdp
->Host_Control
= (unsigned long) skb
;
3511 if (skb
->ip_summed
== CHECKSUM_HW
) {
3513 (TXD_TX_CKO_IPV4_EN
| TXD_TX_CKO_TCP_EN
|
3517 txdp
->Control_2
|= config
->tx_intr_type
;
3519 if (sp
->vlgrp
&& vlan_tx_tag_present(skb
)) {
3520 txdp
->Control_2
|= TXD_VLAN_ENABLE
;
3521 txdp
->Control_2
|= TXD_VLAN_TAG(vlan_tag
);
3524 txdp
->Control_1
|= (TXD_BUFFER0_SIZE(frg_len
) |
3525 TXD_GATHER_CODE_FIRST
);
3526 txdp
->Control_1
|= TXD_LIST_OWN_XENA
;
3528 /* For fragmented SKB. */
3529 for (i
= 0; i
< frg_cnt
; i
++) {
3530 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
3531 /* A '0' length fragment will be ignored */
3535 txdp
->Buffer_Pointer
= (u64
) pci_map_page
3536 (sp
->pdev
, frag
->page
, frag
->page_offset
,
3537 frag
->size
, PCI_DMA_TODEVICE
);
3538 txdp
->Control_1
|= TXD_BUFFER0_SIZE(frag
->size
);
3540 txdp
->Control_1
|= TXD_GATHER_CODE_LAST
;
3542 tx_fifo
= mac_control
->tx_FIFO_start
[queue
];
3543 val64
= mac_control
->fifos
[queue
].list_info
[put_off
].list_phy_addr
;
3544 writeq(val64
, &tx_fifo
->TxDL_Pointer
);
3546 val64
= (TX_FIFO_LAST_TXD_NUM(frg_cnt
) | TX_FIFO_FIRST_LIST
|
3551 val64
|= TX_FIFO_SPECIAL_FUNC
;
3553 writeq(val64
, &tx_fifo
->List_Control
);
3558 put_off
%= mac_control
->fifos
[queue
].tx_curr_put_info
.fifo_len
+ 1;
3559 mac_control
->fifos
[queue
].tx_curr_put_info
.offset
= put_off
;
	/* Avoid "put" pointer going beyond "get" pointer */
	if (((put_off + 1) % queue_len) == get_off) {
		DBG_PRINT(TX_DBG,
			  "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
			  put_off, get_off);
		netif_stop_queue(dev);
	}

	dev->trans_start = jiffies;
	spin_unlock_irqrestore(&sp->tx_lock, flags);

	return 0;
}
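
/*
 * Illustrative sketch (not part of the driver): the transmit path above treats
 * the FIFO as full when advancing the "put" index by one would collide with
 * the "get" index, i.e. ((put_off + 1) % queue_len) == get_off, and stops the
 * queue in that case.  The helper below is only a restatement of that test.
 */
#if 0	/* example only, never compiled */
static int fifo_full_sketch(int put_off, int get_off, int queue_len)
{
	return ((put_off + 1) % queue_len) == get_off;	/* 1 => stop queue */
}
#endif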
static void s2io_alarm_handle(unsigned long data)
{
	nic_t *sp = (nic_t *)data;

	alarm_intr_handler(sp);
	mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
}
3585 s2io_msi_handle(int irq
, void *dev_id
, struct pt_regs
*regs
)
3587 struct net_device
*dev
= (struct net_device
*) dev_id
;
3588 nic_t
*sp
= dev
->priv
;
3591 mac_info_t
*mac_control
;
3592 struct config_param
*config
;
3594 atomic_inc(&sp
->isr_cnt
);
3595 mac_control
= &sp
->mac_control
;
3596 config
= &sp
->config
;
3597 DBG_PRINT(INTR_DBG
, "%s: MSI handler\n", __FUNCTION__
);
3599 /* If Intr is because of Rx Traffic */
3600 for (i
= 0; i
< config
->rx_ring_num
; i
++)
3601 rx_intr_handler(&mac_control
->rings
[i
]);
3603 /* If Intr is because of Tx Traffic */
3604 for (i
= 0; i
< config
->tx_fifo_num
; i
++)
3605 tx_intr_handler(&mac_control
->fifos
[i
]);
3608 * If the Rx buffer count is below the panic threshold then
3609 * reallocate the buffers from the interrupt handler itself,
3610 * else schedule a tasklet to reallocate the buffers.
3612 for (i
= 0; i
< config
->rx_ring_num
; i
++) {
3613 int rxb_size
= atomic_read(&sp
->rx_bufs_left
[i
]);
3614 int level
= rx_buffer_level(sp
, rxb_size
, i
);
3616 if ((level
== PANIC
) && (!TASKLET_IN_USE
)) {
3617 DBG_PRINT(INTR_DBG
, "%s: Rx BD hit ", dev
->name
);
3618 DBG_PRINT(INTR_DBG
, "PANIC levels\n");
3619 if ((ret
= fill_rx_buffers(sp
, i
)) == -ENOMEM
) {
3620 DBG_PRINT(ERR_DBG
, "%s:Out of memory",
3622 DBG_PRINT(ERR_DBG
, " in ISR!!\n");
3623 clear_bit(0, (&sp
->tasklet_status
));
3624 atomic_dec(&sp
->isr_cnt
);
3627 clear_bit(0, (&sp
->tasklet_status
));
3628 } else if (level
== LOW
) {
3629 tasklet_schedule(&sp
->task
);
3633 atomic_dec(&sp
->isr_cnt
);
3638 s2io_msix_ring_handle(int irq
, void *dev_id
, struct pt_regs
*regs
)
3640 ring_info_t
*ring
= (ring_info_t
*)dev_id
;
3641 nic_t
*sp
= ring
->nic
;
3642 int rxb_size
, level
, rng_n
;
3644 atomic_inc(&sp
->isr_cnt
);
3645 rx_intr_handler(ring
);
3647 rng_n
= ring
->ring_no
;
3648 rxb_size
= atomic_read(&sp
->rx_bufs_left
[rng_n
]);
3649 level
= rx_buffer_level(sp
, rxb_size
, rng_n
);
3651 if ((level
== PANIC
) && (!TASKLET_IN_USE
)) {
3653 DBG_PRINT(INTR_DBG
, "%s: Rx BD hit ", __FUNCTION__
);
3654 DBG_PRINT(INTR_DBG
, "PANIC levels\n");
3655 if ((ret
= fill_rx_buffers(sp
, rng_n
)) == -ENOMEM
) {
3656 DBG_PRINT(ERR_DBG
, "Out of memory in %s",
3658 clear_bit(0, (&sp
->tasklet_status
));
3661 clear_bit(0, (&sp
->tasklet_status
));
3662 } else if (level
== LOW
) {
3663 tasklet_schedule(&sp
->task
);
	atomic_dec(&sp->isr_cnt);
	return IRQ_HANDLED;
}

static irqreturn_t
s2io_msix_fifo_handle(int irq, void *dev_id, struct pt_regs *regs)
{
	fifo_info_t *fifo = (fifo_info_t *)dev_id;
	nic_t *sp = fifo->nic;

	atomic_inc(&sp->isr_cnt);
	tx_intr_handler(fifo);
	atomic_dec(&sp->isr_cnt);
	return IRQ_HANDLED;
}
3682 static void s2io_txpic_intr_handle(nic_t
*sp
)
3684 XENA_dev_config_t __iomem
*bar0
= sp
->bar0
;
3687 val64
= readq(&bar0
->pic_int_status
);
3688 if (val64
& PIC_INT_GPIO
) {
3689 val64
= readq(&bar0
->gpio_int_reg
);
3690 if ((val64
& GPIO_INT_REG_LINK_DOWN
) &&
3691 (val64
& GPIO_INT_REG_LINK_UP
)) {
3692 val64
|= GPIO_INT_REG_LINK_DOWN
;
3693 val64
|= GPIO_INT_REG_LINK_UP
;
3694 writeq(val64
, &bar0
->gpio_int_reg
);
3698 if (((sp
->last_link_state
== LINK_UP
) &&
3699 (val64
& GPIO_INT_REG_LINK_DOWN
)) ||
3700 ((sp
->last_link_state
== LINK_DOWN
) &&
3701 (val64
& GPIO_INT_REG_LINK_UP
))) {
3702 val64
= readq(&bar0
->gpio_int_mask
);
3703 val64
|= GPIO_INT_MASK_LINK_DOWN
;
3704 val64
|= GPIO_INT_MASK_LINK_UP
;
3705 writeq(val64
, &bar0
->gpio_int_mask
);
3706 s2io_set_link((unsigned long)sp
);
3709 if (sp
->last_link_state
== LINK_UP
) {
3710 /*enable down interrupt */
3711 val64
= readq(&bar0
->gpio_int_mask
);
3712 /* unmasks link down intr */
3713 val64
&= ~GPIO_INT_MASK_LINK_DOWN
;
3714 /* masks link up intr */
3715 val64
|= GPIO_INT_MASK_LINK_UP
;
3716 writeq(val64
, &bar0
->gpio_int_mask
);
3718 /*enable UP Interrupt */
3719 val64
= readq(&bar0
->gpio_int_mask
);
3720 /* unmasks link up interrupt */
3721 val64
&= ~GPIO_INT_MASK_LINK_UP
;
3722 /* masks link down interrupt */
3723 val64
|= GPIO_INT_MASK_LINK_DOWN
;
3724 writeq(val64
, &bar0
->gpio_int_mask
);
/**
 *  s2io_isr - ISR handler of the device.
 *  @irq: the irq of the device.
 *  @dev_id: a void pointer to the dev structure of the NIC.
 *  @pt_regs: pointer to the registers pushed on the stack.
 *  Description: This function is the ISR handler of the device. It
 *  identifies the reason for the interrupt and calls the relevant
 *  service routines. As a contingency measure, this ISR allocates the
 *  recv buffers, if their numbers are below the panic value which is
 *  presently set to 25% of the original number of rcv buffers allocated.
 *  Return value:
 *  IRQ_HANDLED: will be returned if IRQ was handled by this routine
 *  IRQ_NONE: will be returned if interrupt is not from our device
 */
3743 static irqreturn_t
s2io_isr(int irq
, void *dev_id
, struct pt_regs
*regs
)
3745 struct net_device
*dev
= (struct net_device
*) dev_id
;
3746 nic_t
*sp
= dev
->priv
;
3747 XENA_dev_config_t __iomem
*bar0
= sp
->bar0
;
3749 u64 reason
= 0, val64
;
3750 mac_info_t
*mac_control
;
3751 struct config_param
*config
;
3753 atomic_inc(&sp
->isr_cnt
);
3754 mac_control
= &sp
->mac_control
;
3755 config
= &sp
->config
;
3758 * Identify the cause for interrupt and call the appropriate
3759 * interrupt handler. Causes for the interrupt could be;
3763 * 4. Error in any functional blocks of the NIC.
3765 reason
= readq(&bar0
->general_int_status
);
3768 /* The interrupt was not raised by Xena. */
3769 atomic_dec(&sp
->isr_cnt
);
3773 #ifdef CONFIG_S2IO_NAPI
3774 if (reason
& GEN_INTR_RXTRAFFIC
) {
3775 if (netif_rx_schedule_prep(dev
)) {
3776 en_dis_able_nic_intrs(sp
, RX_TRAFFIC_INTR
,
3778 __netif_rx_schedule(dev
);
3782 /* If Intr is because of Rx Traffic */
3783 if (reason
& GEN_INTR_RXTRAFFIC
) {
3785 * rx_traffic_int reg is an R1 register, writing all 1's
3786 * will ensure that the actual interrupt causing bit get's
3787 * cleared and hence a read can be avoided.
3789 val64
= 0xFFFFFFFFFFFFFFFFULL
;
3790 writeq(val64
, &bar0
->rx_traffic_int
);
3791 for (i
= 0; i
< config
->rx_ring_num
; i
++) {
3792 rx_intr_handler(&mac_control
->rings
[i
]);
3797 /* If Intr is because of Tx Traffic */
3798 if (reason
& GEN_INTR_TXTRAFFIC
) {
3800 * tx_traffic_int reg is an R1 register, writing all 1's
3801 * will ensure that the actual interrupt causing bit get's
3802 * cleared and hence a read can be avoided.
3804 val64
= 0xFFFFFFFFFFFFFFFFULL
;
3805 writeq(val64
, &bar0
->tx_traffic_int
);
3807 for (i
= 0; i
< config
->tx_fifo_num
; i
++)
3808 tx_intr_handler(&mac_control
->fifos
[i
]);
3811 if (reason
& GEN_INTR_TXPIC
)
3812 s2io_txpic_intr_handle(sp
);
3814 * If the Rx buffer count is below the panic threshold then
3815 * reallocate the buffers from the interrupt handler itself,
3816 * else schedule a tasklet to reallocate the buffers.
3818 #ifndef CONFIG_S2IO_NAPI
3819 for (i
= 0; i
< config
->rx_ring_num
; i
++) {
3821 int rxb_size
= atomic_read(&sp
->rx_bufs_left
[i
]);
3822 int level
= rx_buffer_level(sp
, rxb_size
, i
);
3824 if ((level
== PANIC
) && (!TASKLET_IN_USE
)) {
3825 DBG_PRINT(INTR_DBG
, "%s: Rx BD hit ", dev
->name
);
3826 DBG_PRINT(INTR_DBG
, "PANIC levels\n");
3827 if ((ret
= fill_rx_buffers(sp
, i
)) == -ENOMEM
) {
3828 DBG_PRINT(ERR_DBG
, "%s:Out of memory",
3830 DBG_PRINT(ERR_DBG
, " in ISR!!\n");
3831 clear_bit(0, (&sp
->tasklet_status
));
3832 atomic_dec(&sp
->isr_cnt
);
3835 clear_bit(0, (&sp
->tasklet_status
));
3836 } else if (level
== LOW
) {
3837 tasklet_schedule(&sp
->task
);
3842 atomic_dec(&sp
->isr_cnt
);
static void s2io_updt_stats(nic_t *sp)
{
	XENA_dev_config_t __iomem *bar0 = sp->bar0;
	u64 val64;
	int cnt = 0;

	if (atomic_read(&sp->card_state) == CARD_UP) {
		/* Apprx 30us on a 133 MHz bus */
		val64 = SET_UPDT_CLICKS(10) |
			STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
		writeq(val64, &bar0->stat_cfg);
		do {
			udelay(100);
			val64 = readq(&bar0->stat_cfg);
			if (!(val64 & BIT(0)))
				break;
			cnt++;
			if (cnt == 5)
				break;	/* Updt failed */
		} while(1);
	}
}
/**
 *  s2io_get_stats - Updates the device statistics structure.
 *  @dev : pointer to the device structure.
 *  Description:
 *  This function updates the device statistics structure in the s2io_nic
 *  structure and returns a pointer to the same.
 *  Return value:
 *  pointer to the updated net_device_stats structure.
 */

struct net_device_stats *s2io_get_stats(struct net_device *dev)
{
	nic_t *sp = dev->priv;
	mac_info_t *mac_control;
	struct config_param *config;

	mac_control = &sp->mac_control;
	config = &sp->config;

	/* Configure Stats for immediate updt */
	s2io_updt_stats(sp);

	sp->stats.tx_packets =
		le32_to_cpu(mac_control->stats_info->tmac_frms);
	sp->stats.tx_errors =
		le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
	sp->stats.rx_errors =
		le32_to_cpu(mac_control->stats_info->rmac_drop_frms);
	sp->stats.multicast =
		le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
	sp->stats.rx_length_errors =
		le32_to_cpu(mac_control->stats_info->rmac_long_frms);

	return (&sp->stats);
}
/**
 *  s2io_set_multicast - entry point for multicast address enable/disable.
 *  @dev : pointer to the device structure
 *  Description:
 *  This function is a driver entry point which gets called by the kernel
 *  whenever multicast addresses must be enabled/disabled. This also gets
 *  called to set/reset promiscuous mode. Depending on the device flag, we
 *  determine, if multicast address must be enabled or if promiscuous mode
 *  is to be disabled etc.
 *  Return value:
 *  void.
 */
3922 static void s2io_set_multicast(struct net_device
*dev
)
3925 struct dev_mc_list
*mclist
;
3926 nic_t
*sp
= dev
->priv
;
3927 XENA_dev_config_t __iomem
*bar0
= sp
->bar0
;
3928 u64 val64
= 0, multi_mac
= 0x010203040506ULL
, mask
=
3930 u64 dis_addr
= 0xffffffffffffULL
, mac_addr
= 0;
3933 if ((dev
->flags
& IFF_ALLMULTI
) && (!sp
->m_cast_flg
)) {
3934 /* Enable all Multicast addresses */
3935 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac
),
3936 &bar0
->rmac_addr_data0_mem
);
3937 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask
),
3938 &bar0
->rmac_addr_data1_mem
);
3939 val64
= RMAC_ADDR_CMD_MEM_WE
|
3940 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD
|
3941 RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET
);
3942 writeq(val64
, &bar0
->rmac_addr_cmd_mem
);
3943 /* Wait till command completes */
3944 wait_for_cmd_complete(sp
);
3947 sp
->all_multi_pos
= MAC_MC_ALL_MC_ADDR_OFFSET
;
3948 } else if ((dev
->flags
& IFF_ALLMULTI
) && (sp
->m_cast_flg
)) {
3949 /* Disable all Multicast addresses */
3950 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr
),
3951 &bar0
->rmac_addr_data0_mem
);
3952 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
3953 &bar0
->rmac_addr_data1_mem
);
3954 val64
= RMAC_ADDR_CMD_MEM_WE
|
3955 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD
|
3956 RMAC_ADDR_CMD_MEM_OFFSET(sp
->all_multi_pos
);
3957 writeq(val64
, &bar0
->rmac_addr_cmd_mem
);
3958 /* Wait till command completes */
3959 wait_for_cmd_complete(sp
);
3962 sp
->all_multi_pos
= 0;
3965 if ((dev
->flags
& IFF_PROMISC
) && (!sp
->promisc_flg
)) {
3966 /* Put the NIC into promiscuous mode */
3967 add
= &bar0
->mac_cfg
;
3968 val64
= readq(&bar0
->mac_cfg
);
3969 val64
|= MAC_CFG_RMAC_PROM_ENABLE
;
3971 writeq(RMAC_CFG_KEY(0x4C0D), &bar0
->rmac_cfg_key
);
3972 writel((u32
) val64
, add
);
3973 writeq(RMAC_CFG_KEY(0x4C0D), &bar0
->rmac_cfg_key
);
3974 writel((u32
) (val64
>> 32), (add
+ 4));
3976 val64
= readq(&bar0
->mac_cfg
);
3977 sp
->promisc_flg
= 1;
3978 DBG_PRINT(INFO_DBG
, "%s: entered promiscuous mode\n",
3980 } else if (!(dev
->flags
& IFF_PROMISC
) && (sp
->promisc_flg
)) {
3981 /* Remove the NIC from promiscuous mode */
3982 add
= &bar0
->mac_cfg
;
3983 val64
= readq(&bar0
->mac_cfg
);
3984 val64
&= ~MAC_CFG_RMAC_PROM_ENABLE
;
3986 writeq(RMAC_CFG_KEY(0x4C0D), &bar0
->rmac_cfg_key
);
3987 writel((u32
) val64
, add
);
3988 writeq(RMAC_CFG_KEY(0x4C0D), &bar0
->rmac_cfg_key
);
3989 writel((u32
) (val64
>> 32), (add
+ 4));
3991 val64
= readq(&bar0
->mac_cfg
);
3992 sp
->promisc_flg
= 0;
3993 DBG_PRINT(INFO_DBG
, "%s: left promiscuous mode\n",
3997 /* Update individual M_CAST address list */
3998 if ((!sp
->m_cast_flg
) && dev
->mc_count
) {
4000 (MAX_ADDRS_SUPPORTED
- MAC_MC_ADDR_START_OFFSET
- 1)) {
4001 DBG_PRINT(ERR_DBG
, "%s: No more Rx filters ",
4003 DBG_PRINT(ERR_DBG
, "can be added, please enable ");
4004 DBG_PRINT(ERR_DBG
, "ALL_MULTI instead\n");
4008 prev_cnt
= sp
->mc_addr_count
;
4009 sp
->mc_addr_count
= dev
->mc_count
;
4011 /* Clear out the previous list of Mc in the H/W. */
4012 for (i
= 0; i
< prev_cnt
; i
++) {
4013 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr
),
4014 &bar0
->rmac_addr_data0_mem
);
4015 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4016 &bar0
->rmac_addr_data1_mem
);
4017 val64
= RMAC_ADDR_CMD_MEM_WE
|
4018 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD
|
4019 RMAC_ADDR_CMD_MEM_OFFSET
4020 (MAC_MC_ADDR_START_OFFSET
+ i
);
4021 writeq(val64
, &bar0
->rmac_addr_cmd_mem
);
4023 /* Wait for command completes */
4024 if (wait_for_cmd_complete(sp
)) {
4025 DBG_PRINT(ERR_DBG
, "%s: Adding ",
4027 DBG_PRINT(ERR_DBG
, "Multicasts failed\n");
4032 /* Create the new Rx filter list and update the same in H/W. */
4033 for (i
= 0, mclist
= dev
->mc_list
; i
< dev
->mc_count
;
4034 i
++, mclist
= mclist
->next
) {
4035 memcpy(sp
->usr_addrs
[i
].addr
, mclist
->dmi_addr
,
4037 for (j
= 0; j
< ETH_ALEN
; j
++) {
4038 mac_addr
|= mclist
->dmi_addr
[j
];
4042 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr
),
4043 &bar0
->rmac_addr_data0_mem
);
4044 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4045 &bar0
->rmac_addr_data1_mem
);
4046 val64
= RMAC_ADDR_CMD_MEM_WE
|
4047 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD
|
4048 RMAC_ADDR_CMD_MEM_OFFSET
4049 (i
+ MAC_MC_ADDR_START_OFFSET
);
4050 writeq(val64
, &bar0
->rmac_addr_cmd_mem
);
4052 /* Wait for command completes */
4053 if (wait_for_cmd_complete(sp
)) {
4054 DBG_PRINT(ERR_DBG
, "%s: Adding ",
4056 DBG_PRINT(ERR_DBG
, "Multicasts failed\n");
/**
 *  s2io_set_mac_addr - Programs the Xframe mac address
 *  @dev : pointer to the device structure.
 *  @addr: a uchar pointer to the new mac address which is to be set.
 *  Description : This procedure will program the Xframe to receive
 *  frames with new Mac Address
 *  Return value: SUCCESS on success and an appropriate (-)ve integer
 *  as defined in errno.h file on failure.
 */

int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
{
	nic_t *sp = dev->priv;
	XENA_dev_config_t __iomem *bar0 = sp->bar0;
	register u64 val64, mac_addr = 0;
	int i;

	/*
	 * Set the new MAC address as the new unicast filter and reflect this
	 * change on the device address registered with the OS. It will be
	 * at offset 0.
	 */
	for (i = 0; i < ETH_ALEN; i++) {
		mac_addr <<= 8;
		mac_addr |= addr[i];
	}

	writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
	       &bar0->rmac_addr_data0_mem);

	val64 =
	    RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
	    RMAC_ADDR_CMD_MEM_OFFSET(0);
	writeq(val64, &bar0->rmac_addr_cmd_mem);
	/* Wait till command completes */
	if (wait_for_cmd_complete(sp)) {
		DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
		return FAILURE;
	}

	return SUCCESS;
}
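
/*
 * Illustrative sketch (not part of the driver): the routine above packs the
 * six MAC address octets into a single 64-bit value, most significant octet
 * first, before programming it through RMAC_ADDR_DATA0_MEM_ADDR.  The
 * standalone snippet below shows just that packing step on a made-up address.
 */
#if 0	/* example only, never compiled */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t addr[6] = { 0x00, 0x0c, 0xfc, 0x00, 0x00, 0x01 };
	uint64_t mac = 0;
	int i;

	for (i = 0; i < 6; i++) {
		mac <<= 8;
		mac |= addr[i];
	}
	printf("packed mac = 0x%012llx\n", (unsigned long long) mac);
	return 0;
}
#endif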
/**
 * s2io_ethtool_sset - Sets different link parameters.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @info: pointer to the structure with parameters given by ethtool to set
 * link information.
 * Description:
 * The function sets different link parameters provided by the user onto
 * the NIC.
 * Return value:
 * 0 on success.
*/

static int s2io_ethtool_sset(struct net_device *dev,
			     struct ethtool_cmd *info)
{
	nic_t *sp = dev->priv;
	if ((info->autoneg == AUTONEG_ENABLE) ||
	    (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
		return -EINVAL;
	else {
		s2io_close(sp->dev);
		s2io_open(sp->dev);
	}

	return 0;
}
/**
 * s2io_ethtool_gset - Return link specific information.
 * @sp : private member of the device structure, pointer to the
 * s2io_nic structure.
 * @info : pointer to the structure with parameters given by ethtool
 * to return link information.
 * Description:
 * Returns link specific information like speed, duplex etc.. to ethtool.
 * Return value :
 * return 0 on success.
 */

static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
{
	nic_t *sp = dev->priv;
	info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
	info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
	info->port = PORT_FIBRE;
	/* info->transceiver?? TODO */

	if (netif_carrier_ok(sp->dev)) {
		info->speed = 10000;
		info->duplex = DUPLEX_FULL;
	} else {
		info->speed = -1;
		info->duplex = -1;
	}

	info->autoneg = AUTONEG_DISABLE;
	return 0;
}
/**
 * s2io_ethtool_gdrvinfo - Returns driver specific information.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @info : pointer to the structure with parameters given by ethtool to
 * return driver information.
 * Description:
 * Returns driver specific information like name, version etc.. to ethtool.
 * Return value:
 *  void
 */

static void s2io_ethtool_gdrvinfo(struct net_device *dev,
                                  struct ethtool_drvinfo *info)
{
    nic_t *sp = dev->priv;

    strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
    strncpy(info->version, s2io_driver_version, sizeof(info->version));
    strncpy(info->fw_version, "", sizeof(info->fw_version));
    strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
    info->regdump_len = XENA_REG_SPACE;
    info->eedump_len = XENA_EEPROM_SPACE;
    info->testinfo_len = S2IO_TEST_LEN;
    info->n_stats = S2IO_STAT_LEN;
}
/**
 *  s2io_ethtool_gregs - dumps the entire register space of Xframe into the buffer.
 *  @sp: private member of the device structure, which is a pointer to the
 *  s2io_nic structure.
 *  @regs : pointer to the structure with parameters given by ethtool for
 *  dumping the registers.
 *  @reg_space: The input argument into which all the registers are dumped.
 *  Description:
 *  Dumps the entire register space of xFrame NIC into the user given
 *  buffer area.
 *  Return value :
 *  void.
 */

static void s2io_ethtool_gregs(struct net_device *dev,
                               struct ethtool_regs *regs, void *space)
{
    int i;
    u64 reg;
    u8 *reg_space = (u8 *) space;
    nic_t *sp = dev->priv;

    regs->len = XENA_REG_SPACE;
    regs->version = sp->pdev->subsystem_device;

    for (i = 0; i < regs->len; i += 8) {
        reg = readq(sp->bar0 + i);
        memcpy((reg_space + i), &reg, 8);
    }
}
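/*
 * Added note (not part of the original driver source): the dump above
 * simply walks the whole BAR0 window in 64-bit reads, so the blob handed
 * back to userspace is XENA_REG_SPACE bytes of raw register contents;
 * this is what a user retrieves with something like "ethtool -d ethX".
 */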
/**
 *  s2io_phy_id  - timer function that alternates adapter LED.
 *  @data : address of the private member of the device structure, which
 *  is a pointer to the s2io_nic structure, provided as an u32.
 *  Description: This is actually the timer function that alternates the
 *  adapter LED bit of the adapter control bit to set/reset every time on
 *  invocation. The timer is set for 1/2 a second, hence the NIC blinks
 *  once every second.
 */
static void s2io_phy_id(unsigned long data)
{
    nic_t *sp = (nic_t *) data;
    XENA_dev_config_t __iomem *bar0 = sp->bar0;
    u64 val64 = 0;
    u16 subid;

    subid = sp->pdev->subsystem_device;
    if ((sp->device_type == XFRAME_II_DEVICE) ||
        ((subid & 0xFF) >= 0x07)) {
        val64 = readq(&bar0->gpio_control);
        val64 ^= GPIO_CTRL_GPIO_0;
        writeq(val64, &bar0->gpio_control);
    } else {
        val64 = readq(&bar0->adapter_control);
        val64 ^= ADAPTER_LED_ON;
        writeq(val64, &bar0->adapter_control);
    }

    mod_timer(&sp->id_timer, jiffies + HZ / 2);
}
/**
 * s2io_ethtool_idnic - To physically identify the nic on the system.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @id : pointer to the structure with identification parameters given by
 * ethtool.
 * Description: Used to physically identify the NIC on the system.
 * The Link LED will blink for a time specified by the user for
 * identification.
 * NOTE: The Link has to be Up to be able to blink the LED. Hence
 * identification is possible only if it's link is up.
 * Return value:
 * int , returns 0 on success
 */

static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
{
    u64 val64 = 0, last_gpio_ctrl_val;
    nic_t *sp = dev->priv;
    XENA_dev_config_t __iomem *bar0 = sp->bar0;
    u16 subid;

    subid = sp->pdev->subsystem_device;
    last_gpio_ctrl_val = readq(&bar0->gpio_control);
    if ((sp->device_type == XFRAME_I_DEVICE) &&
        ((subid & 0xFF) < 0x07)) {
        val64 = readq(&bar0->adapter_control);
        if (!(val64 & ADAPTER_CNTL_EN)) {
            DBG_PRINT(ERR_DBG,
                      "Adapter Link down, cannot blink LED\n");
            return -EFAULT;
        }
    }
    if (sp->id_timer.function == NULL) {
        init_timer(&sp->id_timer);
        sp->id_timer.function = s2io_phy_id;
        sp->id_timer.data = (unsigned long) sp;
    }
    mod_timer(&sp->id_timer, jiffies);
    if (data)
        msleep_interruptible(data * HZ);
    else
        msleep_interruptible(MAX_FLICKER_TIME);
    del_timer_sync(&sp->id_timer);

    if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
        writeq(last_gpio_ctrl_val, &bar0->gpio_control);
        last_gpio_ctrl_val = readq(&bar0->gpio_control);
    }

    return 0;
}
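/*
 * Added usage note (not part of the original driver source): this handler
 * backs the ethtool identify operation, e.g. "ethtool -p ethX 10" should
 * blink the adapter LED for roughly 10 seconds, with s2io_phy_id()
 * re-arming itself every HZ/2 jiffies to toggle the LED bit.
 */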
/**
 * s2io_ethtool_getpause_data - Pause frame generation and reception.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @ep : pointer to the structure with pause parameters given by ethtool.
 * Description:
 * Returns the Pause frame generation and reception capability of the NIC.
 * Return value:
 *  void
 */
static void s2io_ethtool_getpause_data(struct net_device *dev,
                                       struct ethtool_pauseparam *ep)
{
    u64 val64;
    nic_t *sp = dev->priv;
    XENA_dev_config_t __iomem *bar0 = sp->bar0;

    val64 = readq(&bar0->rmac_pause_cfg);
    if (val64 & RMAC_PAUSE_GEN_ENABLE)
        ep->tx_pause = TRUE;
    if (val64 & RMAC_PAUSE_RX_ENABLE)
        ep->rx_pause = TRUE;
    ep->autoneg = FALSE;
}
/**
 * s2io_ethtool_setpause_data -  set/reset pause frame generation.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @ep : pointer to the structure with pause parameters given by ethtool.
 * Description:
 * It can be used to set or reset Pause frame generation or reception
 * support of the NIC.
 * Return value:
 * int, returns 0 on Success
 */

static int s2io_ethtool_setpause_data(struct net_device *dev,
                                      struct ethtool_pauseparam *ep)
{
    u64 val64;
    nic_t *sp = dev->priv;
    XENA_dev_config_t __iomem *bar0 = sp->bar0;

    val64 = readq(&bar0->rmac_pause_cfg);
    if (ep->tx_pause)
        val64 |= RMAC_PAUSE_GEN_ENABLE;
    else
        val64 &= ~RMAC_PAUSE_GEN_ENABLE;
    if (ep->rx_pause)
        val64 |= RMAC_PAUSE_RX_ENABLE;
    else
        val64 &= ~RMAC_PAUSE_RX_ENABLE;
    writeq(val64, &bar0->rmac_pause_cfg);
    return 0;
}
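/*
 * Added usage note (not part of the original driver source): the two
 * routines above back "ethtool -a ethX" and "ethtool -A ethX rx on|off
 * tx on|off".  Only the generation (tx_pause) and reception (rx_pause)
 * bits of rmac_pause_cfg are touched; pause autonegotiation is not
 * supported by this hardware path.
 */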
/**
 * read_eeprom - reads 4 bytes of data from user given offset.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @off : offset at which the data is to be read
 * @data : Its an output parameter where the data read at the given
 * offset is stored.
 * Description:
 * Will read 4 bytes of data from the user given offset and return the
 * read data.
 * NOTE: Will allow to read only part of the EEPROM visible through the
 * I2C bus.
 * Return value:
 *  -1 on failure and 0 on success.
 */

#define S2IO_DEV_ID        5
static int read_eeprom(nic_t * sp, int off, u64 * data)
{
    int ret = -1;
    u32 exit_cnt = 0;
    u64 val64;
    XENA_dev_config_t __iomem *bar0 = sp->bar0;

    if (sp->device_type == XFRAME_I_DEVICE) {
        val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
            I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
            I2C_CONTROL_CNTL_START;
        SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

        while (exit_cnt < 5) {
            val64 = readq(&bar0->i2c_control);
            if (I2C_CONTROL_CNTL_END(val64)) {
                *data = I2C_CONTROL_GET_DATA(val64);
                ret = 0;
                break;
            }
            msleep(50);
            exit_cnt++;
        }
    }

    if (sp->device_type == XFRAME_II_DEVICE) {
        val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
            SPI_CONTROL_BYTECNT(0x3) |
            SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
        SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
        val64 |= SPI_CONTROL_REQ;
        SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
        while (exit_cnt < 5) {
            val64 = readq(&bar0->spi_control);
            if (val64 & SPI_CONTROL_NACK) {
                ret = 1;
                break;
            } else if (val64 & SPI_CONTROL_DONE) {
                *data = readq(&bar0->spi_data);
                ret = 0;
                break;
            }
            msleep(50);
            exit_cnt++;
        }
    }
    return ret;
}
/**
 *  write_eeprom - actually writes the relevant part of the data value.
 *  @sp : private member of the device structure, which is a pointer to the
 *  s2io_nic structure.
 *  @off : offset at which the data must be written
 *  @data : The data that is to be written
 *  @cnt : Number of bytes of the data that are actually to be written into
 *  the Eeprom. (max of 3)
 *  Description:
 *  Actually writes the relevant part of the data value into the Eeprom
 *  through the I2C bus.
 *  Return value:
 *  0 on success, -1 on failure.
 */

static int write_eeprom(nic_t * sp, int off, u64 data, int cnt)
{
    int exit_cnt = 0, ret = -1;
    u64 val64;
    XENA_dev_config_t __iomem *bar0 = sp->bar0;

    if (sp->device_type == XFRAME_I_DEVICE) {
        val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
            I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA((u32)data) |
            I2C_CONTROL_CNTL_START;
        SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

        while (exit_cnt < 5) {
            val64 = readq(&bar0->i2c_control);
            if (I2C_CONTROL_CNTL_END(val64)) {
                if (!(val64 & I2C_CONTROL_NACK))
                    ret = 0;
                break;
            }
            msleep(50);
            exit_cnt++;
        }
    }

    if (sp->device_type == XFRAME_II_DEVICE) {
        int write_cnt = (cnt == 8) ? 0 : cnt;
        writeq(SPI_DATA_WRITE(data, (cnt << 3)), &bar0->spi_data);

        val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
            SPI_CONTROL_BYTECNT(write_cnt) |
            SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
        SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
        val64 |= SPI_CONTROL_REQ;
        SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
        while (exit_cnt < 5) {
            val64 = readq(&bar0->spi_control);
            if (val64 & SPI_CONTROL_NACK) {
                ret = 1;
                break;
            } else if (val64 & SPI_CONTROL_DONE) {
                ret = 0;
                break;
            }
            msleep(50);
            exit_cnt++;
        }
    }
    return ret;
}
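/*
 * Added note (not part of the original driver source): both EEPROM
 * helpers follow the same pattern - program the command register (the
 * I2C controller on Xframe I, the SPI controller on Xframe II), then poll
 * a small bounded number of times for a completion or NACK indication.
 * If the loop expires without either, the initial error return value is
 * handed back to the caller.
 */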
/**
 *  s2io_ethtool_geeprom  - reads the value stored in the Eeprom.
 *  @sp : private member of the device structure, which is a pointer to the
 *  s2io_nic structure.
 *  @eeprom : pointer to the user level structure provided by ethtool,
 *  containing all relevant information.
 *  @data_buf : buffer into which the values read from the Eeprom are returned.
 *  Description: Reads the values stored in the Eeprom at given offset
 *  for a given length. Stores these values into the input argument data
 *  buffer 'data_buf' and returns these to the caller (ethtool.)
 *  Return value:
 *  int  0 on success
 */

static int s2io_ethtool_geeprom(struct net_device *dev,
                                struct ethtool_eeprom *eeprom, u8 * data_buf)
{
    u32 i, valid;
    u64 data;
    nic_t *sp = dev->priv;

    eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);

    if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
        eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;

    for (i = 0; i < eeprom->len; i += 4) {
        if (read_eeprom(sp, (eeprom->offset + i), &data)) {
            DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
            return -EFAULT;
        }
        valid = INV(data);
        memcpy((data_buf + i), &valid, 4);
    }
    return 0;
}
/**
 *  s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
 *  @sp : private member of the device structure, which is a pointer to the
 *  s2io_nic structure.
 *  @eeprom : pointer to the user level structure provided by ethtool,
 *  containing all relevant information.
 *  @data_buf : user defined value to be written into Eeprom.
 *  Description:
 *  Tries to write the user provided value in the Eeprom, at the offset
 *  given by the user.
 *  Return value:
 *  0 on success, -EFAULT on failure.
 */

static int s2io_ethtool_seeprom(struct net_device *dev,
                                struct ethtool_eeprom *eeprom,
                                u8 * data_buf)
{
    int len = eeprom->len, cnt = 0;
    u64 valid = 0, data;
    nic_t *sp = dev->priv;

    if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
        DBG_PRINT(ERR_DBG,
                  "ETHTOOL_WRITE_EEPROM Err: Magic value ");
        DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
                  (sp->pdev->vendor | (sp->pdev->device << 16)));
        return -EFAULT;
    }

    while (len) {
        data = (u32) data_buf[cnt] & 0x000000FF;
        if (data)
            valid = (u32) (data << 24);
        else
            valid = data;

        if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
            DBG_PRINT(ERR_DBG,
                      "ETHTOOL_WRITE_EEPROM Err: Cannot ");
            DBG_PRINT(ERR_DBG,
                      "write into the specified offset\n");
            return -EFAULT;
        }
        cnt++;
        len--;
    }

    return 0;
}
/**
 * s2io_register_test - reads and writes into all clock domains.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @data : variable that returns the result of each of the test conducted by
 * the driver.
 * Description:
 * Read and write into all clock domains. The NIC has 3 clock domains,
 * see that registers in all the three regions are accessible.
 * Return value:
 * 0 on success.
 */

static int s2io_register_test(nic_t * sp, uint64_t * data)
{
    XENA_dev_config_t __iomem *bar0 = sp->bar0;
    u64 val64 = 0, exp_val;
    int fail = 0;

    val64 = readq(&bar0->pif_rd_swapper_fb);
    if (val64 != 0x123456789abcdefULL) {
        fail = 1;
        DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
    }

    val64 = readq(&bar0->rmac_pause_cfg);
    if (val64 != 0xc000ffff00000000ULL) {
        fail = 1;
        DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
    }

    val64 = readq(&bar0->rx_queue_cfg);
    if (sp->device_type == XFRAME_II_DEVICE)
        exp_val = 0x0404040404040404ULL;
    else
        exp_val = 0x0808080808080808ULL;
    if (val64 != exp_val) {
        fail = 1;
        DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
    }

    val64 = readq(&bar0->xgxs_efifo_cfg);
    if (val64 != 0x000000001923141EULL) {
        fail = 1;
        DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
    }

    val64 = 0x5A5A5A5A5A5A5A5AULL;
    writeq(val64, &bar0->xmsi_data);
    val64 = readq(&bar0->xmsi_data);
    if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
        fail = 1;
        DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
    }

    val64 = 0xA5A5A5A5A5A5A5A5ULL;
    writeq(val64, &bar0->xmsi_data);
    val64 = readq(&bar0->xmsi_data);
    if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
        fail = 1;
        DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
    }

    *data = fail;
    return fail;
}
/**
 * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @data:variable that returns the result of each of the test conducted by
 * the driver.
 * Description:
 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
 * register.
 * Return value:
 * 0 on success.
 */

static int s2io_eeprom_test(nic_t * sp, uint64_t * data)
{
    int fail = 0;
    u64 ret_data, org_4F0, org_7F0;
    u8 saved_4F0 = 0, saved_7F0 = 0;
    struct net_device *dev = sp->dev;

    /* Test Write Error at offset 0 */
    /* Note that SPI interface allows write access to all areas
     * of EEPROM. Hence doing all negative testing only for Xframe I.
     */
    if (sp->device_type == XFRAME_I_DEVICE)
        if (!write_eeprom(sp, 0, 0, 3))
            fail = 1;

    /* Save current values at offsets 0x4F0 and 0x7F0 */
    if (!read_eeprom(sp, 0x4F0, &org_4F0))
        saved_4F0 = 1;
    if (!read_eeprom(sp, 0x7F0, &org_7F0))
        saved_7F0 = 1;

    /* Test Write at offset 4f0 */
    if (write_eeprom(sp, 0x4F0, 0x012345, 3))
        fail = 1;
    if (read_eeprom(sp, 0x4F0, &ret_data))
        fail = 1;

    if (ret_data != 0x012345) {
        DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
                  "Data written %llx Data read %llx\n",
                  dev->name, (u64)0x12345, ret_data);
        fail = 1;
    }

    /* Reset the EEPROM data to FFFF */
    write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);

    /* Test Write Request Error at offset 0x7c */
    if (sp->device_type == XFRAME_I_DEVICE)
        if (!write_eeprom(sp, 0x07C, 0, 3))
            fail = 1;

    /* Test Write Request at offset 0x7f0 */
    if (write_eeprom(sp, 0x7F0, 0x012345, 3))
        fail = 1;
    if (read_eeprom(sp, 0x7F0, &ret_data))
        fail = 1;

    if (ret_data != 0x012345) {
        DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
                  "Data written %llx Data read %llx\n",
                  dev->name, (u64)0x12345, ret_data);
        fail = 1;
    }

    /* Reset the EEPROM data to FFFF */
    write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);

    if (sp->device_type == XFRAME_I_DEVICE) {
        /* Test Write Error at offset 0x80 */
        if (!write_eeprom(sp, 0x080, 0, 3))
            fail = 1;

        /* Test Write Error at offset 0xfc */
        if (!write_eeprom(sp, 0x0FC, 0, 3))
            fail = 1;

        /* Test Write Error at offset 0x100 */
        if (!write_eeprom(sp, 0x100, 0, 3))
            fail = 1;

        /* Test Write Error at offset 4ec */
        if (!write_eeprom(sp, 0x4EC, 0, 3))
            fail = 1;
    }

    /* Restore values at offsets 0x4F0 and 0x7F0 */
    if (saved_4F0)
        write_eeprom(sp, 0x4F0, org_4F0, 3);
    if (saved_7F0)
        write_eeprom(sp, 0x7F0, org_7F0, 3);

    *data = fail;
    return fail;
}
/**
 * s2io_bist_test - invokes the MemBist test of the card.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @data:variable that returns the result of each of the test conducted by
 * the driver.
 * Description:
 * This invokes the MemBist test of the card. We give around
 * 2 secs time for the Test to complete. If it's still not complete
 * within this period, we consider that the test failed.
 * Return value:
 * 0 on success and -1 on failure.
 */

static int s2io_bist_test(nic_t * sp, uint64_t * data)
{
    u8 bist = 0;
    int cnt = 0, ret = -1;

    pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
    bist |= PCI_BIST_START;
    pci_write_config_word(sp->pdev, PCI_BIST, bist);

    while (cnt < 20) {
        pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
        if (!(bist & PCI_BIST_START)) {
            *data = (bist & PCI_BIST_CODE_MASK);
            ret = 0;
            break;
        }
        msleep(100);
        cnt++;
    }

    return ret;
}
/**
 * s2io_link_test - verifies the link state of the nic
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @data: variable that returns the result of each of the test conducted by
 * the driver.
 * Description:
 * The function verifies the link state of the NIC and updates the input
 * argument 'data' appropriately.
 * Return value:
 * 0 on success.
 */

static int s2io_link_test(nic_t * sp, uint64_t * data)
{
    XENA_dev_config_t __iomem *bar0 = sp->bar0;
    u64 val64;

    val64 = readq(&bar0->adapter_status);
    if (val64 & ADAPTER_STATUS_RMAC_LOCAL_FAULT)
        *data = 1;

    return 0;
}
/**
 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
 * @sp - private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @data - variable that returns the result of each of the test
 * conducted by the driver.
 * Description:
 *  This is one of the offline tests that tests the read and write
 *  access to the RldRam chip on the NIC.
 * Return value:
 *  0 on success.
 */

static int s2io_rldram_test(nic_t * sp, uint64_t * data)
{
    XENA_dev_config_t __iomem *bar0 = sp->bar0;
    u64 val64;
    int cnt, iteration = 0, test_fail = 0;

    val64 = readq(&bar0->adapter_control);
    val64 &= ~ADAPTER_ECC_EN;
    writeq(val64, &bar0->adapter_control);

    val64 = readq(&bar0->mc_rldram_test_ctrl);
    val64 |= MC_RLDRAM_TEST_MODE;
    SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

    val64 = readq(&bar0->mc_rldram_mrs);
    val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
    SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

    val64 |= MC_RLDRAM_MRS_ENABLE;
    SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

    while (iteration < 2) {
        val64 = 0x55555555aaaa0000ULL;
        if (iteration == 1) {
            val64 ^= 0xFFFFFFFFFFFF0000ULL;
        }
        writeq(val64, &bar0->mc_rldram_test_d0);

        val64 = 0xaaaa5a5555550000ULL;
        if (iteration == 1) {
            val64 ^= 0xFFFFFFFFFFFF0000ULL;
        }
        writeq(val64, &bar0->mc_rldram_test_d1);

        val64 = 0x55aaaaaaaa5a0000ULL;
        if (iteration == 1) {
            val64 ^= 0xFFFFFFFFFFFF0000ULL;
        }
        writeq(val64, &bar0->mc_rldram_test_d2);

        val64 = (u64) (0x0000003ffffe0100ULL);
        writeq(val64, &bar0->mc_rldram_test_add);

        val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
            MC_RLDRAM_TEST_GO;
        SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

        for (cnt = 0; cnt < 5; cnt++) {
            val64 = readq(&bar0->mc_rldram_test_ctrl);
            if (val64 & MC_RLDRAM_TEST_DONE)
                break;
            msleep(200);
        }

        if (cnt == 5)
            break;

        val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
        SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

        for (cnt = 0; cnt < 5; cnt++) {
            val64 = readq(&bar0->mc_rldram_test_ctrl);
            if (val64 & MC_RLDRAM_TEST_DONE)
                break;
            msleep(500);
        }

        if (cnt == 5)
            break;

        val64 = readq(&bar0->mc_rldram_test_ctrl);
        if (!(val64 & MC_RLDRAM_TEST_PASS))
            test_fail = 1;

        iteration++;
    }

    *data = test_fail;

    /* Bring the adapter out of test mode */
    SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);

    return test_fail;
}
/**
 *  s2io_ethtool_test - conducts 6 tests to determine the health of card.
 *  @sp : private member of the device structure, which is a pointer to the
 *  s2io_nic structure.
 *  @ethtest : pointer to a ethtool command specific structure that will be
 *  returned to the user.
 *  @data : variable that returns the result of each of the test
 *  conducted by the driver.
 *  Description:
 *  This function conducts 6 tests ( 4 offline and 2 online) to determine
 *  the health of the card.
 *  Return value:
 *  void
 */

static void s2io_ethtool_test(struct net_device *dev,
                              struct ethtool_test *ethtest,
                              uint64_t * data)
{
    nic_t *sp = dev->priv;
    int orig_state = netif_running(sp->dev);

    if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
        /* Offline Tests. */
        if (orig_state)
            s2io_close(sp->dev);

        if (s2io_register_test(sp, &data[0]))
            ethtest->flags |= ETH_TEST_FL_FAILED;

        s2io_reset(sp);

        if (s2io_rldram_test(sp, &data[3]))
            ethtest->flags |= ETH_TEST_FL_FAILED;

        s2io_reset(sp);

        if (s2io_eeprom_test(sp, &data[1]))
            ethtest->flags |= ETH_TEST_FL_FAILED;

        if (s2io_bist_test(sp, &data[4]))
            ethtest->flags |= ETH_TEST_FL_FAILED;

        if (orig_state)
            s2io_open(sp->dev);

        data[2] = 0;
    } else {
        /* Online Tests. */
        if (!orig_state) {
            DBG_PRINT(ERR_DBG,
                      "%s: is not up, cannot run test\n",
                      dev->name);
            data[0] = -1;
            data[1] = -1;
            data[2] = -1;
            data[3] = -1;
            data[4] = -1;
        }

        if (s2io_link_test(sp, &data[2]))
            ethtest->flags |= ETH_TEST_FL_FAILED;

        data[0] = 0;
        data[1] = 0;
        data[3] = 0;
        data[4] = 0;
    }
}
static void s2io_get_ethtool_stats(struct net_device *dev,
                                   struct ethtool_stats *estats,
                                   u64 * tmp_stats)
{
    int i = 0;
    nic_t *sp = dev->priv;
    StatInfo_t *stat_info = sp->mac_control.stats_info;

    s2io_updt_stats(sp);
    tmp_stats[i++] =
        (u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32 |
        le32_to_cpu(stat_info->tmac_frms);
    tmp_stats[i++] =
        (u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
        le32_to_cpu(stat_info->tmac_data_octets);
    tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
    tmp_stats[i++] =
        (u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
        le32_to_cpu(stat_info->tmac_mcst_frms);
    tmp_stats[i++] =
        (u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
        le32_to_cpu(stat_info->tmac_bcst_frms);
    tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
    tmp_stats[i++] =
        (u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
        le32_to_cpu(stat_info->tmac_any_err_frms);
    tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
    tmp_stats[i++] =
        (u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
        le32_to_cpu(stat_info->tmac_vld_ip);
    tmp_stats[i++] =
        (u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
        le32_to_cpu(stat_info->tmac_drop_ip);
    tmp_stats[i++] =
        (u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
        le32_to_cpu(stat_info->tmac_icmp);
    tmp_stats[i++] =
        (u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
        le32_to_cpu(stat_info->tmac_rst_tcp);
    tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
    tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
        le32_to_cpu(stat_info->tmac_udp);
    tmp_stats[i++] =
        (u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
        le32_to_cpu(stat_info->rmac_vld_frms);
    tmp_stats[i++] =
        (u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
        le32_to_cpu(stat_info->rmac_data_octets);
    tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
    tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
    tmp_stats[i++] =
        (u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
        le32_to_cpu(stat_info->rmac_vld_mcst_frms);
    tmp_stats[i++] =
        (u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
        le32_to_cpu(stat_info->rmac_vld_bcst_frms);
    tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
    tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
    tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
    tmp_stats[i++] =
        (u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
        le32_to_cpu(stat_info->rmac_discarded_frms);
    tmp_stats[i++] =
        (u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
        le32_to_cpu(stat_info->rmac_usized_frms);
    tmp_stats[i++] =
        (u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
        le32_to_cpu(stat_info->rmac_osized_frms);
    tmp_stats[i++] =
        (u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
        le32_to_cpu(stat_info->rmac_frag_frms);
    tmp_stats[i++] =
        (u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
        le32_to_cpu(stat_info->rmac_jabber_frms);
    tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
        le32_to_cpu(stat_info->rmac_ip);
    tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
    tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
    tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
        le32_to_cpu(stat_info->rmac_drop_ip);
    tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
        le32_to_cpu(stat_info->rmac_icmp);
    tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
    tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
        le32_to_cpu(stat_info->rmac_udp);
    tmp_stats[i++] =
        (u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
        le32_to_cpu(stat_info->rmac_err_drp_udp);
    tmp_stats[i++] =
        (u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
        le32_to_cpu(stat_info->rmac_pause_cnt);
    tmp_stats[i++] =
        (u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
        le32_to_cpu(stat_info->rmac_accepted_ip);
    tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);

    tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
    tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
}
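/*
 * Added illustration (not part of the original driver source): most of
 * the hardware counters are kept as a 32-bit base plus a 32-bit overflow
 * word, so the full 64-bit value is rebuilt as
 *
 *     full = ((u64)le32_to_cpu(oflow) << 32) | le32_to_cpu(base);
 *
 * e.g. base 0x00000010 with overflow word 0x00000002 yields
 * 0x0000000200000010.
 */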
int s2io_ethtool_get_regs_len(struct net_device *dev)
{
    return (XENA_REG_SPACE);
}


u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
{
    nic_t *sp = dev->priv;

    return (sp->rx_csum);
}

int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
{
    nic_t *sp = dev->priv;

    if (data)
        sp->rx_csum = 1;
    else
        sp->rx_csum = 0;

    return 0;
}

int s2io_get_eeprom_len(struct net_device *dev)
{
    return (XENA_EEPROM_SPACE);
}

int s2io_ethtool_self_test_count(struct net_device *dev)
{
    return (S2IO_TEST_LEN);
}

void s2io_ethtool_get_strings(struct net_device *dev,
                              u32 stringset, u8 * data)
{
    switch (stringset) {
    case ETH_SS_TEST:
        memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
        break;
    case ETH_SS_STATS:
        memcpy(data, &ethtool_stats_keys,
               sizeof(ethtool_stats_keys));
    }
}

static int s2io_ethtool_get_stats_count(struct net_device *dev)
{
    return (S2IO_STAT_LEN);
}

int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
{
    if (data)
        dev->features |= NETIF_F_IP_CSUM;
    else
        dev->features &= ~NETIF_F_IP_CSUM;

    return 0;
}
static struct ethtool_ops netdev_ethtool_ops = {
    .get_settings = s2io_ethtool_gset,
    .set_settings = s2io_ethtool_sset,
    .get_drvinfo = s2io_ethtool_gdrvinfo,
    .get_regs_len = s2io_ethtool_get_regs_len,
    .get_regs = s2io_ethtool_gregs,
    .get_link = ethtool_op_get_link,
    .get_eeprom_len = s2io_get_eeprom_len,
    .get_eeprom = s2io_ethtool_geeprom,
    .set_eeprom = s2io_ethtool_seeprom,
    .get_pauseparam = s2io_ethtool_getpause_data,
    .set_pauseparam = s2io_ethtool_setpause_data,
    .get_rx_csum = s2io_ethtool_get_rx_csum,
    .set_rx_csum = s2io_ethtool_set_rx_csum,
    .get_tx_csum = ethtool_op_get_tx_csum,
    .set_tx_csum = s2io_ethtool_op_set_tx_csum,
    .get_sg = ethtool_op_get_sg,
    .set_sg = ethtool_op_set_sg,
#ifdef NETIF_F_TSO
    .get_tso = ethtool_op_get_tso,
    .set_tso = ethtool_op_set_tso,
#endif
    .self_test_count = s2io_ethtool_self_test_count,
    .self_test = s2io_ethtool_test,
    .get_strings = s2io_ethtool_get_strings,
    .phys_id = s2io_ethtool_idnic,
    .get_stats_count = s2io_ethtool_get_stats_count,
    .get_ethtool_stats = s2io_get_ethtool_stats
};
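/*
 * Added note (not part of the original driver source): this table is what
 * SET_ETHTOOL_OPS() registers in s2io_init_nic(), so e.g. "ethtool -S"
 * lands in s2io_get_ethtool_stats(), "ethtool -d" in s2io_ethtool_gregs()
 * and "ethtool -t" in s2io_ethtool_test().
 */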
/**
 *  s2io_ioctl - Entry point for the Ioctl
 *  @dev :  Device pointer.
 *  @ifr :  An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  @cmd :  This is used to distinguish between the different commands that
 *  can be passed to the IOCTL functions.
 *  Description:
 *  Currently there is no special functionality supported in IOCTL, hence
 *  the function always returns EOPNOTSUPPORTED.
 */

int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
    return -EOPNOTSUPP;
}
/**
 *  s2io_change_mtu - entry point to change MTU size for the device.
 *  @dev : device pointer.
 *  @new_mtu : the new MTU size for the device.
 *  Description: A driver entry point to change MTU size for the device.
 *  Before changing the MTU the device must be stopped.
 *  Return value:
 *  0 on success and an appropriate (-)ve integer as defined in errno.h
 *  file on failure.
 */

int s2io_change_mtu(struct net_device *dev, int new_mtu)
{
    nic_t *sp = dev->priv;

    if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
        DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
                  dev->name);
        return -EPERM;
    }

    dev->mtu = new_mtu;
    if (netif_running(dev)) {
        s2io_card_down(sp);
        netif_stop_queue(dev);
        if (s2io_card_up(sp)) {
            DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
                      __FUNCTION__);
        }
        if (netif_queue_stopped(dev))
            netif_wake_queue(dev);
    } else { /* Device is down */
        XENA_dev_config_t __iomem *bar0 = sp->bar0;
        u64 val64 = new_mtu;

        writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
    }

    return 0;
}
/**
 *  s2io_tasklet - Bottom half of the ISR.
 *  @dev_adr : address of the device structure in dma_addr_t format.
 *  Description:
 *  This is the tasklet or the bottom half of the ISR. This is
 *  an extension of the ISR which is scheduled by the scheduler to be run
 *  when the load on the CPU is low. All low priority tasks of the ISR can
 *  be pushed into the tasklet. For now the tasklet is used only to
 *  replenish the Rx buffers in the Rx buffer descriptors.
 *  Return value:
 *  void.
 */

static void s2io_tasklet(unsigned long dev_addr)
{
    struct net_device *dev = (struct net_device *) dev_addr;
    nic_t *sp = dev->priv;
    int i, ret;
    mac_info_t *mac_control;
    struct config_param *config;

    mac_control = &sp->mac_control;
    config = &sp->config;

    if (!TASKLET_IN_USE) {
        for (i = 0; i < config->rx_ring_num; i++) {
            ret = fill_rx_buffers(sp, i);
            if (ret == -ENOMEM) {
                DBG_PRINT(ERR_DBG, "%s: Out of ",
                          dev->name);
                DBG_PRINT(ERR_DBG, "memory in tasklet\n");
                break;
            } else if (ret == -EFILL) {
                DBG_PRINT(ERR_DBG,
                          "%s: Rx Ring %d is full\n",
                          dev->name, i);
                break;
            }
        }
        clear_bit(0, (&sp->tasklet_status));
    }
}
/**
 * s2io_set_link - Set the Link status
 * @data: long pointer to device private structure
 * Description: Sets the link status for the adapter
 */

static void s2io_set_link(unsigned long data)
{
    nic_t *nic = (nic_t *) data;
    struct net_device *dev = nic->dev;
    XENA_dev_config_t __iomem *bar0 = nic->bar0;
    register u64 val64;
    u16 subid;

    if (test_and_set_bit(0, &(nic->link_state))) {
        /* The card is being reset, no point doing anything */
        return;
    }

    subid = nic->pdev->subsystem_device;
    if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
        /*
         * Allow a small delay for the NICs self initiated
         * cleanup to complete.
         */
        msleep(100);
    }

    val64 = readq(&bar0->adapter_status);
    if (verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
        if (LINK_IS_UP(val64)) {
            val64 = readq(&bar0->adapter_control);
            val64 |= ADAPTER_CNTL_EN;
            writeq(val64, &bar0->adapter_control);
            if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
                                                  subid)) {
                val64 = readq(&bar0->gpio_control);
                val64 |= GPIO_CTRL_GPIO_0;
                writeq(val64, &bar0->gpio_control);
                val64 = readq(&bar0->gpio_control);
            } else {
                val64 |= ADAPTER_LED_ON;
                writeq(val64, &bar0->adapter_control);
            }
            if (s2io_link_fault_indication(nic) ==
                MAC_RMAC_ERR_TIMER) {
                val64 = readq(&bar0->adapter_status);
                if (!LINK_IS_UP(val64)) {
                    DBG_PRINT(ERR_DBG, "%s:", dev->name);
                    DBG_PRINT(ERR_DBG, " Link down");
                    DBG_PRINT(ERR_DBG, "after ");
                    DBG_PRINT(ERR_DBG, "enabling ");
                    DBG_PRINT(ERR_DBG, "device \n");
                }
            }
            if (nic->device_enabled_once == FALSE) {
                nic->device_enabled_once = TRUE;
            }
            s2io_link(nic, LINK_UP);
        } else {
            if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
                                                  subid)) {
                val64 = readq(&bar0->gpio_control);
                val64 &= ~GPIO_CTRL_GPIO_0;
                writeq(val64, &bar0->gpio_control);
                val64 = readq(&bar0->gpio_control);
            }
            s2io_link(nic, LINK_DOWN);
        }
    } else {    /* NIC is not Quiescent. */
        DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
        DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
        netif_stop_queue(dev);
    }
    clear_bit(0, &(nic->link_state));
}
static void s2io_card_down(nic_t * sp)
{
    int cnt = 0;
    XENA_dev_config_t __iomem *bar0 = sp->bar0;
    unsigned long flags;
    register u64 val64 = 0;

    del_timer_sync(&sp->alarm_timer);
    /* If s2io_set_link task is executing, wait till it completes. */
    while (test_and_set_bit(0, &(sp->link_state))) {
        msleep(50);
    }
    atomic_set(&sp->card_state, CARD_DOWN);

    /* disable Tx and Rx traffic on the NIC */
    stop_nic(sp);

    /* Kill tasklet. */
    tasklet_kill(&sp->task);

    /* Check if the device is Quiescent and then Reset the NIC */
    while (1) {
        val64 = readq(&bar0->adapter_status);
        if (verify_xena_quiescence(sp, val64, sp->device_enabled_once)) {
            break;
        }

        msleep(50);
        cnt++;
        if (cnt == 10) {
            DBG_PRINT(ERR_DBG,
                      "s2io_close:Device not Quiescent ");
            DBG_PRINT(ERR_DBG, "adapter status reads 0x%llx\n",
                      (unsigned long long) val64);
            break;
        }
    }
    s2io_reset(sp);

    /* Waiting till all Interrupt handlers are complete */
    cnt = 0;
    do {
        msleep(10);
        if (!atomic_read(&sp->isr_cnt))
            break;
        cnt++;
    } while (cnt < 5);

    spin_lock_irqsave(&sp->tx_lock, flags);
    /* Free all Tx buffers */
    free_tx_buffers(sp);
    spin_unlock_irqrestore(&sp->tx_lock, flags);

    /* Free all Rx buffers */
    spin_lock_irqsave(&sp->rx_lock, flags);
    free_rx_buffers(sp);
    spin_unlock_irqrestore(&sp->rx_lock, flags);

    clear_bit(0, &(sp->link_state));
}
static int s2io_card_up(nic_t * sp)
{
    int i, ret = 0;
    mac_info_t *mac_control;
    struct config_param *config;
    struct net_device *dev = (struct net_device *) sp->dev;

    /* Initialize the H/W I/O registers */
    if (init_nic(sp) != 0) {
        DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
                  dev->name);
        return -ENODEV;
    }

    if (sp->intr_type == MSI)
        ret = s2io_enable_msi(sp);
    else if (sp->intr_type == MSI_X)
        ret = s2io_enable_msi_x(sp);
    if (ret) {
        DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
        sp->intr_type = INTA;
    }

    /*
     * Initializing the Rx buffers. For now we are considering only 1
     * Rx ring and initializing buffers into 30 Rx blocks
     */
    mac_control = &sp->mac_control;
    config = &sp->config;

    for (i = 0; i < config->rx_ring_num; i++) {
        if ((ret = fill_rx_buffers(sp, i))) {
            DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
                      dev->name);
            s2io_reset(sp);
            free_rx_buffers(sp);
            return -ENOMEM;
        }
        DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
                  atomic_read(&sp->rx_bufs_left[i]));
    }

    /* Setting its receive mode */
    s2io_set_multicast(dev);

    /* Enable tasklet for the device */
    tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);

    /* Enable Rx Traffic and interrupts on the NIC */
    if (start_nic(sp)) {
        DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
        tasklet_kill(&sp->task);
        s2io_reset(sp);
        free_irq(dev->irq, dev);
        free_rx_buffers(sp);
        return -ENODEV;
    }

    S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));

    atomic_set(&sp->card_state, CARD_UP);
    return 0;
}
/**
 * s2io_restart_nic - Resets the NIC.
 * @data : long pointer to the device private structure
 * Description:
 * This function is scheduled to be run by the s2io_tx_watchdog
 * function after 0.5 secs to reset the NIC. The idea is to reduce
 * the run time of the watch dog routine which is run holding a
 * spin lock.
 */

static void s2io_restart_nic(unsigned long data)
{
    struct net_device *dev = (struct net_device *) data;
    nic_t *sp = dev->priv;

    s2io_card_down(sp);
    if (s2io_card_up(sp)) {
        DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
                  dev->name);
    }
    netif_wake_queue(dev);
    DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
              dev->name);
}
/**
 *  s2io_tx_watchdog - Watchdog for transmit side.
 *  @dev : Pointer to net device structure
 *  Description:
 *  This function is triggered if the Tx Queue is stopped
 *  for a pre-defined amount of time when the Interface is still up.
 *  If the Interface is jammed in such a situation, the hardware is
 *  reset (by s2io_close) and restarted again (by s2io_open) to
 *  overcome any problem that might have been caused in the hardware.
 *  Return value:
 *  void
 */

static void s2io_tx_watchdog(struct net_device *dev)
{
    nic_t *sp = dev->priv;

    if (netif_carrier_ok(dev)) {
        schedule_work(&sp->rst_timer_task);
    }
}
/**
 *   rx_osm_handler - To perform some OS related operations on SKB.
 *   @sp: private member of the device structure,pointer to s2io_nic structure.
 *   @skb : the socket buffer pointer.
 *   @len : length of the packet
 *   @cksum : FCS checksum of the frame.
 *   @ring_no : the ring from which this RxD was extracted.
 *   Description:
 *   This function is called by the Rx interrupt service routine to perform
 *   some OS related operations on the SKB before passing it to the upper
 *   layers. It mainly checks if the checksum is OK, if so adds it to the
 *   SKBs cksum variable, increments the Rx packet count and passes the SKB
 *   to the upper layer. If the checksum is wrong, it increments the Rx
 *   packet error count, frees the SKB and returns error.
 *   Return value:
 *   SUCCESS on success and -1 on failure.
 */
static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp)
{
    nic_t *sp = ring_data->nic;
    struct net_device *dev = (struct net_device *) sp->dev;
    struct sk_buff *skb = (struct sk_buff *)
        ((unsigned long) rxdp->Host_Control);
    int ring_no = ring_data->ring_no;
    u16 l3_csum, l4_csum;
#ifdef CONFIG_2BUFF_MODE
    int buf0_len = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
    int buf2_len = RXD_GET_BUFFER2_SIZE(rxdp->Control_2);
    int get_block = ring_data->rx_curr_get_info.block_index;
    int get_off = ring_data->rx_curr_get_info.offset;
    buffAdd_t *ba = &ring_data->ba[get_block][get_off];
    unsigned char *buff;
#else
    u16 len = (u16) ((RXD_GET_BUFFER0_SIZE(rxdp->Control_2)) >> 48);
#endif

    if (rxdp->Control_1 & RXD_T_CODE) {
        unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
        DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
                  dev->name, err);
        dev_kfree_skb(skb);
        sp->stats.rx_crc_errors++;
        atomic_dec(&sp->rx_bufs_left[ring_no]);
        rxdp->Host_Control = 0;
        return 0;
    }

    /* Updating statistics */
    rxdp->Host_Control = 0;
    sp->stats.rx_packets++;
#ifndef CONFIG_2BUFF_MODE
    sp->stats.rx_bytes += len;
#else
    sp->stats.rx_bytes += buf0_len + buf2_len;
#endif

#ifndef CONFIG_2BUFF_MODE
    skb_put(skb, len);
#else
    buff = skb_push(skb, buf0_len);
    memcpy(buff, ba->ba_0, buf0_len);
    skb_put(skb, buf2_len);
#endif

    if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
        (sp->rx_csum)) {
        l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
        l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
        if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
            /*
             * NIC verifies if the Checksum of the received
             * frame is Ok or not and accordingly returns
             * a flag in the RxD.
             */
            skb->ip_summed = CHECKSUM_UNNECESSARY;
        } else {
            /*
             * Packet with erroneous checksum, let the
             * upper layers deal with it.
             */
            skb->ip_summed = CHECKSUM_NONE;
        }
    } else {
        skb->ip_summed = CHECKSUM_NONE;
    }

    skb->protocol = eth_type_trans(skb, dev);
#ifdef CONFIG_S2IO_NAPI
    if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
        /* Queueing the vlan frame to the upper layer */
        vlan_hwaccel_receive_skb(skb, sp->vlgrp,
                                 RXD_GET_VLAN_TAG(rxdp->Control_2));
    } else {
        netif_receive_skb(skb);
    }
#else
    if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
        /* Queueing the vlan frame to the upper layer */
        vlan_hwaccel_rx(skb, sp->vlgrp,
                        RXD_GET_VLAN_TAG(rxdp->Control_2));
    } else {
        netif_rx(skb);
    }
#endif
    dev->last_rx = jiffies;
    atomic_dec(&sp->rx_bufs_left[ring_no]);
    return SUCCESS;
}
/**
 *  s2io_link - stops/starts the Tx queue.
 *  @sp : private member of the device structure, which is a pointer to the
 *  s2io_nic structure.
 *  @link : indicates whether link is UP/DOWN.
 *  Description:
 *  This function stops/starts the Tx queue depending on whether the link
 *  status of the NIC is down or up. This is called by the Alarm
 *  interrupt handler whenever a link change interrupt comes up.
 *  Return value:
 *  void.
 */

void s2io_link(nic_t * sp, int link)
{
    struct net_device *dev = (struct net_device *) sp->dev;

    if (link != sp->last_link_state) {
        if (link == LINK_DOWN) {
            DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
            netif_carrier_off(dev);
        } else {
            DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
            netif_carrier_on(dev);
        }
    }
    sp->last_link_state = link;
}
/**
 *  get_xena_rev_id - to identify revision ID of xena.
 *  @pdev : PCI Dev structure
 *  Description:
 *  Function to identify the Revision ID of xena.
 *  Return value:
 *  returns the revision ID of the device.
 */

int get_xena_rev_id(struct pci_dev *pdev)
{
    u8 id = 0;
    int ret;

    ret = pci_read_config_byte(pdev, PCI_REVISION_ID, (u8 *) & id);
    return id;
}
/**
 *  s2io_init_pci - Initialization of PCI and PCI-X configuration registers.
 *  @sp : private member of the device structure, which is a pointer to the
 *  s2io_nic structure.
 *  Description:
 *  This function initializes a few of the PCI and PCI-X configuration registers
 *  with recommended values.
 *  Return value:
 *  void
 */

static void s2io_init_pci(nic_t * sp)
{
    u16 pci_cmd = 0, pcix_cmd = 0;

    /* Enable Data Parity Error Recovery in PCI-X command register. */
    pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
                         &(pcix_cmd));
    pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
                          (pcix_cmd | 1));
    pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
                         &(pcix_cmd));

    /* Set the PErr Response bit in PCI command register. */
    pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
    pci_write_config_word(sp->pdev, PCI_COMMAND,
                          (pci_cmd | PCI_COMMAND_PARITY));
    pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);

    /* Forcibly disabling relaxed ordering capability of the card. */
    pcix_cmd &= 0xfffd;
    pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
                          pcix_cmd);
    pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
                         &(pcix_cmd));
}
MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

module_param(tx_fifo_num, int, 0);
module_param(rx_ring_num, int, 0);
module_param_array(tx_fifo_len, uint, NULL, 0);
module_param_array(rx_ring_sz, uint, NULL, 0);
module_param_array(rts_frm_len, uint, NULL, 0);
module_param(use_continuous_tx_intrs, int, 1);
module_param(rmac_pause_time, int, 0);
module_param(mc_pause_threshold_q0q3, int, 0);
module_param(mc_pause_threshold_q4q7, int, 0);
module_param(shared_splits, int, 0);
module_param(tmac_util_period, int, 0);
module_param(rmac_util_period, int, 0);
module_param(bimodal, bool, 0);
#ifndef CONFIG_S2IO_NAPI
module_param(indicate_max_pkts, int, 0);
#endif
module_param(rxsync_frequency, int, 0);
module_param(intr_type, int, 0);
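/*
 * Added usage example (not part of the original driver source): these
 * parameters are normally passed at load time, e.g.
 *
 *     modprobe s2io tx_fifo_num=2 tx_fifo_len=512,512 \
 *             rx_ring_num=2 rx_ring_sz=30,30 intr_type=0
 *
 * (intr_type=0 is assumed here to select INTA).  Values left unset fall
 * back to the defaults chosen in s2io_init_nic().
 */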
/**
 *  s2io_init_nic - Initialization of the adapter.
 *  @pdev : structure containing the PCI related information of the device.
 *  @pre: List of PCI devices supported by the driver listed in s2io_tbl.
 *  Description:
 *  The function initializes an adapter identified by the pci_dev structure.
 *  All OS related initialization including memory and device structure and
 *  initialization of the device private variable is done. Also the swapper
 *  control register is initialized to enable read and write into the I/O
 *  registers of the device.
 *  Return value:
 *  returns 0 on success and negative on failure.
 */

static int __devinit
s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
{
    nic_t *sp;
    struct net_device *dev;
    int i, j, ret;
    int dma_flag = FALSE;
    u32 mac_up, mac_down;
    u64 val64 = 0, tmp64 = 0;
    XENA_dev_config_t __iomem *bar0 = NULL;
    u16 subid;
    mac_info_t *mac_control;
    struct config_param *config;
    int mode;
    u8 dev_intr_type = intr_type;

#ifdef CONFIG_S2IO_NAPI
    if (dev_intr_type != INTA) {
        DBG_PRINT(ERR_DBG, "NAPI cannot be enabled when MSI/MSI-X \
is enabled. Defaulting to INTA\n");
        dev_intr_type = INTA;
    } else
        DBG_PRINT(ERR_DBG, "NAPI support has been enabled\n");
#endif

    if ((ret = pci_enable_device(pdev))) {
        DBG_PRINT(ERR_DBG,
                  "s2io_init_nic: pci_enable_device failed\n");
        return ret;
    }

    if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
        DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
        dma_flag = TRUE;
        if (pci_set_consistent_dma_mask
            (pdev, DMA_64BIT_MASK)) {
            DBG_PRINT(ERR_DBG,
                      "Unable to obtain 64bit DMA for \
consistent allocations\n");
            pci_disable_device(pdev);
            return -ENOMEM;
        }
    } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
        DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
    } else {
        pci_disable_device(pdev);
        return -ENOMEM;
    }
    if ((dev_intr_type == MSI_X) &&
        ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
         (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
        DBG_PRINT(ERR_DBG, "Xframe I does not support MSI_X. \
Defaulting to INTA\n");
        dev_intr_type = INTA;
    }
    if (dev_intr_type != MSI_X) {
        if (pci_request_regions(pdev, s2io_driver_name)) {
            DBG_PRINT(ERR_DBG, "Request Regions failed\n"),
            pci_disable_device(pdev);
            return -ENODEV;
        }
    } else {
        if (!(request_mem_region(pci_resource_start(pdev, 0),
                pci_resource_len(pdev, 0), s2io_driver_name))) {
            DBG_PRINT(ERR_DBG, "bar0 Request Regions failed\n");
            pci_disable_device(pdev);
            return -ENODEV;
        }
        if (!(request_mem_region(pci_resource_start(pdev, 2),
                pci_resource_len(pdev, 2), s2io_driver_name))) {
            DBG_PRINT(ERR_DBG, "bar1 Request Regions failed\n");
            release_mem_region(pci_resource_start(pdev, 0),
                               pci_resource_len(pdev, 0));
            pci_disable_device(pdev);
            return -ENODEV;
        }
    }

    dev = alloc_etherdev(sizeof(nic_t));
    if (dev == NULL) {
        DBG_PRINT(ERR_DBG, "Device allocation failed\n");
        pci_disable_device(pdev);
        pci_release_regions(pdev);
        return -ENODEV;
    }

    pci_set_master(pdev);
    pci_set_drvdata(pdev, dev);
    SET_MODULE_OWNER(dev);
    SET_NETDEV_DEV(dev, &pdev->dev);

    /*  Private member variable initialized to s2io NIC structure */
    sp = dev->priv;
    memset(sp, 0, sizeof(nic_t));
    sp->dev = dev;
    sp->pdev = pdev;
    sp->high_dma_flag = dma_flag;
    sp->device_enabled_once = FALSE;
    sp->intr_type = dev_intr_type;

    if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
        (pdev->device == PCI_DEVICE_ID_HERC_UNI))
        sp->device_type = XFRAME_II_DEVICE;
    else
        sp->device_type = XFRAME_I_DEVICE;

    /* Initialize some PCI/PCI-X fields of the NIC. */
    s2io_init_pci(sp);

    /*
     * Setting the device configuration parameters.
     * Most of these parameters can be specified by the user during
     * module insertion as they are module loadable parameters. If
     * these parameters are not specified during load time, they
     * are initialized with default values.
     */
    mac_control = &sp->mac_control;
    config = &sp->config;

    /* Tx side parameters. */
    if (tx_fifo_len[0] == 0)
        tx_fifo_len[0] = DEFAULT_FIFO_LEN;    /* Default value. */
    config->tx_fifo_num = tx_fifo_num;
    for (i = 0; i < MAX_TX_FIFOS; i++) {
        config->tx_cfg[i].fifo_len = tx_fifo_len[i];
        config->tx_cfg[i].fifo_priority = i;
    }

    /* mapping the QoS priority to the configured fifos */
    for (i = 0; i < MAX_TX_FIFOS; i++)
        config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];

    config->tx_intr_type = TXD_INT_TYPE_UTILZ;
    for (i = 0; i < config->tx_fifo_num; i++) {
        config->tx_cfg[i].f_no_snoop =
            (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
        if (config->tx_cfg[i].fifo_len < 65) {
            config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
            break;
        }
    }
    config->max_txds = MAX_SKB_FRAGS + 1;

    /* Rx side parameters. */
    if (rx_ring_sz[0] == 0)
        rx_ring_sz[0] = SMALL_BLK_CNT;    /* Default value. */
    config->rx_ring_num = rx_ring_num;
    for (i = 0; i < MAX_RX_RINGS; i++) {
        config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
            (MAX_RXDS_PER_BLOCK + 1);
        config->rx_cfg[i].ring_priority = i;
    }

    for (i = 0; i < rx_ring_num; i++) {
        config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
        config->rx_cfg[i].f_no_snoop =
            (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
    }

    /*  Setting Mac Control parameters */
    mac_control->rmac_pause_time = rmac_pause_time;
    mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
    mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;

    /* Initialize Ring buffer parameters. */
    for (i = 0; i < config->rx_ring_num; i++)
        atomic_set(&sp->rx_bufs_left[i], 0);

    /* Initialize the number of ISRs currently running */
    atomic_set(&sp->isr_cnt, 0);
    /*  initialize the shared memory used by the NIC and the host */
    if (init_shared_mem(sp)) {
        DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
                  dev->name);
        ret = -ENOMEM;
        goto mem_alloc_failed;
    }

    sp->bar0 = ioremap(pci_resource_start(pdev, 0),
                       pci_resource_len(pdev, 0));
    if (!sp->bar0) {
        DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem1\n",
                  dev->name);
        ret = -ENOMEM;
        goto bar0_remap_failed;
    }

    sp->bar1 = ioremap(pci_resource_start(pdev, 2),
                       pci_resource_len(pdev, 2));
    if (!sp->bar1) {
        DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem2\n",
                  dev->name);
        ret = -ENOMEM;
        goto bar1_remap_failed;
    }

    dev->irq = pdev->irq;
    dev->base_addr = (unsigned long) sp->bar0;

    /* Initializing the BAR1 address as the start of the FIFO pointer. */
    for (j = 0; j < MAX_TX_FIFOS; j++) {
        mac_control->tx_FIFO_start[j] = (TxFIFO_element_t __iomem *)
            (sp->bar1 + (j * 0x00020000));
    }

    /*  Driver entry points */
    dev->open = &s2io_open;
    dev->stop = &s2io_close;
    dev->hard_start_xmit = &s2io_xmit;
    dev->get_stats = &s2io_get_stats;
    dev->set_multicast_list = &s2io_set_multicast;
    dev->do_ioctl = &s2io_ioctl;
    dev->change_mtu = &s2io_change_mtu;
    SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
    dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
    dev->vlan_rx_register = s2io_vlan_rx_register;
    dev->vlan_rx_kill_vid = (void *)s2io_vlan_rx_kill_vid;

    /*
     * will use eth_mac_addr() for  dev->set_mac_address
     * mac address will be set every time dev->open() is called
     */
#if defined(CONFIG_S2IO_NAPI)
    dev->poll = s2io_poll;
    dev->weight = 32;
#endif

    dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
    if (sp->high_dma_flag == TRUE)
        dev->features |= NETIF_F_HIGHDMA;
#ifdef NETIF_F_TSO
    dev->features |= NETIF_F_TSO;
#endif

    dev->tx_timeout = &s2io_tx_watchdog;
    dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
    INIT_WORK(&sp->rst_timer_task,
              (void (*)(void *)) s2io_restart_nic, dev);
    INIT_WORK(&sp->set_link_task,
              (void (*)(void *)) s2io_set_link, sp);

    pci_save_state(sp->pdev);

    /* Setting swapper control on the NIC, for proper reset operation */
    if (s2io_set_swapper(sp)) {
        DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
                  dev->name);
        ret = -EAGAIN;
        goto set_swap_failed;
    }

    /* Verify if the Herc works on the slot its placed into */
    if (sp->device_type & XFRAME_II_DEVICE) {
        mode = s2io_verify_pci_mode(sp);
        if (mode < 0) {
            DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
            DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
            ret = -EBADSLT;
            goto set_swap_failed;
        }
    }

    /* Not needed for Herc */
    if (sp->device_type & XFRAME_I_DEVICE) {
        /*
         * Fix for all "FFs" MAC address problems observed on
         * Alpha platforms
         */
        fix_mac_address(sp);
        s2io_reset(sp);
    }
    /*
     * MAC address initialization.
     * For now only one mac address will be read and used.
     */
    bar0 = sp->bar0;
    val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
        RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
    writeq(val64, &bar0->rmac_addr_cmd_mem);
    wait_for_cmd_complete(sp);

    tmp64 = readq(&bar0->rmac_addr_data0_mem);
    mac_down = (u32) tmp64;
    mac_up = (u32) (tmp64 >> 32);

    memset(sp->def_mac_addr[0].mac_addr, 0, sizeof(ETH_ALEN));

    sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
    sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
    sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
    sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
    sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
    sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);

    /*  Set the factory defined MAC address initially   */
    dev->addr_len = ETH_ALEN;
    memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);

    /*
     * Initialize the tasklet status and link state flags
     * and the card state parameter
     */
    atomic_set(&(sp->card_state), 0);
    sp->tasklet_status = 0;
    sp->link_state = 0;

    /* Initialize spinlocks */
    spin_lock_init(&sp->tx_lock);
#ifndef CONFIG_S2IO_NAPI
    spin_lock_init(&sp->put_lock);
#endif
    spin_lock_init(&sp->rx_lock);

    /*
     * SXE-002: Configure link and activity LED to init state
     * on driver load.
     */
    subid = sp->pdev->subsystem_device;
    if ((subid & 0xFF) >= 0x07) {
        val64 = readq(&bar0->gpio_control);
        val64 |= 0x0000800000000000ULL;
        writeq(val64, &bar0->gpio_control);
        val64 = 0x0411040400000000ULL;
        writeq(val64, (void __iomem *) bar0 + 0x2700);
        val64 = readq(&bar0->gpio_control);
    }

    sp->rx_csum = 1;    /* Rx chksum verify enabled by default */
    if (register_netdev(dev)) {
        DBG_PRINT(ERR_DBG, "Device registration failed\n");
        ret = -ENODEV;
        goto register_failed;
    }

    if (sp->device_type & XFRAME_II_DEVICE) {
        DBG_PRINT(ERR_DBG, "%s: Neterion Xframe II 10GbE adapter ",
                  dev->name);
        DBG_PRINT(ERR_DBG, "(rev %d), Version %s",
                  get_xena_rev_id(sp->pdev),
                  s2io_driver_version);
#ifdef CONFIG_2BUFF_MODE
        DBG_PRINT(ERR_DBG, ", Buffer mode %d",2);
#endif
        switch(sp->intr_type) {
        case INTA:
            DBG_PRINT(ERR_DBG, ", Intr type INTA");
            break;
        case MSI:
            DBG_PRINT(ERR_DBG, ", Intr type MSI");
            break;
        case MSI_X:
            DBG_PRINT(ERR_DBG, ", Intr type MSI-X");
            break;
        }

        DBG_PRINT(ERR_DBG, "\nCopyright(c) 2002-2005 Neterion Inc.\n");
        DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
                  sp->def_mac_addr[0].mac_addr[0],
                  sp->def_mac_addr[0].mac_addr[1],
                  sp->def_mac_addr[0].mac_addr[2],
                  sp->def_mac_addr[0].mac_addr[3],
                  sp->def_mac_addr[0].mac_addr[4],
                  sp->def_mac_addr[0].mac_addr[5]);
        mode = s2io_print_pci_mode(sp);
        if (mode < 0) {
            DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode ");
            ret = -EBADSLT;
            goto set_swap_failed;
        }
    } else {
        DBG_PRINT(ERR_DBG, "%s: Neterion Xframe I 10GbE adapter ",
                  dev->name);
        DBG_PRINT(ERR_DBG, "(rev %d), Version %s",
                  get_xena_rev_id(sp->pdev),
                  s2io_driver_version);
#ifdef CONFIG_2BUFF_MODE
        DBG_PRINT(ERR_DBG, ", Buffer mode %d",2);
#endif
        switch(sp->intr_type) {
        case INTA:
            DBG_PRINT(ERR_DBG, ", Intr type INTA");
            break;
        case MSI:
            DBG_PRINT(ERR_DBG, ", Intr type MSI");
            break;
        case MSI_X:
            DBG_PRINT(ERR_DBG, ", Intr type MSI-X");
            break;
        }
        DBG_PRINT(ERR_DBG, "\nCopyright(c) 2002-2005 Neterion Inc.\n");
        DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
                  sp->def_mac_addr[0].mac_addr[0],
                  sp->def_mac_addr[0].mac_addr[1],
                  sp->def_mac_addr[0].mac_addr[2],
                  sp->def_mac_addr[0].mac_addr[3],
                  sp->def_mac_addr[0].mac_addr[4],
                  sp->def_mac_addr[0].mac_addr[5]);
    }

    /* Initialize device name */
    strcpy(sp->name, dev->name);
    if (sp->device_type & XFRAME_II_DEVICE)
        strcat(sp->name, ": Neterion Xframe II 10GbE adapter");
    else
        strcat(sp->name, ": Neterion Xframe I 10GbE adapter");

    /* Initialize bimodal Interrupts */
    sp->config.bimodal = bimodal;
    if (!(sp->device_type & XFRAME_II_DEVICE) && bimodal) {
        sp->config.bimodal = 0;
        DBG_PRINT(ERR_DBG,"%s:Bimodal intr not supported by Xframe I\n",
                  dev->name);
    }

    /*
     * Make Link state as off at this point, when the Link change
     * interrupt comes the state will be automatically changed to
     * the right state.
     */
    netif_carrier_off(dev);

    return 0;

      register_failed:
      set_swap_failed:
    iounmap(sp->bar1);
      bar1_remap_failed:
    iounmap(sp->bar0);
      bar0_remap_failed:
    free_shared_mem(sp);
      mem_alloc_failed:
    pci_disable_device(pdev);
    if (dev_intr_type != MSI_X)
        pci_release_regions(pdev);
    else {
        release_mem_region(pci_resource_start(pdev, 0),
                           pci_resource_len(pdev, 0));
        release_mem_region(pci_resource_start(pdev, 2),
                           pci_resource_len(pdev, 2));
    }
    pci_set_drvdata(pdev, NULL);
    free_netdev(dev);

    return ret;
}
/**
 * s2io_rem_nic - Free the PCI device
 * @pdev: structure containing the PCI related information of the device.
 * Description: This function is called by the Pci subsystem to release a
 * PCI device and free up all resource held up by the device. This could
 * be in response to a Hot plug event or when the driver is to be removed
 * from memory.
 */

static void __devexit s2io_rem_nic(struct pci_dev *pdev)
{
    struct net_device *dev =
        (struct net_device *) pci_get_drvdata(pdev);
    nic_t *sp;

    if (dev == NULL) {
        DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
        return;
    }

    sp = dev->priv;
    unregister_netdev(dev);

    free_shared_mem(sp);
    iounmap(sp->bar0);
    iounmap(sp->bar1);
    pci_disable_device(pdev);
    if (sp->intr_type != MSI_X)
        pci_release_regions(pdev);
    else {
        release_mem_region(pci_resource_start(pdev, 0),
                           pci_resource_len(pdev, 0));
        release_mem_region(pci_resource_start(pdev, 2),
                           pci_resource_len(pdev, 2));
    }
    pci_set_drvdata(pdev, NULL);
    free_netdev(dev);
}
/**
 * s2io_starter - Entry point for the driver
 * Description: This function is the entry point for the driver. It verifies
 * the module loadable parameters and initializes PCI configuration space.
 */

int __init s2io_starter(void)
{
    return pci_module_init(&s2io_driver);
}

/**
 * s2io_closer - Cleanup routine for the driver
 * Description: This function is the cleanup routine for the driver. It
 * unregisters the driver.
 */

void s2io_closer(void)
{
    pci_unregister_driver(&s2io_driver);
    DBG_PRINT(INIT_DBG, "cleanup done\n");
}

module_init(s2io_starter);
module_exit(s2io_closer);