1 /************************************************************************
2 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3 * Copyright(c) 2002-2005 Neterion Inc.
5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference.
7 * Drivers based on or derived from this code fall under the GPL and must
8 * retain the authorship, copyright and license notice. This file is not
9 * a complete program and may only be used when the entire operating
10 * system is licensed under the GPL.
11 * See the file COPYING in this distribution for more information.
14 * Jeff Garzik : For pointing out the improper error condition
15 * check in the s2io_xmit routine and also some
16 * issues in the Tx watch dog function. Also for
17 * patiently answering all those innumerable
18 * questions regaring the 2.6 porting issues.
19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
20 * macros available only in 2.6 Kernel.
21 * Francois Romieu : For pointing out all code part that were
22 * deprecated and also styling related comments.
23 * Grant Grundler : For helping me get rid of some Architecture
25 * Christopher Hellwig : Some more 2.6 specific issues in the driver.
27 * The module loadable parameters that are supported by the driver and a brief
28 * explaination of all the variables.
29 * rx_ring_num : This can be used to program the number of receive rings used
31 * rx_ring_sz: This defines the number of descriptors each ring can have. This
32 * is also an array of size 8.
33 * tx_fifo_num: This defines the number of Tx FIFOs thats used int the driver.
34 * tx_fifo_len: This too is an array of 8. Each element defines the number of
35 * Tx descriptors that can be associated with each corresponding FIFO.
36 ************************************************************************/
38 #include <linux/config.h>
39 #include <linux/module.h>
40 #include <linux/types.h>
41 #include <linux/errno.h>
42 #include <linux/ioport.h>
43 #include <linux/pci.h>
44 #include <linux/dma-mapping.h>
45 #include <linux/kernel.h>
46 #include <linux/netdevice.h>
47 #include <linux/etherdevice.h>
48 #include <linux/skbuff.h>
49 #include <linux/init.h>
50 #include <linux/delay.h>
51 #include <linux/stddef.h>
52 #include <linux/ioctl.h>
53 #include <linux/timex.h>
54 #include <linux/sched.h>
55 #include <linux/ethtool.h>
56 #include <linux/version.h>
57 #include <linux/workqueue.h>
58 #include <linux/if_vlan.h>
60 #include <asm/system.h>
61 #include <asm/uaccess.h>
66 #include "s2io-regs.h"
68 /* S2io Driver name & version. */
69 static char s2io_driver_name
[] = "Neterion";
70 static char s2io_driver_version
[] = "Version 2.0.8.1";
72 static inline int RXD_IS_UP2DT(RxD_t
*rxdp
)
76 ret
= ((!(rxdp
->Control_1
& RXD_OWN_XENA
)) &&
77 (GET_RXD_MARKER(rxdp
->Control_2
) != THE_RXD_MARK
));
83 * Cards with following subsystem_id have a link state indication
84 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
85 * macro below identifies these cards given the subsystem_id.
87 #define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
88 (dev_type == XFRAME_I_DEVICE) ? \
89 ((((subid >= 0x600B) && (subid <= 0x600D)) || \
90 ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
92 #define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
93 ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
94 #define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
97 static inline int rx_buffer_level(nic_t
* sp
, int rxb_size
, int ring
)
100 mac_info_t
*mac_control
;
102 mac_control
= &sp
->mac_control
;
103 if ((mac_control
->rings
[ring
].pkt_cnt
- rxb_size
) > 16) {
105 if (rxb_size
<= MAX_RXDS_PER_BLOCK
) {
113 /* Ethtool related variables and Macros. */
114 static char s2io_gstrings
[][ETH_GSTRING_LEN
] = {
115 "Register test\t(offline)",
116 "Eeprom test\t(offline)",
117 "Link test\t(online)",
118 "RLDRAM test\t(offline)",
119 "BIST Test\t(offline)"
122 static char ethtool_stats_keys
[][ETH_GSTRING_LEN
] = {
124 {"tmac_data_octets"},
128 {"tmac_pause_ctrl_frms"},
129 {"tmac_any_err_frms"},
130 {"tmac_vld_ip_octets"},
138 {"rmac_data_octets"},
139 {"rmac_fcs_err_frms"},
141 {"rmac_vld_mcst_frms"},
142 {"rmac_vld_bcst_frms"},
143 {"rmac_in_rng_len_err_frms"},
145 {"rmac_pause_ctrl_frms"},
146 {"rmac_discarded_frms"},
147 {"rmac_usized_frms"},
148 {"rmac_osized_frms"},
150 {"rmac_jabber_frms"},
158 {"rmac_err_drp_udp"},
160 {"rmac_accepted_ip"},
162 {"\n DRIVER STATISTICS"},
163 {"single_bit_ecc_errs"},
164 {"double_bit_ecc_errs"},
167 #define S2IO_STAT_LEN sizeof(ethtool_stats_keys)/ ETH_GSTRING_LEN
168 #define S2IO_STAT_STRINGS_LEN S2IO_STAT_LEN * ETH_GSTRING_LEN
170 #define S2IO_TEST_LEN sizeof(s2io_gstrings) / ETH_GSTRING_LEN
171 #define S2IO_STRINGS_LEN S2IO_TEST_LEN * ETH_GSTRING_LEN
173 #define S2IO_TIMER_CONF(timer, handle, arg, exp) \
174 init_timer(&timer); \
175 timer.function = handle; \
176 timer.data = (unsigned long) arg; \
177 mod_timer(&timer, (jiffies + exp)) \
180 static void s2io_vlan_rx_register(struct net_device
*dev
,
181 struct vlan_group
*grp
)
183 nic_t
*nic
= dev
->priv
;
186 spin_lock_irqsave(&nic
->tx_lock
, flags
);
188 spin_unlock_irqrestore(&nic
->tx_lock
, flags
);
191 /* Unregister the vlan */
192 static void s2io_vlan_rx_kill_vid(struct net_device
*dev
, unsigned long vid
)
194 nic_t
*nic
= dev
->priv
;
197 spin_lock_irqsave(&nic
->tx_lock
, flags
);
199 nic
->vlgrp
->vlan_devices
[vid
] = NULL
;
200 spin_unlock_irqrestore(&nic
->tx_lock
, flags
);
204 * Constants to be programmed into the Xena's registers, to configure
208 #define SWITCH_SIGN 0xA5A5A5A5A5A5A5A5ULL
211 static u64 herc_act_dtx_cfg
[] = {
213 0x8000051536750000ULL
, 0x80000515367500E0ULL
,
215 0x8000051536750004ULL
, 0x80000515367500E4ULL
,
217 0x80010515003F0000ULL
, 0x80010515003F00E0ULL
,
219 0x80010515003F0004ULL
, 0x80010515003F00E4ULL
,
221 0x801205150D440000ULL
, 0x801205150D4400E0ULL
,
223 0x801205150D440004ULL
, 0x801205150D4400E4ULL
,
225 0x80020515F2100000ULL
, 0x80020515F21000E0ULL
,
227 0x80020515F2100004ULL
, 0x80020515F21000E4ULL
,
232 static u64 xena_mdio_cfg
[] = {
234 0xC001010000000000ULL
, 0xC0010100000000E0ULL
,
235 0xC0010100008000E4ULL
,
236 /* Remove Reset from PMA PLL */
237 0xC001010000000000ULL
, 0xC0010100000000E0ULL
,
238 0xC0010100000000E4ULL
,
242 static u64 xena_dtx_cfg
[] = {
243 0x8000051500000000ULL
, 0x80000515000000E0ULL
,
244 0x80000515D93500E4ULL
, 0x8001051500000000ULL
,
245 0x80010515000000E0ULL
, 0x80010515001E00E4ULL
,
246 0x8002051500000000ULL
, 0x80020515000000E0ULL
,
247 0x80020515F21000E4ULL
,
248 /* Set PADLOOPBACKN */
249 0x8002051500000000ULL
, 0x80020515000000E0ULL
,
250 0x80020515B20000E4ULL
, 0x8003051500000000ULL
,
251 0x80030515000000E0ULL
, 0x80030515B20000E4ULL
,
252 0x8004051500000000ULL
, 0x80040515000000E0ULL
,
253 0x80040515B20000E4ULL
, 0x8005051500000000ULL
,
254 0x80050515000000E0ULL
, 0x80050515B20000E4ULL
,
256 /* Remove PADLOOPBACKN */
257 0x8002051500000000ULL
, 0x80020515000000E0ULL
,
258 0x80020515F20000E4ULL
, 0x8003051500000000ULL
,
259 0x80030515000000E0ULL
, 0x80030515F20000E4ULL
,
260 0x8004051500000000ULL
, 0x80040515000000E0ULL
,
261 0x80040515F20000E4ULL
, 0x8005051500000000ULL
,
262 0x80050515000000E0ULL
, 0x80050515F20000E4ULL
,
267 * Constants for Fixing the MacAddress problem seen mostly on
270 static u64 fix_mac
[] = {
271 0x0060000000000000ULL
, 0x0060600000000000ULL
,
272 0x0040600000000000ULL
, 0x0000600000000000ULL
,
273 0x0020600000000000ULL
, 0x0060600000000000ULL
,
274 0x0020600000000000ULL
, 0x0060600000000000ULL
,
275 0x0020600000000000ULL
, 0x0060600000000000ULL
,
276 0x0020600000000000ULL
, 0x0060600000000000ULL
,
277 0x0020600000000000ULL
, 0x0060600000000000ULL
,
278 0x0020600000000000ULL
, 0x0060600000000000ULL
,
279 0x0020600000000000ULL
, 0x0060600000000000ULL
,
280 0x0020600000000000ULL
, 0x0060600000000000ULL
,
281 0x0020600000000000ULL
, 0x0060600000000000ULL
,
282 0x0020600000000000ULL
, 0x0060600000000000ULL
,
283 0x0020600000000000ULL
, 0x0000600000000000ULL
,
284 0x0040600000000000ULL
, 0x0060600000000000ULL
,
288 /* Module Loadable parameters. */
289 static unsigned int tx_fifo_num
= 1;
290 static unsigned int tx_fifo_len
[MAX_TX_FIFOS
] =
291 {[0 ...(MAX_TX_FIFOS
- 1)] = 0 };
292 static unsigned int rx_ring_num
= 1;
293 static unsigned int rx_ring_sz
[MAX_RX_RINGS
] =
294 {[0 ...(MAX_RX_RINGS
- 1)] = 0 };
295 static unsigned int rts_frm_len
[MAX_RX_RINGS
] =
296 {[0 ...(MAX_RX_RINGS
- 1)] = 0 };
297 static unsigned int use_continuous_tx_intrs
= 1;
298 static unsigned int rmac_pause_time
= 65535;
299 static unsigned int mc_pause_threshold_q0q3
= 187;
300 static unsigned int mc_pause_threshold_q4q7
= 187;
301 static unsigned int shared_splits
;
302 static unsigned int tmac_util_period
= 5;
303 static unsigned int rmac_util_period
= 5;
304 static unsigned int bimodal
= 0;
305 #ifndef CONFIG_S2IO_NAPI
306 static unsigned int indicate_max_pkts
;
308 /* Frequency of Rx desc syncs expressed as power of 2 */
309 static unsigned int rxsync_frequency
= 3;
313 * This table lists all the devices that this driver supports.
315 static struct pci_device_id s2io_tbl
[] __devinitdata
= {
316 {PCI_VENDOR_ID_S2IO
, PCI_DEVICE_ID_S2IO_WIN
,
317 PCI_ANY_ID
, PCI_ANY_ID
},
318 {PCI_VENDOR_ID_S2IO
, PCI_DEVICE_ID_S2IO_UNI
,
319 PCI_ANY_ID
, PCI_ANY_ID
},
320 {PCI_VENDOR_ID_S2IO
, PCI_DEVICE_ID_HERC_WIN
,
321 PCI_ANY_ID
, PCI_ANY_ID
},
322 {PCI_VENDOR_ID_S2IO
, PCI_DEVICE_ID_HERC_UNI
,
323 PCI_ANY_ID
, PCI_ANY_ID
},
327 MODULE_DEVICE_TABLE(pci
, s2io_tbl
);
329 static struct pci_driver s2io_driver
= {
331 .id_table
= s2io_tbl
,
332 .probe
= s2io_init_nic
,
333 .remove
= __devexit_p(s2io_rem_nic
),
336 /* A simplifier macro used both by init and free shared_mem Fns(). */
337 #define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each)
340 * init_shared_mem - Allocation and Initialization of Memory
341 * @nic: Device private variable.
342 * Description: The function allocates all the memory areas shared
343 * between the NIC and the driver. This includes Tx descriptors,
344 * Rx descriptors and the statistics block.
347 static int init_shared_mem(struct s2io_nic
*nic
)
350 void *tmp_v_addr
, *tmp_v_addr_next
;
351 dma_addr_t tmp_p_addr
, tmp_p_addr_next
;
352 RxD_block_t
*pre_rxd_blk
= NULL
;
353 int i
, j
, blk_cnt
, rx_sz
, tx_sz
;
354 int lst_size
, lst_per_page
;
355 struct net_device
*dev
= nic
->dev
;
356 #ifdef CONFIG_2BUFF_MODE
361 mac_info_t
*mac_control
;
362 struct config_param
*config
;
364 mac_control
= &nic
->mac_control
;
365 config
= &nic
->config
;
368 /* Allocation and initialization of TXDLs in FIOFs */
370 for (i
= 0; i
< config
->tx_fifo_num
; i
++) {
371 size
+= config
->tx_cfg
[i
].fifo_len
;
373 if (size
> MAX_AVAILABLE_TXDS
) {
374 DBG_PRINT(ERR_DBG
, "%s: Requested TxDs too high, ",
376 DBG_PRINT(ERR_DBG
, "Requested: %d, max supported: 8192\n", size
);
380 lst_size
= (sizeof(TxD_t
) * config
->max_txds
);
381 tx_sz
= lst_size
* size
;
382 lst_per_page
= PAGE_SIZE
/ lst_size
;
384 for (i
= 0; i
< config
->tx_fifo_num
; i
++) {
385 int fifo_len
= config
->tx_cfg
[i
].fifo_len
;
386 int list_holder_size
= fifo_len
* sizeof(list_info_hold_t
);
387 mac_control
->fifos
[i
].list_info
= kmalloc(list_holder_size
,
389 if (!mac_control
->fifos
[i
].list_info
) {
391 "Malloc failed for list_info\n");
394 memset(mac_control
->fifos
[i
].list_info
, 0, list_holder_size
);
396 for (i
= 0; i
< config
->tx_fifo_num
; i
++) {
397 int page_num
= TXD_MEM_PAGE_CNT(config
->tx_cfg
[i
].fifo_len
,
399 mac_control
->fifos
[i
].tx_curr_put_info
.offset
= 0;
400 mac_control
->fifos
[i
].tx_curr_put_info
.fifo_len
=
401 config
->tx_cfg
[i
].fifo_len
- 1;
402 mac_control
->fifos
[i
].tx_curr_get_info
.offset
= 0;
403 mac_control
->fifos
[i
].tx_curr_get_info
.fifo_len
=
404 config
->tx_cfg
[i
].fifo_len
- 1;
405 mac_control
->fifos
[i
].fifo_no
= i
;
406 mac_control
->fifos
[i
].nic
= nic
;
407 mac_control
->fifos
[i
].max_txds
= MAX_SKB_FRAGS
+ 1;
409 for (j
= 0; j
< page_num
; j
++) {
413 tmp_v
= pci_alloc_consistent(nic
->pdev
,
417 "pci_alloc_consistent ");
418 DBG_PRINT(ERR_DBG
, "failed for TxDL\n");
421 /* If we got a zero DMA address(can happen on
422 * certain platforms like PPC), reallocate.
423 * Store virtual address of page we don't want,
427 mac_control
->zerodma_virt_addr
= tmp_v
;
429 "%s: Zero DMA address for TxDL. ", dev
->name
);
431 "Virtual address %llx\n", (u64
)tmp_v
);
432 tmp_v
= pci_alloc_consistent(nic
->pdev
,
436 "pci_alloc_consistent ");
437 DBG_PRINT(ERR_DBG
, "failed for TxDL\n");
441 while (k
< lst_per_page
) {
442 int l
= (j
* lst_per_page
) + k
;
443 if (l
== config
->tx_cfg
[i
].fifo_len
)
445 mac_control
->fifos
[i
].list_info
[l
].list_virt_addr
=
446 tmp_v
+ (k
* lst_size
);
447 mac_control
->fifos
[i
].list_info
[l
].list_phy_addr
=
448 tmp_p
+ (k
* lst_size
);
454 /* Allocation and initialization of RXDs in Rings */
456 for (i
= 0; i
< config
->rx_ring_num
; i
++) {
457 if (config
->rx_cfg
[i
].num_rxd
% (MAX_RXDS_PER_BLOCK
+ 1)) {
458 DBG_PRINT(ERR_DBG
, "%s: RxD count of ", dev
->name
);
459 DBG_PRINT(ERR_DBG
, "Ring%d is not a multiple of ",
461 DBG_PRINT(ERR_DBG
, "RxDs per Block");
464 size
+= config
->rx_cfg
[i
].num_rxd
;
465 mac_control
->rings
[i
].block_count
=
466 config
->rx_cfg
[i
].num_rxd
/ (MAX_RXDS_PER_BLOCK
+ 1);
467 mac_control
->rings
[i
].pkt_cnt
=
468 config
->rx_cfg
[i
].num_rxd
- mac_control
->rings
[i
].block_count
;
470 size
= (size
* (sizeof(RxD_t
)));
473 for (i
= 0; i
< config
->rx_ring_num
; i
++) {
474 mac_control
->rings
[i
].rx_curr_get_info
.block_index
= 0;
475 mac_control
->rings
[i
].rx_curr_get_info
.offset
= 0;
476 mac_control
->rings
[i
].rx_curr_get_info
.ring_len
=
477 config
->rx_cfg
[i
].num_rxd
- 1;
478 mac_control
->rings
[i
].rx_curr_put_info
.block_index
= 0;
479 mac_control
->rings
[i
].rx_curr_put_info
.offset
= 0;
480 mac_control
->rings
[i
].rx_curr_put_info
.ring_len
=
481 config
->rx_cfg
[i
].num_rxd
- 1;
482 mac_control
->rings
[i
].nic
= nic
;
483 mac_control
->rings
[i
].ring_no
= i
;
486 config
->rx_cfg
[i
].num_rxd
/ (MAX_RXDS_PER_BLOCK
+ 1);
487 /* Allocating all the Rx blocks */
488 for (j
= 0; j
< blk_cnt
; j
++) {
489 #ifndef CONFIG_2BUFF_MODE
490 size
= (MAX_RXDS_PER_BLOCK
+ 1) * (sizeof(RxD_t
));
492 size
= SIZE_OF_BLOCK
;
494 tmp_v_addr
= pci_alloc_consistent(nic
->pdev
, size
,
496 if (tmp_v_addr
== NULL
) {
498 * In case of failure, free_shared_mem()
499 * is called, which should free any
500 * memory that was alloced till the
503 mac_control
->rings
[i
].rx_blocks
[j
].block_virt_addr
=
507 memset(tmp_v_addr
, 0, size
);
508 mac_control
->rings
[i
].rx_blocks
[j
].block_virt_addr
=
510 mac_control
->rings
[i
].rx_blocks
[j
].block_dma_addr
=
513 /* Interlinking all Rx Blocks */
514 for (j
= 0; j
< blk_cnt
; j
++) {
516 mac_control
->rings
[i
].rx_blocks
[j
].block_virt_addr
;
518 mac_control
->rings
[i
].rx_blocks
[(j
+ 1) %
519 blk_cnt
].block_virt_addr
;
521 mac_control
->rings
[i
].rx_blocks
[j
].block_dma_addr
;
523 mac_control
->rings
[i
].rx_blocks
[(j
+ 1) %
524 blk_cnt
].block_dma_addr
;
526 pre_rxd_blk
= (RxD_block_t
*) tmp_v_addr
;
527 pre_rxd_blk
->reserved_1
= END_OF_BLOCK
; /* last RxD
530 #ifndef CONFIG_2BUFF_MODE
531 pre_rxd_blk
->reserved_2_pNext_RxD_block
=
532 (unsigned long) tmp_v_addr_next
;
534 pre_rxd_blk
->pNext_RxD_Blk_physical
=
535 (u64
) tmp_p_addr_next
;
539 #ifdef CONFIG_2BUFF_MODE
541 * Allocation of Storages for buffer addresses in 2BUFF mode
542 * and the buffers as well.
544 for (i
= 0; i
< config
->rx_ring_num
; i
++) {
546 config
->rx_cfg
[i
].num_rxd
/ (MAX_RXDS_PER_BLOCK
+ 1);
547 mac_control
->rings
[i
].ba
= kmalloc((sizeof(buffAdd_t
*) * blk_cnt
),
549 if (!mac_control
->rings
[i
].ba
)
551 for (j
= 0; j
< blk_cnt
; j
++) {
553 mac_control
->rings
[i
].ba
[j
] = kmalloc((sizeof(buffAdd_t
) *
554 (MAX_RXDS_PER_BLOCK
+ 1)),
556 if (!mac_control
->rings
[i
].ba
[j
])
558 while (k
!= MAX_RXDS_PER_BLOCK
) {
559 ba
= &mac_control
->rings
[i
].ba
[j
][k
];
561 ba
->ba_0_org
= (void *) kmalloc
562 (BUF0_LEN
+ ALIGN_SIZE
, GFP_KERNEL
);
565 tmp
= (unsigned long) ba
->ba_0_org
;
567 tmp
&= ~((unsigned long) ALIGN_SIZE
);
568 ba
->ba_0
= (void *) tmp
;
570 ba
->ba_1_org
= (void *) kmalloc
571 (BUF1_LEN
+ ALIGN_SIZE
, GFP_KERNEL
);
574 tmp
= (unsigned long) ba
->ba_1_org
;
576 tmp
&= ~((unsigned long) ALIGN_SIZE
);
577 ba
->ba_1
= (void *) tmp
;
584 /* Allocation and initialization of Statistics block */
585 size
= sizeof(StatInfo_t
);
586 mac_control
->stats_mem
= pci_alloc_consistent
587 (nic
->pdev
, size
, &mac_control
->stats_mem_phy
);
589 if (!mac_control
->stats_mem
) {
591 * In case of failure, free_shared_mem() is called, which
592 * should free any memory that was alloced till the
597 mac_control
->stats_mem_sz
= size
;
599 tmp_v_addr
= mac_control
->stats_mem
;
600 mac_control
->stats_info
= (StatInfo_t
*) tmp_v_addr
;
601 memset(tmp_v_addr
, 0, size
);
602 DBG_PRINT(INIT_DBG
, "%s:Ring Mem PHY: 0x%llx\n", dev
->name
,
603 (unsigned long long) tmp_p_addr
);
609 * free_shared_mem - Free the allocated Memory
610 * @nic: Device private variable.
611 * Description: This function is to free all memory locations allocated by
612 * the init_shared_mem() function and return it to the kernel.
615 static void free_shared_mem(struct s2io_nic
*nic
)
617 int i
, j
, blk_cnt
, size
;
619 dma_addr_t tmp_p_addr
;
620 mac_info_t
*mac_control
;
621 struct config_param
*config
;
622 int lst_size
, lst_per_page
;
623 struct net_device
*dev
= nic
->dev
;
628 mac_control
= &nic
->mac_control
;
629 config
= &nic
->config
;
631 lst_size
= (sizeof(TxD_t
) * config
->max_txds
);
632 lst_per_page
= PAGE_SIZE
/ lst_size
;
634 for (i
= 0; i
< config
->tx_fifo_num
; i
++) {
635 int page_num
= TXD_MEM_PAGE_CNT(config
->tx_cfg
[i
].fifo_len
,
637 for (j
= 0; j
< page_num
; j
++) {
638 int mem_blks
= (j
* lst_per_page
);
639 if (!mac_control
->fifos
[i
].list_info
)
641 if (!mac_control
->fifos
[i
].list_info
[mem_blks
].
644 pci_free_consistent(nic
->pdev
, PAGE_SIZE
,
645 mac_control
->fifos
[i
].
648 mac_control
->fifos
[i
].
652 /* If we got a zero DMA address during allocation,
655 if (mac_control
->zerodma_virt_addr
) {
656 pci_free_consistent(nic
->pdev
, PAGE_SIZE
,
657 mac_control
->zerodma_virt_addr
,
660 "%s: Freeing TxDL with zero DMA addr. ", dev
->name
);
661 DBG_PRINT(INIT_DBG
, "Virtual address %llx\n",
662 (u64
)(mac_control
->zerodma_virt_addr
));
664 kfree(mac_control
->fifos
[i
].list_info
);
667 #ifndef CONFIG_2BUFF_MODE
668 size
= (MAX_RXDS_PER_BLOCK
+ 1) * (sizeof(RxD_t
));
670 size
= SIZE_OF_BLOCK
;
672 for (i
= 0; i
< config
->rx_ring_num
; i
++) {
673 blk_cnt
= mac_control
->rings
[i
].block_count
;
674 for (j
= 0; j
< blk_cnt
; j
++) {
675 tmp_v_addr
= mac_control
->rings
[i
].rx_blocks
[j
].
677 tmp_p_addr
= mac_control
->rings
[i
].rx_blocks
[j
].
679 if (tmp_v_addr
== NULL
)
681 pci_free_consistent(nic
->pdev
, size
,
682 tmp_v_addr
, tmp_p_addr
);
686 #ifdef CONFIG_2BUFF_MODE
687 /* Freeing buffer storage addresses in 2BUFF mode. */
688 for (i
= 0; i
< config
->rx_ring_num
; i
++) {
690 config
->rx_cfg
[i
].num_rxd
/ (MAX_RXDS_PER_BLOCK
+ 1);
691 for (j
= 0; j
< blk_cnt
; j
++) {
693 if (!mac_control
->rings
[i
].ba
[j
])
695 while (k
!= MAX_RXDS_PER_BLOCK
) {
696 buffAdd_t
*ba
= &mac_control
->rings
[i
].ba
[j
][k
];
701 kfree(mac_control
->rings
[i
].ba
[j
]);
703 if (mac_control
->rings
[i
].ba
)
704 kfree(mac_control
->rings
[i
].ba
);
708 if (mac_control
->stats_mem
) {
709 pci_free_consistent(nic
->pdev
,
710 mac_control
->stats_mem_sz
,
711 mac_control
->stats_mem
,
712 mac_control
->stats_mem_phy
);
717 * s2io_verify_pci_mode -
720 static int s2io_verify_pci_mode(nic_t
*nic
)
722 XENA_dev_config_t __iomem
*bar0
= nic
->bar0
;
723 register u64 val64
= 0;
726 val64
= readq(&bar0
->pci_mode
);
727 mode
= (u8
)GET_PCI_MODE(val64
);
729 if ( val64
& PCI_MODE_UNKNOWN_MODE
)
730 return -1; /* Unknown PCI mode */
736 * s2io_print_pci_mode -
738 static int s2io_print_pci_mode(nic_t
*nic
)
740 XENA_dev_config_t __iomem
*bar0
= nic
->bar0
;
741 register u64 val64
= 0;
743 struct config_param
*config
= &nic
->config
;
745 val64
= readq(&bar0
->pci_mode
);
746 mode
= (u8
)GET_PCI_MODE(val64
);
748 if ( val64
& PCI_MODE_UNKNOWN_MODE
)
749 return -1; /* Unknown PCI mode */
751 if (val64
& PCI_MODE_32_BITS
) {
752 DBG_PRINT(ERR_DBG
, "%s: Device is on 32 bit ", nic
->dev
->name
);
754 DBG_PRINT(ERR_DBG
, "%s: Device is on 64 bit ", nic
->dev
->name
);
758 case PCI_MODE_PCI_33
:
759 DBG_PRINT(ERR_DBG
, "33MHz PCI bus\n");
760 config
->bus_speed
= 33;
762 case PCI_MODE_PCI_66
:
763 DBG_PRINT(ERR_DBG
, "66MHz PCI bus\n");
764 config
->bus_speed
= 133;
766 case PCI_MODE_PCIX_M1_66
:
767 DBG_PRINT(ERR_DBG
, "66MHz PCIX(M1) bus\n");
768 config
->bus_speed
= 133; /* Herc doubles the clock rate */
770 case PCI_MODE_PCIX_M1_100
:
771 DBG_PRINT(ERR_DBG
, "100MHz PCIX(M1) bus\n");
772 config
->bus_speed
= 200;
774 case PCI_MODE_PCIX_M1_133
:
775 DBG_PRINT(ERR_DBG
, "133MHz PCIX(M1) bus\n");
776 config
->bus_speed
= 266;
778 case PCI_MODE_PCIX_M2_66
:
779 DBG_PRINT(ERR_DBG
, "133MHz PCIX(M2) bus\n");
780 config
->bus_speed
= 133;
782 case PCI_MODE_PCIX_M2_100
:
783 DBG_PRINT(ERR_DBG
, "200MHz PCIX(M2) bus\n");
784 config
->bus_speed
= 200;
786 case PCI_MODE_PCIX_M2_133
:
787 DBG_PRINT(ERR_DBG
, "266MHz PCIX(M2) bus\n");
788 config
->bus_speed
= 266;
791 return -1; /* Unsupported bus speed */
798 * init_nic - Initialization of hardware
799 * @nic: device peivate variable
800 * Description: The function sequentially configures every block
801 * of the H/W from their reset values.
802 * Return Value: SUCCESS on success and
803 * '-1' on failure (endian settings incorrect).
806 static int init_nic(struct s2io_nic
*nic
)
808 XENA_dev_config_t __iomem
*bar0
= nic
->bar0
;
809 struct net_device
*dev
= nic
->dev
;
810 register u64 val64
= 0;
814 mac_info_t
*mac_control
;
815 struct config_param
*config
;
816 int mdio_cnt
= 0, dtx_cnt
= 0;
817 unsigned long long mem_share
;
820 mac_control
= &nic
->mac_control
;
821 config
= &nic
->config
;
823 /* to set the swapper controle on the card */
824 if(s2io_set_swapper(nic
)) {
825 DBG_PRINT(ERR_DBG
,"ERROR: Setting Swapper failed\n");
830 * Herc requires EOI to be removed from reset before XGXS, so..
832 if (nic
->device_type
& XFRAME_II_DEVICE
) {
833 val64
= 0xA500000000ULL
;
834 writeq(val64
, &bar0
->sw_reset
);
836 val64
= readq(&bar0
->sw_reset
);
839 /* Remove XGXS from reset state */
841 writeq(val64
, &bar0
->sw_reset
);
843 val64
= readq(&bar0
->sw_reset
);
845 /* Enable Receiving broadcasts */
846 add
= &bar0
->mac_cfg
;
847 val64
= readq(&bar0
->mac_cfg
);
848 val64
|= MAC_RMAC_BCAST_ENABLE
;
849 writeq(RMAC_CFG_KEY(0x4C0D), &bar0
->rmac_cfg_key
);
850 writel((u32
) val64
, add
);
851 writeq(RMAC_CFG_KEY(0x4C0D), &bar0
->rmac_cfg_key
);
852 writel((u32
) (val64
>> 32), (add
+ 4));
854 /* Read registers in all blocks */
855 val64
= readq(&bar0
->mac_int_mask
);
856 val64
= readq(&bar0
->mc_int_mask
);
857 val64
= readq(&bar0
->xgxs_int_mask
);
861 writeq(vBIT(val64
, 2, 14), &bar0
->rmac_max_pyld_len
);
864 * Configuring the XAUI Interface of Xena.
865 * ***************************************
866 * To Configure the Xena's XAUI, one has to write a series
867 * of 64 bit values into two registers in a particular
868 * sequence. Hence a macro 'SWITCH_SIGN' has been defined
869 * which will be defined in the array of configuration values
870 * (xena_dtx_cfg & xena_mdio_cfg) at appropriate places
871 * to switch writing from one regsiter to another. We continue
872 * writing these values until we encounter the 'END_SIGN' macro.
873 * For example, After making a series of 21 writes into
874 * dtx_control register the 'SWITCH_SIGN' appears and hence we
875 * start writing into mdio_control until we encounter END_SIGN.
877 if (nic
->device_type
& XFRAME_II_DEVICE
) {
878 while (herc_act_dtx_cfg
[dtx_cnt
] != END_SIGN
) {
879 SPECIAL_REG_WRITE(herc_act_dtx_cfg
[dtx_cnt
],
880 &bar0
->dtx_control
, UF
);
882 msleep(1); /* Necessary!! */
888 while (xena_dtx_cfg
[dtx_cnt
] != END_SIGN
) {
889 if (xena_dtx_cfg
[dtx_cnt
] == SWITCH_SIGN
) {
893 SPECIAL_REG_WRITE(xena_dtx_cfg
[dtx_cnt
],
894 &bar0
->dtx_control
, UF
);
895 val64
= readq(&bar0
->dtx_control
);
899 while (xena_mdio_cfg
[mdio_cnt
] != END_SIGN
) {
900 if (xena_mdio_cfg
[mdio_cnt
] == SWITCH_SIGN
) {
904 SPECIAL_REG_WRITE(xena_mdio_cfg
[mdio_cnt
],
905 &bar0
->mdio_control
, UF
);
906 val64
= readq(&bar0
->mdio_control
);
909 if ((xena_dtx_cfg
[dtx_cnt
] == END_SIGN
) &&
910 (xena_mdio_cfg
[mdio_cnt
] == END_SIGN
)) {
918 /* Tx DMA Initialization */
920 writeq(val64
, &bar0
->tx_fifo_partition_0
);
921 writeq(val64
, &bar0
->tx_fifo_partition_1
);
922 writeq(val64
, &bar0
->tx_fifo_partition_2
);
923 writeq(val64
, &bar0
->tx_fifo_partition_3
);
926 for (i
= 0, j
= 0; i
< config
->tx_fifo_num
; i
++) {
928 vBIT(config
->tx_cfg
[i
].fifo_len
- 1, ((i
* 32) + 19),
929 13) | vBIT(config
->tx_cfg
[i
].fifo_priority
,
932 if (i
== (config
->tx_fifo_num
- 1)) {
939 writeq(val64
, &bar0
->tx_fifo_partition_0
);
943 writeq(val64
, &bar0
->tx_fifo_partition_1
);
947 writeq(val64
, &bar0
->tx_fifo_partition_2
);
951 writeq(val64
, &bar0
->tx_fifo_partition_3
);
956 /* Enable Tx FIFO partition 0. */
957 val64
= readq(&bar0
->tx_fifo_partition_0
);
958 val64
|= BIT(0); /* To enable the FIFO partition. */
959 writeq(val64
, &bar0
->tx_fifo_partition_0
);
962 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
963 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
965 if ((nic
->device_type
== XFRAME_I_DEVICE
) &&
966 (get_xena_rev_id(nic
->pdev
) < 4))
967 writeq(PCC_ENABLE_FOUR
, &bar0
->pcc_enable
);
969 val64
= readq(&bar0
->tx_fifo_partition_0
);
970 DBG_PRINT(INIT_DBG
, "Fifo partition at: 0x%p is: 0x%llx\n",
971 &bar0
->tx_fifo_partition_0
, (unsigned long long) val64
);
974 * Initialization of Tx_PA_CONFIG register to ignore packet
975 * integrity checking.
977 val64
= readq(&bar0
->tx_pa_cfg
);
978 val64
|= TX_PA_CFG_IGNORE_FRM_ERR
| TX_PA_CFG_IGNORE_SNAP_OUI
|
979 TX_PA_CFG_IGNORE_LLC_CTRL
| TX_PA_CFG_IGNORE_L2_ERR
;
980 writeq(val64
, &bar0
->tx_pa_cfg
);
982 /* Rx DMA intialization. */
984 for (i
= 0; i
< config
->rx_ring_num
; i
++) {
986 vBIT(config
->rx_cfg
[i
].ring_priority
, (5 + (i
* 8)),
989 writeq(val64
, &bar0
->rx_queue_priority
);
992 * Allocating equal share of memory to all the
996 if (nic
->device_type
& XFRAME_II_DEVICE
)
1001 for (i
= 0; i
< config
->rx_ring_num
; i
++) {
1004 mem_share
= (mem_size
/ config
->rx_ring_num
+
1005 mem_size
% config
->rx_ring_num
);
1006 val64
|= RX_QUEUE_CFG_Q0_SZ(mem_share
);
1009 mem_share
= (mem_size
/ config
->rx_ring_num
);
1010 val64
|= RX_QUEUE_CFG_Q1_SZ(mem_share
);
1013 mem_share
= (mem_size
/ config
->rx_ring_num
);
1014 val64
|= RX_QUEUE_CFG_Q2_SZ(mem_share
);
1017 mem_share
= (mem_size
/ config
->rx_ring_num
);
1018 val64
|= RX_QUEUE_CFG_Q3_SZ(mem_share
);
1021 mem_share
= (mem_size
/ config
->rx_ring_num
);
1022 val64
|= RX_QUEUE_CFG_Q4_SZ(mem_share
);
1025 mem_share
= (mem_size
/ config
->rx_ring_num
);
1026 val64
|= RX_QUEUE_CFG_Q5_SZ(mem_share
);
1029 mem_share
= (mem_size
/ config
->rx_ring_num
);
1030 val64
|= RX_QUEUE_CFG_Q6_SZ(mem_share
);
1033 mem_share
= (mem_size
/ config
->rx_ring_num
);
1034 val64
|= RX_QUEUE_CFG_Q7_SZ(mem_share
);
1038 writeq(val64
, &bar0
->rx_queue_cfg
);
1041 * Filling Tx round robin registers
1042 * as per the number of FIFOs
1044 switch (config
->tx_fifo_num
) {
1046 val64
= 0x0000000000000000ULL
;
1047 writeq(val64
, &bar0
->tx_w_round_robin_0
);
1048 writeq(val64
, &bar0
->tx_w_round_robin_1
);
1049 writeq(val64
, &bar0
->tx_w_round_robin_2
);
1050 writeq(val64
, &bar0
->tx_w_round_robin_3
);
1051 writeq(val64
, &bar0
->tx_w_round_robin_4
);
1054 val64
= 0x0000010000010000ULL
;
1055 writeq(val64
, &bar0
->tx_w_round_robin_0
);
1056 val64
= 0x0100000100000100ULL
;
1057 writeq(val64
, &bar0
->tx_w_round_robin_1
);
1058 val64
= 0x0001000001000001ULL
;
1059 writeq(val64
, &bar0
->tx_w_round_robin_2
);
1060 val64
= 0x0000010000010000ULL
;
1061 writeq(val64
, &bar0
->tx_w_round_robin_3
);
1062 val64
= 0x0100000000000000ULL
;
1063 writeq(val64
, &bar0
->tx_w_round_robin_4
);
1066 val64
= 0x0001000102000001ULL
;
1067 writeq(val64
, &bar0
->tx_w_round_robin_0
);
1068 val64
= 0x0001020000010001ULL
;
1069 writeq(val64
, &bar0
->tx_w_round_robin_1
);
1070 val64
= 0x0200000100010200ULL
;
1071 writeq(val64
, &bar0
->tx_w_round_robin_2
);
1072 val64
= 0x0001000102000001ULL
;
1073 writeq(val64
, &bar0
->tx_w_round_robin_3
);
1074 val64
= 0x0001020000000000ULL
;
1075 writeq(val64
, &bar0
->tx_w_round_robin_4
);
1078 val64
= 0x0001020300010200ULL
;
1079 writeq(val64
, &bar0
->tx_w_round_robin_0
);
1080 val64
= 0x0100000102030001ULL
;
1081 writeq(val64
, &bar0
->tx_w_round_robin_1
);
1082 val64
= 0x0200010000010203ULL
;
1083 writeq(val64
, &bar0
->tx_w_round_robin_2
);
1084 val64
= 0x0001020001000001ULL
;
1085 writeq(val64
, &bar0
->tx_w_round_robin_3
);
1086 val64
= 0x0203000100000000ULL
;
1087 writeq(val64
, &bar0
->tx_w_round_robin_4
);
1090 val64
= 0x0001000203000102ULL
;
1091 writeq(val64
, &bar0
->tx_w_round_robin_0
);
1092 val64
= 0x0001020001030004ULL
;
1093 writeq(val64
, &bar0
->tx_w_round_robin_1
);
1094 val64
= 0x0001000203000102ULL
;
1095 writeq(val64
, &bar0
->tx_w_round_robin_2
);
1096 val64
= 0x0001020001030004ULL
;
1097 writeq(val64
, &bar0
->tx_w_round_robin_3
);
1098 val64
= 0x0001000000000000ULL
;
1099 writeq(val64
, &bar0
->tx_w_round_robin_4
);
1102 val64
= 0x0001020304000102ULL
;
1103 writeq(val64
, &bar0
->tx_w_round_robin_0
);
1104 val64
= 0x0304050001020001ULL
;
1105 writeq(val64
, &bar0
->tx_w_round_robin_1
);
1106 val64
= 0x0203000100000102ULL
;
1107 writeq(val64
, &bar0
->tx_w_round_robin_2
);
1108 val64
= 0x0304000102030405ULL
;
1109 writeq(val64
, &bar0
->tx_w_round_robin_3
);
1110 val64
= 0x0001000200000000ULL
;
1111 writeq(val64
, &bar0
->tx_w_round_robin_4
);
1114 val64
= 0x0001020001020300ULL
;
1115 writeq(val64
, &bar0
->tx_w_round_robin_0
);
1116 val64
= 0x0102030400010203ULL
;
1117 writeq(val64
, &bar0
->tx_w_round_robin_1
);
1118 val64
= 0x0405060001020001ULL
;
1119 writeq(val64
, &bar0
->tx_w_round_robin_2
);
1120 val64
= 0x0304050000010200ULL
;
1121 writeq(val64
, &bar0
->tx_w_round_robin_3
);
1122 val64
= 0x0102030000000000ULL
;
1123 writeq(val64
, &bar0
->tx_w_round_robin_4
);
1126 val64
= 0x0001020300040105ULL
;
1127 writeq(val64
, &bar0
->tx_w_round_robin_0
);
1128 val64
= 0x0200030106000204ULL
;
1129 writeq(val64
, &bar0
->tx_w_round_robin_1
);
1130 val64
= 0x0103000502010007ULL
;
1131 writeq(val64
, &bar0
->tx_w_round_robin_2
);
1132 val64
= 0x0304010002060500ULL
;
1133 writeq(val64
, &bar0
->tx_w_round_robin_3
);
1134 val64
= 0x0103020400000000ULL
;
1135 writeq(val64
, &bar0
->tx_w_round_robin_4
);
1139 /* Filling the Rx round robin registers as per the
1140 * number of Rings and steering based on QoS.
1142 switch (config
->rx_ring_num
) {
1144 val64
= 0x8080808080808080ULL
;
1145 writeq(val64
, &bar0
->rts_qos_steering
);
1148 val64
= 0x0000010000010000ULL
;
1149 writeq(val64
, &bar0
->rx_w_round_robin_0
);
1150 val64
= 0x0100000100000100ULL
;
1151 writeq(val64
, &bar0
->rx_w_round_robin_1
);
1152 val64
= 0x0001000001000001ULL
;
1153 writeq(val64
, &bar0
->rx_w_round_robin_2
);
1154 val64
= 0x0000010000010000ULL
;
1155 writeq(val64
, &bar0
->rx_w_round_robin_3
);
1156 val64
= 0x0100000000000000ULL
;
1157 writeq(val64
, &bar0
->rx_w_round_robin_4
);
1159 val64
= 0x8080808040404040ULL
;
1160 writeq(val64
, &bar0
->rts_qos_steering
);
1163 val64
= 0x0001000102000001ULL
;
1164 writeq(val64
, &bar0
->rx_w_round_robin_0
);
1165 val64
= 0x0001020000010001ULL
;
1166 writeq(val64
, &bar0
->rx_w_round_robin_1
);
1167 val64
= 0x0200000100010200ULL
;
1168 writeq(val64
, &bar0
->rx_w_round_robin_2
);
1169 val64
= 0x0001000102000001ULL
;
1170 writeq(val64
, &bar0
->rx_w_round_robin_3
);
1171 val64
= 0x0001020000000000ULL
;
1172 writeq(val64
, &bar0
->rx_w_round_robin_4
);
1174 val64
= 0x8080804040402020ULL
;
1175 writeq(val64
, &bar0
->rts_qos_steering
);
1178 val64
= 0x0001020300010200ULL
;
1179 writeq(val64
, &bar0
->rx_w_round_robin_0
);
1180 val64
= 0x0100000102030001ULL
;
1181 writeq(val64
, &bar0
->rx_w_round_robin_1
);
1182 val64
= 0x0200010000010203ULL
;
1183 writeq(val64
, &bar0
->rx_w_round_robin_2
);
1184 val64
= 0x0001020001000001ULL
;
1185 writeq(val64
, &bar0
->rx_w_round_robin_3
);
1186 val64
= 0x0203000100000000ULL
;
1187 writeq(val64
, &bar0
->rx_w_round_robin_4
);
1189 val64
= 0x8080404020201010ULL
;
1190 writeq(val64
, &bar0
->rts_qos_steering
);
1193 val64
= 0x0001000203000102ULL
;
1194 writeq(val64
, &bar0
->rx_w_round_robin_0
);
1195 val64
= 0x0001020001030004ULL
;
1196 writeq(val64
, &bar0
->rx_w_round_robin_1
);
1197 val64
= 0x0001000203000102ULL
;
1198 writeq(val64
, &bar0
->rx_w_round_robin_2
);
1199 val64
= 0x0001020001030004ULL
;
1200 writeq(val64
, &bar0
->rx_w_round_robin_3
);
1201 val64
= 0x0001000000000000ULL
;
1202 writeq(val64
, &bar0
->rx_w_round_robin_4
);
1204 val64
= 0x8080404020201008ULL
;
1205 writeq(val64
, &bar0
->rts_qos_steering
);
1208 val64
= 0x0001020304000102ULL
;
1209 writeq(val64
, &bar0
->rx_w_round_robin_0
);
1210 val64
= 0x0304050001020001ULL
;
1211 writeq(val64
, &bar0
->rx_w_round_robin_1
);
1212 val64
= 0x0203000100000102ULL
;
1213 writeq(val64
, &bar0
->rx_w_round_robin_2
);
1214 val64
= 0x0304000102030405ULL
;
1215 writeq(val64
, &bar0
->rx_w_round_robin_3
);
1216 val64
= 0x0001000200000000ULL
;
1217 writeq(val64
, &bar0
->rx_w_round_robin_4
);
1219 val64
= 0x8080404020100804ULL
;
1220 writeq(val64
, &bar0
->rts_qos_steering
);
1223 val64
= 0x0001020001020300ULL
;
1224 writeq(val64
, &bar0
->rx_w_round_robin_0
);
1225 val64
= 0x0102030400010203ULL
;
1226 writeq(val64
, &bar0
->rx_w_round_robin_1
);
1227 val64
= 0x0405060001020001ULL
;
1228 writeq(val64
, &bar0
->rx_w_round_robin_2
);
1229 val64
= 0x0304050000010200ULL
;
1230 writeq(val64
, &bar0
->rx_w_round_robin_3
);
1231 val64
= 0x0102030000000000ULL
;
1232 writeq(val64
, &bar0
->rx_w_round_robin_4
);
1234 val64
= 0x8080402010080402ULL
;
1235 writeq(val64
, &bar0
->rts_qos_steering
);
1238 val64
= 0x0001020300040105ULL
;
1239 writeq(val64
, &bar0
->rx_w_round_robin_0
);
1240 val64
= 0x0200030106000204ULL
;
1241 writeq(val64
, &bar0
->rx_w_round_robin_1
);
1242 val64
= 0x0103000502010007ULL
;
1243 writeq(val64
, &bar0
->rx_w_round_robin_2
);
1244 val64
= 0x0304010002060500ULL
;
1245 writeq(val64
, &bar0
->rx_w_round_robin_3
);
1246 val64
= 0x0103020400000000ULL
;
1247 writeq(val64
, &bar0
->rx_w_round_robin_4
);
1249 val64
= 0x8040201008040201ULL
;
1250 writeq(val64
, &bar0
->rts_qos_steering
);
1256 for (i
= 0; i
< 8; i
++)
1257 writeq(val64
, &bar0
->rts_frm_len_n
[i
]);
1259 /* Set the default rts frame length for the rings configured */
1260 val64
= MAC_RTS_FRM_LEN_SET(dev
->mtu
+22);
1261 for (i
= 0 ; i
< config
->rx_ring_num
; i
++)
1262 writeq(val64
, &bar0
->rts_frm_len_n
[i
]);
1264 /* Set the frame length for the configured rings
1265 * desired by the user
1267 for (i
= 0; i
< config
->rx_ring_num
; i
++) {
1268 /* If rts_frm_len[i] == 0 then it is assumed that user not
1269 * specified frame length steering.
1270 * If the user provides the frame length then program
1271 * the rts_frm_len register for those values or else
1272 * leave it as it is.
1274 if (rts_frm_len
[i
] != 0) {
1275 writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len
[i
]),
1276 &bar0
->rts_frm_len_n
[i
]);
1280 /* Program statistics memory */
1281 writeq(mac_control
->stats_mem_phy
, &bar0
->stat_addr
);
1283 if (nic
->device_type
== XFRAME_II_DEVICE
) {
1284 val64
= STAT_BC(0x320);
1285 writeq(val64
, &bar0
->stat_byte_cnt
);
1289 * Initializing the sampling rate for the device to calculate the
1290 * bandwidth utilization.
1292 val64
= MAC_TX_LINK_UTIL_VAL(tmac_util_period
) |
1293 MAC_RX_LINK_UTIL_VAL(rmac_util_period
);
1294 writeq(val64
, &bar0
->mac_link_util
);
1298 * Initializing the Transmit and Receive Traffic Interrupt
1302 * TTI Initialization. Default Tx timer gets us about
1303 * 250 interrupts per sec. Continuous interrupts are enabled
1306 if (nic
->device_type
== XFRAME_II_DEVICE
) {
1307 int count
= (nic
->config
.bus_speed
* 125)/2;
1308 val64
= TTI_DATA1_MEM_TX_TIMER_VAL(count
);
1311 val64
= TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1313 val64
|= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1314 TTI_DATA1_MEM_TX_URNG_B(0x10) |
1315 TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN
;
1316 if (use_continuous_tx_intrs
)
1317 val64
|= TTI_DATA1_MEM_TX_TIMER_CI_EN
;
1318 writeq(val64
, &bar0
->tti_data1_mem
);
1320 val64
= TTI_DATA2_MEM_TX_UFC_A(0x10) |
1321 TTI_DATA2_MEM_TX_UFC_B(0x20) |
1322 TTI_DATA2_MEM_TX_UFC_C(0x70) | TTI_DATA2_MEM_TX_UFC_D(0x80);
1323 writeq(val64
, &bar0
->tti_data2_mem
);
1325 val64
= TTI_CMD_MEM_WE
| TTI_CMD_MEM_STROBE_NEW_CMD
;
1326 writeq(val64
, &bar0
->tti_command_mem
);
1329 * Once the operation completes, the Strobe bit of the command
1330 * register will be reset. We poll for this particular condition
1331 * We wait for a maximum of 500ms for the operation to complete,
1332 * if it's not complete by then we return error.
1336 val64
= readq(&bar0
->tti_command_mem
);
1337 if (!(val64
& TTI_CMD_MEM_STROBE_NEW_CMD
)) {
1341 DBG_PRINT(ERR_DBG
, "%s: TTI init Failed\n",
1349 if (nic
->config
.bimodal
) {
1351 for (k
= 0; k
< config
->rx_ring_num
; k
++) {
1352 val64
= TTI_CMD_MEM_WE
| TTI_CMD_MEM_STROBE_NEW_CMD
;
1353 val64
|= TTI_CMD_MEM_OFFSET(0x38+k
);
1354 writeq(val64
, &bar0
->tti_command_mem
);
1357 * Once the operation completes, the Strobe bit of the command
1358 * register will be reset. We poll for this particular condition
1359 * We wait for a maximum of 500ms for the operation to complete,
1360 * if it's not complete by then we return error.
1364 val64
= readq(&bar0
->tti_command_mem
);
1365 if (!(val64
& TTI_CMD_MEM_STROBE_NEW_CMD
)) {
1370 "%s: TTI init Failed\n",
1380 /* RTI Initialization */
1381 if (nic
->device_type
== XFRAME_II_DEVICE
) {
1383 * Programmed to generate Apprx 500 Intrs per
1386 int count
= (nic
->config
.bus_speed
* 125)/4;
1387 val64
= RTI_DATA1_MEM_RX_TIMER_VAL(count
);
1389 val64
= RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1391 val64
|= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1392 RTI_DATA1_MEM_RX_URNG_B(0x10) |
1393 RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN
;
1395 writeq(val64
, &bar0
->rti_data1_mem
);
1397 val64
= RTI_DATA2_MEM_RX_UFC_A(0x1) |
1398 RTI_DATA2_MEM_RX_UFC_B(0x2) |
1399 RTI_DATA2_MEM_RX_UFC_C(0x40) | RTI_DATA2_MEM_RX_UFC_D(0x80);
1400 writeq(val64
, &bar0
->rti_data2_mem
);
1402 for (i
= 0; i
< config
->rx_ring_num
; i
++) {
1403 val64
= RTI_CMD_MEM_WE
| RTI_CMD_MEM_STROBE_NEW_CMD
1404 | RTI_CMD_MEM_OFFSET(i
);
1405 writeq(val64
, &bar0
->rti_command_mem
);
1408 * Once the operation completes, the Strobe bit of the
1409 * command register will be reset. We poll for this
1410 * particular condition. We wait for a maximum of 500ms
1411 * for the operation to complete, if it's not complete
1412 * by then we return error.
1416 val64
= readq(&bar0
->rti_command_mem
);
1417 if (!(val64
& RTI_CMD_MEM_STROBE_NEW_CMD
)) {
1421 DBG_PRINT(ERR_DBG
, "%s: RTI init Failed\n",
1432 * Initializing proper values as Pause threshold into all
1433 * the 8 Queues on Rx side.
1435 writeq(0xffbbffbbffbbffbbULL
, &bar0
->mc_pause_thresh_q0q3
);
1436 writeq(0xffbbffbbffbbffbbULL
, &bar0
->mc_pause_thresh_q4q7
);
1438 /* Disable RMAC PAD STRIPPING */
1439 add
= &bar0
->mac_cfg
;
1440 val64
= readq(&bar0
->mac_cfg
);
1441 val64
&= ~(MAC_CFG_RMAC_STRIP_PAD
);
1442 writeq(RMAC_CFG_KEY(0x4C0D), &bar0
->rmac_cfg_key
);
1443 writel((u32
) (val64
), add
);
1444 writeq(RMAC_CFG_KEY(0x4C0D), &bar0
->rmac_cfg_key
);
1445 writel((u32
) (val64
>> 32), (add
+ 4));
1446 val64
= readq(&bar0
->mac_cfg
);
1449 * Set the time value to be inserted in the pause frame
1450 * generated by xena.
1452 val64
= readq(&bar0
->rmac_pause_cfg
);
1453 val64
&= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1454 val64
|= RMAC_PAUSE_HG_PTIME(nic
->mac_control
.rmac_pause_time
);
1455 writeq(val64
, &bar0
->rmac_pause_cfg
);
1458 * Set the Threshold Limit for Generating the pause frame
1459 * If the amount of data in any Queue exceeds ratio of
1460 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1461 * pause frame is generated
1464 for (i
= 0; i
< 4; i
++) {
1466 (((u64
) 0xFF00 | nic
->mac_control
.
1467 mc_pause_threshold_q0q3
)
1470 writeq(val64
, &bar0
->mc_pause_thresh_q0q3
);
1473 for (i
= 0; i
< 4; i
++) {
1475 (((u64
) 0xFF00 | nic
->mac_control
.
1476 mc_pause_threshold_q4q7
)
1479 writeq(val64
, &bar0
->mc_pause_thresh_q4q7
);
1482 * TxDMA will stop Read request if the number of read split has
1483 * exceeded the limit pointed by shared_splits
1485 val64
= readq(&bar0
->pic_control
);
1486 val64
|= PIC_CNTL_SHARED_SPLITS(shared_splits
);
1487 writeq(val64
, &bar0
->pic_control
);
1490 * Programming the Herc to split every write transaction
1491 * that does not start on an ADB to reduce disconnects.
1493 if (nic
->device_type
== XFRAME_II_DEVICE
) {
1494 val64
= WREQ_SPLIT_MASK_SET_MASK(255);
1495 writeq(val64
, &bar0
->wreq_split_mask
);
1498 /* Setting Link stability period to 64 ms */
1499 if (nic
->device_type
== XFRAME_II_DEVICE
) {
1500 val64
= MISC_LINK_STABILITY_PRD(3);
1501 writeq(val64
, &bar0
->misc_control
);
1506 #define LINK_UP_DOWN_INTERRUPT 1
1507 #define MAC_RMAC_ERR_TIMER 2
1509 #if defined(CONFIG_MSI_MODE) || defined(CONFIG_MSIX_MODE)
1510 #define s2io_link_fault_indication(x) MAC_RMAC_ERR_TIMER
1512 int s2io_link_fault_indication(nic_t
*nic
)
1514 if (nic
->device_type
== XFRAME_II_DEVICE
)
1515 return LINK_UP_DOWN_INTERRUPT
;
1517 return MAC_RMAC_ERR_TIMER
;
1522 * en_dis_able_nic_intrs - Enable or Disable the interrupts
1523 * @nic: device private variable,
1524 * @mask: A mask indicating which Intr block must be modified and,
1525 * @flag: A flag indicating whether to enable or disable the Intrs.
1526 * Description: This function will either disable or enable the interrupts
1527 * depending on the flag argument. The mask argument can be used to
1528 * enable/disable any Intr block.
1529 * Return Value: NONE.
1532 static void en_dis_able_nic_intrs(struct s2io_nic
*nic
, u16 mask
, int flag
)
1534 XENA_dev_config_t __iomem
*bar0
= nic
->bar0
;
1535 register u64 val64
= 0, temp64
= 0;
1537 /* Top level interrupt classification */
1538 /* PIC Interrupts */
1539 if ((mask
& (TX_PIC_INTR
| RX_PIC_INTR
))) {
1540 /* Enable PIC Intrs in the general intr mask register */
1541 val64
= TXPIC_INT_M
| PIC_RX_INT_M
;
1542 if (flag
== ENABLE_INTRS
) {
1543 temp64
= readq(&bar0
->general_int_mask
);
1544 temp64
&= ~((u64
) val64
);
1545 writeq(temp64
, &bar0
->general_int_mask
);
1547 * If Hercules adapter enable GPIO otherwise
1548 * disabled all PCIX, Flash, MDIO, IIC and GPIO
1549 * interrupts for now.
1552 if (s2io_link_fault_indication(nic
) ==
1553 LINK_UP_DOWN_INTERRUPT
) {
1554 temp64
= readq(&bar0
->pic_int_mask
);
1555 temp64
&= ~((u64
) PIC_INT_GPIO
);
1556 writeq(temp64
, &bar0
->pic_int_mask
);
1557 temp64
= readq(&bar0
->gpio_int_mask
);
1558 temp64
&= ~((u64
) GPIO_INT_MASK_LINK_UP
);
1559 writeq(temp64
, &bar0
->gpio_int_mask
);
1561 writeq(DISABLE_ALL_INTRS
, &bar0
->pic_int_mask
);
1564 * No MSI Support is available presently, so TTI and
1565 * RTI interrupts are also disabled.
1567 } else if (flag
== DISABLE_INTRS
) {
1569 * Disable PIC Intrs in the general
1570 * intr mask register
1572 writeq(DISABLE_ALL_INTRS
, &bar0
->pic_int_mask
);
1573 temp64
= readq(&bar0
->general_int_mask
);
1575 writeq(val64
, &bar0
->general_int_mask
);
1579 /* DMA Interrupts */
1580 /* Enabling/Disabling Tx DMA interrupts */
1581 if (mask
& TX_DMA_INTR
) {
1582 /* Enable TxDMA Intrs in the general intr mask register */
1583 val64
= TXDMA_INT_M
;
1584 if (flag
== ENABLE_INTRS
) {
1585 temp64
= readq(&bar0
->general_int_mask
);
1586 temp64
&= ~((u64
) val64
);
1587 writeq(temp64
, &bar0
->general_int_mask
);
1589 * Keep all interrupts other than PFC interrupt
1590 * and PCC interrupt disabled in DMA level.
1592 val64
= DISABLE_ALL_INTRS
& ~(TXDMA_PFC_INT_M
|
1594 writeq(val64
, &bar0
->txdma_int_mask
);
1596 * Enable only the MISC error 1 interrupt in PFC block
1598 val64
= DISABLE_ALL_INTRS
& (~PFC_MISC_ERR_1
);
1599 writeq(val64
, &bar0
->pfc_err_mask
);
1601 * Enable only the FB_ECC error interrupt in PCC block
1603 val64
= DISABLE_ALL_INTRS
& (~PCC_FB_ECC_ERR
);
1604 writeq(val64
, &bar0
->pcc_err_mask
);
1605 } else if (flag
== DISABLE_INTRS
) {
1607 * Disable TxDMA Intrs in the general intr mask
1610 writeq(DISABLE_ALL_INTRS
, &bar0
->txdma_int_mask
);
1611 writeq(DISABLE_ALL_INTRS
, &bar0
->pfc_err_mask
);
1612 temp64
= readq(&bar0
->general_int_mask
);
1614 writeq(val64
, &bar0
->general_int_mask
);
1618 /* Enabling/Disabling Rx DMA interrupts */
1619 if (mask
& RX_DMA_INTR
) {
1620 /* Enable RxDMA Intrs in the general intr mask register */
1621 val64
= RXDMA_INT_M
;
1622 if (flag
== ENABLE_INTRS
) {
1623 temp64
= readq(&bar0
->general_int_mask
);
1624 temp64
&= ~((u64
) val64
);
1625 writeq(temp64
, &bar0
->general_int_mask
);
1627 * All RxDMA block interrupts are disabled for now
1630 writeq(DISABLE_ALL_INTRS
, &bar0
->rxdma_int_mask
);
1631 } else if (flag
== DISABLE_INTRS
) {
1633 * Disable RxDMA Intrs in the general intr mask
1636 writeq(DISABLE_ALL_INTRS
, &bar0
->rxdma_int_mask
);
1637 temp64
= readq(&bar0
->general_int_mask
);
1639 writeq(val64
, &bar0
->general_int_mask
);
1643 /* MAC Interrupts */
1644 /* Enabling/Disabling MAC interrupts */
1645 if (mask
& (TX_MAC_INTR
| RX_MAC_INTR
)) {
1646 val64
= TXMAC_INT_M
| RXMAC_INT_M
;
1647 if (flag
== ENABLE_INTRS
) {
1648 temp64
= readq(&bar0
->general_int_mask
);
1649 temp64
&= ~((u64
) val64
);
1650 writeq(temp64
, &bar0
->general_int_mask
);
1652 * All MAC block error interrupts are disabled for now
1655 } else if (flag
== DISABLE_INTRS
) {
1657 * Disable MAC Intrs in the general intr mask register
1659 writeq(DISABLE_ALL_INTRS
, &bar0
->mac_int_mask
);
1660 writeq(DISABLE_ALL_INTRS
,
1661 &bar0
->mac_rmac_err_mask
);
1663 temp64
= readq(&bar0
->general_int_mask
);
1665 writeq(val64
, &bar0
->general_int_mask
);
1669 /* XGXS Interrupts */
1670 if (mask
& (TX_XGXS_INTR
| RX_XGXS_INTR
)) {
1671 val64
= TXXGXS_INT_M
| RXXGXS_INT_M
;
1672 if (flag
== ENABLE_INTRS
) {
1673 temp64
= readq(&bar0
->general_int_mask
);
1674 temp64
&= ~((u64
) val64
);
1675 writeq(temp64
, &bar0
->general_int_mask
);
1677 * All XGXS block error interrupts are disabled for now
1680 writeq(DISABLE_ALL_INTRS
, &bar0
->xgxs_int_mask
);
1681 } else if (flag
== DISABLE_INTRS
) {
1683 * Disable MC Intrs in the general intr mask register
1685 writeq(DISABLE_ALL_INTRS
, &bar0
->xgxs_int_mask
);
1686 temp64
= readq(&bar0
->general_int_mask
);
1688 writeq(val64
, &bar0
->general_int_mask
);
1692 /* Memory Controller(MC) interrupts */
1693 if (mask
& MC_INTR
) {
1695 if (flag
== ENABLE_INTRS
) {
1696 temp64
= readq(&bar0
->general_int_mask
);
1697 temp64
&= ~((u64
) val64
);
1698 writeq(temp64
, &bar0
->general_int_mask
);
1700 * Enable all MC Intrs.
1702 writeq(0x0, &bar0
->mc_int_mask
);
1703 writeq(0x0, &bar0
->mc_err_mask
);
1704 } else if (flag
== DISABLE_INTRS
) {
1706 * Disable MC Intrs in the general intr mask register
1708 writeq(DISABLE_ALL_INTRS
, &bar0
->mc_int_mask
);
1709 temp64
= readq(&bar0
->general_int_mask
);
1711 writeq(val64
, &bar0
->general_int_mask
);
1716 /* Tx traffic interrupts */
1717 if (mask
& TX_TRAFFIC_INTR
) {
1718 val64
= TXTRAFFIC_INT_M
;
1719 if (flag
== ENABLE_INTRS
) {
1720 temp64
= readq(&bar0
->general_int_mask
);
1721 temp64
&= ~((u64
) val64
);
1722 writeq(temp64
, &bar0
->general_int_mask
);
1724 * Enable all the Tx side interrupts
1725 * writing 0 Enables all 64 TX interrupt levels
1727 writeq(0x0, &bar0
->tx_traffic_mask
);
1728 } else if (flag
== DISABLE_INTRS
) {
1730 * Disable Tx Traffic Intrs in the general intr mask
1733 writeq(DISABLE_ALL_INTRS
, &bar0
->tx_traffic_mask
);
1734 temp64
= readq(&bar0
->general_int_mask
);
1736 writeq(val64
, &bar0
->general_int_mask
);
1740 /* Rx traffic interrupts */
1741 if (mask
& RX_TRAFFIC_INTR
) {
1742 val64
= RXTRAFFIC_INT_M
;
1743 if (flag
== ENABLE_INTRS
) {
1744 temp64
= readq(&bar0
->general_int_mask
);
1745 temp64
&= ~((u64
) val64
);
1746 writeq(temp64
, &bar0
->general_int_mask
);
1747 /* writing 0 Enables all 8 RX interrupt levels */
1748 writeq(0x0, &bar0
->rx_traffic_mask
);
1749 } else if (flag
== DISABLE_INTRS
) {
1751 * Disable Rx Traffic Intrs in the general intr mask
1754 writeq(DISABLE_ALL_INTRS
, &bar0
->rx_traffic_mask
);
1755 temp64
= readq(&bar0
->general_int_mask
);
1757 writeq(val64
, &bar0
->general_int_mask
);
1762 static int check_prc_pcc_state(u64 val64
, int flag
, int rev_id
, int herc
)
1766 if (flag
== FALSE
) {
1767 if ((!herc
&& (rev_id
>= 4)) || herc
) {
1768 if (!(val64
& ADAPTER_STATUS_RMAC_PCC_IDLE
) &&
1769 ((val64
& ADAPTER_STATUS_RC_PRC_QUIESCENT
) ==
1770 ADAPTER_STATUS_RC_PRC_QUIESCENT
)) {
1774 if (!(val64
& ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE
) &&
1775 ((val64
& ADAPTER_STATUS_RC_PRC_QUIESCENT
) ==
1776 ADAPTER_STATUS_RC_PRC_QUIESCENT
)) {
1781 if ((!herc
&& (rev_id
>= 4)) || herc
) {
1782 if (((val64
& ADAPTER_STATUS_RMAC_PCC_IDLE
) ==
1783 ADAPTER_STATUS_RMAC_PCC_IDLE
) &&
1784 (!(val64
& ADAPTER_STATUS_RC_PRC_QUIESCENT
) ||
1785 ((val64
& ADAPTER_STATUS_RC_PRC_QUIESCENT
) ==
1786 ADAPTER_STATUS_RC_PRC_QUIESCENT
))) {
1790 if (((val64
& ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE
) ==
1791 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE
) &&
1792 (!(val64
& ADAPTER_STATUS_RC_PRC_QUIESCENT
) ||
1793 ((val64
& ADAPTER_STATUS_RC_PRC_QUIESCENT
) ==
1794 ADAPTER_STATUS_RC_PRC_QUIESCENT
))) {
1803 * verify_xena_quiescence - Checks whether the H/W is ready
1804 * @val64 : Value read from adapter status register.
1805 * @flag : indicates if the adapter enable bit was ever written once
1807 * Description: Returns whether the H/W is ready to go or not. Depending
1808 * on whether adapter enable bit was written or not the comparison
1809 * differs and the calling function passes the input argument flag to
1811 * Return: 1 If xena is quiescence
1812 * 0 If Xena is not quiescence
1815 static int verify_xena_quiescence(nic_t
*sp
, u64 val64
, int flag
)
1818 u64 tmp64
= ~((u64
) val64
);
1819 int rev_id
= get_xena_rev_id(sp
->pdev
);
1821 herc
= (sp
->device_type
== XFRAME_II_DEVICE
);
1824 (ADAPTER_STATUS_TDMA_READY
| ADAPTER_STATUS_RDMA_READY
|
1825 ADAPTER_STATUS_PFC_READY
| ADAPTER_STATUS_TMAC_BUF_EMPTY
|
1826 ADAPTER_STATUS_PIC_QUIESCENT
| ADAPTER_STATUS_MC_DRAM_READY
|
1827 ADAPTER_STATUS_MC_QUEUES_READY
| ADAPTER_STATUS_M_PLL_LOCK
|
1828 ADAPTER_STATUS_P_PLL_LOCK
))) {
1829 ret
= check_prc_pcc_state(val64
, flag
, rev_id
, herc
);
1836 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
1837 * @sp: Pointer to device specifc structure
1839 * New procedure to clear mac address reading problems on Alpha platforms
1843 void fix_mac_address(nic_t
* sp
)
1845 XENA_dev_config_t __iomem
*bar0
= sp
->bar0
;
1849 while (fix_mac
[i
] != END_SIGN
) {
1850 writeq(fix_mac
[i
++], &bar0
->gpio_control
);
1852 val64
= readq(&bar0
->gpio_control
);
1857 * start_nic - Turns the device on
1858 * @nic : device private variable.
1860 * This function actually turns the device on. Before this function is
1861 * called,all Registers are configured from their reset states
1862 * and shared memory is allocated but the NIC is still quiescent. On
1863 * calling this function, the device interrupts are cleared and the NIC is
1864 * literally switched on by writing into the adapter control register.
1866 * SUCCESS on success and -1 on failure.
1869 static int start_nic(struct s2io_nic
*nic
)
1871 XENA_dev_config_t __iomem
*bar0
= nic
->bar0
;
1872 struct net_device
*dev
= nic
->dev
;
1873 register u64 val64
= 0;
1876 mac_info_t
*mac_control
;
1877 struct config_param
*config
;
1879 mac_control
= &nic
->mac_control
;
1880 config
= &nic
->config
;
1882 /* PRC Initialization and configuration */
1883 for (i
= 0; i
< config
->rx_ring_num
; i
++) {
1884 writeq((u64
) mac_control
->rings
[i
].rx_blocks
[0].block_dma_addr
,
1885 &bar0
->prc_rxd0_n
[i
]);
1887 val64
= readq(&bar0
->prc_ctrl_n
[i
]);
1888 if (nic
->config
.bimodal
)
1889 val64
|= PRC_CTRL_BIMODAL_INTERRUPT
;
1890 #ifndef CONFIG_2BUFF_MODE
1891 val64
|= PRC_CTRL_RC_ENABLED
;
1893 val64
|= PRC_CTRL_RC_ENABLED
| PRC_CTRL_RING_MODE_3
;
1895 writeq(val64
, &bar0
->prc_ctrl_n
[i
]);
1898 #ifdef CONFIG_2BUFF_MODE
1899 /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
1900 val64
= readq(&bar0
->rx_pa_cfg
);
1901 val64
|= RX_PA_CFG_IGNORE_L2_ERR
;
1902 writeq(val64
, &bar0
->rx_pa_cfg
);
1906 * Enabling MC-RLDRAM. After enabling the device, we timeout
1907 * for around 100ms, which is approximately the time required
1908 * for the device to be ready for operation.
1910 val64
= readq(&bar0
->mc_rldram_mrs
);
1911 val64
|= MC_RLDRAM_QUEUE_SIZE_ENABLE
| MC_RLDRAM_MRS_ENABLE
;
1912 SPECIAL_REG_WRITE(val64
, &bar0
->mc_rldram_mrs
, UF
);
1913 val64
= readq(&bar0
->mc_rldram_mrs
);
1915 msleep(100); /* Delay by around 100 ms. */
1917 /* Enabling ECC Protection. */
1918 val64
= readq(&bar0
->adapter_control
);
1919 val64
&= ~ADAPTER_ECC_EN
;
1920 writeq(val64
, &bar0
->adapter_control
);
1923 * Clearing any possible Link state change interrupts that
1924 * could have popped up just before Enabling the card.
1926 val64
= readq(&bar0
->mac_rmac_err_reg
);
1928 writeq(val64
, &bar0
->mac_rmac_err_reg
);
1931 * Verify if the device is ready to be enabled, if so enable
1934 val64
= readq(&bar0
->adapter_status
);
1935 if (!verify_xena_quiescence(nic
, val64
, nic
->device_enabled_once
)) {
1936 DBG_PRINT(ERR_DBG
, "%s: device is not ready, ", dev
->name
);
1937 DBG_PRINT(ERR_DBG
, "Adapter status reads: 0x%llx\n",
1938 (unsigned long long) val64
);
1942 /* Enable select interrupts */
1943 interruptible
= TX_TRAFFIC_INTR
| RX_TRAFFIC_INTR
;
1944 interruptible
|= TX_PIC_INTR
| RX_PIC_INTR
;
1945 interruptible
|= TX_MAC_INTR
| RX_MAC_INTR
;
1947 en_dis_able_nic_intrs(nic
, interruptible
, ENABLE_INTRS
);
1950 * With some switches, link might be already up at this point.
1951 * Because of this weird behavior, when we enable laser,
1952 * we may not get link. We need to handle this. We cannot
1953 * figure out which switch is misbehaving. So we are forced to
1954 * make a global change.
1957 /* Enabling Laser. */
1958 val64
= readq(&bar0
->adapter_control
);
1959 val64
|= ADAPTER_EOI_TX_ON
;
1960 writeq(val64
, &bar0
->adapter_control
);
1962 /* SXE-002: Initialize link and activity LED */
1963 subid
= nic
->pdev
->subsystem_device
;
1964 if (((subid
& 0xFF) >= 0x07) &&
1965 (nic
->device_type
== XFRAME_I_DEVICE
)) {
1966 val64
= readq(&bar0
->gpio_control
);
1967 val64
|= 0x0000800000000000ULL
;
1968 writeq(val64
, &bar0
->gpio_control
);
1969 val64
= 0x0411040400000000ULL
;
1970 writeq(val64
, (void __iomem
*)bar0
+ 0x2700);
1974 * Don't see link state interrupts on certain switches, so
1975 * directly scheduling a link state task from here.
1977 schedule_work(&nic
->set_link_task
);
1983 * free_tx_buffers - Free all queued Tx buffers
1984 * @nic : device private variable.
1986 * Free all queued Tx buffers.
1987 * Return Value: void
1990 static void free_tx_buffers(struct s2io_nic
*nic
)
1992 struct net_device
*dev
= nic
->dev
;
1993 struct sk_buff
*skb
;
1996 mac_info_t
*mac_control
;
1997 struct config_param
*config
;
1998 int cnt
= 0, frg_cnt
;
2000 mac_control
= &nic
->mac_control
;
2001 config
= &nic
->config
;
2003 for (i
= 0; i
< config
->tx_fifo_num
; i
++) {
2004 for (j
= 0; j
< config
->tx_cfg
[i
].fifo_len
- 1; j
++) {
2005 txdp
= (TxD_t
*) mac_control
->fifos
[i
].list_info
[j
].
2008 (struct sk_buff
*) ((unsigned long) txdp
->
2011 memset(txdp
, 0, sizeof(TxD_t
) *
2015 frg_cnt
= skb_shinfo(skb
)->nr_frags
;
2016 pci_unmap_single(nic
->pdev
, (dma_addr_t
)
2017 txdp
->Buffer_Pointer
,
2018 skb
->len
- skb
->data_len
,
2024 for (j
= 0; j
< frg_cnt
; j
++, txdp
++) {
2026 &skb_shinfo(skb
)->frags
[j
];
2027 pci_unmap_page(nic
->pdev
,
2037 memset(txdp
, 0, sizeof(TxD_t
) * config
->max_txds
);
2041 "%s:forcibly freeing %d skbs on FIFO%d\n",
2043 mac_control
->fifos
[i
].tx_curr_get_info
.offset
= 0;
2044 mac_control
->fifos
[i
].tx_curr_put_info
.offset
= 0;
2049 * stop_nic - To stop the nic
2050 * @nic ; device private variable.
2052 * This function does exactly the opposite of what the start_nic()
2053 * function does. This function is called to stop the device.
2058 static void stop_nic(struct s2io_nic
*nic
)
2060 XENA_dev_config_t __iomem
*bar0
= nic
->bar0
;
2061 register u64 val64
= 0;
2062 u16 interruptible
, i
;
2063 mac_info_t
*mac_control
;
2064 struct config_param
*config
;
2066 mac_control
= &nic
->mac_control
;
2067 config
= &nic
->config
;
2069 /* Disable all interrupts */
2070 interruptible
= TX_TRAFFIC_INTR
| RX_TRAFFIC_INTR
;
2071 interruptible
|= TX_PIC_INTR
| RX_PIC_INTR
;
2072 interruptible
|= TX_MAC_INTR
| RX_MAC_INTR
;
2073 en_dis_able_nic_intrs(nic
, interruptible
, DISABLE_INTRS
);
2076 for (i
= 0; i
< config
->rx_ring_num
; i
++) {
2077 val64
= readq(&bar0
->prc_ctrl_n
[i
]);
2078 val64
&= ~((u64
) PRC_CTRL_RC_ENABLED
);
2079 writeq(val64
, &bar0
->prc_ctrl_n
[i
]);
2084 * fill_rx_buffers - Allocates the Rx side skbs
2085 * @nic: device private variable
2086 * @ring_no: ring number
2088 * The function allocates Rx side skbs and puts the physical
2089 * address of these buffers into the RxD buffer pointers, so that the NIC
2090 * can DMA the received frame into these locations.
2091 * The NIC supports 3 receive modes, viz
2093 * 2. three buffer and
2094 * 3. Five buffer modes.
2095 * Each mode defines how many fragments the received frame will be split
2096 * up into by the NIC. The frame is split into L3 header, L4 Header,
2097 * L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
2098 * is split into 3 fragments. As of now only single buffer mode is
2101 * SUCCESS on success or an appropriate -ve value on failure.
2104 int fill_rx_buffers(struct s2io_nic
*nic
, int ring_no
)
2106 struct net_device
*dev
= nic
->dev
;
2107 struct sk_buff
*skb
;
2109 int off
, off1
, size
, block_no
, block_no1
;
2110 int offset
, offset1
;
2113 mac_info_t
*mac_control
;
2114 struct config_param
*config
;
2115 #ifdef CONFIG_2BUFF_MODE
2120 dma_addr_t rxdpphys
;
2122 #ifndef CONFIG_S2IO_NAPI
2123 unsigned long flags
;
2125 RxD_t
*first_rxdp
= NULL
;
2127 mac_control
= &nic
->mac_control
;
2128 config
= &nic
->config
;
2129 alloc_cnt
= mac_control
->rings
[ring_no
].pkt_cnt
-
2130 atomic_read(&nic
->rx_bufs_left
[ring_no
]);
2131 size
= dev
->mtu
+ HEADER_ETHERNET_II_802_3_SIZE
+
2132 HEADER_802_2_SIZE
+ HEADER_SNAP_SIZE
;
2134 while (alloc_tab
< alloc_cnt
) {
2135 block_no
= mac_control
->rings
[ring_no
].rx_curr_put_info
.
2137 block_no1
= mac_control
->rings
[ring_no
].rx_curr_get_info
.
2139 off
= mac_control
->rings
[ring_no
].rx_curr_put_info
.offset
;
2140 off1
= mac_control
->rings
[ring_no
].rx_curr_get_info
.offset
;
2141 #ifndef CONFIG_2BUFF_MODE
2142 offset
= block_no
* (MAX_RXDS_PER_BLOCK
+ 1) + off
;
2143 offset1
= block_no1
* (MAX_RXDS_PER_BLOCK
+ 1) + off1
;
2145 offset
= block_no
* (MAX_RXDS_PER_BLOCK
) + off
;
2146 offset1
= block_no1
* (MAX_RXDS_PER_BLOCK
) + off1
;
2149 rxdp
= mac_control
->rings
[ring_no
].rx_blocks
[block_no
].
2150 block_virt_addr
+ off
;
2151 if ((offset
== offset1
) && (rxdp
->Host_Control
)) {
2152 DBG_PRINT(INTR_DBG
, "%s: Get and Put", dev
->name
);
2153 DBG_PRINT(INTR_DBG
, " info equated\n");
2156 #ifndef CONFIG_2BUFF_MODE
2157 if (rxdp
->Control_1
== END_OF_BLOCK
) {
2158 mac_control
->rings
[ring_no
].rx_curr_put_info
.
2160 mac_control
->rings
[ring_no
].rx_curr_put_info
.
2161 block_index
%= mac_control
->rings
[ring_no
].block_count
;
2162 block_no
= mac_control
->rings
[ring_no
].rx_curr_put_info
.
2165 off
%= (MAX_RXDS_PER_BLOCK
+ 1);
2166 mac_control
->rings
[ring_no
].rx_curr_put_info
.offset
=
2168 rxdp
= (RxD_t
*) ((unsigned long) rxdp
->Control_2
);
2169 DBG_PRINT(INTR_DBG
, "%s: Next block at: %p\n",
2172 #ifndef CONFIG_S2IO_NAPI
2173 spin_lock_irqsave(&nic
->put_lock
, flags
);
2174 mac_control
->rings
[ring_no
].put_pos
=
2175 (block_no
* (MAX_RXDS_PER_BLOCK
+ 1)) + off
;
2176 spin_unlock_irqrestore(&nic
->put_lock
, flags
);
2179 if (rxdp
->Host_Control
== END_OF_BLOCK
) {
2180 mac_control
->rings
[ring_no
].rx_curr_put_info
.
2182 mac_control
->rings
[ring_no
].rx_curr_put_info
.block_index
2183 %= mac_control
->rings
[ring_no
].block_count
;
2184 block_no
= mac_control
->rings
[ring_no
].rx_curr_put_info
2187 DBG_PRINT(INTR_DBG
, "%s: block%d at: 0x%llx\n",
2188 dev
->name
, block_no
,
2189 (unsigned long long) rxdp
->Control_1
);
2190 mac_control
->rings
[ring_no
].rx_curr_put_info
.offset
=
2192 rxdp
= mac_control
->rings
[ring_no
].rx_blocks
[block_no
].
2195 #ifndef CONFIG_S2IO_NAPI
2196 spin_lock_irqsave(&nic
->put_lock
, flags
);
2197 mac_control
->rings
[ring_no
].put_pos
= (block_no
*
2198 (MAX_RXDS_PER_BLOCK
+ 1)) + off
;
2199 spin_unlock_irqrestore(&nic
->put_lock
, flags
);
2203 #ifndef CONFIG_2BUFF_MODE
2204 if (rxdp
->Control_1
& RXD_OWN_XENA
)
2206 if (rxdp
->Control_2
& BIT(0))
2209 mac_control
->rings
[ring_no
].rx_curr_put_info
.
2213 #ifdef CONFIG_2BUFF_MODE
2215 * RxDs Spanning cache lines will be replenished only
2216 * if the succeeding RxD is also owned by Host. It
2217 * will always be the ((8*i)+3) and ((8*i)+6)
2218 * descriptors for the 48 byte descriptor. The offending
2219 * decsriptor is of-course the 3rd descriptor.
2221 rxdpphys
= mac_control
->rings
[ring_no
].rx_blocks
[block_no
].
2222 block_dma_addr
+ (off
* sizeof(RxD_t
));
2223 if (((u64
) (rxdpphys
)) % 128 > 80) {
2224 rxdpnext
= mac_control
->rings
[ring_no
].rx_blocks
[block_no
].
2225 block_virt_addr
+ (off
+ 1);
2226 if (rxdpnext
->Host_Control
== END_OF_BLOCK
) {
2227 nextblk
= (block_no
+ 1) %
2228 (mac_control
->rings
[ring_no
].block_count
);
2229 rxdpnext
= mac_control
->rings
[ring_no
].rx_blocks
2230 [nextblk
].block_virt_addr
;
2232 if (rxdpnext
->Control_2
& BIT(0))
2237 #ifndef CONFIG_2BUFF_MODE
2238 skb
= dev_alloc_skb(size
+ NET_IP_ALIGN
);
2240 skb
= dev_alloc_skb(dev
->mtu
+ ALIGN_SIZE
+ BUF0_LEN
+ 4);
2243 DBG_PRINT(ERR_DBG
, "%s: Out of ", dev
->name
);
2244 DBG_PRINT(ERR_DBG
, "memory to allocate SKBs\n");
2247 first_rxdp
->Control_1
|= RXD_OWN_XENA
;
2251 #ifndef CONFIG_2BUFF_MODE
2252 skb_reserve(skb
, NET_IP_ALIGN
);
2253 memset(rxdp
, 0, sizeof(RxD_t
));
2254 rxdp
->Buffer0_ptr
= pci_map_single
2255 (nic
->pdev
, skb
->data
, size
, PCI_DMA_FROMDEVICE
);
2256 rxdp
->Control_2
&= (~MASK_BUFFER0_SIZE
);
2257 rxdp
->Control_2
|= SET_BUFFER0_SIZE(size
);
2258 rxdp
->Host_Control
= (unsigned long) (skb
);
2259 if (alloc_tab
& ((1 << rxsync_frequency
) - 1))
2260 rxdp
->Control_1
|= RXD_OWN_XENA
;
2262 off
%= (MAX_RXDS_PER_BLOCK
+ 1);
2263 mac_control
->rings
[ring_no
].rx_curr_put_info
.offset
= off
;
2265 ba
= &mac_control
->rings
[ring_no
].ba
[block_no
][off
];
2266 skb_reserve(skb
, BUF0_LEN
);
2267 tmp
= ((unsigned long) skb
->data
& ALIGN_SIZE
);
2269 skb_reserve(skb
, (ALIGN_SIZE
+ 1) - tmp
);
2271 memset(rxdp
, 0, sizeof(RxD_t
));
2272 rxdp
->Buffer2_ptr
= pci_map_single
2273 (nic
->pdev
, skb
->data
, dev
->mtu
+ BUF0_LEN
+ 4,
2274 PCI_DMA_FROMDEVICE
);
2276 pci_map_single(nic
->pdev
, ba
->ba_0
, BUF0_LEN
,
2277 PCI_DMA_FROMDEVICE
);
2279 pci_map_single(nic
->pdev
, ba
->ba_1
, BUF1_LEN
,
2280 PCI_DMA_FROMDEVICE
);
2282 rxdp
->Control_2
= SET_BUFFER2_SIZE(dev
->mtu
+ 4);
2283 rxdp
->Control_2
|= SET_BUFFER0_SIZE(BUF0_LEN
);
2284 rxdp
->Control_2
|= SET_BUFFER1_SIZE(1); /* dummy. */
2285 rxdp
->Control_2
|= BIT(0); /* Set Buffer_Empty bit. */
2286 rxdp
->Host_Control
= (u64
) ((unsigned long) (skb
));
2287 if (alloc_tab
& ((1 << rxsync_frequency
) - 1))
2288 rxdp
->Control_1
|= RXD_OWN_XENA
;
2290 mac_control
->rings
[ring_no
].rx_curr_put_info
.offset
= off
;
2292 rxdp
->Control_2
|= SET_RXD_MARKER
;
2294 if (!(alloc_tab
& ((1 << rxsync_frequency
) - 1))) {
2297 first_rxdp
->Control_1
|= RXD_OWN_XENA
;
2301 atomic_inc(&nic
->rx_bufs_left
[ring_no
]);
2306 /* Transfer ownership of first descriptor to adapter just before
2307 * exiting. Before that, use memory barrier so that ownership
2308 * and other fields are seen by adapter correctly.
2312 first_rxdp
->Control_1
|= RXD_OWN_XENA
;
2319 * free_rx_buffers - Frees all Rx buffers
2320 * @sp: device private variable.
2322 * This function will free all Rx buffers allocated by host.
2327 static void free_rx_buffers(struct s2io_nic
*sp
)
2329 struct net_device
*dev
= sp
->dev
;
2330 int i
, j
, blk
= 0, off
, buf_cnt
= 0;
2332 struct sk_buff
*skb
;
2333 mac_info_t
*mac_control
;
2334 struct config_param
*config
;
2335 #ifdef CONFIG_2BUFF_MODE
2339 mac_control
= &sp
->mac_control
;
2340 config
= &sp
->config
;
2342 for (i
= 0; i
< config
->rx_ring_num
; i
++) {
2343 for (j
= 0, blk
= 0; j
< config
->rx_cfg
[i
].num_rxd
; j
++) {
2344 off
= j
% (MAX_RXDS_PER_BLOCK
+ 1);
2345 rxdp
= mac_control
->rings
[i
].rx_blocks
[blk
].
2346 block_virt_addr
+ off
;
2348 #ifndef CONFIG_2BUFF_MODE
2349 if (rxdp
->Control_1
== END_OF_BLOCK
) {
2351 (RxD_t
*) ((unsigned long) rxdp
->
2357 if (rxdp
->Host_Control
== END_OF_BLOCK
) {
2363 if (!(rxdp
->Control_1
& RXD_OWN_XENA
)) {
2364 memset(rxdp
, 0, sizeof(RxD_t
));
2369 (struct sk_buff
*) ((unsigned long) rxdp
->
2372 #ifndef CONFIG_2BUFF_MODE
2373 pci_unmap_single(sp
->pdev
, (dma_addr_t
)
2376 HEADER_ETHERNET_II_802_3_SIZE
2377 + HEADER_802_2_SIZE
+
2379 PCI_DMA_FROMDEVICE
);
2381 ba
= &mac_control
->rings
[i
].ba
[blk
][off
];
2382 pci_unmap_single(sp
->pdev
, (dma_addr_t
)
2385 PCI_DMA_FROMDEVICE
);
2386 pci_unmap_single(sp
->pdev
, (dma_addr_t
)
2389 PCI_DMA_FROMDEVICE
);
2390 pci_unmap_single(sp
->pdev
, (dma_addr_t
)
2392 dev
->mtu
+ BUF0_LEN
+ 4,
2393 PCI_DMA_FROMDEVICE
);
2396 atomic_dec(&sp
->rx_bufs_left
[i
]);
2399 memset(rxdp
, 0, sizeof(RxD_t
));
2401 mac_control
->rings
[i
].rx_curr_put_info
.block_index
= 0;
2402 mac_control
->rings
[i
].rx_curr_get_info
.block_index
= 0;
2403 mac_control
->rings
[i
].rx_curr_put_info
.offset
= 0;
2404 mac_control
->rings
[i
].rx_curr_get_info
.offset
= 0;
2405 atomic_set(&sp
->rx_bufs_left
[i
], 0);
2406 DBG_PRINT(INIT_DBG
, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2407 dev
->name
, buf_cnt
, i
);
2412 * s2io_poll - Rx interrupt handler for NAPI support
2413 * @dev : pointer to the device structure.
2414 * @budget : The number of packets that were budgeted to be processed
2415 * during one pass through the 'Poll" function.
2417 * Comes into picture only if NAPI support has been incorporated. It does
2418 * the same thing that rx_intr_handler does, but not in a interrupt context
2419 * also It will process only a given number of packets.
2421 * 0 on success and 1 if there are No Rx packets to be processed.
2424 #if defined(CONFIG_S2IO_NAPI)
2425 static int s2io_poll(struct net_device
*dev
, int *budget
)
2427 nic_t
*nic
= dev
->priv
;
2428 int pkt_cnt
= 0, org_pkts_to_process
;
2429 mac_info_t
*mac_control
;
2430 struct config_param
*config
;
2431 XENA_dev_config_t __iomem
*bar0
= nic
->bar0
;
2435 atomic_inc(&nic
->isr_cnt
);
2436 mac_control
= &nic
->mac_control
;
2437 config
= &nic
->config
;
2439 nic
->pkts_to_process
= *budget
;
2440 if (nic
->pkts_to_process
> dev
->quota
)
2441 nic
->pkts_to_process
= dev
->quota
;
2442 org_pkts_to_process
= nic
->pkts_to_process
;
2444 val64
= readq(&bar0
->rx_traffic_int
);
2445 writeq(val64
, &bar0
->rx_traffic_int
);
2447 for (i
= 0; i
< config
->rx_ring_num
; i
++) {
2448 rx_intr_handler(&mac_control
->rings
[i
]);
2449 pkt_cnt
= org_pkts_to_process
- nic
->pkts_to_process
;
2450 if (!nic
->pkts_to_process
) {
2451 /* Quota for the current iteration has been met */
2458 dev
->quota
-= pkt_cnt
;
2460 netif_rx_complete(dev
);
2462 for (i
= 0; i
< config
->rx_ring_num
; i
++) {
2463 if (fill_rx_buffers(nic
, i
) == -ENOMEM
) {
2464 DBG_PRINT(ERR_DBG
, "%s:Out of memory", dev
->name
);
2465 DBG_PRINT(ERR_DBG
, " in Rx Poll!!\n");
2469 /* Re enable the Rx interrupts. */
2470 en_dis_able_nic_intrs(nic
, RX_TRAFFIC_INTR
, ENABLE_INTRS
);
2471 atomic_dec(&nic
->isr_cnt
);
2475 dev
->quota
-= pkt_cnt
;
2478 for (i
= 0; i
< config
->rx_ring_num
; i
++) {
2479 if (fill_rx_buffers(nic
, i
) == -ENOMEM
) {
2480 DBG_PRINT(ERR_DBG
, "%s:Out of memory", dev
->name
);
2481 DBG_PRINT(ERR_DBG
, " in Rx Poll!!\n");
2485 atomic_dec(&nic
->isr_cnt
);
2491 * rx_intr_handler - Rx interrupt handler
2492 * @nic: device private variable.
2494 * If the interrupt is because of a received frame or if the
2495 * receive ring contains fresh as yet un-processed frames,this function is
2496 * called. It picks out the RxD at which place the last Rx processing had
2497 * stopped and sends the skb to the OSM's Rx handler and then increments
2502 static void rx_intr_handler(ring_info_t
*ring_data
)
2504 nic_t
*nic
= ring_data
->nic
;
2505 struct net_device
*dev
= (struct net_device
*) nic
->dev
;
2506 int get_block
, get_offset
, put_block
, put_offset
, ring_bufs
;
2507 rx_curr_get_info_t get_info
, put_info
;
2509 struct sk_buff
*skb
;
2510 #ifndef CONFIG_S2IO_NAPI
2513 spin_lock(&nic
->rx_lock
);
2514 if (atomic_read(&nic
->card_state
) == CARD_DOWN
) {
2515 DBG_PRINT(INTR_DBG
, "%s: %s going down for reset\n",
2516 __FUNCTION__
, dev
->name
);
2517 spin_unlock(&nic
->rx_lock
);
2521 get_info
= ring_data
->rx_curr_get_info
;
2522 get_block
= get_info
.block_index
;
2523 put_info
= ring_data
->rx_curr_put_info
;
2524 put_block
= put_info
.block_index
;
2525 ring_bufs
= get_info
.ring_len
+1;
2526 rxdp
= ring_data
->rx_blocks
[get_block
].block_virt_addr
+
2528 get_offset
= (get_block
* (MAX_RXDS_PER_BLOCK
+ 1)) +
2530 #ifndef CONFIG_S2IO_NAPI
2531 spin_lock(&nic
->put_lock
);
2532 put_offset
= ring_data
->put_pos
;
2533 spin_unlock(&nic
->put_lock
);
2535 put_offset
= (put_block
* (MAX_RXDS_PER_BLOCK
+ 1)) +
2538 while (RXD_IS_UP2DT(rxdp
) &&
2539 (((get_offset
+ 1) % ring_bufs
) != put_offset
)) {
2540 skb
= (struct sk_buff
*) ((unsigned long)rxdp
->Host_Control
);
2542 DBG_PRINT(ERR_DBG
, "%s: The skb is ",
2544 DBG_PRINT(ERR_DBG
, "Null in Rx Intr\n");
2545 spin_unlock(&nic
->rx_lock
);
2548 #ifndef CONFIG_2BUFF_MODE
2549 pci_unmap_single(nic
->pdev
, (dma_addr_t
)
2552 HEADER_ETHERNET_II_802_3_SIZE
+
2555 PCI_DMA_FROMDEVICE
);
2557 pci_unmap_single(nic
->pdev
, (dma_addr_t
)
2559 BUF0_LEN
, PCI_DMA_FROMDEVICE
);
2560 pci_unmap_single(nic
->pdev
, (dma_addr_t
)
2562 BUF1_LEN
, PCI_DMA_FROMDEVICE
);
2563 pci_unmap_single(nic
->pdev
, (dma_addr_t
)
2565 dev
->mtu
+ BUF0_LEN
+ 4,
2566 PCI_DMA_FROMDEVICE
);
2568 rx_osm_handler(ring_data
, rxdp
);
2570 ring_data
->rx_curr_get_info
.offset
=
2572 rxdp
= ring_data
->rx_blocks
[get_block
].block_virt_addr
+
2574 if (get_info
.offset
&&
2575 (!(get_info
.offset
% MAX_RXDS_PER_BLOCK
))) {
2576 get_info
.offset
= 0;
2577 ring_data
->rx_curr_get_info
.offset
2580 get_block
%= ring_data
->block_count
;
2581 ring_data
->rx_curr_get_info
.block_index
2583 rxdp
= ring_data
->rx_blocks
[get_block
].block_virt_addr
;
2586 get_offset
= (get_block
* (MAX_RXDS_PER_BLOCK
+ 1)) +
2588 #ifdef CONFIG_S2IO_NAPI
2589 nic
->pkts_to_process
-= 1;
2590 if (!nic
->pkts_to_process
)
2594 if ((indicate_max_pkts
) && (pkt_cnt
> indicate_max_pkts
))
2598 spin_unlock(&nic
->rx_lock
);
2602 * tx_intr_handler - Transmit interrupt handler
2603 * @nic : device private variable
2605 * If an interrupt was raised to indicate DMA complete of the
2606 * Tx packet, this function is called. It identifies the last TxD
2607 * whose buffer was freed and frees all skbs whose data have already
2608 * DMA'ed into the NICs internal memory.
2613 static void tx_intr_handler(fifo_info_t
*fifo_data
)
2615 nic_t
*nic
= fifo_data
->nic
;
2616 struct net_device
*dev
= (struct net_device
*) nic
->dev
;
2617 tx_curr_get_info_t get_info
, put_info
;
2618 struct sk_buff
*skb
;
2622 get_info
= fifo_data
->tx_curr_get_info
;
2623 put_info
= fifo_data
->tx_curr_put_info
;
2624 txdlp
= (TxD_t
*) fifo_data
->list_info
[get_info
.offset
].
2626 while ((!(txdlp
->Control_1
& TXD_LIST_OWN_XENA
)) &&
2627 (get_info
.offset
!= put_info
.offset
) &&
2628 (txdlp
->Host_Control
)) {
2629 /* Check for TxD errors */
2630 if (txdlp
->Control_1
& TXD_T_CODE
) {
2631 unsigned long long err
;
2632 err
= txdlp
->Control_1
& TXD_T_CODE
;
2633 if ((err
>> 48) == 0xA) {
2634 DBG_PRINT(TX_DBG
, "TxD returned due \
2635 to loss of link\n");
2638 DBG_PRINT(ERR_DBG
, "***TxD error \
2643 skb
= (struct sk_buff
*) ((unsigned long)
2644 txdlp
->Host_Control
);
2646 DBG_PRINT(ERR_DBG
, "%s: Null skb ",
2648 DBG_PRINT(ERR_DBG
, "in Tx Free Intr\n");
2652 frg_cnt
= skb_shinfo(skb
)->nr_frags
;
2653 nic
->tx_pkt_count
++;
2655 pci_unmap_single(nic
->pdev
, (dma_addr_t
)
2656 txdlp
->Buffer_Pointer
,
2657 skb
->len
- skb
->data_len
,
2663 for (j
= 0; j
< frg_cnt
; j
++, txdlp
++) {
2665 &skb_shinfo(skb
)->frags
[j
];
2666 if (!txdlp
->Buffer_Pointer
)
2668 pci_unmap_page(nic
->pdev
,
2678 (sizeof(TxD_t
) * fifo_data
->max_txds
));
2680 /* Updating the statistics block */
2681 nic
->stats
.tx_bytes
+= skb
->len
;
2682 dev_kfree_skb_irq(skb
);
2685 get_info
.offset
%= get_info
.fifo_len
+ 1;
2686 txdlp
= (TxD_t
*) fifo_data
->list_info
2687 [get_info
.offset
].list_virt_addr
;
2688 fifo_data
->tx_curr_get_info
.offset
=
2692 spin_lock(&nic
->tx_lock
);
2693 if (netif_queue_stopped(dev
))
2694 netif_wake_queue(dev
);
2695 spin_unlock(&nic
->tx_lock
);
2699 * alarm_intr_handler - Alarm Interrrupt handler
2700 * @nic: device private variable
2701 * Description: If the interrupt was neither because of Rx packet or Tx
2702 * complete, this function is called. If the interrupt was to indicate
2703 * a loss of link, the OSM link status handler is invoked for any other
2704 * alarm interrupt the block that raised the interrupt is displayed
2705 * and a H/W reset is issued.
2710 static void alarm_intr_handler(struct s2io_nic
*nic
)
2712 struct net_device
*dev
= (struct net_device
*) nic
->dev
;
2713 XENA_dev_config_t __iomem
*bar0
= nic
->bar0
;
2714 register u64 val64
= 0, err_reg
= 0;
2716 /* Handling link status change error Intr */
2717 if (s2io_link_fault_indication(nic
) == MAC_RMAC_ERR_TIMER
) {
2718 err_reg
= readq(&bar0
->mac_rmac_err_reg
);
2719 writeq(err_reg
, &bar0
->mac_rmac_err_reg
);
2720 if (err_reg
& RMAC_LINK_STATE_CHANGE_INT
) {
2721 schedule_work(&nic
->set_link_task
);
2725 /* Handling Ecc errors */
2726 val64
= readq(&bar0
->mc_err_reg
);
2727 writeq(val64
, &bar0
->mc_err_reg
);
2728 if (val64
& (MC_ERR_REG_ECC_ALL_SNG
| MC_ERR_REG_ECC_ALL_DBL
)) {
2729 if (val64
& MC_ERR_REG_ECC_ALL_DBL
) {
2730 nic
->mac_control
.stats_info
->sw_stat
.
2732 DBG_PRINT(INIT_DBG
, "%s: Device indicates ",
2734 DBG_PRINT(INIT_DBG
, "double ECC error!!\n");
2735 if (nic
->device_type
!= XFRAME_II_DEVICE
) {
2736 /* Reset XframeI only if critical error */
2737 if (val64
& (MC_ERR_REG_MIRI_ECC_DB_ERR_0
|
2738 MC_ERR_REG_MIRI_ECC_DB_ERR_1
)) {
2739 netif_stop_queue(dev
);
2740 schedule_work(&nic
->rst_timer_task
);
2744 nic
->mac_control
.stats_info
->sw_stat
.
2749 /* In case of a serious error, the device will be Reset. */
2750 val64
= readq(&bar0
->serr_source
);
2751 if (val64
& SERR_SOURCE_ANY
) {
2752 DBG_PRINT(ERR_DBG
, "%s: Device indicates ", dev
->name
);
2753 DBG_PRINT(ERR_DBG
, "serious error %llx!!\n",
2754 (unsigned long long)val64
);
2755 netif_stop_queue(dev
);
2756 schedule_work(&nic
->rst_timer_task
);
2760 * Also as mentioned in the latest Errata sheets if the PCC_FB_ECC
2761 * Error occurs, the adapter will be recycled by disabling the
2762 * adapter enable bit and enabling it again after the device
2763 * becomes Quiescent.
2765 val64
= readq(&bar0
->pcc_err_reg
);
2766 writeq(val64
, &bar0
->pcc_err_reg
);
2767 if (val64
& PCC_FB_ECC_DB_ERR
) {
2768 u64 ac
= readq(&bar0
->adapter_control
);
2769 ac
&= ~(ADAPTER_CNTL_EN
);
2770 writeq(ac
, &bar0
->adapter_control
);
2771 ac
= readq(&bar0
->adapter_control
);
2772 schedule_work(&nic
->set_link_task
);
2775 /* Other type of interrupts are not being handled now, TODO */
2779 * wait_for_cmd_complete - waits for a command to complete.
2780 * @sp : private member of the device structure, which is a pointer to the
2781 * s2io_nic structure.
2782 * Description: Function that waits for a command to Write into RMAC
2783 * ADDR DATA registers to be completed and returns either success or
2784 * error depending on whether the command was complete or not.
2786 * SUCCESS on success and FAILURE on failure.
2789 int wait_for_cmd_complete(nic_t
* sp
)
2791 XENA_dev_config_t __iomem
*bar0
= sp
->bar0
;
2792 int ret
= FAILURE
, cnt
= 0;
2796 val64
= readq(&bar0
->rmac_addr_cmd_mem
);
2797 if (!(val64
& RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING
)) {
2810 * s2io_reset - Resets the card.
2811 * @sp : private member of the device structure.
2812 * Description: Function to Reset the card. This function then also
2813 * restores the previously saved PCI configuration space registers as
2814 * the card reset also resets the configuration space.
2819 void s2io_reset(nic_t
* sp
)
2821 XENA_dev_config_t __iomem
*bar0
= sp
->bar0
;
2825 /* Back up the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
2826 pci_read_config_word(sp
->pdev
, PCIX_COMMAND_REGISTER
, &(pci_cmd
));
2828 val64
= SW_RESET_ALL
;
2829 writeq(val64
, &bar0
->sw_reset
);
2832 * At this stage, if the PCI write is indeed completed, the
2833 * card is reset and so is the PCI Config space of the device.
2834 * So a read cannot be issued at this stage on any of the
2835 * registers to ensure the write into "sw_reset" register
2837 * Question: Is there any system call that will explicitly force
2838 * all the write commands still pending on the bus to be pushed
2840 * As of now I'am just giving a 250ms delay and hoping that the
2841 * PCI write to sw_reset register is done by this time.
2845 /* Restore the PCI state saved during initialization. */
2846 pci_restore_state(sp
->pdev
);
2847 pci_write_config_word(sp
->pdev
, PCIX_COMMAND_REGISTER
,
2853 /* Set swapper to enable I/O register access */
2854 s2io_set_swapper(sp
);
2856 /* Clear certain PCI/PCI-X fields after reset */
2857 if (sp
->device_type
== XFRAME_II_DEVICE
) {
2858 /* Clear parity err detect bit */
2859 pci_write_config_word(sp
->pdev
, PCI_STATUS
, 0x8000);
2861 /* Clearing PCIX Ecc status register */
2862 pci_write_config_dword(sp
->pdev
, 0x68, 0x7C);
2864 /* Clearing PCI_STATUS error reflected here */
2865 writeq(BIT(62), &bar0
->txpic_int_reg
);
2868 /* Reset device statistics maintained by OS */
2869 memset(&sp
->stats
, 0, sizeof (struct net_device_stats
));
2871 /* SXE-002: Configure link and activity LED to turn it off */
2872 subid
= sp
->pdev
->subsystem_device
;
2873 if (((subid
& 0xFF) >= 0x07) &&
2874 (sp
->device_type
== XFRAME_I_DEVICE
)) {
2875 val64
= readq(&bar0
->gpio_control
);
2876 val64
|= 0x0000800000000000ULL
;
2877 writeq(val64
, &bar0
->gpio_control
);
2878 val64
= 0x0411040400000000ULL
;
2879 writeq(val64
, (void __iomem
*)bar0
+ 0x2700);
2883 * Clear spurious ECC interrupts that would have occured on
2884 * XFRAME II cards after reset.
2886 if (sp
->device_type
== XFRAME_II_DEVICE
) {
2887 val64
= readq(&bar0
->pcc_err_reg
);
2888 writeq(val64
, &bar0
->pcc_err_reg
);
2891 sp
->device_enabled_once
= FALSE
;
2895 * s2io_set_swapper - to set the swapper controle on the card
2896 * @sp : private member of the device structure,
2897 * pointer to the s2io_nic structure.
2898 * Description: Function to set the swapper control on the card
2899 * correctly depending on the 'endianness' of the system.
2901 * SUCCESS on success and FAILURE on failure.
2904 int s2io_set_swapper(nic_t
* sp
)
2906 struct net_device
*dev
= sp
->dev
;
2907 XENA_dev_config_t __iomem
*bar0
= sp
->bar0
;
2908 u64 val64
, valt
, valr
;
2911 * Set proper endian settings and verify the same by reading
2912 * the PIF Feed-back register.
2915 val64
= readq(&bar0
->pif_rd_swapper_fb
);
2916 if (val64
!= 0x0123456789ABCDEFULL
) {
2918 u64 value
[] = { 0xC30000C3C30000C3ULL
, /* FE=1, SE=1 */
2919 0x8100008181000081ULL
, /* FE=1, SE=0 */
2920 0x4200004242000042ULL
, /* FE=0, SE=1 */
2921 0}; /* FE=0, SE=0 */
2924 writeq(value
[i
], &bar0
->swapper_ctrl
);
2925 val64
= readq(&bar0
->pif_rd_swapper_fb
);
2926 if (val64
== 0x0123456789ABCDEFULL
)
2931 DBG_PRINT(ERR_DBG
, "%s: Endian settings are wrong, ",
2933 DBG_PRINT(ERR_DBG
, "feedback read %llx\n",
2934 (unsigned long long) val64
);
2939 valr
= readq(&bar0
->swapper_ctrl
);
2942 valt
= 0x0123456789ABCDEFULL
;
2943 writeq(valt
, &bar0
->xmsi_address
);
2944 val64
= readq(&bar0
->xmsi_address
);
2948 u64 value
[] = { 0x00C3C30000C3C300ULL
, /* FE=1, SE=1 */
2949 0x0081810000818100ULL
, /* FE=1, SE=0 */
2950 0x0042420000424200ULL
, /* FE=0, SE=1 */
2951 0}; /* FE=0, SE=0 */
2954 writeq((value
[i
] | valr
), &bar0
->swapper_ctrl
);
2955 writeq(valt
, &bar0
->xmsi_address
);
2956 val64
= readq(&bar0
->xmsi_address
);
2962 unsigned long long x
= val64
;
2963 DBG_PRINT(ERR_DBG
, "Write failed, Xmsi_addr ");
2964 DBG_PRINT(ERR_DBG
, "reads:0x%llx\n", x
);
2968 val64
= readq(&bar0
->swapper_ctrl
);
2969 val64
&= 0xFFFF000000000000ULL
;
2973 * The device by default set to a big endian format, so a
2974 * big endian driver need not set anything.
2976 val64
|= (SWAPPER_CTRL_TXP_FE
|
2977 SWAPPER_CTRL_TXP_SE
|
2978 SWAPPER_CTRL_TXD_R_FE
|
2979 SWAPPER_CTRL_TXD_W_FE
|
2980 SWAPPER_CTRL_TXF_R_FE
|
2981 SWAPPER_CTRL_RXD_R_FE
|
2982 SWAPPER_CTRL_RXD_W_FE
|
2983 SWAPPER_CTRL_RXF_W_FE
|
2984 SWAPPER_CTRL_XMSI_FE
|
2985 SWAPPER_CTRL_XMSI_SE
|
2986 SWAPPER_CTRL_STATS_FE
| SWAPPER_CTRL_STATS_SE
);
2987 writeq(val64
, &bar0
->swapper_ctrl
);
2990 * Initially we enable all bits to make it accessible by the
2991 * driver, then we selectively enable only those bits that
2994 val64
|= (SWAPPER_CTRL_TXP_FE
|
2995 SWAPPER_CTRL_TXP_SE
|
2996 SWAPPER_CTRL_TXD_R_FE
|
2997 SWAPPER_CTRL_TXD_R_SE
|
2998 SWAPPER_CTRL_TXD_W_FE
|
2999 SWAPPER_CTRL_TXD_W_SE
|
3000 SWAPPER_CTRL_TXF_R_FE
|
3001 SWAPPER_CTRL_RXD_R_FE
|
3002 SWAPPER_CTRL_RXD_R_SE
|
3003 SWAPPER_CTRL_RXD_W_FE
|
3004 SWAPPER_CTRL_RXD_W_SE
|
3005 SWAPPER_CTRL_RXF_W_FE
|
3006 SWAPPER_CTRL_XMSI_FE
|
3007 SWAPPER_CTRL_XMSI_SE
|
3008 SWAPPER_CTRL_STATS_FE
| SWAPPER_CTRL_STATS_SE
);
3009 writeq(val64
, &bar0
->swapper_ctrl
);
3011 val64
= readq(&bar0
->swapper_ctrl
);
3014 * Verifying if endian settings are accurate by reading a
3015 * feedback register.
3017 val64
= readq(&bar0
->pif_rd_swapper_fb
);
3018 if (val64
!= 0x0123456789ABCDEFULL
) {
3019 /* Endian settings are incorrect, calls for another dekko. */
3020 DBG_PRINT(ERR_DBG
, "%s: Endian settings are wrong, ",
3022 DBG_PRINT(ERR_DBG
, "feedback read %llx\n",
3023 (unsigned long long) val64
);
3030 /* ********************************************************* *
3031 * Functions defined below concern the OS part of the driver *
3032 * ********************************************************* */
3035 * s2io_open - open entry point of the driver
3036 * @dev : pointer to the device structure.
3038 * This function is the open entry point of the driver. It mainly calls a
3039 * function to allocate Rx buffers and inserts them into the buffer
3040 * descriptors and then enables the Rx part of the NIC.
3042 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3046 int s2io_open(struct net_device
*dev
)
3048 nic_t
*sp
= dev
->priv
;
3052 * Make sure you have link off by default every time
3053 * Nic is initialized
3055 netif_carrier_off(dev
);
3056 sp
->last_link_state
= 0;
3058 /* Initialize H/W and enable interrupts */
3059 if (s2io_card_up(sp
)) {
3060 DBG_PRINT(ERR_DBG
, "%s: H/W initialization failed\n",
3063 goto hw_init_failed
;
3066 /* After proper initialization of H/W, register ISR */
3067 err
= request_irq((int) sp
->pdev
->irq
, s2io_isr
, SA_SHIRQ
,
3070 DBG_PRINT(ERR_DBG
, "%s: ISR registration failed\n",
3072 goto isr_registration_failed
;
3075 if (s2io_set_mac_addr(dev
, dev
->dev_addr
) == FAILURE
) {
3076 DBG_PRINT(ERR_DBG
, "Set Mac Address Failed\n");
3078 goto setting_mac_address_failed
;
3081 netif_start_queue(dev
);
3084 setting_mac_address_failed
:
3085 free_irq(sp
->pdev
->irq
, dev
);
3086 isr_registration_failed
:
3087 del_timer_sync(&sp
->alarm_timer
);
3094 * s2io_close -close entry point of the driver
3095 * @dev : device pointer.
3097 * This is the stop entry point of the driver. It needs to undo exactly
3098 * whatever was done by the open entry point,thus it's usually referred to
3099 * as the close function.Among other things this function mainly stops the
3100 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3102 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3106 int s2io_close(struct net_device
*dev
)
3108 nic_t
*sp
= dev
->priv
;
3109 flush_scheduled_work();
3110 netif_stop_queue(dev
);
3111 /* Reset card, kill tasklet and free Tx and Rx buffers. */
3114 free_irq(sp
->pdev
->irq
, dev
);
3115 sp
->device_close_flag
= TRUE
; /* Device is shut down. */
3120 * s2io_xmit - Tx entry point of te driver
3121 * @skb : the socket buffer containing the Tx data.
3122 * @dev : device pointer.
3124 * This function is the Tx entry point of the driver. S2IO NIC supports
3125 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
3126 * NOTE: when device cant queue the pkt,just the trans_start variable will
3129 * 0 on success & 1 on failure.
3132 int s2io_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
3134 nic_t
*sp
= dev
->priv
;
3135 u16 frg_cnt
, frg_len
, i
, queue
, queue_len
, put_off
, get_off
;
3138 TxFIFO_element_t __iomem
*tx_fifo
;
3139 unsigned long flags
;
3144 int vlan_priority
= 0;
3145 mac_info_t
*mac_control
;
3146 struct config_param
*config
;
3148 mac_control
= &sp
->mac_control
;
3149 config
= &sp
->config
;
3151 DBG_PRINT(TX_DBG
, "%s: In Neterion Tx routine\n", dev
->name
);
3152 spin_lock_irqsave(&sp
->tx_lock
, flags
);
3153 if (atomic_read(&sp
->card_state
) == CARD_DOWN
) {
3154 DBG_PRINT(TX_DBG
, "%s: Card going down for reset\n",
3156 spin_unlock_irqrestore(&sp
->tx_lock
, flags
);
3163 /* Get Fifo number to Transmit based on vlan priority */
3164 if (sp
->vlgrp
&& vlan_tx_tag_present(skb
)) {
3165 vlan_tag
= vlan_tx_tag_get(skb
);
3166 vlan_priority
= vlan_tag
>> 13;
3167 queue
= config
->fifo_mapping
[vlan_priority
];
3170 put_off
= (u16
) mac_control
->fifos
[queue
].tx_curr_put_info
.offset
;
3171 get_off
= (u16
) mac_control
->fifos
[queue
].tx_curr_get_info
.offset
;
3172 txdp
= (TxD_t
*) mac_control
->fifos
[queue
].list_info
[put_off
].
3175 queue_len
= mac_control
->fifos
[queue
].tx_curr_put_info
.fifo_len
+ 1;
3176 /* Avoid "put" pointer going beyond "get" pointer */
3177 if (txdp
->Host_Control
|| (((put_off
+ 1) % queue_len
) == get_off
)) {
3178 DBG_PRINT(TX_DBG
, "Error in xmit, No free TXDs.\n");
3179 netif_stop_queue(dev
);
3181 spin_unlock_irqrestore(&sp
->tx_lock
, flags
);
3185 /* A buffer with no data will be dropped */
3187 DBG_PRINT(TX_DBG
, "%s:Buffer has no data..\n", dev
->name
);
3189 spin_unlock_irqrestore(&sp
->tx_lock
, flags
);
3194 mss
= skb_shinfo(skb
)->tso_size
;
3196 txdp
->Control_1
|= TXD_TCP_LSO_EN
;
3197 txdp
->Control_1
|= TXD_TCP_LSO_MSS(mss
);
3201 frg_cnt
= skb_shinfo(skb
)->nr_frags
;
3202 frg_len
= skb
->len
- skb
->data_len
;
3204 txdp
->Buffer_Pointer
= pci_map_single
3205 (sp
->pdev
, skb
->data
, frg_len
, PCI_DMA_TODEVICE
);
3206 txdp
->Host_Control
= (unsigned long) skb
;
3207 if (skb
->ip_summed
== CHECKSUM_HW
) {
3209 (TXD_TX_CKO_IPV4_EN
| TXD_TX_CKO_TCP_EN
|
3213 txdp
->Control_2
|= config
->tx_intr_type
;
3215 if (sp
->vlgrp
&& vlan_tx_tag_present(skb
)) {
3216 txdp
->Control_2
|= TXD_VLAN_ENABLE
;
3217 txdp
->Control_2
|= TXD_VLAN_TAG(vlan_tag
);
3220 txdp
->Control_1
|= (TXD_BUFFER0_SIZE(frg_len
) |
3221 TXD_GATHER_CODE_FIRST
);
3222 txdp
->Control_1
|= TXD_LIST_OWN_XENA
;
3224 /* For fragmented SKB. */
3225 for (i
= 0; i
< frg_cnt
; i
++) {
3226 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
3227 /* A '0' length fragment will be ignored */
3231 txdp
->Buffer_Pointer
= (u64
) pci_map_page
3232 (sp
->pdev
, frag
->page
, frag
->page_offset
,
3233 frag
->size
, PCI_DMA_TODEVICE
);
3234 txdp
->Control_1
|= TXD_BUFFER0_SIZE(frag
->size
);
3236 txdp
->Control_1
|= TXD_GATHER_CODE_LAST
;
3238 tx_fifo
= mac_control
->tx_FIFO_start
[queue
];
3239 val64
= mac_control
->fifos
[queue
].list_info
[put_off
].list_phy_addr
;
3240 writeq(val64
, &tx_fifo
->TxDL_Pointer
);
3242 val64
= (TX_FIFO_LAST_TXD_NUM(frg_cnt
) | TX_FIFO_FIRST_LIST
|
3247 val64
|= TX_FIFO_SPECIAL_FUNC
;
3249 writeq(val64
, &tx_fifo
->List_Control
);
3254 put_off
%= mac_control
->fifos
[queue
].tx_curr_put_info
.fifo_len
+ 1;
3255 mac_control
->fifos
[queue
].tx_curr_put_info
.offset
= put_off
;
3257 /* Avoid "put" pointer going beyond "get" pointer */
3258 if (((put_off
+ 1) % queue_len
) == get_off
) {
3260 "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
3262 netif_stop_queue(dev
);
3265 dev
->trans_start
= jiffies
;
3266 spin_unlock_irqrestore(&sp
->tx_lock
, flags
);
3272 s2io_alarm_handle(unsigned long data
)
3274 nic_t
*sp
= (nic_t
*)data
;
3276 alarm_intr_handler(sp
);
3277 mod_timer(&sp
->alarm_timer
, jiffies
+ HZ
/ 2);
3280 static void s2io_txpic_intr_handle(nic_t
*sp
)
3282 XENA_dev_config_t __iomem
*bar0
= sp
->bar0
;
3285 val64
= readq(&bar0
->pic_int_status
);
3286 if (val64
& PIC_INT_GPIO
) {
3287 val64
= readq(&bar0
->gpio_int_reg
);
3288 if ((val64
& GPIO_INT_REG_LINK_DOWN
) &&
3289 (val64
& GPIO_INT_REG_LINK_UP
)) {
3290 val64
|= GPIO_INT_REG_LINK_DOWN
;
3291 val64
|= GPIO_INT_REG_LINK_UP
;
3292 writeq(val64
, &bar0
->gpio_int_reg
);
3296 if (((sp
->last_link_state
== LINK_UP
) &&
3297 (val64
& GPIO_INT_REG_LINK_DOWN
)) ||
3298 ((sp
->last_link_state
== LINK_DOWN
) &&
3299 (val64
& GPIO_INT_REG_LINK_UP
))) {
3300 val64
= readq(&bar0
->gpio_int_mask
);
3301 val64
|= GPIO_INT_MASK_LINK_DOWN
;
3302 val64
|= GPIO_INT_MASK_LINK_UP
;
3303 writeq(val64
, &bar0
->gpio_int_mask
);
3304 s2io_set_link((unsigned long)sp
);
3307 if (sp
->last_link_state
== LINK_UP
) {
3308 /*enable down interrupt */
3309 val64
= readq(&bar0
->gpio_int_mask
);
3310 /* unmasks link down intr */
3311 val64
&= ~GPIO_INT_MASK_LINK_DOWN
;
3312 /* masks link up intr */
3313 val64
|= GPIO_INT_MASK_LINK_UP
;
3314 writeq(val64
, &bar0
->gpio_int_mask
);
3316 /*enable UP Interrupt */
3317 val64
= readq(&bar0
->gpio_int_mask
);
3318 /* unmasks link up interrupt */
3319 val64
&= ~GPIO_INT_MASK_LINK_UP
;
3320 /* masks link down interrupt */
3321 val64
|= GPIO_INT_MASK_LINK_DOWN
;
3322 writeq(val64
, &bar0
->gpio_int_mask
);
3328 * s2io_isr - ISR handler of the device .
3329 * @irq: the irq of the device.
3330 * @dev_id: a void pointer to the dev structure of the NIC.
3331 * @pt_regs: pointer to the registers pushed on the stack.
3332 * Description: This function is the ISR handler of the device. It
3333 * identifies the reason for the interrupt and calls the relevant
3334 * service routines. As a contongency measure, this ISR allocates the
3335 * recv buffers, if their numbers are below the panic value which is
3336 * presently set to 25% of the original number of rcv buffers allocated.
3338 * IRQ_HANDLED: will be returned if IRQ was handled by this routine
3339 * IRQ_NONE: will be returned if interrupt is not from our device
3341 static irqreturn_t
s2io_isr(int irq
, void *dev_id
, struct pt_regs
*regs
)
3343 struct net_device
*dev
= (struct net_device
*) dev_id
;
3344 nic_t
*sp
= dev
->priv
;
3345 XENA_dev_config_t __iomem
*bar0
= sp
->bar0
;
3347 u64 reason
= 0, val64
;
3348 mac_info_t
*mac_control
;
3349 struct config_param
*config
;
3351 atomic_inc(&sp
->isr_cnt
);
3352 mac_control
= &sp
->mac_control
;
3353 config
= &sp
->config
;
3356 * Identify the cause for interrupt and call the appropriate
3357 * interrupt handler. Causes for the interrupt could be;
3361 * 4. Error in any functional blocks of the NIC.
3363 reason
= readq(&bar0
->general_int_status
);
3366 /* The interrupt was not raised by Xena. */
3367 atomic_dec(&sp
->isr_cnt
);
3371 #ifdef CONFIG_S2IO_NAPI
3372 if (reason
& GEN_INTR_RXTRAFFIC
) {
3373 if (netif_rx_schedule_prep(dev
)) {
3374 en_dis_able_nic_intrs(sp
, RX_TRAFFIC_INTR
,
3376 __netif_rx_schedule(dev
);
3380 /* If Intr is because of Rx Traffic */
3381 if (reason
& GEN_INTR_RXTRAFFIC
) {
3383 * rx_traffic_int reg is an R1 register, writing all 1's
3384 * will ensure that the actual interrupt causing bit get's
3385 * cleared and hence a read can be avoided.
3387 val64
= 0xFFFFFFFFFFFFFFFFULL
;
3388 writeq(val64
, &bar0
->rx_traffic_int
);
3389 for (i
= 0; i
< config
->rx_ring_num
; i
++) {
3390 rx_intr_handler(&mac_control
->rings
[i
]);
3395 /* If Intr is because of Tx Traffic */
3396 if (reason
& GEN_INTR_TXTRAFFIC
) {
3398 * tx_traffic_int reg is an R1 register, writing all 1's
3399 * will ensure that the actual interrupt causing bit get's
3400 * cleared and hence a read can be avoided.
3402 val64
= 0xFFFFFFFFFFFFFFFFULL
;
3403 writeq(val64
, &bar0
->tx_traffic_int
);
3405 for (i
= 0; i
< config
->tx_fifo_num
; i
++)
3406 tx_intr_handler(&mac_control
->fifos
[i
]);
3409 if (reason
& GEN_INTR_TXPIC
)
3410 s2io_txpic_intr_handle(sp
);
3412 * If the Rx buffer count is below the panic threshold then
3413 * reallocate the buffers from the interrupt handler itself,
3414 * else schedule a tasklet to reallocate the buffers.
3416 #ifndef CONFIG_S2IO_NAPI
3417 for (i
= 0; i
< config
->rx_ring_num
; i
++) {
3419 int rxb_size
= atomic_read(&sp
->rx_bufs_left
[i
]);
3420 int level
= rx_buffer_level(sp
, rxb_size
, i
);
3422 if ((level
== PANIC
) && (!TASKLET_IN_USE
)) {
3423 DBG_PRINT(INTR_DBG
, "%s: Rx BD hit ", dev
->name
);
3424 DBG_PRINT(INTR_DBG
, "PANIC levels\n");
3425 if ((ret
= fill_rx_buffers(sp
, i
)) == -ENOMEM
) {
3426 DBG_PRINT(ERR_DBG
, "%s:Out of memory",
3428 DBG_PRINT(ERR_DBG
, " in ISR!!\n");
3429 clear_bit(0, (&sp
->tasklet_status
));
3430 atomic_dec(&sp
->isr_cnt
);
3433 clear_bit(0, (&sp
->tasklet_status
));
3434 } else if (level
== LOW
) {
3435 tasklet_schedule(&sp
->task
);
3440 atomic_dec(&sp
->isr_cnt
);
3447 static void s2io_updt_stats(nic_t
*sp
)
3449 XENA_dev_config_t __iomem
*bar0
= sp
->bar0
;
3453 if (atomic_read(&sp
->card_state
) == CARD_UP
) {
3454 /* Apprx 30us on a 133 MHz bus */
3455 val64
= SET_UPDT_CLICKS(10) |
3456 STAT_CFG_ONE_SHOT_EN
| STAT_CFG_STAT_EN
;
3457 writeq(val64
, &bar0
->stat_cfg
);
3460 val64
= readq(&bar0
->stat_cfg
);
3461 if (!(val64
& BIT(0)))
3465 break; /* Updt failed */
3471 * s2io_get_stats - Updates the device statistics structure.
3472 * @dev : pointer to the device structure.
3474 * This function updates the device statistics structure in the s2io_nic
3475 * structure and returns a pointer to the same.
3477 * pointer to the updated net_device_stats structure.
3480 struct net_device_stats
*s2io_get_stats(struct net_device
*dev
)
3482 nic_t
*sp
= dev
->priv
;
3483 mac_info_t
*mac_control
;
3484 struct config_param
*config
;
3487 mac_control
= &sp
->mac_control
;
3488 config
= &sp
->config
;
3490 /* Configure Stats for immediate updt */
3491 s2io_updt_stats(sp
);
3493 sp
->stats
.tx_packets
=
3494 le32_to_cpu(mac_control
->stats_info
->tmac_frms
);
3495 sp
->stats
.tx_errors
=
3496 le32_to_cpu(mac_control
->stats_info
->tmac_any_err_frms
);
3497 sp
->stats
.rx_errors
=
3498 le32_to_cpu(mac_control
->stats_info
->rmac_drop_frms
);
3499 sp
->stats
.multicast
=
3500 le32_to_cpu(mac_control
->stats_info
->rmac_vld_mcst_frms
);
3501 sp
->stats
.rx_length_errors
=
3502 le32_to_cpu(mac_control
->stats_info
->rmac_long_frms
);
3504 return (&sp
->stats
);
3508 * s2io_set_multicast - entry point for multicast address enable/disable.
3509 * @dev : pointer to the device structure
3511 * This function is a driver entry point which gets called by the kernel
3512 * whenever multicast addresses must be enabled/disabled. This also gets
3513 * called to set/reset promiscuous mode. Depending on the deivce flag, we
3514 * determine, if multicast address must be enabled or if promiscuous mode
3515 * is to be disabled etc.
3520 static void s2io_set_multicast(struct net_device
*dev
)
3523 struct dev_mc_list
*mclist
;
3524 nic_t
*sp
= dev
->priv
;
3525 XENA_dev_config_t __iomem
*bar0
= sp
->bar0
;
3526 u64 val64
= 0, multi_mac
= 0x010203040506ULL
, mask
=
3528 u64 dis_addr
= 0xffffffffffffULL
, mac_addr
= 0;
3531 if ((dev
->flags
& IFF_ALLMULTI
) && (!sp
->m_cast_flg
)) {
3532 /* Enable all Multicast addresses */
3533 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac
),
3534 &bar0
->rmac_addr_data0_mem
);
3535 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask
),
3536 &bar0
->rmac_addr_data1_mem
);
3537 val64
= RMAC_ADDR_CMD_MEM_WE
|
3538 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD
|
3539 RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET
);
3540 writeq(val64
, &bar0
->rmac_addr_cmd_mem
);
3541 /* Wait till command completes */
3542 wait_for_cmd_complete(sp
);
3545 sp
->all_multi_pos
= MAC_MC_ALL_MC_ADDR_OFFSET
;
3546 } else if ((dev
->flags
& IFF_ALLMULTI
) && (sp
->m_cast_flg
)) {
3547 /* Disable all Multicast addresses */
3548 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr
),
3549 &bar0
->rmac_addr_data0_mem
);
3550 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
3551 &bar0
->rmac_addr_data1_mem
);
3552 val64
= RMAC_ADDR_CMD_MEM_WE
|
3553 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD
|
3554 RMAC_ADDR_CMD_MEM_OFFSET(sp
->all_multi_pos
);
3555 writeq(val64
, &bar0
->rmac_addr_cmd_mem
);
3556 /* Wait till command completes */
3557 wait_for_cmd_complete(sp
);
3560 sp
->all_multi_pos
= 0;
3563 if ((dev
->flags
& IFF_PROMISC
) && (!sp
->promisc_flg
)) {
3564 /* Put the NIC into promiscuous mode */
3565 add
= &bar0
->mac_cfg
;
3566 val64
= readq(&bar0
->mac_cfg
);
3567 val64
|= MAC_CFG_RMAC_PROM_ENABLE
;
3569 writeq(RMAC_CFG_KEY(0x4C0D), &bar0
->rmac_cfg_key
);
3570 writel((u32
) val64
, add
);
3571 writeq(RMAC_CFG_KEY(0x4C0D), &bar0
->rmac_cfg_key
);
3572 writel((u32
) (val64
>> 32), (add
+ 4));
3574 val64
= readq(&bar0
->mac_cfg
);
3575 sp
->promisc_flg
= 1;
3576 DBG_PRINT(INFO_DBG
, "%s: entered promiscuous mode\n",
3578 } else if (!(dev
->flags
& IFF_PROMISC
) && (sp
->promisc_flg
)) {
3579 /* Remove the NIC from promiscuous mode */
3580 add
= &bar0
->mac_cfg
;
3581 val64
= readq(&bar0
->mac_cfg
);
3582 val64
&= ~MAC_CFG_RMAC_PROM_ENABLE
;
3584 writeq(RMAC_CFG_KEY(0x4C0D), &bar0
->rmac_cfg_key
);
3585 writel((u32
) val64
, add
);
3586 writeq(RMAC_CFG_KEY(0x4C0D), &bar0
->rmac_cfg_key
);
3587 writel((u32
) (val64
>> 32), (add
+ 4));
3589 val64
= readq(&bar0
->mac_cfg
);
3590 sp
->promisc_flg
= 0;
3591 DBG_PRINT(INFO_DBG
, "%s: left promiscuous mode\n",
3595 /* Update individual M_CAST address list */
3596 if ((!sp
->m_cast_flg
) && dev
->mc_count
) {
3598 (MAX_ADDRS_SUPPORTED
- MAC_MC_ADDR_START_OFFSET
- 1)) {
3599 DBG_PRINT(ERR_DBG
, "%s: No more Rx filters ",
3601 DBG_PRINT(ERR_DBG
, "can be added, please enable ");
3602 DBG_PRINT(ERR_DBG
, "ALL_MULTI instead\n");
3606 prev_cnt
= sp
->mc_addr_count
;
3607 sp
->mc_addr_count
= dev
->mc_count
;
3609 /* Clear out the previous list of Mc in the H/W. */
3610 for (i
= 0; i
< prev_cnt
; i
++) {
3611 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr
),
3612 &bar0
->rmac_addr_data0_mem
);
3613 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
3614 &bar0
->rmac_addr_data1_mem
);
3615 val64
= RMAC_ADDR_CMD_MEM_WE
|
3616 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD
|
3617 RMAC_ADDR_CMD_MEM_OFFSET
3618 (MAC_MC_ADDR_START_OFFSET
+ i
);
3619 writeq(val64
, &bar0
->rmac_addr_cmd_mem
);
3621 /* Wait for command completes */
3622 if (wait_for_cmd_complete(sp
)) {
3623 DBG_PRINT(ERR_DBG
, "%s: Adding ",
3625 DBG_PRINT(ERR_DBG
, "Multicasts failed\n");
3630 /* Create the new Rx filter list and update the same in H/W. */
3631 for (i
= 0, mclist
= dev
->mc_list
; i
< dev
->mc_count
;
3632 i
++, mclist
= mclist
->next
) {
3633 memcpy(sp
->usr_addrs
[i
].addr
, mclist
->dmi_addr
,
3635 for (j
= 0; j
< ETH_ALEN
; j
++) {
3636 mac_addr
|= mclist
->dmi_addr
[j
];
3640 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr
),
3641 &bar0
->rmac_addr_data0_mem
);
3642 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
3643 &bar0
->rmac_addr_data1_mem
);
3644 val64
= RMAC_ADDR_CMD_MEM_WE
|
3645 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD
|
3646 RMAC_ADDR_CMD_MEM_OFFSET
3647 (i
+ MAC_MC_ADDR_START_OFFSET
);
3648 writeq(val64
, &bar0
->rmac_addr_cmd_mem
);
3650 /* Wait for command completes */
3651 if (wait_for_cmd_complete(sp
)) {
3652 DBG_PRINT(ERR_DBG
, "%s: Adding ",
3654 DBG_PRINT(ERR_DBG
, "Multicasts failed\n");
3662 * s2io_set_mac_addr - Programs the Xframe mac address
3663 * @dev : pointer to the device structure.
3664 * @addr: a uchar pointer to the new mac address which is to be set.
3665 * Description : This procedure will program the Xframe to receive
3666 * frames with new Mac Address
3667 * Return value: SUCCESS on success and an appropriate (-)ve integer
3668 * as defined in errno.h file on failure.
3671 int s2io_set_mac_addr(struct net_device
*dev
, u8
* addr
)
3673 nic_t
*sp
= dev
->priv
;
3674 XENA_dev_config_t __iomem
*bar0
= sp
->bar0
;
3675 register u64 val64
, mac_addr
= 0;
3679 * Set the new MAC address as the new unicast filter and reflect this
3680 * change on the device address registered with the OS. It will be
3683 for (i
= 0; i
< ETH_ALEN
; i
++) {
3685 mac_addr
|= addr
[i
];
3688 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr
),
3689 &bar0
->rmac_addr_data0_mem
);
3692 RMAC_ADDR_CMD_MEM_WE
| RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD
|
3693 RMAC_ADDR_CMD_MEM_OFFSET(0);
3694 writeq(val64
, &bar0
->rmac_addr_cmd_mem
);
3695 /* Wait till command completes */
3696 if (wait_for_cmd_complete(sp
)) {
3697 DBG_PRINT(ERR_DBG
, "%s: set_mac_addr failed\n", dev
->name
);
3705 * s2io_ethtool_sset - Sets different link parameters.
3706 * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure.
3707 * @info: pointer to the structure with parameters given by ethtool to set
3710 * The function sets different link parameters provided by the user onto
3716 static int s2io_ethtool_sset(struct net_device
*dev
,
3717 struct ethtool_cmd
*info
)
3719 nic_t
*sp
= dev
->priv
;
3720 if ((info
->autoneg
== AUTONEG_ENABLE
) ||
3721 (info
->speed
!= SPEED_10000
) || (info
->duplex
!= DUPLEX_FULL
))
3724 s2io_close(sp
->dev
);
3732 * s2io_ethtol_gset - Return link specific information.
3733 * @sp : private member of the device structure, pointer to the
3734 * s2io_nic structure.
3735 * @info : pointer to the structure with parameters given by ethtool
3736 * to return link information.
3738 * Returns link specific information like speed, duplex etc.. to ethtool.
3740 * return 0 on success.
3743 static int s2io_ethtool_gset(struct net_device
*dev
, struct ethtool_cmd
*info
)
3745 nic_t
*sp
= dev
->priv
;
3746 info
->supported
= (SUPPORTED_10000baseT_Full
| SUPPORTED_FIBRE
);
3747 info
->advertising
= (SUPPORTED_10000baseT_Full
| SUPPORTED_FIBRE
);
3748 info
->port
= PORT_FIBRE
;
3749 /* info->transceiver?? TODO */
3751 if (netif_carrier_ok(sp
->dev
)) {
3752 info
->speed
= 10000;
3753 info
->duplex
= DUPLEX_FULL
;
3759 info
->autoneg
= AUTONEG_DISABLE
;
3764 * s2io_ethtool_gdrvinfo - Returns driver specific information.
3765 * @sp : private member of the device structure, which is a pointer to the
3766 * s2io_nic structure.
3767 * @info : pointer to the structure with parameters given by ethtool to
3768 * return driver information.
3770 * Returns driver specefic information like name, version etc.. to ethtool.
3775 static void s2io_ethtool_gdrvinfo(struct net_device
*dev
,
3776 struct ethtool_drvinfo
*info
)
3778 nic_t
*sp
= dev
->priv
;
3780 strncpy(info
->driver
, s2io_driver_name
, sizeof(s2io_driver_name
));
3781 strncpy(info
->version
, s2io_driver_version
,
3782 sizeof(s2io_driver_version
));
3783 strncpy(info
->fw_version
, "", 32);
3784 strncpy(info
->bus_info
, pci_name(sp
->pdev
), 32);
3785 info
->regdump_len
= XENA_REG_SPACE
;
3786 info
->eedump_len
= XENA_EEPROM_SPACE
;
3787 info
->testinfo_len
= S2IO_TEST_LEN
;
3788 info
->n_stats
= S2IO_STAT_LEN
;
3792 * s2io_ethtool_gregs - dumps the entire space of Xfame into the buffer.
3793 * @sp: private member of the device structure, which is a pointer to the
3794 * s2io_nic structure.
3795 * @regs : pointer to the structure with parameters given by ethtool for
3796 * dumping the registers.
3797 * @reg_space: The input argumnet into which all the registers are dumped.
3799 * Dumps the entire register space of xFrame NIC into the user given
3805 static void s2io_ethtool_gregs(struct net_device
*dev
,
3806 struct ethtool_regs
*regs
, void *space
)
3810 u8
*reg_space
= (u8
*) space
;
3811 nic_t
*sp
= dev
->priv
;
3813 regs
->len
= XENA_REG_SPACE
;
3814 regs
->version
= sp
->pdev
->subsystem_device
;
3816 for (i
= 0; i
< regs
->len
; i
+= 8) {
3817 reg
= readq(sp
->bar0
+ i
);
3818 memcpy((reg_space
+ i
), ®
, 8);
3823 * s2io_phy_id - timer function that alternates adapter LED.
3824 * @data : address of the private member of the device structure, which
3825 * is a pointer to the s2io_nic structure, provided as an u32.
3826 * Description: This is actually the timer function that alternates the
3827 * adapter LED bit of the adapter control bit to set/reset every time on
3828 * invocation. The timer is set for 1/2 a second, hence tha NIC blinks
3829 * once every second.
3831 static void s2io_phy_id(unsigned long data
)
3833 nic_t
*sp
= (nic_t
*) data
;
3834 XENA_dev_config_t __iomem
*bar0
= sp
->bar0
;
3838 subid
= sp
->pdev
->subsystem_device
;
3839 if ((sp
->device_type
== XFRAME_II_DEVICE
) ||
3840 ((subid
& 0xFF) >= 0x07)) {
3841 val64
= readq(&bar0
->gpio_control
);
3842 val64
^= GPIO_CTRL_GPIO_0
;
3843 writeq(val64
, &bar0
->gpio_control
);
3845 val64
= readq(&bar0
->adapter_control
);
3846 val64
^= ADAPTER_LED_ON
;
3847 writeq(val64
, &bar0
->adapter_control
);
3850 mod_timer(&sp
->id_timer
, jiffies
+ HZ
/ 2);
3854 * s2io_ethtool_idnic - To physically identify the nic on the system.
3855 * @sp : private member of the device structure, which is a pointer to the
3856 * s2io_nic structure.
3857 * @id : pointer to the structure with identification parameters given by
3859 * Description: Used to physically identify the NIC on the system.
3860 * The Link LED will blink for a time specified by the user for
3862 * NOTE: The Link has to be Up to be able to blink the LED. Hence
3863 * identification is possible only if it's link is up.
3865 * int , returns 0 on success
3868 static int s2io_ethtool_idnic(struct net_device
*dev
, u32 data
)
3870 u64 val64
= 0, last_gpio_ctrl_val
;
3871 nic_t
*sp
= dev
->priv
;
3872 XENA_dev_config_t __iomem
*bar0
= sp
->bar0
;
3875 subid
= sp
->pdev
->subsystem_device
;
3876 last_gpio_ctrl_val
= readq(&bar0
->gpio_control
);
3877 if ((sp
->device_type
== XFRAME_I_DEVICE
) &&
3878 ((subid
& 0xFF) < 0x07)) {
3879 val64
= readq(&bar0
->adapter_control
);
3880 if (!(val64
& ADAPTER_CNTL_EN
)) {
3882 "Adapter Link down, cannot blink LED\n");
3886 if (sp
->id_timer
.function
== NULL
) {
3887 init_timer(&sp
->id_timer
);
3888 sp
->id_timer
.function
= s2io_phy_id
;
3889 sp
->id_timer
.data
= (unsigned long) sp
;
3891 mod_timer(&sp
->id_timer
, jiffies
);
3893 msleep_interruptible(data
* HZ
);
3895 msleep_interruptible(MAX_FLICKER_TIME
);
3896 del_timer_sync(&sp
->id_timer
);
3898 if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp
->device_type
, subid
)) {
3899 writeq(last_gpio_ctrl_val
, &bar0
->gpio_control
);
3900 last_gpio_ctrl_val
= readq(&bar0
->gpio_control
);
3907 * s2io_ethtool_getpause_data -Pause frame frame generation and reception.
3908 * @sp : private member of the device structure, which is a pointer to the
3909 * s2io_nic structure.
3910 * @ep : pointer to the structure with pause parameters given by ethtool.
3912 * Returns the Pause frame generation and reception capability of the NIC.
3916 static void s2io_ethtool_getpause_data(struct net_device
*dev
,
3917 struct ethtool_pauseparam
*ep
)
3920 nic_t
*sp
= dev
->priv
;
3921 XENA_dev_config_t __iomem
*bar0
= sp
->bar0
;
3923 val64
= readq(&bar0
->rmac_pause_cfg
);
3924 if (val64
& RMAC_PAUSE_GEN_ENABLE
)
3925 ep
->tx_pause
= TRUE
;
3926 if (val64
& RMAC_PAUSE_RX_ENABLE
)
3927 ep
->rx_pause
= TRUE
;
3928 ep
->autoneg
= FALSE
;
3932 * s2io_ethtool_setpause_data - set/reset pause frame generation.
3933 * @sp : private member of the device structure, which is a pointer to the
3934 * s2io_nic structure.
3935 * @ep : pointer to the structure with pause parameters given by ethtool.
3937 * It can be used to set or reset Pause frame generation or reception
3938 * support of the NIC.
3940 * int, returns 0 on Success
3943 static int s2io_ethtool_setpause_data(struct net_device
*dev
,
3944 struct ethtool_pauseparam
*ep
)
3947 nic_t
*sp
= dev
->priv
;
3948 XENA_dev_config_t __iomem
*bar0
= sp
->bar0
;
3950 val64
= readq(&bar0
->rmac_pause_cfg
);
3952 val64
|= RMAC_PAUSE_GEN_ENABLE
;
3954 val64
&= ~RMAC_PAUSE_GEN_ENABLE
;
3956 val64
|= RMAC_PAUSE_RX_ENABLE
;
3958 val64
&= ~RMAC_PAUSE_RX_ENABLE
;
3959 writeq(val64
, &bar0
->rmac_pause_cfg
);
3964 * read_eeprom - reads 4 bytes of data from user given offset.
3965 * @sp : private member of the device structure, which is a pointer to the
3966 * s2io_nic structure.
3967 * @off : offset at which the data must be written
3968 * @data : Its an output parameter where the data read at the given
3971 * Will read 4 bytes of data from the user given offset and return the
3973 * NOTE: Will allow to read only part of the EEPROM visible through the
3976 * -1 on failure and 0 on success.
3979 #define S2IO_DEV_ID 5
3980 static int read_eeprom(nic_t
* sp
, int off
, u32
* data
)
3985 XENA_dev_config_t __iomem
*bar0
= sp
->bar0
;
3987 val64
= I2C_CONTROL_DEV_ID(S2IO_DEV_ID
) | I2C_CONTROL_ADDR(off
) |
3988 I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ
|
3989 I2C_CONTROL_CNTL_START
;
3990 SPECIAL_REG_WRITE(val64
, &bar0
->i2c_control
, LF
);
3992 while (exit_cnt
< 5) {
3993 val64
= readq(&bar0
->i2c_control
);
3994 if (I2C_CONTROL_CNTL_END(val64
)) {
3995 *data
= I2C_CONTROL_GET_DATA(val64
);
4007 * write_eeprom - actually writes the relevant part of the data value.
4008 * @sp : private member of the device structure, which is a pointer to the
4009 * s2io_nic structure.
4010 * @off : offset at which the data must be written
4011 * @data : The data that is to be written
4012 * @cnt : Number of bytes of the data that are actually to be written into
4013 * the Eeprom. (max of 3)
4015 * Actually writes the relevant part of the data value into the Eeprom
4016 * through the I2C bus.
4018 * 0 on success, -1 on failure.
4021 static int write_eeprom(nic_t
* sp
, int off
, u32 data
, int cnt
)
4023 int exit_cnt
= 0, ret
= -1;
4025 XENA_dev_config_t __iomem
*bar0
= sp
->bar0
;
4027 val64
= I2C_CONTROL_DEV_ID(S2IO_DEV_ID
) | I2C_CONTROL_ADDR(off
) |
4028 I2C_CONTROL_BYTE_CNT(cnt
) | I2C_CONTROL_SET_DATA(data
) |
4029 I2C_CONTROL_CNTL_START
;
4030 SPECIAL_REG_WRITE(val64
, &bar0
->i2c_control
, LF
);
4032 while (exit_cnt
< 5) {
4033 val64
= readq(&bar0
->i2c_control
);
4034 if (I2C_CONTROL_CNTL_END(val64
)) {
4035 if (!(val64
& I2C_CONTROL_NACK
))
4047 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
4048 * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure.
4049 * @eeprom : pointer to the user level structure provided by ethtool,
4050 * containing all relevant information.
4051 * @data_buf : user defined value to be written into Eeprom.
4052 * Description: Reads the values stored in the Eeprom at given offset
4053 * for a given length. Stores these values int the input argument data
4054 * buffer 'data_buf' and returns these to the caller (ethtool.)
4059 static int s2io_ethtool_geeprom(struct net_device
*dev
,
4060 struct ethtool_eeprom
*eeprom
, u8
* data_buf
)
4063 nic_t
*sp
= dev
->priv
;
4065 eeprom
->magic
= sp
->pdev
->vendor
| (sp
->pdev
->device
<< 16);
4067 if ((eeprom
->offset
+ eeprom
->len
) > (XENA_EEPROM_SPACE
))
4068 eeprom
->len
= XENA_EEPROM_SPACE
- eeprom
->offset
;
4070 for (i
= 0; i
< eeprom
->len
; i
+= 4) {
4071 if (read_eeprom(sp
, (eeprom
->offset
+ i
), &data
)) {
4072 DBG_PRINT(ERR_DBG
, "Read of EEPROM failed\n");
4076 memcpy((data_buf
+ i
), &valid
, 4);
4082 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
4083 * @sp : private member of the device structure, which is a pointer to the
4084 * s2io_nic structure.
4085 * @eeprom : pointer to the user level structure provided by ethtool,
4086 * containing all relevant information.
4087 * @data_buf ; user defined value to be written into Eeprom.
4089 * Tries to write the user provided value in the Eeprom, at the offset
4090 * given by the user.
4092 * 0 on success, -EFAULT on failure.
4095 static int s2io_ethtool_seeprom(struct net_device
*dev
,
4096 struct ethtool_eeprom
*eeprom
,
4099 int len
= eeprom
->len
, cnt
= 0;
4100 u32 valid
= 0, data
;
4101 nic_t
*sp
= dev
->priv
;
4103 if (eeprom
->magic
!= (sp
->pdev
->vendor
| (sp
->pdev
->device
<< 16))) {
4105 "ETHTOOL_WRITE_EEPROM Err: Magic value ");
4106 DBG_PRINT(ERR_DBG
, "is wrong, Its not 0x%x\n",
4112 data
= (u32
) data_buf
[cnt
] & 0x000000FF;
4114 valid
= (u32
) (data
<< 24);
4118 if (write_eeprom(sp
, (eeprom
->offset
+ cnt
), valid
, 0)) {
4120 "ETHTOOL_WRITE_EEPROM Err: Cannot ");
4122 "write into the specified offset\n");
4133 * s2io_register_test - reads and writes into all clock domains.
4134 * @sp : private member of the device structure, which is a pointer to the
4135 * s2io_nic structure.
4136 * @data : variable that returns the result of each of the test conducted b
4139 * Read and write into all clock domains. The NIC has 3 clock domains,
4140 * see that registers in all the three regions are accessible.
4145 static int s2io_register_test(nic_t
* sp
, uint64_t * data
)
4147 XENA_dev_config_t __iomem
*bar0
= sp
->bar0
;
4151 val64
= readq(&bar0
->pif_rd_swapper_fb
);
4152 if (val64
!= 0x123456789abcdefULL
) {
4154 DBG_PRINT(INFO_DBG
, "Read Test level 1 fails\n");
4157 val64
= readq(&bar0
->rmac_pause_cfg
);
4158 if (val64
!= 0xc000ffff00000000ULL
) {
4160 DBG_PRINT(INFO_DBG
, "Read Test level 2 fails\n");
4163 val64
= readq(&bar0
->rx_queue_cfg
);
4164 if (val64
!= 0x0808080808080808ULL
) {
4166 DBG_PRINT(INFO_DBG
, "Read Test level 3 fails\n");
4169 val64
= readq(&bar0
->xgxs_efifo_cfg
);
4170 if (val64
!= 0x000000001923141EULL
) {
4172 DBG_PRINT(INFO_DBG
, "Read Test level 4 fails\n");
4175 val64
= 0x5A5A5A5A5A5A5A5AULL
;
4176 writeq(val64
, &bar0
->xmsi_data
);
4177 val64
= readq(&bar0
->xmsi_data
);
4178 if (val64
!= 0x5A5A5A5A5A5A5A5AULL
) {
4180 DBG_PRINT(ERR_DBG
, "Write Test level 1 fails\n");
4183 val64
= 0xA5A5A5A5A5A5A5A5ULL
;
4184 writeq(val64
, &bar0
->xmsi_data
);
4185 val64
= readq(&bar0
->xmsi_data
);
4186 if (val64
!= 0xA5A5A5A5A5A5A5A5ULL
) {
4188 DBG_PRINT(ERR_DBG
, "Write Test level 2 fails\n");
4196 * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
4197 * @sp : private member of the device structure, which is a pointer to the
4198 * s2io_nic structure.
4199 * @data:variable that returns the result of each of the test conducted by
4202 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
4208 static int s2io_eeprom_test(nic_t
* sp
, uint64_t * data
)
4213 /* Test Write Error at offset 0 */
4214 if (!write_eeprom(sp
, 0, 0, 3))
4217 /* Test Write at offset 4f0 */
4218 if (write_eeprom(sp
, 0x4F0, 0x01234567, 3))
4220 if (read_eeprom(sp
, 0x4F0, &ret_data
))
4223 if (ret_data
!= 0x01234567)
4226 /* Reset the EEPROM data go FFFF */
4227 write_eeprom(sp
, 0x4F0, 0xFFFFFFFF, 3);
4229 /* Test Write Request Error at offset 0x7c */
4230 if (!write_eeprom(sp
, 0x07C, 0, 3))
4233 /* Test Write Request at offset 0x7fc */
4234 if (write_eeprom(sp
, 0x7FC, 0x01234567, 3))
4236 if (read_eeprom(sp
, 0x7FC, &ret_data
))
4239 if (ret_data
!= 0x01234567)
4242 /* Reset the EEPROM data go FFFF */
4243 write_eeprom(sp
, 0x7FC, 0xFFFFFFFF, 3);
4245 /* Test Write Error at offset 0x80 */
4246 if (!write_eeprom(sp
, 0x080, 0, 3))
4249 /* Test Write Error at offset 0xfc */
4250 if (!write_eeprom(sp
, 0x0FC, 0, 3))
4253 /* Test Write Error at offset 0x100 */
4254 if (!write_eeprom(sp
, 0x100, 0, 3))
4257 /* Test Write Error at offset 4ec */
4258 if (!write_eeprom(sp
, 0x4EC, 0, 3))
4266 * s2io_bist_test - invokes the MemBist test of the card .
4267 * @sp : private member of the device structure, which is a pointer to the
4268 * s2io_nic structure.
4269 * @data:variable that returns the result of each of the test conducted by
4272 * This invokes the MemBist test of the card. We give around
4273 * 2 secs time for the Test to complete. If it's still not complete
4274 * within this peiod, we consider that the test failed.
4276 * 0 on success and -1 on failure.
4279 static int s2io_bist_test(nic_t
* sp
, uint64_t * data
)
4282 int cnt
= 0, ret
= -1;
4284 pci_read_config_byte(sp
->pdev
, PCI_BIST
, &bist
);
4285 bist
|= PCI_BIST_START
;
4286 pci_write_config_word(sp
->pdev
, PCI_BIST
, bist
);
4289 pci_read_config_byte(sp
->pdev
, PCI_BIST
, &bist
);
4290 if (!(bist
& PCI_BIST_START
)) {
4291 *data
= (bist
& PCI_BIST_CODE_MASK
);
4303 * s2io-link_test - verifies the link state of the nic
4304 * @sp ; private member of the device structure, which is a pointer to the
4305 * s2io_nic structure.
4306 * @data: variable that returns the result of each of the test conducted by
4309 * The function verifies the link state of the NIC and updates the input
4310 * argument 'data' appropriately.
4315 static int s2io_link_test(nic_t
* sp
, uint64_t * data
)
4317 XENA_dev_config_t __iomem
*bar0
= sp
->bar0
;
4320 val64
= readq(&bar0
->adapter_status
);
4321 if (val64
& ADAPTER_STATUS_RMAC_LOCAL_FAULT
)
4328 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
4329 * @sp - private member of the device structure, which is a pointer to the
4330 * s2io_nic structure.
4331 * @data - variable that returns the result of each of the test
4332 * conducted by the driver.
4334 * This is one of the offline test that tests the read and write
4335 * access to the RldRam chip on the NIC.
4340 static int s2io_rldram_test(nic_t
* sp
, uint64_t * data
)
4342 XENA_dev_config_t __iomem
*bar0
= sp
->bar0
;
4344 int cnt
, iteration
= 0, test_pass
= 0;
4346 val64
= readq(&bar0
->adapter_control
);
4347 val64
&= ~ADAPTER_ECC_EN
;
4348 writeq(val64
, &bar0
->adapter_control
);
4350 val64
= readq(&bar0
->mc_rldram_test_ctrl
);
4351 val64
|= MC_RLDRAM_TEST_MODE
;
4352 writeq(val64
, &bar0
->mc_rldram_test_ctrl
);
4354 val64
= readq(&bar0
->mc_rldram_mrs
);
4355 val64
|= MC_RLDRAM_QUEUE_SIZE_ENABLE
;
4356 SPECIAL_REG_WRITE(val64
, &bar0
->mc_rldram_mrs
, UF
);
4358 val64
|= MC_RLDRAM_MRS_ENABLE
;
4359 SPECIAL_REG_WRITE(val64
, &bar0
->mc_rldram_mrs
, UF
);
4361 while (iteration
< 2) {
4362 val64
= 0x55555555aaaa0000ULL
;
4363 if (iteration
== 1) {
4364 val64
^= 0xFFFFFFFFFFFF0000ULL
;
4366 writeq(val64
, &bar0
->mc_rldram_test_d0
);
4368 val64
= 0xaaaa5a5555550000ULL
;
4369 if (iteration
== 1) {
4370 val64
^= 0xFFFFFFFFFFFF0000ULL
;
4372 writeq(val64
, &bar0
->mc_rldram_test_d1
);
4374 val64
= 0x55aaaaaaaa5a0000ULL
;
4375 if (iteration
== 1) {
4376 val64
^= 0xFFFFFFFFFFFF0000ULL
;
4378 writeq(val64
, &bar0
->mc_rldram_test_d2
);
4380 val64
= (u64
) (0x0000003fffff0000ULL
);
4381 writeq(val64
, &bar0
->mc_rldram_test_add
);
4384 val64
= MC_RLDRAM_TEST_MODE
;
4385 writeq(val64
, &bar0
->mc_rldram_test_ctrl
);
4388 MC_RLDRAM_TEST_MODE
| MC_RLDRAM_TEST_WRITE
|
4390 writeq(val64
, &bar0
->mc_rldram_test_ctrl
);
4392 for (cnt
= 0; cnt
< 5; cnt
++) {
4393 val64
= readq(&bar0
->mc_rldram_test_ctrl
);
4394 if (val64
& MC_RLDRAM_TEST_DONE
)
4402 val64
= MC_RLDRAM_TEST_MODE
;
4403 writeq(val64
, &bar0
->mc_rldram_test_ctrl
);
4405 val64
|= MC_RLDRAM_TEST_MODE
| MC_RLDRAM_TEST_GO
;
4406 writeq(val64
, &bar0
->mc_rldram_test_ctrl
);
4408 for (cnt
= 0; cnt
< 5; cnt
++) {
4409 val64
= readq(&bar0
->mc_rldram_test_ctrl
);
4410 if (val64
& MC_RLDRAM_TEST_DONE
)
4418 val64
= readq(&bar0
->mc_rldram_test_ctrl
);
4419 if (val64
& MC_RLDRAM_TEST_PASS
)
4434 * s2io_ethtool_test - conducts 6 tsets to determine the health of card.
4435 * @sp : private member of the device structure, which is a pointer to the
4436 * s2io_nic structure.
4437 * @ethtest : pointer to a ethtool command specific structure that will be
4438 * returned to the user.
4439 * @data : variable that returns the result of each of the test
4440 * conducted by the driver.
4442 * This function conducts 6 tests ( 4 offline and 2 online) to determine
4443 * the health of the card.
4448 static void s2io_ethtool_test(struct net_device
*dev
,
4449 struct ethtool_test
*ethtest
,
4452 nic_t
*sp
= dev
->priv
;
4453 int orig_state
= netif_running(sp
->dev
);
4455 if (ethtest
->flags
== ETH_TEST_FL_OFFLINE
) {
4456 /* Offline Tests. */
4458 s2io_close(sp
->dev
);
4460 if (s2io_register_test(sp
, &data
[0]))
4461 ethtest
->flags
|= ETH_TEST_FL_FAILED
;
4465 if (s2io_rldram_test(sp
, &data
[3]))
4466 ethtest
->flags
|= ETH_TEST_FL_FAILED
;
4470 if (s2io_eeprom_test(sp
, &data
[1]))
4471 ethtest
->flags
|= ETH_TEST_FL_FAILED
;
4473 if (s2io_bist_test(sp
, &data
[4]))
4474 ethtest
->flags
|= ETH_TEST_FL_FAILED
;
4484 "%s: is not up, cannot run test\n",
4493 if (s2io_link_test(sp
, &data
[2]))
4494 ethtest
->flags
|= ETH_TEST_FL_FAILED
;
4503 static void s2io_get_ethtool_stats(struct net_device
*dev
,
4504 struct ethtool_stats
*estats
,
4508 nic_t
*sp
= dev
->priv
;
4509 StatInfo_t
*stat_info
= sp
->mac_control
.stats_info
;
4511 s2io_updt_stats(sp
);
4513 (u64
)le32_to_cpu(stat_info
->tmac_frms_oflow
) << 32 |
4514 le32_to_cpu(stat_info
->tmac_frms
);
4516 (u64
)le32_to_cpu(stat_info
->tmac_data_octets_oflow
) << 32 |
4517 le32_to_cpu(stat_info
->tmac_data_octets
);
4518 tmp_stats
[i
++] = le64_to_cpu(stat_info
->tmac_drop_frms
);
4520 (u64
)le32_to_cpu(stat_info
->tmac_mcst_frms_oflow
) << 32 |
4521 le32_to_cpu(stat_info
->tmac_mcst_frms
);
4523 (u64
)le32_to_cpu(stat_info
->tmac_bcst_frms_oflow
) << 32 |
4524 le32_to_cpu(stat_info
->tmac_bcst_frms
);
4525 tmp_stats
[i
++] = le64_to_cpu(stat_info
->tmac_pause_ctrl_frms
);
4527 (u64
)le32_to_cpu(stat_info
->tmac_any_err_frms_oflow
) << 32 |
4528 le32_to_cpu(stat_info
->tmac_any_err_frms
);
4529 tmp_stats
[i
++] = le64_to_cpu(stat_info
->tmac_vld_ip_octets
);
4531 (u64
)le32_to_cpu(stat_info
->tmac_vld_ip_oflow
) << 32 |
4532 le32_to_cpu(stat_info
->tmac_vld_ip
);
4534 (u64
)le32_to_cpu(stat_info
->tmac_drop_ip_oflow
) << 32 |
4535 le32_to_cpu(stat_info
->tmac_drop_ip
);
4537 (u64
)le32_to_cpu(stat_info
->tmac_icmp_oflow
) << 32 |
4538 le32_to_cpu(stat_info
->tmac_icmp
);
4540 (u64
)le32_to_cpu(stat_info
->tmac_rst_tcp_oflow
) << 32 |
4541 le32_to_cpu(stat_info
->tmac_rst_tcp
);
4542 tmp_stats
[i
++] = le64_to_cpu(stat_info
->tmac_tcp
);
4543 tmp_stats
[i
++] = (u64
)le32_to_cpu(stat_info
->tmac_udp_oflow
) << 32 |
4544 le32_to_cpu(stat_info
->tmac_udp
);
4546 (u64
)le32_to_cpu(stat_info
->rmac_vld_frms_oflow
) << 32 |
4547 le32_to_cpu(stat_info
->rmac_vld_frms
);
4549 (u64
)le32_to_cpu(stat_info
->rmac_data_octets_oflow
) << 32 |
4550 le32_to_cpu(stat_info
->rmac_data_octets
);
4551 tmp_stats
[i
++] = le64_to_cpu(stat_info
->rmac_fcs_err_frms
);
4552 tmp_stats
[i
++] = le64_to_cpu(stat_info
->rmac_drop_frms
);
4554 (u64
)le32_to_cpu(stat_info
->rmac_vld_mcst_frms_oflow
) << 32 |
4555 le32_to_cpu(stat_info
->rmac_vld_mcst_frms
);
4557 (u64
)le32_to_cpu(stat_info
->rmac_vld_bcst_frms_oflow
) << 32 |
4558 le32_to_cpu(stat_info
->rmac_vld_bcst_frms
);
4559 tmp_stats
[i
++] = le32_to_cpu(stat_info
->rmac_in_rng_len_err_frms
);
4560 tmp_stats
[i
++] = le64_to_cpu(stat_info
->rmac_long_frms
);
4561 tmp_stats
[i
++] = le64_to_cpu(stat_info
->rmac_pause_ctrl_frms
);
4563 (u64
)le32_to_cpu(stat_info
->rmac_discarded_frms_oflow
) << 32 |
4564 le32_to_cpu(stat_info
->rmac_discarded_frms
);
4566 (u64
)le32_to_cpu(stat_info
->rmac_usized_frms_oflow
) << 32 |
4567 le32_to_cpu(stat_info
->rmac_usized_frms
);
4569 (u64
)le32_to_cpu(stat_info
->rmac_osized_frms_oflow
) << 32 |
4570 le32_to_cpu(stat_info
->rmac_osized_frms
);
4572 (u64
)le32_to_cpu(stat_info
->rmac_frag_frms_oflow
) << 32 |
4573 le32_to_cpu(stat_info
->rmac_frag_frms
);
4575 (u64
)le32_to_cpu(stat_info
->rmac_jabber_frms_oflow
) << 32 |
4576 le32_to_cpu(stat_info
->rmac_jabber_frms
);
4577 tmp_stats
[i
++] = (u64
)le32_to_cpu(stat_info
->rmac_ip_oflow
) << 32 |
4578 le32_to_cpu(stat_info
->rmac_ip
);
4579 tmp_stats
[i
++] = le64_to_cpu(stat_info
->rmac_ip_octets
);
4580 tmp_stats
[i
++] = le32_to_cpu(stat_info
->rmac_hdr_err_ip
);
4581 tmp_stats
[i
++] = (u64
)le32_to_cpu(stat_info
->rmac_drop_ip_oflow
) << 32 |
4582 le32_to_cpu(stat_info
->rmac_drop_ip
);
4583 tmp_stats
[i
++] = (u64
)le32_to_cpu(stat_info
->rmac_icmp_oflow
) << 32 |
4584 le32_to_cpu(stat_info
->rmac_icmp
);
4585 tmp_stats
[i
++] = le64_to_cpu(stat_info
->rmac_tcp
);
4586 tmp_stats
[i
++] = (u64
)le32_to_cpu(stat_info
->rmac_udp_oflow
) << 32 |
4587 le32_to_cpu(stat_info
->rmac_udp
);
4589 (u64
)le32_to_cpu(stat_info
->rmac_err_drp_udp_oflow
) << 32 |
4590 le32_to_cpu(stat_info
->rmac_err_drp_udp
);
4592 (u64
)le32_to_cpu(stat_info
->rmac_pause_cnt_oflow
) << 32 |
4593 le32_to_cpu(stat_info
->rmac_pause_cnt
);
4595 (u64
)le32_to_cpu(stat_info
->rmac_accepted_ip_oflow
) << 32 |
4596 le32_to_cpu(stat_info
->rmac_accepted_ip
);
4597 tmp_stats
[i
++] = le32_to_cpu(stat_info
->rmac_err_tcp
);
4599 tmp_stats
[i
++] = stat_info
->sw_stat
.single_ecc_errs
;
4600 tmp_stats
[i
++] = stat_info
->sw_stat
.double_ecc_errs
;
4603 int s2io_ethtool_get_regs_len(struct net_device
*dev
)
4605 return (XENA_REG_SPACE
);
4609 u32
s2io_ethtool_get_rx_csum(struct net_device
* dev
)
4611 nic_t
*sp
= dev
->priv
;
4613 return (sp
->rx_csum
);
4615 int s2io_ethtool_set_rx_csum(struct net_device
*dev
, u32 data
)
4617 nic_t
*sp
= dev
->priv
;
4626 int s2io_get_eeprom_len(struct net_device
*dev
)
4628 return (XENA_EEPROM_SPACE
);
4631 int s2io_ethtool_self_test_count(struct net_device
*dev
)
4633 return (S2IO_TEST_LEN
);
4635 void s2io_ethtool_get_strings(struct net_device
*dev
,
4636 u32 stringset
, u8
* data
)
4638 switch (stringset
) {
4640 memcpy(data
, s2io_gstrings
, S2IO_STRINGS_LEN
);
4643 memcpy(data
, ðtool_stats_keys
,
4644 sizeof(ethtool_stats_keys
));
4647 static int s2io_ethtool_get_stats_count(struct net_device
*dev
)
4649 return (S2IO_STAT_LEN
);
4652 int s2io_ethtool_op_set_tx_csum(struct net_device
*dev
, u32 data
)
4655 dev
->features
|= NETIF_F_IP_CSUM
;
4657 dev
->features
&= ~NETIF_F_IP_CSUM
;
4663 static struct ethtool_ops netdev_ethtool_ops
= {
4664 .get_settings
= s2io_ethtool_gset
,
4665 .set_settings
= s2io_ethtool_sset
,
4666 .get_drvinfo
= s2io_ethtool_gdrvinfo
,
4667 .get_regs_len
= s2io_ethtool_get_regs_len
,
4668 .get_regs
= s2io_ethtool_gregs
,
4669 .get_link
= ethtool_op_get_link
,
4670 .get_eeprom_len
= s2io_get_eeprom_len
,
4671 .get_eeprom
= s2io_ethtool_geeprom
,
4672 .set_eeprom
= s2io_ethtool_seeprom
,
4673 .get_pauseparam
= s2io_ethtool_getpause_data
,
4674 .set_pauseparam
= s2io_ethtool_setpause_data
,
4675 .get_rx_csum
= s2io_ethtool_get_rx_csum
,
4676 .set_rx_csum
= s2io_ethtool_set_rx_csum
,
4677 .get_tx_csum
= ethtool_op_get_tx_csum
,
4678 .set_tx_csum
= s2io_ethtool_op_set_tx_csum
,
4679 .get_sg
= ethtool_op_get_sg
,
4680 .set_sg
= ethtool_op_set_sg
,
4682 .get_tso
= ethtool_op_get_tso
,
4683 .set_tso
= ethtool_op_set_tso
,
4685 .self_test_count
= s2io_ethtool_self_test_count
,
4686 .self_test
= s2io_ethtool_test
,
4687 .get_strings
= s2io_ethtool_get_strings
,
4688 .phys_id
= s2io_ethtool_idnic
,
4689 .get_stats_count
= s2io_ethtool_get_stats_count
,
4690 .get_ethtool_stats
= s2io_get_ethtool_stats
4694 * s2io_ioctl - Entry point for the Ioctl
4695 * @dev : Device pointer.
4696 * @ifr : An IOCTL specefic structure, that can contain a pointer to
4697 * a proprietary structure used to pass information to the driver.
4698 * @cmd : This is used to distinguish between the different commands that
4699 * can be passed to the IOCTL functions.
4701 * Currently there are no special functionality supported in IOCTL, hence
4702 * function always return EOPNOTSUPPORTED
4705 int s2io_ioctl(struct net_device
*dev
, struct ifreq
*rq
, int cmd
)
4711 * s2io_change_mtu - entry point to change MTU size for the device.
4712 * @dev : device pointer.
4713 * @new_mtu : the new MTU size for the device.
4714 * Description: A driver entry point to change MTU size for the device.
4715 * Before changing the MTU the device must be stopped.
4717 * 0 on success and an appropriate (-)ve integer as defined in errno.h
4721 int s2io_change_mtu(struct net_device
*dev
, int new_mtu
)
4723 nic_t
*sp
= dev
->priv
;
4725 if ((new_mtu
< MIN_MTU
) || (new_mtu
> S2IO_JUMBO_SIZE
)) {
4726 DBG_PRINT(ERR_DBG
, "%s: MTU size is invalid.\n",
4732 if (netif_running(dev
)) {
4734 netif_stop_queue(dev
);
4735 if (s2io_card_up(sp
)) {
4736 DBG_PRINT(ERR_DBG
, "%s: Device bring up failed\n",
4739 if (netif_queue_stopped(dev
))
4740 netif_wake_queue(dev
);
4741 } else { /* Device is down */
4742 XENA_dev_config_t __iomem
*bar0
= sp
->bar0
;
4743 u64 val64
= new_mtu
;
4745 writeq(vBIT(val64
, 2, 14), &bar0
->rmac_max_pyld_len
);
4752 * s2io_tasklet - Bottom half of the ISR.
4753 * @dev_adr : address of the device structure in dma_addr_t format.
4755 * This is the tasklet or the bottom half of the ISR. This is
4756 * an extension of the ISR which is scheduled by the scheduler to be run
4757 * when the load on the CPU is low. All low priority tasks of the ISR can
4758 * be pushed into the tasklet. For now the tasklet is used only to
4759 * replenish the Rx buffers in the Rx buffer descriptors.
4764 static void s2io_tasklet(unsigned long dev_addr
)
4766 struct net_device
*dev
= (struct net_device
*) dev_addr
;
4767 nic_t
*sp
= dev
->priv
;
4769 mac_info_t
*mac_control
;
4770 struct config_param
*config
;
4772 mac_control
= &sp
->mac_control
;
4773 config
= &sp
->config
;
4775 if (!TASKLET_IN_USE
) {
4776 for (i
= 0; i
< config
->rx_ring_num
; i
++) {
4777 ret
= fill_rx_buffers(sp
, i
);
4778 if (ret
== -ENOMEM
) {
4779 DBG_PRINT(ERR_DBG
, "%s: Out of ",
4781 DBG_PRINT(ERR_DBG
, "memory in tasklet\n");
4783 } else if (ret
== -EFILL
) {
4785 "%s: Rx Ring %d is full\n",
4790 clear_bit(0, (&sp
->tasklet_status
));
4795 * s2io_set_link - Set the LInk status
4796 * @data: long pointer to device private structue
4797 * Description: Sets the link status for the adapter
4800 static void s2io_set_link(unsigned long data
)
4802 nic_t
*nic
= (nic_t
*) data
;
4803 struct net_device
*dev
= nic
->dev
;
4804 XENA_dev_config_t __iomem
*bar0
= nic
->bar0
;
4808 if (test_and_set_bit(0, &(nic
->link_state
))) {
4809 /* The card is being reset, no point doing anything */
4813 subid
= nic
->pdev
->subsystem_device
;
4814 if (s2io_link_fault_indication(nic
) == MAC_RMAC_ERR_TIMER
) {
4816 * Allow a small delay for the NICs self initiated
4817 * cleanup to complete.
4822 val64
= readq(&bar0
->adapter_status
);
4823 if (verify_xena_quiescence(nic
, val64
, nic
->device_enabled_once
)) {
4824 if (LINK_IS_UP(val64
)) {
4825 val64
= readq(&bar0
->adapter_control
);
4826 val64
|= ADAPTER_CNTL_EN
;
4827 writeq(val64
, &bar0
->adapter_control
);
4828 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic
->device_type
,
4830 val64
= readq(&bar0
->gpio_control
);
4831 val64
|= GPIO_CTRL_GPIO_0
;
4832 writeq(val64
, &bar0
->gpio_control
);
4833 val64
= readq(&bar0
->gpio_control
);
4835 val64
|= ADAPTER_LED_ON
;
4836 writeq(val64
, &bar0
->adapter_control
);
4838 if (s2io_link_fault_indication(nic
) ==
4839 MAC_RMAC_ERR_TIMER
) {
4840 val64
= readq(&bar0
->adapter_status
);
4841 if (!LINK_IS_UP(val64
)) {
4842 DBG_PRINT(ERR_DBG
, "%s:", dev
->name
);
4843 DBG_PRINT(ERR_DBG
, " Link down");
4844 DBG_PRINT(ERR_DBG
, "after ");
4845 DBG_PRINT(ERR_DBG
, "enabling ");
4846 DBG_PRINT(ERR_DBG
, "device \n");
4849 if (nic
->device_enabled_once
== FALSE
) {
4850 nic
->device_enabled_once
= TRUE
;
4852 s2io_link(nic
, LINK_UP
);
4854 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic
->device_type
,
4856 val64
= readq(&bar0
->gpio_control
);
4857 val64
&= ~GPIO_CTRL_GPIO_0
;
4858 writeq(val64
, &bar0
->gpio_control
);
4859 val64
= readq(&bar0
->gpio_control
);
4861 s2io_link(nic
, LINK_DOWN
);
4863 } else { /* NIC is not Quiescent. */
4864 DBG_PRINT(ERR_DBG
, "%s: Error: ", dev
->name
);
4865 DBG_PRINT(ERR_DBG
, "device is not Quiescent\n");
4866 netif_stop_queue(dev
);
4868 clear_bit(0, &(nic
->link_state
));
4871 static void s2io_card_down(nic_t
* sp
)
4874 XENA_dev_config_t __iomem
*bar0
= sp
->bar0
;
4875 unsigned long flags
;
4876 register u64 val64
= 0;
4878 del_timer_sync(&sp
->alarm_timer
);
4879 /* If s2io_set_link task is executing, wait till it completes. */
4880 while (test_and_set_bit(0, &(sp
->link_state
))) {
4883 atomic_set(&sp
->card_state
, CARD_DOWN
);
4885 /* disable Tx and Rx traffic on the NIC */
4889 tasklet_kill(&sp
->task
);
4891 /* Check if the device is Quiescent and then Reset the NIC */
4893 val64
= readq(&bar0
->adapter_status
);
4894 if (verify_xena_quiescence(sp
, val64
, sp
->device_enabled_once
)) {
4902 "s2io_close:Device not Quiescent ");
4903 DBG_PRINT(ERR_DBG
, "adaper status reads 0x%llx\n",
4904 (unsigned long long) val64
);
4910 /* Waiting till all Interrupt handlers are complete */
4914 if (!atomic_read(&sp
->isr_cnt
))
4919 spin_lock_irqsave(&sp
->tx_lock
, flags
);
4920 /* Free all Tx buffers */
4921 free_tx_buffers(sp
);
4922 spin_unlock_irqrestore(&sp
->tx_lock
, flags
);
4924 /* Free all Rx buffers */
4925 spin_lock_irqsave(&sp
->rx_lock
, flags
);
4926 free_rx_buffers(sp
);
4927 spin_unlock_irqrestore(&sp
->rx_lock
, flags
);
4929 clear_bit(0, &(sp
->link_state
));
4932 static int s2io_card_up(nic_t
* sp
)
4935 mac_info_t
*mac_control
;
4936 struct config_param
*config
;
4937 struct net_device
*dev
= (struct net_device
*) sp
->dev
;
4939 /* Initialize the H/W I/O registers */
4940 if (init_nic(sp
) != 0) {
4941 DBG_PRINT(ERR_DBG
, "%s: H/W initialization failed\n",
4947 * Initializing the Rx buffers. For now we are considering only 1
4948 * Rx ring and initializing buffers into 30 Rx blocks
4950 mac_control
= &sp
->mac_control
;
4951 config
= &sp
->config
;
4953 for (i
= 0; i
< config
->rx_ring_num
; i
++) {
4954 if ((ret
= fill_rx_buffers(sp
, i
))) {
4955 DBG_PRINT(ERR_DBG
, "%s: Out of memory in Open\n",
4958 free_rx_buffers(sp
);
4961 DBG_PRINT(INFO_DBG
, "Buf in ring:%d is %d:\n", i
,
4962 atomic_read(&sp
->rx_bufs_left
[i
]));
4965 /* Setting its receive mode */
4966 s2io_set_multicast(dev
);
4968 /* Enable tasklet for the device */
4969 tasklet_init(&sp
->task
, s2io_tasklet
, (unsigned long) dev
);
4971 /* Enable Rx Traffic and interrupts on the NIC */
4972 if (start_nic(sp
)) {
4973 DBG_PRINT(ERR_DBG
, "%s: Starting NIC failed\n", dev
->name
);
4974 tasklet_kill(&sp
->task
);
4976 free_irq(dev
->irq
, dev
);
4977 free_rx_buffers(sp
);
4981 S2IO_TIMER_CONF(sp
->alarm_timer
, s2io_alarm_handle
, sp
, (HZ
/2));
4983 atomic_set(&sp
->card_state
, CARD_UP
);
4988 * s2io_restart_nic - Resets the NIC.
4989 * @data : long pointer to the device private structure
4991 * This function is scheduled to be run by the s2io_tx_watchdog
4992 * function after 0.5 secs to reset the NIC. The idea is to reduce
4993 * the run time of the watch dog routine which is run holding a
4997 static void s2io_restart_nic(unsigned long data
)
4999 struct net_device
*dev
= (struct net_device
*) data
;
5000 nic_t
*sp
= dev
->priv
;
5003 if (s2io_card_up(sp
)) {
5004 DBG_PRINT(ERR_DBG
, "%s: Device bring up failed\n",
5007 netif_wake_queue(dev
);
5008 DBG_PRINT(ERR_DBG
, "%s: was reset by Tx watchdog timer\n",
5014 * s2io_tx_watchdog - Watchdog for transmit side.
5015 * @dev : Pointer to net device structure
5017 * This function is triggered if the Tx Queue is stopped
5018 * for a pre-defined amount of time when the Interface is still up.
5019 * If the Interface is jammed in such a situation, the hardware is
5020 * reset (by s2io_close) and restarted again (by s2io_open) to
5021 * overcome any problem that might have been caused in the hardware.
5026 static void s2io_tx_watchdog(struct net_device
*dev
)
5028 nic_t
*sp
= dev
->priv
;
5030 if (netif_carrier_ok(dev
)) {
5031 schedule_work(&sp
->rst_timer_task
);
5036 * rx_osm_handler - To perform some OS related operations on SKB.
5037 * @sp: private member of the device structure,pointer to s2io_nic structure.
5038 * @skb : the socket buffer pointer.
5039 * @len : length of the packet
5040 * @cksum : FCS checksum of the frame.
5041 * @ring_no : the ring from which this RxD was extracted.
5043 * This function is called by the Tx interrupt serivce routine to perform
5044 * some OS related operations on the SKB before passing it to the upper
5045 * layers. It mainly checks if the checksum is OK, if so adds it to the
5046 * SKBs cksum variable, increments the Rx packet count and passes the SKB
5047 * to the upper layer. If the checksum is wrong, it increments the Rx
5048 * packet error count, frees the SKB and returns error.
5050 * SUCCESS on success and -1 on failure.
5052 static int rx_osm_handler(ring_info_t
*ring_data
, RxD_t
* rxdp
)
5054 nic_t
*sp
= ring_data
->nic
;
5055 struct net_device
*dev
= (struct net_device
*) sp
->dev
;
5056 struct sk_buff
*skb
= (struct sk_buff
*)
5057 ((unsigned long) rxdp
->Host_Control
);
5058 int ring_no
= ring_data
->ring_no
;
5059 u16 l3_csum
, l4_csum
;
5060 #ifdef CONFIG_2BUFF_MODE
5061 int buf0_len
= RXD_GET_BUFFER0_SIZE(rxdp
->Control_2
);
5062 int buf2_len
= RXD_GET_BUFFER2_SIZE(rxdp
->Control_2
);
5063 int get_block
= ring_data
->rx_curr_get_info
.block_index
;
5064 int get_off
= ring_data
->rx_curr_get_info
.offset
;
5065 buffAdd_t
*ba
= &ring_data
->ba
[get_block
][get_off
];
5066 unsigned char *buff
;
5068 u16 len
= (u16
) ((RXD_GET_BUFFER0_SIZE(rxdp
->Control_2
)) >> 48);;
5071 if (rxdp
->Control_1
& RXD_T_CODE
) {
5072 unsigned long long err
= rxdp
->Control_1
& RXD_T_CODE
;
5073 DBG_PRINT(ERR_DBG
, "%s: Rx error Value: 0x%llx\n",
5076 sp
->stats
.rx_crc_errors
++;
5077 atomic_dec(&sp
->rx_bufs_left
[ring_no
]);
5078 rxdp
->Host_Control
= 0;
5082 /* Updating statistics */
5083 rxdp
->Host_Control
= 0;
5085 sp
->stats
.rx_packets
++;
5086 #ifndef CONFIG_2BUFF_MODE
5087 sp
->stats
.rx_bytes
+= len
;
5089 sp
->stats
.rx_bytes
+= buf0_len
+ buf2_len
;
5092 #ifndef CONFIG_2BUFF_MODE
5095 buff
= skb_push(skb
, buf0_len
);
5096 memcpy(buff
, ba
->ba_0
, buf0_len
);
5097 skb_put(skb
, buf2_len
);
5100 if ((rxdp
->Control_1
& TCP_OR_UDP_FRAME
) &&
5102 l3_csum
= RXD_GET_L3_CKSUM(rxdp
->Control_1
);
5103 l4_csum
= RXD_GET_L4_CKSUM(rxdp
->Control_1
);
5104 if ((l3_csum
== L3_CKSUM_OK
) && (l4_csum
== L4_CKSUM_OK
)) {
5106 * NIC verifies if the Checksum of the received
5107 * frame is Ok or not and accordingly returns
5108 * a flag in the RxD.
5110 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
5113 * Packet with erroneous checksum, let the
5114 * upper layers deal with it.
5116 skb
->ip_summed
= CHECKSUM_NONE
;
5119 skb
->ip_summed
= CHECKSUM_NONE
;
5122 skb
->protocol
= eth_type_trans(skb
, dev
);
5123 #ifdef CONFIG_S2IO_NAPI
5124 if (sp
->vlgrp
&& RXD_GET_VLAN_TAG(rxdp
->Control_2
)) {
5125 /* Queueing the vlan frame to the upper layer */
5126 vlan_hwaccel_receive_skb(skb
, sp
->vlgrp
,
5127 RXD_GET_VLAN_TAG(rxdp
->Control_2
));
5129 netif_receive_skb(skb
);
5132 if (sp
->vlgrp
&& RXD_GET_VLAN_TAG(rxdp
->Control_2
)) {
5133 /* Queueing the vlan frame to the upper layer */
5134 vlan_hwaccel_rx(skb
, sp
->vlgrp
,
5135 RXD_GET_VLAN_TAG(rxdp
->Control_2
));
5140 dev
->last_rx
= jiffies
;
5141 atomic_dec(&sp
->rx_bufs_left
[ring_no
]);
5146 * s2io_link - stops/starts the Tx queue.
5147 * @sp : private member of the device structure, which is a pointer to the
5148 * s2io_nic structure.
5149 * @link : inidicates whether link is UP/DOWN.
5151 * This function stops/starts the Tx queue depending on whether the link
5152 * status of the NIC is is down or up. This is called by the Alarm
5153 * interrupt handler whenever a link change interrupt comes up.
5158 void s2io_link(nic_t
* sp
, int link
)
5160 struct net_device
*dev
= (struct net_device
*) sp
->dev
;
5162 if (link
!= sp
->last_link_state
) {
5163 if (link
== LINK_DOWN
) {
5164 DBG_PRINT(ERR_DBG
, "%s: Link down\n", dev
->name
);
5165 netif_carrier_off(dev
);
5167 DBG_PRINT(ERR_DBG
, "%s: Link Up\n", dev
->name
);
5168 netif_carrier_on(dev
);
5171 sp
->last_link_state
= link
;
5175 * get_xena_rev_id - to identify revision ID of xena.
5176 * @pdev : PCI Dev structure
5178 * Function to identify the Revision ID of xena.
5180 * returns the revision ID of the device.
5183 int get_xena_rev_id(struct pci_dev
*pdev
)
5187 ret
= pci_read_config_byte(pdev
, PCI_REVISION_ID
, (u8
*) & id
);
5192 * s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
5193 * @sp : private member of the device structure, which is a pointer to the
5194 * s2io_nic structure.
5196 * This function initializes a few of the PCI and PCI-X configuration registers
5197 * with recommended values.
5202 static void s2io_init_pci(nic_t
* sp
)
5204 u16 pci_cmd
= 0, pcix_cmd
= 0;
5206 /* Enable Data Parity Error Recovery in PCI-X command register. */
5207 pci_read_config_word(sp
->pdev
, PCIX_COMMAND_REGISTER
,
5209 pci_write_config_word(sp
->pdev
, PCIX_COMMAND_REGISTER
,
5211 pci_read_config_word(sp
->pdev
, PCIX_COMMAND_REGISTER
,
5214 /* Set the PErr Response bit in PCI command register. */
5215 pci_read_config_word(sp
->pdev
, PCI_COMMAND
, &pci_cmd
);
5216 pci_write_config_word(sp
->pdev
, PCI_COMMAND
,
5217 (pci_cmd
| PCI_COMMAND_PARITY
));
5218 pci_read_config_word(sp
->pdev
, PCI_COMMAND
, &pci_cmd
);
5220 /* Forcibly disabling relaxed ordering capability of the card. */
5222 pci_write_config_word(sp
->pdev
, PCIX_COMMAND_REGISTER
,
5224 pci_read_config_word(sp
->pdev
, PCIX_COMMAND_REGISTER
,
5228 MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
5229 MODULE_LICENSE("GPL");
5230 module_param(tx_fifo_num
, int, 0);
5231 module_param(rx_ring_num
, int, 0);
5232 module_param_array(tx_fifo_len
, uint
, NULL
, 0);
5233 module_param_array(rx_ring_sz
, uint
, NULL
, 0);
5234 module_param_array(rts_frm_len
, uint
, NULL
, 0);
5235 module_param(use_continuous_tx_intrs
, int, 1);
5236 module_param(rmac_pause_time
, int, 0);
5237 module_param(mc_pause_threshold_q0q3
, int, 0);
5238 module_param(mc_pause_threshold_q4q7
, int, 0);
5239 module_param(shared_splits
, int, 0);
5240 module_param(tmac_util_period
, int, 0);
5241 module_param(rmac_util_period
, int, 0);
5242 module_param(bimodal
, bool, 0);
5243 #ifndef CONFIG_S2IO_NAPI
5244 module_param(indicate_max_pkts
, int, 0);
5246 module_param(rxsync_frequency
, int, 0);
5249 * s2io_init_nic - Initialization of the adapter .
5250 * @pdev : structure containing the PCI related information of the device.
5251 * @pre: List of PCI devices supported by the driver listed in s2io_tbl.
5253 * The function initializes an adapter identified by the pci_dec structure.
5254 * All OS related initialization including memory and device structure and
5255 * initlaization of the device private variable is done. Also the swapper
5256 * control register is initialized to enable read and write into the I/O
5257 * registers of the device.
5259 * returns 0 on success and negative on failure.
5262 static int __devinit
5263 s2io_init_nic(struct pci_dev
*pdev
, const struct pci_device_id
*pre
)
5266 struct net_device
*dev
;
5268 int dma_flag
= FALSE
;
5269 u32 mac_up
, mac_down
;
5270 u64 val64
= 0, tmp64
= 0;
5271 XENA_dev_config_t __iomem
*bar0
= NULL
;
5273 mac_info_t
*mac_control
;
5274 struct config_param
*config
;
5277 #ifdef CONFIG_S2IO_NAPI
5278 DBG_PRINT(ERR_DBG
, "NAPI support has been enabled\n");
5281 if ((ret
= pci_enable_device(pdev
))) {
5283 "s2io_init_nic: pci_enable_device failed\n");
5287 if (!pci_set_dma_mask(pdev
, DMA_64BIT_MASK
)) {
5288 DBG_PRINT(INIT_DBG
, "s2io_init_nic: Using 64bit DMA\n");
5290 if (pci_set_consistent_dma_mask
5291 (pdev
, DMA_64BIT_MASK
)) {
5293 "Unable to obtain 64bit DMA for \
5294 consistent allocations\n");
5295 pci_disable_device(pdev
);
5298 } else if (!pci_set_dma_mask(pdev
, DMA_32BIT_MASK
)) {
5299 DBG_PRINT(INIT_DBG
, "s2io_init_nic: Using 32bit DMA\n");
5301 pci_disable_device(pdev
);
5305 if (pci_request_regions(pdev
, s2io_driver_name
)) {
5306 DBG_PRINT(ERR_DBG
, "Request Regions failed\n"),
5307 pci_disable_device(pdev
);
5311 dev
= alloc_etherdev(sizeof(nic_t
));
5313 DBG_PRINT(ERR_DBG
, "Device allocation failed\n");
5314 pci_disable_device(pdev
);
5315 pci_release_regions(pdev
);
5319 pci_set_master(pdev
);
5320 pci_set_drvdata(pdev
, dev
);
5321 SET_MODULE_OWNER(dev
);
5322 SET_NETDEV_DEV(dev
, &pdev
->dev
);
5324 /* Private member variable initialized to s2io NIC structure */
5326 memset(sp
, 0, sizeof(nic_t
));
5329 sp
->high_dma_flag
= dma_flag
;
5330 sp
->device_enabled_once
= FALSE
;
5332 if ((pdev
->device
== PCI_DEVICE_ID_HERC_WIN
) ||
5333 (pdev
->device
== PCI_DEVICE_ID_HERC_UNI
))
5334 sp
->device_type
= XFRAME_II_DEVICE
;
5336 sp
->device_type
= XFRAME_I_DEVICE
;
5338 /* Initialize some PCI/PCI-X fields of the NIC. */
5342 * Setting the device configuration parameters.
5343 * Most of these parameters can be specified by the user during
5344 * module insertion as they are module loadable parameters. If
5345 * these parameters are not not specified during load time, they
5346 * are initialized with default values.
5348 mac_control
= &sp
->mac_control
;
5349 config
= &sp
->config
;
5351 /* Tx side parameters. */
5352 if (tx_fifo_len
[0] == 0)
5353 tx_fifo_len
[0] = DEFAULT_FIFO_LEN
; /* Default value. */
5354 config
->tx_fifo_num
= tx_fifo_num
;
5355 for (i
= 0; i
< MAX_TX_FIFOS
; i
++) {
5356 config
->tx_cfg
[i
].fifo_len
= tx_fifo_len
[i
];
5357 config
->tx_cfg
[i
].fifo_priority
= i
;
5360 /* mapping the QoS priority to the configured fifos */
5361 for (i
= 0; i
< MAX_TX_FIFOS
; i
++)
5362 config
->fifo_mapping
[i
] = fifo_map
[config
->tx_fifo_num
][i
];
5364 config
->tx_intr_type
= TXD_INT_TYPE_UTILZ
;
5365 for (i
= 0; i
< config
->tx_fifo_num
; i
++) {
5366 config
->tx_cfg
[i
].f_no_snoop
=
5367 (NO_SNOOP_TXD
| NO_SNOOP_TXD_BUFFER
);
5368 if (config
->tx_cfg
[i
].fifo_len
< 65) {
5369 config
->tx_intr_type
= TXD_INT_TYPE_PER_LIST
;
5373 config
->max_txds
= MAX_SKB_FRAGS
+ 1;
5375 /* Rx side parameters. */
5376 if (rx_ring_sz
[0] == 0)
5377 rx_ring_sz
[0] = SMALL_BLK_CNT
; /* Default value. */
5378 config
->rx_ring_num
= rx_ring_num
;
5379 for (i
= 0; i
< MAX_RX_RINGS
; i
++) {
5380 config
->rx_cfg
[i
].num_rxd
= rx_ring_sz
[i
] *
5381 (MAX_RXDS_PER_BLOCK
+ 1);
5382 config
->rx_cfg
[i
].ring_priority
= i
;
5385 for (i
= 0; i
< rx_ring_num
; i
++) {
5386 config
->rx_cfg
[i
].ring_org
= RING_ORG_BUFF1
;
5387 config
->rx_cfg
[i
].f_no_snoop
=
5388 (NO_SNOOP_RXD
| NO_SNOOP_RXD_BUFFER
);
5391 /* Setting Mac Control parameters */
5392 mac_control
->rmac_pause_time
= rmac_pause_time
;
5393 mac_control
->mc_pause_threshold_q0q3
= mc_pause_threshold_q0q3
;
5394 mac_control
->mc_pause_threshold_q4q7
= mc_pause_threshold_q4q7
;
5397 /* Initialize Ring buffer parameters. */
5398 for (i
= 0; i
< config
->rx_ring_num
; i
++)
5399 atomic_set(&sp
->rx_bufs_left
[i
], 0);
5401 /* Initialize the number of ISRs currently running */
5402 atomic_set(&sp
->isr_cnt
, 0);
5404 /* initialize the shared memory used by the NIC and the host */
5405 if (init_shared_mem(sp
)) {
5406 DBG_PRINT(ERR_DBG
, "%s: Memory allocation failed\n",
5409 goto mem_alloc_failed
;
5412 sp
->bar0
= ioremap(pci_resource_start(pdev
, 0),
5413 pci_resource_len(pdev
, 0));
5415 DBG_PRINT(ERR_DBG
, "%s: S2IO: cannot remap io mem1\n",
5418 goto bar0_remap_failed
;
5421 sp
->bar1
= ioremap(pci_resource_start(pdev
, 2),
5422 pci_resource_len(pdev
, 2));
5424 DBG_PRINT(ERR_DBG
, "%s: S2IO: cannot remap io mem2\n",
5427 goto bar1_remap_failed
;
5430 dev
->irq
= pdev
->irq
;
5431 dev
->base_addr
= (unsigned long) sp
->bar0
;
5433 /* Initializing the BAR1 address as the start of the FIFO pointer. */
5434 for (j
= 0; j
< MAX_TX_FIFOS
; j
++) {
5435 mac_control
->tx_FIFO_start
[j
] = (TxFIFO_element_t __iomem
*)
5436 (sp
->bar1
+ (j
* 0x00020000));
5439 /* Driver entry points */
5440 dev
->open
= &s2io_open
;
5441 dev
->stop
= &s2io_close
;
5442 dev
->hard_start_xmit
= &s2io_xmit
;
5443 dev
->get_stats
= &s2io_get_stats
;
5444 dev
->set_multicast_list
= &s2io_set_multicast
;
5445 dev
->do_ioctl
= &s2io_ioctl
;
5446 dev
->change_mtu
= &s2io_change_mtu
;
5447 SET_ETHTOOL_OPS(dev
, &netdev_ethtool_ops
);
5448 dev
->features
|= NETIF_F_HW_VLAN_TX
| NETIF_F_HW_VLAN_RX
;
5449 dev
->vlan_rx_register
= s2io_vlan_rx_register
;
5450 dev
->vlan_rx_kill_vid
= (void *)s2io_vlan_rx_kill_vid
;
5453 * will use eth_mac_addr() for dev->set_mac_address
5454 * mac address will be set every time dev->open() is called
5456 #if defined(CONFIG_S2IO_NAPI)
5457 dev
->poll
= s2io_poll
;
5461 dev
->features
|= NETIF_F_SG
| NETIF_F_IP_CSUM
;
5462 if (sp
->high_dma_flag
== TRUE
)
5463 dev
->features
|= NETIF_F_HIGHDMA
;
5465 dev
->features
|= NETIF_F_TSO
;
5468 dev
->tx_timeout
= &s2io_tx_watchdog
;
5469 dev
->watchdog_timeo
= WATCH_DOG_TIMEOUT
;
5470 INIT_WORK(&sp
->rst_timer_task
,
5471 (void (*)(void *)) s2io_restart_nic
, dev
);
5472 INIT_WORK(&sp
->set_link_task
,
5473 (void (*)(void *)) s2io_set_link
, sp
);
5475 pci_save_state(sp
->pdev
);
5477 /* Setting swapper control on the NIC, for proper reset operation */
5478 if (s2io_set_swapper(sp
)) {
5479 DBG_PRINT(ERR_DBG
, "%s:swapper settings are wrong\n",
5482 goto set_swap_failed
;
5485 /* Verify if the Herc works on the slot its placed into */
5486 if (sp
->device_type
& XFRAME_II_DEVICE
) {
5487 mode
= s2io_verify_pci_mode(sp
);
5489 DBG_PRINT(ERR_DBG
, "%s: ", __FUNCTION__
);
5490 DBG_PRINT(ERR_DBG
, " Unsupported PCI bus mode\n");
5492 goto set_swap_failed
;
5496 /* Not needed for Herc */
5497 if (sp
->device_type
& XFRAME_I_DEVICE
) {
5499 * Fix for all "FFs" MAC address problems observed on
5502 fix_mac_address(sp
);
5507 * MAC address initialization.
5508 * For now only one mac address will be read and used.
5511 val64
= RMAC_ADDR_CMD_MEM_RD
| RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD
|
5512 RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET
);
5513 writeq(val64
, &bar0
->rmac_addr_cmd_mem
);
5514 wait_for_cmd_complete(sp
);
5516 tmp64
= readq(&bar0
->rmac_addr_data0_mem
);
5517 mac_down
= (u32
) tmp64
;
5518 mac_up
= (u32
) (tmp64
>> 32);
5520 memset(sp
->def_mac_addr
[0].mac_addr
, 0, sizeof(ETH_ALEN
));
5522 sp
->def_mac_addr
[0].mac_addr
[3] = (u8
) (mac_up
);
5523 sp
->def_mac_addr
[0].mac_addr
[2] = (u8
) (mac_up
>> 8);
5524 sp
->def_mac_addr
[0].mac_addr
[1] = (u8
) (mac_up
>> 16);
5525 sp
->def_mac_addr
[0].mac_addr
[0] = (u8
) (mac_up
>> 24);
5526 sp
->def_mac_addr
[0].mac_addr
[5] = (u8
) (mac_down
>> 16);
5527 sp
->def_mac_addr
[0].mac_addr
[4] = (u8
) (mac_down
>> 24);
5529 /* Set the factory defined MAC address initially */
5530 dev
->addr_len
= ETH_ALEN
;
5531 memcpy(dev
->dev_addr
, sp
->def_mac_addr
, ETH_ALEN
);
5534 * Initialize the tasklet status and link state flags
5535 * and the card state parameter
5537 atomic_set(&(sp
->card_state
), 0);
5538 sp
->tasklet_status
= 0;
5541 /* Initialize spinlocks */
5542 spin_lock_init(&sp
->tx_lock
);
5543 #ifndef CONFIG_S2IO_NAPI
5544 spin_lock_init(&sp
->put_lock
);
5546 spin_lock_init(&sp
->rx_lock
);
5549 * SXE-002: Configure link and activity LED to init state
5552 subid
= sp
->pdev
->subsystem_device
;
5553 if ((subid
& 0xFF) >= 0x07) {
5554 val64
= readq(&bar0
->gpio_control
);
5555 val64
|= 0x0000800000000000ULL
;
5556 writeq(val64
, &bar0
->gpio_control
);
5557 val64
= 0x0411040400000000ULL
;
5558 writeq(val64
, (void __iomem
*) bar0
+ 0x2700);
5559 val64
= readq(&bar0
->gpio_control
);
5562 sp
->rx_csum
= 1; /* Rx chksum verify enabled by default */
5564 if (register_netdev(dev
)) {
5565 DBG_PRINT(ERR_DBG
, "Device registration failed\n");
5567 goto register_failed
;
5570 if (sp
->device_type
& XFRAME_II_DEVICE
) {
5571 DBG_PRINT(ERR_DBG
, "%s: Neterion Xframe II 10GbE adapter ",
5573 DBG_PRINT(ERR_DBG
, "(rev %d), %s",
5574 get_xena_rev_id(sp
->pdev
),
5575 s2io_driver_version
);
5576 #ifdef CONFIG_2BUFF_MODE
5577 DBG_PRINT(ERR_DBG
, ", Buffer mode %d",2);
5580 DBG_PRINT(ERR_DBG
, "\nCopyright(c) 2002-2005 Neterion Inc.\n");
5581 DBG_PRINT(ERR_DBG
, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
5582 sp
->def_mac_addr
[0].mac_addr
[0],
5583 sp
->def_mac_addr
[0].mac_addr
[1],
5584 sp
->def_mac_addr
[0].mac_addr
[2],
5585 sp
->def_mac_addr
[0].mac_addr
[3],
5586 sp
->def_mac_addr
[0].mac_addr
[4],
5587 sp
->def_mac_addr
[0].mac_addr
[5]);
5588 mode
= s2io_print_pci_mode(sp
);
5590 DBG_PRINT(ERR_DBG
, " Unsupported PCI bus mode ");
5592 goto set_swap_failed
;
5595 DBG_PRINT(ERR_DBG
, "%s: Neterion Xframe I 10GbE adapter ",
5597 DBG_PRINT(ERR_DBG
, "(rev %d), %s",
5598 get_xena_rev_id(sp
->pdev
),
5599 s2io_driver_version
);
5600 #ifdef CONFIG_2BUFF_MODE
5601 DBG_PRINT(ERR_DBG
, ", Buffer mode %d",2);
5603 DBG_PRINT(ERR_DBG
, "\nCopyright(c) 2002-2005 Neterion Inc.\n");
5604 DBG_PRINT(ERR_DBG
, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
5605 sp
->def_mac_addr
[0].mac_addr
[0],
5606 sp
->def_mac_addr
[0].mac_addr
[1],
5607 sp
->def_mac_addr
[0].mac_addr
[2],
5608 sp
->def_mac_addr
[0].mac_addr
[3],
5609 sp
->def_mac_addr
[0].mac_addr
[4],
5610 sp
->def_mac_addr
[0].mac_addr
[5]);
5613 /* Initialize device name */
5614 strcpy(sp
->name
, dev
->name
);
5615 if (sp
->device_type
& XFRAME_II_DEVICE
)
5616 strcat(sp
->name
, ": Neterion Xframe II 10GbE adapter");
5618 strcat(sp
->name
, ": Neterion Xframe I 10GbE adapter");
5620 /* Initialize bimodal Interrupts */
5621 sp
->config
.bimodal
= bimodal
;
5622 if (!(sp
->device_type
& XFRAME_II_DEVICE
) && bimodal
) {
5623 sp
->config
.bimodal
= 0;
5624 DBG_PRINT(ERR_DBG
,"%s:Bimodal intr not supported by Xframe I\n",
5629 * Make Link state as off at this point, when the Link change
5630 * interrupt comes the state will be automatically changed to
5633 netif_carrier_off(dev
);
5644 free_shared_mem(sp
);
5645 pci_disable_device(pdev
);
5646 pci_release_regions(pdev
);
5647 pci_set_drvdata(pdev
, NULL
);
5654 * s2io_rem_nic - Free the PCI device
5655 * @pdev: structure containing the PCI related information of the device.
5656 * Description: This function is called by the Pci subsystem to release a
5657 * PCI device and free up all resource held up by the device. This could
5658 * be in response to a Hot plug event or when the driver is to be removed
5662 static void __devexit
s2io_rem_nic(struct pci_dev
*pdev
)
5664 struct net_device
*dev
=
5665 (struct net_device
*) pci_get_drvdata(pdev
);
5669 DBG_PRINT(ERR_DBG
, "Driver Data is NULL!!\n");
5674 unregister_netdev(dev
);
5676 free_shared_mem(sp
);
5679 pci_disable_device(pdev
);
5680 pci_release_regions(pdev
);
5681 pci_set_drvdata(pdev
, NULL
);
5686 * s2io_starter - Entry point for the driver
5687 * Description: This function is the entry point for the driver. It verifies
5688 * the module loadable parameters and initializes PCI configuration space.
5691 int __init
s2io_starter(void)
5693 return pci_module_init(&s2io_driver
);
5697 * s2io_closer - Cleanup routine for the driver
5698 * Description: This function is the cleanup routine for the driver. It unregist * ers the driver.
5701 void s2io_closer(void)
5703 pci_unregister_driver(&s2io_driver
);
5704 DBG_PRINT(INIT_DBG
, "cleanup done\n");
5707 module_init(s2io_starter
);
5708 module_exit(s2io_closer
);