1 /************************************************************************
2 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3 * Copyright(c) 2002-2005 Neterion Inc.
5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference.
7 * Drivers based on or derived from this code fall under the GPL and must
8 * retain the authorship, copyright and license notice. This file is not
9 * a complete program and may only be used when the entire operating
10 * system is licensed under the GPL.
11 * See the file COPYING in this distribution for more information.
14 * Jeff Garzik : For pointing out the improper error condition
15 * check in the s2io_xmit routine and also some
16 * issues in the Tx watchdog function. Also for
17 * patiently answering all those innumerable
18 * questions regarding the 2.6 porting issues.
19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
20 * macros available only in 2.6 Kernel.
21 * Francois Romieu : For pointing out all the code parts that were
22 * deprecated and also styling related comments.
23 * Grant Grundler : For helping me get rid of some Architecture
25 * Christopher Hellwig : Some more 2.6 specific issues in the driver.
27 * The module loadable parameters that are supported by the driver and a brief
28 * explanation of all the variables.
29 * rx_ring_num : This can be used to program the number of receive rings used in the driver.
31 * rx_ring_sz: This defines the number of descriptors each ring can have. This
32 * is also an array of size 8.
33 * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
34 * values are 1, 2 and 3.
35 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
36 * tx_fifo_len: This too is an array of 8. Each element defines the number of
37 * Tx descriptors that can be associated with each corresponding FIFO.
38 ************************************************************************/
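/*
 * Illustrative usage of the module parameters documented above (a
 * hypothetical invocation, not taken from this file; the values are only
 * examples):
 *
 *	modprobe s2io tx_fifo_num=2 tx_fifo_len=512,512 \
 *		rx_ring_num=2 rx_ring_mode=1
 *
 * Array parameters take one comma-separated value per FIFO/ring; elements
 * left unspecified keep the driver defaults.
 */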
40 #include <linux/config.h>
41 #include <linux/module.h>
42 #include <linux/types.h>
43 #include <linux/errno.h>
44 #include <linux/ioport.h>
45 #include <linux/pci.h>
46 #include <linux/dma-mapping.h>
47 #include <linux/kernel.h>
48 #include <linux/netdevice.h>
49 #include <linux/etherdevice.h>
50 #include <linux/skbuff.h>
51 #include <linux/init.h>
52 #include <linux/delay.h>
53 #include <linux/stddef.h>
54 #include <linux/ioctl.h>
55 #include <linux/timex.h>
56 #include <linux/sched.h>
57 #include <linux/ethtool.h>
58 #include <linux/workqueue.h>
59 #include <linux/if_vlan.h>
61 #include <asm/system.h>
62 #include <asm/uaccess.h>
67 #include "s2io-regs.h"
69 #define DRV_VERSION "Version 2.0.9.4"
71 /* S2io Driver name & version. */
static char s2io_driver_name[] = "Neterion";
static char s2io_driver_version[] = DRV_VERSION;
int rxd_size[4] = {32, 48, 48, 64};
int rxd_count[4] = {127, 85, 85, 63};
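/*
 * Note (explanatory, added): both tables above are indexed by the receive
 * descriptor mode (nic->rxd_mode).  rxd_size[] gives the size in bytes of
 * one RxD for that mode, and rxd_count[] the number of RxDs that fit in
 * one rx block, e.g. 32-byte descriptors allow 127 RxDs per block.
 */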
static inline int RXD_IS_UP2DT(RxD_t *rxdp)
{
	int ret;

	ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
	       (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));

	return ret;
}
89 * Cards with the following subsystem_ids have a link state indication
90 * problem: 600B, 600C, 600D, 640B, 640C and 640D.
91 * The macro below identifies these cards given the subsystem_id.
93 #define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
94 (dev_type == XFRAME_I_DEVICE) ? \
95 ((((subid >= 0x600B) && (subid <= 0x600D)) || \
96 ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
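/*
 * Worked example (illustration only): for a Xframe-I adapter,
 * CARDS_WITH_FAULTY_LINK_INDICATORS(XFRAME_I_DEVICE, 0x640C) evaluates
 * to 1, whereas any Xframe-II device, or a subsystem id outside the
 * 0x600B-0x600D and 0x640B-0x640D ranges, evaluates to 0.
 */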
98 #define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
99 ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
100 #define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
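/*
 * Note (explanatory, added): test_and_set_bit() returns the previous
 * value of the bit, so TASKLET_IN_USE is non-zero when the tasklet had
 * already been claimed by another context.
 */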
103 static inline int rx_buffer_level(nic_t
* sp
, int rxb_size
, int ring
)
106 mac_info_t
*mac_control
;
108 mac_control
= &sp
->mac_control
;
109 if ((mac_control
->rings
[ring
].pkt_cnt
- rxb_size
) > 16) {
111 if (rxb_size
<= rxd_count
[sp
->rxd_mode
]) {
119 /* Ethtool related variables and Macros. */
static char s2io_gstrings[][ETH_GSTRING_LEN] = {
121 "Register test\t(offline)",
122 "Eeprom test\t(offline)",
123 "Link test\t(online)",
124 "RLDRAM test\t(offline)",
125 "BIST Test\t(offline)"
static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
130 {"tmac_data_octets"},
134 {"tmac_pause_ctrl_frms"},
135 {"tmac_any_err_frms"},
136 {"tmac_vld_ip_octets"},
144 {"rmac_data_octets"},
145 {"rmac_fcs_err_frms"},
147 {"rmac_vld_mcst_frms"},
148 {"rmac_vld_bcst_frms"},
149 {"rmac_in_rng_len_err_frms"},
151 {"rmac_pause_ctrl_frms"},
152 {"rmac_discarded_frms"},
153 {"rmac_usized_frms"},
154 {"rmac_osized_frms"},
156 {"rmac_jabber_frms"},
164 {"rmac_err_drp_udp"},
166 {"rmac_accepted_ip"},
168 {"\n DRIVER STATISTICS"},
169 {"single_bit_ecc_errs"},
170 {"double_bit_ecc_errs"},
173 #define S2IO_STAT_LEN sizeof(ethtool_stats_keys)/ ETH_GSTRING_LEN
174 #define S2IO_STAT_STRINGS_LEN S2IO_STAT_LEN * ETH_GSTRING_LEN
176 #define S2IO_TEST_LEN sizeof(s2io_gstrings) / ETH_GSTRING_LEN
177 #define S2IO_STRINGS_LEN S2IO_TEST_LEN * ETH_GSTRING_LEN
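/*
 * Worked example (assumes the standard ETH_GSTRING_LEN of 32 bytes from
 * <linux/ethtool.h>): with the 5 self-test names in s2io_gstrings[],
 * S2IO_TEST_LEN evaluates to 5 and S2IO_STRINGS_LEN to 160, the number
 * of bytes handed to ethtool for the test-name strings.
 */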
179 #define S2IO_TIMER_CONF(timer, handle, arg, exp) \
180 init_timer(&timer); \
181 timer.function = handle; \
182 timer.data = (unsigned long) arg; \
183 mod_timer(&timer, (jiffies + exp)) \
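/*
 * Illustrative use of the macro above (hypothetical handler and timeout,
 * not taken from this section): arm a timer that fires in half a second
 * and passes the device private pointer to the handler.
 *
 *	S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ / 2));
 */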
static void s2io_vlan_rx_register(struct net_device *dev,
				  struct vlan_group *grp)
{
	nic_t *nic = dev->priv;
	unsigned long flags;

	spin_lock_irqsave(&nic->tx_lock, flags);
	nic->vlgrp = grp;
	spin_unlock_irqrestore(&nic->tx_lock, flags);
}
/* Unregister the vlan */
static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
{
	nic_t *nic = dev->priv;
	unsigned long flags;

	spin_lock_irqsave(&nic->tx_lock, flags);
	if (nic->vlgrp)
		nic->vlgrp->vlan_devices[vid] = NULL;
	spin_unlock_irqrestore(&nic->tx_lock, flags);
}
210 * Constants to be programmed into the Xena's registers, to configure the XAUI.
214 #define SWITCH_SIGN 0xA5A5A5A5A5A5A5A5ULL
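/*
 * Note (explanatory, added): SWITCH_SIGN and END_SIGN act as sentinels
 * inside the configuration arrays below; init_nic() keeps writing array
 * entries into the DTX/MDIO control registers until it encounters one of
 * them (see the XAUI configuration comment in init_nic()).
 */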
217 static u64 herc_act_dtx_cfg
[] = {
219 0x8000051536750000ULL
, 0x80000515367500E0ULL
,
221 0x8000051536750004ULL
, 0x80000515367500E4ULL
,
223 0x80010515003F0000ULL
, 0x80010515003F00E0ULL
,
225 0x80010515003F0004ULL
, 0x80010515003F00E4ULL
,
227 0x801205150D440000ULL
, 0x801205150D4400E0ULL
,
229 0x801205150D440004ULL
, 0x801205150D4400E4ULL
,
231 0x80020515F2100000ULL
, 0x80020515F21000E0ULL
,
233 0x80020515F2100004ULL
, 0x80020515F21000E4ULL
,
238 static u64 xena_mdio_cfg
[] = {
240 0xC001010000000000ULL
, 0xC0010100000000E0ULL
,
241 0xC0010100008000E4ULL
,
242 /* Remove Reset from PMA PLL */
243 0xC001010000000000ULL
, 0xC0010100000000E0ULL
,
244 0xC0010100000000E4ULL
,
248 static u64 xena_dtx_cfg
[] = {
249 0x8000051500000000ULL
, 0x80000515000000E0ULL
,
250 0x80000515D93500E4ULL
, 0x8001051500000000ULL
,
251 0x80010515000000E0ULL
, 0x80010515001E00E4ULL
,
252 0x8002051500000000ULL
, 0x80020515000000E0ULL
,
253 0x80020515F21000E4ULL
,
254 /* Set PADLOOPBACKN */
255 0x8002051500000000ULL
, 0x80020515000000E0ULL
,
256 0x80020515B20000E4ULL
, 0x8003051500000000ULL
,
257 0x80030515000000E0ULL
, 0x80030515B20000E4ULL
,
258 0x8004051500000000ULL
, 0x80040515000000E0ULL
,
259 0x80040515B20000E4ULL
, 0x8005051500000000ULL
,
260 0x80050515000000E0ULL
, 0x80050515B20000E4ULL
,
262 /* Remove PADLOOPBACKN */
263 0x8002051500000000ULL
, 0x80020515000000E0ULL
,
264 0x80020515F20000E4ULL
, 0x8003051500000000ULL
,
265 0x80030515000000E0ULL
, 0x80030515F20000E4ULL
,
266 0x8004051500000000ULL
, 0x80040515000000E0ULL
,
267 0x80040515F20000E4ULL
, 0x8005051500000000ULL
,
268 0x80050515000000E0ULL
, 0x80050515F20000E4ULL
,
273 * Constants for fixing the MAC address problem seen mostly on Alpha platforms.
276 static u64 fix_mac
[] = {
277 0x0060000000000000ULL
, 0x0060600000000000ULL
,
278 0x0040600000000000ULL
, 0x0000600000000000ULL
,
279 0x0020600000000000ULL
, 0x0060600000000000ULL
,
280 0x0020600000000000ULL
, 0x0060600000000000ULL
,
281 0x0020600000000000ULL
, 0x0060600000000000ULL
,
282 0x0020600000000000ULL
, 0x0060600000000000ULL
,
283 0x0020600000000000ULL
, 0x0060600000000000ULL
,
284 0x0020600000000000ULL
, 0x0060600000000000ULL
,
285 0x0020600000000000ULL
, 0x0060600000000000ULL
,
286 0x0020600000000000ULL
, 0x0060600000000000ULL
,
287 0x0020600000000000ULL
, 0x0060600000000000ULL
,
288 0x0020600000000000ULL
, 0x0060600000000000ULL
,
289 0x0020600000000000ULL
, 0x0000600000000000ULL
,
290 0x0040600000000000ULL
, 0x0060600000000000ULL
,
/* Module Loadable parameters. */
static unsigned int tx_fifo_num = 1;
static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
    {[0 ...(MAX_TX_FIFOS - 1)] = 0 };
static unsigned int rx_ring_num = 1;
static unsigned int rx_ring_sz[MAX_RX_RINGS] =
    {[0 ...(MAX_RX_RINGS - 1)] = 0 };
static unsigned int rts_frm_len[MAX_RX_RINGS] =
    {[0 ...(MAX_RX_RINGS - 1)] = 0 };
static unsigned int rx_ring_mode = 1;
static unsigned int use_continuous_tx_intrs = 1;
static unsigned int rmac_pause_time = 65535;
static unsigned int mc_pause_threshold_q0q3 = 187;
static unsigned int mc_pause_threshold_q4q7 = 187;
static unsigned int shared_splits;
static unsigned int tmac_util_period = 5;
static unsigned int rmac_util_period = 5;
static unsigned int bimodal = 0;
static unsigned int l3l4hdr_size = 128;
#ifndef CONFIG_S2IO_NAPI
static unsigned int indicate_max_pkts;
#endif
/* Frequency of Rx desc syncs expressed as power of 2 */
static unsigned int rxsync_frequency = 3;
/* Interrupt type. Values can be 0(INTA), 1(MSI), 2(MSI_X) */
static unsigned int intr_type = 0;
323 * This table lists all the devices that this driver supports.
static struct pci_device_id s2io_tbl[] __devinitdata = {
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
	{0,}
};

MODULE_DEVICE_TABLE(pci, s2io_tbl);

static struct pci_driver s2io_driver = {
	.id_table = s2io_tbl,
	.probe = s2io_init_nic,
	.remove = __devexit_p(s2io_rem_nic),
};
346 /* A simplifier macro used both by init and free shared_mem Fns(). */
347 #define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each)
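/*
 * Worked example: the macro above is a plain ceiling division.  With a
 * FIFO of 100 TxD lists and 'per_each' (lists per page) of 32,
 * TXD_MEM_PAGE_CNT(100, 32) = (100 + 31) / 32 = 4 pages are needed, the
 * last page only partially used.
 */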
350 * init_shared_mem - Allocation and Initialization of Memory
351 * @nic: Device private variable.
352 * Description: The function allocates all the memory areas shared
353 * between the NIC and the driver. This includes Tx descriptors,
354 * Rx descriptors and the statistics block.
357 static int init_shared_mem(struct s2io_nic
*nic
)
360 void *tmp_v_addr
, *tmp_v_addr_next
;
361 dma_addr_t tmp_p_addr
, tmp_p_addr_next
;
362 RxD_block_t
*pre_rxd_blk
= NULL
;
363 int i
, j
, blk_cnt
, rx_sz
, tx_sz
;
364 int lst_size
, lst_per_page
;
365 struct net_device
*dev
= nic
->dev
;
369 mac_info_t
*mac_control
;
370 struct config_param
*config
;
372 mac_control
= &nic
->mac_control
;
373 config
= &nic
->config
;
376 /* Allocation and initialization of TXDLs in FIFOs */
378 for (i
= 0; i
< config
->tx_fifo_num
; i
++) {
379 size
+= config
->tx_cfg
[i
].fifo_len
;
381 if (size
> MAX_AVAILABLE_TXDS
) {
382 DBG_PRINT(ERR_DBG
, "%s: Requested TxDs too high, ",
384 DBG_PRINT(ERR_DBG
, "Requested: %d, max supported: 8192\n", size
);
388 lst_size
= (sizeof(TxD_t
) * config
->max_txds
);
389 tx_sz
= lst_size
* size
;
390 lst_per_page
= PAGE_SIZE
/ lst_size
;
392 for (i
= 0; i
< config
->tx_fifo_num
; i
++) {
393 int fifo_len
= config
->tx_cfg
[i
].fifo_len
;
394 int list_holder_size
= fifo_len
* sizeof(list_info_hold_t
);
395 mac_control
->fifos
[i
].list_info
= kmalloc(list_holder_size
,
397 if (!mac_control
->fifos
[i
].list_info
) {
399 "Malloc failed for list_info\n");
402 memset(mac_control
->fifos
[i
].list_info
, 0, list_holder_size
);
404 for (i
= 0; i
< config
->tx_fifo_num
; i
++) {
405 int page_num
= TXD_MEM_PAGE_CNT(config
->tx_cfg
[i
].fifo_len
,
407 mac_control
->fifos
[i
].tx_curr_put_info
.offset
= 0;
408 mac_control
->fifos
[i
].tx_curr_put_info
.fifo_len
=
409 config
->tx_cfg
[i
].fifo_len
- 1;
410 mac_control
->fifos
[i
].tx_curr_get_info
.offset
= 0;
411 mac_control
->fifos
[i
].tx_curr_get_info
.fifo_len
=
412 config
->tx_cfg
[i
].fifo_len
- 1;
413 mac_control
->fifos
[i
].fifo_no
= i
;
414 mac_control
->fifos
[i
].nic
= nic
;
415 mac_control
->fifos
[i
].max_txds
= MAX_SKB_FRAGS
+ 2;
417 for (j
= 0; j
< page_num
; j
++) {
421 tmp_v
= pci_alloc_consistent(nic
->pdev
,
425 "pci_alloc_consistent ");
426 DBG_PRINT(ERR_DBG
, "failed for TxDL\n");
429 /* If we got a zero DMA address (can happen on
430 * certain platforms like PPC), reallocate.
431 * Store virtual address of page we don't want,
435 mac_control
->zerodma_virt_addr
= tmp_v
;
437 "%s: Zero DMA address for TxDL. ", dev
->name
);
439 "Virtual address %p\n", tmp_v
);
440 tmp_v
= pci_alloc_consistent(nic
->pdev
,
444 "pci_alloc_consistent ");
445 DBG_PRINT(ERR_DBG
, "failed for TxDL\n");
449 while (k
< lst_per_page
) {
450 int l
= (j
* lst_per_page
) + k
;
451 if (l
== config
->tx_cfg
[i
].fifo_len
)
453 mac_control
->fifos
[i
].list_info
[l
].list_virt_addr
=
454 tmp_v
+ (k
* lst_size
);
455 mac_control
->fifos
[i
].list_info
[l
].list_phy_addr
=
456 tmp_p
+ (k
* lst_size
);
462 nic
->ufo_in_band_v
= kmalloc((sizeof(u64
) * size
), GFP_KERNEL
);
463 if (!nic
->ufo_in_band_v
)
466 /* Allocation and initialization of RXDs in Rings */
468 for (i
= 0; i
< config
->rx_ring_num
; i
++) {
469 if (config
->rx_cfg
[i
].num_rxd
%
470 (rxd_count
[nic
->rxd_mode
] + 1)) {
471 DBG_PRINT(ERR_DBG
, "%s: RxD count of ", dev
->name
);
472 DBG_PRINT(ERR_DBG
, "Ring%d is not a multiple of ",
474 DBG_PRINT(ERR_DBG
, "RxDs per Block");
477 size
+= config
->rx_cfg
[i
].num_rxd
;
478 mac_control
->rings
[i
].block_count
=
479 config
->rx_cfg
[i
].num_rxd
/
480 (rxd_count
[nic
->rxd_mode
] + 1 );
481 mac_control
->rings
[i
].pkt_cnt
= config
->rx_cfg
[i
].num_rxd
-
482 mac_control
->rings
[i
].block_count
;
484 if (nic
->rxd_mode
== RXD_MODE_1
)
485 size
= (size
* (sizeof(RxD1_t
)));
487 size
= (size
* (sizeof(RxD3_t
)));
490 for (i
= 0; i
< config
->rx_ring_num
; i
++) {
491 mac_control
->rings
[i
].rx_curr_get_info
.block_index
= 0;
492 mac_control
->rings
[i
].rx_curr_get_info
.offset
= 0;
493 mac_control
->rings
[i
].rx_curr_get_info
.ring_len
=
494 config
->rx_cfg
[i
].num_rxd
- 1;
495 mac_control
->rings
[i
].rx_curr_put_info
.block_index
= 0;
496 mac_control
->rings
[i
].rx_curr_put_info
.offset
= 0;
497 mac_control
->rings
[i
].rx_curr_put_info
.ring_len
=
498 config
->rx_cfg
[i
].num_rxd
- 1;
499 mac_control
->rings
[i
].nic
= nic
;
500 mac_control
->rings
[i
].ring_no
= i
;
502 blk_cnt
= config
->rx_cfg
[i
].num_rxd
/
503 (rxd_count
[nic
->rxd_mode
] + 1);
504 /* Allocating all the Rx blocks */
505 for (j
= 0; j
< blk_cnt
; j
++) {
506 rx_block_info_t
*rx_blocks
;
509 rx_blocks
= &mac_control
->rings
[i
].rx_blocks
[j
];
510 size
= SIZE_OF_BLOCK
; //size is always page size
511 tmp_v_addr
= pci_alloc_consistent(nic
->pdev
, size
,
513 if (tmp_v_addr
== NULL
) {
515 * In case of failure, free_shared_mem()
516 * is called, which should free any
517 * memory that was alloced till the
520 rx_blocks
->block_virt_addr
= tmp_v_addr
;
523 memset(tmp_v_addr
, 0, size
);
524 rx_blocks
->block_virt_addr
= tmp_v_addr
;
525 rx_blocks
->block_dma_addr
= tmp_p_addr
;
526 rx_blocks
->rxds
= kmalloc(sizeof(rxd_info_t
)*
527 rxd_count
[nic
->rxd_mode
],
529 for (l
=0; l
<rxd_count
[nic
->rxd_mode
];l
++) {
530 rx_blocks
->rxds
[l
].virt_addr
=
531 rx_blocks
->block_virt_addr
+
532 (rxd_size
[nic
->rxd_mode
] * l
);
533 rx_blocks
->rxds
[l
].dma_addr
=
534 rx_blocks
->block_dma_addr
+
535 (rxd_size
[nic
->rxd_mode
] * l
);
538 mac_control
->rings
[i
].rx_blocks
[j
].block_virt_addr
=
540 mac_control
->rings
[i
].rx_blocks
[j
].block_dma_addr
=
543 /* Interlinking all Rx Blocks */
544 for (j
= 0; j
< blk_cnt
; j
++) {
546 mac_control
->rings
[i
].rx_blocks
[j
].block_virt_addr
;
548 mac_control
->rings
[i
].rx_blocks
[(j
+ 1) %
549 blk_cnt
].block_virt_addr
;
551 mac_control
->rings
[i
].rx_blocks
[j
].block_dma_addr
;
553 mac_control
->rings
[i
].rx_blocks
[(j
+ 1) %
554 blk_cnt
].block_dma_addr
;
556 pre_rxd_blk
= (RxD_block_t
*) tmp_v_addr
;
557 pre_rxd_blk
->reserved_2_pNext_RxD_block
=
558 (unsigned long) tmp_v_addr_next
;
559 pre_rxd_blk
->pNext_RxD_Blk_physical
=
560 (u64
) tmp_p_addr_next
;
563 if (nic
->rxd_mode
>= RXD_MODE_3A
) {
565 * Allocation of storage for buffer addresses in 2BUFF mode
566 * and of the buffers as well.
568 for (i
= 0; i
< config
->rx_ring_num
; i
++) {
569 blk_cnt
= config
->rx_cfg
[i
].num_rxd
/
570 (rxd_count
[nic
->rxd_mode
]+ 1);
571 mac_control
->rings
[i
].ba
=
572 kmalloc((sizeof(buffAdd_t
*) * blk_cnt
),
574 if (!mac_control
->rings
[i
].ba
)
576 for (j
= 0; j
< blk_cnt
; j
++) {
578 mac_control
->rings
[i
].ba
[j
] =
579 kmalloc((sizeof(buffAdd_t
) *
580 (rxd_count
[nic
->rxd_mode
] + 1)),
582 if (!mac_control
->rings
[i
].ba
[j
])
584 while (k
!= rxd_count
[nic
->rxd_mode
]) {
585 ba
= &mac_control
->rings
[i
].ba
[j
][k
];
587 ba
->ba_0_org
= (void *) kmalloc
588 (BUF0_LEN
+ ALIGN_SIZE
, GFP_KERNEL
);
591 tmp
= (unsigned long)ba
->ba_0_org
;
593 tmp
&= ~((unsigned long) ALIGN_SIZE
);
594 ba
->ba_0
= (void *) tmp
;
596 ba
->ba_1_org
= (void *) kmalloc
597 (BUF1_LEN
+ ALIGN_SIZE
, GFP_KERNEL
);
600 tmp
= (unsigned long) ba
->ba_1_org
;
602 tmp
&= ~((unsigned long) ALIGN_SIZE
);
603 ba
->ba_1
= (void *) tmp
;
610 /* Allocation and initialization of Statistics block */
611 size
= sizeof(StatInfo_t
);
612 mac_control
->stats_mem
= pci_alloc_consistent
613 (nic
->pdev
, size
, &mac_control
->stats_mem_phy
);
615 if (!mac_control
->stats_mem
) {
617 * In case of failure, free_shared_mem() is called, which
618 * should free any memory that was alloced till the
623 mac_control
->stats_mem_sz
= size
;
625 tmp_v_addr
= mac_control
->stats_mem
;
626 mac_control
->stats_info
= (StatInfo_t
*) tmp_v_addr
;
627 memset(tmp_v_addr
, 0, size
);
628 DBG_PRINT(INIT_DBG
, "%s:Ring Mem PHY: 0x%llx\n", dev
->name
,
629 (unsigned long long) tmp_p_addr
);
635 * free_shared_mem - Free the allocated Memory
636 * @nic: Device private variable.
637 * Description: This function frees all the memory allocated by
638 * the init_shared_mem() function and returns it to the kernel.
641 static void free_shared_mem(struct s2io_nic
*nic
)
643 int i
, j
, blk_cnt
, size
;
645 dma_addr_t tmp_p_addr
;
646 mac_info_t
*mac_control
;
647 struct config_param
*config
;
648 int lst_size
, lst_per_page
;
649 struct net_device
*dev
= nic
->dev
;
654 mac_control
= &nic
->mac_control
;
655 config
= &nic
->config
;
657 lst_size
= (sizeof(TxD_t
) * config
->max_txds
);
658 lst_per_page
= PAGE_SIZE
/ lst_size
;
660 for (i
= 0; i
< config
->tx_fifo_num
; i
++) {
661 int page_num
= TXD_MEM_PAGE_CNT(config
->tx_cfg
[i
].fifo_len
,
663 for (j
= 0; j
< page_num
; j
++) {
664 int mem_blks
= (j
* lst_per_page
);
665 if (!mac_control
->fifos
[i
].list_info
)
667 if (!mac_control
->fifos
[i
].list_info
[mem_blks
].
670 pci_free_consistent(nic
->pdev
, PAGE_SIZE
,
671 mac_control
->fifos
[i
].
674 mac_control
->fifos
[i
].
678 /* If we got a zero DMA address during allocation,
681 if (mac_control
->zerodma_virt_addr
) {
682 pci_free_consistent(nic
->pdev
, PAGE_SIZE
,
683 mac_control
->zerodma_virt_addr
,
686 "%s: Freeing TxDL with zero DMA addr. ",
688 DBG_PRINT(INIT_DBG
, "Virtual address %p\n",
689 mac_control
->zerodma_virt_addr
);
691 kfree(mac_control
->fifos
[i
].list_info
);
694 size
= SIZE_OF_BLOCK
;
695 for (i
= 0; i
< config
->rx_ring_num
; i
++) {
696 blk_cnt
= mac_control
->rings
[i
].block_count
;
697 for (j
= 0; j
< blk_cnt
; j
++) {
698 tmp_v_addr
= mac_control
->rings
[i
].rx_blocks
[j
].
700 tmp_p_addr
= mac_control
->rings
[i
].rx_blocks
[j
].
702 if (tmp_v_addr
== NULL
)
704 pci_free_consistent(nic
->pdev
, size
,
705 tmp_v_addr
, tmp_p_addr
);
706 kfree(mac_control
->rings
[i
].rx_blocks
[j
].rxds
);
710 if (nic
->rxd_mode
>= RXD_MODE_3A
) {
711 /* Freeing buffer storage addresses in 2BUFF mode. */
712 for (i
= 0; i
< config
->rx_ring_num
; i
++) {
713 blk_cnt
= config
->rx_cfg
[i
].num_rxd
/
714 (rxd_count
[nic
->rxd_mode
] + 1);
715 for (j
= 0; j
< blk_cnt
; j
++) {
717 if (!mac_control
->rings
[i
].ba
[j
])
719 while (k
!= rxd_count
[nic
->rxd_mode
]) {
721 &mac_control
->rings
[i
].ba
[j
][k
];
726 kfree(mac_control
->rings
[i
].ba
[j
]);
728 kfree(mac_control
->rings
[i
].ba
);
732 if (mac_control
->stats_mem
) {
733 pci_free_consistent(nic
->pdev
,
734 mac_control
->stats_mem_sz
,
735 mac_control
->stats_mem
,
736 mac_control
->stats_mem_phy
);
738 if (nic
->ufo_in_band_v
)
739 kfree(nic
->ufo_in_band_v
);
743 * s2io_verify_pci_mode -
746 static int s2io_verify_pci_mode(nic_t
*nic
)
748 XENA_dev_config_t __iomem
*bar0
= nic
->bar0
;
749 register u64 val64
= 0;
752 val64
= readq(&bar0
->pci_mode
);
753 mode
= (u8
)GET_PCI_MODE(val64
);
755 if ( val64
& PCI_MODE_UNKNOWN_MODE
)
756 return -1; /* Unknown PCI mode */
762 * s2io_print_pci_mode -
764 static int s2io_print_pci_mode(nic_t
*nic
)
766 XENA_dev_config_t __iomem
*bar0
= nic
->bar0
;
767 register u64 val64
= 0;
769 struct config_param
*config
= &nic
->config
;
771 val64
= readq(&bar0
->pci_mode
);
772 mode
= (u8
)GET_PCI_MODE(val64
);
774 if ( val64
& PCI_MODE_UNKNOWN_MODE
)
775 return -1; /* Unknown PCI mode */
777 if (val64
& PCI_MODE_32_BITS
) {
778 DBG_PRINT(ERR_DBG
, "%s: Device is on 32 bit ", nic
->dev
->name
);
780 DBG_PRINT(ERR_DBG
, "%s: Device is on 64 bit ", nic
->dev
->name
);
784 case PCI_MODE_PCI_33
:
785 DBG_PRINT(ERR_DBG
, "33MHz PCI bus\n");
786 config
->bus_speed
= 33;
788 case PCI_MODE_PCI_66
:
789 DBG_PRINT(ERR_DBG
, "66MHz PCI bus\n");
790 config
->bus_speed
= 133;
792 case PCI_MODE_PCIX_M1_66
:
793 DBG_PRINT(ERR_DBG
, "66MHz PCIX(M1) bus\n");
794 config
->bus_speed
= 133; /* Herc doubles the clock rate */
796 case PCI_MODE_PCIX_M1_100
:
797 DBG_PRINT(ERR_DBG
, "100MHz PCIX(M1) bus\n");
798 config
->bus_speed
= 200;
800 case PCI_MODE_PCIX_M1_133
:
801 DBG_PRINT(ERR_DBG
, "133MHz PCIX(M1) bus\n");
802 config
->bus_speed
= 266;
804 case PCI_MODE_PCIX_M2_66
:
805 DBG_PRINT(ERR_DBG
, "133MHz PCIX(M2) bus\n");
806 config
->bus_speed
= 133;
808 case PCI_MODE_PCIX_M2_100
:
809 DBG_PRINT(ERR_DBG
, "200MHz PCIX(M2) bus\n");
810 config
->bus_speed
= 200;
812 case PCI_MODE_PCIX_M2_133
:
813 DBG_PRINT(ERR_DBG
, "266MHz PCIX(M2) bus\n");
814 config
->bus_speed
= 266;
817 return -1; /* Unsupported bus speed */
824 * init_nic - Initialization of hardware
825 * @nic: device private variable
826 * Description: The function sequentially configures every block
827 * of the H/W from their reset values.
828 * Return Value: SUCCESS on success and
829 * '-1' on failure (endian settings incorrect).
832 static int init_nic(struct s2io_nic
*nic
)
834 XENA_dev_config_t __iomem
*bar0
= nic
->bar0
;
835 struct net_device
*dev
= nic
->dev
;
836 register u64 val64
= 0;
840 mac_info_t
*mac_control
;
841 struct config_param
*config
;
842 int mdio_cnt
= 0, dtx_cnt
= 0;
843 unsigned long long mem_share
;
846 mac_control
= &nic
->mac_control
;
847 config
= &nic
->config
;
849 /* Set the swapper control on the card */
850 if(s2io_set_swapper(nic
)) {
851 DBG_PRINT(ERR_DBG
,"ERROR: Setting Swapper failed\n");
856 * Herc requires EOI to be removed from reset before XGXS, so..
858 if (nic
->device_type
& XFRAME_II_DEVICE
) {
859 val64
= 0xA500000000ULL
;
860 writeq(val64
, &bar0
->sw_reset
);
862 val64
= readq(&bar0
->sw_reset
);
865 /* Remove XGXS from reset state */
867 writeq(val64
, &bar0
->sw_reset
);
869 val64
= readq(&bar0
->sw_reset
);
871 /* Enable Receiving broadcasts */
872 add
= &bar0
->mac_cfg
;
873 val64
= readq(&bar0
->mac_cfg
);
874 val64
|= MAC_RMAC_BCAST_ENABLE
;
875 writeq(RMAC_CFG_KEY(0x4C0D), &bar0
->rmac_cfg_key
);
876 writel((u32
) val64
, add
);
877 writeq(RMAC_CFG_KEY(0x4C0D), &bar0
->rmac_cfg_key
);
878 writel((u32
) (val64
>> 32), (add
+ 4));
880 /* Read registers in all blocks */
881 val64
= readq(&bar0
->mac_int_mask
);
882 val64
= readq(&bar0
->mc_int_mask
);
883 val64
= readq(&bar0
->xgxs_int_mask
);
887 writeq(vBIT(val64
, 2, 14), &bar0
->rmac_max_pyld_len
);
890 * Configuring the XAUI Interface of Xena.
891 * ***************************************
892 * To configure the Xena's XAUI, one has to write a series
893 * of 64 bit values into two registers in a particular
894 * sequence. Hence a macro 'SWITCH_SIGN' has been defined,
895 * which is placed in the arrays of configuration values
896 * (xena_dtx_cfg & xena_mdio_cfg) at appropriate places
897 * to switch writing from one register to another. We continue
898 * writing these values until we encounter the 'END_SIGN' macro.
899 * For example, after making a series of 21 writes into the
900 * dtx_control register the 'SWITCH_SIGN' appears and hence we
901 * start writing into mdio_control until we encounter END_SIGN.
903 if (nic
->device_type
& XFRAME_II_DEVICE
) {
904 while (herc_act_dtx_cfg
[dtx_cnt
] != END_SIGN
) {
905 SPECIAL_REG_WRITE(herc_act_dtx_cfg
[dtx_cnt
],
906 &bar0
->dtx_control
, UF
);
908 msleep(1); /* Necessary!! */
914 while (xena_dtx_cfg
[dtx_cnt
] != END_SIGN
) {
915 if (xena_dtx_cfg
[dtx_cnt
] == SWITCH_SIGN
) {
919 SPECIAL_REG_WRITE(xena_dtx_cfg
[dtx_cnt
],
920 &bar0
->dtx_control
, UF
);
921 val64
= readq(&bar0
->dtx_control
);
925 while (xena_mdio_cfg
[mdio_cnt
] != END_SIGN
) {
926 if (xena_mdio_cfg
[mdio_cnt
] == SWITCH_SIGN
) {
930 SPECIAL_REG_WRITE(xena_mdio_cfg
[mdio_cnt
],
931 &bar0
->mdio_control
, UF
);
932 val64
= readq(&bar0
->mdio_control
);
935 if ((xena_dtx_cfg
[dtx_cnt
] == END_SIGN
) &&
936 (xena_mdio_cfg
[mdio_cnt
] == END_SIGN
)) {
944 /* Tx DMA Initialization */
946 writeq(val64
, &bar0
->tx_fifo_partition_0
);
947 writeq(val64
, &bar0
->tx_fifo_partition_1
);
948 writeq(val64
, &bar0
->tx_fifo_partition_2
);
949 writeq(val64
, &bar0
->tx_fifo_partition_3
);
952 for (i
= 0, j
= 0; i
< config
->tx_fifo_num
; i
++) {
954 vBIT(config
->tx_cfg
[i
].fifo_len
- 1, ((i
* 32) + 19),
955 13) | vBIT(config
->tx_cfg
[i
].fifo_priority
,
958 if (i
== (config
->tx_fifo_num
- 1)) {
965 writeq(val64
, &bar0
->tx_fifo_partition_0
);
969 writeq(val64
, &bar0
->tx_fifo_partition_1
);
973 writeq(val64
, &bar0
->tx_fifo_partition_2
);
977 writeq(val64
, &bar0
->tx_fifo_partition_3
);
982 /* Enable Tx FIFO partition 0. */
983 val64
= readq(&bar0
->tx_fifo_partition_0
);
984 val64
|= BIT(0); /* To enable the FIFO partition. */
985 writeq(val64
, &bar0
->tx_fifo_partition_0
);
988 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
989 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
991 if ((nic
->device_type
== XFRAME_I_DEVICE
) &&
992 (get_xena_rev_id(nic
->pdev
) < 4))
993 writeq(PCC_ENABLE_FOUR
, &bar0
->pcc_enable
);
995 val64
= readq(&bar0
->tx_fifo_partition_0
);
996 DBG_PRINT(INIT_DBG
, "Fifo partition at: 0x%p is: 0x%llx\n",
997 &bar0
->tx_fifo_partition_0
, (unsigned long long) val64
);
1000 * Initialization of Tx_PA_CONFIG register to ignore packet
1001 * integrity checking.
1003 val64
= readq(&bar0
->tx_pa_cfg
);
1004 val64
|= TX_PA_CFG_IGNORE_FRM_ERR
| TX_PA_CFG_IGNORE_SNAP_OUI
|
1005 TX_PA_CFG_IGNORE_LLC_CTRL
| TX_PA_CFG_IGNORE_L2_ERR
;
1006 writeq(val64
, &bar0
->tx_pa_cfg
);
1008 /* Rx DMA initialization. */
1010 for (i
= 0; i
< config
->rx_ring_num
; i
++) {
1012 vBIT(config
->rx_cfg
[i
].ring_priority
, (5 + (i
* 8)),
1015 writeq(val64
, &bar0
->rx_queue_priority
);
1018 * Allocating equal share of memory to all the
1022 if (nic
->device_type
& XFRAME_II_DEVICE
)
1027 for (i
= 0; i
< config
->rx_ring_num
; i
++) {
1030 mem_share
= (mem_size
/ config
->rx_ring_num
+
1031 mem_size
% config
->rx_ring_num
);
1032 val64
|= RX_QUEUE_CFG_Q0_SZ(mem_share
);
1035 mem_share
= (mem_size
/ config
->rx_ring_num
);
1036 val64
|= RX_QUEUE_CFG_Q1_SZ(mem_share
);
1039 mem_share
= (mem_size
/ config
->rx_ring_num
);
1040 val64
|= RX_QUEUE_CFG_Q2_SZ(mem_share
);
1043 mem_share
= (mem_size
/ config
->rx_ring_num
);
1044 val64
|= RX_QUEUE_CFG_Q3_SZ(mem_share
);
1047 mem_share
= (mem_size
/ config
->rx_ring_num
);
1048 val64
|= RX_QUEUE_CFG_Q4_SZ(mem_share
);
1051 mem_share
= (mem_size
/ config
->rx_ring_num
);
1052 val64
|= RX_QUEUE_CFG_Q5_SZ(mem_share
);
1055 mem_share
= (mem_size
/ config
->rx_ring_num
);
1056 val64
|= RX_QUEUE_CFG_Q6_SZ(mem_share
);
1059 mem_share
= (mem_size
/ config
->rx_ring_num
);
1060 val64
|= RX_QUEUE_CFG_Q7_SZ(mem_share
);
1064 writeq(val64
, &bar0
->rx_queue_cfg
);
1067 * Filling Tx round robin registers
1068 * as per the number of FIFOs
1070 switch (config
->tx_fifo_num
) {
1072 val64
= 0x0000000000000000ULL
;
1073 writeq(val64
, &bar0
->tx_w_round_robin_0
);
1074 writeq(val64
, &bar0
->tx_w_round_robin_1
);
1075 writeq(val64
, &bar0
->tx_w_round_robin_2
);
1076 writeq(val64
, &bar0
->tx_w_round_robin_3
);
1077 writeq(val64
, &bar0
->tx_w_round_robin_4
);
1080 val64
= 0x0000010000010000ULL
;
1081 writeq(val64
, &bar0
->tx_w_round_robin_0
);
1082 val64
= 0x0100000100000100ULL
;
1083 writeq(val64
, &bar0
->tx_w_round_robin_1
);
1084 val64
= 0x0001000001000001ULL
;
1085 writeq(val64
, &bar0
->tx_w_round_robin_2
);
1086 val64
= 0x0000010000010000ULL
;
1087 writeq(val64
, &bar0
->tx_w_round_robin_3
);
1088 val64
= 0x0100000000000000ULL
;
1089 writeq(val64
, &bar0
->tx_w_round_robin_4
);
1092 val64
= 0x0001000102000001ULL
;
1093 writeq(val64
, &bar0
->tx_w_round_robin_0
);
1094 val64
= 0x0001020000010001ULL
;
1095 writeq(val64
, &bar0
->tx_w_round_robin_1
);
1096 val64
= 0x0200000100010200ULL
;
1097 writeq(val64
, &bar0
->tx_w_round_robin_2
);
1098 val64
= 0x0001000102000001ULL
;
1099 writeq(val64
, &bar0
->tx_w_round_robin_3
);
1100 val64
= 0x0001020000000000ULL
;
1101 writeq(val64
, &bar0
->tx_w_round_robin_4
);
1104 val64
= 0x0001020300010200ULL
;
1105 writeq(val64
, &bar0
->tx_w_round_robin_0
);
1106 val64
= 0x0100000102030001ULL
;
1107 writeq(val64
, &bar0
->tx_w_round_robin_1
);
1108 val64
= 0x0200010000010203ULL
;
1109 writeq(val64
, &bar0
->tx_w_round_robin_2
);
1110 val64
= 0x0001020001000001ULL
;
1111 writeq(val64
, &bar0
->tx_w_round_robin_3
);
1112 val64
= 0x0203000100000000ULL
;
1113 writeq(val64
, &bar0
->tx_w_round_robin_4
);
1116 val64
= 0x0001000203000102ULL
;
1117 writeq(val64
, &bar0
->tx_w_round_robin_0
);
1118 val64
= 0x0001020001030004ULL
;
1119 writeq(val64
, &bar0
->tx_w_round_robin_1
);
1120 val64
= 0x0001000203000102ULL
;
1121 writeq(val64
, &bar0
->tx_w_round_robin_2
);
1122 val64
= 0x0001020001030004ULL
;
1123 writeq(val64
, &bar0
->tx_w_round_robin_3
);
1124 val64
= 0x0001000000000000ULL
;
1125 writeq(val64
, &bar0
->tx_w_round_robin_4
);
1128 val64
= 0x0001020304000102ULL
;
1129 writeq(val64
, &bar0
->tx_w_round_robin_0
);
1130 val64
= 0x0304050001020001ULL
;
1131 writeq(val64
, &bar0
->tx_w_round_robin_1
);
1132 val64
= 0x0203000100000102ULL
;
1133 writeq(val64
, &bar0
->tx_w_round_robin_2
);
1134 val64
= 0x0304000102030405ULL
;
1135 writeq(val64
, &bar0
->tx_w_round_robin_3
);
1136 val64
= 0x0001000200000000ULL
;
1137 writeq(val64
, &bar0
->tx_w_round_robin_4
);
1140 val64
= 0x0001020001020300ULL
;
1141 writeq(val64
, &bar0
->tx_w_round_robin_0
);
1142 val64
= 0x0102030400010203ULL
;
1143 writeq(val64
, &bar0
->tx_w_round_robin_1
);
1144 val64
= 0x0405060001020001ULL
;
1145 writeq(val64
, &bar0
->tx_w_round_robin_2
);
1146 val64
= 0x0304050000010200ULL
;
1147 writeq(val64
, &bar0
->tx_w_round_robin_3
);
1148 val64
= 0x0102030000000000ULL
;
1149 writeq(val64
, &bar0
->tx_w_round_robin_4
);
1152 val64
= 0x0001020300040105ULL
;
1153 writeq(val64
, &bar0
->tx_w_round_robin_0
);
1154 val64
= 0x0200030106000204ULL
;
1155 writeq(val64
, &bar0
->tx_w_round_robin_1
);
1156 val64
= 0x0103000502010007ULL
;
1157 writeq(val64
, &bar0
->tx_w_round_robin_2
);
1158 val64
= 0x0304010002060500ULL
;
1159 writeq(val64
, &bar0
->tx_w_round_robin_3
);
1160 val64
= 0x0103020400000000ULL
;
1161 writeq(val64
, &bar0
->tx_w_round_robin_4
);
1165 /* Filling the Rx round robin registers as per the
1166 * number of Rings and steering based on QoS.
1168 switch (config
->rx_ring_num
) {
1170 val64
= 0x8080808080808080ULL
;
1171 writeq(val64
, &bar0
->rts_qos_steering
);
1174 val64
= 0x0000010000010000ULL
;
1175 writeq(val64
, &bar0
->rx_w_round_robin_0
);
1176 val64
= 0x0100000100000100ULL
;
1177 writeq(val64
, &bar0
->rx_w_round_robin_1
);
1178 val64
= 0x0001000001000001ULL
;
1179 writeq(val64
, &bar0
->rx_w_round_robin_2
);
1180 val64
= 0x0000010000010000ULL
;
1181 writeq(val64
, &bar0
->rx_w_round_robin_3
);
1182 val64
= 0x0100000000000000ULL
;
1183 writeq(val64
, &bar0
->rx_w_round_robin_4
);
1185 val64
= 0x8080808040404040ULL
;
1186 writeq(val64
, &bar0
->rts_qos_steering
);
1189 val64
= 0x0001000102000001ULL
;
1190 writeq(val64
, &bar0
->rx_w_round_robin_0
);
1191 val64
= 0x0001020000010001ULL
;
1192 writeq(val64
, &bar0
->rx_w_round_robin_1
);
1193 val64
= 0x0200000100010200ULL
;
1194 writeq(val64
, &bar0
->rx_w_round_robin_2
);
1195 val64
= 0x0001000102000001ULL
;
1196 writeq(val64
, &bar0
->rx_w_round_robin_3
);
1197 val64
= 0x0001020000000000ULL
;
1198 writeq(val64
, &bar0
->rx_w_round_robin_4
);
1200 val64
= 0x8080804040402020ULL
;
1201 writeq(val64
, &bar0
->rts_qos_steering
);
1204 val64
= 0x0001020300010200ULL
;
1205 writeq(val64
, &bar0
->rx_w_round_robin_0
);
1206 val64
= 0x0100000102030001ULL
;
1207 writeq(val64
, &bar0
->rx_w_round_robin_1
);
1208 val64
= 0x0200010000010203ULL
;
1209 writeq(val64
, &bar0
->rx_w_round_robin_2
);
1210 val64
= 0x0001020001000001ULL
;
1211 writeq(val64
, &bar0
->rx_w_round_robin_3
);
1212 val64
= 0x0203000100000000ULL
;
1213 writeq(val64
, &bar0
->rx_w_round_robin_4
);
1215 val64
= 0x8080404020201010ULL
;
1216 writeq(val64
, &bar0
->rts_qos_steering
);
1219 val64
= 0x0001000203000102ULL
;
1220 writeq(val64
, &bar0
->rx_w_round_robin_0
);
1221 val64
= 0x0001020001030004ULL
;
1222 writeq(val64
, &bar0
->rx_w_round_robin_1
);
1223 val64
= 0x0001000203000102ULL
;
1224 writeq(val64
, &bar0
->rx_w_round_robin_2
);
1225 val64
= 0x0001020001030004ULL
;
1226 writeq(val64
, &bar0
->rx_w_round_robin_3
);
1227 val64
= 0x0001000000000000ULL
;
1228 writeq(val64
, &bar0
->rx_w_round_robin_4
);
1230 val64
= 0x8080404020201008ULL
;
1231 writeq(val64
, &bar0
->rts_qos_steering
);
1234 val64
= 0x0001020304000102ULL
;
1235 writeq(val64
, &bar0
->rx_w_round_robin_0
);
1236 val64
= 0x0304050001020001ULL
;
1237 writeq(val64
, &bar0
->rx_w_round_robin_1
);
1238 val64
= 0x0203000100000102ULL
;
1239 writeq(val64
, &bar0
->rx_w_round_robin_2
);
1240 val64
= 0x0304000102030405ULL
;
1241 writeq(val64
, &bar0
->rx_w_round_robin_3
);
1242 val64
= 0x0001000200000000ULL
;
1243 writeq(val64
, &bar0
->rx_w_round_robin_4
);
1245 val64
= 0x8080404020100804ULL
;
1246 writeq(val64
, &bar0
->rts_qos_steering
);
1249 val64
= 0x0001020001020300ULL
;
1250 writeq(val64
, &bar0
->rx_w_round_robin_0
);
1251 val64
= 0x0102030400010203ULL
;
1252 writeq(val64
, &bar0
->rx_w_round_robin_1
);
1253 val64
= 0x0405060001020001ULL
;
1254 writeq(val64
, &bar0
->rx_w_round_robin_2
);
1255 val64
= 0x0304050000010200ULL
;
1256 writeq(val64
, &bar0
->rx_w_round_robin_3
);
1257 val64
= 0x0102030000000000ULL
;
1258 writeq(val64
, &bar0
->rx_w_round_robin_4
);
1260 val64
= 0x8080402010080402ULL
;
1261 writeq(val64
, &bar0
->rts_qos_steering
);
1264 val64
= 0x0001020300040105ULL
;
1265 writeq(val64
, &bar0
->rx_w_round_robin_0
);
1266 val64
= 0x0200030106000204ULL
;
1267 writeq(val64
, &bar0
->rx_w_round_robin_1
);
1268 val64
= 0x0103000502010007ULL
;
1269 writeq(val64
, &bar0
->rx_w_round_robin_2
);
1270 val64
= 0x0304010002060500ULL
;
1271 writeq(val64
, &bar0
->rx_w_round_robin_3
);
1272 val64
= 0x0103020400000000ULL
;
1273 writeq(val64
, &bar0
->rx_w_round_robin_4
);
1275 val64
= 0x8040201008040201ULL
;
1276 writeq(val64
, &bar0
->rts_qos_steering
);
1282 for (i
= 0; i
< 8; i
++)
1283 writeq(val64
, &bar0
->rts_frm_len_n
[i
]);
1285 /* Set the default rts frame length for the rings configured */
1286 val64
= MAC_RTS_FRM_LEN_SET(dev
->mtu
+22);
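	/*
	 * Note (explanatory, added): the extra 22 bytes presumably cover the
	 * 14-byte Ethernet header, 4-byte VLAN tag and 4-byte FCS, so with
	 * the default MTU of 1500 the value programmed is 1522.
	 */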
1287 for (i
= 0 ; i
< config
->rx_ring_num
; i
++)
1288 writeq(val64
, &bar0
->rts_frm_len_n
[i
]);
1290 /* Set the frame length for the configured rings
1291 * desired by the user
1293 for (i
= 0; i
< config
->rx_ring_num
; i
++) {
1294 /* If rts_frm_len[i] == 0 then it is assumed that the user has
1295 * not specified frame length steering.
1296 * If the user provides the frame length then program
1297 * the rts_frm_len register for those values or else
1298 * leave it as it is.
1300 if (rts_frm_len
[i
] != 0) {
1301 writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len
[i
]),
1302 &bar0
->rts_frm_len_n
[i
]);
1306 /* Program statistics memory */
1307 writeq(mac_control
->stats_mem_phy
, &bar0
->stat_addr
);
1309 if (nic
->device_type
== XFRAME_II_DEVICE
) {
1310 val64
= STAT_BC(0x320);
1311 writeq(val64
, &bar0
->stat_byte_cnt
);
1315 * Initializing the sampling rate for the device to calculate the
1316 * bandwidth utilization.
1318 val64
= MAC_TX_LINK_UTIL_VAL(tmac_util_period
) |
1319 MAC_RX_LINK_UTIL_VAL(rmac_util_period
);
1320 writeq(val64
, &bar0
->mac_link_util
);
1324 * Initializing the Transmit and Receive Traffic Interrupt
1328 * TTI Initialization. Default Tx timer gets us about
1329 * 250 interrupts per sec. Continuous interrupts are enabled
1332 if (nic
->device_type
== XFRAME_II_DEVICE
) {
1333 int count
= (nic
->config
.bus_speed
* 125)/2;
1334 val64
= TTI_DATA1_MEM_TX_TIMER_VAL(count
);
1337 val64
= TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1339 val64
|= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1340 TTI_DATA1_MEM_TX_URNG_B(0x10) |
1341 TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN
;
1342 if (use_continuous_tx_intrs
)
1343 val64
|= TTI_DATA1_MEM_TX_TIMER_CI_EN
;
1344 writeq(val64
, &bar0
->tti_data1_mem
);
1346 val64
= TTI_DATA2_MEM_TX_UFC_A(0x10) |
1347 TTI_DATA2_MEM_TX_UFC_B(0x20) |
1348 TTI_DATA2_MEM_TX_UFC_C(0x70) | TTI_DATA2_MEM_TX_UFC_D(0x80);
1349 writeq(val64
, &bar0
->tti_data2_mem
);
1351 val64
= TTI_CMD_MEM_WE
| TTI_CMD_MEM_STROBE_NEW_CMD
;
1352 writeq(val64
, &bar0
->tti_command_mem
);
1355 * Once the operation completes, the Strobe bit of the command
1356 * register will be reset. We poll for this particular condition
1357 * We wait for a maximum of 500ms for the operation to complete,
1358 * if it's not complete by then we return error.
1362 val64
= readq(&bar0
->tti_command_mem
);
1363 if (!(val64
& TTI_CMD_MEM_STROBE_NEW_CMD
)) {
1367 DBG_PRINT(ERR_DBG
, "%s: TTI init Failed\n",
1375 if (nic
->config
.bimodal
) {
1377 for (k
= 0; k
< config
->rx_ring_num
; k
++) {
1378 val64
= TTI_CMD_MEM_WE
| TTI_CMD_MEM_STROBE_NEW_CMD
;
1379 val64
|= TTI_CMD_MEM_OFFSET(0x38+k
);
1380 writeq(val64
, &bar0
->tti_command_mem
);
1383 * Once the operation completes, the Strobe bit of the command
1384 * register will be reset. We poll for this particular condition
1385 * We wait for a maximum of 500ms for the operation to complete,
1386 * if it's not complete by then we return error.
1390 val64
= readq(&bar0
->tti_command_mem
);
1391 if (!(val64
& TTI_CMD_MEM_STROBE_NEW_CMD
)) {
1396 "%s: TTI init Failed\n",
1406 /* RTI Initialization */
1407 if (nic
->device_type
== XFRAME_II_DEVICE
) {
1409 * Programmed to generate approx. 500 interrupts per
1412 int count
= (nic
->config
.bus_speed
* 125)/4;
1413 val64
= RTI_DATA1_MEM_RX_TIMER_VAL(count
);
1415 val64
= RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1417 val64
|= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1418 RTI_DATA1_MEM_RX_URNG_B(0x10) |
1419 RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN
;
1421 writeq(val64
, &bar0
->rti_data1_mem
);
1423 val64
= RTI_DATA2_MEM_RX_UFC_A(0x1) |
1424 RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1425 if (nic
->intr_type
== MSI_X
)
1426 val64
|= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \
1427 RTI_DATA2_MEM_RX_UFC_D(0x40));
1429 val64
|= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \
1430 RTI_DATA2_MEM_RX_UFC_D(0x80));
1431 writeq(val64
, &bar0
->rti_data2_mem
);
1433 for (i
= 0; i
< config
->rx_ring_num
; i
++) {
1434 val64
= RTI_CMD_MEM_WE
| RTI_CMD_MEM_STROBE_NEW_CMD
1435 | RTI_CMD_MEM_OFFSET(i
);
1436 writeq(val64
, &bar0
->rti_command_mem
);
1439 * Once the operation completes, the Strobe bit of the
1440 * command register will be reset. We poll for this
1441 * particular condition. We wait for a maximum of 500ms
1442 * for the operation to complete, if it's not complete
1443 * by then we return error.
1447 val64
= readq(&bar0
->rti_command_mem
);
1448 if (!(val64
& RTI_CMD_MEM_STROBE_NEW_CMD
)) {
1452 DBG_PRINT(ERR_DBG
, "%s: RTI init Failed\n",
1463 * Initializing proper values as Pause threshold into all
1464 * the 8 Queues on Rx side.
1466 writeq(0xffbbffbbffbbffbbULL
, &bar0
->mc_pause_thresh_q0q3
);
1467 writeq(0xffbbffbbffbbffbbULL
, &bar0
->mc_pause_thresh_q4q7
);
1469 /* Disable RMAC PAD STRIPPING */
1470 add
= &bar0
->mac_cfg
;
1471 val64
= readq(&bar0
->mac_cfg
);
1472 val64
&= ~(MAC_CFG_RMAC_STRIP_PAD
);
1473 writeq(RMAC_CFG_KEY(0x4C0D), &bar0
->rmac_cfg_key
);
1474 writel((u32
) (val64
), add
);
1475 writeq(RMAC_CFG_KEY(0x4C0D), &bar0
->rmac_cfg_key
);
1476 writel((u32
) (val64
>> 32), (add
+ 4));
1477 val64
= readq(&bar0
->mac_cfg
);
1480 * Set the time value to be inserted in the pause frame
1481 * generated by xena.
1483 val64
= readq(&bar0
->rmac_pause_cfg
);
1484 val64
&= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1485 val64
|= RMAC_PAUSE_HG_PTIME(nic
->mac_control
.rmac_pause_time
);
1486 writeq(val64
, &bar0
->rmac_pause_cfg
);
1489 * Set the threshold limit for generating the pause frame.
1490 * If the amount of data in any queue exceeds the ratio
1491 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256,
1492 * a pause frame is generated.
1495 for (i
= 0; i
< 4; i
++) {
1497 (((u64
) 0xFF00 | nic
->mac_control
.
1498 mc_pause_threshold_q0q3
)
1501 writeq(val64
, &bar0
->mc_pause_thresh_q0q3
);
1504 for (i
= 0; i
< 4; i
++) {
1506 (((u64
) 0xFF00 | nic
->mac_control
.
1507 mc_pause_threshold_q4q7
)
1510 writeq(val64
, &bar0
->mc_pause_thresh_q4q7
);
1513 * TxDMA will stop read requests if the number of read splits has
1514 * exceeded the limit set by shared_splits.
1516 val64
= readq(&bar0
->pic_control
);
1517 val64
|= PIC_CNTL_SHARED_SPLITS(shared_splits
);
1518 writeq(val64
, &bar0
->pic_control
);
1521 * Programming the Herc to split every write transaction
1522 * that does not start on an ADB to reduce disconnects.
1524 if (nic
->device_type
== XFRAME_II_DEVICE
) {
1525 val64
= WREQ_SPLIT_MASK_SET_MASK(255);
1526 writeq(val64
, &bar0
->wreq_split_mask
);
1529 /* Setting Link stability period to 64 ms */
1530 if (nic
->device_type
== XFRAME_II_DEVICE
) {
1531 val64
= MISC_LINK_STABILITY_PRD(3);
1532 writeq(val64
, &bar0
->misc_control
);
1537 #define LINK_UP_DOWN_INTERRUPT 1
1538 #define MAC_RMAC_ERR_TIMER 2
1540 static int s2io_link_fault_indication(nic_t
*nic
)
1542 if (nic
->intr_type
!= INTA
)
1543 return MAC_RMAC_ERR_TIMER
;
1544 if (nic
->device_type
== XFRAME_II_DEVICE
)
1545 return LINK_UP_DOWN_INTERRUPT
;
1547 return MAC_RMAC_ERR_TIMER
;
1551 * en_dis_able_nic_intrs - Enable or Disable the interrupts
1552 * @nic: device private variable,
1553 * @mask: A mask indicating which Intr block must be modified and,
1554 * @flag: A flag indicating whether to enable or disable the Intrs.
1555 * Description: This function will either disable or enable the interrupts
1556 * depending on the flag argument. The mask argument can be used to
1557 * enable/disable any Intr block.
1558 * Return Value: NONE.
1561 static void en_dis_able_nic_intrs(struct s2io_nic
*nic
, u16 mask
, int flag
)
1563 XENA_dev_config_t __iomem
*bar0
= nic
->bar0
;
1564 register u64 val64
= 0, temp64
= 0;
1566 /* Top level interrupt classification */
1567 /* PIC Interrupts */
1568 if ((mask
& (TX_PIC_INTR
| RX_PIC_INTR
))) {
1569 /* Enable PIC Intrs in the general intr mask register */
1570 val64
= TXPIC_INT_M
| PIC_RX_INT_M
;
1571 if (flag
== ENABLE_INTRS
) {
1572 temp64
= readq(&bar0
->general_int_mask
);
1573 temp64
&= ~((u64
) val64
);
1574 writeq(temp64
, &bar0
->general_int_mask
);
1576 * If this is a Hercules adapter, enable the GPIO interrupt;
1577 * otherwise keep all PCIX, Flash, MDIO, IIC and GPIO
1578 * interrupts disabled for now.
1581 if (s2io_link_fault_indication(nic
) ==
1582 LINK_UP_DOWN_INTERRUPT
) {
1583 temp64
= readq(&bar0
->pic_int_mask
);
1584 temp64
&= ~((u64
) PIC_INT_GPIO
);
1585 writeq(temp64
, &bar0
->pic_int_mask
);
1586 temp64
= readq(&bar0
->gpio_int_mask
);
1587 temp64
&= ~((u64
) GPIO_INT_MASK_LINK_UP
);
1588 writeq(temp64
, &bar0
->gpio_int_mask
);
1590 writeq(DISABLE_ALL_INTRS
, &bar0
->pic_int_mask
);
1593 * No MSI Support is available presently, so TTI and
1594 * RTI interrupts are also disabled.
1596 } else if (flag
== DISABLE_INTRS
) {
1598 * Disable PIC Intrs in the general
1599 * intr mask register
1601 writeq(DISABLE_ALL_INTRS
, &bar0
->pic_int_mask
);
1602 temp64
= readq(&bar0
->general_int_mask
);
1604 writeq(val64
, &bar0
->general_int_mask
);
1608 /* DMA Interrupts */
1609 /* Enabling/Disabling Tx DMA interrupts */
1610 if (mask
& TX_DMA_INTR
) {
1611 /* Enable TxDMA Intrs in the general intr mask register */
1612 val64
= TXDMA_INT_M
;
1613 if (flag
== ENABLE_INTRS
) {
1614 temp64
= readq(&bar0
->general_int_mask
);
1615 temp64
&= ~((u64
) val64
);
1616 writeq(temp64
, &bar0
->general_int_mask
);
1618 * Keep all interrupts other than PFC interrupt
1619 * and PCC interrupt disabled in DMA level.
1621 val64
= DISABLE_ALL_INTRS
& ~(TXDMA_PFC_INT_M
|
1623 writeq(val64
, &bar0
->txdma_int_mask
);
1625 * Enable only the MISC error 1 interrupt in PFC block
1627 val64
= DISABLE_ALL_INTRS
& (~PFC_MISC_ERR_1
);
1628 writeq(val64
, &bar0
->pfc_err_mask
);
1630 * Enable only the FB_ECC error interrupt in PCC block
1632 val64
= DISABLE_ALL_INTRS
& (~PCC_FB_ECC_ERR
);
1633 writeq(val64
, &bar0
->pcc_err_mask
);
1634 } else if (flag
== DISABLE_INTRS
) {
1636 * Disable TxDMA Intrs in the general intr mask
1639 writeq(DISABLE_ALL_INTRS
, &bar0
->txdma_int_mask
);
1640 writeq(DISABLE_ALL_INTRS
, &bar0
->pfc_err_mask
);
1641 temp64
= readq(&bar0
->general_int_mask
);
1643 writeq(val64
, &bar0
->general_int_mask
);
1647 /* Enabling/Disabling Rx DMA interrupts */
1648 if (mask
& RX_DMA_INTR
) {
1649 /* Enable RxDMA Intrs in the general intr mask register */
1650 val64
= RXDMA_INT_M
;
1651 if (flag
== ENABLE_INTRS
) {
1652 temp64
= readq(&bar0
->general_int_mask
);
1653 temp64
&= ~((u64
) val64
);
1654 writeq(temp64
, &bar0
->general_int_mask
);
1656 * All RxDMA block interrupts are disabled for now
1659 writeq(DISABLE_ALL_INTRS
, &bar0
->rxdma_int_mask
);
1660 } else if (flag
== DISABLE_INTRS
) {
1662 * Disable RxDMA Intrs in the general intr mask
1665 writeq(DISABLE_ALL_INTRS
, &bar0
->rxdma_int_mask
);
1666 temp64
= readq(&bar0
->general_int_mask
);
1668 writeq(val64
, &bar0
->general_int_mask
);
1672 /* MAC Interrupts */
1673 /* Enabling/Disabling MAC interrupts */
1674 if (mask
& (TX_MAC_INTR
| RX_MAC_INTR
)) {
1675 val64
= TXMAC_INT_M
| RXMAC_INT_M
;
1676 if (flag
== ENABLE_INTRS
) {
1677 temp64
= readq(&bar0
->general_int_mask
);
1678 temp64
&= ~((u64
) val64
);
1679 writeq(temp64
, &bar0
->general_int_mask
);
1681 * All MAC block error interrupts are disabled for now
1684 } else if (flag
== DISABLE_INTRS
) {
1686 * Disable MAC Intrs in the general intr mask register
1688 writeq(DISABLE_ALL_INTRS
, &bar0
->mac_int_mask
);
1689 writeq(DISABLE_ALL_INTRS
,
1690 &bar0
->mac_rmac_err_mask
);
1692 temp64
= readq(&bar0
->general_int_mask
);
1694 writeq(val64
, &bar0
->general_int_mask
);
1698 /* XGXS Interrupts */
1699 if (mask
& (TX_XGXS_INTR
| RX_XGXS_INTR
)) {
1700 val64
= TXXGXS_INT_M
| RXXGXS_INT_M
;
1701 if (flag
== ENABLE_INTRS
) {
1702 temp64
= readq(&bar0
->general_int_mask
);
1703 temp64
&= ~((u64
) val64
);
1704 writeq(temp64
, &bar0
->general_int_mask
);
1706 * All XGXS block error interrupts are disabled for now
1709 writeq(DISABLE_ALL_INTRS
, &bar0
->xgxs_int_mask
);
1710 } else if (flag
== DISABLE_INTRS
) {
1712 * Disable XGXS Intrs in the general intr mask register
1714 writeq(DISABLE_ALL_INTRS
, &bar0
->xgxs_int_mask
);
1715 temp64
= readq(&bar0
->general_int_mask
);
1717 writeq(val64
, &bar0
->general_int_mask
);
1721 /* Memory Controller(MC) interrupts */
1722 if (mask
& MC_INTR
) {
1724 if (flag
== ENABLE_INTRS
) {
1725 temp64
= readq(&bar0
->general_int_mask
);
1726 temp64
&= ~((u64
) val64
);
1727 writeq(temp64
, &bar0
->general_int_mask
);
1729 * Enable all MC Intrs.
1731 writeq(0x0, &bar0
->mc_int_mask
);
1732 writeq(0x0, &bar0
->mc_err_mask
);
1733 } else if (flag
== DISABLE_INTRS
) {
1735 * Disable MC Intrs in the general intr mask register
1737 writeq(DISABLE_ALL_INTRS
, &bar0
->mc_int_mask
);
1738 temp64
= readq(&bar0
->general_int_mask
);
1740 writeq(val64
, &bar0
->general_int_mask
);
1745 /* Tx traffic interrupts */
1746 if (mask
& TX_TRAFFIC_INTR
) {
1747 val64
= TXTRAFFIC_INT_M
;
1748 if (flag
== ENABLE_INTRS
) {
1749 temp64
= readq(&bar0
->general_int_mask
);
1750 temp64
&= ~((u64
) val64
);
1751 writeq(temp64
, &bar0
->general_int_mask
);
1753 * Enable all the Tx side interrupts
1754 * writing 0 Enables all 64 TX interrupt levels
1756 writeq(0x0, &bar0
->tx_traffic_mask
);
1757 } else if (flag
== DISABLE_INTRS
) {
1759 * Disable Tx Traffic Intrs in the general intr mask
1762 writeq(DISABLE_ALL_INTRS
, &bar0
->tx_traffic_mask
);
1763 temp64
= readq(&bar0
->general_int_mask
);
1765 writeq(val64
, &bar0
->general_int_mask
);
1769 /* Rx traffic interrupts */
1770 if (mask
& RX_TRAFFIC_INTR
) {
1771 val64
= RXTRAFFIC_INT_M
;
1772 if (flag
== ENABLE_INTRS
) {
1773 temp64
= readq(&bar0
->general_int_mask
);
1774 temp64
&= ~((u64
) val64
);
1775 writeq(temp64
, &bar0
->general_int_mask
);
1776 /* writing 0 Enables all 8 RX interrupt levels */
1777 writeq(0x0, &bar0
->rx_traffic_mask
);
1778 } else if (flag
== DISABLE_INTRS
) {
1780 * Disable Rx Traffic Intrs in the general intr mask
1783 writeq(DISABLE_ALL_INTRS
, &bar0
->rx_traffic_mask
);
1784 temp64
= readq(&bar0
->general_int_mask
);
1786 writeq(val64
, &bar0
->general_int_mask
);
1791 static int check_prc_pcc_state(u64 val64
, int flag
, int rev_id
, int herc
)
1795 if (flag
== FALSE
) {
1796 if ((!herc
&& (rev_id
>= 4)) || herc
) {
1797 if (!(val64
& ADAPTER_STATUS_RMAC_PCC_IDLE
) &&
1798 ((val64
& ADAPTER_STATUS_RC_PRC_QUIESCENT
) ==
1799 ADAPTER_STATUS_RC_PRC_QUIESCENT
)) {
1803 if (!(val64
& ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE
) &&
1804 ((val64
& ADAPTER_STATUS_RC_PRC_QUIESCENT
) ==
1805 ADAPTER_STATUS_RC_PRC_QUIESCENT
)) {
1810 if ((!herc
&& (rev_id
>= 4)) || herc
) {
1811 if (((val64
& ADAPTER_STATUS_RMAC_PCC_IDLE
) ==
1812 ADAPTER_STATUS_RMAC_PCC_IDLE
) &&
1813 (!(val64
& ADAPTER_STATUS_RC_PRC_QUIESCENT
) ||
1814 ((val64
& ADAPTER_STATUS_RC_PRC_QUIESCENT
) ==
1815 ADAPTER_STATUS_RC_PRC_QUIESCENT
))) {
1819 if (((val64
& ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE
) ==
1820 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE
) &&
1821 (!(val64
& ADAPTER_STATUS_RC_PRC_QUIESCENT
) ||
1822 ((val64
& ADAPTER_STATUS_RC_PRC_QUIESCENT
) ==
1823 ADAPTER_STATUS_RC_PRC_QUIESCENT
))) {
1832 * verify_xena_quiescence - Checks whether the H/W is ready
1833 * @val64 : Value read from adapter status register.
1834 * @flag : indicates if the adapter enable bit was ever written once
1836 * Description: Returns whether the H/W is ready to go or not. Depending
1837 * on whether adapter enable bit was written or not the comparison
1838 * differs and the calling function passes the input argument flag to
1840 * Return: 1 if Xena is quiescent
1841 * 0 if Xena is not quiescent
1844 static int verify_xena_quiescence(nic_t
*sp
, u64 val64
, int flag
)
1847 u64 tmp64
= ~((u64
) val64
);
1848 int rev_id
= get_xena_rev_id(sp
->pdev
);
1850 herc
= (sp
->device_type
== XFRAME_II_DEVICE
);
1853 (ADAPTER_STATUS_TDMA_READY
| ADAPTER_STATUS_RDMA_READY
|
1854 ADAPTER_STATUS_PFC_READY
| ADAPTER_STATUS_TMAC_BUF_EMPTY
|
1855 ADAPTER_STATUS_PIC_QUIESCENT
| ADAPTER_STATUS_MC_DRAM_READY
|
1856 ADAPTER_STATUS_MC_QUEUES_READY
| ADAPTER_STATUS_M_PLL_LOCK
|
1857 ADAPTER_STATUS_P_PLL_LOCK
))) {
1858 ret
= check_prc_pcc_state(val64
, flag
, rev_id
, herc
);
1865 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
1866 * @sp: Pointer to device specific structure
1868 * New procedure to clear mac address reading problems on Alpha platforms
1872 static void fix_mac_address(nic_t
* sp
)
1874 XENA_dev_config_t __iomem
*bar0
= sp
->bar0
;
1878 while (fix_mac
[i
] != END_SIGN
) {
1879 writeq(fix_mac
[i
++], &bar0
->gpio_control
);
1881 val64
= readq(&bar0
->gpio_control
);
1886 * start_nic - Turns the device on
1887 * @nic : device private variable.
1889 * This function actually turns the device on. Before this function is
1890 * called, all registers are configured from their reset states
1891 * and shared memory is allocated but the NIC is still quiescent. On
1892 * calling this function, the device interrupts are cleared and the NIC is
1893 * literally switched on by writing into the adapter control register.
1895 * SUCCESS on success and -1 on failure.
1898 static int start_nic(struct s2io_nic
*nic
)
1900 XENA_dev_config_t __iomem
*bar0
= nic
->bar0
;
1901 struct net_device
*dev
= nic
->dev
;
1902 register u64 val64
= 0;
1905 mac_info_t
*mac_control
;
1906 struct config_param
*config
;
1908 mac_control
= &nic
->mac_control
;
1909 config
= &nic
->config
;
1911 /* PRC Initialization and configuration */
1912 for (i
= 0; i
< config
->rx_ring_num
; i
++) {
1913 writeq((u64
) mac_control
->rings
[i
].rx_blocks
[0].block_dma_addr
,
1914 &bar0
->prc_rxd0_n
[i
]);
1916 val64
= readq(&bar0
->prc_ctrl_n
[i
]);
1917 if (nic
->config
.bimodal
)
1918 val64
|= PRC_CTRL_BIMODAL_INTERRUPT
;
1919 if (nic
->rxd_mode
== RXD_MODE_1
)
1920 val64
|= PRC_CTRL_RC_ENABLED
;
1922 val64
|= PRC_CTRL_RC_ENABLED
| PRC_CTRL_RING_MODE_3
;
1923 writeq(val64
, &bar0
->prc_ctrl_n
[i
]);
1926 if (nic
->rxd_mode
== RXD_MODE_3B
) {
1927 /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
1928 val64
= readq(&bar0
->rx_pa_cfg
);
1929 val64
|= RX_PA_CFG_IGNORE_L2_ERR
;
1930 writeq(val64
, &bar0
->rx_pa_cfg
);
1934 * Enabling MC-RLDRAM. After enabling the device, we timeout
1935 * for around 100ms, which is approximately the time required
1936 * for the device to be ready for operation.
1938 val64
= readq(&bar0
->mc_rldram_mrs
);
1939 val64
|= MC_RLDRAM_QUEUE_SIZE_ENABLE
| MC_RLDRAM_MRS_ENABLE
;
1940 SPECIAL_REG_WRITE(val64
, &bar0
->mc_rldram_mrs
, UF
);
1941 val64
= readq(&bar0
->mc_rldram_mrs
);
1943 msleep(100); /* Delay by around 100 ms. */
1945 /* Enabling ECC Protection. */
1946 val64
= readq(&bar0
->adapter_control
);
1947 val64
&= ~ADAPTER_ECC_EN
;
1948 writeq(val64
, &bar0
->adapter_control
);
1951 * Clearing any possible Link state change interrupts that
1952 * could have popped up just before Enabling the card.
1954 val64
= readq(&bar0
->mac_rmac_err_reg
);
1956 writeq(val64
, &bar0
->mac_rmac_err_reg
);
1959 * Verify if the device is ready to be enabled, if so enable
1962 val64
= readq(&bar0
->adapter_status
);
1963 if (!verify_xena_quiescence(nic
, val64
, nic
->device_enabled_once
)) {
1964 DBG_PRINT(ERR_DBG
, "%s: device is not ready, ", dev
->name
);
1965 DBG_PRINT(ERR_DBG
, "Adapter status reads: 0x%llx\n",
1966 (unsigned long long) val64
);
1970 /* Enable select interrupts */
1971 if (nic
->intr_type
!= INTA
)
1972 en_dis_able_nic_intrs(nic
, ENA_ALL_INTRS
, DISABLE_INTRS
);
1974 interruptible
= TX_TRAFFIC_INTR
| RX_TRAFFIC_INTR
;
1975 interruptible
|= TX_PIC_INTR
| RX_PIC_INTR
;
1976 interruptible
|= TX_MAC_INTR
| RX_MAC_INTR
;
1977 en_dis_able_nic_intrs(nic
, interruptible
, ENABLE_INTRS
);
1981 * With some switches, link might be already up at this point.
1982 * Because of this weird behavior, when we enable laser,
1983 * we may not get link. We need to handle this. We cannot
1984 * figure out which switch is misbehaving. So we are forced to
1985 * make a global change.
1988 /* Enabling Laser. */
1989 val64
= readq(&bar0
->adapter_control
);
1990 val64
|= ADAPTER_EOI_TX_ON
;
1991 writeq(val64
, &bar0
->adapter_control
);
1993 /* SXE-002: Initialize link and activity LED */
1994 subid
= nic
->pdev
->subsystem_device
;
1995 if (((subid
& 0xFF) >= 0x07) &&
1996 (nic
->device_type
== XFRAME_I_DEVICE
)) {
1997 val64
= readq(&bar0
->gpio_control
);
1998 val64
|= 0x0000800000000000ULL
;
1999 writeq(val64
, &bar0
->gpio_control
);
2000 val64
= 0x0411040400000000ULL
;
2001 writeq(val64
, (void __iomem
*)bar0
+ 0x2700);
2005 * We don't see link state interrupts on certain switches, so
2006 * we directly schedule a link state task from here.
2008 schedule_work(&nic
->set_link_task
);
/**
 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
 */
static struct sk_buff *s2io_txdl_getskb(fifo_info_t *fifo_data, TxD_t *txdlp,
					int get_off)
{
	nic_t *nic = fifo_data->nic;
	struct sk_buff *skb;
	TxD_t *txds;
	u16 j, frg_cnt;

	txds = txdlp;
	if (txds->Host_Control == (u64)(long)nic->ufo_in_band_v) {
		pci_unmap_single(nic->pdev, (dma_addr_t)
				 txds->Buffer_Pointer, sizeof(u64),
				 PCI_DMA_TODEVICE);
		txds++;
	}

	skb = (struct sk_buff *) ((unsigned long)
			txds->Host_Control);
	if (!skb) {
		memset(txdlp, 0, (sizeof(TxD_t) * fifo_data->max_txds));
		return NULL;
	}
	pci_unmap_single(nic->pdev, (dma_addr_t)
			 txds->Buffer_Pointer,
			 skb->len - skb->data_len,
			 PCI_DMA_TODEVICE);
	frg_cnt = skb_shinfo(skb)->nr_frags;
	if (frg_cnt) {
		txds++;
		for (j = 0; j < frg_cnt; j++, txds++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
			if (!txds->Buffer_Pointer)
				break;
			pci_unmap_page(nic->pdev, (dma_addr_t)
				       txds->Buffer_Pointer,
				       frag->size, PCI_DMA_TODEVICE);
		}
	}
	txdlp->Host_Control = 0;
	return(skb);
}
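/*
 * Note (added): when UFO is in use, TxD 0 of a list carries the 8-byte
 * in-band header taken from nic->ufo_in_band_v rather than skb data, which
 * is why the unmap above skips one descriptor before recovering the skb
 * pointer from Host_Control. This reading is inferred from s2io_xmit()
 * further below; it is a summary, not hardware documentation.
 */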
/**
 *  free_tx_buffers - Free all queued Tx buffers
 *  @nic : device private variable.
 *  Description:
 *  Free all queued Tx buffers.
 *  Return Value: void
 */

static void free_tx_buffers(struct s2io_nic *nic)
{
	struct net_device *dev = nic->dev;
	struct sk_buff *skb;
	TxD_t *txdp;
	int i, j;
	mac_info_t *mac_control;
	struct config_param *config;
	int cnt = 0;

	mac_control = &nic->mac_control;
	config = &nic->config;

	for (i = 0; i < config->tx_fifo_num; i++) {
		for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
			txdp = (TxD_t *) mac_control->fifos[i].list_info[j].
			    list_virt_addr;
			skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
			if (skb) {
				dev_kfree_skb(skb);
				cnt++;
			}
		}
		DBG_PRINT(INTR_DBG,
			  "%s:forcibly freeing %d skbs on FIFO%d\n",
			  dev->name, cnt, i);
		mac_control->fifos[i].tx_curr_get_info.offset = 0;
		mac_control->fifos[i].tx_curr_put_info.offset = 0;
	}
}
/**
 *  stop_nic - To stop the nic
 *  @nic : device private variable.
 *  Description:
 *  This function does exactly the opposite of what the start_nic()
 *  function does. This function is called to stop the device.
 *  Return Value:
 *  void.
 */

static void stop_nic(struct s2io_nic *nic)
{
	XENA_dev_config_t __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	u16 interruptible, i;
	mac_info_t *mac_control;
	struct config_param *config;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/* Disable all interrupts */
	interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
	interruptible |= TX_PIC_INTR | RX_PIC_INTR;
	interruptible |= TX_MAC_INTR | RX_MAC_INTR;
	en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);

	/* Disable PRCs */
	for (i = 0; i < config->rx_ring_num; i++) {
		val64 = readq(&bar0->prc_ctrl_n[i]);
		val64 &= ~((u64) PRC_CTRL_RC_ENABLED);
		writeq(val64, &bar0->prc_ctrl_n[i]);
	}
}
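/*
 * Note (added): stop_nic() only masks interrupts and clears
 * PRC_CTRL_RC_ENABLED on every ring, i.e. it stops the receive DMA engines.
 * The assumption is that free_rx_buffers()/free_tx_buffers() run only after
 * this, so no descriptor buffer is being DMA'ed into while it is unmapped.
 */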
int fill_rxd_3buf(nic_t *nic, RxD_t *rxdp, struct sk_buff *skb)
{
	struct net_device *dev = nic->dev;
	struct sk_buff *frag_list;
	void *tmp;

	/* Buffer-1 receives L3/L4 headers */
	((RxD3_t*)rxdp)->Buffer1_ptr = pci_map_single
			(nic->pdev, skb->data, l3l4hdr_size + 4,
			PCI_DMA_FROMDEVICE);

	/* skb_shinfo(skb)->frag_list will have L4 data payload */
	skb_shinfo(skb)->frag_list = dev_alloc_skb(dev->mtu + ALIGN_SIZE);
	if (skb_shinfo(skb)->frag_list == NULL) {
		DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb failed\n ", dev->name);
		return -ENOMEM;
	}
	frag_list = skb_shinfo(skb)->frag_list;
	frag_list->next = NULL;
	tmp = (void *)ALIGN((long)frag_list->data, ALIGN_SIZE + 1);
	frag_list->data = tmp;
	frag_list->tail = tmp;

	/* Buffer-2 receives L4 data payload */
	((RxD3_t*)rxdp)->Buffer2_ptr = pci_map_single(nic->pdev,
				frag_list->data, dev->mtu,
				PCI_DMA_FROMDEVICE);
	rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4);
	rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu);

	return SUCCESS;
}
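/*
 * Note (added): in 3-buffer mode the RxD is split as
 *   Buffer0 -> ba->ba_0 (set up by the caller, fill_rx_buffers()),
 *   Buffer1 -> skb->data, sized l3l4hdr_size + 4, for the L3/L4 headers,
 *   Buffer2 -> the frag_list skb, sized dev->mtu, for the L4 payload.
 * The ALIGN() of frag_list->data assumes ALIGN_SIZE + 1 is the required
 * buffer alignment (a power of two); the 2-buffer path below uses the same
 * idiom on skb->data.
 */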
/**
 *  fill_rx_buffers - Allocates the Rx side skbs
 *  @nic: device private variable
 *  @ring_no: ring number
 *  Description:
 *  The function allocates Rx side skbs and puts the physical
 *  address of these buffers into the RxD buffer pointers, so that the NIC
 *  can DMA the received frame into these locations.
 *  The NIC supports 3 receive modes, viz
 *  1. single buffer,
 *  2. three buffer and
 *  3. Five buffer modes.
 *  Each mode defines how many fragments the received frame will be split
 *  up into by the NIC. The frame is split into L3 header, L4 Header,
 *  L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
 *  is split into 3 fragments. As of now only single buffer mode is
 *  supported.
 *  Return Value:
 *  SUCCESS on success or an appropriate -ve value on failure.
 */

static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
{
	struct net_device *dev = nic->dev;
	struct sk_buff *skb;
	RxD_t *rxdp;
	int off, off1, size, block_no, block_no1;
	u32 alloc_tab = 0;
	u32 alloc_cnt;
	mac_info_t *mac_control;
	struct config_param *config;
	u64 tmp;
	buffAdd_t *ba;
#ifndef CONFIG_S2IO_NAPI
	unsigned long flags;
#endif
	RxD_t *first_rxdp = NULL;

	mac_control = &nic->mac_control;
	config = &nic->config;
	alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
	    atomic_read(&nic->rx_bufs_left[ring_no]);

	while (alloc_tab < alloc_cnt) {
		block_no = mac_control->rings[ring_no].rx_curr_put_info.
		    block_index;
		block_no1 = mac_control->rings[ring_no].rx_curr_get_info.
		    block_index;
		off = mac_control->rings[ring_no].rx_curr_put_info.offset;
		off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;

		rxdp = mac_control->rings[ring_no].
				rx_blocks[block_no].rxds[off].virt_addr;

		if ((block_no == block_no1) && (off == off1) &&
					(rxdp->Host_Control)) {
			DBG_PRINT(INTR_DBG, "%s: Get and Put",
				  dev->name);
			DBG_PRINT(INTR_DBG, " info equated\n");
			goto end;
		}
		if (off && (off == rxd_count[nic->rxd_mode])) {
			mac_control->rings[ring_no].rx_curr_put_info.
			    block_index++;
			if (mac_control->rings[ring_no].rx_curr_put_info.
			    block_index == mac_control->rings[ring_no].
					block_count)
				mac_control->rings[ring_no].rx_curr_put_info.
					block_index = 0;
			block_no = mac_control->rings[ring_no].
					rx_curr_put_info.block_index;
			if (off == rxd_count[nic->rxd_mode])
				off = 0;
			mac_control->rings[ring_no].rx_curr_put_info.
				offset = off;
			rxdp = mac_control->rings[ring_no].
				rx_blocks[block_no].block_virt_addr;
			DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
				  dev->name, rxdp);
		}
#ifndef CONFIG_S2IO_NAPI
		spin_lock_irqsave(&nic->put_lock, flags);
		mac_control->rings[ring_no].put_pos =
		    (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
		spin_unlock_irqrestore(&nic->put_lock, flags);
#endif
		if ((rxdp->Control_1 & RXD_OWN_XENA) &&
			((nic->rxd_mode >= RXD_MODE_3A) &&
				(rxdp->Control_2 & BIT(0)))) {
			mac_control->rings[ring_no].rx_curr_put_info.
					offset = off;
			goto end;
		}
		/* calculate size of skb based on ring mode */
		size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
				HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
		if (nic->rxd_mode == RXD_MODE_1)
			size += NET_IP_ALIGN;
		else if (nic->rxd_mode == RXD_MODE_3B)
			size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
		else
			size = l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4;

		/* allocate skb */
		skb = dev_alloc_skb(size);
		if (!skb) {
			DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
			DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
			if (first_rxdp) {
				wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			return -ENOMEM;
		}
		if (nic->rxd_mode == RXD_MODE_1) {
			/* 1 buffer mode - normal operation mode */
			memset(rxdp, 0, sizeof(RxD1_t));
			skb_reserve(skb, NET_IP_ALIGN);
			((RxD1_t*)rxdp)->Buffer0_ptr = pci_map_single
			    (nic->pdev, skb->data, size, PCI_DMA_FROMDEVICE);
			rxdp->Control_2 &= (~MASK_BUFFER0_SIZE_1);
			rxdp->Control_2 |= SET_BUFFER0_SIZE_1(size);

		} else if (nic->rxd_mode >= RXD_MODE_3A) {
			/*
			 * 2 or 3 buffer mode -
			 * Both 2 buffer mode and 3 buffer mode provides 128
			 * byte aligned receive buffers.
			 *
			 * 3 buffer mode provides header separation where in
			 * skb->data will have L3/L4 headers where as
			 * skb_shinfo(skb)->frag_list will have the L4 data
			 * payload
			 */

			memset(rxdp, 0, sizeof(RxD3_t));
			ba = &mac_control->rings[ring_no].ba[block_no][off];
			skb_reserve(skb, BUF0_LEN);
			tmp = (u64)(unsigned long) skb->data;
			tmp += ALIGN_SIZE;
			tmp &= ~ALIGN_SIZE;
			skb->data = (void *) (unsigned long)tmp;
			skb->tail = (void *) (unsigned long)tmp;

			((RxD3_t*)rxdp)->Buffer0_ptr =
			    pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
					   PCI_DMA_FROMDEVICE);
			rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
			if (nic->rxd_mode == RXD_MODE_3B) {
				/* Two buffer mode */

				/*
				 * Buffer2 will have L3/L4 header plus
				 * L4 payload
				 */
				((RxD3_t*)rxdp)->Buffer2_ptr = pci_map_single
				(nic->pdev, skb->data, dev->mtu + 4,
						PCI_DMA_FROMDEVICE);

				/* Buffer-1 will be dummy buffer not used */
				((RxD3_t*)rxdp)->Buffer1_ptr =
				pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN,
					PCI_DMA_FROMDEVICE);
				rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
				rxdp->Control_2 |= SET_BUFFER2_SIZE_3
								(dev->mtu + 4);
			} else {
				/* 3 buffer mode */
				if (fill_rxd_3buf(nic, rxdp, skb) == -ENOMEM) {
					dev_kfree_skb_irq(skb);
					if (first_rxdp) {
						wmb();
						first_rxdp->Control_1 |=
							RXD_OWN_XENA;
					}
					return -ENOMEM;
				}
			}
			rxdp->Control_2 |= BIT(0);
		}
		rxdp->Host_Control = (unsigned long) (skb);
		if (alloc_tab & ((1 << rxsync_frequency) - 1))
			rxdp->Control_1 |= RXD_OWN_XENA;
		off++;
		if (off == (rxd_count[nic->rxd_mode] + 1))
			off = 0;
		mac_control->rings[ring_no].rx_curr_put_info.offset = off;

		rxdp->Control_2 |= SET_RXD_MARKER;
		if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
			if (first_rxdp) {
				wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			first_rxdp = rxdp;
		}
		atomic_inc(&nic->rx_bufs_left[ring_no]);
		alloc_tab++;
	}

end:
	/* Transfer ownership of first descriptor to adapter just before
	 * exiting. Before that, use memory barrier so that ownership
	 * and other fields are seen by adapter correctly.
	 */
	if (first_rxdp) {
		wmb();
		first_rxdp->Control_1 |= RXD_OWN_XENA;
	}

	return SUCCESS;
}
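/*
 * Worked example (added, assuming 1-buffer mode with 127 RxDs per block):
 * each block then holds rxd_count + 1 = 128 slots, the extra slot presumably
 * being the link to the next block, so
 *   put_pos = block_no * 128 + off
 * gives a flat index into the ring that the non-NAPI rx_intr_handler() can
 * compare against its own get position. The numbers are illustrative only.
 */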
static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
{
	struct net_device *dev = sp->dev;
	int j;
	struct sk_buff *skb;
	RxD_t *rxdp;
	mac_info_t *mac_control;
	buffAdd_t *ba;

	mac_control = &sp->mac_control;
	for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
		rxdp = mac_control->rings[ring_no].
				rx_blocks[blk].rxds[j].virt_addr;
		skb = (struct sk_buff *)
			((unsigned long) rxdp->Host_Control);
		if (!skb)
			continue;
		if (sp->rxd_mode == RXD_MODE_1) {
			pci_unmap_single(sp->pdev, (dma_addr_t)
				 ((RxD1_t*)rxdp)->Buffer0_ptr,
				 dev->mtu +
				 HEADER_ETHERNET_II_802_3_SIZE
				 + HEADER_802_2_SIZE +
				 HEADER_SNAP_SIZE,
				 PCI_DMA_FROMDEVICE);
			memset(rxdp, 0, sizeof(RxD1_t));
		} else if(sp->rxd_mode == RXD_MODE_3B) {
			ba = &mac_control->rings[ring_no].
				ba[blk][j];
			pci_unmap_single(sp->pdev, (dma_addr_t)
				 ((RxD3_t*)rxdp)->Buffer0_ptr,
				 BUF0_LEN,
				 PCI_DMA_FROMDEVICE);
			pci_unmap_single(sp->pdev, (dma_addr_t)
				 ((RxD3_t*)rxdp)->Buffer1_ptr,
				 BUF1_LEN,
				 PCI_DMA_FROMDEVICE);
			pci_unmap_single(sp->pdev, (dma_addr_t)
				 ((RxD3_t*)rxdp)->Buffer2_ptr,
				 dev->mtu + 4,
				 PCI_DMA_FROMDEVICE);
			memset(rxdp, 0, sizeof(RxD3_t));
		} else {
			pci_unmap_single(sp->pdev, (dma_addr_t)
				((RxD3_t*)rxdp)->Buffer0_ptr, BUF0_LEN,
				PCI_DMA_FROMDEVICE);
			pci_unmap_single(sp->pdev, (dma_addr_t)
				((RxD3_t*)rxdp)->Buffer1_ptr,
				l3l4hdr_size + 4,
				PCI_DMA_FROMDEVICE);
			pci_unmap_single(sp->pdev, (dma_addr_t)
				((RxD3_t*)rxdp)->Buffer2_ptr, dev->mtu,
				PCI_DMA_FROMDEVICE);
			memset(rxdp, 0, sizeof(RxD3_t));
		}
		dev_kfree_skb(skb);
		atomic_dec(&sp->rx_bufs_left[ring_no]);
	}
}
/**
 *  free_rx_buffers - Frees all Rx buffers
 *  @sp: device private variable.
 *  Description:
 *  This function will free all Rx buffers allocated by host.
 *  Return Value:
 *  NONE.
 */

static void free_rx_buffers(struct s2io_nic *sp)
{
	struct net_device *dev = sp->dev;
	int i, blk = 0, buf_cnt = 0;
	mac_info_t *mac_control;
	struct config_param *config;

	mac_control = &sp->mac_control;
	config = &sp->config;

	for (i = 0; i < config->rx_ring_num; i++) {
		for (blk = 0; blk < rx_ring_sz[i]; blk++)
			free_rxd_blk(sp, i, blk);

		mac_control->rings[i].rx_curr_put_info.block_index = 0;
		mac_control->rings[i].rx_curr_get_info.block_index = 0;
		mac_control->rings[i].rx_curr_put_info.offset = 0;
		mac_control->rings[i].rx_curr_get_info.offset = 0;
		atomic_set(&sp->rx_bufs_left[i], 0);
		DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
			  dev->name, buf_cnt, i);
	}
}
/**
 * s2io_poll - Rx interrupt handler for NAPI support
 * @dev : pointer to the device structure.
 * @budget : The number of packets that were budgeted to be processed
 * during one pass through the 'Poll' function.
 * Description:
 * Comes into picture only if NAPI support has been incorporated. It does
 * the same thing that rx_intr_handler does, but not in an interrupt context.
 * Also it will process only a given number of packets.
 * Return value:
 * 0 on success and 1 if there are No Rx packets to be processed.
 */

#if defined(CONFIG_S2IO_NAPI)
static int s2io_poll(struct net_device *dev, int *budget)
{
	nic_t *nic = dev->priv;
	int pkt_cnt = 0, org_pkts_to_process;
	mac_info_t *mac_control;
	struct config_param *config;
	XENA_dev_config_t __iomem *bar0 = nic->bar0;
	u64 val64;
	int i;

	atomic_inc(&nic->isr_cnt);
	mac_control = &nic->mac_control;
	config = &nic->config;

	nic->pkts_to_process = *budget;
	if (nic->pkts_to_process > dev->quota)
		nic->pkts_to_process = dev->quota;
	org_pkts_to_process = nic->pkts_to_process;

	val64 = readq(&bar0->rx_traffic_int);
	writeq(val64, &bar0->rx_traffic_int);

	for (i = 0; i < config->rx_ring_num; i++) {
		rx_intr_handler(&mac_control->rings[i]);
		pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
		if (!nic->pkts_to_process) {
			/* Quota for the current iteration has been met */
			goto no_rx;
		}
	}
	if (!pkt_cnt)
		pkt_cnt = 1;

	dev->quota -= pkt_cnt;
	*budget -= pkt_cnt;
	netif_rx_complete(dev);

	for (i = 0; i < config->rx_ring_num; i++) {
		if (fill_rx_buffers(nic, i) == -ENOMEM) {
			DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
			DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
			break;
		}
	}
	/* Re enable the Rx interrupts. */
	en_dis_able_nic_intrs(nic, RX_TRAFFIC_INTR, ENABLE_INTRS);
	atomic_dec(&nic->isr_cnt);
	return 0;

no_rx:
	dev->quota -= pkt_cnt;
	*budget -= pkt_cnt;

	for (i = 0; i < config->rx_ring_num; i++) {
		if (fill_rx_buffers(nic, i) == -ENOMEM) {
			DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
			DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
			break;
		}
	}
	atomic_dec(&nic->isr_cnt);
	return 1;
}
#endif
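/*
 * Note (added): the NAPI accounting above works on three counters:
 * *budget (global), dev->quota (per device) and nic->pkts_to_process, which
 * rx_intr_handler() decrements per received frame. Both exit paths subtract
 * the same pkt_cnt from quota and budget; only the path that exhausted its
 * quota returns 1, telling the core there may be more work to poll.
 */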
/**
 *  rx_intr_handler - Rx interrupt handler
 *  @nic: device private variable.
 *  Description:
 *  If the interrupt is because of a received frame or if the
 *  receive ring contains fresh as yet un-processed frames, this function is
 *  called. It picks out the RxD at which place the last Rx processing had
 *  stopped and sends the skb to the OSM's Rx handler and then increments
 *  the offset.
 *  Return Value:
 *  NONE.
 */
static void rx_intr_handler(ring_info_t *ring_data)
{
	nic_t *nic = ring_data->nic;
	struct net_device *dev = (struct net_device *) nic->dev;
	int get_block, put_block, put_offset;
	rx_curr_get_info_t get_info, put_info;
	RxD_t *rxdp;
	struct sk_buff *skb;
#ifndef CONFIG_S2IO_NAPI
	int pkt_cnt = 0;
#endif
	spin_lock(&nic->rx_lock);
	if (atomic_read(&nic->card_state) == CARD_DOWN) {
		DBG_PRINT(INTR_DBG, "%s: %s going down for reset\n",
			  __FUNCTION__, dev->name);
		spin_unlock(&nic->rx_lock);
		return;
	}

	get_info = ring_data->rx_curr_get_info;
	get_block = get_info.block_index;
	put_info = ring_data->rx_curr_put_info;
	put_block = put_info.block_index;
	rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
#ifndef CONFIG_S2IO_NAPI
	spin_lock(&nic->put_lock);
	put_offset = ring_data->put_pos;
	spin_unlock(&nic->put_lock);
#else
	put_offset = (put_block * (rxd_count[nic->rxd_mode] + 1)) +
		put_info.offset;
#endif
	while (RXD_IS_UP2DT(rxdp)) {
		/* If your are next to put index then it's FIFO full condition */
		if ((get_block == put_block) &&
		    (get_info.offset + 1) == put_info.offset) {
			DBG_PRINT(ERR_DBG, "%s: Ring Full\n", dev->name);
			break;
		}
		skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
		if (skb == NULL) {
			DBG_PRINT(ERR_DBG, "%s: The skb is ",
				  dev->name);
			DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
			spin_unlock(&nic->rx_lock);
			return;
		}
		if (nic->rxd_mode == RXD_MODE_1) {
			pci_unmap_single(nic->pdev, (dma_addr_t)
				 ((RxD1_t*)rxdp)->Buffer0_ptr,
				 dev->mtu +
				 HEADER_ETHERNET_II_802_3_SIZE +
				 HEADER_802_2_SIZE +
				 HEADER_SNAP_SIZE,
				 PCI_DMA_FROMDEVICE);
		} else if (nic->rxd_mode == RXD_MODE_3B) {
			pci_unmap_single(nic->pdev, (dma_addr_t)
				 ((RxD3_t*)rxdp)->Buffer0_ptr,
				 BUF0_LEN, PCI_DMA_FROMDEVICE);
			pci_unmap_single(nic->pdev, (dma_addr_t)
				 ((RxD3_t*)rxdp)->Buffer1_ptr,
				 BUF1_LEN, PCI_DMA_FROMDEVICE);
			pci_unmap_single(nic->pdev, (dma_addr_t)
				 ((RxD3_t*)rxdp)->Buffer2_ptr,
				 dev->mtu + 4,
				 PCI_DMA_FROMDEVICE);
		} else {
			pci_unmap_single(nic->pdev, (dma_addr_t)
				 ((RxD3_t*)rxdp)->Buffer0_ptr, BUF0_LEN,
				 PCI_DMA_FROMDEVICE);
			pci_unmap_single(nic->pdev, (dma_addr_t)
				 ((RxD3_t*)rxdp)->Buffer1_ptr,
				 l3l4hdr_size + 4,
				 PCI_DMA_FROMDEVICE);
			pci_unmap_single(nic->pdev, (dma_addr_t)
				 ((RxD3_t*)rxdp)->Buffer2_ptr,
				 dev->mtu, PCI_DMA_FROMDEVICE);
		}
		rx_osm_handler(ring_data, rxdp);
		get_info.offset++;
		ring_data->rx_curr_get_info.offset = get_info.offset;
		rxdp = ring_data->rx_blocks[get_block].
				rxds[get_info.offset].virt_addr;
		if (get_info.offset == rxd_count[nic->rxd_mode]) {
			get_info.offset = 0;
			ring_data->rx_curr_get_info.offset = get_info.offset;
			get_block++;
			if (get_block == ring_data->block_count)
				get_block = 0;
			ring_data->rx_curr_get_info.block_index = get_block;
			rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
		}

#ifdef CONFIG_S2IO_NAPI
		nic->pkts_to_process -= 1;
		if (!nic->pkts_to_process)
			break;
#else
		pkt_cnt++;
		if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
			break;
#endif
	}
	spin_unlock(&nic->rx_lock);
}
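/*
 * Note (added): the "Ring Full" break above treats the descriptor just
 * before the put index as unusable, the usual way of telling a completely
 * full ring apart from a completely empty one when only get/put offsets are
 * kept.
 */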
/**
 *  tx_intr_handler - Transmit interrupt handler
 *  @nic : device private variable
 *  Description:
 *  If an interrupt was raised to indicate DMA complete of the
 *  Tx packet, this function is called. It identifies the last TxD
 *  whose buffer was freed and frees all skbs whose data have already
 *  DMA'ed into the NICs internal memory.
 *  Return Value:
 *  NONE
 */

static void tx_intr_handler(fifo_info_t *fifo_data)
{
	nic_t *nic = fifo_data->nic;
	struct net_device *dev = (struct net_device *) nic->dev;
	tx_curr_get_info_t get_info, put_info;
	struct sk_buff *skb;
	TxD_t *txdlp;

	get_info = fifo_data->tx_curr_get_info;
	put_info = fifo_data->tx_curr_put_info;
	txdlp = (TxD_t *) fifo_data->list_info[get_info.offset].
	    list_virt_addr;
	while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
	       (get_info.offset != put_info.offset) &&
	       (txdlp->Host_Control)) {
		/* Check for TxD errors */
		if (txdlp->Control_1 & TXD_T_CODE) {
			unsigned long long err;
			err = txdlp->Control_1 & TXD_T_CODE;
			if ((err >> 48) == 0xA) {
				DBG_PRINT(TX_DBG,
					  "TxD returned due to loss of link\n");
			} else {
				DBG_PRINT(ERR_DBG,
					  "***TxD error %llx\n", err);
			}
		}

		skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
		if (skb == NULL) {
			DBG_PRINT(ERR_DBG, "%s: Null skb ",
				  __FUNCTION__);
			DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
			return;
		}

		/* Updating the statistics block */
		nic->stats.tx_bytes += skb->len;
		dev_kfree_skb_irq(skb);

		get_info.offset++;
		get_info.offset %= get_info.fifo_len + 1;
		txdlp = (TxD_t *) fifo_data->list_info
		    [get_info.offset].list_virt_addr;
		fifo_data->tx_curr_get_info.offset =
		    get_info.offset;
	}

	spin_lock(&nic->tx_lock);
	if (netif_queue_stopped(dev))
		netif_wake_queue(dev);
	spin_unlock(&nic->tx_lock);
}
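/*
 * Note (added): get_info.offset %= get_info.fifo_len + 1 keeps the get
 * pointer inside a FIFO of fifo_len + 1 TxD lists; e.g. with fifo_len = 7
 * the offsets cycle 0..7 and then wrap back to 0. The example value is
 * illustrative only, actual FIFO lengths come from the tx_fifo_len module
 * parameter.
 */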
/**
 *  alarm_intr_handler - Alarm Interrupt handler
 *  @nic: device private variable
 *  Description: If the interrupt was neither because of Rx packet nor Tx
 *  complete, this function is called. If the interrupt was to indicate
 *  a loss of link, the OSM link status handler is invoked. For any other
 *  alarm interrupt the block that raised the interrupt is displayed
 *  and a H/W reset is issued.
 *  Return Value:
 *  NONE
 */

static void alarm_intr_handler(struct s2io_nic *nic)
{
	struct net_device *dev = (struct net_device *) nic->dev;
	XENA_dev_config_t __iomem *bar0 = nic->bar0;
	register u64 val64 = 0, err_reg = 0;

	/* Handling link status change error Intr */
	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		err_reg = readq(&bar0->mac_rmac_err_reg);
		writeq(err_reg, &bar0->mac_rmac_err_reg);
		if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
			schedule_work(&nic->set_link_task);
		}
	}

	/* Handling Ecc errors */
	val64 = readq(&bar0->mc_err_reg);
	writeq(val64, &bar0->mc_err_reg);
	if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
		if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
			nic->mac_control.stats_info->sw_stat.
				double_ecc_errs++;
			DBG_PRINT(INIT_DBG, "%s: Device indicates ",
				  dev->name);
			DBG_PRINT(INIT_DBG, "double ECC error!!\n");
			if (nic->device_type != XFRAME_II_DEVICE) {
				/* Reset XframeI only if critical error */
				if (val64 & (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
					     MC_ERR_REG_MIRI_ECC_DB_ERR_1)) {
					netif_stop_queue(dev);
					schedule_work(&nic->rst_timer_task);
				}
			}
		} else {
			nic->mac_control.stats_info->sw_stat.
				single_ecc_errs++;
		}
	}

	/* In case of a serious error, the device will be Reset. */
	val64 = readq(&bar0->serr_source);
	if (val64 & SERR_SOURCE_ANY) {
		DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
		DBG_PRINT(ERR_DBG, "serious error %llx!!\n",
			  (unsigned long long)val64);
		netif_stop_queue(dev);
		schedule_work(&nic->rst_timer_task);
	}

	/*
	 * Also as mentioned in the latest Errata sheets if the PCC_FB_ECC
	 * Error occurs, the adapter will be recycled by disabling the
	 * adapter enable bit and enabling it again after the device
	 * becomes Quiescent.
	 */
	val64 = readq(&bar0->pcc_err_reg);
	writeq(val64, &bar0->pcc_err_reg);
	if (val64 & PCC_FB_ECC_DB_ERR) {
		u64 ac = readq(&bar0->adapter_control);
		ac &= ~(ADAPTER_CNTL_EN);
		writeq(ac, &bar0->adapter_control);
		ac = readq(&bar0->adapter_control);
		schedule_work(&nic->set_link_task);
	}

	/* Other type of interrupts are not being handled now, TODO */
}
/**
 *  wait_for_cmd_complete - waits for a command to complete.
 *  @sp : private member of the device structure, which is a pointer to the
 *  s2io_nic structure.
 *  Description: Function that waits for a command to Write into RMAC
 *  ADDR DATA registers to be completed and returns either success or
 *  error depending on whether the command was complete or not.
 *  Return value:
 *   SUCCESS on success and FAILURE on failure.
 */

static int wait_for_cmd_complete(nic_t * sp)
{
	XENA_dev_config_t __iomem *bar0 = sp->bar0;
	int ret = FAILURE, cnt = 0;
	u64 val64;

	while (TRUE) {
		val64 = readq(&bar0->rmac_addr_cmd_mem);
		if (!(val64 & RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
			ret = SUCCESS;
			break;
		}
		msleep(50);
		if (cnt++ > 10)
			break;
	}

	return ret;
}
/**
 *  s2io_reset - Resets the card.
 *  @sp : private member of the device structure.
 *  Description: Function to Reset the card. This function then also
 *  restores the previously saved PCI configuration space registers as
 *  the card reset also resets the configuration space.
 *  Return value:
 *  void.
 */

void s2io_reset(nic_t * sp)
{
	XENA_dev_config_t __iomem *bar0 = sp->bar0;
	u64 val64;
	u16 subid, pci_cmd;

	/* Back up the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));

	val64 = SW_RESET_ALL;
	writeq(val64, &bar0->sw_reset);

	/*
	 * At this stage, if the PCI write is indeed completed, the
	 * card is reset and so is the PCI Config space of the device.
	 * So a read cannot be issued at this stage on any of the
	 * registers to ensure the write into "sw_reset" register
	 * has gone through.
	 * Question: Is there any system call that will explicitly force
	 * all the write commands still pending on the bus to be pushed
	 * through?
	 * As of now I am just giving a 250ms delay and hoping that the
	 * PCI write to sw_reset register is done by this time.
	 */
	msleep(250);

	/* Restore the PCI state saved during initialization. */
	pci_restore_state(sp->pdev);
	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
			      pci_cmd);
	msleep(250);

	/* Set swapper to enable I/O register access */
	s2io_set_swapper(sp);

	/* Restore the MSIX table entries from local variables */
	restore_xmsi_data(sp);

	/* Clear certain PCI/PCI-X fields after reset */
	if (sp->device_type == XFRAME_II_DEVICE) {
		/* Clear parity err detect bit */
		pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);

		/* Clearing PCIX Ecc status register */
		pci_write_config_dword(sp->pdev, 0x68, 0x7C);

		/* Clearing PCI_STATUS error reflected here */
		writeq(BIT(62), &bar0->txpic_int_reg);
	}

	/* Reset device statistics maintained by OS */
	memset(&sp->stats, 0, sizeof (struct net_device_stats));

	/* SXE-002: Configure link and activity LED to turn it off */
	subid = sp->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (sp->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	/*
	 * Clear spurious ECC interrupts that would have occurred on
	 * XFRAME II cards after reset.
	 */
	if (sp->device_type == XFRAME_II_DEVICE) {
		val64 = readq(&bar0->pcc_err_reg);
		writeq(val64, &bar0->pcc_err_reg);
	}

	sp->device_enabled_once = FALSE;
}
/**
 *  s2io_set_swapper - to set the swapper control on the card
 *  @sp : private member of the device structure,
 *  pointer to the s2io_nic structure.
 *  Description: Function to set the swapper control on the card
 *  correctly depending on the 'endianness' of the system.
 *  Return value:
 *  SUCCESS on success and FAILURE on failure.
 */

int s2io_set_swapper(nic_t * sp)
{
	struct net_device *dev = sp->dev;
	XENA_dev_config_t __iomem *bar0 = sp->bar0;
	u64 val64, valt, valr;

	/*
	 * Set proper endian settings and verify the same by reading
	 * the PIF Feed-back register.
	 */

	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x0123456789ABCDEFULL) {
		int i = 0;
		u64 value[] = { 0xC30000C3C30000C3ULL,	/* FE=1, SE=1 */
				0x8100008181000081ULL,	/* FE=1, SE=0 */
				0x4200004242000042ULL,	/* FE=0, SE=1 */
				0};			/* FE=0, SE=0 */

		while (i < 4) {
			writeq(value[i], &bar0->swapper_ctrl);
			val64 = readq(&bar0->pif_rd_swapper_fb);
			if (val64 == 0x0123456789ABCDEFULL)
				break;
			i++;
		}
		if (i == 4) {
			DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
				  dev->name);
			DBG_PRINT(ERR_DBG, "feedback read %llx\n",
				  (unsigned long long) val64);
			return FAILURE;
		}
	}

	valr = readq(&bar0->swapper_ctrl);

	valt = 0x0123456789ABCDEFULL;
	writeq(valt, &bar0->xmsi_address);
	val64 = readq(&bar0->xmsi_address);

	if (val64 != valt) {
		int i = 0;
		u64 value[] = { 0x00C3C30000C3C300ULL,	/* FE=1, SE=1 */
				0x0081810000818100ULL,	/* FE=1, SE=0 */
				0x0042420000424200ULL,	/* FE=0, SE=1 */
				0};			/* FE=0, SE=0 */

		while (i < 4) {
			writeq((value[i] | valr), &bar0->swapper_ctrl);
			writeq(valt, &bar0->xmsi_address);
			val64 = readq(&bar0->xmsi_address);
			if (val64 == valt)
				break;
			i++;
		}
		if (i == 4) {
			unsigned long long x = val64;
			DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
			DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
			return FAILURE;
		}
	}

	val64 = readq(&bar0->swapper_ctrl);
	val64 &= 0xFFFF000000000000ULL;

#ifdef __BIG_ENDIAN
	/*
	 * The device by default set to a big endian format, so a
	 * big endian driver need not set anything.
	 */
	val64 |= (SWAPPER_CTRL_TXP_FE |
		 SWAPPER_CTRL_TXP_SE |
		 SWAPPER_CTRL_TXD_R_FE |
		 SWAPPER_CTRL_TXD_W_FE |
		 SWAPPER_CTRL_TXF_R_FE |
		 SWAPPER_CTRL_RXD_R_FE |
		 SWAPPER_CTRL_RXD_W_FE |
		 SWAPPER_CTRL_RXF_W_FE |
		 SWAPPER_CTRL_XMSI_FE |
		 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
	if (sp->intr_type == INTA)
		val64 |= SWAPPER_CTRL_XMSI_SE;
	writeq(val64, &bar0->swapper_ctrl);
#else
	/*
	 * Initially we enable all bits to make it accessible by the
	 * driver, then we selectively enable only those bits that
	 * we want to set.
	 */
	val64 |= (SWAPPER_CTRL_TXP_FE |
		 SWAPPER_CTRL_TXP_SE |
		 SWAPPER_CTRL_TXD_R_FE |
		 SWAPPER_CTRL_TXD_R_SE |
		 SWAPPER_CTRL_TXD_W_FE |
		 SWAPPER_CTRL_TXD_W_SE |
		 SWAPPER_CTRL_TXF_R_FE |
		 SWAPPER_CTRL_RXD_R_FE |
		 SWAPPER_CTRL_RXD_R_SE |
		 SWAPPER_CTRL_RXD_W_FE |
		 SWAPPER_CTRL_RXD_W_SE |
		 SWAPPER_CTRL_RXF_W_FE |
		 SWAPPER_CTRL_XMSI_FE |
		 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
	if (sp->intr_type == INTA)
		val64 |= SWAPPER_CTRL_XMSI_SE;
	writeq(val64, &bar0->swapper_ctrl);
#endif
	val64 = readq(&bar0->swapper_ctrl);

	/*
	 * Verifying if endian settings are accurate by reading a
	 * feedback register.
	 */
	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x0123456789ABCDEFULL) {
		/* Endian settings are incorrect, calls for another dekko. */
		DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
			  dev->name);
		DBG_PRINT(ERR_DBG, "feedback read %llx\n",
			  (unsigned long long) val64);
		return FAILURE;
	}

	return SUCCESS;
}
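/*
 * Note (added): the 0x0123456789ABCDEF pattern is convenient here because
 * every byte is distinct; if the swapper is set up for the wrong byte order,
 * the feedback register reads back the byte-reversed value
 * 0xEFCDAB8967452301, which the checks above catch. Worked example only.
 */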
static int wait_for_msix_trans(nic_t *nic, int i)
{
	XENA_dev_config_t __iomem *bar0 = nic->bar0;
	u64 val64;
	int ret = 0, cnt = 0;

	do {
		val64 = readq(&bar0->xmsi_access);
		if (!(val64 & BIT(15)))
			break;
		mdelay(1);
		cnt++;
	} while (cnt < 5);
	if (cnt == 5) {
		DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
		ret = 1;
	}

	return ret;
}

void restore_xmsi_data(nic_t *nic)
{
	XENA_dev_config_t __iomem *bar0 = nic->bar0;
	u64 val64;
	int i;

	for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
		writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
		writeq(nic->msix_info[i].data, &bar0->xmsi_data);
		val64 = (BIT(7) | BIT(15) | vBIT(i, 26, 6));
		writeq(val64, &bar0->xmsi_access);
		if (wait_for_msix_trans(nic, i)) {
			DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
			continue;
		}
	}
}
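/*
 * Note (added): the xmsi_access writes appear to use BIT(15) as the
 * "start transaction" strobe (polled by wait_for_msix_trans()), BIT(7) to
 * request a write rather than a read, and vBIT(i, 26, 6) to select the
 * MSI-X table entry. This is inferred from how restore_xmsi_data() and
 * store_xmsi_data() differ; the register layout itself is not documented
 * here.
 */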
static void store_xmsi_data(nic_t *nic)
{
	XENA_dev_config_t __iomem *bar0 = nic->bar0;
	u64 val64, addr, data;
	int i;

	/* Store and display */
	for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
		val64 = (BIT(15) | vBIT(i, 26, 6));
		writeq(val64, &bar0->xmsi_access);
		if (wait_for_msix_trans(nic, i)) {
			DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
			continue;
		}
		addr = readq(&bar0->xmsi_address);
		data = readq(&bar0->xmsi_data);

		nic->msix_info[i].addr = addr;
		nic->msix_info[i].data = data;
	}
}
int s2io_enable_msi(nic_t *nic)
{
	XENA_dev_config_t __iomem *bar0 = nic->bar0;
	u16 msi_ctrl, msg_val;
	struct config_param *config = &nic->config;
	struct net_device *dev = nic->dev;
	u64 val64, tx_mat, rx_mat;
	int i, err;

	val64 = readq(&bar0->pic_control);
	val64 &= ~BIT(1);
	writeq(val64, &bar0->pic_control);

	err = pci_enable_msi(nic->pdev);
	if (err) {
		DBG_PRINT(ERR_DBG, "%s: enabling MSI failed\n",
			  nic->dev->name);
		return err;
	}

	/*
	 * Enable MSI and use MSI-1 instead of the standard MSI-0
	 * for interrupt handling.
	 */
	pci_read_config_word(nic->pdev, 0x4c, &msg_val);
	msg_val ^= 0x1;
	pci_write_config_word(nic->pdev, 0x4c, msg_val);
	pci_read_config_word(nic->pdev, 0x4c, &msg_val);

	pci_read_config_word(nic->pdev, 0x42, &msi_ctrl);
	msi_ctrl |= 0x10;
	pci_write_config_word(nic->pdev, 0x42, msi_ctrl);

	/* program MSI-1 into all usable Tx_Mat and Rx_Mat fields */
	tx_mat = readq(&bar0->tx_mat0_n[0]);
	for (i = 0; i < config->tx_fifo_num; i++) {
		tx_mat |= TX_MAT_SET(i, 1);
	}
	writeq(tx_mat, &bar0->tx_mat0_n[0]);

	rx_mat = readq(&bar0->rx_mat);
	for (i = 0; i < config->rx_ring_num; i++) {
		rx_mat |= RX_MAT_SET(i, 1);
	}
	writeq(rx_mat, &bar0->rx_mat);

	dev->irq = nic->pdev->irq;
	return 0;
}
int s2io_enable_msi_x(nic_t *nic)
{
	XENA_dev_config_t __iomem *bar0 = nic->bar0;
	u64 tx_mat, rx_mat;
	u16 msi_control; /* Temp variable */
	int ret, i, j, msix_indx = 1;

	nic->entries = kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct msix_entry),
			       GFP_KERNEL);
	if (nic->entries == NULL) {
		DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", __FUNCTION__);
		return -ENOMEM;
	}
	memset(nic->entries, 0, MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));

	nic->s2io_entries =
		kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry),
			GFP_KERNEL);
	if (nic->s2io_entries == NULL) {
		DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", __FUNCTION__);
		kfree(nic->entries);
		return -ENOMEM;
	}
	memset(nic->s2io_entries, 0,
	       MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));

	for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
		nic->entries[i].entry = i;
		nic->s2io_entries[i].entry = i;
		nic->s2io_entries[i].arg = NULL;
		nic->s2io_entries[i].in_use = 0;
	}

	tx_mat = readq(&bar0->tx_mat0_n[0]);
	for (i = 0; i < nic->config.tx_fifo_num; i++, msix_indx++) {
		tx_mat |= TX_MAT_SET(i, msix_indx);
		nic->s2io_entries[msix_indx].arg = &nic->mac_control.fifos[i];
		nic->s2io_entries[msix_indx].type = MSIX_FIFO_TYPE;
		nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
	}
	writeq(tx_mat, &bar0->tx_mat0_n[0]);

	if (!nic->config.bimodal) {
		rx_mat = readq(&bar0->rx_mat);
		for (j = 0; j < nic->config.rx_ring_num; j++, msix_indx++) {
			rx_mat |= RX_MAT_SET(j, msix_indx);
			nic->s2io_entries[msix_indx].arg = &nic->mac_control.rings[j];
			nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
			nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
		}
		writeq(rx_mat, &bar0->rx_mat);
	} else {
		tx_mat = readq(&bar0->tx_mat0_n[7]);
		for (j = 0; j < nic->config.rx_ring_num; j++, msix_indx++) {
			tx_mat |= TX_MAT_SET(i, msix_indx);
			nic->s2io_entries[msix_indx].arg = &nic->mac_control.rings[j];
			nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
			nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
		}
		writeq(tx_mat, &bar0->tx_mat0_n[7]);
	}

	ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X);
	if (ret) {
		DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
		kfree(nic->entries);
		kfree(nic->s2io_entries);
		nic->entries = NULL;
		nic->s2io_entries = NULL;
		return -ENOMEM;
	}

	/*
	 * To enable MSI-X, MSI also needs to be enabled, due to a bug
	 * in the herc NIC. (Temp change, needs to be removed later)
	 */
	pci_read_config_word(nic->pdev, 0x42, &msi_control);
	msi_control |= 0x1; /* Enable MSI */
	pci_write_config_word(nic->pdev, 0x42, msi_control);

	return 0;
}
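/*
 * Note (added): with msix_indx starting at 1, vector 0 is left for the
 * general/alarm interrupt and vectors 1..tx_fifo_num map to Tx FIFOs,
 * followed by one vector per Rx ring (programmed through rx_mat, or through
 * tx_mat0_n[7] in bimodal configurations). This summary is inferred from the
 * loops above.
 */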
/* ********************************************************* *
 * Functions defined below concern the OS part of the driver *
 * ********************************************************* */

/**
 *  s2io_open - open entry point of the driver
 *  @dev : pointer to the device structure.
 *  Description:
 *  This function is the open entry point of the driver. It mainly calls a
 *  function to allocate Rx buffers and inserts them into the buffer
 *  descriptors and then enables the Rx part of the NIC.
 *  Return value:
 *  0 on success and an appropriate (-)ve integer as defined in errno.h
 *  file on failure.
 */

static int s2io_open(struct net_device *dev)
{
	nic_t *sp = dev->priv;
	int err = 0;
	int i;
	u16 msi_control; /* Temp variable */

	/*
	 * Make sure you have link off by default every time
	 * Nic is initialized
	 */
	netif_carrier_off(dev);
	sp->last_link_state = 0;

	/* Initialize H/W and enable interrupts */
	if (s2io_card_up(sp)) {
		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
			  dev->name);
		err = -ENODEV;
		goto hw_init_failed;
	}

	/* Store the values of the MSIX table in the nic_t structure */
	store_xmsi_data(sp);

	/* After proper initialization of H/W, register ISR */
	if (sp->intr_type == MSI) {
		err = request_irq((int) sp->pdev->irq, s2io_msi_handle,
				  SA_SHIRQ, sp->name, dev);
		if (err) {
			DBG_PRINT(ERR_DBG, "%s: MSI registration failed\n",
				  dev->name);
			goto isr_registration_failed;
		}
	}
	if (sp->intr_type == MSI_X) {
		for (i = 1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) {
			if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) {
				sprintf(sp->desc1, "%s:MSI-X-%d-TX",
					dev->name, i);
				err = request_irq(sp->entries[i].vector,
					  s2io_msix_fifo_handle, 0, sp->desc1,
					  sp->s2io_entries[i].arg);
				DBG_PRINT(ERR_DBG, "%s @ 0x%llx\n", sp->desc1,
					  (unsigned long long)sp->msix_info[i].addr);
			} else {
				sprintf(sp->desc2, "%s:MSI-X-%d-RX",
					dev->name, i);
				err = request_irq(sp->entries[i].vector,
					  s2io_msix_ring_handle, 0, sp->desc2,
					  sp->s2io_entries[i].arg);
				DBG_PRINT(ERR_DBG, "%s @ 0x%llx\n", sp->desc2,
					  (unsigned long long)sp->msix_info[i].addr);
			}
			if (err) {
				DBG_PRINT(ERR_DBG, "%s: MSI-X-%d registration failed\n",
					  dev->name, i);
				DBG_PRINT(ERR_DBG, "Returned: %d\n", err);
				goto isr_registration_failed;
			}
			sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS;
		}
	}
	if (sp->intr_type == INTA) {
		err = request_irq((int) sp->pdev->irq, s2io_isr, SA_SHIRQ,
				  sp->name, dev);
		if (err) {
			DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
				  dev->name);
			goto isr_registration_failed;
		}
	}

	if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
		DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
		err = -ENODEV;
		goto setting_mac_address_failed;
	}

	netif_start_queue(dev);
	return 0;

setting_mac_address_failed:
	if (sp->intr_type != MSI_X)
		free_irq(sp->pdev->irq, dev);
isr_registration_failed:
	del_timer_sync(&sp->alarm_timer);
	if (sp->intr_type == MSI_X) {
		if (sp->device_type == XFRAME_II_DEVICE) {
			for (i = 1; (sp->s2io_entries[i].in_use ==
					MSIX_REGISTERED_SUCCESS); i++) {
				int vector = sp->entries[i].vector;
				void *arg = sp->s2io_entries[i].arg;

				free_irq(vector, arg);
			}
			pci_disable_msix(sp->pdev);

			/* Temp */
			pci_read_config_word(sp->pdev, 0x42, &msi_control);
			msi_control &= 0xFFFE; /* Disable MSI */
			pci_write_config_word(sp->pdev, 0x42, msi_control);
		}
	}
	else if (sp->intr_type == MSI)
		pci_disable_msi(sp->pdev);
hw_init_failed:
	if (sp->intr_type == MSI_X) {
		if (sp->entries)
			kfree(sp->entries);
		if (sp->s2io_entries)
			kfree(sp->s2io_entries);
	}
	return err;
}
/**
 *  s2io_close - close entry point of the driver
 *  @dev : device pointer.
 *  Description:
 *  This is the stop entry point of the driver. It needs to undo exactly
 *  whatever was done by the open entry point, thus it's usually referred to
 *  as the close function. Among other things this function mainly stops the
 *  Rx side of the NIC and frees all the Rx buffers in the Rx rings.
 *  Return value:
 *  0 on success and an appropriate (-)ve integer as defined in errno.h
 *  file on failure.
 */

static int s2io_close(struct net_device *dev)
{
	nic_t *sp = dev->priv;
	int i;
	u16 msi_control;

	flush_scheduled_work();
	netif_stop_queue(dev);
	/* Reset card, kill tasklet and free Tx and Rx buffers. */
	s2io_card_down(sp);

	if (sp->intr_type == MSI_X) {
		if (sp->device_type == XFRAME_II_DEVICE) {
			for (i = 1; (sp->s2io_entries[i].in_use ==
					MSIX_REGISTERED_SUCCESS); i++) {
				int vector = sp->entries[i].vector;
				void *arg = sp->s2io_entries[i].arg;

				free_irq(vector, arg);
			}
			pci_read_config_word(sp->pdev, 0x42, &msi_control);
			msi_control &= 0xFFFE; /* Disable MSI */
			pci_write_config_word(sp->pdev, 0x42, msi_control);

			pci_disable_msix(sp->pdev);
		}
	} else {
		free_irq(sp->pdev->irq, dev);
		if (sp->intr_type == MSI)
			pci_disable_msi(sp->pdev);
	}
	sp->device_close_flag = TRUE;	/* Device is shut down. */
	return 0;
}
/**
 *  s2io_xmit - Tx entry point of the driver
 *  @skb : the socket buffer containing the Tx data.
 *  @dev : device pointer.
 *  Description :
 *  This function is the Tx entry point of the driver. S2IO NIC supports
 *  certain protocol assist features on Tx side, namely CSO, S/G, LSO.
 *  NOTE: when device can't queue the pkt, just the trans_start variable will
 *  not be updated.
 *  Return value:
 *  0 on success & 1 on failure.
 */

static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
{
	nic_t *sp = dev->priv;
	u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
	u64 val64;
	TxD_t *txdp;
	TxFIFO_element_t __iomem *tx_fifo;
	unsigned long flags;
#ifdef NETIF_F_TSO
	int mss;
#endif
	u16 vlan_tag = 0;
	int vlan_priority = 0;
	mac_info_t *mac_control;
	struct config_param *config;

	mac_control = &sp->mac_control;
	config = &sp->config;

	DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
	spin_lock_irqsave(&sp->tx_lock, flags);
	if (atomic_read(&sp->card_state) == CARD_DOWN) {
		DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
			  dev->name);
		spin_unlock_irqrestore(&sp->tx_lock, flags);
		dev_kfree_skb(skb);
		return 0;
	}

	queue = 0;

	/* Get Fifo number to Transmit based on vlan priority */
	if (sp->vlgrp && vlan_tx_tag_present(skb)) {
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_priority = vlan_tag >> 13;
		queue = config->fifo_mapping[vlan_priority];
	}

	put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
	get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
	txdp = (TxD_t *) mac_control->fifos[queue].list_info[put_off].
		list_virt_addr;

	queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
	/* Avoid "put" pointer going beyond "get" pointer */
	if (txdp->Host_Control || (((put_off + 1) % queue_len) == get_off)) {
		DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
		netif_stop_queue(dev);
		dev_kfree_skb(skb);
		spin_unlock_irqrestore(&sp->tx_lock, flags);
		return 0;
	}

	/* A buffer with no data will be dropped */
	if (!skb->len) {
		DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
		dev_kfree_skb(skb);
		spin_unlock_irqrestore(&sp->tx_lock, flags);
		return 0;
	}

	txdp->Control_1 = 0;
	txdp->Control_2 = 0;
#ifdef NETIF_F_TSO
	mss = skb_shinfo(skb)->tso_size;
	if (mss) {
		txdp->Control_1 |= TXD_TCP_LSO_EN;
		txdp->Control_1 |= TXD_TCP_LSO_MSS(mss);
	}
#endif
	if (skb->ip_summed == CHECKSUM_HW) {
		txdp->Control_2 |=
		    (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
		     TXD_TX_CKO_UDP_EN);
	}
	txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
	txdp->Control_1 |= TXD_LIST_OWN_XENA;
	txdp->Control_2 |= config->tx_intr_type;

	if (sp->vlgrp && vlan_tx_tag_present(skb)) {
		txdp->Control_2 |= TXD_VLAN_ENABLE;
		txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
	}

	frg_len = skb->len - skb->data_len;
	if (skb_shinfo(skb)->ufo_size) {
		int ufo_size;

		ufo_size = skb_shinfo(skb)->ufo_size;
		txdp->Control_1 |= TXD_UFO_EN;
		txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
		txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
#ifdef __BIG_ENDIAN
		sp->ufo_in_band_v[put_off] =
				(u64)skb_shinfo(skb)->ip6_frag_id;
#else
		sp->ufo_in_band_v[put_off] =
				(u64)skb_shinfo(skb)->ip6_frag_id << 32;
#endif
		txdp->Host_Control = (unsigned long)sp->ufo_in_band_v;
		txdp->Buffer_Pointer = pci_map_single(sp->pdev,
					sp->ufo_in_band_v,
					sizeof(u64), PCI_DMA_TODEVICE);
		txdp++;
		txdp->Control_1 = 0;
		txdp->Control_2 = 0;
	}

	txdp->Buffer_Pointer = pci_map_single
	    (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
	txdp->Host_Control = (unsigned long) skb;
	txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);

	if (skb_shinfo(skb)->ufo_size)
		txdp->Control_1 |= TXD_UFO_EN;

	frg_cnt = skb_shinfo(skb)->nr_frags;
	/* For fragmented SKB. */
	for (i = 0; i < frg_cnt; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		/* A '0' length fragment will be ignored */
		if (!frag->size)
			continue;
		txdp++;
		txdp->Buffer_Pointer = (u64) pci_map_page
		    (sp->pdev, frag->page, frag->page_offset,
		     frag->size, PCI_DMA_TODEVICE);
		txdp->Control_1 |= TXD_BUFFER0_SIZE(frag->size);
		if (skb_shinfo(skb)->ufo_size)
			txdp->Control_1 |= TXD_UFO_EN;
	}
	txdp->Control_1 |= TXD_GATHER_CODE_LAST;

	if (skb_shinfo(skb)->ufo_size)
		frg_cnt++; /* as Txd0 was used for inband header */

	tx_fifo = mac_control->tx_FIFO_start[queue];
	val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
	writeq(val64, &tx_fifo->TxDL_Pointer);

	val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
		 TX_FIFO_LAST_LIST);

#ifdef NETIF_F_TSO
	if (mss)
		val64 |= TX_FIFO_SPECIAL_FUNC;
#endif
	if (skb_shinfo(skb)->ufo_size)
		val64 |= TX_FIFO_SPECIAL_FUNC;
	writeq(val64, &tx_fifo->List_Control);

	put_off++;
	put_off %= mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
	mac_control->fifos[queue].tx_curr_put_info.offset = put_off;

	/* Avoid "put" pointer going beyond "get" pointer */
	if (((put_off + 1) % queue_len) == get_off) {
		DBG_PRINT(TX_DBG,
			  "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
			  put_off, get_off);
		netif_stop_queue(dev);
	}

	dev->trans_start = jiffies;
	spin_unlock_irqrestore(&sp->tx_lock, flags);

	return 0;
}
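/*
 * Note (added): the TxD list is declared full one descriptor early, i.e.
 * when (put_off + 1) % queue_len == get_off, both before queueing (the skb
 * is dropped) and after advancing put_off (the queue is stopped so that
 * tx_intr_handler() can wake it once descriptors are reclaimed).
 */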
static void
s2io_alarm_handle(unsigned long data)
{
	nic_t *sp = (nic_t *)data;

	alarm_intr_handler(sp);
	mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
}
static irqreturn_t
s2io_msi_handle(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *) dev_id;
	nic_t *sp = dev->priv;
	int i;
	int ret;
	mac_info_t *mac_control;
	struct config_param *config;

	atomic_inc(&sp->isr_cnt);
	mac_control = &sp->mac_control;
	config = &sp->config;
	DBG_PRINT(INTR_DBG, "%s: MSI handler\n", __FUNCTION__);

	/* If Intr is because of Rx Traffic */
	for (i = 0; i < config->rx_ring_num; i++)
		rx_intr_handler(&mac_control->rings[i]);

	/* If Intr is because of Tx Traffic */
	for (i = 0; i < config->tx_fifo_num; i++)
		tx_intr_handler(&mac_control->fifos[i]);

	/*
	 * If the Rx buffer count is below the panic threshold then
	 * reallocate the buffers from the interrupt handler itself,
	 * else schedule a tasklet to reallocate the buffers.
	 */
	for (i = 0; i < config->rx_ring_num; i++) {
		int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
		int level = rx_buffer_level(sp, rxb_size, i);

		if ((level == PANIC) && (!TASKLET_IN_USE)) {
			DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", dev->name);
			DBG_PRINT(INTR_DBG, "PANIC levels\n");
			if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
				DBG_PRINT(ERR_DBG, "%s:Out of memory",
					  dev->name);
				DBG_PRINT(ERR_DBG, " in ISR!!\n");
				clear_bit(0, (&sp->tasklet_status));
				atomic_dec(&sp->isr_cnt);
				return IRQ_HANDLED;
			}
			clear_bit(0, (&sp->tasklet_status));
		} else if (level == LOW) {
			tasklet_schedule(&sp->task);
		}
	}

	atomic_dec(&sp->isr_cnt);
	return IRQ_HANDLED;
}
static irqreturn_t
s2io_msix_ring_handle(int irq, void *dev_id, struct pt_regs *regs)
{
	ring_info_t *ring = (ring_info_t *)dev_id;
	nic_t *sp = ring->nic;
	int rxb_size, level, rng_n;

	atomic_inc(&sp->isr_cnt);
	rx_intr_handler(ring);

	rng_n = ring->ring_no;
	rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]);
	level = rx_buffer_level(sp, rxb_size, rng_n);

	if ((level == PANIC) && (!TASKLET_IN_USE)) {
		int ret;
		DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__);
		DBG_PRINT(INTR_DBG, "PANIC levels\n");
		if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) {
			DBG_PRINT(ERR_DBG, "Out of memory in %s",
				  __FUNCTION__);
			clear_bit(0, (&sp->tasklet_status));
			return IRQ_HANDLED;
		}
		clear_bit(0, (&sp->tasklet_status));
	} else if (level == LOW) {
		tasklet_schedule(&sp->task);
	}
	atomic_dec(&sp->isr_cnt);

	return IRQ_HANDLED;
}
static irqreturn_t
s2io_msix_fifo_handle(int irq, void *dev_id, struct pt_regs *regs)
{
	fifo_info_t *fifo = (fifo_info_t *)dev_id;
	nic_t *sp = fifo->nic;

	atomic_inc(&sp->isr_cnt);
	tx_intr_handler(fifo);
	atomic_dec(&sp->isr_cnt);
	return IRQ_HANDLED;
}
static void s2io_txpic_intr_handle(nic_t *sp)
{
	XENA_dev_config_t __iomem *bar0 = sp->bar0;
	u64 val64;

	val64 = readq(&bar0->pic_int_status);
	if (val64 & PIC_INT_GPIO) {
		val64 = readq(&bar0->gpio_int_reg);
		if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
		    (val64 & GPIO_INT_REG_LINK_UP)) {
			val64 |= GPIO_INT_REG_LINK_DOWN;
			val64 |= GPIO_INT_REG_LINK_UP;
			writeq(val64, &bar0->gpio_int_reg);
		}

		if (((sp->last_link_state == LINK_UP) &&
		     (val64 & GPIO_INT_REG_LINK_DOWN)) ||
		    ((sp->last_link_state == LINK_DOWN) &&
		     (val64 & GPIO_INT_REG_LINK_UP))) {
			val64 = readq(&bar0->gpio_int_mask);
			val64 |= GPIO_INT_MASK_LINK_DOWN;
			val64 |= GPIO_INT_MASK_LINK_UP;
			writeq(val64, &bar0->gpio_int_mask);
			s2io_set_link((unsigned long)sp);
		}

		if (sp->last_link_state == LINK_UP) {
			/* enable down interrupt */
			val64 = readq(&bar0->gpio_int_mask);
			/* unmasks link down intr */
			val64 &= ~GPIO_INT_MASK_LINK_DOWN;
			/* masks link up intr */
			val64 |= GPIO_INT_MASK_LINK_UP;
			writeq(val64, &bar0->gpio_int_mask);
		} else {
			/* enable UP Interrupt */
			val64 = readq(&bar0->gpio_int_mask);
			/* unmasks link up interrupt */
			val64 &= ~GPIO_INT_MASK_LINK_UP;
			/* masks link down interrupt */
			val64 |= GPIO_INT_MASK_LINK_DOWN;
			writeq(val64, &bar0->gpio_int_mask);
		}
	}
}
/**
 *  s2io_isr - ISR handler of the device.
 *  @irq: the irq of the device.
 *  @dev_id: a void pointer to the dev structure of the NIC.
 *  @pt_regs: pointer to the registers pushed on the stack.
 *  Description: This function is the ISR handler of the device. It
 *  identifies the reason for the interrupt and calls the relevant
 *  service routines. As a contingency measure, this ISR allocates the
 *  recv buffers, if their numbers are below the panic value which is
 *  presently set to 25% of the original number of rcv buffers allocated.
 *  Return value:
 *  IRQ_HANDLED: will be returned if IRQ was handled by this routine
 *  IRQ_NONE: will be returned if interrupt is not from our device
 */
static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *) dev_id;
	nic_t *sp = dev->priv;
	XENA_dev_config_t __iomem *bar0 = sp->bar0;
	int i;
	u64 reason = 0, val64;
	mac_info_t *mac_control;
	struct config_param *config;

	atomic_inc(&sp->isr_cnt);
	mac_control = &sp->mac_control;
	config = &sp->config;

	/*
	 * Identify the cause for interrupt and call the appropriate
	 * interrupt handler. Causes for the interrupt could be;
	 * 1. Rx of packet.
	 * 2. Tx complete.
	 * 3. Link down.
	 * 4. Error in any functional blocks of the NIC.
	 */
	reason = readq(&bar0->general_int_status);

	if (!reason) {
		/* The interrupt was not raised by Xena. */
		atomic_dec(&sp->isr_cnt);
		return IRQ_NONE;
	}

#ifdef CONFIG_S2IO_NAPI
	if (reason & GEN_INTR_RXTRAFFIC) {
		if (netif_rx_schedule_prep(dev)) {
			en_dis_able_nic_intrs(sp, RX_TRAFFIC_INTR,
					      DISABLE_INTRS);
			__netif_rx_schedule(dev);
		}
	}
#else
	/* If Intr is because of Rx Traffic */
	if (reason & GEN_INTR_RXTRAFFIC) {
		/*
		 * rx_traffic_int reg is an R1 register, writing all 1's
		 * will ensure that the actual interrupt causing bit gets
		 * cleared and hence a read can be avoided.
		 */
		val64 = 0xFFFFFFFFFFFFFFFFULL;
		writeq(val64, &bar0->rx_traffic_int);
		for (i = 0; i < config->rx_ring_num; i++) {
			rx_intr_handler(&mac_control->rings[i]);
		}
	}
#endif

	/* If Intr is because of Tx Traffic */
	if (reason & GEN_INTR_TXTRAFFIC) {
		/*
		 * tx_traffic_int reg is an R1 register, writing all 1's
		 * will ensure that the actual interrupt causing bit gets
		 * cleared and hence a read can be avoided.
		 */
		val64 = 0xFFFFFFFFFFFFFFFFULL;
		writeq(val64, &bar0->tx_traffic_int);

		for (i = 0; i < config->tx_fifo_num; i++)
			tx_intr_handler(&mac_control->fifos[i]);
	}

	if (reason & GEN_INTR_TXPIC)
		s2io_txpic_intr_handle(sp);
	/*
	 * If the Rx buffer count is below the panic threshold then
	 * reallocate the buffers from the interrupt handler itself,
	 * else schedule a tasklet to reallocate the buffers.
	 */
#ifndef CONFIG_S2IO_NAPI
	for (i = 0; i < config->rx_ring_num; i++) {
		int ret;
		int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
		int level = rx_buffer_level(sp, rxb_size, i);

		if ((level == PANIC) && (!TASKLET_IN_USE)) {
			DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", dev->name);
			DBG_PRINT(INTR_DBG, "PANIC levels\n");
			if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
				DBG_PRINT(ERR_DBG, "%s:Out of memory",
					  dev->name);
				DBG_PRINT(ERR_DBG, " in ISR!!\n");
				clear_bit(0, (&sp->tasklet_status));
				atomic_dec(&sp->isr_cnt);
				return IRQ_HANDLED;
			}
			clear_bit(0, (&sp->tasklet_status));
		} else if (level == LOW) {
			tasklet_schedule(&sp->task);
		}
	}
#endif

	atomic_dec(&sp->isr_cnt);
	return IRQ_HANDLED;
}
static void s2io_updt_stats(nic_t *sp)
{
	XENA_dev_config_t __iomem *bar0 = sp->bar0;
	u64 val64;
	int cnt = 0;

	if (atomic_read(&sp->card_state) == CARD_UP) {
		/* Apprx 30us on a 133 MHz bus */
		val64 = SET_UPDT_CLICKS(10) |
			STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
		writeq(val64, &bar0->stat_cfg);
		do {
			udelay(100);
			val64 = readq(&bar0->stat_cfg);
			if (!(val64 & BIT(0)))
				break;
			cnt++;
			if (cnt == 5)
				break; /* Updt failed */
		} while (1);
	}
}
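/*
 * Note (added): STAT_CFG_ONE_SHOT_EN requests a single DMA of the hardware
 * statistics block into host memory; the loop above polls stat_cfg and gives
 * up after roughly 5 * 100us. Reading BIT(0) as the "update in progress"
 * flag is inferred from this usage, not from documentation quoted here.
 */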
/**
 *  s2io_get_stats - Updates the device statistics structure.
 *  @dev : pointer to the device structure.
 *  Description:
 *  This function updates the device statistics structure in the s2io_nic
 *  structure and returns a pointer to the same.
 *  Return value:
 *  pointer to the updated net_device_stats structure.
 */

static struct net_device_stats *s2io_get_stats(struct net_device *dev)
{
	nic_t *sp = dev->priv;
	mac_info_t *mac_control;
	struct config_param *config;

	mac_control = &sp->mac_control;
	config = &sp->config;

	/* Configure Stats for immediate updt */
	s2io_updt_stats(sp);

	sp->stats.tx_packets =
		le32_to_cpu(mac_control->stats_info->tmac_frms);
	sp->stats.tx_errors =
		le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
	sp->stats.rx_errors =
		le32_to_cpu(mac_control->stats_info->rmac_drop_frms);
	sp->stats.multicast =
		le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
	sp->stats.rx_length_errors =
		le32_to_cpu(mac_control->stats_info->rmac_long_frms);

	return (&sp->stats);
}
3968 * s2io_set_multicast - entry point for multicast address enable/disable.
3969 * @dev : pointer to the device structure
3971 * This function is a driver entry point which gets called by the kernel
3972 * whenever multicast addresses must be enabled/disabled. This also gets
3973 * called to set/reset promiscuous mode. Depending on the deivce flag, we
3974 * determine, if multicast address must be enabled or if promiscuous mode
3975 * is to be disabled etc.
3980 static void s2io_set_multicast(struct net_device
*dev
)
3983 struct dev_mc_list
*mclist
;
3984 nic_t
*sp
= dev
->priv
;
3985 XENA_dev_config_t __iomem
*bar0
= sp
->bar0
;
3986 u64 val64
= 0, multi_mac
= 0x010203040506ULL
, mask
=
3988 u64 dis_addr
= 0xffffffffffffULL
, mac_addr
= 0;
3991 if ((dev
->flags
& IFF_ALLMULTI
) && (!sp
->m_cast_flg
)) {
3992 /* Enable all Multicast addresses */
3993 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac
),
3994 &bar0
->rmac_addr_data0_mem
);
3995 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask
),
3996 &bar0
->rmac_addr_data1_mem
);
3997 val64
= RMAC_ADDR_CMD_MEM_WE
|
3998 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD
|
3999 RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET
);
4000 writeq(val64
, &bar0
->rmac_addr_cmd_mem
);
4001 /* Wait till command completes */
4002 wait_for_cmd_complete(sp
);
4005 sp
->all_multi_pos
= MAC_MC_ALL_MC_ADDR_OFFSET
;
4006 } else if ((dev
->flags
& IFF_ALLMULTI
) && (sp
->m_cast_flg
)) {
4007 /* Disable all Multicast addresses */
4008 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr
),
4009 &bar0
->rmac_addr_data0_mem
);
4010 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
4011 &bar0
->rmac_addr_data1_mem
);
4012 val64
= RMAC_ADDR_CMD_MEM_WE
|
4013 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD
|
4014 RMAC_ADDR_CMD_MEM_OFFSET(sp
->all_multi_pos
);
4015 writeq(val64
, &bar0
->rmac_addr_cmd_mem
);
4016 /* Wait till command completes */
4017 wait_for_cmd_complete(sp
);
4020 sp
->all_multi_pos
= 0;
4023 if ((dev
->flags
& IFF_PROMISC
) && (!sp
->promisc_flg
)) {
4024 /* Put the NIC into promiscuous mode */
4025 add
= &bar0
->mac_cfg
;
4026 val64
= readq(&bar0
->mac_cfg
);
4027 val64
|= MAC_CFG_RMAC_PROM_ENABLE
;
4029 writeq(RMAC_CFG_KEY(0x4C0D), &bar0
->rmac_cfg_key
);
4030 writel((u32
) val64
, add
);
4031 writeq(RMAC_CFG_KEY(0x4C0D), &bar0
->rmac_cfg_key
);
4032 writel((u32
) (val64
>> 32), (add
+ 4));
4034 val64
= readq(&bar0
->mac_cfg
);
4035 sp
->promisc_flg
= 1;
4036 DBG_PRINT(INFO_DBG
, "%s: entered promiscuous mode\n",
4038 } else if (!(dev
->flags
& IFF_PROMISC
) && (sp
->promisc_flg
)) {
4039 /* Remove the NIC from promiscuous mode */
4040 add
= &bar0
->mac_cfg
;
4041 val64
= readq(&bar0
->mac_cfg
);
4042 val64
&= ~MAC_CFG_RMAC_PROM_ENABLE
;
4044 writeq(RMAC_CFG_KEY(0x4C0D), &bar0
->rmac_cfg_key
);
4045 writel((u32
) val64
, add
);
4046 writeq(RMAC_CFG_KEY(0x4C0D), &bar0
->rmac_cfg_key
);
4047 writel((u32
) (val64
>> 32), (add
+ 4));
4049 val64
= readq(&bar0
->mac_cfg
);
4050 sp
->promisc_flg
= 0;
4051 DBG_PRINT(INFO_DBG
, "%s: left promiscuous mode\n",
4055 /* Update individual M_CAST address list */
4056 if ((!sp
->m_cast_flg
) && dev
->mc_count
) {
4058 (MAX_ADDRS_SUPPORTED
- MAC_MC_ADDR_START_OFFSET
- 1)) {
4059 DBG_PRINT(ERR_DBG
, "%s: No more Rx filters ",
4061 DBG_PRINT(ERR_DBG
, "can be added, please enable ");
4062 DBG_PRINT(ERR_DBG
, "ALL_MULTI instead\n");
4066 prev_cnt
= sp
->mc_addr_count
;
4067 sp
->mc_addr_count
= dev
->mc_count
;
4069 /* Clear out the previous list of Mc in the H/W. */
4070 for (i
= 0; i
< prev_cnt
; i
++) {
4071 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr
),
4072 &bar0
->rmac_addr_data0_mem
);
4073 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4074 &bar0
->rmac_addr_data1_mem
);
4075 val64
= RMAC_ADDR_CMD_MEM_WE
|
4076 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD
|
4077 RMAC_ADDR_CMD_MEM_OFFSET
4078 (MAC_MC_ADDR_START_OFFSET
+ i
);
4079 writeq(val64
, &bar0
->rmac_addr_cmd_mem
);
4081 /* Wait for command completes */
4082 if (wait_for_cmd_complete(sp
)) {
4083 DBG_PRINT(ERR_DBG
, "%s: Adding ",
4085 DBG_PRINT(ERR_DBG
, "Multicasts failed\n");
4090 /* Create the new Rx filter list and update the same in H/W. */
4091 for (i
= 0, mclist
= dev
->mc_list
; i
< dev
->mc_count
;
4092 i
++, mclist
= mclist
->next
) {
4093 memcpy(sp
->usr_addrs
[i
].addr
, mclist
->dmi_addr
,
4095 for (j
= 0; j
< ETH_ALEN
; j
++) {
4096 mac_addr
|= mclist
->dmi_addr
[j
];
4100 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr
),
4101 &bar0
->rmac_addr_data0_mem
);
4102 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4103 &bar0
->rmac_addr_data1_mem
);
4104 val64
= RMAC_ADDR_CMD_MEM_WE
|
4105 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD
|
4106 RMAC_ADDR_CMD_MEM_OFFSET
4107 (i
+ MAC_MC_ADDR_START_OFFSET
);
4108 writeq(val64
, &bar0
->rmac_addr_cmd_mem
);
4110 /* Wait for command completes */
4111 if (wait_for_cmd_complete(sp
)) {
4112 DBG_PRINT(ERR_DBG
, "%s: Adding ",
4114 DBG_PRINT(ERR_DBG
, "Multicasts failed\n");
/**
 * s2io_set_mac_addr - Programs the Xframe mac address
 * @dev : pointer to the device structure.
 * @addr: a uchar pointer to the new mac address which is to be set.
 * Description : This procedure will program the Xframe to receive
 * frames with the new Mac Address.
 * Return value: SUCCESS on success and an appropriate (-)ve integer
 * as defined in errno.h file on failure.
 */

int s2io_set_mac_addr(struct net_device *dev, u8 *addr)
{
        nic_t *sp = dev->priv;
        XENA_dev_config_t __iomem *bar0 = sp->bar0;
        register u64 val64, mac_addr = 0;
        int i;

        /*
         * Set the new MAC address as the new unicast filter and reflect this
         * change on the device address registered with the OS.
         */
        for (i = 0; i < ETH_ALEN; i++) {
                mac_addr <<= 8;
                mac_addr |= addr[i];
        }

        writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
               &bar0->rmac_addr_data0_mem);

        val64 = RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
            RMAC_ADDR_CMD_MEM_OFFSET(0);
        writeq(val64, &bar0->rmac_addr_cmd_mem);
        /* Wait till command completes */
        if (wait_for_cmd_complete(sp)) {
                DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
                return FAILURE;
        }

        return SUCCESS;
}
/**
 * s2io_ethtool_sset - Sets different link parameters.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @info: pointer to the structure with parameters given by ethtool to set
 * link information.
 * Description:
 * The function sets different link parameters provided by the user onto
 * the NIC.
 * Return value:
 * 0 on success.
 */

static int s2io_ethtool_sset(struct net_device *dev,
                             struct ethtool_cmd *info)
{
        nic_t *sp = dev->priv;

        if ((info->autoneg == AUTONEG_ENABLE) ||
            (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
                return -EFAULT;
        else {
                s2io_close(sp->dev);
                s2io_open(sp->dev);
        }

        return 0;
}
/**
 * s2io_ethtool_gset - Return link specific information.
 * @sp : private member of the device structure, pointer to the
 * s2io_nic structure.
 * @info : pointer to the structure with parameters given by ethtool
 * to return link information.
 * Description:
 * Returns link specific information like speed, duplex etc.. to ethtool.
 * Return value :
 * return 0 on success.
 */

static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
{
        nic_t *sp = dev->priv;

        info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
        info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
        info->port = PORT_FIBRE;
        /* info->transceiver ?? TODO */

        if (netif_carrier_ok(sp->dev)) {
                info->speed = 10000;
                info->duplex = DUPLEX_FULL;
        } else {
                info->speed = -1;
                info->duplex = -1;
        }

        info->autoneg = AUTONEG_DISABLE;
        return 0;
}
/**
 * s2io_ethtool_gdrvinfo - Returns driver specific information.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @info : pointer to the structure with parameters given by ethtool to
 * return driver information.
 * Description:
 * Returns driver specific information like name, version etc.. to ethtool.
 * Return value:
 * void
 */

static void s2io_ethtool_gdrvinfo(struct net_device *dev,
                                  struct ethtool_drvinfo *info)
{
        nic_t *sp = dev->priv;

        strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
        strncpy(info->version, s2io_driver_version, sizeof(info->version));
        strncpy(info->fw_version, "", sizeof(info->fw_version));
        strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
        info->regdump_len = XENA_REG_SPACE;
        info->eedump_len = XENA_EEPROM_SPACE;
        info->testinfo_len = S2IO_TEST_LEN;
        info->n_stats = S2IO_STAT_LEN;
}
/**
 * s2io_ethtool_gregs - dumps the entire register space of Xframe into the buffer.
 * @sp: private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @regs : pointer to the structure with parameters given by ethtool for
 * dumping the registers.
 * @reg_space: The input argument into which all the registers are dumped.
 * Description:
 * Dumps the entire register space of the xFrame NIC into the user given
 * buffer area.
 * Return value :
 * void
 */

static void s2io_ethtool_gregs(struct net_device *dev,
                               struct ethtool_regs *regs, void *space)
{
        int i;
        u64 reg;
        u8 *reg_space = (u8 *) space;
        nic_t *sp = dev->priv;

        regs->len = XENA_REG_SPACE;
        regs->version = sp->pdev->subsystem_device;

        for (i = 0; i < regs->len; i += 8) {
                reg = readq(sp->bar0 + i);
                memcpy((reg_space + i), &reg, 8);
        }
}
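/*
 * Added usage note (not in the original source): this handler backs the
 * ETHTOOL_GREGS request, so the full BAR0 register space can be captured
 * from user space with the ethtool utility, for example:
 *
 *     ethtool -d eth0 raw on > xframe-regs.bin
 *
 * (the interface name is only an example; any interface bound to this driver
 * works). Each 64-bit register is copied verbatim, so the resulting dump is
 * XENA_REG_SPACE bytes long.
 */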
/**
 * s2io_phy_id - timer function that alternates the adapter LED.
 * @data : address of the private member of the device structure, which
 * is a pointer to the s2io_nic structure, provided as an u32.
 * Description: This is actually the timer function that toggles the
 * adapter LED bit of the adapter control register on every invocation.
 * The timer is set for 1/2 a second, hence the NIC blinks once every second.
 */
static void s2io_phy_id(unsigned long data)
{
        nic_t *sp = (nic_t *) data;
        XENA_dev_config_t __iomem *bar0 = sp->bar0;
        u64 val64 = 0;
        u16 subid;

        subid = sp->pdev->subsystem_device;
        if ((sp->device_type == XFRAME_II_DEVICE) ||
            ((subid & 0xFF) >= 0x07)) {
                val64 = readq(&bar0->gpio_control);
                val64 ^= GPIO_CTRL_GPIO_0;
                writeq(val64, &bar0->gpio_control);
        } else {
                val64 = readq(&bar0->adapter_control);
                val64 ^= ADAPTER_LED_ON;
                writeq(val64, &bar0->adapter_control);
        }

        mod_timer(&sp->id_timer, jiffies + HZ / 2);
}
/**
 * s2io_ethtool_idnic - To physically identify the NIC on the system.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @id : pointer to the structure with identification parameters given by
 * ethtool.
 * Description: Used to physically identify the NIC on the system.
 * The Link LED will blink for a time specified by the user for
 * identification.
 * NOTE: The Link has to be Up to be able to blink the LED. Hence
 * identification is possible only if its link is up.
 * Return value:
 * int , returns 0 on success
 */
4327 static int s2io_ethtool_idnic(struct net_device
*dev
, u32 data
)
4329 u64 val64
= 0, last_gpio_ctrl_val
;
4330 nic_t
*sp
= dev
->priv
;
4331 XENA_dev_config_t __iomem
*bar0
= sp
->bar0
;
4334 subid
= sp
->pdev
->subsystem_device
;
4335 last_gpio_ctrl_val
= readq(&bar0
->gpio_control
);
4336 if ((sp
->device_type
== XFRAME_I_DEVICE
) &&
4337 ((subid
& 0xFF) < 0x07)) {
4338 val64
= readq(&bar0
->adapter_control
);
4339 if (!(val64
& ADAPTER_CNTL_EN
)) {
4341 "Adapter Link down, cannot blink LED\n");
4345 if (sp
->id_timer
.function
== NULL
) {
4346 init_timer(&sp
->id_timer
);
4347 sp
->id_timer
.function
= s2io_phy_id
;
4348 sp
->id_timer
.data
= (unsigned long) sp
;
4350 mod_timer(&sp
->id_timer
, jiffies
);
4352 msleep_interruptible(data
* HZ
);
4354 msleep_interruptible(MAX_FLICKER_TIME
);
4355 del_timer_sync(&sp
->id_timer
);
4357 if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp
->device_type
, subid
)) {
4358 writeq(last_gpio_ctrl_val
, &bar0
->gpio_control
);
4359 last_gpio_ctrl_val
= readq(&bar0
->gpio_control
);
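/*
 * Added usage note (not part of the original source): s2io_phy_id and
 * s2io_ethtool_idnic together implement the ethtool "identify" operation,
 * wired up below as the .phys_id handler. From user space the LED blink can
 * be triggered for a given number of seconds with, for example:
 *
 *     ethtool -p eth0 10
 *
 * (interface name and duration are illustrative). If no duration is given,
 * the driver falls back to blinking for MAX_FLICKER_TIME instead.
 */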
/**
 * s2io_ethtool_getpause_data - Pause frame generation and reception.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @ep : pointer to the structure with pause parameters given by ethtool.
 * Description:
 * Returns the Pause frame generation and reception capability of the NIC.
 * Return value:
 * void
 */
static void s2io_ethtool_getpause_data(struct net_device *dev,
                                       struct ethtool_pauseparam *ep)
{
        u64 val64;
        nic_t *sp = dev->priv;
        XENA_dev_config_t __iomem *bar0 = sp->bar0;

        val64 = readq(&bar0->rmac_pause_cfg);
        if (val64 & RMAC_PAUSE_GEN_ENABLE)
                ep->tx_pause = TRUE;
        if (val64 & RMAC_PAUSE_RX_ENABLE)
                ep->rx_pause = TRUE;
        ep->autoneg = FALSE;
}
/**
 * s2io_ethtool_setpause_data - set/reset pause frame generation.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @ep : pointer to the structure with pause parameters given by ethtool.
 * Description:
 * It can be used to set or reset Pause frame generation or reception
 * support of the NIC.
 * Return value:
 * int, returns 0 on Success
 */

static int s2io_ethtool_setpause_data(struct net_device *dev,
                                      struct ethtool_pauseparam *ep)
{
        u64 val64 = 0;
        nic_t *sp = dev->priv;
        XENA_dev_config_t __iomem *bar0 = sp->bar0;

        val64 = readq(&bar0->rmac_pause_cfg);
        if (ep->tx_pause)
                val64 |= RMAC_PAUSE_GEN_ENABLE;
        else
                val64 &= ~RMAC_PAUSE_GEN_ENABLE;
        if (ep->rx_pause)
                val64 |= RMAC_PAUSE_RX_ENABLE;
        else
                val64 &= ~RMAC_PAUSE_RX_ENABLE;
        writeq(val64, &bar0->rmac_pause_cfg);
        return 0;
}
/**
 * read_eeprom - reads 4 bytes of data from the user given offset.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @off : offset at which the data must be read
 * @data : This is an output parameter where the data read at the given
 * offset is stored.
 * Description:
 * Will read 4 bytes of data from the user given offset and return the
 * read data.
 * NOTE: Will allow to read only part of the EEPROM visible through the
 * I2C bus.
 * Return value:
 * -1 on failure and 0 on success.
 */
4438 #define S2IO_DEV_ID 5
4439 static int read_eeprom(nic_t
* sp
, int off
, u64
* data
)
4444 XENA_dev_config_t __iomem
*bar0
= sp
->bar0
;
4446 if (sp
->device_type
== XFRAME_I_DEVICE
) {
4447 val64
= I2C_CONTROL_DEV_ID(S2IO_DEV_ID
) | I2C_CONTROL_ADDR(off
) |
4448 I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ
|
4449 I2C_CONTROL_CNTL_START
;
4450 SPECIAL_REG_WRITE(val64
, &bar0
->i2c_control
, LF
);
4452 while (exit_cnt
< 5) {
4453 val64
= readq(&bar0
->i2c_control
);
4454 if (I2C_CONTROL_CNTL_END(val64
)) {
4455 *data
= I2C_CONTROL_GET_DATA(val64
);
4464 if (sp
->device_type
== XFRAME_II_DEVICE
) {
4465 val64
= SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1
|
4466 SPI_CONTROL_BYTECNT(0x3) |
4467 SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off
);
4468 SPECIAL_REG_WRITE(val64
, &bar0
->spi_control
, LF
);
4469 val64
|= SPI_CONTROL_REQ
;
4470 SPECIAL_REG_WRITE(val64
, &bar0
->spi_control
, LF
);
4471 while (exit_cnt
< 5) {
4472 val64
= readq(&bar0
->spi_control
);
4473 if (val64
& SPI_CONTROL_NACK
) {
4476 } else if (val64
& SPI_CONTROL_DONE
) {
4477 *data
= readq(&bar0
->spi_data
);
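/*
 * Added commentary (not in the original source): both EEPROM paths above use
 * the same fire-and-poll pattern - write a command word to the I2C/SPI
 * control register with SPECIAL_REG_WRITE(), then poll that register a
 * bounded number of times for a completion (or NACK) flag. The stand-alone
 * sketch below shows the shape of that bounded poll with generic, made-up
 * names; it is illustrative only and is not compiled into the driver.
 */
#if 0
#include <stdint.h>

/* Poll a status word up to 'max_tries' times for a done or error bit. */
static int poll_for_completion(uint64_t (*read_status)(void),
                               uint64_t done_bit, uint64_t err_bit,
                               int max_tries)
{
        int tries;

        for (tries = 0; tries < max_tries; tries++) {
                uint64_t status = read_status();

                if (status & err_bit)
                        return -1;      /* device rejected the command */
                if (status & done_bit)
                        return 0;       /* command completed */
                /* a real caller would delay here between polls */
        }
        return -1;                      /* timed out */
}
#endif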
/**
 * write_eeprom - actually writes the relevant part of the data value.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @off : offset at which the data must be written
 * @data : The data that is to be written
 * @cnt : Number of bytes of the data that are actually to be written into
 * the Eeprom. (max of 3)
 * Description:
 * Actually writes the relevant part of the data value into the Eeprom
 * through the I2C bus.
 * Return value:
 * 0 on success, -1 on failure.
 */
4504 static int write_eeprom(nic_t
* sp
, int off
, u64 data
, int cnt
)
4506 int exit_cnt
= 0, ret
= -1;
4508 XENA_dev_config_t __iomem
*bar0
= sp
->bar0
;
4510 if (sp
->device_type
== XFRAME_I_DEVICE
) {
4511 val64
= I2C_CONTROL_DEV_ID(S2IO_DEV_ID
) | I2C_CONTROL_ADDR(off
) |
4512 I2C_CONTROL_BYTE_CNT(cnt
) | I2C_CONTROL_SET_DATA((u32
)data
) |
4513 I2C_CONTROL_CNTL_START
;
4514 SPECIAL_REG_WRITE(val64
, &bar0
->i2c_control
, LF
);
4516 while (exit_cnt
< 5) {
4517 val64
= readq(&bar0
->i2c_control
);
4518 if (I2C_CONTROL_CNTL_END(val64
)) {
4519 if (!(val64
& I2C_CONTROL_NACK
))
4528 if (sp
->device_type
== XFRAME_II_DEVICE
) {
4529 int write_cnt
= (cnt
== 8) ? 0 : cnt
;
4530 writeq(SPI_DATA_WRITE(data
,(cnt
<<3)), &bar0
->spi_data
);
4532 val64
= SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1
|
4533 SPI_CONTROL_BYTECNT(write_cnt
) |
4534 SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off
);
4535 SPECIAL_REG_WRITE(val64
, &bar0
->spi_control
, LF
);
4536 val64
|= SPI_CONTROL_REQ
;
4537 SPECIAL_REG_WRITE(val64
, &bar0
->spi_control
, LF
);
4538 while (exit_cnt
< 5) {
4539 val64
= readq(&bar0
->spi_control
);
4540 if (val64
& SPI_CONTROL_NACK
) {
4543 } else if (val64
& SPI_CONTROL_DONE
) {
/**
 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @eeprom : pointer to the user level structure provided by ethtool,
 * containing all relevant information.
 * @data_buf : user buffer into which the data read from the Eeprom is
 * returned.
 * Description: Reads the values stored in the Eeprom at the given offset
 * for a given length. Stores these values in the input argument data
 * buffer 'data_buf' and returns them to the caller (ethtool.)
 * Return value:
 * 0 on success.
 */
4567 static int s2io_ethtool_geeprom(struct net_device
*dev
,
4568 struct ethtool_eeprom
*eeprom
, u8
* data_buf
)
4572 nic_t
*sp
= dev
->priv
;
4574 eeprom
->magic
= sp
->pdev
->vendor
| (sp
->pdev
->device
<< 16);
4576 if ((eeprom
->offset
+ eeprom
->len
) > (XENA_EEPROM_SPACE
))
4577 eeprom
->len
= XENA_EEPROM_SPACE
- eeprom
->offset
;
4579 for (i
= 0; i
< eeprom
->len
; i
+= 4) {
4580 if (read_eeprom(sp
, (eeprom
->offset
+ i
), &data
)) {
4581 DBG_PRINT(ERR_DBG
, "Read of EEPROM failed\n");
4585 memcpy((data_buf
+ i
), &valid
, 4);
/**
 * s2io_ethtool_seeprom - tries to write the user provided value in the Eeprom
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @eeprom : pointer to the user level structure provided by ethtool,
 * containing all relevant information.
 * @data_buf : user defined value to be written into the Eeprom.
 * Description:
 * Tries to write the user provided value in the Eeprom, at the offset
 * given by the user.
 * Return value:
 * 0 on success, -EFAULT on failure.
 */
4604 static int s2io_ethtool_seeprom(struct net_device
*dev
,
4605 struct ethtool_eeprom
*eeprom
,
4608 int len
= eeprom
->len
, cnt
= 0;
4609 u64 valid
= 0, data
;
4610 nic_t
*sp
= dev
->priv
;
4612 if (eeprom
->magic
!= (sp
->pdev
->vendor
| (sp
->pdev
->device
<< 16))) {
4614 "ETHTOOL_WRITE_EEPROM Err: Magic value ");
4615 DBG_PRINT(ERR_DBG
, "is wrong, Its not 0x%x\n",
4621 data
= (u32
) data_buf
[cnt
] & 0x000000FF;
4623 valid
= (u32
) (data
<< 24);
4627 if (write_eeprom(sp
, (eeprom
->offset
+ cnt
), valid
, 0)) {
4629 "ETHTOOL_WRITE_EEPROM Err: Cannot ");
4631 "write into the specified offset\n");
/**
 * s2io_register_test - reads and writes into all clock domains.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @data : variable that returns the result of each of the tests conducted by
 * the driver.
 * Description:
 * Read and write into all clock domains. The NIC has 3 clock domains; the
 * test verifies that registers in all three regions are accessible.
 * Return value:
 * 0 on success.
 */
4654 static int s2io_register_test(nic_t
* sp
, uint64_t * data
)
4656 XENA_dev_config_t __iomem
*bar0
= sp
->bar0
;
4657 u64 val64
= 0, exp_val
;
4660 val64
= readq(&bar0
->pif_rd_swapper_fb
);
4661 if (val64
!= 0x123456789abcdefULL
) {
4663 DBG_PRINT(INFO_DBG
, "Read Test level 1 fails\n");
4666 val64
= readq(&bar0
->rmac_pause_cfg
);
4667 if (val64
!= 0xc000ffff00000000ULL
) {
4669 DBG_PRINT(INFO_DBG
, "Read Test level 2 fails\n");
4672 val64
= readq(&bar0
->rx_queue_cfg
);
4673 if (sp
->device_type
== XFRAME_II_DEVICE
)
4674 exp_val
= 0x0404040404040404ULL
;
4676 exp_val
= 0x0808080808080808ULL
;
4677 if (val64
!= exp_val
) {
4679 DBG_PRINT(INFO_DBG
, "Read Test level 3 fails\n");
4682 val64
= readq(&bar0
->xgxs_efifo_cfg
);
4683 if (val64
!= 0x000000001923141EULL
) {
4685 DBG_PRINT(INFO_DBG
, "Read Test level 4 fails\n");
4688 val64
= 0x5A5A5A5A5A5A5A5AULL
;
4689 writeq(val64
, &bar0
->xmsi_data
);
4690 val64
= readq(&bar0
->xmsi_data
);
4691 if (val64
!= 0x5A5A5A5A5A5A5A5AULL
) {
4693 DBG_PRINT(ERR_DBG
, "Write Test level 1 fails\n");
4696 val64
= 0xA5A5A5A5A5A5A5A5ULL
;
4697 writeq(val64
, &bar0
->xmsi_data
);
4698 val64
= readq(&bar0
->xmsi_data
);
4699 if (val64
!= 0xA5A5A5A5A5A5A5A5ULL
) {
4701 DBG_PRINT(ERR_DBG
, "Write Test level 2 fails\n");
/**
 * s2io_eeprom_test - to verify that the EEPROM in the xena can be programmed.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @data : variable that returns the result of each of the tests conducted by
 * the driver.
 * Description:
 * Verify that the EEPROM in the xena can be programmed using the
 * I2C_CONTROL register.
 * Return value:
 * 0 on success.
 */
4721 static int s2io_eeprom_test(nic_t
* sp
, uint64_t * data
)
4724 u64 ret_data
, org_4F0
, org_7F0
;
4725 u8 saved_4F0
= 0, saved_7F0
= 0;
4726 struct net_device
*dev
= sp
->dev
;
4728 /* Test Write Error at offset 0 */
4729 /* Note that SPI interface allows write access to all areas
4730 * of EEPROM. Hence doing all negative testing only for Xframe I.
4732 if (sp
->device_type
== XFRAME_I_DEVICE
)
4733 if (!write_eeprom(sp
, 0, 0, 3))
4736 /* Save current values at offsets 0x4F0 and 0x7F0 */
4737 if (!read_eeprom(sp
, 0x4F0, &org_4F0
))
4739 if (!read_eeprom(sp
, 0x7F0, &org_7F0
))
4742 /* Test Write at offset 4f0 */
4743 if (write_eeprom(sp
, 0x4F0, 0x012345, 3))
4745 if (read_eeprom(sp
, 0x4F0, &ret_data
))
4748 if (ret_data
!= 0x012345) {
4749 DBG_PRINT(ERR_DBG
, "%s: eeprom test error at offset 0x4F0. "
4750 "Data written %llx Data read %llx\n",
4751 dev
->name
, (unsigned long long)0x12345,
4752 (unsigned long long)ret_data
);
4756 /* Reset the EEPROM data go FFFF */
4757 write_eeprom(sp
, 0x4F0, 0xFFFFFF, 3);
4759 /* Test Write Request Error at offset 0x7c */
4760 if (sp
->device_type
== XFRAME_I_DEVICE
)
4761 if (!write_eeprom(sp
, 0x07C, 0, 3))
4764 /* Test Write Request at offset 0x7f0 */
4765 if (write_eeprom(sp
, 0x7F0, 0x012345, 3))
4767 if (read_eeprom(sp
, 0x7F0, &ret_data
))
4770 if (ret_data
!= 0x012345) {
4771 DBG_PRINT(ERR_DBG
, "%s: eeprom test error at offset 0x7F0. "
4772 "Data written %llx Data read %llx\n",
4773 dev
->name
, (unsigned long long)0x12345,
4774 (unsigned long long)ret_data
);
4778 /* Reset the EEPROM data go FFFF */
4779 write_eeprom(sp
, 0x7F0, 0xFFFFFF, 3);
4781 if (sp
->device_type
== XFRAME_I_DEVICE
) {
4782 /* Test Write Error at offset 0x80 */
4783 if (!write_eeprom(sp
, 0x080, 0, 3))
4786 /* Test Write Error at offset 0xfc */
4787 if (!write_eeprom(sp
, 0x0FC, 0, 3))
4790 /* Test Write Error at offset 0x100 */
4791 if (!write_eeprom(sp
, 0x100, 0, 3))
4794 /* Test Write Error at offset 4ec */
4795 if (!write_eeprom(sp
, 0x4EC, 0, 3))
4799 /* Restore values at offsets 0x4F0 and 0x7F0 */
4801 write_eeprom(sp
, 0x4F0, org_4F0
, 3);
4803 write_eeprom(sp
, 0x7F0, org_7F0
, 3);
/**
 * s2io_bist_test - invokes the MemBist test of the card.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @data : variable that returns the result of each of the tests conducted by
 * the driver.
 * Description:
 * This invokes the MemBist test of the card. We give around
 * 2 secs time for the Test to complete. If it's still not complete
 * within this period, we consider that the test failed.
 * Return value:
 * 0 on success and -1 on failure.
 */
4823 static int s2io_bist_test(nic_t
* sp
, uint64_t * data
)
4826 int cnt
= 0, ret
= -1;
4828 pci_read_config_byte(sp
->pdev
, PCI_BIST
, &bist
);
4829 bist
|= PCI_BIST_START
;
4830 pci_write_config_word(sp
->pdev
, PCI_BIST
, bist
);
4833 pci_read_config_byte(sp
->pdev
, PCI_BIST
, &bist
);
4834 if (!(bist
& PCI_BIST_START
)) {
4835 *data
= (bist
& PCI_BIST_CODE_MASK
);
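/*
 * Added commentary (not in the original source): PCI BIST is a standard
 * config-space feature - setting PCI_BIST_START in the BIST register asks
 * the device to run its built-in self test, the bit clears when the test
 * finishes, and the low bits (PCI_BIST_CODE_MASK) hold the completion code
 * (0 means pass). The stand-alone sketch below shows the same start-and-poll
 * sequence using only the kernel's generic PCI config accessors; names and
 * timings are illustrative and it is not compiled into the driver.
 */
#if 0
#include <linux/pci.h>
#include <linux/delay.h>

/* Start BIST on 'pdev' and poll briefly for a completion code. */
static int example_run_pci_bist(struct pci_dev *pdev, u8 *code)
{
        u8 bist;
        int tries;

        pci_read_config_byte(pdev, PCI_BIST, &bist);
        if (!(bist & PCI_BIST_CAPABLE))
                return -1;                      /* device has no BIST */

        pci_write_config_byte(pdev, PCI_BIST, bist | PCI_BIST_START);

        for (tries = 0; tries < 20; tries++) {  /* roughly 2 seconds overall */
                msleep(100);
                pci_read_config_byte(pdev, PCI_BIST, &bist);
                if (!(bist & PCI_BIST_START)) {
                        *code = bist & PCI_BIST_CODE_MASK;
                        return 0;
                }
        }
        return -1;                              /* test did not complete */
}
#endif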
/**
 * s2io_link_test - verifies the link state of the nic
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @data: variable that returns the result of each of the tests conducted by
 * the driver.
 * Description:
 * The function verifies the link state of the NIC and updates the input
 * argument 'data' appropriately.
 * Return value:
 * 0 on success.
 */
4859 static int s2io_link_test(nic_t
* sp
, uint64_t * data
)
4861 XENA_dev_config_t __iomem
*bar0
= sp
->bar0
;
4864 val64
= readq(&bar0
->adapter_status
);
4865 if (val64
& ADAPTER_STATUS_RMAC_LOCAL_FAULT
)
/**
 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @data : variable that returns the result of each of the tests
 * conducted by the driver.
 * Description:
 * This is one of the offline tests that verifies read and write
 * access to the RldRam chip on the NIC.
 * Return value:
 * 0 on success.
 */
4884 static int s2io_rldram_test(nic_t
* sp
, uint64_t * data
)
4886 XENA_dev_config_t __iomem
*bar0
= sp
->bar0
;
4888 int cnt
, iteration
= 0, test_fail
= 0;
4890 val64
= readq(&bar0
->adapter_control
);
4891 val64
&= ~ADAPTER_ECC_EN
;
4892 writeq(val64
, &bar0
->adapter_control
);
4894 val64
= readq(&bar0
->mc_rldram_test_ctrl
);
4895 val64
|= MC_RLDRAM_TEST_MODE
;
4896 SPECIAL_REG_WRITE(val64
, &bar0
->mc_rldram_test_ctrl
, LF
);
4898 val64
= readq(&bar0
->mc_rldram_mrs
);
4899 val64
|= MC_RLDRAM_QUEUE_SIZE_ENABLE
;
4900 SPECIAL_REG_WRITE(val64
, &bar0
->mc_rldram_mrs
, UF
);
4902 val64
|= MC_RLDRAM_MRS_ENABLE
;
4903 SPECIAL_REG_WRITE(val64
, &bar0
->mc_rldram_mrs
, UF
);
4905 while (iteration
< 2) {
4906 val64
= 0x55555555aaaa0000ULL
;
4907 if (iteration
== 1) {
4908 val64
^= 0xFFFFFFFFFFFF0000ULL
;
4910 writeq(val64
, &bar0
->mc_rldram_test_d0
);
4912 val64
= 0xaaaa5a5555550000ULL
;
4913 if (iteration
== 1) {
4914 val64
^= 0xFFFFFFFFFFFF0000ULL
;
4916 writeq(val64
, &bar0
->mc_rldram_test_d1
);
4918 val64
= 0x55aaaaaaaa5a0000ULL
;
4919 if (iteration
== 1) {
4920 val64
^= 0xFFFFFFFFFFFF0000ULL
;
4922 writeq(val64
, &bar0
->mc_rldram_test_d2
);
4924 val64
= (u64
) (0x0000003ffffe0100ULL
);
4925 writeq(val64
, &bar0
->mc_rldram_test_add
);
4927 val64
= MC_RLDRAM_TEST_MODE
| MC_RLDRAM_TEST_WRITE
|
4929 SPECIAL_REG_WRITE(val64
, &bar0
->mc_rldram_test_ctrl
, LF
);
4931 for (cnt
= 0; cnt
< 5; cnt
++) {
4932 val64
= readq(&bar0
->mc_rldram_test_ctrl
);
4933 if (val64
& MC_RLDRAM_TEST_DONE
)
4941 val64
= MC_RLDRAM_TEST_MODE
| MC_RLDRAM_TEST_GO
;
4942 SPECIAL_REG_WRITE(val64
, &bar0
->mc_rldram_test_ctrl
, LF
);
4944 for (cnt
= 0; cnt
< 5; cnt
++) {
4945 val64
= readq(&bar0
->mc_rldram_test_ctrl
);
4946 if (val64
& MC_RLDRAM_TEST_DONE
)
4954 val64
= readq(&bar0
->mc_rldram_test_ctrl
);
4955 if (!(val64
& MC_RLDRAM_TEST_PASS
))
4963 /* Bring the adapter out of test mode */
4964 SPECIAL_REG_WRITE(0, &bar0
->mc_rldram_test_ctrl
, LF
);
/**
 * s2io_ethtool_test - conducts 6 tests to determine the health of the card.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @ethtest : pointer to a ethtool command specific structure that will be
 * returned to the user.
 * @data : variable that returns the result of each of the tests
 * conducted by the driver.
 * Description:
 * This function conducts 6 tests (4 offline and 2 online) to determine
 * the health of the card.
 * Return value:
 * void
 */
4984 static void s2io_ethtool_test(struct net_device
*dev
,
4985 struct ethtool_test
*ethtest
,
4988 nic_t
*sp
= dev
->priv
;
4989 int orig_state
= netif_running(sp
->dev
);
4991 if (ethtest
->flags
== ETH_TEST_FL_OFFLINE
) {
4992 /* Offline Tests. */
4994 s2io_close(sp
->dev
);
4996 if (s2io_register_test(sp
, &data
[0]))
4997 ethtest
->flags
|= ETH_TEST_FL_FAILED
;
5001 if (s2io_rldram_test(sp
, &data
[3]))
5002 ethtest
->flags
|= ETH_TEST_FL_FAILED
;
5006 if (s2io_eeprom_test(sp
, &data
[1]))
5007 ethtest
->flags
|= ETH_TEST_FL_FAILED
;
5009 if (s2io_bist_test(sp
, &data
[4]))
5010 ethtest
->flags
|= ETH_TEST_FL_FAILED
;
5020 "%s: is not up, cannot run test\n",
5029 if (s2io_link_test(sp
, &data
[2]))
5030 ethtest
->flags
|= ETH_TEST_FL_FAILED
;
5039 static void s2io_get_ethtool_stats(struct net_device
*dev
,
5040 struct ethtool_stats
*estats
,
5044 nic_t
*sp
= dev
->priv
;
5045 StatInfo_t
*stat_info
= sp
->mac_control
.stats_info
;
5047 s2io_updt_stats(sp
);
5049 (u64
)le32_to_cpu(stat_info
->tmac_frms_oflow
) << 32 |
5050 le32_to_cpu(stat_info
->tmac_frms
);
5052 (u64
)le32_to_cpu(stat_info
->tmac_data_octets_oflow
) << 32 |
5053 le32_to_cpu(stat_info
->tmac_data_octets
);
5054 tmp_stats
[i
++] = le64_to_cpu(stat_info
->tmac_drop_frms
);
5056 (u64
)le32_to_cpu(stat_info
->tmac_mcst_frms_oflow
) << 32 |
5057 le32_to_cpu(stat_info
->tmac_mcst_frms
);
5059 (u64
)le32_to_cpu(stat_info
->tmac_bcst_frms_oflow
) << 32 |
5060 le32_to_cpu(stat_info
->tmac_bcst_frms
);
5061 tmp_stats
[i
++] = le64_to_cpu(stat_info
->tmac_pause_ctrl_frms
);
5063 (u64
)le32_to_cpu(stat_info
->tmac_any_err_frms_oflow
) << 32 |
5064 le32_to_cpu(stat_info
->tmac_any_err_frms
);
5065 tmp_stats
[i
++] = le64_to_cpu(stat_info
->tmac_vld_ip_octets
);
5067 (u64
)le32_to_cpu(stat_info
->tmac_vld_ip_oflow
) << 32 |
5068 le32_to_cpu(stat_info
->tmac_vld_ip
);
5070 (u64
)le32_to_cpu(stat_info
->tmac_drop_ip_oflow
) << 32 |
5071 le32_to_cpu(stat_info
->tmac_drop_ip
);
5073 (u64
)le32_to_cpu(stat_info
->tmac_icmp_oflow
) << 32 |
5074 le32_to_cpu(stat_info
->tmac_icmp
);
5076 (u64
)le32_to_cpu(stat_info
->tmac_rst_tcp_oflow
) << 32 |
5077 le32_to_cpu(stat_info
->tmac_rst_tcp
);
5078 tmp_stats
[i
++] = le64_to_cpu(stat_info
->tmac_tcp
);
5079 tmp_stats
[i
++] = (u64
)le32_to_cpu(stat_info
->tmac_udp_oflow
) << 32 |
5080 le32_to_cpu(stat_info
->tmac_udp
);
5082 (u64
)le32_to_cpu(stat_info
->rmac_vld_frms_oflow
) << 32 |
5083 le32_to_cpu(stat_info
->rmac_vld_frms
);
5085 (u64
)le32_to_cpu(stat_info
->rmac_data_octets_oflow
) << 32 |
5086 le32_to_cpu(stat_info
->rmac_data_octets
);
5087 tmp_stats
[i
++] = le64_to_cpu(stat_info
->rmac_fcs_err_frms
);
5088 tmp_stats
[i
++] = le64_to_cpu(stat_info
->rmac_drop_frms
);
5090 (u64
)le32_to_cpu(stat_info
->rmac_vld_mcst_frms_oflow
) << 32 |
5091 le32_to_cpu(stat_info
->rmac_vld_mcst_frms
);
5093 (u64
)le32_to_cpu(stat_info
->rmac_vld_bcst_frms_oflow
) << 32 |
5094 le32_to_cpu(stat_info
->rmac_vld_bcst_frms
);
5095 tmp_stats
[i
++] = le32_to_cpu(stat_info
->rmac_in_rng_len_err_frms
);
5096 tmp_stats
[i
++] = le64_to_cpu(stat_info
->rmac_long_frms
);
5097 tmp_stats
[i
++] = le64_to_cpu(stat_info
->rmac_pause_ctrl_frms
);
5099 (u64
)le32_to_cpu(stat_info
->rmac_discarded_frms_oflow
) << 32 |
5100 le32_to_cpu(stat_info
->rmac_discarded_frms
);
5102 (u64
)le32_to_cpu(stat_info
->rmac_usized_frms_oflow
) << 32 |
5103 le32_to_cpu(stat_info
->rmac_usized_frms
);
5105 (u64
)le32_to_cpu(stat_info
->rmac_osized_frms_oflow
) << 32 |
5106 le32_to_cpu(stat_info
->rmac_osized_frms
);
5108 (u64
)le32_to_cpu(stat_info
->rmac_frag_frms_oflow
) << 32 |
5109 le32_to_cpu(stat_info
->rmac_frag_frms
);
5111 (u64
)le32_to_cpu(stat_info
->rmac_jabber_frms_oflow
) << 32 |
5112 le32_to_cpu(stat_info
->rmac_jabber_frms
);
5113 tmp_stats
[i
++] = (u64
)le32_to_cpu(stat_info
->rmac_ip_oflow
) << 32 |
5114 le32_to_cpu(stat_info
->rmac_ip
);
5115 tmp_stats
[i
++] = le64_to_cpu(stat_info
->rmac_ip_octets
);
5116 tmp_stats
[i
++] = le32_to_cpu(stat_info
->rmac_hdr_err_ip
);
5117 tmp_stats
[i
++] = (u64
)le32_to_cpu(stat_info
->rmac_drop_ip_oflow
) << 32 |
5118 le32_to_cpu(stat_info
->rmac_drop_ip
);
5119 tmp_stats
[i
++] = (u64
)le32_to_cpu(stat_info
->rmac_icmp_oflow
) << 32 |
5120 le32_to_cpu(stat_info
->rmac_icmp
);
5121 tmp_stats
[i
++] = le64_to_cpu(stat_info
->rmac_tcp
);
5122 tmp_stats
[i
++] = (u64
)le32_to_cpu(stat_info
->rmac_udp_oflow
) << 32 |
5123 le32_to_cpu(stat_info
->rmac_udp
);
5125 (u64
)le32_to_cpu(stat_info
->rmac_err_drp_udp_oflow
) << 32 |
5126 le32_to_cpu(stat_info
->rmac_err_drp_udp
);
5128 (u64
)le32_to_cpu(stat_info
->rmac_pause_cnt_oflow
) << 32 |
5129 le32_to_cpu(stat_info
->rmac_pause_cnt
);
5131 (u64
)le32_to_cpu(stat_info
->rmac_accepted_ip_oflow
) << 32 |
5132 le32_to_cpu(stat_info
->rmac_accepted_ip
);
5133 tmp_stats
[i
++] = le32_to_cpu(stat_info
->rmac_err_tcp
);
5135 tmp_stats
[i
++] = stat_info
->sw_stat
.single_ecc_errs
;
5136 tmp_stats
[i
++] = stat_info
->sw_stat
.double_ecc_errs
;
static int s2io_ethtool_get_regs_len(struct net_device *dev)
{
        return (XENA_REG_SPACE);
}

static u32 s2io_ethtool_get_rx_csum(struct net_device *dev)
{
        nic_t *sp = dev->priv;

        return (sp->rx_csum);
}

static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
{
        nic_t *sp = dev->priv;

        if (data)
                sp->rx_csum = 1;
        else
                sp->rx_csum = 0;

        return 0;
}

static int s2io_get_eeprom_len(struct net_device *dev)
{
        return (XENA_EEPROM_SPACE);
}

static int s2io_ethtool_self_test_count(struct net_device *dev)
{
        return (S2IO_TEST_LEN);
}

static void s2io_ethtool_get_strings(struct net_device *dev,
                                     u32 stringset, u8 *data)
{
        switch (stringset) {
        case ETH_SS_TEST:
                memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
                break;
        case ETH_SS_STATS:
                memcpy(data, &ethtool_stats_keys,
                       sizeof(ethtool_stats_keys));
                break;
        }
}

static int s2io_ethtool_get_stats_count(struct net_device *dev)
{
        return (S2IO_STAT_LEN);
}

static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
{
        if (data)
                dev->features |= NETIF_F_IP_CSUM;
        else
                dev->features &= ~NETIF_F_IP_CSUM;

        return 0;
}
static struct ethtool_ops netdev_ethtool_ops = {
        .get_settings = s2io_ethtool_gset,
        .set_settings = s2io_ethtool_sset,
        .get_drvinfo = s2io_ethtool_gdrvinfo,
        .get_regs_len = s2io_ethtool_get_regs_len,
        .get_regs = s2io_ethtool_gregs,
        .get_link = ethtool_op_get_link,
        .get_eeprom_len = s2io_get_eeprom_len,
        .get_eeprom = s2io_ethtool_geeprom,
        .set_eeprom = s2io_ethtool_seeprom,
        .get_pauseparam = s2io_ethtool_getpause_data,
        .set_pauseparam = s2io_ethtool_setpause_data,
        .get_rx_csum = s2io_ethtool_get_rx_csum,
        .set_rx_csum = s2io_ethtool_set_rx_csum,
        .get_tx_csum = ethtool_op_get_tx_csum,
        .set_tx_csum = s2io_ethtool_op_set_tx_csum,
        .get_sg = ethtool_op_get_sg,
        .set_sg = ethtool_op_set_sg,
        .get_tso = ethtool_op_get_tso,
        .set_tso = ethtool_op_set_tso,
        .get_ufo = ethtool_op_get_ufo,
        .set_ufo = ethtool_op_set_ufo,
        .self_test_count = s2io_ethtool_self_test_count,
        .self_test = s2io_ethtool_test,
        .get_strings = s2io_ethtool_get_strings,
        .phys_id = s2io_ethtool_idnic,
        .get_stats_count = s2io_ethtool_get_stats_count,
        .get_ethtool_stats = s2io_get_ethtool_stats
};
/**
 * s2io_ioctl - Entry point for the Ioctl
 * @dev : Device pointer.
 * @ifr : An IOCTL specific structure, that can contain a pointer to
 * a proprietary structure used to pass information to the driver.
 * @cmd : This is used to distinguish between the different commands that
 * can be passed to the IOCTL functions.
 * Description:
 * Currently there is no special functionality supported in IOCTL, hence
 * the function always returns -EOPNOTSUPP.
 */

static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
        return -EOPNOTSUPP;
}
/**
 * s2io_change_mtu - entry point to change MTU size for the device.
 * @dev : device pointer.
 * @new_mtu : the new MTU size for the device.
 * Description: A driver entry point to change MTU size for the device.
 * Before changing the MTU the device must be stopped.
 * Return value:
 * 0 on success and an appropriate (-)ve integer as defined in errno.h
 * file on failure.
 */
5262 static int s2io_change_mtu(struct net_device
*dev
, int new_mtu
)
5264 nic_t
*sp
= dev
->priv
;
5266 if ((new_mtu
< MIN_MTU
) || (new_mtu
> S2IO_JUMBO_SIZE
)) {
5267 DBG_PRINT(ERR_DBG
, "%s: MTU size is invalid.\n",
5273 if (netif_running(dev
)) {
5275 netif_stop_queue(dev
);
5276 if (s2io_card_up(sp
)) {
5277 DBG_PRINT(ERR_DBG
, "%s: Device bring up failed\n",
5280 if (netif_queue_stopped(dev
))
5281 netif_wake_queue(dev
);
5282 } else { /* Device is down */
5283 XENA_dev_config_t __iomem
*bar0
= sp
->bar0
;
5284 u64 val64
= new_mtu
;
5286 writeq(vBIT(val64
, 2, 14), &bar0
->rmac_max_pyld_len
);
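/*
 * Added commentary (not part of the original source): when the interface is
 * down, the new MTU only has to be reflected in the RMAC maximum payload
 * length register; vBIT(val64, 2, 14) places the 14-bit length value into
 * the appropriate bit field of rmac_max_pyld_len and no card reset is
 * required. When the interface is up, the card is instead brought down and
 * back up so that every ring and FIFO picks up the new size.
 */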
/**
 * s2io_tasklet - Bottom half of the ISR.
 * @dev_addr : address of the device structure in dma_addr_t format.
 * Description:
 * This is the tasklet or the bottom half of the ISR. This is
 * an extension of the ISR which is scheduled by the scheduler to be run
 * when the load on the CPU is low. All low priority tasks of the ISR can
 * be pushed into the tasklet. For now the tasklet is used only to
 * replenish the Rx buffers in the Rx buffer descriptors.
 * Return value:
 * void.
 */
5305 static void s2io_tasklet(unsigned long dev_addr
)
5307 struct net_device
*dev
= (struct net_device
*) dev_addr
;
5308 nic_t
*sp
= dev
->priv
;
5310 mac_info_t
*mac_control
;
5311 struct config_param
*config
;
5313 mac_control
= &sp
->mac_control
;
5314 config
= &sp
->config
;
5316 if (!TASKLET_IN_USE
) {
5317 for (i
= 0; i
< config
->rx_ring_num
; i
++) {
5318 ret
= fill_rx_buffers(sp
, i
);
5319 if (ret
== -ENOMEM
) {
5320 DBG_PRINT(ERR_DBG
, "%s: Out of ",
5322 DBG_PRINT(ERR_DBG
, "memory in tasklet\n");
5324 } else if (ret
== -EFILL
) {
5326 "%s: Rx Ring %d is full\n",
5331 clear_bit(0, (&sp
->tasklet_status
));
/**
 * s2io_set_link - Set the Link status
 * @data: long pointer to the device private structure
 * Description: Sets the link status for the adapter
 */
5341 static void s2io_set_link(unsigned long data
)
5343 nic_t
*nic
= (nic_t
*) data
;
5344 struct net_device
*dev
= nic
->dev
;
5345 XENA_dev_config_t __iomem
*bar0
= nic
->bar0
;
5349 if (test_and_set_bit(0, &(nic
->link_state
))) {
5350 /* The card is being reset, no point doing anything */
5354 subid
= nic
->pdev
->subsystem_device
;
5355 if (s2io_link_fault_indication(nic
) == MAC_RMAC_ERR_TIMER
) {
5357 * Allow a small delay for the NICs self initiated
5358 * cleanup to complete.
5363 val64
= readq(&bar0
->adapter_status
);
5364 if (verify_xena_quiescence(nic
, val64
, nic
->device_enabled_once
)) {
5365 if (LINK_IS_UP(val64
)) {
5366 val64
= readq(&bar0
->adapter_control
);
5367 val64
|= ADAPTER_CNTL_EN
;
5368 writeq(val64
, &bar0
->adapter_control
);
5369 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic
->device_type
,
5371 val64
= readq(&bar0
->gpio_control
);
5372 val64
|= GPIO_CTRL_GPIO_0
;
5373 writeq(val64
, &bar0
->gpio_control
);
5374 val64
= readq(&bar0
->gpio_control
);
5376 val64
|= ADAPTER_LED_ON
;
5377 writeq(val64
, &bar0
->adapter_control
);
5379 if (s2io_link_fault_indication(nic
) ==
5380 MAC_RMAC_ERR_TIMER
) {
5381 val64
= readq(&bar0
->adapter_status
);
5382 if (!LINK_IS_UP(val64
)) {
5383 DBG_PRINT(ERR_DBG
, "%s:", dev
->name
);
5384 DBG_PRINT(ERR_DBG
, " Link down");
5385 DBG_PRINT(ERR_DBG
, "after ");
5386 DBG_PRINT(ERR_DBG
, "enabling ");
5387 DBG_PRINT(ERR_DBG
, "device \n");
5390 if (nic
->device_enabled_once
== FALSE
) {
5391 nic
->device_enabled_once
= TRUE
;
5393 s2io_link(nic
, LINK_UP
);
5395 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic
->device_type
,
5397 val64
= readq(&bar0
->gpio_control
);
5398 val64
&= ~GPIO_CTRL_GPIO_0
;
5399 writeq(val64
, &bar0
->gpio_control
);
5400 val64
= readq(&bar0
->gpio_control
);
5402 s2io_link(nic
, LINK_DOWN
);
5404 } else { /* NIC is not Quiescent. */
5405 DBG_PRINT(ERR_DBG
, "%s: Error: ", dev
->name
);
5406 DBG_PRINT(ERR_DBG
, "device is not Quiescent\n");
5407 netif_stop_queue(dev
);
5409 clear_bit(0, &(nic
->link_state
));
5412 static void s2io_card_down(nic_t
* sp
)
5415 XENA_dev_config_t __iomem
*bar0
= sp
->bar0
;
5416 unsigned long flags
;
5417 register u64 val64
= 0;
5419 del_timer_sync(&sp
->alarm_timer
);
5420 /* If s2io_set_link task is executing, wait till it completes. */
5421 while (test_and_set_bit(0, &(sp
->link_state
))) {
5424 atomic_set(&sp
->card_state
, CARD_DOWN
);
5426 /* disable Tx and Rx traffic on the NIC */
5430 tasklet_kill(&sp
->task
);
5432 /* Check if the device is Quiescent and then Reset the NIC */
5434 val64
= readq(&bar0
->adapter_status
);
5435 if (verify_xena_quiescence(sp
, val64
, sp
->device_enabled_once
)) {
5443 "s2io_close:Device not Quiescent ");
5444 DBG_PRINT(ERR_DBG
, "adaper status reads 0x%llx\n",
5445 (unsigned long long) val64
);
5451 /* Waiting till all Interrupt handlers are complete */
5455 if (!atomic_read(&sp
->isr_cnt
))
5460 spin_lock_irqsave(&sp
->tx_lock
, flags
);
5461 /* Free all Tx buffers */
5462 free_tx_buffers(sp
);
5463 spin_unlock_irqrestore(&sp
->tx_lock
, flags
);
5465 /* Free all Rx buffers */
5466 spin_lock_irqsave(&sp
->rx_lock
, flags
);
5467 free_rx_buffers(sp
);
5468 spin_unlock_irqrestore(&sp
->rx_lock
, flags
);
5470 clear_bit(0, &(sp
->link_state
));
5473 static int s2io_card_up(nic_t
* sp
)
5476 mac_info_t
*mac_control
;
5477 struct config_param
*config
;
5478 struct net_device
*dev
= (struct net_device
*) sp
->dev
;
5480 /* Initialize the H/W I/O registers */
5481 if (init_nic(sp
) != 0) {
5482 DBG_PRINT(ERR_DBG
, "%s: H/W initialization failed\n",
5487 if (sp
->intr_type
== MSI
)
5488 ret
= s2io_enable_msi(sp
);
5489 else if (sp
->intr_type
== MSI_X
)
5490 ret
= s2io_enable_msi_x(sp
);
5492 DBG_PRINT(ERR_DBG
, "%s: Defaulting to INTA\n", dev
->name
);
5493 sp
->intr_type
= INTA
;
5497 * Initializing the Rx buffers. For now we are considering only 1
5498 * Rx ring and initializing buffers into 30 Rx blocks
5500 mac_control
= &sp
->mac_control
;
5501 config
= &sp
->config
;
5503 for (i
= 0; i
< config
->rx_ring_num
; i
++) {
5504 if ((ret
= fill_rx_buffers(sp
, i
))) {
5505 DBG_PRINT(ERR_DBG
, "%s: Out of memory in Open\n",
5508 free_rx_buffers(sp
);
5511 DBG_PRINT(INFO_DBG
, "Buf in ring:%d is %d:\n", i
,
5512 atomic_read(&sp
->rx_bufs_left
[i
]));
5515 /* Setting its receive mode */
5516 s2io_set_multicast(dev
);
5518 /* Enable tasklet for the device */
5519 tasklet_init(&sp
->task
, s2io_tasklet
, (unsigned long) dev
);
5521 /* Enable Rx Traffic and interrupts on the NIC */
5522 if (start_nic(sp
)) {
5523 DBG_PRINT(ERR_DBG
, "%s: Starting NIC failed\n", dev
->name
);
5524 tasklet_kill(&sp
->task
);
5526 free_irq(dev
->irq
, dev
);
5527 free_rx_buffers(sp
);
5531 S2IO_TIMER_CONF(sp
->alarm_timer
, s2io_alarm_handle
, sp
, (HZ
/2));
5533 atomic_set(&sp
->card_state
, CARD_UP
);
/**
 * s2io_restart_nic - Resets the NIC.
 * @data : long pointer to the device private structure
 * Description:
 * This function is scheduled to be run by the s2io_tx_watchdog
 * function after 0.5 secs to reset the NIC. The idea is to reduce
 * the run time of the watch dog routine which is run holding a
 * spin lock.
 */
5547 static void s2io_restart_nic(unsigned long data
)
5549 struct net_device
*dev
= (struct net_device
*) data
;
5550 nic_t
*sp
= dev
->priv
;
5553 if (s2io_card_up(sp
)) {
5554 DBG_PRINT(ERR_DBG
, "%s: Device bring up failed\n",
5557 netif_wake_queue(dev
);
5558 DBG_PRINT(ERR_DBG
, "%s: was reset by Tx watchdog timer\n",
/**
 * s2io_tx_watchdog - Watchdog for transmit side.
 * @dev : Pointer to net device structure
 * Description:
 * This function is triggered if the Tx Queue is stopped
 * for a pre-defined amount of time when the Interface is still up.
 * If the Interface is jammed in such a situation, the hardware is
 * reset (by s2io_close) and restarted again (by s2io_open) to
 * overcome any problem that might have been caused in the hardware.
 * Return value:
 * void
 */

static void s2io_tx_watchdog(struct net_device *dev)
{
        nic_t *sp = dev->priv;

        if (netif_carrier_ok(dev)) {
                schedule_work(&sp->rst_timer_task);
        }
}
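/*
 * Added commentary (not in the original source): the watchdog runs in a
 * context where a lengthy recovery is undesirable, so the actual reset
 * (card down, reset, card up) is deferred to the rst_timer_task work item,
 * which the workqueue later executes as s2io_restart_nic(). The handler
 * itself only schedules that work, and only while the carrier is up.
 */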
/**
 * rx_osm_handler - To perform some OS related operations on SKB.
 * @sp: private member of the device structure, pointer to s2io_nic structure.
 * @skb : the socket buffer pointer.
 * @len : length of the packet
 * @cksum : FCS checksum of the frame.
 * @ring_no : the ring from which this RxD was extracted.
 * Description:
 * This function is called by the Rx interrupt service routine to perform
 * some OS related operations on the SKB before passing it to the upper
 * layers. It mainly checks if the checksum is OK, if so adds it to the
 * SKBs cksum variable, increments the Rx packet count and passes the SKB
 * to the upper layer. If the checksum is wrong, it increments the Rx
 * packet error count, frees the SKB and returns error.
 * Return value:
 * SUCCESS on success and -1 on failure.
 */
5602 static int rx_osm_handler(ring_info_t
*ring_data
, RxD_t
* rxdp
)
5604 nic_t
*sp
= ring_data
->nic
;
5605 struct net_device
*dev
= (struct net_device
*) sp
->dev
;
5606 struct sk_buff
*skb
= (struct sk_buff
*)
5607 ((unsigned long) rxdp
->Host_Control
);
5608 int ring_no
= ring_data
->ring_no
;
5609 u16 l3_csum
, l4_csum
;
5612 if (rxdp
->Control_1
& RXD_T_CODE
) {
5613 unsigned long long err
= rxdp
->Control_1
& RXD_T_CODE
;
5614 DBG_PRINT(ERR_DBG
, "%s: Rx error Value: 0x%llx\n",
5617 sp
->stats
.rx_crc_errors
++;
5618 atomic_dec(&sp
->rx_bufs_left
[ring_no
]);
5619 rxdp
->Host_Control
= 0;
5623 /* Updating statistics */
5624 rxdp
->Host_Control
= 0;
5626 sp
->stats
.rx_packets
++;
5627 if (sp
->rxd_mode
== RXD_MODE_1
) {
5628 int len
= RXD_GET_BUFFER0_SIZE_1(rxdp
->Control_2
);
5630 sp
->stats
.rx_bytes
+= len
;
5633 } else if (sp
->rxd_mode
>= RXD_MODE_3A
) {
5634 int get_block
= ring_data
->rx_curr_get_info
.block_index
;
5635 int get_off
= ring_data
->rx_curr_get_info
.offset
;
5636 int buf0_len
= RXD_GET_BUFFER0_SIZE_3(rxdp
->Control_2
);
5637 int buf2_len
= RXD_GET_BUFFER2_SIZE_3(rxdp
->Control_2
);
5638 unsigned char *buff
= skb_push(skb
, buf0_len
);
5640 buffAdd_t
*ba
= &ring_data
->ba
[get_block
][get_off
];
5641 sp
->stats
.rx_bytes
+= buf0_len
+ buf2_len
;
5642 memcpy(buff
, ba
->ba_0
, buf0_len
);
5644 if (sp
->rxd_mode
== RXD_MODE_3A
) {
5645 int buf1_len
= RXD_GET_BUFFER1_SIZE_3(rxdp
->Control_2
);
5647 skb_put(skb
, buf1_len
);
5648 skb
->len
+= buf2_len
;
5649 skb
->data_len
+= buf2_len
;
5650 skb
->truesize
+= buf2_len
;
5651 skb_put(skb_shinfo(skb
)->frag_list
, buf2_len
);
5652 sp
->stats
.rx_bytes
+= buf1_len
;
5655 skb_put(skb
, buf2_len
);
5658 if ((rxdp
->Control_1
& TCP_OR_UDP_FRAME
) &&
5660 l3_csum
= RXD_GET_L3_CKSUM(rxdp
->Control_1
);
5661 l4_csum
= RXD_GET_L4_CKSUM(rxdp
->Control_1
);
5662 if ((l3_csum
== L3_CKSUM_OK
) && (l4_csum
== L4_CKSUM_OK
)) {
5664 * NIC verifies if the Checksum of the received
5665 * frame is Ok or not and accordingly returns
5666 * a flag in the RxD.
5668 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
5671 * Packet with erroneous checksum, let the
5672 * upper layers deal with it.
5674 skb
->ip_summed
= CHECKSUM_NONE
;
5677 skb
->ip_summed
= CHECKSUM_NONE
;
5680 skb
->protocol
= eth_type_trans(skb
, dev
);
5681 #ifdef CONFIG_S2IO_NAPI
5682 if (sp
->vlgrp
&& RXD_GET_VLAN_TAG(rxdp
->Control_2
)) {
5683 /* Queueing the vlan frame to the upper layer */
5684 vlan_hwaccel_receive_skb(skb
, sp
->vlgrp
,
5685 RXD_GET_VLAN_TAG(rxdp
->Control_2
));
5687 netif_receive_skb(skb
);
5690 if (sp
->vlgrp
&& RXD_GET_VLAN_TAG(rxdp
->Control_2
)) {
5691 /* Queueing the vlan frame to the upper layer */
5692 vlan_hwaccel_rx(skb
, sp
->vlgrp
,
5693 RXD_GET_VLAN_TAG(rxdp
->Control_2
));
5698 dev
->last_rx
= jiffies
;
5699 atomic_dec(&sp
->rx_bufs_left
[ring_no
]);
/**
 * s2io_link - stops/starts the Tx queue.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @link : indicates whether link is UP/DOWN.
 * Description:
 * This function stops/starts the Tx queue depending on whether the link
 * status of the NIC is down or up. This is called by the Alarm
 * interrupt handler whenever a link change interrupt comes up.
 * Return value:
 * void.
 */

void s2io_link(nic_t *sp, int link)
{
        struct net_device *dev = (struct net_device *) sp->dev;

        if (link != sp->last_link_state) {
                if (link == LINK_DOWN) {
                        DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
                        netif_carrier_off(dev);
                } else {
                        DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
                        netif_carrier_on(dev);
                }
        }
        sp->last_link_state = link;
}
/**
 * get_xena_rev_id - to identify revision ID of xena.
 * @pdev : PCI Dev structure
 * Description:
 * Function to identify the Revision ID of xena.
 * Return value:
 * returns the revision ID of the device.
 */

int get_xena_rev_id(struct pci_dev *pdev)
{
        u8 id = 0;
        int ret;

        ret = pci_read_config_byte(pdev, PCI_REVISION_ID, (u8 *) &id);
        return id;
}
/**
 * s2io_init_pci - Initialization of PCI and PCI-X configuration registers.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * Description:
 * This function initializes a few of the PCI and PCI-X configuration
 * registers with recommended values.
 * Return value:
 * void
 */
5760 static void s2io_init_pci(nic_t
* sp
)
5762 u16 pci_cmd
= 0, pcix_cmd
= 0;
5764 /* Enable Data Parity Error Recovery in PCI-X command register. */
5765 pci_read_config_word(sp
->pdev
, PCIX_COMMAND_REGISTER
,
5767 pci_write_config_word(sp
->pdev
, PCIX_COMMAND_REGISTER
,
5769 pci_read_config_word(sp
->pdev
, PCIX_COMMAND_REGISTER
,
5772 /* Set the PErr Response bit in PCI command register. */
5773 pci_read_config_word(sp
->pdev
, PCI_COMMAND
, &pci_cmd
);
5774 pci_write_config_word(sp
->pdev
, PCI_COMMAND
,
5775 (pci_cmd
| PCI_COMMAND_PARITY
));
5776 pci_read_config_word(sp
->pdev
, PCI_COMMAND
, &pci_cmd
);
5778 /* Forcibly disabling relaxed ordering capability of the card. */
5780 pci_write_config_word(sp
->pdev
, PCIX_COMMAND_REGISTER
,
5782 pci_read_config_word(sp
->pdev
, PCIX_COMMAND_REGISTER
,
MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

module_param(tx_fifo_num, int, 0);
module_param(rx_ring_num, int, 0);
module_param(rx_ring_mode, int, 0);
module_param_array(tx_fifo_len, uint, NULL, 0);
module_param_array(rx_ring_sz, uint, NULL, 0);
module_param_array(rts_frm_len, uint, NULL, 0);
module_param(use_continuous_tx_intrs, int, 1);
module_param(rmac_pause_time, int, 0);
module_param(mc_pause_threshold_q0q3, int, 0);
module_param(mc_pause_threshold_q4q7, int, 0);
module_param(shared_splits, int, 0);
module_param(tmac_util_period, int, 0);
module_param(rmac_util_period, int, 0);
module_param(bimodal, bool, 0);
module_param(l3l4hdr_size, int, 0);
#ifndef CONFIG_S2IO_NAPI
module_param(indicate_max_pkts, int, 0);
#endif
module_param(rxsync_frequency, int, 0);
module_param(intr_type, int, 0);
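/*
 * Added usage note (not part of the original source): these module
 * parameters are supplied at load time, either on the modprobe/insmod
 * command line or through modprobe configuration. For example (all values
 * below are illustrative only):
 *
 *     modprobe s2io rx_ring_num=2 rx_ring_mode=2 tx_fifo_num=1 \
 *             tx_fifo_len=4096 intr_type=0
 *
 * Array parameters such as tx_fifo_len and rx_ring_sz take a
 * comma-separated list, one entry per FIFO or ring.
 */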
/**
 * s2io_init_nic - Initialization of the adapter.
 * @pdev : structure containing the PCI related information of the device.
 * @pre: List of PCI devices supported by the driver listed in s2io_tbl.
 * Description:
 * The function initializes an adapter identified by the pci_dev structure.
 * All OS related initialization including memory and device structure and
 * initialization of the device private variable is done. Also the swapper
 * control register is initialized to enable read and write into the I/O
 * registers of the device.
 * Return value:
 * returns 0 on success and negative on failure.
 */
5825 static int __devinit
5826 s2io_init_nic(struct pci_dev
*pdev
, const struct pci_device_id
*pre
)
5829 struct net_device
*dev
;
5831 int dma_flag
= FALSE
;
5832 u32 mac_up
, mac_down
;
5833 u64 val64
= 0, tmp64
= 0;
5834 XENA_dev_config_t __iomem
*bar0
= NULL
;
5836 mac_info_t
*mac_control
;
5837 struct config_param
*config
;
5839 u8 dev_intr_type
= intr_type
;
5841 #ifdef CONFIG_S2IO_NAPI
5842 if (dev_intr_type
!= INTA
) {
5843 DBG_PRINT(ERR_DBG
, "NAPI cannot be enabled when MSI/MSI-X \
5844 is enabled. Defaulting to INTA\n");
5845 dev_intr_type
= INTA
;
5848 DBG_PRINT(ERR_DBG
, "NAPI support has been enabled\n");
5851 if ((ret
= pci_enable_device(pdev
))) {
5853 "s2io_init_nic: pci_enable_device failed\n");
5857 if (!pci_set_dma_mask(pdev
, DMA_64BIT_MASK
)) {
5858 DBG_PRINT(INIT_DBG
, "s2io_init_nic: Using 64bit DMA\n");
5860 if (pci_set_consistent_dma_mask
5861 (pdev
, DMA_64BIT_MASK
)) {
5863 "Unable to obtain 64bit DMA for \
5864 consistent allocations\n");
5865 pci_disable_device(pdev
);
5868 } else if (!pci_set_dma_mask(pdev
, DMA_32BIT_MASK
)) {
5869 DBG_PRINT(INIT_DBG
, "s2io_init_nic: Using 32bit DMA\n");
5871 pci_disable_device(pdev
);
5875 if ((dev_intr_type
== MSI_X
) &&
5876 ((pdev
->device
!= PCI_DEVICE_ID_HERC_WIN
) &&
5877 (pdev
->device
!= PCI_DEVICE_ID_HERC_UNI
))) {
5878 DBG_PRINT(ERR_DBG
, "Xframe I does not support MSI_X. \
5879 Defaulting to INTA\n");
5880 dev_intr_type
= INTA
;
5882 if (dev_intr_type
!= MSI_X
) {
5883 if (pci_request_regions(pdev
, s2io_driver_name
)) {
5884 DBG_PRINT(ERR_DBG
, "Request Regions failed\n"),
5885 pci_disable_device(pdev
);
5890 if (!(request_mem_region(pci_resource_start(pdev
, 0),
5891 pci_resource_len(pdev
, 0), s2io_driver_name
))) {
5892 DBG_PRINT(ERR_DBG
, "bar0 Request Regions failed\n");
5893 pci_disable_device(pdev
);
5896 if (!(request_mem_region(pci_resource_start(pdev
, 2),
5897 pci_resource_len(pdev
, 2), s2io_driver_name
))) {
5898 DBG_PRINT(ERR_DBG
, "bar1 Request Regions failed\n");
5899 release_mem_region(pci_resource_start(pdev
, 0),
5900 pci_resource_len(pdev
, 0));
5901 pci_disable_device(pdev
);
5906 dev
= alloc_etherdev(sizeof(nic_t
));
5908 DBG_PRINT(ERR_DBG
, "Device allocation failed\n");
5909 pci_disable_device(pdev
);
5910 pci_release_regions(pdev
);
5914 pci_set_master(pdev
);
5915 pci_set_drvdata(pdev
, dev
);
5916 SET_MODULE_OWNER(dev
);
5917 SET_NETDEV_DEV(dev
, &pdev
->dev
);
5919 /* Private member variable initialized to s2io NIC structure */
5921 memset(sp
, 0, sizeof(nic_t
));
5924 sp
->high_dma_flag
= dma_flag
;
5925 sp
->device_enabled_once
= FALSE
;
5926 if (rx_ring_mode
== 1)
5927 sp
->rxd_mode
= RXD_MODE_1
;
5928 if (rx_ring_mode
== 2)
5929 sp
->rxd_mode
= RXD_MODE_3B
;
5930 if (rx_ring_mode
== 3)
5931 sp
->rxd_mode
= RXD_MODE_3A
;
5933 sp
->intr_type
= dev_intr_type
;
5935 if ((pdev
->device
== PCI_DEVICE_ID_HERC_WIN
) ||
5936 (pdev
->device
== PCI_DEVICE_ID_HERC_UNI
))
5937 sp
->device_type
= XFRAME_II_DEVICE
;
5939 sp
->device_type
= XFRAME_I_DEVICE
;
5942 /* Initialize some PCI/PCI-X fields of the NIC. */
5946 * Setting the device configuration parameters.
5947 * Most of these parameters can be specified by the user during
5948 * module insertion as they are module loadable parameters. If
5949 * these parameters are not not specified during load time, they
5950 * are initialized with default values.
5952 mac_control
= &sp
->mac_control
;
5953 config
= &sp
->config
;
5955 /* Tx side parameters. */
5956 if (tx_fifo_len
[0] == 0)
5957 tx_fifo_len
[0] = DEFAULT_FIFO_LEN
; /* Default value. */
5958 config
->tx_fifo_num
= tx_fifo_num
;
5959 for (i
= 0; i
< MAX_TX_FIFOS
; i
++) {
5960 config
->tx_cfg
[i
].fifo_len
= tx_fifo_len
[i
];
5961 config
->tx_cfg
[i
].fifo_priority
= i
;
5964 /* mapping the QoS priority to the configured fifos */
5965 for (i
= 0; i
< MAX_TX_FIFOS
; i
++)
5966 config
->fifo_mapping
[i
] = fifo_map
[config
->tx_fifo_num
][i
];
5968 config
->tx_intr_type
= TXD_INT_TYPE_UTILZ
;
5969 for (i
= 0; i
< config
->tx_fifo_num
; i
++) {
5970 config
->tx_cfg
[i
].f_no_snoop
=
5971 (NO_SNOOP_TXD
| NO_SNOOP_TXD_BUFFER
);
5972 if (config
->tx_cfg
[i
].fifo_len
< 65) {
5973 config
->tx_intr_type
= TXD_INT_TYPE_PER_LIST
;
5977 /* + 2 because one Txd for skb->data and one Txd for UFO */
5978 config
->max_txds
= MAX_SKB_FRAGS
+ 2;
5980 /* Rx side parameters. */
5981 if (rx_ring_sz
[0] == 0)
5982 rx_ring_sz
[0] = SMALL_BLK_CNT
; /* Default value. */
5983 config
->rx_ring_num
= rx_ring_num
;
5984 for (i
= 0; i
< MAX_RX_RINGS
; i
++) {
5985 config
->rx_cfg
[i
].num_rxd
= rx_ring_sz
[i
] *
5986 (rxd_count
[sp
->rxd_mode
] + 1);
5987 config
->rx_cfg
[i
].ring_priority
= i
;
5990 for (i
= 0; i
< rx_ring_num
; i
++) {
5991 config
->rx_cfg
[i
].ring_org
= RING_ORG_BUFF1
;
5992 config
->rx_cfg
[i
].f_no_snoop
=
5993 (NO_SNOOP_RXD
| NO_SNOOP_RXD_BUFFER
);
5996 /* Setting Mac Control parameters */
5997 mac_control
->rmac_pause_time
= rmac_pause_time
;
5998 mac_control
->mc_pause_threshold_q0q3
= mc_pause_threshold_q0q3
;
5999 mac_control
->mc_pause_threshold_q4q7
= mc_pause_threshold_q4q7
;
6002 /* Initialize Ring buffer parameters. */
6003 for (i
= 0; i
< config
->rx_ring_num
; i
++)
6004 atomic_set(&sp
->rx_bufs_left
[i
], 0);
6006 /* Initialize the number of ISRs currently running */
6007 atomic_set(&sp
->isr_cnt
, 0);
6009 /* initialize the shared memory used by the NIC and the host */
6010 if (init_shared_mem(sp
)) {
6011 DBG_PRINT(ERR_DBG
, "%s: Memory allocation failed\n",
6014 goto mem_alloc_failed
;
6017 sp
->bar0
= ioremap(pci_resource_start(pdev
, 0),
6018 pci_resource_len(pdev
, 0));
6020 DBG_PRINT(ERR_DBG
, "%s: S2IO: cannot remap io mem1\n",
6023 goto bar0_remap_failed
;
6026 sp
->bar1
= ioremap(pci_resource_start(pdev
, 2),
6027 pci_resource_len(pdev
, 2));
6029 DBG_PRINT(ERR_DBG
, "%s: S2IO: cannot remap io mem2\n",
6032 goto bar1_remap_failed
;
6035 dev
->irq
= pdev
->irq
;
6036 dev
->base_addr
= (unsigned long) sp
->bar0
;
6038 /* Initializing the BAR1 address as the start of the FIFO pointer. */
6039 for (j
= 0; j
< MAX_TX_FIFOS
; j
++) {
6040 mac_control
->tx_FIFO_start
[j
] = (TxFIFO_element_t __iomem
*)
6041 (sp
->bar1
+ (j
* 0x00020000));
6044 /* Driver entry points */
6045 dev
->open
= &s2io_open
;
6046 dev
->stop
= &s2io_close
;
6047 dev
->hard_start_xmit
= &s2io_xmit
;
6048 dev
->get_stats
= &s2io_get_stats
;
6049 dev
->set_multicast_list
= &s2io_set_multicast
;
6050 dev
->do_ioctl
= &s2io_ioctl
;
6051 dev
->change_mtu
= &s2io_change_mtu
;
6052 SET_ETHTOOL_OPS(dev
, &netdev_ethtool_ops
);
6053 dev
->features
|= NETIF_F_HW_VLAN_TX
| NETIF_F_HW_VLAN_RX
;
6054 dev
->vlan_rx_register
= s2io_vlan_rx_register
;
6055 dev
->vlan_rx_kill_vid
= (void *)s2io_vlan_rx_kill_vid
;
6058 * will use eth_mac_addr() for dev->set_mac_address
6059 * mac address will be set every time dev->open() is called
6061 #if defined(CONFIG_S2IO_NAPI)
6062 dev
->poll
= s2io_poll
;
6066 dev
->features
|= NETIF_F_SG
| NETIF_F_IP_CSUM
;
6067 if (sp
->high_dma_flag
== TRUE
)
6068 dev
->features
|= NETIF_F_HIGHDMA
;
6070 dev
->features
|= NETIF_F_TSO
;
6072 if (sp
->device_type
& XFRAME_II_DEVICE
) {
6073 dev
->features
|= NETIF_F_UFO
;
6074 dev
->features
|= NETIF_F_HW_CSUM
;
6077 dev
->tx_timeout
= &s2io_tx_watchdog
;
6078 dev
->watchdog_timeo
= WATCH_DOG_TIMEOUT
;
6079 INIT_WORK(&sp
->rst_timer_task
,
6080 (void (*)(void *)) s2io_restart_nic
, dev
);
6081 INIT_WORK(&sp
->set_link_task
,
6082 (void (*)(void *)) s2io_set_link
, sp
);
6084 pci_save_state(sp
->pdev
);
6086 /* Setting swapper control on the NIC, for proper reset operation */
6087 if (s2io_set_swapper(sp
)) {
6088 DBG_PRINT(ERR_DBG
, "%s:swapper settings are wrong\n",
6091 goto set_swap_failed
;
6094 /* Verify if the Herc works on the slot its placed into */
6095 if (sp
->device_type
& XFRAME_II_DEVICE
) {
6096 mode
= s2io_verify_pci_mode(sp
);
6098 DBG_PRINT(ERR_DBG
, "%s: ", __FUNCTION__
);
6099 DBG_PRINT(ERR_DBG
, " Unsupported PCI bus mode\n");
6101 goto set_swap_failed
;
6105 /* Not needed for Herc */
6106 if (sp
->device_type
& XFRAME_I_DEVICE
) {
6108 * Fix for all "FFs" MAC address problems observed on
6111 fix_mac_address(sp
);
6116 * MAC address initialization.
6117 * For now only one mac address will be read and used.
6120 val64
= RMAC_ADDR_CMD_MEM_RD
| RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD
|
6121 RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET
);
6122 writeq(val64
, &bar0
->rmac_addr_cmd_mem
);
6123 wait_for_cmd_complete(sp
);
6125 tmp64
= readq(&bar0
->rmac_addr_data0_mem
);
6126 mac_down
= (u32
) tmp64
;
6127 mac_up
= (u32
) (tmp64
>> 32);
6129 memset(sp
->def_mac_addr
[0].mac_addr
, 0, sizeof(ETH_ALEN
));
6131 sp
->def_mac_addr
[0].mac_addr
[3] = (u8
) (mac_up
);
6132 sp
->def_mac_addr
[0].mac_addr
[2] = (u8
) (mac_up
>> 8);
6133 sp
->def_mac_addr
[0].mac_addr
[1] = (u8
) (mac_up
>> 16);
6134 sp
->def_mac_addr
[0].mac_addr
[0] = (u8
) (mac_up
>> 24);
6135 sp
->def_mac_addr
[0].mac_addr
[5] = (u8
) (mac_down
>> 16);
6136 sp
->def_mac_addr
[0].mac_addr
[4] = (u8
) (mac_down
>> 24);
6138 /* Set the factory defined MAC address initially */
6139 dev
->addr_len
= ETH_ALEN
;
6140 memcpy(dev
->dev_addr
, sp
->def_mac_addr
, ETH_ALEN
);
	/*
	 * Initialize the tasklet status and link state flags
	 * and the card state parameter
	 */
	atomic_set(&(sp->card_state), 0);
	sp->tasklet_status = 0;

	/* Initialize spinlocks */
	spin_lock_init(&sp->tx_lock);
#ifndef CONFIG_S2IO_NAPI
	spin_lock_init(&sp->put_lock);
#endif
	spin_lock_init(&sp->rx_lock);
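	/*
	 * Rough division of labour for the locks above (an inference from
	 * their names and the build-time guard): tx_lock serializes the
	 * transmit path, rx_lock the receive-buffer replenish path, and
	 * put_lock, which only exists in non-NAPI builds, protects the Rx
	 * "put" bookkeeping shared between the interrupt handler and the
	 * replenish routine.
	 */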
	/*
	 * SXE-002: Configure link and activity LED to init state.
	 */
	subid = sp->pdev->subsystem_device;
	if ((subid & 0xFF) >= 0x07) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *) bar0 + 0x2700);
		val64 = readq(&bar0->gpio_control);
	}
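	/*
	 * Per the SXE-002 errata note above: the bit OR-ed into gpio_control
	 * appears to hand the link/activity LED pins over to software
	 * control, and the 64-bit constant written at BAR0 offset 0x2700
	 * sets their initial state. The exact bit layout is not spelled out
	 * here, so the two magic values are best treated as opaque,
	 * board-specific settings.
	 */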
	sp->rx_csum = 1;	/* Rx chksum verify enabled by default */
	if (register_netdev(dev)) {
		DBG_PRINT(ERR_DBG, "Device registration failed\n");
		goto register_failed;
	}
	if (sp->device_type & XFRAME_II_DEVICE) {
		DBG_PRINT(ERR_DBG, "%s: Neterion Xframe II 10GbE adapter ",
			  dev->name);
		DBG_PRINT(ERR_DBG, "(rev %d), Version %s",
			  get_xena_rev_id(sp->pdev),
			  s2io_driver_version);
		switch (sp->intr_type) {
		case INTA:
			DBG_PRINT(ERR_DBG, ", Intr type INTA");
			break;
		case MSI:
			DBG_PRINT(ERR_DBG, ", Intr type MSI");
			break;
		case MSI_X:
			DBG_PRINT(ERR_DBG, ", Intr type MSI-X");
			break;
		}
		DBG_PRINT(ERR_DBG, "\nCopyright(c) 2002-2005 Neterion Inc.\n");
		DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
			  sp->def_mac_addr[0].mac_addr[0],
			  sp->def_mac_addr[0].mac_addr[1],
			  sp->def_mac_addr[0].mac_addr[2],
			  sp->def_mac_addr[0].mac_addr[3],
			  sp->def_mac_addr[0].mac_addr[4],
			  sp->def_mac_addr[0].mac_addr[5]);
		mode = s2io_print_pci_mode(sp);
		if (mode < 0) {
			DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode ");
			goto set_swap_failed;
		}
	} else {
		DBG_PRINT(ERR_DBG, "%s: Neterion Xframe I 10GbE adapter ",
			  dev->name);
		DBG_PRINT(ERR_DBG, "(rev %d), Version %s",
			  get_xena_rev_id(sp->pdev),
			  s2io_driver_version);
		switch (sp->intr_type) {
		case INTA:
			DBG_PRINT(ERR_DBG, ", Intr type INTA");
			break;
		case MSI:
			DBG_PRINT(ERR_DBG, ", Intr type MSI");
			break;
		case MSI_X:
			DBG_PRINT(ERR_DBG, ", Intr type MSI-X");
			break;
		}
		DBG_PRINT(ERR_DBG, "\nCopyright(c) 2002-2005 Neterion Inc.\n");
		DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
			  sp->def_mac_addr[0].mac_addr[0],
			  sp->def_mac_addr[0].mac_addr[1],
			  sp->def_mac_addr[0].mac_addr[2],
			  sp->def_mac_addr[0].mac_addr[3],
			  sp->def_mac_addr[0].mac_addr[4],
			  sp->def_mac_addr[0].mac_addr[5]);
	}
	if (sp->rxd_mode == RXD_MODE_3B)
		DBG_PRINT(ERR_DBG, "%s: 2-Buffer mode support has been "
			  "enabled\n", dev->name);
	if (sp->rxd_mode == RXD_MODE_3A)
		DBG_PRINT(ERR_DBG, "%s: 3-Buffer mode support has been "
			  "enabled\n", dev->name);
	/* Initialize device name */
	strcpy(sp->name, dev->name);
	if (sp->device_type & XFRAME_II_DEVICE)
		strcat(sp->name, ": Neterion Xframe II 10GbE adapter");
	else
		strcat(sp->name, ": Neterion Xframe I 10GbE adapter");
	/* Initialize bimodal Interrupts */
	sp->config.bimodal = bimodal;
	if (!(sp->device_type & XFRAME_II_DEVICE) && bimodal) {
		sp->config.bimodal = 0;
		DBG_PRINT(ERR_DBG, "%s:Bimodal intr not supported by Xframe I\n",
			  dev->name);
	}
	/*
	 * Mark the link as down at this point; when the link change
	 * interrupt arrives, the state will be updated automatically.
	 */
	netif_carrier_off(dev);
	return 0;

register_failed:
set_swap_failed:
	free_shared_mem(sp);
	pci_disable_device(pdev);
	if (dev_intr_type != MSI_X)
		pci_release_regions(pdev);
	else {
		release_mem_region(pci_resource_start(pdev, 0),
				   pci_resource_len(pdev, 0));
		release_mem_region(pci_resource_start(pdev, 2),
				   pci_resource_len(pdev, 2));
	}
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);

	return ret;
}
/**
 * s2io_rem_nic - Free the PCI device
 * @pdev: structure containing the PCI related information of the device.
 * Description: This function is called by the PCI subsystem to release a
 * PCI device and free up all resources held by the device. This could
 * be in response to a Hot plug event or when the driver is to be removed
 * from memory.
 */

static void __devexit s2io_rem_nic(struct pci_dev *pdev)
{
	struct net_device *dev =
	    (struct net_device *) pci_get_drvdata(pdev);
	nic_t *sp;

	if (dev == NULL) {
		DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
		return;
	}

	sp = dev->priv;
	unregister_netdev(dev);

	free_shared_mem(sp);
	pci_disable_device(pdev);
	if (sp->intr_type != MSI_X)
		pci_release_regions(pdev);
	else {
		release_mem_region(pci_resource_start(pdev, 0),
				   pci_resource_len(pdev, 0));
		release_mem_region(pci_resource_start(pdev, 2),
				   pci_resource_len(pdev, 2));
	}
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
}
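/*
 * Note on the asymmetric region release in the probe error path and in
 * s2io_rem_nic(): when INTA or MSI is in use the probe path claims every
 * BAR with pci_request_regions(), so pci_release_regions() is the matching
 * release; for MSI-X the probe path appears to have requested the BAR0 and
 * BAR2 memory regions individually, hence the two release_mem_region()
 * calls instead.
 */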
/**
 * s2io_starter - Entry point for the driver
 * Description: This function is the entry point for the driver. It verifies
 * the module loadable parameters and initializes PCI configuration space.
 */

int __init s2io_starter(void)
{
	return pci_module_init(&s2io_driver);
}
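/*
 * pci_module_init() is the 2.6-era wrapper that registers s2io_driver with
 * the PCI core and returns its status; probing of matching devices (and the
 * parameter checks mentioned above) then happens in the driver's probe
 * routine. On kernels where pci_module_init() has been removed, the direct
 * equivalent is pci_register_driver(&s2io_driver).
 */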
/**
 * s2io_closer - Cleanup routine for the driver
 * Description: This function is the cleanup routine for the driver. It
 * unregisters the driver.
 */

void s2io_closer(void)
{
	pci_unregister_driver(&s2io_driver);
	DBG_PRINT(INIT_DBG, "cleanup done\n");
}

module_init(s2io_starter);
module_exit(s2io_closer);