1 /******************************************************************************
2 * This software may be used and distributed according to the terms of
3 * the GNU General Public License (GPL), incorporated herein by reference.
4 * Drivers based on or derived from this code fall under the GPL and must
5 * retain the authorship, copyright and license notice. This file is not
6 * a complete program and may only be used when the entire operating
7 * system is licensed under the GPL.
8 * See the file COPYING in this distribution for more information.
10 * vxge-main.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
11 * Virtualized Server Adapter.
12 * Copyright(c) 2002-2010 Exar Corp.
14 * The module loadable parameters that are supported by the driver and a brief
15 * explanation of all the variables:
17 * Strip VLAN Tag enable/disable. Instructs the device to remove
18 * the VLAN tag from all received tagged frames that are not
19 * replicated at the internal L2 switch.
20 * 0 - Do not strip the VLAN tag.
21 * 1 - Strip the VLAN tag.
24 * Enable learning the mac address of the guest OS interface in
25 * a virtualization environment.
30 * Maximum number of port to be supported.
34 * This configures the maximum no of VPATH configures for each
36 * MIN - 1 and MAX - 17
39 * This configures maximum no of Device function to be enabled.
40 * MIN - 1 and MAX - 17
42 ******************************************************************************/
44 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
46 #include <linux/bitops.h>
47 #include <linux/if_vlan.h>
48 #include <linux/interrupt.h>
49 #include <linux/pci.h>
50 #include <linux/slab.h>
51 #include <linux/tcp.h>
53 #include <linux/netdevice.h>
54 #include <linux/etherdevice.h>
55 #include <linux/firmware.h>
56 #include <linux/net_tstamp.h>
57 #include <linux/prefetch.h>
58 #include "vxge-main.h"
61 MODULE_LICENSE("Dual BSD/GPL");
62 MODULE_DESCRIPTION("Neterion's X3100 Series 10GbE PCIe I/O"
63 "Virtualized Server Adapter");
65 static DEFINE_PCI_DEVICE_TABLE(vxge_id_table
) = {
66 {PCI_VENDOR_ID_S2IO
, PCI_DEVICE_ID_TITAN_WIN
, PCI_ANY_ID
,
68 {PCI_VENDOR_ID_S2IO
, PCI_DEVICE_ID_TITAN_UNI
, PCI_ANY_ID
,
73 MODULE_DEVICE_TABLE(pci
, vxge_id_table
);
75 VXGE_MODULE_PARAM_INT(vlan_tag_strip
, VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE
);
76 VXGE_MODULE_PARAM_INT(addr_learn_en
, VXGE_HW_MAC_ADDR_LEARN_DEFAULT
);
77 VXGE_MODULE_PARAM_INT(max_config_port
, VXGE_MAX_CONFIG_PORT
);
78 VXGE_MODULE_PARAM_INT(max_config_vpath
, VXGE_USE_DEFAULT
);
79 VXGE_MODULE_PARAM_INT(max_mac_vpath
, VXGE_MAX_MAC_ADDR_COUNT
);
80 VXGE_MODULE_PARAM_INT(max_config_dev
, VXGE_MAX_CONFIG_DEV
);
82 static u16 vpath_selector
[VXGE_HW_MAX_VIRTUAL_PATHS
] =
83 {0, 1, 3, 3, 7, 7, 7, 7, 15, 15, 15, 15, 15, 15, 15, 15, 31};
84 static unsigned int bw_percentage
[VXGE_HW_MAX_VIRTUAL_PATHS
] =
85 {[0 ...(VXGE_HW_MAX_VIRTUAL_PATHS
- 1)] = 0xFF};
86 module_param_array(bw_percentage
, uint
, NULL
, 0);
88 static struct vxge_drv_config
*driver_config
;
90 static inline int is_vxge_card_up(struct vxgedev
*vdev
)
92 return test_bit(__VXGE_STATE_CARD_UP
, &vdev
->state
);
95 static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo
*fifo
)
97 struct sk_buff
**skb_ptr
= NULL
;
98 struct sk_buff
**temp
;
99 #define NR_SKB_COMPLETED 128
100 struct sk_buff
*completed
[NR_SKB_COMPLETED
];
107 if (__netif_tx_trylock(fifo
->txq
)) {
108 vxge_hw_vpath_poll_tx(fifo
->handle
, &skb_ptr
,
109 NR_SKB_COMPLETED
, &more
);
110 __netif_tx_unlock(fifo
->txq
);
114 for (temp
= completed
; temp
!= skb_ptr
; temp
++)
115 dev_kfree_skb_irq(*temp
);
119 static inline void VXGE_COMPLETE_ALL_TX(struct vxgedev
*vdev
)
123 /* Complete all transmits */
124 for (i
= 0; i
< vdev
->no_of_vpath
; i
++)
125 VXGE_COMPLETE_VPATH_TX(&vdev
->vpaths
[i
].fifo
);
128 static inline void VXGE_COMPLETE_ALL_RX(struct vxgedev
*vdev
)
131 struct vxge_ring
*ring
;
133 /* Complete all receives*/
134 for (i
= 0; i
< vdev
->no_of_vpath
; i
++) {
135 ring
= &vdev
->vpaths
[i
].ring
;
136 vxge_hw_vpath_poll_rx(ring
->handle
);
141 * vxge_callback_link_up
143 * This function is called during interrupt context to notify link up state
146 static void vxge_callback_link_up(struct __vxge_hw_device
*hldev
)
148 struct net_device
*dev
= hldev
->ndev
;
149 struct vxgedev
*vdev
= netdev_priv(dev
);
151 vxge_debug_entryexit(VXGE_TRACE
, "%s: %s:%d",
152 vdev
->ndev
->name
, __func__
, __LINE__
);
153 netdev_notice(vdev
->ndev
, "Link Up\n");
154 vdev
->stats
.link_up
++;
156 netif_carrier_on(vdev
->ndev
);
157 netif_tx_wake_all_queues(vdev
->ndev
);
159 vxge_debug_entryexit(VXGE_TRACE
,
160 "%s: %s:%d Exiting...", vdev
->ndev
->name
, __func__
, __LINE__
);
164 * vxge_callback_link_down
166 * This function is called during interrupt context to notify link down state
169 static void vxge_callback_link_down(struct __vxge_hw_device
*hldev
)
171 struct net_device
*dev
= hldev
->ndev
;
172 struct vxgedev
*vdev
= netdev_priv(dev
);
174 vxge_debug_entryexit(VXGE_TRACE
,
175 "%s: %s:%d", vdev
->ndev
->name
, __func__
, __LINE__
);
176 netdev_notice(vdev
->ndev
, "Link Down\n");
178 vdev
->stats
.link_down
++;
179 netif_carrier_off(vdev
->ndev
);
180 netif_tx_stop_all_queues(vdev
->ndev
);
182 vxge_debug_entryexit(VXGE_TRACE
,
183 "%s: %s:%d Exiting...", vdev
->ndev
->name
, __func__
, __LINE__
);
191 static struct sk_buff
*
192 vxge_rx_alloc(void *dtrh
, struct vxge_ring
*ring
, const int skb_size
)
194 struct net_device
*dev
;
196 struct vxge_rx_priv
*rx_priv
;
199 vxge_debug_entryexit(VXGE_TRACE
, "%s: %s:%d",
200 ring
->ndev
->name
, __func__
, __LINE__
);
202 rx_priv
= vxge_hw_ring_rxd_private_get(dtrh
);
204 /* try to allocate skb first. this one may fail */
205 skb
= netdev_alloc_skb(dev
, skb_size
+
206 VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN
);
208 vxge_debug_mem(VXGE_ERR
,
209 "%s: out of memory to allocate SKB", dev
->name
);
210 ring
->stats
.skb_alloc_fail
++;
214 vxge_debug_mem(VXGE_TRACE
,
215 "%s: %s:%d Skb : 0x%p", ring
->ndev
->name
,
216 __func__
, __LINE__
, skb
);
218 skb_reserve(skb
, VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN
);
221 rx_priv
->skb_data
= NULL
;
222 rx_priv
->data_size
= skb_size
;
223 vxge_debug_entryexit(VXGE_TRACE
,
224 "%s: %s:%d Exiting...", ring
->ndev
->name
, __func__
, __LINE__
);
232 static int vxge_rx_map(void *dtrh
, struct vxge_ring
*ring
)
234 struct vxge_rx_priv
*rx_priv
;
237 vxge_debug_entryexit(VXGE_TRACE
, "%s: %s:%d",
238 ring
->ndev
->name
, __func__
, __LINE__
);
239 rx_priv
= vxge_hw_ring_rxd_private_get(dtrh
);
241 rx_priv
->skb_data
= rx_priv
->skb
->data
;
242 dma_addr
= pci_map_single(ring
->pdev
, rx_priv
->skb_data
,
243 rx_priv
->data_size
, PCI_DMA_FROMDEVICE
);
245 if (unlikely(pci_dma_mapping_error(ring
->pdev
, dma_addr
))) {
246 ring
->stats
.pci_map_fail
++;
249 vxge_debug_mem(VXGE_TRACE
,
250 "%s: %s:%d 1 buffer mode dma_addr = 0x%llx",
251 ring
->ndev
->name
, __func__
, __LINE__
,
252 (unsigned long long)dma_addr
);
253 vxge_hw_ring_rxd_1b_set(dtrh
, dma_addr
, rx_priv
->data_size
);
255 rx_priv
->data_dma
= dma_addr
;
256 vxge_debug_entryexit(VXGE_TRACE
,
257 "%s: %s:%d Exiting...", ring
->ndev
->name
, __func__
, __LINE__
);
263 * vxge_rx_initial_replenish
264 * Allocation of RxD as an initial replenish procedure.
266 static enum vxge_hw_status
267 vxge_rx_initial_replenish(void *dtrh
, void *userdata
)
269 struct vxge_ring
*ring
= (struct vxge_ring
*)userdata
;
270 struct vxge_rx_priv
*rx_priv
;
272 vxge_debug_entryexit(VXGE_TRACE
, "%s: %s:%d",
273 ring
->ndev
->name
, __func__
, __LINE__
);
274 if (vxge_rx_alloc(dtrh
, ring
,
275 VXGE_LL_MAX_FRAME_SIZE(ring
->ndev
)) == NULL
)
278 if (vxge_rx_map(dtrh
, ring
)) {
279 rx_priv
= vxge_hw_ring_rxd_private_get(dtrh
);
280 dev_kfree_skb(rx_priv
->skb
);
284 vxge_debug_entryexit(VXGE_TRACE
,
285 "%s: %s:%d Exiting...", ring
->ndev
->name
, __func__
, __LINE__
);
291 vxge_rx_complete(struct vxge_ring
*ring
, struct sk_buff
*skb
, u16 vlan
,
292 int pkt_length
, struct vxge_hw_ring_rxd_info
*ext_info
)
295 vxge_debug_entryexit(VXGE_TRACE
, "%s: %s:%d",
296 ring
->ndev
->name
, __func__
, __LINE__
);
297 skb_record_rx_queue(skb
, ring
->driver_id
);
298 skb
->protocol
= eth_type_trans(skb
, ring
->ndev
);
300 u64_stats_update_begin(&ring
->stats
.syncp
);
301 ring
->stats
.rx_frms
++;
302 ring
->stats
.rx_bytes
+= pkt_length
;
304 if (skb
->pkt_type
== PACKET_MULTICAST
)
305 ring
->stats
.rx_mcast
++;
306 u64_stats_update_end(&ring
->stats
.syncp
);
308 vxge_debug_rx(VXGE_TRACE
,
309 "%s: %s:%d skb protocol = %d",
310 ring
->ndev
->name
, __func__
, __LINE__
, skb
->protocol
);
312 if (ext_info
->vlan
&&
313 ring
->vlan_tag_strip
== VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE
)
314 __vlan_hwaccel_put_tag(skb
, ext_info
->vlan
);
315 napi_gro_receive(ring
->napi_p
, skb
);
317 vxge_debug_entryexit(VXGE_TRACE
,
318 "%s: %s:%d Exiting...", ring
->ndev
->name
, __func__
, __LINE__
);
321 static inline void vxge_re_pre_post(void *dtr
, struct vxge_ring
*ring
,
322 struct vxge_rx_priv
*rx_priv
)
324 pci_dma_sync_single_for_device(ring
->pdev
,
325 rx_priv
->data_dma
, rx_priv
->data_size
, PCI_DMA_FROMDEVICE
);
327 vxge_hw_ring_rxd_1b_set(dtr
, rx_priv
->data_dma
, rx_priv
->data_size
);
328 vxge_hw_ring_rxd_pre_post(ring
->handle
, dtr
);
331 static inline void vxge_post(int *dtr_cnt
, void **first_dtr
,
332 void *post_dtr
, struct __vxge_hw_ring
*ringh
)
334 int dtr_count
= *dtr_cnt
;
335 if ((*dtr_cnt
% VXGE_HW_RXSYNC_FREQ_CNT
) == 0) {
337 vxge_hw_ring_rxd_post_post_wmb(ringh
, *first_dtr
);
338 *first_dtr
= post_dtr
;
340 vxge_hw_ring_rxd_post_post(ringh
, post_dtr
);
342 *dtr_cnt
= dtr_count
;
348 * If the interrupt is because of a received frame or if the receive ring
349 * contains fresh as yet un-processed frames, this function is called.
351 static enum vxge_hw_status
352 vxge_rx_1b_compl(struct __vxge_hw_ring
*ringh
, void *dtr
,
353 u8 t_code
, void *userdata
)
355 struct vxge_ring
*ring
= (struct vxge_ring
*)userdata
;
356 struct net_device
*dev
= ring
->ndev
;
357 unsigned int dma_sizes
;
358 void *first_dtr
= NULL
;
364 struct vxge_rx_priv
*rx_priv
;
365 struct vxge_hw_ring_rxd_info ext_info
;
366 vxge_debug_entryexit(VXGE_TRACE
, "%s: %s:%d",
367 ring
->ndev
->name
, __func__
, __LINE__
);
370 prefetch((char *)dtr
+ L1_CACHE_BYTES
);
371 rx_priv
= vxge_hw_ring_rxd_private_get(dtr
);
373 data_size
= rx_priv
->data_size
;
374 data_dma
= rx_priv
->data_dma
;
375 prefetch(rx_priv
->skb_data
);
377 vxge_debug_rx(VXGE_TRACE
,
378 "%s: %s:%d skb = 0x%p",
379 ring
->ndev
->name
, __func__
, __LINE__
, skb
);
381 vxge_hw_ring_rxd_1b_get(ringh
, dtr
, &dma_sizes
);
382 pkt_length
= dma_sizes
;
384 pkt_length
-= ETH_FCS_LEN
;
386 vxge_debug_rx(VXGE_TRACE
,
387 "%s: %s:%d Packet Length = %d",
388 ring
->ndev
->name
, __func__
, __LINE__
, pkt_length
);
390 vxge_hw_ring_rxd_1b_info_get(ringh
, dtr
, &ext_info
);
392 /* check skb validity */
395 prefetch((char *)skb
+ L1_CACHE_BYTES
);
396 if (unlikely(t_code
)) {
397 if (vxge_hw_ring_handle_tcode(ringh
, dtr
, t_code
) !=
400 ring
->stats
.rx_errors
++;
401 vxge_debug_rx(VXGE_TRACE
,
402 "%s: %s :%d Rx T_code is %d",
403 ring
->ndev
->name
, __func__
,
406 /* If the t_code is not supported and if the
407 * t_code is other than 0x5 (unparseable packet
408 * such as unknown UPV6 header), Drop it !!!
410 vxge_re_pre_post(dtr
, ring
, rx_priv
);
412 vxge_post(&dtr_cnt
, &first_dtr
, dtr
, ringh
);
413 ring
->stats
.rx_dropped
++;
418 if (pkt_length
> VXGE_LL_RX_COPY_THRESHOLD
) {
419 if (vxge_rx_alloc(dtr
, ring
, data_size
) != NULL
) {
420 if (!vxge_rx_map(dtr
, ring
)) {
421 skb_put(skb
, pkt_length
);
423 pci_unmap_single(ring
->pdev
, data_dma
,
424 data_size
, PCI_DMA_FROMDEVICE
);
426 vxge_hw_ring_rxd_pre_post(ringh
, dtr
);
427 vxge_post(&dtr_cnt
, &first_dtr
, dtr
,
430 dev_kfree_skb(rx_priv
->skb
);
432 rx_priv
->data_size
= data_size
;
433 vxge_re_pre_post(dtr
, ring
, rx_priv
);
435 vxge_post(&dtr_cnt
, &first_dtr
, dtr
,
437 ring
->stats
.rx_dropped
++;
441 vxge_re_pre_post(dtr
, ring
, rx_priv
);
443 vxge_post(&dtr_cnt
, &first_dtr
, dtr
, ringh
);
444 ring
->stats
.rx_dropped
++;
448 struct sk_buff
*skb_up
;
450 skb_up
= netdev_alloc_skb(dev
, pkt_length
+
451 VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN
);
452 if (skb_up
!= NULL
) {
454 VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN
);
456 pci_dma_sync_single_for_cpu(ring
->pdev
,
460 vxge_debug_mem(VXGE_TRACE
,
461 "%s: %s:%d skb_up = %p",
462 ring
->ndev
->name
, __func__
,
464 memcpy(skb_up
->data
, skb
->data
, pkt_length
);
466 vxge_re_pre_post(dtr
, ring
, rx_priv
);
468 vxge_post(&dtr_cnt
, &first_dtr
, dtr
,
470 /* will netif_rx small SKB instead */
472 skb_put(skb
, pkt_length
);
474 vxge_re_pre_post(dtr
, ring
, rx_priv
);
476 vxge_post(&dtr_cnt
, &first_dtr
, dtr
, ringh
);
477 vxge_debug_rx(VXGE_ERR
,
478 "%s: vxge_rx_1b_compl: out of "
479 "memory", dev
->name
);
480 ring
->stats
.skb_alloc_fail
++;
485 if ((ext_info
.proto
& VXGE_HW_FRAME_PROTO_TCP_OR_UDP
) &&
486 !(ext_info
.proto
& VXGE_HW_FRAME_PROTO_IP_FRAG
) &&
487 (dev
->features
& NETIF_F_RXCSUM
) && /* Offload Rx side CSUM */
488 ext_info
.l3_cksum
== VXGE_HW_L3_CKSUM_OK
&&
489 ext_info
.l4_cksum
== VXGE_HW_L4_CKSUM_OK
)
490 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
492 skb_checksum_none_assert(skb
);
496 struct skb_shared_hwtstamps
*skb_hwts
;
497 u32 ns
= *(u32
*)(skb
->head
+ pkt_length
);
499 skb_hwts
= skb_hwtstamps(skb
);
500 skb_hwts
->hwtstamp
= ns_to_ktime(ns
);
501 skb_hwts
->syststamp
.tv64
= 0;
504 /* rth_hash_type and rth_it_hit are non-zero regardless of
505 * whether rss is enabled. Only the rth_value is zero/non-zero
506 * if rss is disabled/enabled, so key off of that.
508 if (ext_info
.rth_value
)
509 skb
->rxhash
= ext_info
.rth_value
;
511 vxge_rx_complete(ring
, skb
, ext_info
.vlan
,
512 pkt_length
, &ext_info
);
515 ring
->pkts_processed
++;
519 } while (vxge_hw_ring_rxd_next_completed(ringh
, &dtr
,
520 &t_code
) == VXGE_HW_OK
);
523 vxge_hw_ring_rxd_post_post_wmb(ringh
, first_dtr
);
525 vxge_debug_entryexit(VXGE_TRACE
,
534 * If an interrupt was raised to indicate DMA complete of the Tx packet,
535 * this function is called. It identifies the last TxD whose buffer was
536 * freed and frees all skbs whose data have already DMA'ed into the NICs
539 static enum vxge_hw_status
540 vxge_xmit_compl(struct __vxge_hw_fifo
*fifo_hw
, void *dtr
,
541 enum vxge_hw_fifo_tcode t_code
, void *userdata
,
542 struct sk_buff
***skb_ptr
, int nr_skb
, int *more
)
544 struct vxge_fifo
*fifo
= (struct vxge_fifo
*)userdata
;
545 struct sk_buff
*skb
, **done_skb
= *skb_ptr
;
548 vxge_debug_entryexit(VXGE_TRACE
,
549 "%s:%d Entered....", __func__
, __LINE__
);
555 struct vxge_tx_priv
*txd_priv
=
556 vxge_hw_fifo_txdl_private_get(dtr
);
559 frg_cnt
= skb_shinfo(skb
)->nr_frags
;
560 frag
= &skb_shinfo(skb
)->frags
[0];
562 vxge_debug_tx(VXGE_TRACE
,
563 "%s: %s:%d fifo_hw = %p dtr = %p "
564 "tcode = 0x%x", fifo
->ndev
->name
, __func__
,
565 __LINE__
, fifo_hw
, dtr
, t_code
);
566 /* check skb validity */
568 vxge_debug_tx(VXGE_TRACE
,
569 "%s: %s:%d skb = %p itxd_priv = %p frg_cnt = %d",
570 fifo
->ndev
->name
, __func__
, __LINE__
,
571 skb
, txd_priv
, frg_cnt
);
572 if (unlikely(t_code
)) {
573 fifo
->stats
.tx_errors
++;
574 vxge_debug_tx(VXGE_ERR
,
575 "%s: tx: dtr %p completed due to "
576 "error t_code %01x", fifo
->ndev
->name
,
578 vxge_hw_fifo_handle_tcode(fifo_hw
, dtr
, t_code
);
581 /* for unfragmented skb */
582 pci_unmap_single(fifo
->pdev
, txd_priv
->dma_buffers
[i
++],
583 skb_headlen(skb
), PCI_DMA_TODEVICE
);
585 for (j
= 0; j
< frg_cnt
; j
++) {
586 pci_unmap_page(fifo
->pdev
,
587 txd_priv
->dma_buffers
[i
++],
588 frag
->size
, PCI_DMA_TODEVICE
);
592 vxge_hw_fifo_txdl_free(fifo_hw
, dtr
);
594 /* Updating the statistics block */
595 u64_stats_update_begin(&fifo
->stats
.syncp
);
596 fifo
->stats
.tx_frms
++;
597 fifo
->stats
.tx_bytes
+= skb
->len
;
598 u64_stats_update_end(&fifo
->stats
.syncp
);
608 if (pkt_cnt
> fifo
->indicate_max_pkts
)
611 } while (vxge_hw_fifo_txdl_next_completed(fifo_hw
,
612 &dtr
, &t_code
) == VXGE_HW_OK
);
615 if (netif_tx_queue_stopped(fifo
->txq
))
616 netif_tx_wake_queue(fifo
->txq
);
618 vxge_debug_entryexit(VXGE_TRACE
,
619 "%s: %s:%d Exiting...",
620 fifo
->ndev
->name
, __func__
, __LINE__
);
624 /* select a vpath to transmit the packet */
625 static u32
vxge_get_vpath_no(struct vxgedev
*vdev
, struct sk_buff
*skb
)
627 u16 queue_len
, counter
= 0;
628 if (skb
->protocol
== htons(ETH_P_IP
)) {
634 if (!ip_is_fragment(ip
)) {
635 th
= (struct tcphdr
*)(((unsigned char *)ip
) +
638 queue_len
= vdev
->no_of_vpath
;
639 counter
= (ntohs(th
->source
) +
641 vdev
->vpath_selector
[queue_len
- 1];
642 if (counter
>= queue_len
)
643 counter
= queue_len
- 1;
649 static enum vxge_hw_status
vxge_search_mac_addr_in_list(
650 struct vxge_vpath
*vpath
, u64 del_mac
)
652 struct list_head
*entry
, *next
;
653 list_for_each_safe(entry
, next
, &vpath
->mac_addr_list
) {
654 if (((struct vxge_mac_addrs
*)entry
)->macaddr
== del_mac
)
660 static int vxge_mac_list_add(struct vxge_vpath
*vpath
, struct macInfo
*mac
)
662 struct vxge_mac_addrs
*new_mac_entry
;
663 u8
*mac_address
= NULL
;
665 if (vpath
->mac_addr_cnt
>= VXGE_MAX_LEARN_MAC_ADDR_CNT
)
668 new_mac_entry
= kzalloc(sizeof(struct vxge_mac_addrs
), GFP_ATOMIC
);
669 if (!new_mac_entry
) {
670 vxge_debug_mem(VXGE_ERR
,
671 "%s: memory allocation failed",
676 list_add(&new_mac_entry
->item
, &vpath
->mac_addr_list
);
678 /* Copy the new mac address to the list */
679 mac_address
= (u8
*)&new_mac_entry
->macaddr
;
680 memcpy(mac_address
, mac
->macaddr
, ETH_ALEN
);
682 new_mac_entry
->state
= mac
->state
;
683 vpath
->mac_addr_cnt
++;
685 if (is_multicast_ether_addr(mac
->macaddr
))
686 vpath
->mcast_addr_cnt
++;
691 /* Add a mac address to DA table */
692 static enum vxge_hw_status
693 vxge_add_mac_addr(struct vxgedev
*vdev
, struct macInfo
*mac
)
695 enum vxge_hw_status status
= VXGE_HW_OK
;
696 struct vxge_vpath
*vpath
;
697 enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode
;
699 if (is_multicast_ether_addr(mac
->macaddr
))
700 duplicate_mode
= VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE
;
702 duplicate_mode
= VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE
;
704 vpath
= &vdev
->vpaths
[mac
->vpath_no
];
705 status
= vxge_hw_vpath_mac_addr_add(vpath
->handle
, mac
->macaddr
,
706 mac
->macmask
, duplicate_mode
);
707 if (status
!= VXGE_HW_OK
) {
708 vxge_debug_init(VXGE_ERR
,
709 "DA config add entry failed for vpath:%d",
712 if (FALSE
== vxge_mac_list_add(vpath
, mac
))
718 static int vxge_learn_mac(struct vxgedev
*vdev
, u8
*mac_header
)
720 struct macInfo mac_info
;
721 u8
*mac_address
= NULL
;
722 u64 mac_addr
= 0, vpath_vector
= 0;
724 enum vxge_hw_status status
= VXGE_HW_OK
;
725 struct vxge_vpath
*vpath
= NULL
;
726 struct __vxge_hw_device
*hldev
;
728 hldev
= pci_get_drvdata(vdev
->pdev
);
730 mac_address
= (u8
*)&mac_addr
;
731 memcpy(mac_address
, mac_header
, ETH_ALEN
);
733 /* Is this mac address already in the list? */
734 for (vpath_idx
= 0; vpath_idx
< vdev
->no_of_vpath
; vpath_idx
++) {
735 vpath
= &vdev
->vpaths
[vpath_idx
];
736 if (vxge_search_mac_addr_in_list(vpath
, mac_addr
))
740 memset(&mac_info
, 0, sizeof(struct macInfo
));
741 memcpy(mac_info
.macaddr
, mac_header
, ETH_ALEN
);
743 /* Any vpath has room to add mac address to its da table? */
744 for (vpath_idx
= 0; vpath_idx
< vdev
->no_of_vpath
; vpath_idx
++) {
745 vpath
= &vdev
->vpaths
[vpath_idx
];
746 if (vpath
->mac_addr_cnt
< vpath
->max_mac_addr_cnt
) {
747 /* Add this mac address to this vpath */
748 mac_info
.vpath_no
= vpath_idx
;
749 mac_info
.state
= VXGE_LL_MAC_ADDR_IN_DA_TABLE
;
750 status
= vxge_add_mac_addr(vdev
, &mac_info
);
751 if (status
!= VXGE_HW_OK
)
757 mac_info
.state
= VXGE_LL_MAC_ADDR_IN_LIST
;
759 mac_info
.vpath_no
= vpath_idx
;
760 /* Is the first vpath already selected as catch-basin ? */
761 vpath
= &vdev
->vpaths
[vpath_idx
];
762 if (vpath
->mac_addr_cnt
> vpath
->max_mac_addr_cnt
) {
763 /* Add this mac address to this vpath */
764 if (FALSE
== vxge_mac_list_add(vpath
, &mac_info
))
769 /* Select first vpath as catch-basin */
770 vpath_vector
= vxge_mBIT(vpath
->device_id
);
771 status
= vxge_hw_mgmt_reg_write(vpath
->vdev
->devh
,
772 vxge_hw_mgmt_reg_type_mrpcim
,
775 struct vxge_hw_mrpcim_reg
,
778 if (status
!= VXGE_HW_OK
) {
779 vxge_debug_tx(VXGE_ERR
,
780 "%s: Unable to set the vpath-%d in catch-basin mode",
781 VXGE_DRIVER_NAME
, vpath
->device_id
);
785 if (FALSE
== vxge_mac_list_add(vpath
, &mac_info
))
793 * @skb : the socket buffer containing the Tx data.
794 * @dev : device pointer.
796 * This function is the Tx entry point of the driver. Neterion NIC supports
797 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
800 vxge_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
802 struct vxge_fifo
*fifo
= NULL
;
805 struct vxgedev
*vdev
= NULL
;
806 enum vxge_hw_status status
;
807 int frg_cnt
, first_frg_len
;
809 int i
= 0, j
= 0, avail
;
811 struct vxge_tx_priv
*txdl_priv
= NULL
;
812 struct __vxge_hw_fifo
*fifo_hw
;
816 vxge_debug_entryexit(VXGE_TRACE
, "%s: %s:%d",
817 dev
->name
, __func__
, __LINE__
);
819 /* A buffer with no data will be dropped */
820 if (unlikely(skb
->len
<= 0)) {
821 vxge_debug_tx(VXGE_ERR
,
822 "%s: Buffer has no data..", dev
->name
);
827 vdev
= netdev_priv(dev
);
829 if (unlikely(!is_vxge_card_up(vdev
))) {
830 vxge_debug_tx(VXGE_ERR
,
831 "%s: vdev not initialized", dev
->name
);
836 if (vdev
->config
.addr_learn_en
) {
837 vpath_no
= vxge_learn_mac(vdev
, skb
->data
+ ETH_ALEN
);
838 if (vpath_no
== -EPERM
) {
839 vxge_debug_tx(VXGE_ERR
,
840 "%s: Failed to store the mac address",
847 if (vdev
->config
.tx_steering_type
== TX_MULTIQ_STEERING
)
848 vpath_no
= skb_get_queue_mapping(skb
);
849 else if (vdev
->config
.tx_steering_type
== TX_PORT_STEERING
)
850 vpath_no
= vxge_get_vpath_no(vdev
, skb
);
852 vxge_debug_tx(VXGE_TRACE
, "%s: vpath_no= %d", dev
->name
, vpath_no
);
854 if (vpath_no
>= vdev
->no_of_vpath
)
857 fifo
= &vdev
->vpaths
[vpath_no
].fifo
;
858 fifo_hw
= fifo
->handle
;
860 if (netif_tx_queue_stopped(fifo
->txq
))
861 return NETDEV_TX_BUSY
;
863 avail
= vxge_hw_fifo_free_txdl_count_get(fifo_hw
);
865 vxge_debug_tx(VXGE_ERR
,
866 "%s: No free TXDs available", dev
->name
);
867 fifo
->stats
.txd_not_free
++;
871 /* Last TXD? Stop tx queue to avoid dropping packets. TX
872 * completion will resume the queue.
875 netif_tx_stop_queue(fifo
->txq
);
877 status
= vxge_hw_fifo_txdl_reserve(fifo_hw
, &dtr
, &dtr_priv
);
878 if (unlikely(status
!= VXGE_HW_OK
)) {
879 vxge_debug_tx(VXGE_ERR
,
880 "%s: Out of descriptors .", dev
->name
);
881 fifo
->stats
.txd_out_of_desc
++;
885 vxge_debug_tx(VXGE_TRACE
,
886 "%s: %s:%d fifo_hw = %p dtr = %p dtr_priv = %p",
887 dev
->name
, __func__
, __LINE__
,
888 fifo_hw
, dtr
, dtr_priv
);
890 if (vlan_tx_tag_present(skb
)) {
891 u16 vlan_tag
= vlan_tx_tag_get(skb
);
892 vxge_hw_fifo_txdl_vlan_set(dtr
, vlan_tag
);
895 first_frg_len
= skb_headlen(skb
);
897 dma_pointer
= pci_map_single(fifo
->pdev
, skb
->data
, first_frg_len
,
900 if (unlikely(pci_dma_mapping_error(fifo
->pdev
, dma_pointer
))) {
901 vxge_hw_fifo_txdl_free(fifo_hw
, dtr
);
902 fifo
->stats
.pci_map_fail
++;
906 txdl_priv
= vxge_hw_fifo_txdl_private_get(dtr
);
907 txdl_priv
->skb
= skb
;
908 txdl_priv
->dma_buffers
[j
] = dma_pointer
;
910 frg_cnt
= skb_shinfo(skb
)->nr_frags
;
911 vxge_debug_tx(VXGE_TRACE
,
912 "%s: %s:%d skb = %p txdl_priv = %p "
913 "frag_cnt = %d dma_pointer = 0x%llx", dev
->name
,
914 __func__
, __LINE__
, skb
, txdl_priv
,
915 frg_cnt
, (unsigned long long)dma_pointer
);
917 vxge_hw_fifo_txdl_buffer_set(fifo_hw
, dtr
, j
++, dma_pointer
,
920 frag
= &skb_shinfo(skb
)->frags
[0];
921 for (i
= 0; i
< frg_cnt
; i
++) {
922 /* ignore 0 length fragment */
926 dma_pointer
= (u64
) pci_map_page(fifo
->pdev
, frag
->page
,
927 frag
->page_offset
, frag
->size
,
930 if (unlikely(pci_dma_mapping_error(fifo
->pdev
, dma_pointer
)))
932 vxge_debug_tx(VXGE_TRACE
,
933 "%s: %s:%d frag = %d dma_pointer = 0x%llx",
934 dev
->name
, __func__
, __LINE__
, i
,
935 (unsigned long long)dma_pointer
);
937 txdl_priv
->dma_buffers
[j
] = dma_pointer
;
938 vxge_hw_fifo_txdl_buffer_set(fifo_hw
, dtr
, j
++, dma_pointer
,
943 offload_type
= vxge_offload_type(skb
);
945 if (offload_type
& (SKB_GSO_TCPV4
| SKB_GSO_TCPV6
)) {
946 int mss
= vxge_tcp_mss(skb
);
948 vxge_debug_tx(VXGE_TRACE
, "%s: %s:%d mss = %d",
949 dev
->name
, __func__
, __LINE__
, mss
);
950 vxge_hw_fifo_txdl_mss_set(dtr
, mss
);
952 vxge_assert(skb
->len
<=
953 dev
->mtu
+ VXGE_HW_MAC_HEADER_MAX_SIZE
);
959 if (skb
->ip_summed
== CHECKSUM_PARTIAL
)
960 vxge_hw_fifo_txdl_cksum_set_bits(dtr
,
961 VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN
|
962 VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN
|
963 VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN
);
965 vxge_hw_fifo_txdl_post(fifo_hw
, dtr
);
967 vxge_debug_entryexit(VXGE_TRACE
, "%s: %s:%d Exiting...",
968 dev
->name
, __func__
, __LINE__
);
972 vxge_debug_tx(VXGE_TRACE
, "%s: pci_map_page failed", dev
->name
);
975 frag
= &skb_shinfo(skb
)->frags
[0];
977 pci_unmap_single(fifo
->pdev
, txdl_priv
->dma_buffers
[j
++],
978 skb_headlen(skb
), PCI_DMA_TODEVICE
);
981 pci_unmap_page(fifo
->pdev
, txdl_priv
->dma_buffers
[j
],
982 frag
->size
, PCI_DMA_TODEVICE
);
986 vxge_hw_fifo_txdl_free(fifo_hw
, dtr
);
988 netif_tx_stop_queue(fifo
->txq
);
997 * Function will be called by hw function to abort all outstanding receive
1001 vxge_rx_term(void *dtrh
, enum vxge_hw_rxd_state state
, void *userdata
)
1003 struct vxge_ring
*ring
= (struct vxge_ring
*)userdata
;
1004 struct vxge_rx_priv
*rx_priv
=
1005 vxge_hw_ring_rxd_private_get(dtrh
);
1007 vxge_debug_entryexit(VXGE_TRACE
, "%s: %s:%d",
1008 ring
->ndev
->name
, __func__
, __LINE__
);
1009 if (state
!= VXGE_HW_RXD_STATE_POSTED
)
1012 pci_unmap_single(ring
->pdev
, rx_priv
->data_dma
,
1013 rx_priv
->data_size
, PCI_DMA_FROMDEVICE
);
1015 dev_kfree_skb(rx_priv
->skb
);
1016 rx_priv
->skb_data
= NULL
;
1018 vxge_debug_entryexit(VXGE_TRACE
,
1019 "%s: %s:%d Exiting...",
1020 ring
->ndev
->name
, __func__
, __LINE__
);
1026 * Function will be called to abort all outstanding tx descriptors
1029 vxge_tx_term(void *dtrh
, enum vxge_hw_txdl_state state
, void *userdata
)
1031 struct vxge_fifo
*fifo
= (struct vxge_fifo
*)userdata
;
1033 int i
= 0, j
, frg_cnt
;
1034 struct vxge_tx_priv
*txd_priv
= vxge_hw_fifo_txdl_private_get(dtrh
);
1035 struct sk_buff
*skb
= txd_priv
->skb
;
1037 vxge_debug_entryexit(VXGE_TRACE
, "%s:%d", __func__
, __LINE__
);
1039 if (state
!= VXGE_HW_TXDL_STATE_POSTED
)
1042 /* check skb validity */
1044 frg_cnt
= skb_shinfo(skb
)->nr_frags
;
1045 frag
= &skb_shinfo(skb
)->frags
[0];
1047 /* for unfragmented skb */
1048 pci_unmap_single(fifo
->pdev
, txd_priv
->dma_buffers
[i
++],
1049 skb_headlen(skb
), PCI_DMA_TODEVICE
);
1051 for (j
= 0; j
< frg_cnt
; j
++) {
1052 pci_unmap_page(fifo
->pdev
, txd_priv
->dma_buffers
[i
++],
1053 frag
->size
, PCI_DMA_TODEVICE
);
1059 vxge_debug_entryexit(VXGE_TRACE
,
1060 "%s:%d Exiting...", __func__
, __LINE__
);
1063 static int vxge_mac_list_del(struct vxge_vpath
*vpath
, struct macInfo
*mac
)
1065 struct list_head
*entry
, *next
;
1067 u8
*mac_address
= (u8
*) (&del_mac
);
1069 /* Copy the mac address to delete from the list */
1070 memcpy(mac_address
, mac
->macaddr
, ETH_ALEN
);
1072 list_for_each_safe(entry
, next
, &vpath
->mac_addr_list
) {
1073 if (((struct vxge_mac_addrs
*)entry
)->macaddr
== del_mac
) {
1075 kfree((struct vxge_mac_addrs
*)entry
);
1076 vpath
->mac_addr_cnt
--;
1078 if (is_multicast_ether_addr(mac
->macaddr
))
1079 vpath
->mcast_addr_cnt
--;
1087 /* delete a mac address from DA table */
1088 static enum vxge_hw_status
1089 vxge_del_mac_addr(struct vxgedev
*vdev
, struct macInfo
*mac
)
1091 enum vxge_hw_status status
= VXGE_HW_OK
;
1092 struct vxge_vpath
*vpath
;
1094 vpath
= &vdev
->vpaths
[mac
->vpath_no
];
1095 status
= vxge_hw_vpath_mac_addr_delete(vpath
->handle
, mac
->macaddr
,
1097 if (status
!= VXGE_HW_OK
) {
1098 vxge_debug_init(VXGE_ERR
,
1099 "DA config delete entry failed for vpath:%d",
1102 vxge_mac_list_del(vpath
, mac
);
1107 * vxge_set_multicast
1108 * @dev: pointer to the device structure
1110 * Entry point for multicast address enable/disable
1111 * This function is a driver entry point which gets called by the kernel
1112 * whenever multicast addresses must be enabled/disabled. This also gets
1113 * called to set/reset promiscuous mode. Depending on the deivce flag, we
1114 * determine, if multicast address must be enabled or if promiscuous mode
1115 * is to be disabled etc.
1117 static void vxge_set_multicast(struct net_device
*dev
)
1119 struct netdev_hw_addr
*ha
;
1120 struct vxgedev
*vdev
;
1121 int i
, mcast_cnt
= 0;
1122 struct __vxge_hw_device
*hldev
;
1123 struct vxge_vpath
*vpath
;
1124 enum vxge_hw_status status
= VXGE_HW_OK
;
1125 struct macInfo mac_info
;
1127 struct vxge_mac_addrs
*mac_entry
;
1128 struct list_head
*list_head
;
1129 struct list_head
*entry
, *next
;
1130 u8
*mac_address
= NULL
;
1132 vxge_debug_entryexit(VXGE_TRACE
,
1133 "%s:%d", __func__
, __LINE__
);
1135 vdev
= netdev_priv(dev
);
1136 hldev
= (struct __vxge_hw_device
*)vdev
->devh
;
1138 if (unlikely(!is_vxge_card_up(vdev
)))
1141 if ((dev
->flags
& IFF_ALLMULTI
) && (!vdev
->all_multi_flg
)) {
1142 for (i
= 0; i
< vdev
->no_of_vpath
; i
++) {
1143 vpath
= &vdev
->vpaths
[i
];
1144 vxge_assert(vpath
->is_open
);
1145 status
= vxge_hw_vpath_mcast_enable(vpath
->handle
);
1146 if (status
!= VXGE_HW_OK
)
1147 vxge_debug_init(VXGE_ERR
, "failed to enable "
1148 "multicast, status %d", status
);
1149 vdev
->all_multi_flg
= 1;
1151 } else if (!(dev
->flags
& IFF_ALLMULTI
) && (vdev
->all_multi_flg
)) {
1152 for (i
= 0; i
< vdev
->no_of_vpath
; i
++) {
1153 vpath
= &vdev
->vpaths
[i
];
1154 vxge_assert(vpath
->is_open
);
1155 status
= vxge_hw_vpath_mcast_disable(vpath
->handle
);
1156 if (status
!= VXGE_HW_OK
)
1157 vxge_debug_init(VXGE_ERR
, "failed to disable "
1158 "multicast, status %d", status
);
1159 vdev
->all_multi_flg
= 0;
1164 if (!vdev
->config
.addr_learn_en
) {
1165 for (i
= 0; i
< vdev
->no_of_vpath
; i
++) {
1166 vpath
= &vdev
->vpaths
[i
];
1167 vxge_assert(vpath
->is_open
);
1169 if (dev
->flags
& IFF_PROMISC
)
1170 status
= vxge_hw_vpath_promisc_enable(
1173 status
= vxge_hw_vpath_promisc_disable(
1175 if (status
!= VXGE_HW_OK
)
1176 vxge_debug_init(VXGE_ERR
, "failed to %s promisc"
1177 ", status %d", dev
->flags
&IFF_PROMISC
?
1178 "enable" : "disable", status
);
1182 memset(&mac_info
, 0, sizeof(struct macInfo
));
1183 /* Update individual M_CAST address list */
1184 if ((!vdev
->all_multi_flg
) && netdev_mc_count(dev
)) {
1185 mcast_cnt
= vdev
->vpaths
[0].mcast_addr_cnt
;
1186 list_head
= &vdev
->vpaths
[0].mac_addr_list
;
1187 if ((netdev_mc_count(dev
) +
1188 (vdev
->vpaths
[0].mac_addr_cnt
- mcast_cnt
)) >
1189 vdev
->vpaths
[0].max_mac_addr_cnt
)
1190 goto _set_all_mcast
;
1192 /* Delete previous MC's */
1193 for (i
= 0; i
< mcast_cnt
; i
++) {
1194 list_for_each_safe(entry
, next
, list_head
) {
1195 mac_entry
= (struct vxge_mac_addrs
*)entry
;
1196 /* Copy the mac address to delete */
1197 mac_address
= (u8
*)&mac_entry
->macaddr
;
1198 memcpy(mac_info
.macaddr
, mac_address
, ETH_ALEN
);
1200 if (is_multicast_ether_addr(mac_info
.macaddr
)) {
1201 for (vpath_idx
= 0; vpath_idx
<
1204 mac_info
.vpath_no
= vpath_idx
;
1205 status
= vxge_del_mac_addr(
1214 netdev_for_each_mc_addr(ha
, dev
) {
1215 memcpy(mac_info
.macaddr
, ha
->addr
, ETH_ALEN
);
1216 for (vpath_idx
= 0; vpath_idx
< vdev
->no_of_vpath
;
1218 mac_info
.vpath_no
= vpath_idx
;
1219 mac_info
.state
= VXGE_LL_MAC_ADDR_IN_DA_TABLE
;
1220 status
= vxge_add_mac_addr(vdev
, &mac_info
);
1221 if (status
!= VXGE_HW_OK
) {
1222 vxge_debug_init(VXGE_ERR
,
1223 "%s:%d Setting individual"
1224 "multicast address failed",
1225 __func__
, __LINE__
);
1226 goto _set_all_mcast
;
1233 mcast_cnt
= vdev
->vpaths
[0].mcast_addr_cnt
;
1234 /* Delete previous MC's */
1235 for (i
= 0; i
< mcast_cnt
; i
++) {
1236 list_for_each_safe(entry
, next
, list_head
) {
1237 mac_entry
= (struct vxge_mac_addrs
*)entry
;
1238 /* Copy the mac address to delete */
1239 mac_address
= (u8
*)&mac_entry
->macaddr
;
1240 memcpy(mac_info
.macaddr
, mac_address
, ETH_ALEN
);
1242 if (is_multicast_ether_addr(mac_info
.macaddr
))
1246 for (vpath_idx
= 0; vpath_idx
< vdev
->no_of_vpath
;
1248 mac_info
.vpath_no
= vpath_idx
;
1249 status
= vxge_del_mac_addr(vdev
, &mac_info
);
1253 /* Enable all multicast */
1254 for (i
= 0; i
< vdev
->no_of_vpath
; i
++) {
1255 vpath
= &vdev
->vpaths
[i
];
1256 vxge_assert(vpath
->is_open
);
1258 status
= vxge_hw_vpath_mcast_enable(vpath
->handle
);
1259 if (status
!= VXGE_HW_OK
) {
1260 vxge_debug_init(VXGE_ERR
,
1261 "%s:%d Enabling all multicasts failed",
1262 __func__
, __LINE__
);
1264 vdev
->all_multi_flg
= 1;
1266 dev
->flags
|= IFF_ALLMULTI
;
1269 vxge_debug_entryexit(VXGE_TRACE
,
1270 "%s:%d Exiting...", __func__
, __LINE__
);
1275 * @dev: pointer to the device structure
1277 * Update entry "0" (default MAC addr)
1279 static int vxge_set_mac_addr(struct net_device
*dev
, void *p
)
1281 struct sockaddr
*addr
= p
;
1282 struct vxgedev
*vdev
;
1283 struct __vxge_hw_device
*hldev
;
1284 enum vxge_hw_status status
= VXGE_HW_OK
;
1285 struct macInfo mac_info_new
, mac_info_old
;
1288 vxge_debug_entryexit(VXGE_TRACE
, "%s:%d", __func__
, __LINE__
);
1290 vdev
= netdev_priv(dev
);
1293 if (!is_valid_ether_addr(addr
->sa_data
))
1296 memset(&mac_info_new
, 0, sizeof(struct macInfo
));
1297 memset(&mac_info_old
, 0, sizeof(struct macInfo
));
1299 vxge_debug_entryexit(VXGE_TRACE
, "%s:%d Exiting...",
1300 __func__
, __LINE__
);
1302 /* Get the old address */
1303 memcpy(mac_info_old
.macaddr
, dev
->dev_addr
, dev
->addr_len
);
1305 /* Copy the new address */
1306 memcpy(mac_info_new
.macaddr
, addr
->sa_data
, dev
->addr_len
);
1308 /* First delete the old mac address from all the vpaths
1309 as we can't specify the index while adding new mac address */
1310 for (vpath_idx
= 0; vpath_idx
< vdev
->no_of_vpath
; vpath_idx
++) {
1311 struct vxge_vpath
*vpath
= &vdev
->vpaths
[vpath_idx
];
1312 if (!vpath
->is_open
) {
1313 /* This can happen when this interface is added/removed
1314 to the bonding interface. Delete this station address
1315 from the linked list */
1316 vxge_mac_list_del(vpath
, &mac_info_old
);
1318 /* Add this new address to the linked list
1319 for later restoring */
1320 vxge_mac_list_add(vpath
, &mac_info_new
);
1324 /* Delete the station address */
1325 mac_info_old
.vpath_no
= vpath_idx
;
1326 status
= vxge_del_mac_addr(vdev
, &mac_info_old
);
1329 if (unlikely(!is_vxge_card_up(vdev
))) {
1330 memcpy(dev
->dev_addr
, addr
->sa_data
, dev
->addr_len
);
1334 /* Set this mac address to all the vpaths */
1335 for (vpath_idx
= 0; vpath_idx
< vdev
->no_of_vpath
; vpath_idx
++) {
1336 mac_info_new
.vpath_no
= vpath_idx
;
1337 mac_info_new
.state
= VXGE_LL_MAC_ADDR_IN_DA_TABLE
;
1338 status
= vxge_add_mac_addr(vdev
, &mac_info_new
);
1339 if (status
!= VXGE_HW_OK
)
1343 memcpy(dev
->dev_addr
, addr
->sa_data
, dev
->addr_len
);
1349 * vxge_vpath_intr_enable
1350 * @vdev: pointer to vdev
1351 * @vp_id: vpath for which to enable the interrupts
1353 * Enables the interrupts for the vpath
1355 static void vxge_vpath_intr_enable(struct vxgedev
*vdev
, int vp_id
)
1357 struct vxge_vpath
*vpath
= &vdev
->vpaths
[vp_id
];
1359 int tim_msix_id
[4] = {0, 1, 0, 0};
1360 int alarm_msix_id
= VXGE_ALARM_MSIX_ID
;
1362 vxge_hw_vpath_intr_enable(vpath
->handle
);
1364 if (vdev
->config
.intr_type
== INTA
)
1365 vxge_hw_vpath_inta_unmask_tx_rx(vpath
->handle
);
1367 vxge_hw_vpath_msix_set(vpath
->handle
, tim_msix_id
,
1370 msix_id
= vpath
->device_id
* VXGE_HW_VPATH_MSIX_ACTIVE
;
1371 vxge_hw_vpath_msix_unmask(vpath
->handle
, msix_id
);
1372 vxge_hw_vpath_msix_unmask(vpath
->handle
, msix_id
+ 1);
1374 /* enable the alarm vector */
1375 msix_id
= (vpath
->handle
->vpath
->hldev
->first_vp_id
*
1376 VXGE_HW_VPATH_MSIX_ACTIVE
) + alarm_msix_id
;
1377 vxge_hw_vpath_msix_unmask(vpath
->handle
, msix_id
);
1382 * vxge_vpath_intr_disable
1383 * @vdev: pointer to vdev
1384 * @vp_id: vpath for which to disable the interrupts
1386 * Disables the interrupts for the vpath
1388 static void vxge_vpath_intr_disable(struct vxgedev
*vdev
, int vp_id
)
1390 struct vxge_vpath
*vpath
= &vdev
->vpaths
[vp_id
];
1391 struct __vxge_hw_device
*hldev
;
1394 hldev
= pci_get_drvdata(vdev
->pdev
);
1396 vxge_hw_vpath_wait_receive_idle(hldev
, vpath
->device_id
);
1398 vxge_hw_vpath_intr_disable(vpath
->handle
);
1400 if (vdev
->config
.intr_type
== INTA
)
1401 vxge_hw_vpath_inta_mask_tx_rx(vpath
->handle
);
1403 msix_id
= vpath
->device_id
* VXGE_HW_VPATH_MSIX_ACTIVE
;
1404 vxge_hw_vpath_msix_mask(vpath
->handle
, msix_id
);
1405 vxge_hw_vpath_msix_mask(vpath
->handle
, msix_id
+ 1);
1407 /* disable the alarm vector */
1408 msix_id
= (vpath
->handle
->vpath
->hldev
->first_vp_id
*
1409 VXGE_HW_VPATH_MSIX_ACTIVE
) + VXGE_ALARM_MSIX_ID
;
1410 vxge_hw_vpath_msix_mask(vpath
->handle
, msix_id
);
1414 /* list all mac addresses from DA table */
1415 static enum vxge_hw_status
1416 vxge_search_mac_addr_in_da_table(struct vxge_vpath
*vpath
, struct macInfo
*mac
)
1418 enum vxge_hw_status status
= VXGE_HW_OK
;
1419 unsigned char macmask
[ETH_ALEN
];
1420 unsigned char macaddr
[ETH_ALEN
];
1422 status
= vxge_hw_vpath_mac_addr_get(vpath
->handle
,
1424 if (status
!= VXGE_HW_OK
) {
1425 vxge_debug_init(VXGE_ERR
,
1426 "DA config list entry failed for vpath:%d",
1431 while (memcmp(mac
->macaddr
, macaddr
, ETH_ALEN
)) {
1432 status
= vxge_hw_vpath_mac_addr_get_next(vpath
->handle
,
1434 if (status
!= VXGE_HW_OK
)
1441 /* Store all mac addresses from the list to the DA table */
1442 static enum vxge_hw_status
vxge_restore_vpath_mac_addr(struct vxge_vpath
*vpath
)
1444 enum vxge_hw_status status
= VXGE_HW_OK
;
1445 struct macInfo mac_info
;
1446 u8
*mac_address
= NULL
;
1447 struct list_head
*entry
, *next
;
1449 memset(&mac_info
, 0, sizeof(struct macInfo
));
1451 if (vpath
->is_open
) {
1452 list_for_each_safe(entry
, next
, &vpath
->mac_addr_list
) {
1455 ((struct vxge_mac_addrs
*)entry
)->macaddr
;
1456 memcpy(mac_info
.macaddr
, mac_address
, ETH_ALEN
);
1457 ((struct vxge_mac_addrs
*)entry
)->state
=
1458 VXGE_LL_MAC_ADDR_IN_DA_TABLE
;
1459 /* does this mac address already exist in da table? */
1460 status
= vxge_search_mac_addr_in_da_table(vpath
,
1462 if (status
!= VXGE_HW_OK
) {
1463 /* Add this mac address to the DA table */
1464 status
= vxge_hw_vpath_mac_addr_add(
1465 vpath
->handle
, mac_info
.macaddr
,
1467 VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE
);
1468 if (status
!= VXGE_HW_OK
) {
1469 vxge_debug_init(VXGE_ERR
,
1470 "DA add entry failed for vpath:%d",
1472 ((struct vxge_mac_addrs
*)entry
)->state
1473 = VXGE_LL_MAC_ADDR_IN_LIST
;
1482 /* Store all vlan ids from the list to the vid table */
1483 static enum vxge_hw_status
1484 vxge_restore_vpath_vid_table(struct vxge_vpath
*vpath
)
1486 enum vxge_hw_status status
= VXGE_HW_OK
;
1487 struct vxgedev
*vdev
= vpath
->vdev
;
1490 if (!vpath
->is_open
)
1493 for_each_set_bit(vid
, vdev
->active_vlans
, VLAN_N_VID
)
1494 status
= vxge_hw_vpath_vid_add(vpath
->handle
, vid
);
1501 * @vdev: pointer to vdev
1502 * @vp_id: vpath to reset
1506 static int vxge_reset_vpath(struct vxgedev
*vdev
, int vp_id
)
1508 enum vxge_hw_status status
= VXGE_HW_OK
;
1509 struct vxge_vpath
*vpath
= &vdev
->vpaths
[vp_id
];
1512 /* check if device is down already */
1513 if (unlikely(!is_vxge_card_up(vdev
)))
1516 /* is device reset already scheduled */
1517 if (test_bit(__VXGE_STATE_RESET_CARD
, &vdev
->state
))
1520 if (vpath
->handle
) {
1521 if (vxge_hw_vpath_reset(vpath
->handle
) == VXGE_HW_OK
) {
1522 if (is_vxge_card_up(vdev
) &&
1523 vxge_hw_vpath_recover_from_reset(vpath
->handle
)
1525 vxge_debug_init(VXGE_ERR
,
1526 "vxge_hw_vpath_recover_from_reset"
1527 "failed for vpath:%d", vp_id
);
1531 vxge_debug_init(VXGE_ERR
,
1532 "vxge_hw_vpath_reset failed for"
1537 return VXGE_HW_FAIL
;
1539 vxge_restore_vpath_mac_addr(vpath
);
1540 vxge_restore_vpath_vid_table(vpath
);
1542 /* Enable all broadcast */
1543 vxge_hw_vpath_bcast_enable(vpath
->handle
);
1545 /* Enable all multicast */
1546 if (vdev
->all_multi_flg
) {
1547 status
= vxge_hw_vpath_mcast_enable(vpath
->handle
);
1548 if (status
!= VXGE_HW_OK
)
1549 vxge_debug_init(VXGE_ERR
,
1550 "%s:%d Enabling multicast failed",
1551 __func__
, __LINE__
);
1554 /* Enable the interrupts */
1555 vxge_vpath_intr_enable(vdev
, vp_id
);
1559 /* Enable the flow of traffic through the vpath */
1560 vxge_hw_vpath_enable(vpath
->handle
);
1563 vxge_hw_vpath_rx_doorbell_init(vpath
->handle
);
1564 vpath
->ring
.last_status
= VXGE_HW_OK
;
1566 /* Vpath reset done */
1567 clear_bit(vp_id
, &vdev
->vp_reset
);
1569 /* Start the vpath queue */
1570 if (netif_tx_queue_stopped(vpath
->fifo
.txq
))
1571 netif_tx_wake_queue(vpath
->fifo
.txq
);
1577 static void vxge_config_ci_for_tti_rti(struct vxgedev
*vdev
)
1581 /* Enable CI for RTI */
1582 if (vdev
->config
.intr_type
== MSI_X
) {
1583 for (i
= 0; i
< vdev
->no_of_vpath
; i
++) {
1584 struct __vxge_hw_ring
*hw_ring
;
1586 hw_ring
= vdev
->vpaths
[i
].ring
.handle
;
1587 vxge_hw_vpath_dynamic_rti_ci_set(hw_ring
);
1591 /* Enable CI for TTI */
1592 for (i
= 0; i
< vdev
->no_of_vpath
; i
++) {
1593 struct __vxge_hw_fifo
*hw_fifo
= vdev
->vpaths
[i
].fifo
.handle
;
1594 vxge_hw_vpath_tti_ci_set(hw_fifo
);
1596 * For Inta (with or without napi), Set CI ON for only one
1597 * vpath. (Have only one free running timer).
1599 if ((vdev
->config
.intr_type
== INTA
) && (i
== 0))
1606 static int do_vxge_reset(struct vxgedev
*vdev
, int event
)
1608 enum vxge_hw_status status
;
1609 int ret
= 0, vp_id
, i
;
1611 vxge_debug_entryexit(VXGE_TRACE
, "%s:%d", __func__
, __LINE__
);
1613 if ((event
== VXGE_LL_FULL_RESET
) || (event
== VXGE_LL_START_RESET
)) {
1614 /* check if device is down already */
1615 if (unlikely(!is_vxge_card_up(vdev
)))
1618 /* is reset already scheduled */
1619 if (test_and_set_bit(__VXGE_STATE_RESET_CARD
, &vdev
->state
))
1623 if (event
== VXGE_LL_FULL_RESET
) {
1624 netif_carrier_off(vdev
->ndev
);
1626 /* wait for all the vpath reset to complete */
1627 for (vp_id
= 0; vp_id
< vdev
->no_of_vpath
; vp_id
++) {
1628 while (test_bit(vp_id
, &vdev
->vp_reset
))
1632 netif_carrier_on(vdev
->ndev
);
1634 /* if execution mode is set to debug, don't reset the adapter */
1635 if (unlikely(vdev
->exec_mode
)) {
1636 vxge_debug_init(VXGE_ERR
,
1637 "%s: execution mode is debug, returning..",
1639 clear_bit(__VXGE_STATE_CARD_UP
, &vdev
->state
);
1640 netif_tx_stop_all_queues(vdev
->ndev
);
1645 if (event
== VXGE_LL_FULL_RESET
) {
1646 vxge_hw_device_wait_receive_idle(vdev
->devh
);
1647 vxge_hw_device_intr_disable(vdev
->devh
);
1649 switch (vdev
->cric_err_event
) {
1650 case VXGE_HW_EVENT_UNKNOWN
:
1651 netif_tx_stop_all_queues(vdev
->ndev
);
1652 vxge_debug_init(VXGE_ERR
,
1653 "fatal: %s: Disabling device due to"
1658 case VXGE_HW_EVENT_RESET_START
:
1660 case VXGE_HW_EVENT_RESET_COMPLETE
:
1661 case VXGE_HW_EVENT_LINK_DOWN
:
1662 case VXGE_HW_EVENT_LINK_UP
:
1663 case VXGE_HW_EVENT_ALARM_CLEARED
:
1664 case VXGE_HW_EVENT_ECCERR
:
1665 case VXGE_HW_EVENT_MRPCIM_ECCERR
:
1668 case VXGE_HW_EVENT_FIFO_ERR
:
1669 case VXGE_HW_EVENT_VPATH_ERR
:
1671 case VXGE_HW_EVENT_CRITICAL_ERR
:
1672 netif_tx_stop_all_queues(vdev
->ndev
);
1673 vxge_debug_init(VXGE_ERR
,
1674 "fatal: %s: Disabling device due to"
1677 /* SOP or device reset required */
1678 /* This event is not currently used */
1681 case VXGE_HW_EVENT_SERR
:
1682 netif_tx_stop_all_queues(vdev
->ndev
);
1683 vxge_debug_init(VXGE_ERR
,
1684 "fatal: %s: Disabling device due to"
1689 case VXGE_HW_EVENT_SRPCIM_SERR
:
1690 case VXGE_HW_EVENT_MRPCIM_SERR
:
1693 case VXGE_HW_EVENT_SLOT_FREEZE
:
1694 netif_tx_stop_all_queues(vdev
->ndev
);
1695 vxge_debug_init(VXGE_ERR
,
1696 "fatal: %s: Disabling device due to"
1707 if ((event
== VXGE_LL_FULL_RESET
) || (event
== VXGE_LL_START_RESET
))
1708 netif_tx_stop_all_queues(vdev
->ndev
);
1710 if (event
== VXGE_LL_FULL_RESET
) {
1711 status
= vxge_reset_all_vpaths(vdev
);
1712 if (status
!= VXGE_HW_OK
) {
1713 vxge_debug_init(VXGE_ERR
,
1714 "fatal: %s: can not reset vpaths",
1721 if (event
== VXGE_LL_COMPL_RESET
) {
1722 for (i
= 0; i
< vdev
->no_of_vpath
; i
++)
1723 if (vdev
->vpaths
[i
].handle
) {
1724 if (vxge_hw_vpath_recover_from_reset(
1725 vdev
->vpaths
[i
].handle
)
1727 vxge_debug_init(VXGE_ERR
,
1728 "vxge_hw_vpath_recover_"
1729 "from_reset failed for vpath: "
1735 vxge_debug_init(VXGE_ERR
,
1736 "vxge_hw_vpath_reset failed for "
1743 if ((event
== VXGE_LL_FULL_RESET
) || (event
== VXGE_LL_COMPL_RESET
)) {
1744 /* Reprogram the DA table with populated mac addresses */
1745 for (vp_id
= 0; vp_id
< vdev
->no_of_vpath
; vp_id
++) {
1746 vxge_restore_vpath_mac_addr(&vdev
->vpaths
[vp_id
]);
1747 vxge_restore_vpath_vid_table(&vdev
->vpaths
[vp_id
]);
1750 /* enable vpath interrupts */
1751 for (i
= 0; i
< vdev
->no_of_vpath
; i
++)
1752 vxge_vpath_intr_enable(vdev
, i
);
1754 vxge_hw_device_intr_enable(vdev
->devh
);
1758 /* Indicate card up */
1759 set_bit(__VXGE_STATE_CARD_UP
, &vdev
->state
);
1761 /* Get the traffic to flow through the vpaths */
1762 for (i
= 0; i
< vdev
->no_of_vpath
; i
++) {
1763 vxge_hw_vpath_enable(vdev
->vpaths
[i
].handle
);
1765 vxge_hw_vpath_rx_doorbell_init(vdev
->vpaths
[i
].handle
);
1768 netif_tx_wake_all_queues(vdev
->ndev
);
1772 vxge_config_ci_for_tti_rti(vdev
);
1775 vxge_debug_entryexit(VXGE_TRACE
,
1776 "%s:%d Exiting...", __func__
, __LINE__
);
1778 /* Indicate reset done */
1779 if ((event
== VXGE_LL_FULL_RESET
) || (event
== VXGE_LL_COMPL_RESET
))
1780 clear_bit(__VXGE_STATE_RESET_CARD
, &vdev
->state
);
1786 * @vdev: pointer to ll device
1788 * driver may reset the chip on events of serr, eccerr, etc
1790 static void vxge_reset(struct work_struct
*work
)
1792 struct vxgedev
*vdev
= container_of(work
, struct vxgedev
, reset_task
);
1794 if (!netif_running(vdev
->ndev
))
1797 do_vxge_reset(vdev
, VXGE_LL_FULL_RESET
);
1801 * vxge_poll - Receive handler when Receive Polling is used.
1802 * @dev: pointer to the device structure.
1803 * @budget: Number of packets budgeted to be processed in this iteration.
1805 * This function comes into picture only if Receive side is being handled
1806 * through polling (called NAPI in linux). It mostly does what the normal
1807 * Rx interrupt handler does in terms of descriptor and packet processing
1808 * but not in an interrupt context. Also it will process a specified number
1809 * of packets at most in one iteration. This value is passed down by the
1810 * kernel as the function argument 'budget'.
1812 static int vxge_poll_msix(struct napi_struct
*napi
, int budget
)
1814 struct vxge_ring
*ring
= container_of(napi
, struct vxge_ring
, napi
);
1816 int budget_org
= budget
;
1818 ring
->budget
= budget
;
1819 ring
->pkts_processed
= 0;
1820 vxge_hw_vpath_poll_rx(ring
->handle
);
1821 pkts_processed
= ring
->pkts_processed
;
1823 if (ring
->pkts_processed
< budget_org
) {
1824 napi_complete(napi
);
1826 /* Re enable the Rx interrupts for the vpath */
1827 vxge_hw_channel_msix_unmask(
1828 (struct __vxge_hw_channel
*)ring
->handle
,
1829 ring
->rx_vector_no
);
1833 /* We are copying and returning the local variable, in case if after
1834 * clearing the msix interrupt above, if the interrupt fires right
1835 * away which can preempt this NAPI thread */
1836 return pkts_processed
;
1839 static int vxge_poll_inta(struct napi_struct
*napi
, int budget
)
1841 struct vxgedev
*vdev
= container_of(napi
, struct vxgedev
, napi
);
1842 int pkts_processed
= 0;
1844 int budget_org
= budget
;
1845 struct vxge_ring
*ring
;
1847 struct __vxge_hw_device
*hldev
= pci_get_drvdata(vdev
->pdev
);
1849 for (i
= 0; i
< vdev
->no_of_vpath
; i
++) {
1850 ring
= &vdev
->vpaths
[i
].ring
;
1851 ring
->budget
= budget
;
1852 ring
->pkts_processed
= 0;
1853 vxge_hw_vpath_poll_rx(ring
->handle
);
1854 pkts_processed
+= ring
->pkts_processed
;
1855 budget
-= ring
->pkts_processed
;
1860 VXGE_COMPLETE_ALL_TX(vdev
);
1862 if (pkts_processed
< budget_org
) {
1863 napi_complete(napi
);
1864 /* Re enable the Rx interrupts for the ring */
1865 vxge_hw_device_unmask_all(hldev
);
1866 vxge_hw_device_flush_io(hldev
);
1869 return pkts_processed
;
1872 #ifdef CONFIG_NET_POLL_CONTROLLER
1874 * vxge_netpoll - netpoll event handler entry point
1875 * @dev : pointer to the device structure.
1877 * This function will be called by upper layer to check for events on the
1878 * interface in situations where interrupts are disabled. It is used for
1879 * specific in-kernel networking tasks, such as remote consoles and kernel
1880 * debugging over the network (example netdump in RedHat).
1882 static void vxge_netpoll(struct net_device
*dev
)
1884 struct __vxge_hw_device
*hldev
;
1885 struct vxgedev
*vdev
;
1887 vdev
= netdev_priv(dev
);
1888 hldev
= pci_get_drvdata(vdev
->pdev
);
1890 vxge_debug_entryexit(VXGE_TRACE
, "%s:%d", __func__
, __LINE__
);
1892 if (pci_channel_offline(vdev
->pdev
))
1895 disable_irq(dev
->irq
);
1896 vxge_hw_device_clear_tx_rx(hldev
);
1898 vxge_hw_device_clear_tx_rx(hldev
);
1899 VXGE_COMPLETE_ALL_RX(vdev
);
1900 VXGE_COMPLETE_ALL_TX(vdev
);
1902 enable_irq(dev
->irq
);
1904 vxge_debug_entryexit(VXGE_TRACE
,
1905 "%s:%d Exiting...", __func__
, __LINE__
);
1909 /* RTH configuration */
1910 static enum vxge_hw_status
vxge_rth_configure(struct vxgedev
*vdev
)
1912 enum vxge_hw_status status
= VXGE_HW_OK
;
1913 struct vxge_hw_rth_hash_types hash_types
;
1914 u8 itable
[256] = {0}; /* indirection table */
1915 u8 mtable
[256] = {0}; /* CPU to vpath mapping */
1920 * - itable with bucket numbers
1921 * - mtable with bucket-to-vpath mapping
1923 for (index
= 0; index
< (1 << vdev
->config
.rth_bkt_sz
); index
++) {
1924 itable
[index
] = index
;
1925 mtable
[index
] = index
% vdev
->no_of_vpath
;
1928 /* set indirection table, bucket-to-vpath mapping */
1929 status
= vxge_hw_vpath_rts_rth_itable_set(vdev
->vp_handles
,
1932 vdev
->config
.rth_bkt_sz
);
1933 if (status
!= VXGE_HW_OK
) {
1934 vxge_debug_init(VXGE_ERR
,
1935 "RTH indirection table configuration failed "
1936 "for vpath:%d", vdev
->vpaths
[0].device_id
);
1940 /* Fill RTH hash types */
1941 hash_types
.hash_type_tcpipv4_en
= vdev
->config
.rth_hash_type_tcpipv4
;
1942 hash_types
.hash_type_ipv4_en
= vdev
->config
.rth_hash_type_ipv4
;
1943 hash_types
.hash_type_tcpipv6_en
= vdev
->config
.rth_hash_type_tcpipv6
;
1944 hash_types
.hash_type_ipv6_en
= vdev
->config
.rth_hash_type_ipv6
;
1945 hash_types
.hash_type_tcpipv6ex_en
=
1946 vdev
->config
.rth_hash_type_tcpipv6ex
;
1947 hash_types
.hash_type_ipv6ex_en
= vdev
->config
.rth_hash_type_ipv6ex
;
1950 * Because the itable_set() method uses the active_table field
1951 * for the target virtual path the RTH config should be updated
1952 * for all VPATHs. The h/w only uses the lowest numbered VPATH
1953 * when steering frames.
1955 for (index
= 0; index
< vdev
->no_of_vpath
; index
++) {
1956 status
= vxge_hw_vpath_rts_rth_set(
1957 vdev
->vpaths
[index
].handle
,
1958 vdev
->config
.rth_algorithm
,
1960 vdev
->config
.rth_bkt_sz
);
1961 if (status
!= VXGE_HW_OK
) {
1962 vxge_debug_init(VXGE_ERR
,
1963 "RTH configuration failed for vpath:%d",
1964 vdev
->vpaths
[index
].device_id
);
1973 enum vxge_hw_status
vxge_reset_all_vpaths(struct vxgedev
*vdev
)
1975 enum vxge_hw_status status
= VXGE_HW_OK
;
1976 struct vxge_vpath
*vpath
;
1979 for (i
= 0; i
< vdev
->no_of_vpath
; i
++) {
1980 vpath
= &vdev
->vpaths
[i
];
1981 if (vpath
->handle
) {
1982 if (vxge_hw_vpath_reset(vpath
->handle
) == VXGE_HW_OK
) {
1983 if (is_vxge_card_up(vdev
) &&
1984 vxge_hw_vpath_recover_from_reset(
1985 vpath
->handle
) != VXGE_HW_OK
) {
1986 vxge_debug_init(VXGE_ERR
,
1987 "vxge_hw_vpath_recover_"
1988 "from_reset failed for vpath: "
1993 vxge_debug_init(VXGE_ERR
,
1994 "vxge_hw_vpath_reset failed for "
2005 static void vxge_close_vpaths(struct vxgedev
*vdev
, int index
)
2007 struct vxge_vpath
*vpath
;
2010 for (i
= index
; i
< vdev
->no_of_vpath
; i
++) {
2011 vpath
= &vdev
->vpaths
[i
];
2013 if (vpath
->handle
&& vpath
->is_open
) {
2014 vxge_hw_vpath_close(vpath
->handle
);
2015 vdev
->stats
.vpaths_open
--;
2018 vpath
->handle
= NULL
;
2023 static int vxge_open_vpaths(struct vxgedev
*vdev
)
2025 struct vxge_hw_vpath_attr attr
;
2026 enum vxge_hw_status status
;
2027 struct vxge_vpath
*vpath
;
2031 for (i
= 0; i
< vdev
->no_of_vpath
; i
++) {
2032 vpath
= &vdev
->vpaths
[i
];
2033 vxge_assert(vpath
->is_configured
);
2035 if (!vdev
->titan1
) {
2036 struct vxge_hw_vp_config
*vcfg
;
2037 vcfg
= &vdev
->devh
->config
.vp_config
[vpath
->device_id
];
2039 vcfg
->rti
.urange_a
= RTI_T1A_RX_URANGE_A
;
2040 vcfg
->rti
.urange_b
= RTI_T1A_RX_URANGE_B
;
2041 vcfg
->rti
.urange_c
= RTI_T1A_RX_URANGE_C
;
2042 vcfg
->tti
.uec_a
= TTI_T1A_TX_UFC_A
;
2043 vcfg
->tti
.uec_b
= TTI_T1A_TX_UFC_B
;
2044 vcfg
->tti
.uec_c
= TTI_T1A_TX_UFC_C(vdev
->mtu
);
2045 vcfg
->tti
.uec_d
= TTI_T1A_TX_UFC_D(vdev
->mtu
);
2046 vcfg
->tti
.ltimer_val
= VXGE_T1A_TTI_LTIMER_VAL
;
2047 vcfg
->tti
.rtimer_val
= VXGE_T1A_TTI_RTIMER_VAL
;
2050 attr
.vp_id
= vpath
->device_id
;
2051 attr
.fifo_attr
.callback
= vxge_xmit_compl
;
2052 attr
.fifo_attr
.txdl_term
= vxge_tx_term
;
2053 attr
.fifo_attr
.per_txdl_space
= sizeof(struct vxge_tx_priv
);
2054 attr
.fifo_attr
.userdata
= &vpath
->fifo
;
2056 attr
.ring_attr
.callback
= vxge_rx_1b_compl
;
2057 attr
.ring_attr
.rxd_init
= vxge_rx_initial_replenish
;
2058 attr
.ring_attr
.rxd_term
= vxge_rx_term
;
2059 attr
.ring_attr
.per_rxd_space
= sizeof(struct vxge_rx_priv
);
2060 attr
.ring_attr
.userdata
= &vpath
->ring
;
2062 vpath
->ring
.ndev
= vdev
->ndev
;
2063 vpath
->ring
.pdev
= vdev
->pdev
;
2065 status
= vxge_hw_vpath_open(vdev
->devh
, &attr
, &vpath
->handle
);
2066 if (status
== VXGE_HW_OK
) {
2067 vpath
->fifo
.handle
=
2068 (struct __vxge_hw_fifo
*)attr
.fifo_attr
.userdata
;
2069 vpath
->ring
.handle
=
2070 (struct __vxge_hw_ring
*)attr
.ring_attr
.userdata
;
2071 vpath
->fifo
.tx_steering_type
=
2072 vdev
->config
.tx_steering_type
;
2073 vpath
->fifo
.ndev
= vdev
->ndev
;
2074 vpath
->fifo
.pdev
= vdev
->pdev
;
2075 if (vdev
->config
.tx_steering_type
)
2077 netdev_get_tx_queue(vdev
->ndev
, i
);
2080 netdev_get_tx_queue(vdev
->ndev
, 0);
2081 vpath
->fifo
.indicate_max_pkts
=
2082 vdev
->config
.fifo_indicate_max_pkts
;
2083 vpath
->fifo
.tx_vector_no
= 0;
2084 vpath
->ring
.rx_vector_no
= 0;
2085 vpath
->ring
.rx_hwts
= vdev
->rx_hwts
;
2087 vdev
->vp_handles
[i
] = vpath
->handle
;
2088 vpath
->ring
.vlan_tag_strip
= vdev
->vlan_tag_strip
;
2089 vdev
->stats
.vpaths_open
++;
2091 vdev
->stats
.vpath_open_fail
++;
2092 vxge_debug_init(VXGE_ERR
, "%s: vpath: %d failed to "
2093 "open with status: %d",
2094 vdev
->ndev
->name
, vpath
->device_id
,
2096 vxge_close_vpaths(vdev
, 0);
2100 vp_id
= vpath
->handle
->vpath
->vp_id
;
2101 vdev
->vpaths_deployed
|= vxge_mBIT(vp_id
);
2108 * adaptive_coalesce_tx_interrupts - Changes the interrupt coalescing
2109 * if the interrupts are not within a range
2110 * @fifo: pointer to transmit fifo structure
2111 * Description: The function changes boundary timer and restriction timer
2112 * value depends on the traffic
2113 * Return Value: None
2115 static void adaptive_coalesce_tx_interrupts(struct vxge_fifo
*fifo
)
2117 fifo
->interrupt_count
++;
2118 if (jiffies
> fifo
->jiffies
+ HZ
/ 100) {
2119 struct __vxge_hw_fifo
*hw_fifo
= fifo
->handle
;
2121 fifo
->jiffies
= jiffies
;
2122 if (fifo
->interrupt_count
> VXGE_T1A_MAX_TX_INTERRUPT_COUNT
&&
2123 hw_fifo
->rtimer
!= VXGE_TTI_RTIMER_ADAPT_VAL
) {
2124 hw_fifo
->rtimer
= VXGE_TTI_RTIMER_ADAPT_VAL
;
2125 vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo
);
2126 } else if (hw_fifo
->rtimer
!= 0) {
2127 hw_fifo
->rtimer
= 0;
2128 vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo
);
2130 fifo
->interrupt_count
= 0;
2135 * adaptive_coalesce_rx_interrupts - Changes the interrupt coalescing
2136 * if the interrupts are not within a range
2137 * @ring: pointer to receive ring structure
2138 * Description: The function increases of decreases the packet counts within
2139 * the ranges of traffic utilization, if the interrupts due to this ring are
2140 * not within a fixed range.
2141 * Return Value: Nothing
2143 static void adaptive_coalesce_rx_interrupts(struct vxge_ring
*ring
)
2145 ring
->interrupt_count
++;
2146 if (jiffies
> ring
->jiffies
+ HZ
/ 100) {
2147 struct __vxge_hw_ring
*hw_ring
= ring
->handle
;
2149 ring
->jiffies
= jiffies
;
2150 if (ring
->interrupt_count
> VXGE_T1A_MAX_INTERRUPT_COUNT
&&
2151 hw_ring
->rtimer
!= VXGE_RTI_RTIMER_ADAPT_VAL
) {
2152 hw_ring
->rtimer
= VXGE_RTI_RTIMER_ADAPT_VAL
;
2153 vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring
);
2154 } else if (hw_ring
->rtimer
!= 0) {
2155 hw_ring
->rtimer
= 0;
2156 vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring
);
2158 ring
->interrupt_count
= 0;
2164 * @irq: the irq of the device.
2165 * @dev_id: a void pointer to the hldev structure of the Titan device
2166 * @ptregs: pointer to the registers pushed on the stack.
2168 * This function is the ISR handler of the device when napi is enabled. It
2169 * identifies the reason for the interrupt and calls the relevant service
2172 static irqreturn_t
vxge_isr_napi(int irq
, void *dev_id
)
2174 struct net_device
*dev
;
2175 struct __vxge_hw_device
*hldev
;
2177 enum vxge_hw_status status
;
2178 struct vxgedev
*vdev
= (struct vxgedev
*)dev_id
;
2180 vxge_debug_intr(VXGE_TRACE
, "%s:%d", __func__
, __LINE__
);
2183 hldev
= pci_get_drvdata(vdev
->pdev
);
2185 if (pci_channel_offline(vdev
->pdev
))
2188 if (unlikely(!is_vxge_card_up(vdev
)))
2191 status
= vxge_hw_device_begin_irq(hldev
, vdev
->exec_mode
, &reason
);
2192 if (status
== VXGE_HW_OK
) {
2193 vxge_hw_device_mask_all(hldev
);
2196 VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(
2197 vdev
->vpaths_deployed
>>
2198 (64 - VXGE_HW_MAX_VIRTUAL_PATHS
))) {
2200 vxge_hw_device_clear_tx_rx(hldev
);
2201 napi_schedule(&vdev
->napi
);
2202 vxge_debug_intr(VXGE_TRACE
,
2203 "%s:%d Exiting...", __func__
, __LINE__
);
2206 vxge_hw_device_unmask_all(hldev
);
2207 } else if (unlikely((status
== VXGE_HW_ERR_VPATH
) ||
2208 (status
== VXGE_HW_ERR_CRITICAL
) ||
2209 (status
== VXGE_HW_ERR_FIFO
))) {
2210 vxge_hw_device_mask_all(hldev
);
2211 vxge_hw_device_flush_io(hldev
);
2213 } else if (unlikely(status
== VXGE_HW_ERR_SLOT_FREEZE
))
2216 vxge_debug_intr(VXGE_TRACE
, "%s:%d Exiting...", __func__
, __LINE__
);
2220 #ifdef CONFIG_PCI_MSI
2222 static irqreturn_t
vxge_tx_msix_handle(int irq
, void *dev_id
)
2224 struct vxge_fifo
*fifo
= (struct vxge_fifo
*)dev_id
;
2226 adaptive_coalesce_tx_interrupts(fifo
);
2228 vxge_hw_channel_msix_mask((struct __vxge_hw_channel
*)fifo
->handle
,
2229 fifo
->tx_vector_no
);
2231 vxge_hw_channel_msix_clear((struct __vxge_hw_channel
*)fifo
->handle
,
2232 fifo
->tx_vector_no
);
2234 VXGE_COMPLETE_VPATH_TX(fifo
);
2236 vxge_hw_channel_msix_unmask((struct __vxge_hw_channel
*)fifo
->handle
,
2237 fifo
->tx_vector_no
);
2244 static irqreturn_t
vxge_rx_msix_napi_handle(int irq
, void *dev_id
)
2246 struct vxge_ring
*ring
= (struct vxge_ring
*)dev_id
;
2248 adaptive_coalesce_rx_interrupts(ring
);
2250 vxge_hw_channel_msix_mask((struct __vxge_hw_channel
*)ring
->handle
,
2251 ring
->rx_vector_no
);
2253 vxge_hw_channel_msix_clear((struct __vxge_hw_channel
*)ring
->handle
,
2254 ring
->rx_vector_no
);
2256 napi_schedule(&ring
->napi
);
2261 vxge_alarm_msix_handle(int irq
, void *dev_id
)
2264 enum vxge_hw_status status
;
2265 struct vxge_vpath
*vpath
= (struct vxge_vpath
*)dev_id
;
2266 struct vxgedev
*vdev
= vpath
->vdev
;
2267 int msix_id
= (vpath
->handle
->vpath
->vp_id
*
2268 VXGE_HW_VPATH_MSIX_ACTIVE
) + VXGE_ALARM_MSIX_ID
;
2270 for (i
= 0; i
< vdev
->no_of_vpath
; i
++) {
2271 /* Reduce the chance of losing alarm interrupts by masking
2272 * the vector. A pending bit will be set if an alarm is
2273 * generated and on unmask the interrupt will be fired.
2275 vxge_hw_vpath_msix_mask(vdev
->vpaths
[i
].handle
, msix_id
);
2276 vxge_hw_vpath_msix_clear(vdev
->vpaths
[i
].handle
, msix_id
);
2279 status
= vxge_hw_vpath_alarm_process(vdev
->vpaths
[i
].handle
,
2281 if (status
== VXGE_HW_OK
) {
2282 vxge_hw_vpath_msix_unmask(vdev
->vpaths
[i
].handle
,
2287 vxge_debug_intr(VXGE_ERR
,
2288 "%s: vxge_hw_vpath_alarm_process failed %x ",
2289 VXGE_DRIVER_NAME
, status
);
2294 static int vxge_alloc_msix(struct vxgedev
*vdev
)
2297 int msix_intr_vect
= 0, temp
;
2301 /* Tx/Rx MSIX Vectors count */
2302 vdev
->intr_cnt
= vdev
->no_of_vpath
* 2;
2304 /* Alarm MSIX Vectors count */
2307 vdev
->entries
= kcalloc(vdev
->intr_cnt
, sizeof(struct msix_entry
),
2309 if (!vdev
->entries
) {
2310 vxge_debug_init(VXGE_ERR
,
2311 "%s: memory allocation failed",
2314 goto alloc_entries_failed
;
2317 vdev
->vxge_entries
= kcalloc(vdev
->intr_cnt
,
2318 sizeof(struct vxge_msix_entry
),
2320 if (!vdev
->vxge_entries
) {
2321 vxge_debug_init(VXGE_ERR
, "%s: memory allocation failed",
2324 goto alloc_vxge_entries_failed
;
2327 for (i
= 0, j
= 0; i
< vdev
->no_of_vpath
; i
++) {
2329 msix_intr_vect
= i
* VXGE_HW_VPATH_MSIX_ACTIVE
;
2331 /* Initialize the fifo vector */
2332 vdev
->entries
[j
].entry
= msix_intr_vect
;
2333 vdev
->vxge_entries
[j
].entry
= msix_intr_vect
;
2334 vdev
->vxge_entries
[j
].in_use
= 0;
2337 /* Initialize the ring vector */
2338 vdev
->entries
[j
].entry
= msix_intr_vect
+ 1;
2339 vdev
->vxge_entries
[j
].entry
= msix_intr_vect
+ 1;
2340 vdev
->vxge_entries
[j
].in_use
= 0;
2344 /* Initialize the alarm vector */
2345 vdev
->entries
[j
].entry
= VXGE_ALARM_MSIX_ID
;
2346 vdev
->vxge_entries
[j
].entry
= VXGE_ALARM_MSIX_ID
;
2347 vdev
->vxge_entries
[j
].in_use
= 0;
2349 ret
= pci_enable_msix(vdev
->pdev
, vdev
->entries
, vdev
->intr_cnt
);
2351 vxge_debug_init(VXGE_ERR
,
2352 "%s: MSI-X enable failed for %d vectors, ret: %d",
2353 VXGE_DRIVER_NAME
, vdev
->intr_cnt
, ret
);
2354 if ((max_config_vpath
!= VXGE_USE_DEFAULT
) || (ret
< 3)) {
2356 goto enable_msix_failed
;
2359 kfree(vdev
->entries
);
2360 kfree(vdev
->vxge_entries
);
2361 vdev
->entries
= NULL
;
2362 vdev
->vxge_entries
= NULL
;
2363 /* Try with less no of vector by reducing no of vpaths count */
2365 vxge_close_vpaths(vdev
, temp
);
2366 vdev
->no_of_vpath
= temp
;
2368 } else if (ret
< 0) {
2370 goto enable_msix_failed
;
2375 kfree(vdev
->vxge_entries
);
2376 alloc_vxge_entries_failed
:
2377 kfree(vdev
->entries
);
2378 alloc_entries_failed
:
2382 static int vxge_enable_msix(struct vxgedev
*vdev
)
2386 /* 0 - Tx, 1 - Rx */
2387 int tim_msix_id
[4] = {0, 1, 0, 0};
2391 /* allocate msix vectors */
2392 ret
= vxge_alloc_msix(vdev
);
2394 for (i
= 0; i
< vdev
->no_of_vpath
; i
++) {
2395 struct vxge_vpath
*vpath
= &vdev
->vpaths
[i
];
2397 /* If fifo or ring are not enabled, the MSIX vector for
2398 * it should be set to 0.
2400 vpath
->ring
.rx_vector_no
= (vpath
->device_id
*
2401 VXGE_HW_VPATH_MSIX_ACTIVE
) + 1;
2403 vpath
->fifo
.tx_vector_no
= (vpath
->device_id
*
2404 VXGE_HW_VPATH_MSIX_ACTIVE
);
2406 vxge_hw_vpath_msix_set(vpath
->handle
, tim_msix_id
,
2407 VXGE_ALARM_MSIX_ID
);
2414 static void vxge_rem_msix_isr(struct vxgedev
*vdev
)
2418 for (intr_cnt
= 0; intr_cnt
< (vdev
->no_of_vpath
* 2 + 1);
2420 if (vdev
->vxge_entries
[intr_cnt
].in_use
) {
2421 synchronize_irq(vdev
->entries
[intr_cnt
].vector
);
2422 free_irq(vdev
->entries
[intr_cnt
].vector
,
2423 vdev
->vxge_entries
[intr_cnt
].arg
);
2424 vdev
->vxge_entries
[intr_cnt
].in_use
= 0;
2428 kfree(vdev
->entries
);
2429 kfree(vdev
->vxge_entries
);
2430 vdev
->entries
= NULL
;
2431 vdev
->vxge_entries
= NULL
;
2433 if (vdev
->config
.intr_type
== MSI_X
)
2434 pci_disable_msix(vdev
->pdev
);
2438 static void vxge_rem_isr(struct vxgedev
*vdev
)
2440 struct __vxge_hw_device
*hldev
;
2441 hldev
= pci_get_drvdata(vdev
->pdev
);
2443 #ifdef CONFIG_PCI_MSI
2444 if (vdev
->config
.intr_type
== MSI_X
) {
2445 vxge_rem_msix_isr(vdev
);
2448 if (vdev
->config
.intr_type
== INTA
) {
2449 synchronize_irq(vdev
->pdev
->irq
);
2450 free_irq(vdev
->pdev
->irq
, vdev
);
2454 static int vxge_add_isr(struct vxgedev
*vdev
)
2457 #ifdef CONFIG_PCI_MSI
2458 int vp_idx
= 0, intr_idx
= 0, intr_cnt
= 0, msix_idx
= 0, irq_req
= 0;
2459 int pci_fun
= PCI_FUNC(vdev
->pdev
->devfn
);
2461 if (vdev
->config
.intr_type
== MSI_X
)
2462 ret
= vxge_enable_msix(vdev
);
2465 vxge_debug_init(VXGE_ERR
,
2466 "%s: Enabling MSI-X Failed", VXGE_DRIVER_NAME
);
2467 vxge_debug_init(VXGE_ERR
,
2468 "%s: Defaulting to INTA", VXGE_DRIVER_NAME
);
2469 vdev
->config
.intr_type
= INTA
;
2472 if (vdev
->config
.intr_type
== MSI_X
) {
2474 intr_idx
< (vdev
->no_of_vpath
*
2475 VXGE_HW_VPATH_MSIX_ACTIVE
); intr_idx
++) {
2477 msix_idx
= intr_idx
% VXGE_HW_VPATH_MSIX_ACTIVE
;
2482 snprintf(vdev
->desc
[intr_cnt
], VXGE_INTR_STRLEN
,
2483 "%s:vxge:MSI-X %d - Tx - fn:%d vpath:%d",
2485 vdev
->entries
[intr_cnt
].entry
,
2488 vdev
->entries
[intr_cnt
].vector
,
2489 vxge_tx_msix_handle
, 0,
2490 vdev
->desc
[intr_cnt
],
2491 &vdev
->vpaths
[vp_idx
].fifo
);
2492 vdev
->vxge_entries
[intr_cnt
].arg
=
2493 &vdev
->vpaths
[vp_idx
].fifo
;
2497 snprintf(vdev
->desc
[intr_cnt
], VXGE_INTR_STRLEN
,
2498 "%s:vxge:MSI-X %d - Rx - fn:%d vpath:%d",
2500 vdev
->entries
[intr_cnt
].entry
,
2503 vdev
->entries
[intr_cnt
].vector
,
2504 vxge_rx_msix_napi_handle
,
2506 vdev
->desc
[intr_cnt
],
2507 &vdev
->vpaths
[vp_idx
].ring
);
2508 vdev
->vxge_entries
[intr_cnt
].arg
=
2509 &vdev
->vpaths
[vp_idx
].ring
;
2515 vxge_debug_init(VXGE_ERR
,
2516 "%s: MSIX - %d Registration failed",
2517 vdev
->ndev
->name
, intr_cnt
);
2518 vxge_rem_msix_isr(vdev
);
2519 vdev
->config
.intr_type
= INTA
;
2520 vxge_debug_init(VXGE_ERR
,
2521 "%s: Defaulting to INTA"
2522 , vdev
->ndev
->name
);
2527 /* We requested for this msix interrupt */
2528 vdev
->vxge_entries
[intr_cnt
].in_use
= 1;
2529 msix_idx
+= vdev
->vpaths
[vp_idx
].device_id
*
2530 VXGE_HW_VPATH_MSIX_ACTIVE
;
2531 vxge_hw_vpath_msix_unmask(
2532 vdev
->vpaths
[vp_idx
].handle
,
2537 /* Point to next vpath handler */
2538 if (((intr_idx
+ 1) % VXGE_HW_VPATH_MSIX_ACTIVE
== 0) &&
2539 (vp_idx
< (vdev
->no_of_vpath
- 1)))
2543 intr_cnt
= vdev
->no_of_vpath
* 2;
2544 snprintf(vdev
->desc
[intr_cnt
], VXGE_INTR_STRLEN
,
2545 "%s:vxge:MSI-X %d - Alarm - fn:%d",
2547 vdev
->entries
[intr_cnt
].entry
,
2549 /* For Alarm interrupts */
2550 ret
= request_irq(vdev
->entries
[intr_cnt
].vector
,
2551 vxge_alarm_msix_handle
, 0,
2552 vdev
->desc
[intr_cnt
],
2555 vxge_debug_init(VXGE_ERR
,
2556 "%s: MSIX - %d Registration failed",
2557 vdev
->ndev
->name
, intr_cnt
);
2558 vxge_rem_msix_isr(vdev
);
2559 vdev
->config
.intr_type
= INTA
;
2560 vxge_debug_init(VXGE_ERR
,
2561 "%s: Defaulting to INTA",
2566 msix_idx
= (vdev
->vpaths
[0].handle
->vpath
->vp_id
*
2567 VXGE_HW_VPATH_MSIX_ACTIVE
) + VXGE_ALARM_MSIX_ID
;
2568 vxge_hw_vpath_msix_unmask(vdev
->vpaths
[vp_idx
].handle
,
2570 vdev
->vxge_entries
[intr_cnt
].in_use
= 1;
2571 vdev
->vxge_entries
[intr_cnt
].arg
= &vdev
->vpaths
[0];
2576 if (vdev
->config
.intr_type
== INTA
) {
2577 snprintf(vdev
->desc
[0], VXGE_INTR_STRLEN
,
2578 "%s:vxge:INTA", vdev
->ndev
->name
);
2579 vxge_hw_device_set_intr_type(vdev
->devh
,
2580 VXGE_HW_INTR_MODE_IRQLINE
);
2582 vxge_hw_vpath_tti_ci_set(vdev
->vpaths
[0].fifo
.handle
);
2584 ret
= request_irq((int) vdev
->pdev
->irq
,
2586 IRQF_SHARED
, vdev
->desc
[0], vdev
);
2588 vxge_debug_init(VXGE_ERR
,
2589 "%s %s-%d: ISR registration failed",
2590 VXGE_DRIVER_NAME
, "IRQ", vdev
->pdev
->irq
);
2593 vxge_debug_init(VXGE_TRACE
,
2594 "new %s-%d line allocated",
2595 "IRQ", vdev
->pdev
->irq
);
2601 static void vxge_poll_vp_reset(unsigned long data
)
2603 struct vxgedev
*vdev
= (struct vxgedev
*)data
;
2606 for (i
= 0; i
< vdev
->no_of_vpath
; i
++) {
2607 if (test_bit(i
, &vdev
->vp_reset
)) {
2608 vxge_reset_vpath(vdev
, i
);
2612 if (j
&& (vdev
->config
.intr_type
!= MSI_X
)) {
2613 vxge_hw_device_unmask_all(vdev
->devh
);
2614 vxge_hw_device_flush_io(vdev
->devh
);
2617 mod_timer(&vdev
->vp_reset_timer
, jiffies
+ HZ
/ 2);
2620 static void vxge_poll_vp_lockup(unsigned long data
)
2622 struct vxgedev
*vdev
= (struct vxgedev
*)data
;
2623 enum vxge_hw_status status
= VXGE_HW_OK
;
2624 struct vxge_vpath
*vpath
;
2625 struct vxge_ring
*ring
;
2627 unsigned long rx_frms
;
2629 for (i
= 0; i
< vdev
->no_of_vpath
; i
++) {
2630 ring
= &vdev
->vpaths
[i
].ring
;
2632 /* Truncated to machine word size number of frames */
2633 rx_frms
= ACCESS_ONCE(ring
->stats
.rx_frms
);
2635 /* Did this vpath received any packets */
2636 if (ring
->stats
.prev_rx_frms
== rx_frms
) {
2637 status
= vxge_hw_vpath_check_leak(ring
->handle
);
2639 /* Did it received any packets last time */
2640 if ((VXGE_HW_FAIL
== status
) &&
2641 (VXGE_HW_FAIL
== ring
->last_status
)) {
2643 /* schedule vpath reset */
2644 if (!test_and_set_bit(i
, &vdev
->vp_reset
)) {
2645 vpath
= &vdev
->vpaths
[i
];
2647 /* disable interrupts for this vpath */
2648 vxge_vpath_intr_disable(vdev
, i
);
2650 /* stop the queue for this vpath */
2651 netif_tx_stop_queue(vpath
->fifo
.txq
);
2656 ring
->stats
.prev_rx_frms
= rx_frms
;
2657 ring
->last_status
= status
;
2660 /* Check every 1 milli second */
2661 mod_timer(&vdev
->vp_lockup_timer
, jiffies
+ HZ
/ 1000);
2664 static u32
vxge_fix_features(struct net_device
*dev
, u32 features
)
2666 u32 changed
= dev
->features
^ features
;
2668 /* Enabling RTH requires some of the logic in vxge_device_register and a
2669 * vpath reset. Due to these restrictions, only allow modification
2670 * while the interface is down.
2672 if ((changed
& NETIF_F_RXHASH
) && netif_running(dev
))
2673 features
^= NETIF_F_RXHASH
;
2678 static int vxge_set_features(struct net_device
*dev
, u32 features
)
2680 struct vxgedev
*vdev
= netdev_priv(dev
);
2681 u32 changed
= dev
->features
^ features
;
2683 if (!(changed
& NETIF_F_RXHASH
))
2686 /* !netif_running() ensured by vxge_fix_features() */
2688 vdev
->devh
->config
.rth_en
= !!(features
& NETIF_F_RXHASH
);
2689 if (vxge_reset_all_vpaths(vdev
) != VXGE_HW_OK
) {
2690 dev
->features
= features
^ NETIF_F_RXHASH
;
2691 vdev
->devh
->config
.rth_en
= !!(dev
->features
& NETIF_F_RXHASH
);
2700 * @dev: pointer to the device structure.
2702 * This function is the open entry point of the driver. It mainly calls a
2703 * function to allocate Rx buffers and inserts them into the buffer
2704 * descriptors and then enables the Rx part of the NIC.
2705 * Return value: '0' on success and an appropriate (-)ve integer as
2706 * defined in errno.h file on failure.
2708 static int vxge_open(struct net_device
*dev
)
2710 enum vxge_hw_status status
;
2711 struct vxgedev
*vdev
;
2712 struct __vxge_hw_device
*hldev
;
2713 struct vxge_vpath
*vpath
;
2716 u64 val64
, function_mode
;
2718 vxge_debug_entryexit(VXGE_TRACE
,
2719 "%s: %s:%d", dev
->name
, __func__
, __LINE__
);
2721 vdev
= netdev_priv(dev
);
2722 hldev
= pci_get_drvdata(vdev
->pdev
);
2723 function_mode
= vdev
->config
.device_hw_info
.function_mode
;
2725 /* make sure you have link off by default every time Nic is
2727 netif_carrier_off(dev
);
2730 status
= vxge_open_vpaths(vdev
);
2731 if (status
!= VXGE_HW_OK
) {
2732 vxge_debug_init(VXGE_ERR
,
2733 "%s: fatal: Vpath open failed", vdev
->ndev
->name
);
2738 vdev
->mtu
= dev
->mtu
;
2740 status
= vxge_add_isr(vdev
);
2741 if (status
!= VXGE_HW_OK
) {
2742 vxge_debug_init(VXGE_ERR
,
2743 "%s: fatal: ISR add failed", dev
->name
);
2748 if (vdev
->config
.intr_type
!= MSI_X
) {
2749 netif_napi_add(dev
, &vdev
->napi
, vxge_poll_inta
,
2750 vdev
->config
.napi_weight
);
2751 napi_enable(&vdev
->napi
);
2752 for (i
= 0; i
< vdev
->no_of_vpath
; i
++) {
2753 vpath
= &vdev
->vpaths
[i
];
2754 vpath
->ring
.napi_p
= &vdev
->napi
;
2757 for (i
= 0; i
< vdev
->no_of_vpath
; i
++) {
2758 vpath
= &vdev
->vpaths
[i
];
2759 netif_napi_add(dev
, &vpath
->ring
.napi
,
2760 vxge_poll_msix
, vdev
->config
.napi_weight
);
2761 napi_enable(&vpath
->ring
.napi
);
2762 vpath
->ring
.napi_p
= &vpath
->ring
.napi
;
2767 if (vdev
->config
.rth_steering
) {
2768 status
= vxge_rth_configure(vdev
);
2769 if (status
!= VXGE_HW_OK
) {
2770 vxge_debug_init(VXGE_ERR
,
2771 "%s: fatal: RTH configuration failed",
2777 printk(KERN_INFO
"%s: Receive Hashing Offload %s\n", dev
->name
,
2778 hldev
->config
.rth_en
? "enabled" : "disabled");
2780 for (i
= 0; i
< vdev
->no_of_vpath
; i
++) {
2781 vpath
= &vdev
->vpaths
[i
];
2783 /* set initial mtu before enabling the device */
2784 status
= vxge_hw_vpath_mtu_set(vpath
->handle
, vdev
->mtu
);
2785 if (status
!= VXGE_HW_OK
) {
2786 vxge_debug_init(VXGE_ERR
,
2787 "%s: fatal: can not set new MTU", dev
->name
);
2793 VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_TRACE
, VXGE_COMPONENT_LL
, vdev
);
2794 vxge_debug_init(vdev
->level_trace
,
2795 "%s: MTU is %d", vdev
->ndev
->name
, vdev
->mtu
);
2796 VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_ERR
, VXGE_COMPONENT_LL
, vdev
);
2798 /* Restore the DA, VID table and also multicast and promiscuous mode
2801 if (vdev
->all_multi_flg
) {
2802 for (i
= 0; i
< vdev
->no_of_vpath
; i
++) {
2803 vpath
= &vdev
->vpaths
[i
];
2804 vxge_restore_vpath_mac_addr(vpath
);
2805 vxge_restore_vpath_vid_table(vpath
);
2807 status
= vxge_hw_vpath_mcast_enable(vpath
->handle
);
2808 if (status
!= VXGE_HW_OK
)
2809 vxge_debug_init(VXGE_ERR
,
2810 "%s:%d Enabling multicast failed",
2811 __func__
, __LINE__
);
2815 /* Enable vpath to sniff all unicast/multicast traffic that not
2816 * addressed to them. We allow promiscuous mode for PF only
2820 for (i
= 0; i
< VXGE_HW_MAX_VIRTUAL_PATHS
; i
++)
2821 val64
|= VXGE_HW_RXMAC_AUTHORIZE_ALL_ADDR_VP(i
);
2823 vxge_hw_mgmt_reg_write(vdev
->devh
,
2824 vxge_hw_mgmt_reg_type_mrpcim
,
2826 (ulong
)offsetof(struct vxge_hw_mrpcim_reg
,
2827 rxmac_authorize_all_addr
),
2830 vxge_hw_mgmt_reg_write(vdev
->devh
,
2831 vxge_hw_mgmt_reg_type_mrpcim
,
2833 (ulong
)offsetof(struct vxge_hw_mrpcim_reg
,
2834 rxmac_authorize_all_vid
),
2837 vxge_set_multicast(dev
);
2839 /* Enabling Bcast and mcast for all vpath */
2840 for (i
= 0; i
< vdev
->no_of_vpath
; i
++) {
2841 vpath
= &vdev
->vpaths
[i
];
2842 status
= vxge_hw_vpath_bcast_enable(vpath
->handle
);
2843 if (status
!= VXGE_HW_OK
)
2844 vxge_debug_init(VXGE_ERR
,
2845 "%s : Can not enable bcast for vpath "
2846 "id %d", dev
->name
, i
);
2847 if (vdev
->config
.addr_learn_en
) {
2848 status
= vxge_hw_vpath_mcast_enable(vpath
->handle
);
2849 if (status
!= VXGE_HW_OK
)
2850 vxge_debug_init(VXGE_ERR
,
2851 "%s : Can not enable mcast for vpath "
2852 "id %d", dev
->name
, i
);
2856 vxge_hw_device_setpause_data(vdev
->devh
, 0,
2857 vdev
->config
.tx_pause_enable
,
2858 vdev
->config
.rx_pause_enable
);
2860 if (vdev
->vp_reset_timer
.function
== NULL
)
2861 vxge_os_timer(vdev
->vp_reset_timer
,
2862 vxge_poll_vp_reset
, vdev
, (HZ
/2));
2864 /* There is no need to check for RxD leak and RxD lookup on Titan1A */
2865 if (vdev
->titan1
&& vdev
->vp_lockup_timer
.function
== NULL
)
2866 vxge_os_timer(vdev
->vp_lockup_timer
, vxge_poll_vp_lockup
, vdev
,
2869 set_bit(__VXGE_STATE_CARD_UP
, &vdev
->state
);
2873 if (vxge_hw_device_link_state_get(vdev
->devh
) == VXGE_HW_LINK_UP
) {
2874 netif_carrier_on(vdev
->ndev
);
2875 netdev_notice(vdev
->ndev
, "Link Up\n");
2876 vdev
->stats
.link_up
++;
2879 vxge_hw_device_intr_enable(vdev
->devh
);
2883 for (i
= 0; i
< vdev
->no_of_vpath
; i
++) {
2884 vpath
= &vdev
->vpaths
[i
];
2886 vxge_hw_vpath_enable(vpath
->handle
);
2888 vxge_hw_vpath_rx_doorbell_init(vpath
->handle
);
2891 netif_tx_start_all_queues(vdev
->ndev
);
2894 vxge_config_ci_for_tti_rti(vdev
);
2902 if (vdev
->config
.intr_type
!= MSI_X
)
2903 napi_disable(&vdev
->napi
);
2905 for (i
= 0; i
< vdev
->no_of_vpath
; i
++)
2906 napi_disable(&vdev
->vpaths
[i
].ring
.napi
);
2910 vxge_close_vpaths(vdev
, 0);
2912 vxge_debug_entryexit(VXGE_TRACE
,
2913 "%s: %s:%d Exiting...",
2914 dev
->name
, __func__
, __LINE__
);
2918 /* Loop through the mac address list and delete all the entries */
2919 static void vxge_free_mac_add_list(struct vxge_vpath
*vpath
)
2922 struct list_head
*entry
, *next
;
2923 if (list_empty(&vpath
->mac_addr_list
))
2926 list_for_each_safe(entry
, next
, &vpath
->mac_addr_list
) {
2928 kfree((struct vxge_mac_addrs
*)entry
);
2932 static void vxge_napi_del_all(struct vxgedev
*vdev
)
2935 if (vdev
->config
.intr_type
!= MSI_X
)
2936 netif_napi_del(&vdev
->napi
);
2938 for (i
= 0; i
< vdev
->no_of_vpath
; i
++)
2939 netif_napi_del(&vdev
->vpaths
[i
].ring
.napi
);
2943 static int do_vxge_close(struct net_device
*dev
, int do_io
)
2945 enum vxge_hw_status status
;
2946 struct vxgedev
*vdev
;
2947 struct __vxge_hw_device
*hldev
;
2949 u64 val64
, vpath_vector
;
2950 vxge_debug_entryexit(VXGE_TRACE
, "%s: %s:%d",
2951 dev
->name
, __func__
, __LINE__
);
2953 vdev
= netdev_priv(dev
);
2954 hldev
= pci_get_drvdata(vdev
->pdev
);
2956 if (unlikely(!is_vxge_card_up(vdev
)))
2959 /* If vxge_handle_crit_err task is executing,
2960 * wait till it completes. */
2961 while (test_and_set_bit(__VXGE_STATE_RESET_CARD
, &vdev
->state
))
2965 /* Put the vpath back in normal mode */
2966 vpath_vector
= vxge_mBIT(vdev
->vpaths
[0].device_id
);
2967 status
= vxge_hw_mgmt_reg_read(vdev
->devh
,
2968 vxge_hw_mgmt_reg_type_mrpcim
,
2971 struct vxge_hw_mrpcim_reg
,
2972 rts_mgr_cbasin_cfg
),
2974 if (status
== VXGE_HW_OK
) {
2975 val64
&= ~vpath_vector
;
2976 status
= vxge_hw_mgmt_reg_write(vdev
->devh
,
2977 vxge_hw_mgmt_reg_type_mrpcim
,
2980 struct vxge_hw_mrpcim_reg
,
2981 rts_mgr_cbasin_cfg
),
2985 /* Remove the function 0 from promiscuous mode */
2986 vxge_hw_mgmt_reg_write(vdev
->devh
,
2987 vxge_hw_mgmt_reg_type_mrpcim
,
2989 (ulong
)offsetof(struct vxge_hw_mrpcim_reg
,
2990 rxmac_authorize_all_addr
),
2993 vxge_hw_mgmt_reg_write(vdev
->devh
,
2994 vxge_hw_mgmt_reg_type_mrpcim
,
2996 (ulong
)offsetof(struct vxge_hw_mrpcim_reg
,
2997 rxmac_authorize_all_vid
),
3004 del_timer_sync(&vdev
->vp_lockup_timer
);
3006 del_timer_sync(&vdev
->vp_reset_timer
);
3009 vxge_hw_device_wait_receive_idle(hldev
);
3011 clear_bit(__VXGE_STATE_CARD_UP
, &vdev
->state
);
3014 if (vdev
->config
.intr_type
!= MSI_X
)
3015 napi_disable(&vdev
->napi
);
3017 for (i
= 0; i
< vdev
->no_of_vpath
; i
++)
3018 napi_disable(&vdev
->vpaths
[i
].ring
.napi
);
3021 netif_carrier_off(vdev
->ndev
);
3022 netdev_notice(vdev
->ndev
, "Link Down\n");
3023 netif_tx_stop_all_queues(vdev
->ndev
);
3025 /* Note that at this point xmit() is stopped by upper layer */
3027 vxge_hw_device_intr_disable(vdev
->devh
);
3031 vxge_napi_del_all(vdev
);
3034 vxge_reset_all_vpaths(vdev
);
3036 vxge_close_vpaths(vdev
, 0);
3038 vxge_debug_entryexit(VXGE_TRACE
,
3039 "%s: %s:%d Exiting...", dev
->name
, __func__
, __LINE__
);
3041 clear_bit(__VXGE_STATE_RESET_CARD
, &vdev
->state
);
3048 * @dev: device pointer.
3050 * This is the stop entry point of the driver. It needs to undo exactly
3051 * whatever was done by the open entry point, thus it's usually referred to
3052 * as the close function.Among other things this function mainly stops the
3053 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3054 * Return value: '0' on success and an appropriate (-)ve integer as
3055 * defined in errno.h file on failure.
3057 static int vxge_close(struct net_device
*dev
)
3059 do_vxge_close(dev
, 1);
3065 * @dev: net device pointer.
3066 * @new_mtu :the new MTU size for the device.
3068 * A driver entry point to change MTU size for the device. Before changing
3069 * the MTU the device must be stopped.
3071 static int vxge_change_mtu(struct net_device
*dev
, int new_mtu
)
3073 struct vxgedev
*vdev
= netdev_priv(dev
);
3075 vxge_debug_entryexit(vdev
->level_trace
,
3076 "%s:%d", __func__
, __LINE__
);
3077 if ((new_mtu
< VXGE_HW_MIN_MTU
) || (new_mtu
> VXGE_HW_MAX_MTU
)) {
3078 vxge_debug_init(vdev
->level_err
,
3079 "%s: mtu size is invalid", dev
->name
);
3083 /* check if device is down already */
3084 if (unlikely(!is_vxge_card_up(vdev
))) {
3085 /* just store new value, will use later on open() */
3087 vxge_debug_init(vdev
->level_err
,
3088 "%s", "device is down on MTU change");
3092 vxge_debug_init(vdev
->level_trace
,
3093 "trying to apply new MTU %d", new_mtu
);
3095 if (vxge_close(dev
))
3099 vdev
->mtu
= new_mtu
;
3104 vxge_debug_init(vdev
->level_trace
,
3105 "%s: MTU changed to %d", vdev
->ndev
->name
, new_mtu
);
3107 vxge_debug_entryexit(vdev
->level_trace
,
3108 "%s:%d Exiting...", __func__
, __LINE__
);
3115 * @dev: pointer to the device structure
3116 * @stats: pointer to struct rtnl_link_stats64
3119 static struct rtnl_link_stats64
*
3120 vxge_get_stats64(struct net_device
*dev
, struct rtnl_link_stats64
*net_stats
)
3122 struct vxgedev
*vdev
= netdev_priv(dev
);
3125 /* net_stats already zeroed by caller */
3126 for (k
= 0; k
< vdev
->no_of_vpath
; k
++) {
3127 struct vxge_ring_stats
*rxstats
= &vdev
->vpaths
[k
].ring
.stats
;
3128 struct vxge_fifo_stats
*txstats
= &vdev
->vpaths
[k
].fifo
.stats
;
3130 u64 packets
, bytes
, multicast
;
3133 start
= u64_stats_fetch_begin(&rxstats
->syncp
);
3135 packets
= rxstats
->rx_frms
;
3136 multicast
= rxstats
->rx_mcast
;
3137 bytes
= rxstats
->rx_bytes
;
3138 } while (u64_stats_fetch_retry(&rxstats
->syncp
, start
));
3140 net_stats
->rx_packets
+= packets
;
3141 net_stats
->rx_bytes
+= bytes
;
3142 net_stats
->multicast
+= multicast
;
3144 net_stats
->rx_errors
+= rxstats
->rx_errors
;
3145 net_stats
->rx_dropped
+= rxstats
->rx_dropped
;
3148 start
= u64_stats_fetch_begin(&txstats
->syncp
);
3150 packets
= txstats
->tx_frms
;
3151 bytes
= txstats
->tx_bytes
;
3152 } while (u64_stats_fetch_retry(&txstats
->syncp
, start
));
3154 net_stats
->tx_packets
+= packets
;
3155 net_stats
->tx_bytes
+= bytes
;
3156 net_stats
->tx_errors
+= txstats
->tx_errors
;
3162 static enum vxge_hw_status
vxge_timestamp_config(struct __vxge_hw_device
*devh
)
3164 enum vxge_hw_status status
;
3167 /* Timestamp is passed to the driver via the FCS, therefore we
3168 * must disable the FCS stripping by the adapter. Since this is
3169 * required for the driver to load (due to a hardware bug),
3170 * there is no need to do anything special here.
3172 val64
= VXGE_HW_XMAC_TIMESTAMP_EN
|
3173 VXGE_HW_XMAC_TIMESTAMP_USE_LINK_ID(0) |
3174 VXGE_HW_XMAC_TIMESTAMP_INTERVAL(0);
3176 status
= vxge_hw_mgmt_reg_write(devh
,
3177 vxge_hw_mgmt_reg_type_mrpcim
,
3179 offsetof(struct vxge_hw_mrpcim_reg
,
3182 vxge_hw_device_flush_io(devh
);
3183 devh
->config
.hwts_en
= VXGE_HW_HWTS_ENABLE
;
3187 static int vxge_hwtstamp_ioctl(struct vxgedev
*vdev
, void __user
*data
)
3189 struct hwtstamp_config config
;
3192 if (copy_from_user(&config
, data
, sizeof(config
)))
3195 /* reserved for future extensions */
3199 /* Transmit HW Timestamp not supported */
3200 switch (config
.tx_type
) {
3201 case HWTSTAMP_TX_OFF
:
3203 case HWTSTAMP_TX_ON
:
3208 switch (config
.rx_filter
) {
3209 case HWTSTAMP_FILTER_NONE
:
3211 config
.rx_filter
= HWTSTAMP_FILTER_NONE
;
3214 case HWTSTAMP_FILTER_ALL
:
3215 case HWTSTAMP_FILTER_SOME
:
3216 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT
:
3217 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC
:
3218 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ
:
3219 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT
:
3220 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC
:
3221 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ
:
3222 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT
:
3223 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC
:
3224 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ
:
3225 case HWTSTAMP_FILTER_PTP_V2_EVENT
:
3226 case HWTSTAMP_FILTER_PTP_V2_SYNC
:
3227 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ
:
3228 if (vdev
->devh
->config
.hwts_en
!= VXGE_HW_HWTS_ENABLE
)
3232 config
.rx_filter
= HWTSTAMP_FILTER_ALL
;
3239 for (i
= 0; i
< vdev
->no_of_vpath
; i
++)
3240 vdev
->vpaths
[i
].ring
.rx_hwts
= vdev
->rx_hwts
;
3242 if (copy_to_user(data
, &config
, sizeof(config
)))
3250 * @dev: Device pointer.
3251 * @ifr: An IOCTL specific structure, that can contain a pointer to
3252 * a proprietary structure used to pass information to the driver.
3253 * @cmd: This is used to distinguish between the different commands that
3254 * can be passed to the IOCTL functions.
3256 * Entry point for the Ioctl.
3258 static int vxge_ioctl(struct net_device
*dev
, struct ifreq
*rq
, int cmd
)
3260 struct vxgedev
*vdev
= netdev_priv(dev
);
3265 ret
= vxge_hwtstamp_ioctl(vdev
, rq
->ifr_data
);
3278 * @dev: pointer to net device structure
3280 * Watchdog for transmit side.
3281 * This function is triggered if the Tx Queue is stopped
3282 * for a pre-defined amount of time when the Interface is still up.
3284 static void vxge_tx_watchdog(struct net_device
*dev
)
3286 struct vxgedev
*vdev
;
3288 vxge_debug_entryexit(VXGE_TRACE
, "%s:%d", __func__
, __LINE__
);
3290 vdev
= netdev_priv(dev
);
3292 vdev
->cric_err_event
= VXGE_HW_EVENT_RESET_START
;
3294 schedule_work(&vdev
->reset_task
);
3295 vxge_debug_entryexit(VXGE_TRACE
,
3296 "%s:%d Exiting...", __func__
, __LINE__
);
3300 * vxge_vlan_rx_add_vid
3301 * @dev: net device pointer.
3304 * Add the vlan id to the devices vlan id table
3307 vxge_vlan_rx_add_vid(struct net_device
*dev
, unsigned short vid
)
3309 struct vxgedev
*vdev
= netdev_priv(dev
);
3310 struct vxge_vpath
*vpath
;
3313 /* Add these vlan to the vid table */
3314 for (vp_id
= 0; vp_id
< vdev
->no_of_vpath
; vp_id
++) {
3315 vpath
= &vdev
->vpaths
[vp_id
];
3316 if (!vpath
->is_open
)
3318 vxge_hw_vpath_vid_add(vpath
->handle
, vid
);
3320 set_bit(vid
, vdev
->active_vlans
);
3324 * vxge_vlan_rx_add_vid
3325 * @dev: net device pointer.
3328 * Remove the vlan id from the device's vlan id table
3331 vxge_vlan_rx_kill_vid(struct net_device
*dev
, unsigned short vid
)
3333 struct vxgedev
*vdev
= netdev_priv(dev
);
3334 struct vxge_vpath
*vpath
;
3337 vxge_debug_entryexit(VXGE_TRACE
, "%s:%d", __func__
, __LINE__
);
3339 /* Delete this vlan from the vid table */
3340 for (vp_id
= 0; vp_id
< vdev
->no_of_vpath
; vp_id
++) {
3341 vpath
= &vdev
->vpaths
[vp_id
];
3342 if (!vpath
->is_open
)
3344 vxge_hw_vpath_vid_delete(vpath
->handle
, vid
);
3346 vxge_debug_entryexit(VXGE_TRACE
,
3347 "%s:%d Exiting...", __func__
, __LINE__
);
3348 clear_bit(vid
, vdev
->active_vlans
);
3351 static const struct net_device_ops vxge_netdev_ops
= {
3352 .ndo_open
= vxge_open
,
3353 .ndo_stop
= vxge_close
,
3354 .ndo_get_stats64
= vxge_get_stats64
,
3355 .ndo_start_xmit
= vxge_xmit
,
3356 .ndo_validate_addr
= eth_validate_addr
,
3357 .ndo_set_rx_mode
= vxge_set_multicast
,
3358 .ndo_do_ioctl
= vxge_ioctl
,
3359 .ndo_set_mac_address
= vxge_set_mac_addr
,
3360 .ndo_change_mtu
= vxge_change_mtu
,
3361 .ndo_fix_features
= vxge_fix_features
,
3362 .ndo_set_features
= vxge_set_features
,
3363 .ndo_vlan_rx_kill_vid
= vxge_vlan_rx_kill_vid
,
3364 .ndo_vlan_rx_add_vid
= vxge_vlan_rx_add_vid
,
3365 .ndo_tx_timeout
= vxge_tx_watchdog
,
3366 #ifdef CONFIG_NET_POLL_CONTROLLER
3367 .ndo_poll_controller
= vxge_netpoll
,
3371 static int __devinit
vxge_device_register(struct __vxge_hw_device
*hldev
,
3372 struct vxge_config
*config
,
3373 int high_dma
, int no_of_vpath
,
3374 struct vxgedev
**vdev_out
)
3376 struct net_device
*ndev
;
3377 enum vxge_hw_status status
= VXGE_HW_OK
;
3378 struct vxgedev
*vdev
;
3379 int ret
= 0, no_of_queue
= 1;
3383 if (config
->tx_steering_type
)
3384 no_of_queue
= no_of_vpath
;
3386 ndev
= alloc_etherdev_mq(sizeof(struct vxgedev
),
3390 vxge_hw_device_trace_level_get(hldev
),
3391 "%s : device allocation failed", __func__
);
3396 vxge_debug_entryexit(
3397 vxge_hw_device_trace_level_get(hldev
),
3398 "%s: %s:%d Entering...",
3399 ndev
->name
, __func__
, __LINE__
);
3401 vdev
= netdev_priv(ndev
);
3402 memset(vdev
, 0, sizeof(struct vxgedev
));
3406 vdev
->pdev
= hldev
->pdev
;
3407 memcpy(&vdev
->config
, config
, sizeof(struct vxge_config
));
3409 vdev
->titan1
= (vdev
->pdev
->revision
== VXGE_HW_TITAN1_PCI_REVISION
);
3411 SET_NETDEV_DEV(ndev
, &vdev
->pdev
->dev
);
3413 ndev
->hw_features
= NETIF_F_RXCSUM
| NETIF_F_SG
|
3414 NETIF_F_IP_CSUM
| NETIF_F_IPV6_CSUM
|
3415 NETIF_F_TSO
| NETIF_F_TSO6
|
3417 if (vdev
->config
.rth_steering
!= NO_STEERING
)
3418 ndev
->hw_features
|= NETIF_F_RXHASH
;
3420 ndev
->features
|= ndev
->hw_features
|
3421 NETIF_F_HW_VLAN_RX
| NETIF_F_HW_VLAN_FILTER
;
3423 /* Driver entry points */
3424 ndev
->irq
= vdev
->pdev
->irq
;
3425 ndev
->base_addr
= (unsigned long) hldev
->bar0
;
3427 ndev
->netdev_ops
= &vxge_netdev_ops
;
3429 ndev
->watchdog_timeo
= VXGE_LL_WATCH_DOG_TIMEOUT
;
3430 INIT_WORK(&vdev
->reset_task
, vxge_reset
);
3432 vxge_initialize_ethtool_ops(ndev
);
3434 /* Allocate memory for vpath */
3435 vdev
->vpaths
= kzalloc((sizeof(struct vxge_vpath
)) *
3436 no_of_vpath
, GFP_KERNEL
);
3437 if (!vdev
->vpaths
) {
3438 vxge_debug_init(VXGE_ERR
,
3439 "%s: vpath memory allocation failed",
3445 vxge_debug_init(vxge_hw_device_trace_level_get(hldev
),
3446 "%s : checksuming enabled", __func__
);
3449 ndev
->features
|= NETIF_F_HIGHDMA
;
3450 vxge_debug_init(vxge_hw_device_trace_level_get(hldev
),
3451 "%s : using High DMA", __func__
);
3454 ret
= register_netdev(ndev
);
3456 vxge_debug_init(vxge_hw_device_trace_level_get(hldev
),
3457 "%s: %s : device registration failed!",
3458 ndev
->name
, __func__
);
3462 /* Set the factory defined MAC address initially */
3463 ndev
->addr_len
= ETH_ALEN
;
3465 /* Make Link state as off at this point, when the Link change
3466 * interrupt comes the state will be automatically changed to
3469 netif_carrier_off(ndev
);
3471 vxge_debug_init(vxge_hw_device_trace_level_get(hldev
),
3472 "%s: Ethernet device registered",
3478 /* Resetting the Device stats */
3479 status
= vxge_hw_mrpcim_stats_access(
3481 VXGE_HW_STATS_OP_CLEAR_ALL_STATS
,
3486 if (status
== VXGE_HW_ERR_PRIVILAGED_OPEARATION
)
3488 vxge_hw_device_trace_level_get(hldev
),
3489 "%s: device stats clear returns"
3490 "VXGE_HW_ERR_PRIVILAGED_OPEARATION", ndev
->name
);
3492 vxge_debug_entryexit(vxge_hw_device_trace_level_get(hldev
),
3493 "%s: %s:%d Exiting...",
3494 ndev
->name
, __func__
, __LINE__
);
3498 kfree(vdev
->vpaths
);
3506 * vxge_device_unregister
3508 * This function will unregister and free network device
3510 static void vxge_device_unregister(struct __vxge_hw_device
*hldev
)
3512 struct vxgedev
*vdev
;
3513 struct net_device
*dev
;
3517 vdev
= netdev_priv(dev
);
3519 vxge_debug_entryexit(vdev
->level_trace
, "%s: %s:%d", vdev
->ndev
->name
,
3520 __func__
, __LINE__
);
3522 strncpy(buf
, dev
->name
, IFNAMSIZ
);
3524 flush_work_sync(&vdev
->reset_task
);
3526 /* in 2.6 will call stop() if device is up */
3527 unregister_netdev(dev
);
3529 kfree(vdev
->vpaths
);
3531 /* we are safe to free it now */
3534 vxge_debug_init(vdev
->level_trace
, "%s: ethernet device unregistered",
3536 vxge_debug_entryexit(vdev
->level_trace
, "%s: %s:%d Exiting...", buf
,
3537 __func__
, __LINE__
);
3541 * vxge_callback_crit_err
3543 * This function is called by the alarm handler in interrupt context.
3544 * Driver must analyze it based on the event type.
3547 vxge_callback_crit_err(struct __vxge_hw_device
*hldev
,
3548 enum vxge_hw_event type
, u64 vp_id
)
3550 struct net_device
*dev
= hldev
->ndev
;
3551 struct vxgedev
*vdev
= netdev_priv(dev
);
3552 struct vxge_vpath
*vpath
= NULL
;
3555 vxge_debug_entryexit(vdev
->level_trace
,
3556 "%s: %s:%d", vdev
->ndev
->name
, __func__
, __LINE__
);
3558 /* Note: This event type should be used for device wide
3559 * indications only - Serious errors, Slot freeze and critical errors
3561 vdev
->cric_err_event
= type
;
3563 for (vpath_idx
= 0; vpath_idx
< vdev
->no_of_vpath
; vpath_idx
++) {
3564 vpath
= &vdev
->vpaths
[vpath_idx
];
3565 if (vpath
->device_id
== vp_id
)
3569 if (!test_bit(__VXGE_STATE_RESET_CARD
, &vdev
->state
)) {
3570 if (type
== VXGE_HW_EVENT_SLOT_FREEZE
) {
3571 vxge_debug_init(VXGE_ERR
,
3572 "%s: Slot is frozen", vdev
->ndev
->name
);
3573 } else if (type
== VXGE_HW_EVENT_SERR
) {
3574 vxge_debug_init(VXGE_ERR
,
3575 "%s: Encountered Serious Error",
3577 } else if (type
== VXGE_HW_EVENT_CRITICAL_ERR
)
3578 vxge_debug_init(VXGE_ERR
,
3579 "%s: Encountered Critical Error",
3583 if ((type
== VXGE_HW_EVENT_SERR
) ||
3584 (type
== VXGE_HW_EVENT_SLOT_FREEZE
)) {
3585 if (unlikely(vdev
->exec_mode
))
3586 clear_bit(__VXGE_STATE_CARD_UP
, &vdev
->state
);
3587 } else if (type
== VXGE_HW_EVENT_CRITICAL_ERR
) {
3588 vxge_hw_device_mask_all(hldev
);
3589 if (unlikely(vdev
->exec_mode
))
3590 clear_bit(__VXGE_STATE_CARD_UP
, &vdev
->state
);
3591 } else if ((type
== VXGE_HW_EVENT_FIFO_ERR
) ||
3592 (type
== VXGE_HW_EVENT_VPATH_ERR
)) {
3594 if (unlikely(vdev
->exec_mode
))
3595 clear_bit(__VXGE_STATE_CARD_UP
, &vdev
->state
);
3597 /* check if this vpath is already set for reset */
3598 if (!test_and_set_bit(vpath_idx
, &vdev
->vp_reset
)) {
3600 /* disable interrupts for this vpath */
3601 vxge_vpath_intr_disable(vdev
, vpath_idx
);
3603 /* stop the queue for this vpath */
3604 netif_tx_stop_queue(vpath
->fifo
.txq
);
3609 vxge_debug_entryexit(vdev
->level_trace
,
3610 "%s: %s:%d Exiting...",
3611 vdev
->ndev
->name
, __func__
, __LINE__
);
3614 static void verify_bandwidth(void)
3616 int i
, band_width
, total
= 0, equal_priority
= 0;
3618 /* 1. If user enters 0 for some fifo, give equal priority to all */
3619 for (i
= 0; i
< VXGE_HW_MAX_VIRTUAL_PATHS
; i
++) {
3620 if (bw_percentage
[i
] == 0) {
3626 if (!equal_priority
) {
3627 /* 2. If sum exceeds 100, give equal priority to all */
3628 for (i
= 0; i
< VXGE_HW_MAX_VIRTUAL_PATHS
; i
++) {
3629 if (bw_percentage
[i
] == 0xFF)
3632 total
+= bw_percentage
[i
];
3633 if (total
> VXGE_HW_VPATH_BANDWIDTH_MAX
) {
3640 if (!equal_priority
) {
3641 /* Is all the bandwidth consumed? */
3642 if (total
< VXGE_HW_VPATH_BANDWIDTH_MAX
) {
3643 if (i
< VXGE_HW_MAX_VIRTUAL_PATHS
) {
3644 /* Split rest of bw equally among next VPs*/
3646 (VXGE_HW_VPATH_BANDWIDTH_MAX
- total
) /
3647 (VXGE_HW_MAX_VIRTUAL_PATHS
- i
);
3648 if (band_width
< 2) /* min of 2% */
3651 for (; i
< VXGE_HW_MAX_VIRTUAL_PATHS
;
3657 } else if (i
< VXGE_HW_MAX_VIRTUAL_PATHS
)
3661 if (equal_priority
) {
3662 vxge_debug_init(VXGE_ERR
,
3663 "%s: Assigning equal bandwidth to all the vpaths",
3665 bw_percentage
[0] = VXGE_HW_VPATH_BANDWIDTH_MAX
/
3666 VXGE_HW_MAX_VIRTUAL_PATHS
;
3667 for (i
= 1; i
< VXGE_HW_MAX_VIRTUAL_PATHS
; i
++)
3668 bw_percentage
[i
] = bw_percentage
[0];
3673 * Vpath configuration
3675 static int __devinit
vxge_config_vpaths(
3676 struct vxge_hw_device_config
*device_config
,
3677 u64 vpath_mask
, struct vxge_config
*config_param
)
3679 int i
, no_of_vpaths
= 0, default_no_vpath
= 0, temp
;
3680 u32 txdl_size
, txdl_per_memblock
;
3682 temp
= driver_config
->vpath_per_dev
;
3683 if ((driver_config
->vpath_per_dev
== VXGE_USE_DEFAULT
) &&
3684 (max_config_dev
== VXGE_MAX_CONFIG_DEV
)) {
3685 /* No more CPU. Return vpath number as zero.*/
3686 if (driver_config
->g_no_cpus
== -1)
3689 if (!driver_config
->g_no_cpus
)
3690 driver_config
->g_no_cpus
= num_online_cpus();
3692 driver_config
->vpath_per_dev
= driver_config
->g_no_cpus
>> 1;
3693 if (!driver_config
->vpath_per_dev
)
3694 driver_config
->vpath_per_dev
= 1;
3696 for (i
= 0; i
< VXGE_HW_MAX_VIRTUAL_PATHS
; i
++)
3697 if (!vxge_bVALn(vpath_mask
, i
, 1))
3701 if (default_no_vpath
< driver_config
->vpath_per_dev
)
3702 driver_config
->vpath_per_dev
= default_no_vpath
;
3704 driver_config
->g_no_cpus
= driver_config
->g_no_cpus
-
3705 (driver_config
->vpath_per_dev
* 2);
3706 if (driver_config
->g_no_cpus
<= 0)
3707 driver_config
->g_no_cpus
= -1;
3710 if (driver_config
->vpath_per_dev
== 1) {
3711 vxge_debug_ll_config(VXGE_TRACE
,
3712 "%s: Disable tx and rx steering, "
3713 "as single vpath is configured", VXGE_DRIVER_NAME
);
3714 config_param
->rth_steering
= NO_STEERING
;
3715 config_param
->tx_steering_type
= NO_STEERING
;
3716 device_config
->rth_en
= 0;
3719 /* configure bandwidth */
3720 for (i
= 0; i
< VXGE_HW_MAX_VIRTUAL_PATHS
; i
++)
3721 device_config
->vp_config
[i
].min_bandwidth
= bw_percentage
[i
];
3723 for (i
= 0; i
< VXGE_HW_MAX_VIRTUAL_PATHS
; i
++) {
3724 device_config
->vp_config
[i
].vp_id
= i
;
3725 device_config
->vp_config
[i
].mtu
= VXGE_HW_DEFAULT_MTU
;
3726 if (no_of_vpaths
< driver_config
->vpath_per_dev
) {
3727 if (!vxge_bVALn(vpath_mask
, i
, 1)) {
3728 vxge_debug_ll_config(VXGE_TRACE
,
3729 "%s: vpath: %d is not available",
3730 VXGE_DRIVER_NAME
, i
);
3733 vxge_debug_ll_config(VXGE_TRACE
,
3734 "%s: vpath: %d available",
3735 VXGE_DRIVER_NAME
, i
);
3739 vxge_debug_ll_config(VXGE_TRACE
,
3740 "%s: vpath: %d is not configured, "
3741 "max_config_vpath exceeded",
3742 VXGE_DRIVER_NAME
, i
);
3746 /* Configure Tx fifo's */
3747 device_config
->vp_config
[i
].fifo
.enable
=
3748 VXGE_HW_FIFO_ENABLE
;
3749 device_config
->vp_config
[i
].fifo
.max_frags
=
3751 device_config
->vp_config
[i
].fifo
.memblock_size
=
3752 VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE
;
3754 txdl_size
= device_config
->vp_config
[i
].fifo
.max_frags
*
3755 sizeof(struct vxge_hw_fifo_txd
);
3756 txdl_per_memblock
= VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE
/ txdl_size
;
3758 device_config
->vp_config
[i
].fifo
.fifo_blocks
=
3759 ((VXGE_DEF_FIFO_LENGTH
- 1) / txdl_per_memblock
) + 1;
3761 device_config
->vp_config
[i
].fifo
.intr
=
3762 VXGE_HW_FIFO_QUEUE_INTR_DISABLE
;
3764 /* Configure tti properties */
3765 device_config
->vp_config
[i
].tti
.intr_enable
=
3766 VXGE_HW_TIM_INTR_ENABLE
;
3768 device_config
->vp_config
[i
].tti
.btimer_val
=
3769 (VXGE_TTI_BTIMER_VAL
* 1000) / 272;
3771 device_config
->vp_config
[i
].tti
.timer_ac_en
=
3772 VXGE_HW_TIM_TIMER_AC_ENABLE
;
3774 /* For msi-x with napi (each vector has a handler of its own) -
3775 * Set CI to OFF for all vpaths
3777 device_config
->vp_config
[i
].tti
.timer_ci_en
=
3778 VXGE_HW_TIM_TIMER_CI_DISABLE
;
3780 device_config
->vp_config
[i
].tti
.timer_ri_en
=
3781 VXGE_HW_TIM_TIMER_RI_DISABLE
;
3783 device_config
->vp_config
[i
].tti
.util_sel
=
3784 VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_NET_UTIL
;
3786 device_config
->vp_config
[i
].tti
.ltimer_val
=
3787 (VXGE_TTI_LTIMER_VAL
* 1000) / 272;
3789 device_config
->vp_config
[i
].tti
.rtimer_val
=
3790 (VXGE_TTI_RTIMER_VAL
* 1000) / 272;
3792 device_config
->vp_config
[i
].tti
.urange_a
= TTI_TX_URANGE_A
;
3793 device_config
->vp_config
[i
].tti
.urange_b
= TTI_TX_URANGE_B
;
3794 device_config
->vp_config
[i
].tti
.urange_c
= TTI_TX_URANGE_C
;
3795 device_config
->vp_config
[i
].tti
.uec_a
= TTI_TX_UFC_A
;
3796 device_config
->vp_config
[i
].tti
.uec_b
= TTI_TX_UFC_B
;
3797 device_config
->vp_config
[i
].tti
.uec_c
= TTI_TX_UFC_C
;
3798 device_config
->vp_config
[i
].tti
.uec_d
= TTI_TX_UFC_D
;
3800 /* Configure Rx rings */
3801 device_config
->vp_config
[i
].ring
.enable
=
3802 VXGE_HW_RING_ENABLE
;
3804 device_config
->vp_config
[i
].ring
.ring_blocks
=
3805 VXGE_HW_DEF_RING_BLOCKS
;
3807 device_config
->vp_config
[i
].ring
.buffer_mode
=
3808 VXGE_HW_RING_RXD_BUFFER_MODE_1
;
3810 device_config
->vp_config
[i
].ring
.rxds_limit
=
3811 VXGE_HW_DEF_RING_RXDS_LIMIT
;
3813 device_config
->vp_config
[i
].ring
.scatter_mode
=
3814 VXGE_HW_RING_SCATTER_MODE_A
;
3816 /* Configure rti properties */
3817 device_config
->vp_config
[i
].rti
.intr_enable
=
3818 VXGE_HW_TIM_INTR_ENABLE
;
3820 device_config
->vp_config
[i
].rti
.btimer_val
=
3821 (VXGE_RTI_BTIMER_VAL
* 1000)/272;
3823 device_config
->vp_config
[i
].rti
.timer_ac_en
=
3824 VXGE_HW_TIM_TIMER_AC_ENABLE
;
3826 device_config
->vp_config
[i
].rti
.timer_ci_en
=
3827 VXGE_HW_TIM_TIMER_CI_DISABLE
;
3829 device_config
->vp_config
[i
].rti
.timer_ri_en
=
3830 VXGE_HW_TIM_TIMER_RI_DISABLE
;
3832 device_config
->vp_config
[i
].rti
.util_sel
=
3833 VXGE_HW_TIM_UTIL_SEL_LEGACY_RX_NET_UTIL
;
3835 device_config
->vp_config
[i
].rti
.urange_a
=
3837 device_config
->vp_config
[i
].rti
.urange_b
=
3839 device_config
->vp_config
[i
].rti
.urange_c
=
3841 device_config
->vp_config
[i
].rti
.uec_a
= RTI_RX_UFC_A
;
3842 device_config
->vp_config
[i
].rti
.uec_b
= RTI_RX_UFC_B
;
3843 device_config
->vp_config
[i
].rti
.uec_c
= RTI_RX_UFC_C
;
3844 device_config
->vp_config
[i
].rti
.uec_d
= RTI_RX_UFC_D
;
3846 device_config
->vp_config
[i
].rti
.rtimer_val
=
3847 (VXGE_RTI_RTIMER_VAL
* 1000) / 272;
3849 device_config
->vp_config
[i
].rti
.ltimer_val
=
3850 (VXGE_RTI_LTIMER_VAL
* 1000) / 272;
3852 device_config
->vp_config
[i
].rpa_strip_vlan_tag
=
3856 driver_config
->vpath_per_dev
= temp
;
3857 return no_of_vpaths
;
3860 /* initialize device configuratrions */
3861 static void __devinit
vxge_device_config_init(
3862 struct vxge_hw_device_config
*device_config
,
3865 /* Used for CQRQ/SRQ. */
3866 device_config
->dma_blockpool_initial
=
3867 VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE
;
3869 device_config
->dma_blockpool_max
=
3870 VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE
;
3872 if (max_mac_vpath
> VXGE_MAX_MAC_ADDR_COUNT
)
3873 max_mac_vpath
= VXGE_MAX_MAC_ADDR_COUNT
;
3875 #ifndef CONFIG_PCI_MSI
3876 vxge_debug_init(VXGE_ERR
,
3877 "%s: This Kernel does not support "
3878 "MSI-X. Defaulting to INTA", VXGE_DRIVER_NAME
);
3882 /* Configure whether MSI-X or IRQL. */
3883 switch (*intr_type
) {
3885 device_config
->intr_mode
= VXGE_HW_INTR_MODE_IRQLINE
;
3889 device_config
->intr_mode
= VXGE_HW_INTR_MODE_MSIX_ONE_SHOT
;
3893 /* Timer period between device poll */
3894 device_config
->device_poll_millis
= VXGE_TIMER_DELAY
;
3896 /* Configure mac based steering. */
3897 device_config
->rts_mac_en
= addr_learn_en
;
3899 /* Configure Vpaths */
3900 device_config
->rth_it_type
= VXGE_HW_RTH_IT_TYPE_MULTI_IT
;
3902 vxge_debug_ll_config(VXGE_TRACE
, "%s : Device Config Params ",
3904 vxge_debug_ll_config(VXGE_TRACE
, "intr_mode : %d",
3905 device_config
->intr_mode
);
3906 vxge_debug_ll_config(VXGE_TRACE
, "device_poll_millis : %d",
3907 device_config
->device_poll_millis
);
3908 vxge_debug_ll_config(VXGE_TRACE
, "rth_en : %d",
3909 device_config
->rth_en
);
3910 vxge_debug_ll_config(VXGE_TRACE
, "rth_it_type : %d",
3911 device_config
->rth_it_type
);
3914 static void __devinit
vxge_print_parm(struct vxgedev
*vdev
, u64 vpath_mask
)
3918 vxge_debug_init(VXGE_TRACE
,
3919 "%s: %d Vpath(s) opened",
3920 vdev
->ndev
->name
, vdev
->no_of_vpath
);
3922 switch (vdev
->config
.intr_type
) {
3924 vxge_debug_init(VXGE_TRACE
,
3925 "%s: Interrupt type INTA", vdev
->ndev
->name
);
3929 vxge_debug_init(VXGE_TRACE
,
3930 "%s: Interrupt type MSI-X", vdev
->ndev
->name
);
3934 if (vdev
->config
.rth_steering
) {
3935 vxge_debug_init(VXGE_TRACE
,
3936 "%s: RTH steering enabled for TCP_IPV4",
3939 vxge_debug_init(VXGE_TRACE
,
3940 "%s: RTH steering disabled", vdev
->ndev
->name
);
3943 switch (vdev
->config
.tx_steering_type
) {
3945 vxge_debug_init(VXGE_TRACE
,
3946 "%s: Tx steering disabled", vdev
->ndev
->name
);
3948 case TX_PRIORITY_STEERING
:
3949 vxge_debug_init(VXGE_TRACE
,
3950 "%s: Unsupported tx steering option",
3952 vxge_debug_init(VXGE_TRACE
,
3953 "%s: Tx steering disabled", vdev
->ndev
->name
);
3954 vdev
->config
.tx_steering_type
= 0;
3956 case TX_VLAN_STEERING
:
3957 vxge_debug_init(VXGE_TRACE
,
3958 "%s: Unsupported tx steering option",
3960 vxge_debug_init(VXGE_TRACE
,
3961 "%s: Tx steering disabled", vdev
->ndev
->name
);
3962 vdev
->config
.tx_steering_type
= 0;
3964 case TX_MULTIQ_STEERING
:
3965 vxge_debug_init(VXGE_TRACE
,
3966 "%s: Tx multiqueue steering enabled",
3969 case TX_PORT_STEERING
:
3970 vxge_debug_init(VXGE_TRACE
,
3971 "%s: Tx port steering enabled",
3975 vxge_debug_init(VXGE_ERR
,
3976 "%s: Unsupported tx steering type",
3978 vxge_debug_init(VXGE_TRACE
,
3979 "%s: Tx steering disabled", vdev
->ndev
->name
);
3980 vdev
->config
.tx_steering_type
= 0;
3983 if (vdev
->config
.addr_learn_en
)
3984 vxge_debug_init(VXGE_TRACE
,
3985 "%s: MAC Address learning enabled", vdev
->ndev
->name
);
3987 for (i
= 0; i
< VXGE_HW_MAX_VIRTUAL_PATHS
; i
++) {
3988 if (!vxge_bVALn(vpath_mask
, i
, 1))
3990 vxge_debug_ll_config(VXGE_TRACE
,
3991 "%s: MTU size - %d", vdev
->ndev
->name
,
3992 ((struct __vxge_hw_device
*)(vdev
->devh
))->
3993 config
.vp_config
[i
].mtu
);
3994 vxge_debug_init(VXGE_TRACE
,
3995 "%s: VLAN tag stripping %s", vdev
->ndev
->name
,
3996 ((struct __vxge_hw_device
*)(vdev
->devh
))->
3997 config
.vp_config
[i
].rpa_strip_vlan_tag
3998 ? "Enabled" : "Disabled");
3999 vxge_debug_ll_config(VXGE_TRACE
,
4000 "%s: Max frags : %d", vdev
->ndev
->name
,
4001 ((struct __vxge_hw_device
*)(vdev
->devh
))->
4002 config
.vp_config
[i
].fifo
.max_frags
);
4009 * vxge_pm_suspend - vxge power management suspend entry point
4012 static int vxge_pm_suspend(struct pci_dev
*pdev
, pm_message_t state
)
4017 * vxge_pm_resume - vxge power management resume entry point
4020 static int vxge_pm_resume(struct pci_dev
*pdev
)
4028 * vxge_io_error_detected - called when PCI error is detected
4029 * @pdev: Pointer to PCI device
4030 * @state: The current pci connection state
4032 * This function is called after a PCI bus error affecting
4033 * this device has been detected.
4035 static pci_ers_result_t
vxge_io_error_detected(struct pci_dev
*pdev
,
4036 pci_channel_state_t state
)
4038 struct __vxge_hw_device
*hldev
= pci_get_drvdata(pdev
);
4039 struct net_device
*netdev
= hldev
->ndev
;
4041 netif_device_detach(netdev
);
4043 if (state
== pci_channel_io_perm_failure
)
4044 return PCI_ERS_RESULT_DISCONNECT
;
4046 if (netif_running(netdev
)) {
4047 /* Bring down the card, while avoiding PCI I/O */
4048 do_vxge_close(netdev
, 0);
4051 pci_disable_device(pdev
);
4053 return PCI_ERS_RESULT_NEED_RESET
;
4057 * vxge_io_slot_reset - called after the pci bus has been reset.
4058 * @pdev: Pointer to PCI device
4060 * Restart the card from scratch, as if from a cold-boot.
4061 * At this point, the card has exprienced a hard reset,
4062 * followed by fixups by BIOS, and has its config space
4063 * set up identically to what it was at cold boot.
4065 static pci_ers_result_t
vxge_io_slot_reset(struct pci_dev
*pdev
)
4067 struct __vxge_hw_device
*hldev
= pci_get_drvdata(pdev
);
4068 struct net_device
*netdev
= hldev
->ndev
;
4070 struct vxgedev
*vdev
= netdev_priv(netdev
);
4072 if (pci_enable_device(pdev
)) {
4073 netdev_err(netdev
, "Cannot re-enable device after reset\n");
4074 return PCI_ERS_RESULT_DISCONNECT
;
4077 pci_set_master(pdev
);
4078 do_vxge_reset(vdev
, VXGE_LL_FULL_RESET
);
4080 return PCI_ERS_RESULT_RECOVERED
;
4084 * vxge_io_resume - called when traffic can start flowing again.
4085 * @pdev: Pointer to PCI device
4087 * This callback is called when the error recovery driver tells
4088 * us that its OK to resume normal operation.
4090 static void vxge_io_resume(struct pci_dev
*pdev
)
4092 struct __vxge_hw_device
*hldev
= pci_get_drvdata(pdev
);
4093 struct net_device
*netdev
= hldev
->ndev
;
4095 if (netif_running(netdev
)) {
4096 if (vxge_open(netdev
)) {
4098 "Can't bring device back up after reset\n");
4103 netif_device_attach(netdev
);
4106 static inline u32
vxge_get_num_vfs(u64 function_mode
)
4108 u32 num_functions
= 0;
4110 switch (function_mode
) {
4111 case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION
:
4112 case VXGE_HW_FUNCTION_MODE_SRIOV_8
:
4115 case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION
:
4118 case VXGE_HW_FUNCTION_MODE_SRIOV
:
4119 case VXGE_HW_FUNCTION_MODE_MRIOV
:
4120 case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_17
:
4123 case VXGE_HW_FUNCTION_MODE_SRIOV_4
:
4126 case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_2
:
4129 case VXGE_HW_FUNCTION_MODE_MRIOV_8
:
4130 num_functions
= 8; /* TODO */
4133 return num_functions
;
4136 int vxge_fw_upgrade(struct vxgedev
*vdev
, char *fw_name
, int override
)
4138 struct __vxge_hw_device
*hldev
= vdev
->devh
;
4139 u32 maj
, min
, bld
, cmaj
, cmin
, cbld
;
4140 enum vxge_hw_status status
;
4141 const struct firmware
*fw
;
4144 ret
= request_firmware(&fw
, fw_name
, &vdev
->pdev
->dev
);
4146 vxge_debug_init(VXGE_ERR
, "%s: Firmware file '%s' not found",
4147 VXGE_DRIVER_NAME
, fw_name
);
4151 /* Load the new firmware onto the adapter */
4152 status
= vxge_update_fw_image(hldev
, fw
->data
, fw
->size
);
4153 if (status
!= VXGE_HW_OK
) {
4154 vxge_debug_init(VXGE_ERR
,
4155 "%s: FW image download to adapter failed '%s'.",
4156 VXGE_DRIVER_NAME
, fw_name
);
4161 /* Read the version of the new firmware */
4162 status
= vxge_hw_upgrade_read_version(hldev
, &maj
, &min
, &bld
);
4163 if (status
!= VXGE_HW_OK
) {
4164 vxge_debug_init(VXGE_ERR
,
4165 "%s: Upgrade read version failed '%s'.",
4166 VXGE_DRIVER_NAME
, fw_name
);
4171 cmaj
= vdev
->config
.device_hw_info
.fw_version
.major
;
4172 cmin
= vdev
->config
.device_hw_info
.fw_version
.minor
;
4173 cbld
= vdev
->config
.device_hw_info
.fw_version
.build
;
4174 /* It's possible the version in /lib/firmware is not the latest version.
4175 * If so, we could get into a loop of trying to upgrade to the latest
4176 * and flashing the older version.
4178 if (VXGE_FW_VER(maj
, min
, bld
) == VXGE_FW_VER(cmaj
, cmin
, cbld
) &&
4184 printk(KERN_NOTICE
"Upgrade to firmware version %d.%d.%d commencing\n",
4187 /* Flash the adapter with the new firmware */
4188 status
= vxge_hw_flash_fw(hldev
);
4189 if (status
!= VXGE_HW_OK
) {
4190 vxge_debug_init(VXGE_ERR
, "%s: Upgrade commit failed '%s'.",
4191 VXGE_DRIVER_NAME
, fw_name
);
4196 printk(KERN_NOTICE
"Upgrade of firmware successful! Adapter must be "
4197 "hard reset before using, thus requiring a system reboot or a "
4198 "hotplug event.\n");
4201 release_firmware(fw
);
4205 static int vxge_probe_fw_update(struct vxgedev
*vdev
)
4211 maj
= vdev
->config
.device_hw_info
.fw_version
.major
;
4212 min
= vdev
->config
.device_hw_info
.fw_version
.minor
;
4213 bld
= vdev
->config
.device_hw_info
.fw_version
.build
;
4215 if (VXGE_FW_VER(maj
, min
, bld
) == VXGE_CERT_FW_VER
)
4218 /* Ignore the build number when determining if the current firmware is
4219 * "too new" to load the driver
4221 if (VXGE_FW_VER(maj
, min
, 0) > VXGE_CERT_FW_VER
) {
4222 vxge_debug_init(VXGE_ERR
, "%s: Firmware newer than last known "
4223 "version, unable to load driver\n",
4228 /* Firmware 1.4.4 and older cannot be upgraded, and is too ancient to
4229 * work with this driver.
4231 if (VXGE_FW_VER(maj
, min
, bld
) <= VXGE_FW_DEAD_VER
) {
4232 vxge_debug_init(VXGE_ERR
, "%s: Firmware %d.%d.%d cannot be "
4233 "upgraded\n", VXGE_DRIVER_NAME
, maj
, min
, bld
);
4237 /* If file not specified, determine gPXE or not */
4238 if (VXGE_FW_VER(maj
, min
, bld
) >= VXGE_EPROM_FW_VER
) {
4240 for (i
= 0; i
< VXGE_HW_MAX_ROM_IMAGES
; i
++)
4241 if (vdev
->devh
->eprom_versions
[i
]) {
4247 fw_name
= "vxge/X3fw-pxe.ncf";
4249 fw_name
= "vxge/X3fw.ncf";
4251 ret
= vxge_fw_upgrade(vdev
, fw_name
, 0);
4252 /* -EINVAL and -ENOENT are not fatal errors for flashing firmware on
4253 * probe, so ignore them
4255 if (ret
!= -EINVAL
&& ret
!= -ENOENT
)
4260 if (VXGE_FW_VER(VXGE_CERT_FW_VER_MAJOR
, VXGE_CERT_FW_VER_MINOR
, 0) >
4261 VXGE_FW_VER(maj
, min
, 0)) {
4262 vxge_debug_init(VXGE_ERR
, "%s: Firmware %d.%d.%d is too old to"
4263 " be used with this driver.\n"
4264 "Please get the latest version from "
4265 "ftp://ftp.s2io.com/pub/X3100-Drivers/FIRMWARE",
4266 VXGE_DRIVER_NAME
, maj
, min
, bld
);
4273 static int __devinit
is_sriov_initialized(struct pci_dev
*pdev
)
4278 pos
= pci_find_ext_capability(pdev
, PCI_EXT_CAP_ID_SRIOV
);
4280 pci_read_config_word(pdev
, pos
+ PCI_SRIOV_CTRL
, &ctrl
);
4281 if (ctrl
& PCI_SRIOV_CTRL_VFE
)
4289 * @pdev : structure containing the PCI related information of the device.
4290 * @pre: List of PCI devices supported by the driver listed in vxge_id_table.
4292 * This function is called when a new PCI device gets detected and initializes
4295 * returns 0 on success and negative on failure.
4298 static int __devinit
4299 vxge_probe(struct pci_dev
*pdev
, const struct pci_device_id
*pre
)
4301 struct __vxge_hw_device
*hldev
;
4302 enum vxge_hw_status status
;
4306 struct vxgedev
*vdev
;
4307 struct vxge_config
*ll_config
= NULL
;
4308 struct vxge_hw_device_config
*device_config
= NULL
;
4309 struct vxge_hw_device_attr attr
;
4310 int i
, j
, no_of_vpath
= 0, max_vpath_supported
= 0;
4312 struct vxge_mac_addrs
*entry
;
4313 static int bus
= -1, device
= -1;
4316 enum vxge_hw_status is_privileged
;
4320 vxge_debug_entryexit(VXGE_TRACE
, "%s:%d", __func__
, __LINE__
);
4323 /* In SRIOV-17 mode, functions of the same adapter
4324 * can be deployed on different buses
4326 if (((bus
!= pdev
->bus
->number
) || (device
!= PCI_SLOT(pdev
->devfn
))) &&
4330 bus
= pdev
->bus
->number
;
4331 device
= PCI_SLOT(pdev
->devfn
);
4334 if (driver_config
->config_dev_cnt
&&
4335 (driver_config
->config_dev_cnt
!=
4336 driver_config
->total_dev_cnt
))
4337 vxge_debug_init(VXGE_ERR
,
4338 "%s: Configured %d of %d devices",
4340 driver_config
->config_dev_cnt
,
4341 driver_config
->total_dev_cnt
);
4342 driver_config
->config_dev_cnt
= 0;
4343 driver_config
->total_dev_cnt
= 0;
4346 /* Now making the CPU based no of vpath calculation
4347 * applicable for individual functions as well.
4349 driver_config
->g_no_cpus
= 0;
4350 driver_config
->vpath_per_dev
= max_config_vpath
;
4352 driver_config
->total_dev_cnt
++;
4353 if (++driver_config
->config_dev_cnt
> max_config_dev
) {
4358 device_config
= kzalloc(sizeof(struct vxge_hw_device_config
),
4360 if (!device_config
) {
4362 vxge_debug_init(VXGE_ERR
,
4363 "device_config : malloc failed %s %d",
4364 __FILE__
, __LINE__
);
4368 ll_config
= kzalloc(sizeof(struct vxge_config
), GFP_KERNEL
);
4371 vxge_debug_init(VXGE_ERR
,
4372 "device_config : malloc failed %s %d",
4373 __FILE__
, __LINE__
);
4376 ll_config
->tx_steering_type
= TX_MULTIQ_STEERING
;
4377 ll_config
->intr_type
= MSI_X
;
4378 ll_config
->napi_weight
= NEW_NAPI_WEIGHT
;
4379 ll_config
->rth_steering
= RTH_STEERING
;
4381 /* get the default configuration parameters */
4382 vxge_hw_device_config_default_get(device_config
);
4384 /* initialize configuration parameters */
4385 vxge_device_config_init(device_config
, &ll_config
->intr_type
);
4387 ret
= pci_enable_device(pdev
);
4389 vxge_debug_init(VXGE_ERR
,
4390 "%s : can not enable PCI device", __func__
);
4394 if (!pci_set_dma_mask(pdev
, DMA_BIT_MASK(64))) {
4395 vxge_debug_ll_config(VXGE_TRACE
,
4396 "%s : using 64bit DMA", __func__
);
4400 if (pci_set_consistent_dma_mask(pdev
,
4401 DMA_BIT_MASK(64))) {
4402 vxge_debug_init(VXGE_ERR
,
4403 "%s : unable to obtain 64bit DMA for "
4404 "consistent allocations", __func__
);
4408 } else if (!pci_set_dma_mask(pdev
, DMA_BIT_MASK(32))) {
4409 vxge_debug_ll_config(VXGE_TRACE
,
4410 "%s : using 32bit DMA", __func__
);
4416 ret
= pci_request_region(pdev
, 0, VXGE_DRIVER_NAME
);
4418 vxge_debug_init(VXGE_ERR
,
4419 "%s : request regions failed", __func__
);
4423 pci_set_master(pdev
);
4425 attr
.bar0
= pci_ioremap_bar(pdev
, 0);
4427 vxge_debug_init(VXGE_ERR
,
4428 "%s : cannot remap io memory bar0", __func__
);
4432 vxge_debug_ll_config(VXGE_TRACE
,
4433 "pci ioremap bar0: %p:0x%llx",
4435 (unsigned long long)pci_resource_start(pdev
, 0));
4437 status
= vxge_hw_device_hw_info_get(attr
.bar0
,
4438 &ll_config
->device_hw_info
);
4439 if (status
!= VXGE_HW_OK
) {
4440 vxge_debug_init(VXGE_ERR
,
4441 "%s: Reading of hardware info failed."
4442 "Please try upgrading the firmware.", VXGE_DRIVER_NAME
);
4447 vpath_mask
= ll_config
->device_hw_info
.vpath_mask
;
4448 if (vpath_mask
== 0) {
4449 vxge_debug_ll_config(VXGE_TRACE
,
4450 "%s: No vpaths available in device", VXGE_DRIVER_NAME
);
4455 vxge_debug_ll_config(VXGE_TRACE
,
4456 "%s:%d Vpath mask = %llx", __func__
, __LINE__
,
4457 (unsigned long long)vpath_mask
);
4459 function_mode
= ll_config
->device_hw_info
.function_mode
;
4460 host_type
= ll_config
->device_hw_info
.host_type
;
4461 is_privileged
= __vxge_hw_device_is_privilaged(host_type
,
4462 ll_config
->device_hw_info
.func_id
);
4464 /* Check how many vpaths are available */
4465 for (i
= 0; i
< VXGE_HW_MAX_VIRTUAL_PATHS
; i
++) {
4466 if (!((vpath_mask
) & vxge_mBIT(i
)))
4468 max_vpath_supported
++;
4472 num_vfs
= vxge_get_num_vfs(function_mode
) - 1;
4474 /* Enable SRIOV mode, if firmware has SRIOV support and if it is a PF */
4475 if (is_sriov(function_mode
) && !is_sriov_initialized(pdev
) &&
4476 (ll_config
->intr_type
!= INTA
)) {
4477 ret
= pci_enable_sriov(pdev
, num_vfs
);
4479 vxge_debug_ll_config(VXGE_ERR
,
4480 "Failed in enabling SRIOV mode: %d\n", ret
);
4481 /* No need to fail out, as an error here is non-fatal */
4485 * Configure vpaths and get driver configured number of vpaths
4486 * which is less than or equal to the maximum vpaths per function.
4488 no_of_vpath
= vxge_config_vpaths(device_config
, vpath_mask
, ll_config
);
4490 vxge_debug_ll_config(VXGE_ERR
,
4491 "%s: No more vpaths to configure", VXGE_DRIVER_NAME
);
4496 /* Setting driver callbacks */
4497 attr
.uld_callbacks
.link_up
= vxge_callback_link_up
;
4498 attr
.uld_callbacks
.link_down
= vxge_callback_link_down
;
4499 attr
.uld_callbacks
.crit_err
= vxge_callback_crit_err
;
4501 status
= vxge_hw_device_initialize(&hldev
, &attr
, device_config
);
4502 if (status
!= VXGE_HW_OK
) {
4503 vxge_debug_init(VXGE_ERR
,
4504 "Failed to initialize device (%d)", status
);
4509 if (VXGE_FW_VER(ll_config
->device_hw_info
.fw_version
.major
,
4510 ll_config
->device_hw_info
.fw_version
.minor
,
4511 ll_config
->device_hw_info
.fw_version
.build
) >=
4512 VXGE_EPROM_FW_VER
) {
4513 struct eprom_image img
[VXGE_HW_MAX_ROM_IMAGES
];
4515 status
= vxge_hw_vpath_eprom_img_ver_get(hldev
, img
);
4516 if (status
!= VXGE_HW_OK
) {
4517 vxge_debug_init(VXGE_ERR
, "%s: Reading of EPROM failed",
4519 /* This is a non-fatal error, continue */
4522 for (i
= 0; i
< VXGE_HW_MAX_ROM_IMAGES
; i
++) {
4523 hldev
->eprom_versions
[i
] = img
[i
].version
;
4524 if (!img
[i
].is_valid
)
4526 vxge_debug_init(VXGE_TRACE
, "%s: EPROM %d, version "
4527 "%d.%d.%d.%d", VXGE_DRIVER_NAME
, i
,
4528 VXGE_EPROM_IMG_MAJOR(img
[i
].version
),
4529 VXGE_EPROM_IMG_MINOR(img
[i
].version
),
4530 VXGE_EPROM_IMG_FIX(img
[i
].version
),
4531 VXGE_EPROM_IMG_BUILD(img
[i
].version
));
4535 /* if FCS stripping is not disabled in MAC fail driver load */
4536 status
= vxge_hw_vpath_strip_fcs_check(hldev
, vpath_mask
);
4537 if (status
!= VXGE_HW_OK
) {
4538 vxge_debug_init(VXGE_ERR
, "%s: FCS stripping is enabled in MAC"
4539 " failing driver load", VXGE_DRIVER_NAME
);
4544 /* Always enable HWTS. This will always cause the FCS to be invalid,
4545 * due to the fact that HWTS is using the FCS as the location of the
4546 * timestamp. The HW FCS checking will still correctly determine if
4547 * there is a valid checksum, and the FCS is being removed by the driver
4548 * anyway. So no fucntionality is being lost. Since it is always
4549 * enabled, we now simply use the ioctl call to set whether or not the
4550 * driver should be paying attention to the HWTS.
4552 if (is_privileged
== VXGE_HW_OK
) {
4553 status
= vxge_timestamp_config(hldev
);
4554 if (status
!= VXGE_HW_OK
) {
4555 vxge_debug_init(VXGE_ERR
, "%s: HWTS enable failed",
4562 vxge_hw_device_debug_set(hldev
, VXGE_ERR
, VXGE_COMPONENT_LL
);
4564 /* set private device info */
4565 pci_set_drvdata(pdev
, hldev
);
4567 ll_config
->fifo_indicate_max_pkts
= VXGE_FIFO_INDICATE_MAX_PKTS
;
4568 ll_config
->addr_learn_en
= addr_learn_en
;
4569 ll_config
->rth_algorithm
= RTH_ALG_JENKINS
;
4570 ll_config
->rth_hash_type_tcpipv4
= 1;
4571 ll_config
->rth_hash_type_ipv4
= 0;
4572 ll_config
->rth_hash_type_tcpipv6
= 0;
4573 ll_config
->rth_hash_type_ipv6
= 0;
4574 ll_config
->rth_hash_type_tcpipv6ex
= 0;
4575 ll_config
->rth_hash_type_ipv6ex
= 0;
4576 ll_config
->rth_bkt_sz
= RTH_BUCKET_SIZE
;
4577 ll_config
->tx_pause_enable
= VXGE_PAUSE_CTRL_ENABLE
;
4578 ll_config
->rx_pause_enable
= VXGE_PAUSE_CTRL_ENABLE
;
4580 ret
= vxge_device_register(hldev
, ll_config
, high_dma
, no_of_vpath
,
4587 ret
= vxge_probe_fw_update(vdev
);
4591 vxge_hw_device_debug_set(hldev
, VXGE_TRACE
, VXGE_COMPONENT_LL
);
4592 VXGE_COPY_DEBUG_INFO_TO_LL(vdev
, vxge_hw_device_error_level_get(hldev
),
4593 vxge_hw_device_trace_level_get(hldev
));
4595 /* set private HW device info */
4596 vdev
->mtu
= VXGE_HW_DEFAULT_MTU
;
4597 vdev
->bar0
= attr
.bar0
;
4598 vdev
->max_vpath_supported
= max_vpath_supported
;
4599 vdev
->no_of_vpath
= no_of_vpath
;
4601 /* Virtual Path count */
4602 for (i
= 0, j
= 0; i
< VXGE_HW_MAX_VIRTUAL_PATHS
; i
++) {
4603 if (!vxge_bVALn(vpath_mask
, i
, 1))
4605 if (j
>= vdev
->no_of_vpath
)
4608 vdev
->vpaths
[j
].is_configured
= 1;
4609 vdev
->vpaths
[j
].device_id
= i
;
4610 vdev
->vpaths
[j
].ring
.driver_id
= j
;
4611 vdev
->vpaths
[j
].vdev
= vdev
;
4612 vdev
->vpaths
[j
].max_mac_addr_cnt
= max_mac_vpath
;
4613 memcpy((u8
*)vdev
->vpaths
[j
].macaddr
,
4614 ll_config
->device_hw_info
.mac_addrs
[i
],
4617 /* Initialize the mac address list header */
4618 INIT_LIST_HEAD(&vdev
->vpaths
[j
].mac_addr_list
);
4620 vdev
->vpaths
[j
].mac_addr_cnt
= 0;
4621 vdev
->vpaths
[j
].mcast_addr_cnt
= 0;
4624 vdev
->exec_mode
= VXGE_EXEC_MODE_DISABLE
;
4625 vdev
->max_config_port
= max_config_port
;
4627 vdev
->vlan_tag_strip
= vlan_tag_strip
;
4629 /* map the hashing selector table to the configured vpaths */
4630 for (i
= 0; i
< vdev
->no_of_vpath
; i
++)
4631 vdev
->vpath_selector
[i
] = vpath_selector
[i
];
4633 macaddr
= (u8
*)vdev
->vpaths
[0].macaddr
;
4635 ll_config
->device_hw_info
.serial_number
[VXGE_HW_INFO_LEN
- 1] = '\0';
4636 ll_config
->device_hw_info
.product_desc
[VXGE_HW_INFO_LEN
- 1] = '\0';
4637 ll_config
->device_hw_info
.part_number
[VXGE_HW_INFO_LEN
- 1] = '\0';
4639 vxge_debug_init(VXGE_TRACE
, "%s: SERIAL NUMBER: %s",
4640 vdev
->ndev
->name
, ll_config
->device_hw_info
.serial_number
);
4642 vxge_debug_init(VXGE_TRACE
, "%s: PART NUMBER: %s",
4643 vdev
->ndev
->name
, ll_config
->device_hw_info
.part_number
);
4645 vxge_debug_init(VXGE_TRACE
, "%s: Neterion %s Server Adapter",
4646 vdev
->ndev
->name
, ll_config
->device_hw_info
.product_desc
);
4648 vxge_debug_init(VXGE_TRACE
, "%s: MAC ADDR: %pM",
4649 vdev
->ndev
->name
, macaddr
);
4651 vxge_debug_init(VXGE_TRACE
, "%s: Link Width x%d",
4652 vdev
->ndev
->name
, vxge_hw_device_link_width_get(hldev
));
4654 vxge_debug_init(VXGE_TRACE
,
4655 "%s: Firmware version : %s Date : %s", vdev
->ndev
->name
,
4656 ll_config
->device_hw_info
.fw_version
.version
,
4657 ll_config
->device_hw_info
.fw_date
.date
);
4660 switch (ll_config
->device_hw_info
.function_mode
) {
4661 case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION
:
4662 vxge_debug_init(VXGE_TRACE
,
4663 "%s: Single Function Mode Enabled", vdev
->ndev
->name
);
4665 case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION
:
4666 vxge_debug_init(VXGE_TRACE
,
4667 "%s: Multi Function Mode Enabled", vdev
->ndev
->name
);
4669 case VXGE_HW_FUNCTION_MODE_SRIOV
:
4670 vxge_debug_init(VXGE_TRACE
,
4671 "%s: Single Root IOV Mode Enabled", vdev
->ndev
->name
);
4673 case VXGE_HW_FUNCTION_MODE_MRIOV
:
4674 vxge_debug_init(VXGE_TRACE
,
4675 "%s: Multi Root IOV Mode Enabled", vdev
->ndev
->name
);
4680 vxge_print_parm(vdev
, vpath_mask
);
4682 /* Store the fw version for ethttool option */
4683 strcpy(vdev
->fw_version
, ll_config
->device_hw_info
.fw_version
.version
);
4684 memcpy(vdev
->ndev
->dev_addr
, (u8
*)vdev
->vpaths
[0].macaddr
, ETH_ALEN
);
4685 memcpy(vdev
->ndev
->perm_addr
, vdev
->ndev
->dev_addr
, ETH_ALEN
);
4687 /* Copy the station mac address to the list */
4688 for (i
= 0; i
< vdev
->no_of_vpath
; i
++) {
4689 entry
= kzalloc(sizeof(struct vxge_mac_addrs
), GFP_KERNEL
);
4690 if (NULL
== entry
) {
4691 vxge_debug_init(VXGE_ERR
,
4692 "%s: mac_addr_list : memory allocation failed",
4697 macaddr
= (u8
*)&entry
->macaddr
;
4698 memcpy(macaddr
, vdev
->ndev
->dev_addr
, ETH_ALEN
);
4699 list_add(&entry
->item
, &vdev
->vpaths
[i
].mac_addr_list
);
4700 vdev
->vpaths
[i
].mac_addr_cnt
= 1;
4703 kfree(device_config
);
4706 * INTA is shared in multi-function mode. This is unlike the INTA
4707 * implementation in MR mode, where each VH has its own INTA message.
4708 * - INTA is masked (disabled) as long as at least one function sets
4709 * its TITAN_MASK_ALL_INT.ALARM bit.
4710 * - INTA is unmasked (enabled) when all enabled functions have cleared
4711 * their own TITAN_MASK_ALL_INT.ALARM bit.
4712 * The TITAN_MASK_ALL_INT ALARM & TRAFFIC bits are cleared on power up.
4713 * Though this driver leaves the top level interrupts unmasked while
4714 * leaving the required module interrupt bits masked on exit, there
4715 * could be a rougue driver around that does not follow this procedure
4716 * resulting in a failure to generate interrupts. The following code is
4717 * present to prevent such a failure.
4720 if (ll_config
->device_hw_info
.function_mode
==
4721 VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION
)
4722 if (vdev
->config
.intr_type
== INTA
)
4723 vxge_hw_device_unmask_all(hldev
);
4725 vxge_debug_entryexit(VXGE_TRACE
, "%s: %s:%d Exiting...",
4726 vdev
->ndev
->name
, __func__
, __LINE__
);
4728 vxge_hw_device_debug_set(hldev
, VXGE_ERR
, VXGE_COMPONENT_LL
);
4729 VXGE_COPY_DEBUG_INFO_TO_LL(vdev
, vxge_hw_device_error_level_get(hldev
),
4730 vxge_hw_device_trace_level_get(hldev
));
4736 for (i
= 0; i
< vdev
->no_of_vpath
; i
++)
4737 vxge_free_mac_add_list(&vdev
->vpaths
[i
]);
4739 vxge_device_unregister(hldev
);
4741 pci_set_drvdata(pdev
, NULL
);
4742 vxge_hw_device_terminate(hldev
);
4743 pci_disable_sriov(pdev
);
4747 pci_release_region(pdev
, 0);
4749 pci_disable_device(pdev
);
4752 kfree(device_config
);
4753 driver_config
->config_dev_cnt
--;
4754 driver_config
->total_dev_cnt
--;
4759 * vxge_rem_nic - Free the PCI device
4760 * @pdev: structure containing the PCI related information of the device.
4761 * Description: This function is called by the Pci subsystem to release a
4762 * PCI device and free up all resource held up by the device.
4764 static void __devexit
vxge_remove(struct pci_dev
*pdev
)
4766 struct __vxge_hw_device
*hldev
;
4767 struct vxgedev
*vdev
;
4770 hldev
= pci_get_drvdata(pdev
);
4774 vdev
= netdev_priv(hldev
->ndev
);
4776 vxge_debug_entryexit(vdev
->level_trace
, "%s:%d", __func__
, __LINE__
);
4777 vxge_debug_init(vdev
->level_trace
, "%s : removing PCI device...",
4780 for (i
= 0; i
< vdev
->no_of_vpath
; i
++)
4781 vxge_free_mac_add_list(&vdev
->vpaths
[i
]);
4783 vxge_device_unregister(hldev
);
4784 pci_set_drvdata(pdev
, NULL
);
4785 /* Do not call pci_disable_sriov here, as it will break child devices */
4786 vxge_hw_device_terminate(hldev
);
4787 iounmap(vdev
->bar0
);
4788 pci_release_region(pdev
, 0);
4789 pci_disable_device(pdev
);
4790 driver_config
->config_dev_cnt
--;
4791 driver_config
->total_dev_cnt
--;
4793 vxge_debug_init(vdev
->level_trace
, "%s:%d Device unregistered",
4794 __func__
, __LINE__
);
4795 vxge_debug_entryexit(vdev
->level_trace
, "%s:%d Exiting...", __func__
,
4799 static struct pci_error_handlers vxge_err_handler
= {
4800 .error_detected
= vxge_io_error_detected
,
4801 .slot_reset
= vxge_io_slot_reset
,
4802 .resume
= vxge_io_resume
,
4805 static struct pci_driver vxge_driver
= {
4806 .name
= VXGE_DRIVER_NAME
,
4807 .id_table
= vxge_id_table
,
4808 .probe
= vxge_probe
,
4809 .remove
= __devexit_p(vxge_remove
),
4811 .suspend
= vxge_pm_suspend
,
4812 .resume
= vxge_pm_resume
,
4814 .err_handler
= &vxge_err_handler
,
4822 pr_info("Copyright(c) 2002-2010 Exar Corp.\n");
4823 pr_info("Driver version: %s\n", DRV_VERSION
);
4827 driver_config
= kzalloc(sizeof(struct vxge_drv_config
), GFP_KERNEL
);
4831 ret
= pci_register_driver(&vxge_driver
);
4833 kfree(driver_config
);
4837 if (driver_config
->config_dev_cnt
&&
4838 (driver_config
->config_dev_cnt
!= driver_config
->total_dev_cnt
))
4839 vxge_debug_init(VXGE_ERR
,
4840 "%s: Configured %d of %d devices",
4841 VXGE_DRIVER_NAME
, driver_config
->config_dev_cnt
,
4842 driver_config
->total_dev_cnt
);
4850 pci_unregister_driver(&vxge_driver
);
4851 kfree(driver_config
);
4853 module_init(vxge_starter
);
4854 module_exit(vxge_closer
);