/******************************************************************************
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by reference.
 * Drivers based on or derived from this code fall under the GPL and must
 * retain the authorship, copyright and license notice. This file is not
 * a complete program and may only be used when the entire operating
 * system is licensed under the GPL.
 * See the file COPYING in this distribution for more information.
 *
 * vxge-main.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
 *              Virtualized Server Adapter.
 * Copyright(c) 2002-2010 Exar Corp.
 *
 * The module loadable parameters that are supported by the driver and a brief
 * explanation of all the variables:
 * vlan_tag_strip:
 *	Strip VLAN Tag enable/disable. Instructs the device to remove
 *	the VLAN tag from all received tagged frames that are not
 *	replicated at the internal L2 switch.
 *		0 - Do not strip the VLAN tag.
 *		1 - Strip the VLAN tag.
 * addr_learn_en:
 *	Enable learning the mac address of the guest OS interface in
 *	a virtualization environment.
 * max_config_port:
 *	Maximum number of ports to be supported.
 * max_config_vpath:
 *	This configures the maximum number of VPATHs configured for each
 *	device function.
 *	MIN - 1 and MAX - 17
 * max_config_dev:
 *	This configures the maximum number of device functions to be enabled.
 *	MIN - 1 and MAX - 17
 ******************************************************************************/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitops.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/net_tstamp.h>
#include <linux/prefetch.h>
#include <linux/module.h>

#include "vxge-main.h"
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Neterion's X3100 Series 10GbE PCIe I/O "
	"Virtualized Server Adapter");
static const struct pci_device_id vxge_id_table[] = {
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_WIN, PCI_ANY_ID,
	PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_UNI, PCI_ANY_ID,
	PCI_ANY_ID},
	{0}
};

MODULE_DEVICE_TABLE(pci, vxge_id_table);
VXGE_MODULE_PARAM_INT(vlan_tag_strip, VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE);
VXGE_MODULE_PARAM_INT(addr_learn_en, VXGE_HW_MAC_ADDR_LEARN_DEFAULT);
VXGE_MODULE_PARAM_INT(max_config_port, VXGE_MAX_CONFIG_PORT);
VXGE_MODULE_PARAM_INT(max_config_vpath, VXGE_USE_DEFAULT);
VXGE_MODULE_PARAM_INT(max_mac_vpath, VXGE_MAX_MAC_ADDR_COUNT);
VXGE_MODULE_PARAM_INT(max_config_dev, VXGE_MAX_CONFIG_DEV);

static u16 vpath_selector[VXGE_HW_MAX_VIRTUAL_PATHS] =
	{0, 1, 3, 3, 7, 7, 7, 7, 15, 15, 15, 15, 15, 15, 15, 15, 31};
static unsigned int bw_percentage[VXGE_HW_MAX_VIRTUAL_PATHS] =
	{[0 ...(VXGE_HW_MAX_VIRTUAL_PATHS - 1)] = 0xFF};
module_param_array(bw_percentage, uint, NULL, 0);
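
/*
 * Example (illustrative only, not part of the driver): a typical module
 * load from a shell using the parameters declared above. The values are
 * arbitrary; bw_percentage takes one entry per vpath.
 *
 *	modprobe vxge vlan_tag_strip=1 addr_learn_en=0 \
 *		max_config_vpath=4 bw_percentage=25,25,25,25
 */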

static struct vxge_drv_config *driver_config;

static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);

static inline int is_vxge_card_up(struct vxgedev *vdev)
{
	return test_bit(__VXGE_STATE_CARD_UP, &vdev->state);
}

static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
{
	struct sk_buff **skb_ptr = NULL;
	struct sk_buff **temp;
#define NR_SKB_COMPLETED 128
	struct sk_buff *completed[NR_SKB_COMPLETED];
	int more;

	do {
		more = 0;
		skb_ptr = completed;

		if (__netif_tx_trylock(fifo->txq)) {
			vxge_hw_vpath_poll_tx(fifo->handle, &skb_ptr,
					NR_SKB_COMPLETED, &more);
			__netif_tx_unlock(fifo->txq);
		}

		/* free SKBs */
		for (temp = completed; temp != skb_ptr; temp++)
			dev_consume_skb_irq(*temp);
	} while (more);
}

static inline void VXGE_COMPLETE_ALL_TX(struct vxgedev *vdev)
{
	int i;

	/* Complete all transmits */
	for (i = 0; i < vdev->no_of_vpath; i++)
		VXGE_COMPLETE_VPATH_TX(&vdev->vpaths[i].fifo);
}

static inline void VXGE_COMPLETE_ALL_RX(struct vxgedev *vdev)
{
	int i;
	struct vxge_ring *ring;

	/* Complete all receives */
	for (i = 0; i < vdev->no_of_vpath; i++) {
		ring = &vdev->vpaths[i].ring;
		vxge_hw_vpath_poll_rx(ring->handle);
	}
}

/*
 * vxge_callback_link_up
 *
 * This function is called during interrupt context to notify link up state
 * change.
 */
static void vxge_callback_link_up(struct __vxge_hw_device *hldev)
{
	struct net_device *dev = hldev->ndev;
	struct vxgedev *vdev = netdev_priv(dev);

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		vdev->ndev->name, __func__, __LINE__);
	netdev_notice(vdev->ndev, "Link Up\n");
	vdev->stats.link_up++;

	netif_carrier_on(vdev->ndev);
	netif_tx_wake_all_queues(vdev->ndev);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__);
}

/*
 * vxge_callback_link_down
 *
 * This function is called during interrupt context to notify link down state
 * change.
 */
static void vxge_callback_link_down(struct __vxge_hw_device *hldev)
{
	struct net_device *dev = hldev->ndev;
	struct vxgedev *vdev = netdev_priv(dev);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
	netdev_notice(vdev->ndev, "Link Down\n");

	vdev->stats.link_down++;
	netif_carrier_off(vdev->ndev);
	netif_tx_stop_all_queues(vdev->ndev);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__);
}

/*
 * vxge_rx_alloc
 *
 * Allocate SKB.
 */
static struct sk_buff *
vxge_rx_alloc(void *dtrh, struct vxge_ring *ring, const int skb_size)
{
	struct net_device *dev;
	struct sk_buff *skb;
	struct vxge_rx_priv *rx_priv;

	dev = ring->ndev;
	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);

	rx_priv = vxge_hw_ring_rxd_private_get(dtrh);

	/* try to allocate skb first. this one may fail */
	skb = netdev_alloc_skb(dev, skb_size +
		VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
	if (skb == NULL) {
		vxge_debug_mem(VXGE_ERR,
			"%s: out of memory to allocate SKB", dev->name);
		ring->stats.skb_alloc_fail++;
		return NULL;
	}

	vxge_debug_mem(VXGE_TRACE,
		"%s: %s:%d Skb : 0x%p", ring->ndev->name,
		__func__, __LINE__, skb);

	skb_reserve(skb, VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);

	rx_priv->skb = skb;
	rx_priv->skb_data = NULL;
	rx_priv->data_size = skb_size;
	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);

	return skb;
}

/*
 * vxge_rx_map
 */
static int vxge_rx_map(void *dtrh, struct vxge_ring *ring)
{
	struct vxge_rx_priv *rx_priv;
	dma_addr_t dma_addr;

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);
	rx_priv = vxge_hw_ring_rxd_private_get(dtrh);

	rx_priv->skb_data = rx_priv->skb->data;
	dma_addr = pci_map_single(ring->pdev, rx_priv->skb_data,
				rx_priv->data_size, PCI_DMA_FROMDEVICE);

	if (unlikely(pci_dma_mapping_error(ring->pdev, dma_addr))) {
		ring->stats.pci_map_fail++;
		return -EIO;
	}
	vxge_debug_mem(VXGE_TRACE,
		"%s: %s:%d 1 buffer mode dma_addr = 0x%llx",
		ring->ndev->name, __func__, __LINE__,
		(unsigned long long)dma_addr);
	vxge_hw_ring_rxd_1b_set(dtrh, dma_addr, rx_priv->data_size);

	rx_priv->data_dma = dma_addr;
	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);

	return 0;
}

/*
 * vxge_rx_initial_replenish
 * Allocation of RxD as an initial replenish procedure.
 */
static enum vxge_hw_status
vxge_rx_initial_replenish(void *dtrh, void *userdata)
{
	struct vxge_ring *ring = (struct vxge_ring *)userdata;
	struct vxge_rx_priv *rx_priv;

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);
	if (vxge_rx_alloc(dtrh, ring,
		VXGE_LL_MAX_FRAME_SIZE(ring->ndev)) == NULL)
		return VXGE_HW_FAIL;

	if (vxge_rx_map(dtrh, ring)) {
		rx_priv = vxge_hw_ring_rxd_private_get(dtrh);
		dev_kfree_skb(rx_priv->skb);

		return VXGE_HW_FAIL;
	}
	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);

	return VXGE_HW_OK;
}

static inline void
vxge_rx_complete(struct vxge_ring *ring, struct sk_buff *skb, u16 vlan,
		 int pkt_length, struct vxge_hw_ring_rxd_info *ext_info)
{
	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);
	skb_record_rx_queue(skb, ring->driver_id);
	skb->protocol = eth_type_trans(skb, ring->ndev);

	u64_stats_update_begin(&ring->stats.syncp);
	ring->stats.rx_frms++;
	ring->stats.rx_bytes += pkt_length;

	if (skb->pkt_type == PACKET_MULTICAST)
		ring->stats.rx_mcast++;
	u64_stats_update_end(&ring->stats.syncp);

	vxge_debug_rx(VXGE_TRACE,
		"%s: %s:%d skb protocol = %d",
		ring->ndev->name, __func__, __LINE__, skb->protocol);

	if (ext_info->vlan &&
	    ring->vlan_tag_strip == VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ext_info->vlan);
	napi_gro_receive(ring->napi_p, skb);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
}

static inline void vxge_re_pre_post(void *dtr, struct vxge_ring *ring,
				    struct vxge_rx_priv *rx_priv)
{
	pci_dma_sync_single_for_device(ring->pdev,
		rx_priv->data_dma, rx_priv->data_size, PCI_DMA_FROMDEVICE);

	vxge_hw_ring_rxd_1b_set(dtr, rx_priv->data_dma, rx_priv->data_size);
	vxge_hw_ring_rxd_pre_post(ring->handle, dtr);
}

static inline void vxge_post(int *dtr_cnt, void **first_dtr,
			     void *post_dtr, struct __vxge_hw_ring *ringh)
{
	int dtr_count = *dtr_cnt;

	if ((*dtr_cnt % VXGE_HW_RXSYNC_FREQ_CNT) == 0) {
		if (*first_dtr)
			vxge_hw_ring_rxd_post_post_wmb(ringh, *first_dtr);
		*first_dtr = post_dtr;
	} else
		vxge_hw_ring_rxd_post_post(ringh, post_dtr);
	dtr_count++;
	*dtr_cnt = dtr_count;
}

/*
 * vxge_rx_1b_compl
 *
 * If the interrupt is because of a received frame or if the receive ring
 * contains fresh as yet un-processed frames, this function is called.
 */
static enum vxge_hw_status
vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
		 u8 t_code, void *userdata)
{
	struct vxge_ring *ring = (struct vxge_ring *)userdata;
	struct net_device *dev = ring->ndev;
	unsigned int dma_sizes;
	void *first_dtr = NULL;
	int dtr_cnt = 0;
	int data_size;
	dma_addr_t data_dma;
	int pkt_length;
	struct sk_buff *skb;
	struct vxge_rx_priv *rx_priv;
	struct vxge_hw_ring_rxd_info ext_info;

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);

	if (ring->budget <= 0)
		goto out;

	do {
		prefetch((char *)dtr + L1_CACHE_BYTES);
		rx_priv = vxge_hw_ring_rxd_private_get(dtr);
		skb = rx_priv->skb;
		data_size = rx_priv->data_size;
		data_dma = rx_priv->data_dma;
		prefetch(rx_priv->skb_data);

		vxge_debug_rx(VXGE_TRACE,
			"%s: %s:%d skb = 0x%p",
			ring->ndev->name, __func__, __LINE__, skb);

		vxge_hw_ring_rxd_1b_get(ringh, dtr, &dma_sizes);
		pkt_length = dma_sizes;

		pkt_length -= ETH_FCS_LEN;

		vxge_debug_rx(VXGE_TRACE,
			"%s: %s:%d Packet Length = %d",
			ring->ndev->name, __func__, __LINE__, pkt_length);

		vxge_hw_ring_rxd_1b_info_get(ringh, dtr, &ext_info);

		/* check skb validity */
		vxge_assert(skb);

		prefetch((char *)skb + L1_CACHE_BYTES);
		if (unlikely(t_code)) {
			if (vxge_hw_ring_handle_tcode(ringh, dtr, t_code) !=
				VXGE_HW_OK) {

				ring->stats.rx_errors++;
				vxge_debug_rx(VXGE_TRACE,
					"%s: %s :%d Rx T_code is %d",
					ring->ndev->name, __func__,
					__LINE__, t_code);

				/* If the t_code is not supported and if the
				 * t_code is other than 0x5 (unparseable packet
				 * such as unknown UPV6 header), Drop it !!!
				 */
				vxge_re_pre_post(dtr, ring, rx_priv);

				vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
				ring->stats.rx_dropped++;
				continue;
			}
		}

		if (pkt_length > VXGE_LL_RX_COPY_THRESHOLD) {
			if (vxge_rx_alloc(dtr, ring, data_size) != NULL) {
				if (!vxge_rx_map(dtr, ring)) {
					skb_put(skb, pkt_length);

					pci_unmap_single(ring->pdev, data_dma,
						data_size, PCI_DMA_FROMDEVICE);

					vxge_hw_ring_rxd_pre_post(ringh, dtr);
					vxge_post(&dtr_cnt, &first_dtr, dtr,
						ringh);
				} else {
					dev_kfree_skb(rx_priv->skb);
					rx_priv->skb = skb;
					rx_priv->data_size = data_size;
					vxge_re_pre_post(dtr, ring, rx_priv);

					vxge_post(&dtr_cnt, &first_dtr, dtr,
						ringh);
					ring->stats.rx_dropped++;
					continue;
				}
			} else {
				vxge_re_pre_post(dtr, ring, rx_priv);

				vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
				ring->stats.rx_dropped++;
				continue;
			}
		} else {
			struct sk_buff *skb_up;

			skb_up = netdev_alloc_skb(dev, pkt_length +
				VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
			if (skb_up != NULL) {
				skb_reserve(skb_up,
					VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);

				pci_dma_sync_single_for_cpu(ring->pdev,
					data_dma, data_size,
					PCI_DMA_FROMDEVICE);

				vxge_debug_mem(VXGE_TRACE,
					"%s: %s:%d skb_up = %p",
					ring->ndev->name, __func__,
					__LINE__, skb);
				memcpy(skb_up->data, skb->data, pkt_length);

				vxge_re_pre_post(dtr, ring, rx_priv);

				vxge_post(&dtr_cnt, &first_dtr, dtr,
					ringh);
				/* will netif_rx small SKB instead */
				skb = skb_up;
				skb_put(skb, pkt_length);
			} else {
				vxge_re_pre_post(dtr, ring, rx_priv);

				vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
				vxge_debug_rx(VXGE_ERR,
					"%s: vxge_rx_1b_compl: out of "
					"memory", dev->name);
				ring->stats.skb_alloc_fail++;
				break;
			}
		}

		if ((ext_info.proto & VXGE_HW_FRAME_PROTO_TCP_OR_UDP) &&
		    !(ext_info.proto & VXGE_HW_FRAME_PROTO_IP_FRAG) &&
		    (dev->features & NETIF_F_RXCSUM) && /* Offload Rx side CSUM */
		    ext_info.l3_cksum == VXGE_HW_L3_CKSUM_OK &&
		    ext_info.l4_cksum == VXGE_HW_L4_CKSUM_OK)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		if (ring->rx_hwts) {
			struct skb_shared_hwtstamps *skb_hwts;
			u32 ns = *(u32 *)(skb->head + pkt_length);

			skb_hwts = skb_hwtstamps(skb);
			skb_hwts->hwtstamp = ns_to_ktime(ns);
		}

		/* rth_hash_type and rth_it_hit are non-zero regardless of
		 * whether rss is enabled. Only the rth_value is zero/non-zero
		 * if rss is disabled/enabled, so key off of that.
		 */
		if (ext_info.rth_value)
			skb_set_hash(skb, ext_info.rth_value,
				     PKT_HASH_TYPE_L3);

		vxge_rx_complete(ring, skb, ext_info.vlan,
			pkt_length, &ext_info);

		ring->budget--;
		ring->pkts_processed++;
		if (!ring->budget)
			break;

	} while (vxge_hw_ring_rxd_next_completed(ringh, &dtr,
		&t_code) == VXGE_HW_OK);

	if (first_dtr)
		vxge_hw_ring_rxd_post_post_wmb(ringh, first_dtr);

out:
	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...", __func__, __LINE__);
	return VXGE_HW_OK;
}

/*
 * vxge_xmit_compl
 *
 * If an interrupt was raised to indicate DMA complete of the Tx packet,
 * this function is called. It identifies the last TxD whose buffer was
 * freed and frees all skbs whose data have already DMA'ed into the NICs
 * internal memory.
 */
static enum vxge_hw_status
vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
		enum vxge_hw_fifo_tcode t_code, void *userdata,
		struct sk_buff ***skb_ptr, int nr_skb, int *more)
{
	struct vxge_fifo *fifo = (struct vxge_fifo *)userdata;
	struct sk_buff *skb, **done_skb = *skb_ptr;
	int pkt_cnt = 0;

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Entered....", __func__, __LINE__);

	do {
		int frg_cnt;
		skb_frag_t *frag;
		int i = 0, j;
		struct vxge_tx_priv *txd_priv =
			vxge_hw_fifo_txdl_private_get(dtr);

		skb = txd_priv->skb;
		frg_cnt = skb_shinfo(skb)->nr_frags;
		frag = &skb_shinfo(skb)->frags[0];

		vxge_debug_tx(VXGE_TRACE,
			"%s: %s:%d fifo_hw = %p dtr = %p "
			"tcode = 0x%x", fifo->ndev->name, __func__,
			__LINE__, fifo_hw, dtr, t_code);
		/* check skb validity */
		vxge_assert(skb);
		vxge_debug_tx(VXGE_TRACE,
			"%s: %s:%d skb = %p itxd_priv = %p frg_cnt = %d",
			fifo->ndev->name, __func__, __LINE__,
			skb, txd_priv, frg_cnt);
		if (unlikely(t_code)) {
			fifo->stats.tx_errors++;
			vxge_debug_tx(VXGE_ERR,
				"%s: tx: dtr %p completed due to "
				"error t_code %01x", fifo->ndev->name,
				dtr, t_code);
			vxge_hw_fifo_handle_tcode(fifo_hw, dtr, t_code);
		}

		/* for unfragmented skb */
		pci_unmap_single(fifo->pdev, txd_priv->dma_buffers[i++],
				skb_headlen(skb), PCI_DMA_TODEVICE);

		for (j = 0; j < frg_cnt; j++) {
			pci_unmap_page(fifo->pdev,
				       txd_priv->dma_buffers[i++],
				       skb_frag_size(frag), PCI_DMA_TODEVICE);
			frag += 1;
		}

		vxge_hw_fifo_txdl_free(fifo_hw, dtr);

		/* Updating the statistics block */
		u64_stats_update_begin(&fifo->stats.syncp);
		fifo->stats.tx_frms++;
		fifo->stats.tx_bytes += skb->len;
		u64_stats_update_end(&fifo->stats.syncp);

		*done_skb++ = skb;

		if (--nr_skb <= 0) {
			*more = 1;
			break;
		}

		pkt_cnt++;
		if (pkt_cnt > fifo->indicate_max_pkts)
			break;

	} while (vxge_hw_fifo_txdl_next_completed(fifo_hw,
				&dtr, &t_code) == VXGE_HW_OK);

	*skb_ptr = done_skb;
	if (netif_tx_queue_stopped(fifo->txq))
		netif_tx_wake_queue(fifo->txq);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...",
		fifo->ndev->name, __func__, __LINE__);
	return VXGE_HW_OK;
}

/* select a vpath to transmit the packet */
static u32 vxge_get_vpath_no(struct vxgedev *vdev, struct sk_buff *skb)
{
	u16 queue_len, counter = 0;

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *ip;
		struct tcphdr *th;

		ip = ip_hdr(skb);

		if (!ip_is_fragment(ip)) {
			th = (struct tcphdr *)(((unsigned char *)ip) +
					ip->ihl * 4);

			queue_len = vdev->no_of_vpath;
			counter = (ntohs(th->source) +
				ntohs(th->dest)) &
				vdev->vpath_selector[queue_len - 1];
			if (counter >= queue_len)
				counter = queue_len - 1;
		}
	}
	return counter;
}
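
/*
 * Worked example (illustrative only): with no_of_vpath = 4 the mask is
 * vpath_selector[3] = 3, so a TCP flow with source port 1000 and
 * destination port 80 maps to vpath (1000 + 80) & 3 = 0. The clamp in
 * vxge_get_vpath_no() only matters when no_of_vpath is not a power of
 * two; e.g. with no_of_vpath = 5 the mask is 7 and indices 5..7 are
 * folded down to 4.
 */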

static enum vxge_hw_status vxge_search_mac_addr_in_list(
	struct vxge_vpath *vpath, u64 del_mac)
{
	struct list_head *entry, *next;

	list_for_each_safe(entry, next, &vpath->mac_addr_list) {
		if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac)
			return TRUE;
	}
	return FALSE;
}
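
/*
 * Note (illustrative only): vxge_search_mac_addr_in_list() compares MAC
 * addresses as u64 values. The six address octets are memcpy'd into the
 * low bytes of a zeroed u64 (see vxge_learn_mac() below), so on a
 * little-endian host 00:0c:29:aa:bb:cc compares as 0x0000ccbbaa290c00.
 */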

static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
{
	struct vxge_mac_addrs *new_mac_entry;
	u8 *mac_address = NULL;

	if (vpath->mac_addr_cnt >= VXGE_MAX_LEARN_MAC_ADDR_CNT)
		return TRUE;

	new_mac_entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_ATOMIC);
	if (!new_mac_entry) {
		vxge_debug_mem(VXGE_ERR,
			"%s: memory allocation failed",
			VXGE_DRIVER_NAME);
		return FALSE;
	}

	list_add(&new_mac_entry->item, &vpath->mac_addr_list);

	/* Copy the new mac address to the list */
	mac_address = (u8 *)&new_mac_entry->macaddr;
	memcpy(mac_address, mac->macaddr, ETH_ALEN);

	new_mac_entry->state = mac->state;
	vpath->mac_addr_cnt++;

	if (is_multicast_ether_addr(mac->macaddr))
		vpath->mcast_addr_cnt++;

	return TRUE;
}

/* Add a mac address to DA table */
static enum vxge_hw_status
vxge_add_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_vpath *vpath;
	enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode;

	if (is_multicast_ether_addr(mac->macaddr))
		duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE;
	else
		duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE;

	vpath = &vdev->vpaths[mac->vpath_no];
	status = vxge_hw_vpath_mac_addr_add(vpath->handle, mac->macaddr,
						mac->macmask, duplicate_mode);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"DA config add entry failed for vpath:%d",
			vpath->device_id);
	} else
		if (FALSE == vxge_mac_list_add(vpath, mac))
			status = -EPERM;

	return status;
}

static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header)
{
	struct macInfo mac_info;
	u8 *mac_address = NULL;
	u64 mac_addr = 0, vpath_vector = 0;
	int vpath_idx = 0;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_vpath *vpath = NULL;

	mac_address = (u8 *)&mac_addr;
	memcpy(mac_address, mac_header, ETH_ALEN);

	/* Is this mac address already in the list? */
	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
		vpath = &vdev->vpaths[vpath_idx];
		if (vxge_search_mac_addr_in_list(vpath, mac_addr))
			return vpath_idx;
	}

	memset(&mac_info, 0, sizeof(struct macInfo));
	memcpy(mac_info.macaddr, mac_header, ETH_ALEN);

	/* Any vpath has room to add mac address to its da table? */
	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
		vpath = &vdev->vpaths[vpath_idx];
		if (vpath->mac_addr_cnt < vpath->max_mac_addr_cnt) {
			/* Add this mac address to this vpath */
			mac_info.vpath_no = vpath_idx;
			mac_info.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
			status = vxge_add_mac_addr(vdev, &mac_info);
			if (status != VXGE_HW_OK)
				return -EPERM;
			return vpath_idx;
		}
	}

	mac_info.state = VXGE_LL_MAC_ADDR_IN_LIST;
	vpath_idx = 0;
	mac_info.vpath_no = vpath_idx;
	/* Is the first vpath already selected as catch-basin ? */
	vpath = &vdev->vpaths[vpath_idx];
	if (vpath->mac_addr_cnt > vpath->max_mac_addr_cnt) {
		/* Add this mac address to this vpath */
		if (FALSE == vxge_mac_list_add(vpath, &mac_info))
			return -EPERM;
		return vpath_idx;
	}

	/* Select first vpath as catch-basin */
	vpath_vector = vxge_mBIT(vpath->device_id);
	status = vxge_hw_mgmt_reg_write(vpath->vdev->devh,
				vxge_hw_mgmt_reg_type_mrpcim,
				0,
				(ulong)offsetof(
					struct vxge_hw_mrpcim_reg,
					rts_mgr_cbasin_cfg),
				vpath_vector);
	if (status != VXGE_HW_OK) {
		vxge_debug_tx(VXGE_ERR,
			"%s: Unable to set the vpath-%d in catch-basin mode",
			VXGE_DRIVER_NAME, vpath->device_id);
		return -EPERM;
	}

	if (FALSE == vxge_mac_list_add(vpath, &mac_info))
		return -EPERM;

	return vpath_idx;
}

/**
 * vxge_xmit
 * @skb : the socket buffer containing the Tx data.
 * @dev : device pointer.
 *
 * This function is the Tx entry point of the driver. Neterion NIC supports
 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
 */
static netdev_tx_t
vxge_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vxge_fifo *fifo = NULL;
	void *dtr = NULL;
	void *dtr_priv;
	struct vxgedev *vdev = NULL;
	enum vxge_hw_status status;
	int frg_cnt, first_frg_len;
	skb_frag_t *frag;
	int i = 0, j = 0, avail;
	u64 dma_pointer;
	struct vxge_tx_priv *txdl_priv = NULL;
	struct __vxge_hw_fifo *fifo_hw;
	int offload_type;
	int vpath_no = 0;

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		dev->name, __func__, __LINE__);

	/* A buffer with no data will be dropped */
	if (unlikely(skb->len <= 0)) {
		vxge_debug_tx(VXGE_ERR,
			"%s: Buffer has no data..", dev->name);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	vdev = netdev_priv(dev);

	if (unlikely(!is_vxge_card_up(vdev))) {
		vxge_debug_tx(VXGE_ERR,
			"%s: vdev not initialized", dev->name);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (vdev->config.addr_learn_en) {
		vpath_no = vxge_learn_mac(vdev, skb->data + ETH_ALEN);
		if (vpath_no == -EPERM) {
			vxge_debug_tx(VXGE_ERR,
				"%s: Failed to store the mac address",
				dev->name);
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}

	if (vdev->config.tx_steering_type == TX_MULTIQ_STEERING)
		vpath_no = skb_get_queue_mapping(skb);
	else if (vdev->config.tx_steering_type == TX_PORT_STEERING)
		vpath_no = vxge_get_vpath_no(vdev, skb);

	vxge_debug_tx(VXGE_TRACE, "%s: vpath_no= %d", dev->name, vpath_no);

	if (vpath_no >= vdev->no_of_vpath)
		vpath_no = 0;

	fifo = &vdev->vpaths[vpath_no].fifo;
	fifo_hw = fifo->handle;

	if (netif_tx_queue_stopped(fifo->txq))
		return NETDEV_TX_BUSY;

	avail = vxge_hw_fifo_free_txdl_count_get(fifo_hw);
	if (avail == 0) {
		vxge_debug_tx(VXGE_ERR,
			"%s: No free TXDs available", dev->name);
		fifo->stats.txd_not_free++;
		goto _exit0;
	}

	/* Last TXD? Stop tx queue to avoid dropping packets. TX
	 * completion will resume the queue.
	 */
	if (avail == 1)
		netif_tx_stop_queue(fifo->txq);

	status = vxge_hw_fifo_txdl_reserve(fifo_hw, &dtr, &dtr_priv);
	if (unlikely(status != VXGE_HW_OK)) {
		vxge_debug_tx(VXGE_ERR,
			"%s: Out of descriptors .", dev->name);
		fifo->stats.txd_out_of_desc++;
		goto _exit0;
	}

	vxge_debug_tx(VXGE_TRACE,
		"%s: %s:%d fifo_hw = %p dtr = %p dtr_priv = %p",
		dev->name, __func__, __LINE__,
		fifo_hw, dtr, dtr_priv);

	if (skb_vlan_tag_present(skb)) {
		u16 vlan_tag = skb_vlan_tag_get(skb);
		vxge_hw_fifo_txdl_vlan_set(dtr, vlan_tag);
	}

	first_frg_len = skb_headlen(skb);

	dma_pointer = pci_map_single(fifo->pdev, skb->data, first_frg_len,
				PCI_DMA_TODEVICE);

	if (unlikely(pci_dma_mapping_error(fifo->pdev, dma_pointer))) {
		vxge_hw_fifo_txdl_free(fifo_hw, dtr);
		fifo->stats.pci_map_fail++;
		goto _exit0;
	}

	txdl_priv = vxge_hw_fifo_txdl_private_get(dtr);
	txdl_priv->skb = skb;
	txdl_priv->dma_buffers[j] = dma_pointer;

	frg_cnt = skb_shinfo(skb)->nr_frags;
	vxge_debug_tx(VXGE_TRACE,
		"%s: %s:%d skb = %p txdl_priv = %p "
		"frag_cnt = %d dma_pointer = 0x%llx", dev->name,
		__func__, __LINE__, skb, txdl_priv,
		frg_cnt, (unsigned long long)dma_pointer);

	vxge_hw_fifo_txdl_buffer_set(fifo_hw, dtr, j++, dma_pointer,
		first_frg_len);

	frag = &skb_shinfo(skb)->frags[0];
	for (i = 0; i < frg_cnt; i++) {
		/* ignore 0 length fragment */
		if (!skb_frag_size(frag))
			continue;

		dma_pointer = (u64)skb_frag_dma_map(&fifo->pdev->dev, frag,
						    0, skb_frag_size(frag),
						    DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(&fifo->pdev->dev, dma_pointer)))
			goto _exit2;
		vxge_debug_tx(VXGE_TRACE,
			"%s: %s:%d frag = %d dma_pointer = 0x%llx",
			dev->name, __func__, __LINE__, i,
			(unsigned long long)dma_pointer);

		txdl_priv->dma_buffers[j] = dma_pointer;
		vxge_hw_fifo_txdl_buffer_set(fifo_hw, dtr, j++, dma_pointer,
					skb_frag_size(frag));
		frag += 1;
	}

	offload_type = vxge_offload_type(skb);

	if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
		int mss = vxge_tcp_mss(skb);
		if (mss) {
			vxge_debug_tx(VXGE_TRACE, "%s: %s:%d mss = %d",
				dev->name, __func__, __LINE__, mss);
			vxge_hw_fifo_txdl_mss_set(dtr, mss);
		} else {
			vxge_assert(skb->len <=
				dev->mtu + VXGE_HW_MAC_HEADER_MAX_SIZE);
			vxge_assert(0);
			goto _exit1;
		}
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		vxge_hw_fifo_txdl_cksum_set_bits(dtr,
					VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN |
					VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN |
					VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN);

	vxge_hw_fifo_txdl_post(fifo_hw, dtr);

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...",
		dev->name, __func__, __LINE__);
	return NETDEV_TX_OK;

_exit2:
	vxge_debug_tx(VXGE_TRACE, "%s: pci_map_page failed", dev->name);
_exit1:
	j = 0;
	frag = &skb_shinfo(skb)->frags[0];

	pci_unmap_single(fifo->pdev, txdl_priv->dma_buffers[j++],
			skb_headlen(skb), PCI_DMA_TODEVICE);

	for (; j < i; j++) {
		pci_unmap_page(fifo->pdev, txdl_priv->dma_buffers[j],
			skb_frag_size(frag), PCI_DMA_TODEVICE);
		frag += 1;
	}

	vxge_hw_fifo_txdl_free(fifo_hw, dtr);
_exit0:
	netif_tx_stop_queue(fifo->txq);
	dev_kfree_skb_any(skb);

	return NETDEV_TX_OK;
}

/*
 * vxge_rx_term
 *
 * Function will be called by hw function to abort all outstanding receive
 * descriptors.
 */
static void
vxge_rx_term(void *dtrh, enum vxge_hw_rxd_state state, void *userdata)
{
	struct vxge_ring *ring = (struct vxge_ring *)userdata;
	struct vxge_rx_priv *rx_priv =
		vxge_hw_ring_rxd_private_get(dtrh);

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);
	if (state != VXGE_HW_RXD_STATE_POSTED)
		return;

	pci_unmap_single(ring->pdev, rx_priv->data_dma,
		rx_priv->data_size, PCI_DMA_FROMDEVICE);

	dev_kfree_skb(rx_priv->skb);
	rx_priv->skb_data = NULL;

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...",
		ring->ndev->name, __func__, __LINE__);
}

/*
 * vxge_tx_term
 *
 * Function will be called to abort all outstanding tx descriptors
 */
static void
vxge_tx_term(void *dtrh, enum vxge_hw_txdl_state state, void *userdata)
{
	struct vxge_fifo *fifo = (struct vxge_fifo *)userdata;
	skb_frag_t *frag;
	int i = 0, j, frg_cnt;
	struct vxge_tx_priv *txd_priv = vxge_hw_fifo_txdl_private_get(dtrh);
	struct sk_buff *skb = txd_priv->skb;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	if (state != VXGE_HW_TXDL_STATE_POSTED)
		return;

	/* check skb validity */
	vxge_assert(skb);
	frg_cnt = skb_shinfo(skb)->nr_frags;
	frag = &skb_shinfo(skb)->frags[0];

	/* for unfragmented skb */
	pci_unmap_single(fifo->pdev, txd_priv->dma_buffers[i++],
		skb_headlen(skb), PCI_DMA_TODEVICE);

	for (j = 0; j < frg_cnt; j++) {
		pci_unmap_page(fifo->pdev, txd_priv->dma_buffers[i++],
			       skb_frag_size(frag), PCI_DMA_TODEVICE);
		frag += 1;
	}

	dev_kfree_skb(skb);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...", __func__, __LINE__);
}

static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
{
	struct list_head *entry, *next;
	u64 del_mac = 0;
	u8 *mac_address = (u8 *) (&del_mac);

	/* Copy the mac address to delete from the list */
	memcpy(mac_address, mac->macaddr, ETH_ALEN);

	list_for_each_safe(entry, next, &vpath->mac_addr_list) {
		if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac) {
			list_del(entry);
			kfree((struct vxge_mac_addrs *)entry);
			vpath->mac_addr_cnt--;

			if (is_multicast_ether_addr(mac->macaddr))
				vpath->mcast_addr_cnt--;
			return TRUE;
		}
	}

	return FALSE;
}

/* delete a mac address from DA table */
static enum vxge_hw_status
vxge_del_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_vpath *vpath;

	vpath = &vdev->vpaths[mac->vpath_no];
	status = vxge_hw_vpath_mac_addr_delete(vpath->handle, mac->macaddr,
						mac->macmask);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"DA config delete entry failed for vpath:%d",
			vpath->device_id);
	} else
		vxge_mac_list_del(vpath, mac);
	return status;
}

/**
 * vxge_set_multicast
 * @dev: pointer to the device structure
 *
 * Entry point for multicast address enable/disable
 * This function is a driver entry point which gets called by the kernel
 * whenever multicast addresses must be enabled/disabled. This also gets
 * called to set/reset promiscuous mode. Depending on the device flag, we
 * determine if multicast addresses must be enabled or if promiscuous mode
 * is to be disabled etc.
 */
static void vxge_set_multicast(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	struct vxgedev *vdev;
	int i, mcast_cnt = 0;
	struct vxge_vpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct macInfo mac_info;
	int vpath_idx = 0;
	struct vxge_mac_addrs *mac_entry;
	struct list_head *list_head;
	struct list_head *entry, *next;
	u8 *mac_address = NULL;

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d", __func__, __LINE__);

	vdev = netdev_priv(dev);

	if (unlikely(!is_vxge_card_up(vdev)))
		return;

	if ((dev->flags & IFF_ALLMULTI) && (!vdev->all_multi_flg)) {
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vpath = &vdev->vpaths[i];
			vxge_assert(vpath->is_open);
			status = vxge_hw_vpath_mcast_enable(vpath->handle);
			if (status != VXGE_HW_OK)
				vxge_debug_init(VXGE_ERR, "failed to enable "
						"multicast, status %d", status);
			vdev->all_multi_flg = 1;
		}
	} else if (!(dev->flags & IFF_ALLMULTI) && (vdev->all_multi_flg)) {
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vpath = &vdev->vpaths[i];
			vxge_assert(vpath->is_open);
			status = vxge_hw_vpath_mcast_disable(vpath->handle);
			if (status != VXGE_HW_OK)
				vxge_debug_init(VXGE_ERR, "failed to disable "
						"multicast, status %d", status);
			vdev->all_multi_flg = 0;
		}
	}

	if (!vdev->config.addr_learn_en) {
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vpath = &vdev->vpaths[i];
			vxge_assert(vpath->is_open);

			if (dev->flags & IFF_PROMISC)
				status = vxge_hw_vpath_promisc_enable(
					vpath->handle);
			else
				status = vxge_hw_vpath_promisc_disable(
					vpath->handle);
			if (status != VXGE_HW_OK)
				vxge_debug_init(VXGE_ERR, "failed to %s promisc"
					", status %d", dev->flags&IFF_PROMISC ?
					"enable" : "disable", status);
		}
	}

	memset(&mac_info, 0, sizeof(struct macInfo));
	/* Update individual M_CAST address list */
	if ((!vdev->all_multi_flg) && netdev_mc_count(dev)) {
		mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
		list_head = &vdev->vpaths[0].mac_addr_list;
		if ((netdev_mc_count(dev) +
			(vdev->vpaths[0].mac_addr_cnt - mcast_cnt)) >
				vdev->vpaths[0].max_mac_addr_cnt)
			goto _set_all_mcast;

		/* Delete previous MC's */
		for (i = 0; i < mcast_cnt; i++) {
			list_for_each_safe(entry, next, list_head) {
				mac_entry = (struct vxge_mac_addrs *)entry;
				/* Copy the mac address to delete */
				mac_address = (u8 *)&mac_entry->macaddr;
				memcpy(mac_info.macaddr, mac_address, ETH_ALEN);

				if (is_multicast_ether_addr(mac_info.macaddr)) {
					for (vpath_idx = 0; vpath_idx <
						vdev->no_of_vpath;
						vpath_idx++) {
						mac_info.vpath_no = vpath_idx;
						status = vxge_del_mac_addr(
								vdev,
								&mac_info);
					}
				}
			}
		}

		/* Add new ones */
		netdev_for_each_mc_addr(ha, dev) {
			memcpy(mac_info.macaddr, ha->addr, ETH_ALEN);
			for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;
					vpath_idx++) {
				mac_info.vpath_no = vpath_idx;
				mac_info.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
				status = vxge_add_mac_addr(vdev, &mac_info);
				if (status != VXGE_HW_OK) {
					vxge_debug_init(VXGE_ERR,
						"%s:%d Setting individual "
						"multicast address failed",
						__func__, __LINE__);
					goto _set_all_mcast;
				}
			}
		}

		return;
_set_all_mcast:
		mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
		/* Delete previous MC's */
		for (i = 0; i < mcast_cnt; i++) {
			list_for_each_safe(entry, next, list_head) {
				mac_entry = (struct vxge_mac_addrs *)entry;
				/* Copy the mac address to delete */
				mac_address = (u8 *)&mac_entry->macaddr;
				memcpy(mac_info.macaddr, mac_address, ETH_ALEN);

				if (is_multicast_ether_addr(mac_info.macaddr))
					break;
			}

			for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;
					vpath_idx++) {
				mac_info.vpath_no = vpath_idx;
				status = vxge_del_mac_addr(vdev, &mac_info);
			}
		}

		/* Enable all multicast */
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vpath = &vdev->vpaths[i];
			vxge_assert(vpath->is_open);

			status = vxge_hw_vpath_mcast_enable(vpath->handle);
			if (status != VXGE_HW_OK) {
				vxge_debug_init(VXGE_ERR,
					"%s:%d Enabling all multicasts failed",
					__func__, __LINE__);
			}
			vdev->all_multi_flg = 1;
		}
		dev->flags |= IFF_ALLMULTI;
	}

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...", __func__, __LINE__);
}

/**
 * vxge_set_mac_addr
 * @dev: pointer to the device structure
 *
 * Update entry "0" (default MAC addr)
 */
static int vxge_set_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct vxgedev *vdev;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct macInfo mac_info_new, mac_info_old;
	int vpath_idx = 0;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	vdev = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memset(&mac_info_new, 0, sizeof(struct macInfo));
	memset(&mac_info_old, 0, sizeof(struct macInfo));

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d Exiting...",
		__func__, __LINE__);

	/* Get the old address */
	memcpy(mac_info_old.macaddr, dev->dev_addr, dev->addr_len);

	/* Copy the new address */
	memcpy(mac_info_new.macaddr, addr->sa_data, dev->addr_len);

	/* First delete the old mac address from all the vpaths
	as we can't specify the index while adding new mac address */
	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
		struct vxge_vpath *vpath = &vdev->vpaths[vpath_idx];
		if (!vpath->is_open) {
			/* This can happen when this interface is added/removed
			to the bonding interface. Delete this station address
			from the linked list */
			vxge_mac_list_del(vpath, &mac_info_old);

			/* Add this new address to the linked list
			for later restoring */
			vxge_mac_list_add(vpath, &mac_info_new);

			continue;
		}
		/* Delete the station address */
		mac_info_old.vpath_no = vpath_idx;
		status = vxge_del_mac_addr(vdev, &mac_info_old);
	}

	if (unlikely(!is_vxge_card_up(vdev))) {
		memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
		return VXGE_HW_OK;
	}

	/* Set this mac address to all the vpaths */
	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
		mac_info_new.vpath_no = vpath_idx;
		mac_info_new.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
		status = vxge_add_mac_addr(vdev, &mac_info_new);
		if (status != VXGE_HW_OK)
			return -EINVAL;
	}

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	return status;
}

/*
 * vxge_vpath_intr_enable
 * @vdev: pointer to vdev
 * @vp_id: vpath for which to enable the interrupts
 *
 * Enables the interrupts for the vpath
 */
static void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
{
	struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
	int msix_id = 0;
	int tim_msix_id[4] = {0, 1, 0, 0};
	int alarm_msix_id = VXGE_ALARM_MSIX_ID;

	vxge_hw_vpath_intr_enable(vpath->handle);

	if (vdev->config.intr_type == INTA)
		vxge_hw_vpath_inta_unmask_tx_rx(vpath->handle);
	else {
		vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
			alarm_msix_id);

		msix_id = vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE;
		vxge_hw_vpath_msix_unmask(vpath->handle, msix_id);
		vxge_hw_vpath_msix_unmask(vpath->handle, msix_id + 1);

		/* enable the alarm vector */
		msix_id = (vpath->handle->vpath->hldev->first_vp_id *
			VXGE_HW_VPATH_MSIX_ACTIVE) + alarm_msix_id;
		vxge_hw_vpath_msix_unmask(vpath->handle, msix_id);
	}
}
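
/*
 * Example (illustrative only): assuming VXGE_HW_VPATH_MSIX_ACTIVE is 4,
 * as defined in the vxge headers, a vpath with device_id 2 uses MSI-X
 * vectors 8 and 9 for its Tx/Rx unmasks above, and the alarm vector for
 * the function is first_vp_id * 4 + VXGE_ALARM_MSIX_ID.
 */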

/*
 * vxge_vpath_intr_disable
 * @vdev: pointer to vdev
 * @vp_id: vpath for which to disable the interrupts
 *
 * Disables the interrupts for the vpath
 */
static void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
{
	struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
	struct __vxge_hw_device *hldev;
	int msix_id;

	hldev = pci_get_drvdata(vdev->pdev);

	vxge_hw_vpath_wait_receive_idle(hldev, vpath->device_id);

	vxge_hw_vpath_intr_disable(vpath->handle);

	if (vdev->config.intr_type == INTA)
		vxge_hw_vpath_inta_mask_tx_rx(vpath->handle);
	else {
		msix_id = vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE;
		vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
		vxge_hw_vpath_msix_mask(vpath->handle, msix_id + 1);

		/* disable the alarm vector */
		msix_id = (vpath->handle->vpath->hldev->first_vp_id *
			VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
		vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
	}
}

/* list all mac addresses from DA table */
static enum vxge_hw_status
vxge_search_mac_addr_in_da_table(struct vxge_vpath *vpath, struct macInfo *mac)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	unsigned char macmask[ETH_ALEN];
	unsigned char macaddr[ETH_ALEN];

	status = vxge_hw_vpath_mac_addr_get(vpath->handle,
				macaddr, macmask);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"DA config list entry failed for vpath:%d",
			vpath->device_id);
		return status;
	}

	while (!ether_addr_equal(mac->macaddr, macaddr)) {
		status = vxge_hw_vpath_mac_addr_get_next(vpath->handle,
				macaddr, macmask);
		if (status != VXGE_HW_OK)
			break;
	}

	return status;
}

/* Store all mac addresses from the list to the DA table */
static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct macInfo mac_info;
	u8 *mac_address = NULL;
	struct list_head *entry, *next;

	memset(&mac_info, 0, sizeof(struct macInfo));

	if (vpath->is_open) {
		list_for_each_safe(entry, next, &vpath->mac_addr_list) {
			mac_address = (u8 *)&
				((struct vxge_mac_addrs *)entry)->macaddr;
			memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
			((struct vxge_mac_addrs *)entry)->state =
				VXGE_LL_MAC_ADDR_IN_DA_TABLE;
			/* does this mac address already exist in da table? */
			status = vxge_search_mac_addr_in_da_table(vpath,
				&mac_info);
			if (status != VXGE_HW_OK) {
				/* Add this mac address to the DA table */
				status = vxge_hw_vpath_mac_addr_add(
					vpath->handle, mac_info.macaddr,
					mac_info.macmask,
					VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE);
				if (status != VXGE_HW_OK) {
					vxge_debug_init(VXGE_ERR,
						"DA add entry failed for vpath:%d",
						vpath->device_id);
					((struct vxge_mac_addrs *)entry)->state
						= VXGE_LL_MAC_ADDR_IN_LIST;
				}
			}
		}
	}

	return status;
}

/* Store all vlan ids from the list to the vid table */
static enum vxge_hw_status
vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxgedev *vdev = vpath->vdev;
	u16 vid;

	if (!vpath->is_open)
		return status;

	for_each_set_bit(vid, vdev->active_vlans, VLAN_N_VID)
		status = vxge_hw_vpath_vid_add(vpath->handle, vid);

	return status;
}

/*
 * vxge_reset_vpath
 * @vdev: pointer to vdev
 * @vp_id: vpath to reset
 *
 * Resets the vpath
 */
static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
	int ret = 0;

	/* check if device is down already */
	if (unlikely(!is_vxge_card_up(vdev)))
		return 0;

	/* is device reset already scheduled */
	if (test_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
		return 0;

	if (vpath->handle) {
		if (vxge_hw_vpath_reset(vpath->handle) == VXGE_HW_OK) {
			if (is_vxge_card_up(vdev) &&
				vxge_hw_vpath_recover_from_reset(vpath->handle)
					!= VXGE_HW_OK) {
				vxge_debug_init(VXGE_ERR,
					"vxge_hw_vpath_recover_from_reset "
					"failed for vpath:%d", vp_id);
				return status;
			}
		} else {
			vxge_debug_init(VXGE_ERR,
				"vxge_hw_vpath_reset failed for "
				"vpath:%d", vp_id);
			return status;
		}
	} else
		return VXGE_HW_FAIL;

	vxge_restore_vpath_mac_addr(vpath);
	vxge_restore_vpath_vid_table(vpath);

	/* Enable all broadcast */
	vxge_hw_vpath_bcast_enable(vpath->handle);

	/* Enable all multicast */
	if (vdev->all_multi_flg) {
		status = vxge_hw_vpath_mcast_enable(vpath->handle);
		if (status != VXGE_HW_OK)
			vxge_debug_init(VXGE_ERR,
				"%s:%d Enabling multicast failed",
				__func__, __LINE__);
	}

	/* Enable the interrupts */
	vxge_vpath_intr_enable(vdev, vp_id);

	smp_wmb();

	/* Enable the flow of traffic through the vpath */
	vxge_hw_vpath_enable(vpath->handle);

	smp_wmb();
	vxge_hw_vpath_rx_doorbell_init(vpath->handle);
	vpath->ring.last_status = VXGE_HW_OK;

	/* Vpath reset done */
	clear_bit(vp_id, &vdev->vp_reset);

	/* Start the vpath queue */
	if (netif_tx_queue_stopped(vpath->fifo.txq))
		netif_tx_wake_queue(vpath->fifo.txq);

	return ret;
}

static void vxge_config_ci_for_tti_rti(struct vxgedev *vdev)
{
	int i = 0;

	/* Enable CI for RTI */
	if (vdev->config.intr_type == MSI_X) {
		for (i = 0; i < vdev->no_of_vpath; i++) {
			struct __vxge_hw_ring *hw_ring;

			hw_ring = vdev->vpaths[i].ring.handle;
			vxge_hw_vpath_dynamic_rti_ci_set(hw_ring);
		}
	}

	/* Enable CI for TTI */
	for (i = 0; i < vdev->no_of_vpath; i++) {
		struct __vxge_hw_fifo *hw_fifo = vdev->vpaths[i].fifo.handle;
		vxge_hw_vpath_tti_ci_set(hw_fifo);
		/*
		 * For Inta (with or without napi), Set CI ON for only one
		 * vpath. (Have only one free running timer).
		 */
		if ((vdev->config.intr_type == INTA) && (i == 0))
			break;
	}
}

static int do_vxge_reset(struct vxgedev *vdev, int event)
{
	enum vxge_hw_status status;
	int ret = 0, vp_id, i;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET)) {
		/* check if device is down already */
		if (unlikely(!is_vxge_card_up(vdev)))
			return 0;

		/* is reset already scheduled */
		if (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
			return 0;
	}

	if (event == VXGE_LL_FULL_RESET) {
		netif_carrier_off(vdev->ndev);

		/* wait for all the vpath reset to complete */
		for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
			while (test_bit(vp_id, &vdev->vp_reset))
				msleep(50);
		}

		netif_carrier_on(vdev->ndev);

		/* if execution mode is set to debug, don't reset the adapter */
		if (unlikely(vdev->exec_mode)) {
			vxge_debug_init(VXGE_ERR,
				"%s: execution mode is debug, returning..",
				vdev->ndev->name);
			clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
			netif_tx_stop_all_queues(vdev->ndev);
			return 0;
		}
	}

	if (event == VXGE_LL_FULL_RESET) {
		vxge_hw_device_wait_receive_idle(vdev->devh);
		vxge_hw_device_intr_disable(vdev->devh);

		switch (vdev->cric_err_event) {
		case VXGE_HW_EVENT_UNKNOWN:
			netif_tx_stop_all_queues(vdev->ndev);
			vxge_debug_init(VXGE_ERR,
				"fatal: %s: Disabling device due to "
				"unknown error",
				vdev->ndev->name);
			ret = -EPERM;
			goto out;
		case VXGE_HW_EVENT_RESET_START:
			break;
		case VXGE_HW_EVENT_RESET_COMPLETE:
		case VXGE_HW_EVENT_LINK_DOWN:
		case VXGE_HW_EVENT_LINK_UP:
		case VXGE_HW_EVENT_ALARM_CLEARED:
		case VXGE_HW_EVENT_ECCERR:
		case VXGE_HW_EVENT_MRPCIM_ECCERR:
			ret = -EPERM;
			goto out;
		case VXGE_HW_EVENT_FIFO_ERR:
		case VXGE_HW_EVENT_VPATH_ERR:
			break;
		case VXGE_HW_EVENT_CRITICAL_ERR:
			netif_tx_stop_all_queues(vdev->ndev);
			vxge_debug_init(VXGE_ERR,
				"fatal: %s: Disabling device due to "
				"serious error",
				vdev->ndev->name);
			/* SOP or device reset required */
			/* This event is not currently used */
			ret = -EPERM;
			goto out;
		case VXGE_HW_EVENT_SERR:
			netif_tx_stop_all_queues(vdev->ndev);
			vxge_debug_init(VXGE_ERR,
				"fatal: %s: Disabling device due to "
				"serious error",
				vdev->ndev->name);
			ret = -EPERM;
			goto out;
		case VXGE_HW_EVENT_SRPCIM_SERR:
		case VXGE_HW_EVENT_MRPCIM_SERR:
			ret = -EPERM;
			goto out;
		case VXGE_HW_EVENT_SLOT_FREEZE:
			netif_tx_stop_all_queues(vdev->ndev);
			vxge_debug_init(VXGE_ERR,
				"fatal: %s: Disabling device due to "
				"slot freeze",
				vdev->ndev->name);
			ret = -EPERM;
			goto out;
		default:
			break;
		}
	}

	if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET))
		netif_tx_stop_all_queues(vdev->ndev);

	if (event == VXGE_LL_FULL_RESET) {
		status = vxge_reset_all_vpaths(vdev);
		if (status != VXGE_HW_OK) {
			vxge_debug_init(VXGE_ERR,
				"fatal: %s: can not reset vpaths",
				vdev->ndev->name);
			ret = -EPERM;
			goto out;
		}
	}

	if (event == VXGE_LL_COMPL_RESET) {
		for (i = 0; i < vdev->no_of_vpath; i++)
			if (vdev->vpaths[i].handle) {
				if (vxge_hw_vpath_recover_from_reset(
					vdev->vpaths[i].handle)
						!= VXGE_HW_OK) {
					vxge_debug_init(VXGE_ERR,
						"vxge_hw_vpath_recover_"
						"from_reset failed for vpath: "
						"%d", i);
					ret = -EPERM;
					goto out;
				}
			} else {
				vxge_debug_init(VXGE_ERR,
					"vxge_hw_vpath_reset failed for "
					"vpath:%d", i);
				ret = -EPERM;
				goto out;
			}
	}

	if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_COMPL_RESET)) {
		/* Reprogram the DA table with populated mac addresses */
		for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
			vxge_restore_vpath_mac_addr(&vdev->vpaths[vp_id]);
			vxge_restore_vpath_vid_table(&vdev->vpaths[vp_id]);
		}

		/* enable vpath interrupts */
		for (i = 0; i < vdev->no_of_vpath; i++)
			vxge_vpath_intr_enable(vdev, i);

		vxge_hw_device_intr_enable(vdev->devh);

		smp_wmb();

		/* Indicate card up */
		set_bit(__VXGE_STATE_CARD_UP, &vdev->state);

		/* Get the traffic to flow through the vpaths */
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vxge_hw_vpath_enable(vdev->vpaths[i].handle);
			smp_wmb();
			vxge_hw_vpath_rx_doorbell_init(vdev->vpaths[i].handle);
		}

		netif_tx_wake_all_queues(vdev->ndev);
	}

	/* configure CI */
	vxge_config_ci_for_tti_rti(vdev);

out:
	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...", __func__, __LINE__);

	/* Indicate reset done */
	if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_COMPL_RESET))
		clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state);

	return ret;
}

/*
 * vxge_reset
 * @vdev: pointer to ll device
 *
 * driver may reset the chip on events of serr, eccerr, etc
 */
static void vxge_reset(struct work_struct *work)
{
	struct vxgedev *vdev = container_of(work, struct vxgedev, reset_task);

	if (!netif_running(vdev->ndev))
		return;

	do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
}

/**
 * vxge_poll - Receive handler when Receive Polling is used.
 * @napi: pointer to the napi structure.
 * @budget: Number of packets budgeted to be processed in this iteration.
 *
 * This function comes into picture only if Receive side is being handled
 * through polling (called NAPI in linux). It mostly does what the normal
 * Rx interrupt handler does in terms of descriptor and packet processing
 * but not in an interrupt context. Also it will process a specified number
 * of packets at most in one iteration. This value is passed down by the
 * kernel as the function argument 'budget'.
 */
static int vxge_poll_msix(struct napi_struct *napi, int budget)
{
	struct vxge_ring *ring = container_of(napi, struct vxge_ring, napi);
	int pkts_processed;
	int budget_org = budget;

	ring->budget = budget;
	ring->pkts_processed = 0;
	vxge_hw_vpath_poll_rx(ring->handle);
	pkts_processed = ring->pkts_processed;

	if (pkts_processed < budget_org) {
		napi_complete_done(napi, pkts_processed);

		/* Re enable the Rx interrupts for the vpath */
		vxge_hw_channel_msix_unmask(
				(struct __vxge_hw_channel *)ring->handle,
				ring->rx_vector_no);
	}

	/* We are copying and returning the local variable, in case if after
	 * clearing the msix interrupt above, if the interrupt fires right
	 * away which can preempt this NAPI thread */
	return pkts_processed;
}

static int vxge_poll_inta(struct napi_struct *napi, int budget)
{
	struct vxgedev *vdev = container_of(napi, struct vxgedev, napi);
	int pkts_processed = 0;
	int i;
	int budget_org = budget;
	struct vxge_ring *ring;

	struct __vxge_hw_device *hldev = pci_get_drvdata(vdev->pdev);

	for (i = 0; i < vdev->no_of_vpath; i++) {
		ring = &vdev->vpaths[i].ring;
		ring->budget = budget;
		ring->pkts_processed = 0;
		vxge_hw_vpath_poll_rx(ring->handle);
		pkts_processed += ring->pkts_processed;
		budget -= ring->pkts_processed;
		if (budget <= 0)
			break;
	}

	VXGE_COMPLETE_ALL_TX(vdev);

	if (pkts_processed < budget_org) {
		napi_complete_done(napi, pkts_processed);
		/* Re enable the Rx interrupts for the ring */
		vxge_hw_device_unmask_all(hldev);
		vxge_hw_device_flush_io(hldev);
	}

	return pkts_processed;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * vxge_netpoll - netpoll event handler entry point
 * @dev : pointer to the device structure.
 *
 * This function will be called by upper layer to check for events on the
 * interface in situations where interrupts are disabled. It is used for
 * specific in-kernel networking tasks, such as remote consoles and kernel
 * debugging over the network (example netdump in RedHat).
 */
static void vxge_netpoll(struct net_device *dev)
{
	struct vxgedev *vdev = netdev_priv(dev);
	struct pci_dev *pdev = vdev->pdev;
	struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
	const int irq = pdev->irq;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	if (pci_channel_offline(pdev))
		return;

	disable_irq(irq);
	vxge_hw_device_clear_tx_rx(hldev);

	vxge_hw_device_clear_tx_rx(hldev);
	VXGE_COMPLETE_ALL_RX(vdev);
	VXGE_COMPLETE_ALL_TX(vdev);

	enable_irq(irq);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...", __func__, __LINE__);
}
#endif

/* RTH configuration */
static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_hw_rth_hash_types hash_types;
	u8 itable[256] = {0}; /* indirection table */
	u8 mtable[256] = {0}; /* CPU to vpath mapping */
	int index;

	/*
	 * Filling
	 *	- itable with bucket numbers
	 *	- mtable with bucket-to-vpath mapping
	 */
	for (index = 0; index < (1 << vdev->config.rth_bkt_sz); index++) {
		itable[index] = index;
		mtable[index] = index % vdev->no_of_vpath;
	}

	/* set indirection table, bucket-to-vpath mapping */
	status = vxge_hw_vpath_rts_rth_itable_set(vdev->vp_handles,
						vdev->no_of_vpath,
						mtable, itable,
						vdev->config.rth_bkt_sz);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"RTH indirection table configuration failed "
			"for vpath:%d", vdev->vpaths[0].device_id);
		return status;
	}

	/* Fill RTH hash types */
	hash_types.hash_type_tcpipv4_en   = vdev->config.rth_hash_type_tcpipv4;
	hash_types.hash_type_ipv4_en      = vdev->config.rth_hash_type_ipv4;
	hash_types.hash_type_tcpipv6_en   = vdev->config.rth_hash_type_tcpipv6;
	hash_types.hash_type_ipv6_en      = vdev->config.rth_hash_type_ipv6;
	hash_types.hash_type_tcpipv6ex_en =
					vdev->config.rth_hash_type_tcpipv6ex;
	hash_types.hash_type_ipv6ex_en    = vdev->config.rth_hash_type_ipv6ex;

	/*
	 * Because the itable_set() method uses the active_table field
	 * for the target virtual path the RTH config should be updated
	 * for all VPATHs. The h/w only uses the lowest numbered VPATH
	 * when steering frames.
	 */
	for (index = 0; index < vdev->no_of_vpath; index++) {
		status = vxge_hw_vpath_rts_rth_set(
				vdev->vpaths[index].handle,
				vdev->config.rth_algorithm,
				&hash_types,
				vdev->config.rth_bkt_sz);
		if (status != VXGE_HW_OK) {
			vxge_debug_init(VXGE_ERR,
				"RTH configuration failed for vpath:%d",
				vdev->vpaths[index].device_id);
			return status;
		}
	}

	return status;
}
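
/*
 * Worked example (illustrative only): with rth_bkt_sz = 2 (four
 * buckets) and no_of_vpath = 3, the filling loop in vxge_rth_configure()
 * above yields itable = {0, 1, 2, 3} and mtable = {0, 1, 2, 0}, i.e.
 * bucket 3 wraps back to vpath 0.
 */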

static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_vpath *vpath;
	int i;

	for (i = 0; i < vdev->no_of_vpath; i++) {
		vpath = &vdev->vpaths[i];
		if (vpath->handle) {
			if (vxge_hw_vpath_reset(vpath->handle) == VXGE_HW_OK) {
				if (is_vxge_card_up(vdev) &&
					vxge_hw_vpath_recover_from_reset(
						vpath->handle) != VXGE_HW_OK) {
					vxge_debug_init(VXGE_ERR,
						"vxge_hw_vpath_recover_"
						"from_reset failed for vpath: "
						"%d", i);
					return VXGE_HW_FAIL;
				}
			} else {
				vxge_debug_init(VXGE_ERR,
					"vxge_hw_vpath_reset failed for "
					"vpath:%d", i);
				return VXGE_HW_FAIL;
			}
		}
	}

	return status;
}

/* close vpaths */
static void vxge_close_vpaths(struct vxgedev *vdev, int index)
{
	struct vxge_vpath *vpath;
	int i;

	for (i = index; i < vdev->no_of_vpath; i++) {
		vpath = &vdev->vpaths[i];

		if (vpath->handle && vpath->is_open) {
			vxge_hw_vpath_close(vpath->handle);
			vdev->stats.vpaths_open--;
		}
		vpath->is_open = 0;
		vpath->handle = NULL;
	}
}

/* open vpaths */
static int vxge_open_vpaths(struct vxgedev *vdev)
{
	struct vxge_hw_vpath_attr attr;
	enum vxge_hw_status status;
	struct vxge_vpath *vpath;
	u32 vp_id = 0;
	int i;

	for (i = 0; i < vdev->no_of_vpath; i++) {
		vpath = &vdev->vpaths[i];
		vxge_assert(vpath->is_configured);

		if (!vdev->titan1) {
			struct vxge_hw_vp_config *vcfg;
			vcfg = &vdev->devh->config.vp_config[vpath->device_id];

			vcfg->rti.urange_a = RTI_T1A_RX_URANGE_A;
			vcfg->rti.urange_b = RTI_T1A_RX_URANGE_B;
			vcfg->rti.urange_c = RTI_T1A_RX_URANGE_C;
			vcfg->tti.uec_a = TTI_T1A_TX_UFC_A;
			vcfg->tti.uec_b = TTI_T1A_TX_UFC_B;
			vcfg->tti.uec_c = TTI_T1A_TX_UFC_C(vdev->mtu);
			vcfg->tti.uec_d = TTI_T1A_TX_UFC_D(vdev->mtu);
			vcfg->tti.ltimer_val = VXGE_T1A_TTI_LTIMER_VAL;
			vcfg->tti.rtimer_val = VXGE_T1A_TTI_RTIMER_VAL;
		}

		attr.vp_id = vpath->device_id;
		attr.fifo_attr.callback = vxge_xmit_compl;
		attr.fifo_attr.txdl_term = vxge_tx_term;
		attr.fifo_attr.per_txdl_space = sizeof(struct vxge_tx_priv);
		attr.fifo_attr.userdata = &vpath->fifo;

		attr.ring_attr.callback = vxge_rx_1b_compl;
		attr.ring_attr.rxd_init = vxge_rx_initial_replenish;
		attr.ring_attr.rxd_term = vxge_rx_term;
		attr.ring_attr.per_rxd_space = sizeof(struct vxge_rx_priv);
		attr.ring_attr.userdata = &vpath->ring;

		vpath->ring.ndev = vdev->ndev;
		vpath->ring.pdev = vdev->pdev;

		status = vxge_hw_vpath_open(vdev->devh, &attr, &vpath->handle);
		if (status == VXGE_HW_OK) {
			vpath->fifo.handle =
			    (struct __vxge_hw_fifo *)attr.fifo_attr.userdata;
			vpath->ring.handle =
			    (struct __vxge_hw_ring *)attr.ring_attr.userdata;
			vpath->fifo.tx_steering_type =
				vdev->config.tx_steering_type;
			vpath->fifo.ndev = vdev->ndev;
			vpath->fifo.pdev = vdev->pdev;

			u64_stats_init(&vpath->fifo.stats.syncp);
			u64_stats_init(&vpath->ring.stats.syncp);

			if (vdev->config.tx_steering_type)
				vpath->fifo.txq =
					netdev_get_tx_queue(vdev->ndev, i);
			else
				vpath->fifo.txq =
					netdev_get_tx_queue(vdev->ndev, 0);
			vpath->fifo.indicate_max_pkts =
				vdev->config.fifo_indicate_max_pkts;
			vpath->fifo.tx_vector_no = 0;
			vpath->ring.rx_vector_no = 0;
			vpath->ring.rx_hwts = vdev->rx_hwts;
			vpath->is_open = 1;
			vdev->vp_handles[i] = vpath->handle;
			vpath->ring.vlan_tag_strip = vdev->vlan_tag_strip;
			vdev->stats.vpaths_open++;
		} else {
			vdev->stats.vpath_open_fail++;
			vxge_debug_init(VXGE_ERR, "%s: vpath: %d failed to "
					"open with status: %d",
					vdev->ndev->name, vpath->device_id,
					status);
			vxge_close_vpaths(vdev, 0);
			return -EPERM;
		}

		vp_id = vpath->handle->vpath->vp_id;
		vdev->vpaths_deployed |= vxge_mBIT(vp_id);
	}

	return VXGE_HW_OK;
}

/**
 * adaptive_coalesce_tx_interrupts - Changes the interrupt coalescing
 *   if the interrupts are not within a range
 * @fifo: pointer to transmit fifo structure
 * Description: The function changes boundary timer and restriction timer
 *   value depends on the traffic
 * Return Value: None
 */
static void adaptive_coalesce_tx_interrupts(struct vxge_fifo *fifo)
{
	fifo->interrupt_count++;
	if (time_before(fifo->jiffies + HZ / 100, jiffies)) {
		struct __vxge_hw_fifo *hw_fifo = fifo->handle;

		fifo->jiffies = jiffies;
		if (fifo->interrupt_count > VXGE_T1A_MAX_TX_INTERRUPT_COUNT &&
		    hw_fifo->rtimer != VXGE_TTI_RTIMER_ADAPT_VAL) {
			hw_fifo->rtimer = VXGE_TTI_RTIMER_ADAPT_VAL;
			vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo);
		} else if (hw_fifo->rtimer != 0) {
			hw_fifo->rtimer = 0;
			vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo);
		}
		fifo->interrupt_count = 0;
	}
}
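
/*
 * Note (illustrative only): the window above is HZ / 100 jiffies,
 * i.e. roughly 10 ms, so more than VXGE_T1A_MAX_TX_INTERRUPT_COUNT Tx
 * completion interrupts inside one window switch the restriction timer
 * to VXGE_TTI_RTIMER_ADAPT_VAL; a quieter window switches it back to 0.
 */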
2136 * adaptive_coalesce_rx_interrupts - Changes the interrupt coalescing
2137 * if the interrupts are not within a range
2138 * @ring: pointer to receive ring structure
2139 * Description: The function increases of decreases the packet counts within
2140 * the ranges of traffic utilization, if the interrupts due to this ring are
2141 * not within a fixed range.
2142 * Return Value: Nothing
2144 static void adaptive_coalesce_rx_interrupts(struct vxge_ring
*ring
)
2146 ring
->interrupt_count
++;
2147 if (time_before(ring
->jiffies
+ HZ
/ 100, jiffies
)) {
2148 struct __vxge_hw_ring
*hw_ring
= ring
->handle
;
2150 ring
->jiffies
= jiffies
;
2151 if (ring
->interrupt_count
> VXGE_T1A_MAX_INTERRUPT_COUNT
&&
2152 hw_ring
->rtimer
!= VXGE_RTI_RTIMER_ADAPT_VAL
) {
2153 hw_ring
->rtimer
= VXGE_RTI_RTIMER_ADAPT_VAL
;
2154 vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring
);
2155 } else if (hw_ring
->rtimer
!= 0) {
2156 hw_ring
->rtimer
= 0;
2157 vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring
);
2159 ring
->interrupt_count
= 0;
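/*
 * Note on the two helpers above: the interrupt rate is sampled over a
 * roughly 10 ms window (HZ / 100 jiffies).  If more interrupts than the
 * Titan-1A threshold arrived within a window, the restriction timer
 * (rtimer) is raised to the ADAPT value to throttle further interrupts;
 * once the rate drops back, rtimer is restored to zero.
 */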
/**
 * vxge_isr_napi
 * @irq: the irq of the device.
 * @dev_id: a void pointer to the hldev structure of the Titan device
 * @ptregs: pointer to the registers pushed on the stack.
 *
 * This function is the ISR handler of the device when napi is enabled. It
 * identifies the reason for the interrupt and calls the relevant service
 * routines.
 */
static irqreturn_t vxge_isr_napi(int irq, void *dev_id)
{
	struct __vxge_hw_device *hldev;
	u64 reason;
	enum vxge_hw_status status;
	struct vxgedev *vdev = (struct vxgedev *)dev_id;

	vxge_debug_intr(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	hldev = pci_get_drvdata(vdev->pdev);

	if (pci_channel_offline(vdev->pdev))
		return IRQ_NONE;

	if (unlikely(!is_vxge_card_up(vdev)))
		return IRQ_HANDLED;

	status = vxge_hw_device_begin_irq(hldev, vdev->exec_mode, &reason);
	if (status == VXGE_HW_OK) {
		vxge_hw_device_mask_all(hldev);

		if (reason &
		    VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(
			vdev->vpaths_deployed >>
			(64 - VXGE_HW_MAX_VIRTUAL_PATHS))) {

			vxge_hw_device_clear_tx_rx(hldev);
			napi_schedule(&vdev->napi);
			vxge_debug_intr(VXGE_TRACE,
				"%s:%d  Exiting...", __func__, __LINE__);
			return IRQ_HANDLED;
		} else
			vxge_hw_device_unmask_all(hldev);
	} else if (unlikely((status == VXGE_HW_ERR_VPATH) ||
		(status == VXGE_HW_ERR_CRITICAL) ||
		(status == VXGE_HW_ERR_FIFO))) {
		vxge_hw_device_mask_all(hldev);
		vxge_hw_device_flush_io(hldev);
		return IRQ_HANDLED;
	} else if (unlikely(status == VXGE_HW_ERR_SLOT_FREEZE))
		return IRQ_HANDLED;

	vxge_debug_intr(VXGE_TRACE, "%s:%d  Exiting...", __func__, __LINE__);
	return IRQ_NONE;
}
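/*
 * Note for vxge_isr_napi() above: vpaths_deployed is built with
 * vxge_mBIT(), which numbers bits from the most-significant end of the
 * 64-bit word, so it is shifted down by (64 - VXGE_HW_MAX_VIRTUAL_PATHS)
 * to line the deployed-vpath bits up with the per-vpath traffic bits of
 * the general interrupt status.
 */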
static irqreturn_t vxge_tx_msix_handle(int irq, void *dev_id)
{
	struct vxge_fifo *fifo = (struct vxge_fifo *)dev_id;

	adaptive_coalesce_tx_interrupts(fifo);

	vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)fifo->handle,
				  fifo->tx_vector_no);

	vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)fifo->handle,
				   fifo->tx_vector_no);

	VXGE_COMPLETE_VPATH_TX(fifo);

	vxge_hw_channel_msix_unmask((struct __vxge_hw_channel *)fifo->handle,
				    fifo->tx_vector_no);

	return IRQ_HANDLED;
}

static irqreturn_t vxge_rx_msix_napi_handle(int irq, void *dev_id)
{
	struct vxge_ring *ring = (struct vxge_ring *)dev_id;

	adaptive_coalesce_rx_interrupts(ring);

	vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)ring->handle,
				  ring->rx_vector_no);

	vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)ring->handle,
				   ring->rx_vector_no);

	napi_schedule(&ring->napi);
	return IRQ_HANDLED;
}
static irqreturn_t vxge_alarm_msix_handle(int irq, void *dev_id)
{
	int i;
	enum vxge_hw_status status;
	struct vxge_vpath *vpath = (struct vxge_vpath *)dev_id;
	struct vxgedev *vdev = vpath->vdev;
	int msix_id = (vpath->handle->vpath->vp_id *
		VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;

	for (i = 0; i < vdev->no_of_vpath; i++) {
		/* Reduce the chance of losing alarm interrupts by masking
		 * the vector. A pending bit will be set if an alarm is
		 * generated and on unmask the interrupt will be fired.
		 */
		vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, msix_id);
		vxge_hw_vpath_msix_clear(vdev->vpaths[i].handle, msix_id);

		status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle,
			vdev->exec_mode);
		if (status == VXGE_HW_OK) {
			vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle,
						  msix_id);
			continue;
		}
		vxge_debug_intr(VXGE_ERR,
			"%s: vxge_hw_vpath_alarm_process failed %x ",
			VXGE_DRIVER_NAME, status);
	}
	return IRQ_HANDLED;
}
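/*
 * MSI-X vector layout used by vxge_alloc_msix()/vxge_add_isr() below:
 * each vpath claims a block of VXGE_HW_VPATH_MSIX_ACTIVE vector slots,
 * with the fifo (Tx) vector at offset 0 and the ring (Rx) vector at
 * offset 1, and a single extra vector at VXGE_ALARM_MSIX_ID is shared
 * by all vpaths for alarms - no_of_vpath * 2 + 1 vectors in total.
 */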
static int vxge_alloc_msix(struct vxgedev *vdev)
{
	int j, i, ret = 0;
	int msix_intr_vect = 0, temp;

	vdev->intr_cnt = 0;

start:
	/* Tx/Rx MSIX Vectors count */
	vdev->intr_cnt = vdev->no_of_vpath * 2;

	/* Alarm MSIX Vectors count */
	vdev->intr_cnt++;

	vdev->entries = kcalloc(vdev->intr_cnt, sizeof(struct msix_entry),
				GFP_KERNEL);
	if (!vdev->entries) {
		vxge_debug_init(VXGE_ERR,
			"%s: memory allocation failed",
			VXGE_DRIVER_NAME);
		ret = -ENOMEM;
		goto alloc_entries_failed;
	}

	vdev->vxge_entries = kcalloc(vdev->intr_cnt,
				     sizeof(struct vxge_msix_entry),
				     GFP_KERNEL);
	if (!vdev->vxge_entries) {
		vxge_debug_init(VXGE_ERR, "%s: memory allocation failed",
				VXGE_DRIVER_NAME);
		ret = -ENOMEM;
		goto alloc_vxge_entries_failed;
	}

	for (i = 0, j = 0; i < vdev->no_of_vpath; i++) {

		msix_intr_vect = i * VXGE_HW_VPATH_MSIX_ACTIVE;

		/* Initialize the fifo vector */
		vdev->entries[j].entry = msix_intr_vect;
		vdev->vxge_entries[j].entry = msix_intr_vect;
		vdev->vxge_entries[j].in_use = 0;
		j++;

		/* Initialize the ring vector */
		vdev->entries[j].entry = msix_intr_vect + 1;
		vdev->vxge_entries[j].entry = msix_intr_vect + 1;
		vdev->vxge_entries[j].in_use = 0;
		j++;
	}

	/* Initialize the alarm vector */
	vdev->entries[j].entry = VXGE_ALARM_MSIX_ID;
	vdev->vxge_entries[j].entry = VXGE_ALARM_MSIX_ID;
	vdev->vxge_entries[j].in_use = 0;

	ret = pci_enable_msix_range(vdev->pdev,
				    vdev->entries, 3, vdev->intr_cnt);
	if (ret < 0) {
		ret = -ENODEV;
		goto enable_msix_failed;
	} else if (ret < vdev->intr_cnt) {
		pci_disable_msix(vdev->pdev);

		vxge_debug_init(VXGE_ERR,
			"%s: MSI-X enable failed for %d vectors, ret: %d",
			VXGE_DRIVER_NAME, vdev->intr_cnt, ret);
		if (max_config_vpath != VXGE_USE_DEFAULT) {
			ret = -ENODEV;
			goto enable_msix_failed;
		}

		kfree(vdev->entries);
		kfree(vdev->vxge_entries);
		vdev->entries = NULL;
		vdev->vxge_entries = NULL;

		/* Try with less no of vector by reducing no of vpaths count */
		temp = (ret - 1) / 2;
		vxge_close_vpaths(vdev, temp);
		vdev->no_of_vpath = temp;
		goto start;
	}
	return 0;

enable_msix_failed:
	kfree(vdev->vxge_entries);
alloc_vxge_entries_failed:
	kfree(vdev->entries);
alloc_entries_failed:
	return ret;
}
static int vxge_enable_msix(struct vxgedev *vdev)
{
	int ret = 0;
	/* 0 - Tx, 1 - Rx */
	int tim_msix_id[4] = {0, 1, 0, 0};
	int i;

	/* allocate msix vectors */
	ret = vxge_alloc_msix(vdev);
	if (!ret) {
		for (i = 0; i < vdev->no_of_vpath; i++) {
			struct vxge_vpath *vpath = &vdev->vpaths[i];

			/* If fifo or ring are not enabled, the MSIX vector for
			 * it should be set to 0.
			 */
			vpath->ring.rx_vector_no = (vpath->device_id *
						VXGE_HW_VPATH_MSIX_ACTIVE) + 1;

			vpath->fifo.tx_vector_no = (vpath->device_id *
						VXGE_HW_VPATH_MSIX_ACTIVE);

			vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
					       VXGE_ALARM_MSIX_ID);
		}
	}

	return ret;
}
static void vxge_rem_msix_isr(struct vxgedev *vdev)
{
	int intr_cnt;

	for (intr_cnt = 0; intr_cnt < (vdev->no_of_vpath * 2 + 1);
		intr_cnt++) {
		if (vdev->vxge_entries[intr_cnt].in_use) {
			synchronize_irq(vdev->entries[intr_cnt].vector);
			free_irq(vdev->entries[intr_cnt].vector,
				 vdev->vxge_entries[intr_cnt].arg);
			vdev->vxge_entries[intr_cnt].in_use = 0;
		}
	}

	kfree(vdev->entries);
	kfree(vdev->vxge_entries);
	vdev->entries = NULL;
	vdev->vxge_entries = NULL;

	if (vdev->config.intr_type == MSI_X)
		pci_disable_msix(vdev->pdev);
}

static void vxge_rem_isr(struct vxgedev *vdev)
{
	if (IS_ENABLED(CONFIG_PCI_MSI) &&
	    vdev->config.intr_type == MSI_X) {
		vxge_rem_msix_isr(vdev);
	} else if (vdev->config.intr_type == INTA) {
		synchronize_irq(vdev->pdev->irq);
		free_irq(vdev->pdev->irq, vdev);
	}
}
static int vxge_add_isr(struct vxgedev *vdev)
{
	int ret = 0;
	int vp_idx = 0, intr_idx = 0, intr_cnt = 0, msix_idx = 0, irq_req = 0;
	int pci_fun = PCI_FUNC(vdev->pdev->devfn);

	if (IS_ENABLED(CONFIG_PCI_MSI) && vdev->config.intr_type == MSI_X)
		ret = vxge_enable_msix(vdev);

	if (ret) {
		vxge_debug_init(VXGE_ERR,
			"%s: Enabling MSI-X Failed", VXGE_DRIVER_NAME);
		vxge_debug_init(VXGE_ERR,
			"%s: Defaulting to INTA", VXGE_DRIVER_NAME);
		vdev->config.intr_type = INTA;
	}

	if (IS_ENABLED(CONFIG_PCI_MSI) && vdev->config.intr_type == MSI_X) {
		for (intr_idx = 0;
		     intr_idx < (vdev->no_of_vpath *
			VXGE_HW_VPATH_MSIX_ACTIVE); intr_idx++) {

			msix_idx = intr_idx % VXGE_HW_VPATH_MSIX_ACTIVE;
			irq_req = 0;

			switch (msix_idx) {
			case 0:
				snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
					"%s:vxge:MSI-X %d - Tx - fn:%d vpath:%d",
					vdev->ndev->name,
					vdev->entries[intr_cnt].entry,
					pci_fun, vp_idx);
				ret = request_irq(
					vdev->entries[intr_cnt].vector,
					vxge_tx_msix_handle, 0,
					vdev->desc[intr_cnt],
					&vdev->vpaths[vp_idx].fifo);
				vdev->vxge_entries[intr_cnt].arg =
						&vdev->vpaths[vp_idx].fifo;
				irq_req = 1;
				break;
			case 1:
				snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
					"%s:vxge:MSI-X %d - Rx - fn:%d vpath:%d",
					vdev->ndev->name,
					vdev->entries[intr_cnt].entry,
					pci_fun, vp_idx);
				ret = request_irq(
					vdev->entries[intr_cnt].vector,
					vxge_rx_msix_napi_handle, 0,
					vdev->desc[intr_cnt],
					&vdev->vpaths[vp_idx].ring);
				vdev->vxge_entries[intr_cnt].arg =
						&vdev->vpaths[vp_idx].ring;
				irq_req = 1;
				break;
			}

			if (ret) {
				vxge_debug_init(VXGE_ERR,
					"%s: MSIX - %d Registration failed",
					vdev->ndev->name, intr_cnt);
				vxge_rem_msix_isr(vdev);
				vdev->config.intr_type = INTA;
				vxge_debug_init(VXGE_ERR,
					"%s: Defaulting to INTA",
					vdev->ndev->name);
				goto INTA_MODE;
			}

			if (irq_req) {
				/* We requested for this msix interrupt */
				vdev->vxge_entries[intr_cnt].in_use = 1;
				msix_idx += vdev->vpaths[vp_idx].device_id *
					VXGE_HW_VPATH_MSIX_ACTIVE;
				vxge_hw_vpath_msix_unmask(
					vdev->vpaths[vp_idx].handle,
					msix_idx);
				intr_cnt++;
			}

			/* Point to next vpath handler */
			if (((intr_idx + 1) % VXGE_HW_VPATH_MSIX_ACTIVE == 0) &&
			    (vp_idx < (vdev->no_of_vpath - 1)))
				vp_idx++;
		}

		intr_cnt = vdev->no_of_vpath * 2;
		snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
			"%s:vxge:MSI-X %d - Alarm - fn:%d",
			vdev->ndev->name,
			vdev->entries[intr_cnt].entry,
			pci_fun);
		/* For Alarm interrupts */
		ret = request_irq(vdev->entries[intr_cnt].vector,
				  vxge_alarm_msix_handle, 0,
				  vdev->desc[intr_cnt],
				  &vdev->vpaths[0]);
		if (ret) {
			vxge_debug_init(VXGE_ERR,
				"%s: MSIX - %d Registration failed",
				vdev->ndev->name, intr_cnt);
			vxge_rem_msix_isr(vdev);
			vdev->config.intr_type = INTA;
			vxge_debug_init(VXGE_ERR,
				"%s: Defaulting to INTA",
				vdev->ndev->name);
			goto INTA_MODE;
		}

		msix_idx = (vdev->vpaths[0].handle->vpath->vp_id *
			VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
		vxge_hw_vpath_msix_unmask(vdev->vpaths[vp_idx].handle,
					  msix_idx);
		vdev->vxge_entries[intr_cnt].in_use = 1;
		vdev->vxge_entries[intr_cnt].arg = &vdev->vpaths[0];
	}

INTA_MODE:
	if (vdev->config.intr_type == INTA) {
		snprintf(vdev->desc[0], VXGE_INTR_STRLEN,
			"%s:vxge:INTA", vdev->ndev->name);
		vxge_hw_device_set_intr_type(vdev->devh,
			VXGE_HW_INTR_MODE_IRQLINE);

		vxge_hw_vpath_tti_ci_set(vdev->vpaths[0].fifo.handle);

		ret = request_irq((int) vdev->pdev->irq,
			vxge_isr_napi,
			IRQF_SHARED, vdev->desc[0], vdev);
		if (ret) {
			vxge_debug_init(VXGE_ERR,
				"%s %s-%d: ISR registration failed",
				VXGE_DRIVER_NAME, "IRQ", vdev->pdev->irq);
			return -ENODEV;
		}
		vxge_debug_init(VXGE_TRACE,
			"new %s-%d line allocated",
			"IRQ", vdev->pdev->irq);
	}

	return VXGE_HW_OK;
}
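/*
 * Every failure path in vxge_add_isr() above degrades gracefully: if
 * MSI-X enabling or any request_irq() fails, the partially installed
 * MSI-X state is torn down via vxge_rem_msix_isr() and the driver
 * retries in legacy INTA mode instead of failing the open.
 */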
static void vxge_poll_vp_reset(struct timer_list *t)
{
	struct vxgedev *vdev = from_timer(vdev, t, vp_reset_timer);
	int i, j = 0;

	for (i = 0; i < vdev->no_of_vpath; i++) {
		if (test_bit(i, &vdev->vp_reset)) {
			vxge_reset_vpath(vdev, i);
			j++;
		}
	}
	if (j && (vdev->config.intr_type != MSI_X)) {
		vxge_hw_device_unmask_all(vdev->devh);
		vxge_hw_device_flush_io(vdev->devh);
	}

	mod_timer(&vdev->vp_reset_timer, jiffies + HZ / 2);
}
static void vxge_poll_vp_lockup(struct timer_list *t)
{
	struct vxgedev *vdev = from_timer(vdev, t, vp_lockup_timer);
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_vpath *vpath;
	struct vxge_ring *ring;
	int i;
	unsigned long rx_frms;

	for (i = 0; i < vdev->no_of_vpath; i++) {
		ring = &vdev->vpaths[i].ring;

		/* Truncated to machine word size number of frames */
		rx_frms = READ_ONCE(ring->stats.rx_frms);

		/* Did this vpath receive any packets? */
		if (ring->stats.prev_rx_frms == rx_frms) {
			status = vxge_hw_vpath_check_leak(ring->handle);

			/* Did it receive any packets last time? */
			if ((VXGE_HW_FAIL == status) &&
			    (VXGE_HW_FAIL == ring->last_status)) {

				/* schedule vpath reset */
				if (!test_and_set_bit(i, &vdev->vp_reset)) {
					vpath = &vdev->vpaths[i];

					/* disable interrupts for this vpath */
					vxge_vpath_intr_disable(vdev, i);

					/* stop the queue for this vpath */
					netif_tx_stop_queue(vpath->fifo.txq);
					continue;
				}
			}
		}
		ring->stats.prev_rx_frms = rx_frms;
		ring->last_status = status;
	}

	/* Check every 1 milli second */
	mod_timer(&vdev->vp_lockup_timer, jiffies + HZ / 1000);
}
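/*
 * The lockup detector above is deliberately two-strike: a vpath is only
 * scheduled for reset when vxge_hw_vpath_check_leak() reports
 * VXGE_HW_FAIL on two consecutive polls with no frames received in
 * between, which should keep a merely idle ring from being reset.
 */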
static netdev_features_t vxge_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;

	/* Enabling RTH requires some of the logic in vxge_device_register and
	 * a vpath reset. Due to these restrictions, only allow modification
	 * while the interface is down.
	 */
	if ((changed & NETIF_F_RXHASH) && netif_running(dev))
		features ^= NETIF_F_RXHASH;

	return features;
}

static int vxge_set_features(struct net_device *dev, netdev_features_t features)
{
	struct vxgedev *vdev = netdev_priv(dev);
	netdev_features_t changed = dev->features ^ features;

	if (!(changed & NETIF_F_RXHASH))
		return 0;

	/* !netif_running() ensured by vxge_fix_features() */

	vdev->devh->config.rth_en = !!(features & NETIF_F_RXHASH);
	if (vxge_reset_all_vpaths(vdev) != VXGE_HW_OK) {
		dev->features = features ^ NETIF_F_RXHASH;
		vdev->devh->config.rth_en = !!(dev->features & NETIF_F_RXHASH);
		return -EIO;
	}

	return 0;
}
/**
 * vxge_open
 * @dev: pointer to the device structure.
 *
 * This function is the open entry point of the driver. It mainly calls a
 * function to allocate Rx buffers and inserts them into the buffer
 * descriptors and then enables the Rx part of the NIC.
 * Return value: '0' on success and an appropriate (-)ve integer as
 * defined in errno.h file on failure.
 */
static int vxge_open(struct net_device *dev)
{
	enum vxge_hw_status status;
	struct vxgedev *vdev;
	struct __vxge_hw_device *hldev;
	struct vxge_vpath *vpath;
	int ret = 0;
	int i;
	u64 val64;

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d", dev->name, __func__, __LINE__);

	vdev = netdev_priv(dev);
	hldev = pci_get_drvdata(vdev->pdev);

	/* make sure you have link off by default every time Nic is
	 * initialized */
	netif_carrier_off(dev);

	/* Open VPATHs */
	status = vxge_open_vpaths(vdev);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"%s: fatal: Vpath open failed", vdev->ndev->name);
		ret = -EPERM;
		goto out0;
	}

	vdev->mtu = dev->mtu;

	status = vxge_add_isr(vdev);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"%s: fatal: ISR add failed", dev->name);
		ret = -EPERM;
		goto out1;
	}

	if (vdev->config.intr_type != MSI_X) {
		netif_napi_add(dev, &vdev->napi, vxge_poll_inta,
			vdev->config.napi_weight);
		napi_enable(&vdev->napi);
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vpath = &vdev->vpaths[i];
			vpath->ring.napi_p = &vdev->napi;
		}
	} else {
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vpath = &vdev->vpaths[i];
			netif_napi_add(dev, &vpath->ring.napi,
			    vxge_poll_msix, vdev->config.napi_weight);
			napi_enable(&vpath->ring.napi);
			vpath->ring.napi_p = &vpath->ring.napi;
		}
	}

	/* configure RTH */
	if (vdev->config.rth_steering) {
		status = vxge_rth_configure(vdev);
		if (status != VXGE_HW_OK) {
			vxge_debug_init(VXGE_ERR,
				"%s: fatal: RTH configuration failed",
				dev->name);
			ret = -EPERM;
			goto out2;
		}
	}
	printk(KERN_INFO "%s: Receive Hashing Offload %s\n", dev->name,
	       hldev->config.rth_en ? "enabled" : "disabled");

	for (i = 0; i < vdev->no_of_vpath; i++) {
		vpath = &vdev->vpaths[i];

		/* set initial mtu before enabling the device */
		status = vxge_hw_vpath_mtu_set(vpath->handle, vdev->mtu);
		if (status != VXGE_HW_OK) {
			vxge_debug_init(VXGE_ERR,
				"%s: fatal: can not set new MTU", dev->name);
			ret = -EPERM;
			goto out2;
		}
	}

	VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_TRACE, VXGE_COMPONENT_LL, vdev);
	vxge_debug_init(vdev->level_trace,
		"%s: MTU is %d", vdev->ndev->name, vdev->mtu);
	VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_ERR, VXGE_COMPONENT_LL, vdev);

	/* Restore the DA, VID table and also multicast and promiscuous mode
	 * states
	 */
	if (vdev->all_multi_flg) {
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vpath = &vdev->vpaths[i];
			vxge_restore_vpath_mac_addr(vpath);
			vxge_restore_vpath_vid_table(vpath);

			status = vxge_hw_vpath_mcast_enable(vpath->handle);
			if (status != VXGE_HW_OK)
				vxge_debug_init(VXGE_ERR,
					"%s:%d Enabling multicast failed",
					__func__, __LINE__);
		}
	}

	/* Enable vpath to sniff all unicast/multicast traffic that not
	 * addressed to them. We allow promiscuous mode for PF only
	 */

	val64 = 0;
	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
		val64 |= VXGE_HW_RXMAC_AUTHORIZE_ALL_ADDR_VP(i);

	vxge_hw_mgmt_reg_write(vdev->devh,
		vxge_hw_mgmt_reg_type_mrpcim,
		0,
		(ulong)offsetof(struct vxge_hw_mrpcim_reg,
			rxmac_authorize_all_addr),
		val64);

	vxge_hw_mgmt_reg_write(vdev->devh,
		vxge_hw_mgmt_reg_type_mrpcim,
		0,
		(ulong)offsetof(struct vxge_hw_mrpcim_reg,
			rxmac_authorize_all_vid),
		val64);

	vxge_set_multicast(dev);

	/* Enabling Bcast and mcast for all vpath */
	for (i = 0; i < vdev->no_of_vpath; i++) {
		vpath = &vdev->vpaths[i];
		status = vxge_hw_vpath_bcast_enable(vpath->handle);
		if (status != VXGE_HW_OK)
			vxge_debug_init(VXGE_ERR,
				"%s : Can not enable bcast for vpath "
				"id %d", dev->name, i);
		if (vdev->config.addr_learn_en) {
			status = vxge_hw_vpath_mcast_enable(vpath->handle);
			if (status != VXGE_HW_OK)
				vxge_debug_init(VXGE_ERR,
					"%s : Can not enable mcast for vpath "
					"id %d", dev->name, i);
		}
	}

	vxge_hw_device_setpause_data(vdev->devh, 0,
		vdev->config.tx_pause_enable,
		vdev->config.rx_pause_enable);

	if (vdev->vp_reset_timer.function == NULL)
		vxge_os_timer(&vdev->vp_reset_timer, vxge_poll_vp_reset,
			      HZ / 2);

	/* There is no need to check for RxD leak and RxD lookup on Titan1A */
	if (vdev->titan1 && vdev->vp_lockup_timer.function == NULL)
		vxge_os_timer(&vdev->vp_lockup_timer, vxge_poll_vp_lockup,
			      HZ / 2);

	set_bit(__VXGE_STATE_CARD_UP, &vdev->state);

	smp_wmb();

	if (vxge_hw_device_link_state_get(vdev->devh) == VXGE_HW_LINK_UP) {
		netif_carrier_on(vdev->ndev);
		netdev_notice(vdev->ndev, "Link Up\n");
		vdev->stats.link_up++;
	}

	vxge_hw_device_intr_enable(vdev->devh);

	smp_wmb();

	for (i = 0; i < vdev->no_of_vpath; i++) {
		vpath = &vdev->vpaths[i];

		vxge_hw_vpath_enable(vpath->handle);
		smp_wmb();
		vxge_hw_vpath_rx_doorbell_init(vpath->handle);
	}

	netif_tx_start_all_queues(vdev->ndev);

	/* configure CI */
	vxge_config_ci_for_tti_rti(vdev);

	goto out0;

out2:
	vxge_rem_isr(vdev);

	/* Disable napi */
	if (vdev->config.intr_type != MSI_X)
		napi_disable(&vdev->napi);
	else {
		for (i = 0; i < vdev->no_of_vpath; i++)
			napi_disable(&vdev->vpaths[i].ring.napi);
	}

out1:
	vxge_close_vpaths(vdev, 0);
out0:
	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d  Exiting...",
		dev->name, __func__, __LINE__);
	return ret;
}
/* Loop through the mac address list and delete all the entries */
static void vxge_free_mac_add_list(struct vxge_vpath *vpath)
{
	struct list_head *entry, *next;

	if (list_empty(&vpath->mac_addr_list))
		return;

	list_for_each_safe(entry, next, &vpath->mac_addr_list) {
		list_del(entry);
		kfree((struct vxge_mac_addrs *)entry);
	}
}

static void vxge_napi_del_all(struct vxgedev *vdev)
{
	int i;

	if (vdev->config.intr_type != MSI_X)
		netif_napi_del(&vdev->napi);
	else {
		for (i = 0; i < vdev->no_of_vpath; i++)
			netif_napi_del(&vdev->vpaths[i].ring.napi);
	}
}
static int do_vxge_close(struct net_device *dev, int do_io)
{
	enum vxge_hw_status status;
	struct vxgedev *vdev;
	struct __vxge_hw_device *hldev;
	int i;
	u64 val64, vpath_vector;

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		dev->name, __func__, __LINE__);

	vdev = netdev_priv(dev);
	hldev = pci_get_drvdata(vdev->pdev);

	if (unlikely(!is_vxge_card_up(vdev)))
		return 0;

	/* If vxge_handle_crit_err task is executing,
	 * wait till it completes. */
	while (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
		msleep(50);

	if (do_io) {
		/* Put the vpath back in normal mode */
		vpath_vector = vxge_mBIT(vdev->vpaths[0].device_id);
		status = vxge_hw_mgmt_reg_read(vdev->devh,
				vxge_hw_mgmt_reg_type_mrpcim,
				0,
				(ulong)offsetof(
					struct vxge_hw_mrpcim_reg,
					rts_mgr_cbasin_cfg),
				&val64);
		if (status == VXGE_HW_OK) {
			val64 &= ~vpath_vector;
			status = vxge_hw_mgmt_reg_write(vdev->devh,
					vxge_hw_mgmt_reg_type_mrpcim,
					0,
					(ulong)offsetof(
						struct vxge_hw_mrpcim_reg,
						rts_mgr_cbasin_cfg),
					val64);
		}

		/* Remove the function 0 from promiscuous mode */
		vxge_hw_mgmt_reg_write(vdev->devh,
			vxge_hw_mgmt_reg_type_mrpcim,
			0,
			(ulong)offsetof(struct vxge_hw_mrpcim_reg,
				rxmac_authorize_all_addr),
			0);

		vxge_hw_mgmt_reg_write(vdev->devh,
			vxge_hw_mgmt_reg_type_mrpcim,
			0,
			(ulong)offsetof(struct vxge_hw_mrpcim_reg,
				rxmac_authorize_all_vid),
			0);

		smp_wmb();
	}

	if (vdev->titan1)
		del_timer_sync(&vdev->vp_lockup_timer);

	del_timer_sync(&vdev->vp_reset_timer);

	if (do_io)
		vxge_hw_device_wait_receive_idle(hldev);

	clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);

	/* Disable napi */
	if (vdev->config.intr_type != MSI_X)
		napi_disable(&vdev->napi);
	else {
		for (i = 0; i < vdev->no_of_vpath; i++)
			napi_disable(&vdev->vpaths[i].ring.napi);
	}

	netif_carrier_off(vdev->ndev);
	netdev_notice(vdev->ndev, "Link Down\n");
	netif_tx_stop_all_queues(vdev->ndev);

	/* Note that at this point xmit() is stopped by upper layer */
	if (do_io)
		vxge_hw_device_intr_disable(vdev->devh);

	vxge_rem_isr(vdev);

	vxge_napi_del_all(vdev);

	if (do_io)
		vxge_reset_all_vpaths(vdev);

	vxge_close_vpaths(vdev, 0);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d  Exiting...", dev->name, __func__, __LINE__);

	clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state);

	return 0;
}
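/*
 * do_vxge_close() takes a do_io flag so the PCI error handlers can bring
 * the interface down without touching a possibly dead bus:
 * vxge_io_error_detected() calls it with do_io == 0, while the normal
 * vxge_close() path below uses do_io == 1.
 */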
/**
 * vxge_close
 * @dev: device pointer.
 *
 * This is the stop entry point of the driver. It needs to undo exactly
 * whatever was done by the open entry point, thus it's usually referred to
 * as the close function. Among other things this function mainly stops the
 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
 * Return value: '0' on success and an appropriate (-)ve integer as
 * defined in errno.h file on failure.
 */
static int vxge_close(struct net_device *dev)
{
	do_vxge_close(dev, 1);
	return 0;
}

/**
 * vxge_change_mtu
 * @dev: net device pointer.
 * @new_mtu: the new MTU size for the device.
 *
 * A driver entry point to change MTU size for the device. Before changing
 * the MTU the device must be stopped.
 */
static int vxge_change_mtu(struct net_device *dev, int new_mtu)
{
	struct vxgedev *vdev = netdev_priv(dev);

	vxge_debug_entryexit(vdev->level_trace,
		"%s:%d", __func__, __LINE__);

	/* check if device is down already */
	if (unlikely(!is_vxge_card_up(vdev))) {
		/* just store new value, will use later on open() */
		dev->mtu = new_mtu;
		vxge_debug_init(vdev->level_err,
			"%s", "device is down on MTU change");
		return 0;
	}

	vxge_debug_init(vdev->level_trace,
		"trying to apply new MTU %d", new_mtu);

	if (vxge_close(dev))
		return -EIO;

	dev->mtu = new_mtu;
	vdev->mtu = new_mtu;

	if (vxge_open(dev))
		return -EIO;

	vxge_debug_init(vdev->level_trace,
		"%s: MTU changed to %d", vdev->ndev->name, new_mtu);

	vxge_debug_entryexit(vdev->level_trace,
		"%s:%d  Exiting...", __func__, __LINE__);

	return 0;
}
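/*
 * Note that while the interface is up, vxge_change_mtu() applies the new
 * MTU with a full vxge_close()/vxge_open() cycle; when it is down, the
 * value is only recorded and takes effect on the next open.
 */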
/**
 * vxge_get_stats64
 * @dev: pointer to the device structure
 * @net_stats: pointer to struct rtnl_link_stats64
 */
static void
vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
{
	struct vxgedev *vdev = netdev_priv(dev);
	int k;

	/* net_stats already zeroed by caller */
	for (k = 0; k < vdev->no_of_vpath; k++) {
		struct vxge_ring_stats *rxstats = &vdev->vpaths[k].ring.stats;
		struct vxge_fifo_stats *txstats = &vdev->vpaths[k].fifo.stats;
		unsigned int start;
		u64 packets, bytes, multicast;

		do {
			start = u64_stats_fetch_begin_irq(&rxstats->syncp);

			packets   = rxstats->rx_frms;
			multicast = rxstats->rx_mcast;
			bytes     = rxstats->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rxstats->syncp, start));

		net_stats->rx_packets += packets;
		net_stats->rx_bytes += bytes;
		net_stats->multicast += multicast;

		net_stats->rx_errors += rxstats->rx_errors;
		net_stats->rx_dropped += rxstats->rx_dropped;

		do {
			start = u64_stats_fetch_begin_irq(&txstats->syncp);

			packets = txstats->tx_frms;
			bytes   = txstats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&txstats->syncp, start));

		net_stats->tx_packets += packets;
		net_stats->tx_bytes += bytes;
		net_stats->tx_errors += txstats->tx_errors;
	}
}
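/*
 * The u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() loops in
 * vxge_get_stats64() above give a consistent snapshot of the 64-bit
 * per-vpath counters on 32-bit hosts, re-reading if the writer bumped
 * the sequence count mid-read.
 */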
static enum vxge_hw_status vxge_timestamp_config(struct __vxge_hw_device *devh)
{
	enum vxge_hw_status status;
	u64 val64;

	/* Timestamp is passed to the driver via the FCS, therefore we
	 * must disable the FCS stripping by the adapter. Since this is
	 * required for the driver to load (due to a hardware bug),
	 * there is no need to do anything special here.
	 */
	val64 = VXGE_HW_XMAC_TIMESTAMP_EN |
		VXGE_HW_XMAC_TIMESTAMP_USE_LINK_ID(0) |
		VXGE_HW_XMAC_TIMESTAMP_INTERVAL(0);

	status = vxge_hw_mgmt_reg_write(devh,
					vxge_hw_mgmt_reg_type_mrpcim,
					0,
					offsetof(struct vxge_hw_mrpcim_reg,
						 xmac_timestamp),
					val64);
	vxge_hw_device_flush_io(devh);
	devh->config.hwts_en = VXGE_HW_HWTS_ENABLE;
	return status;
}
static int vxge_hwtstamp_set(struct vxgedev *vdev, void __user *data)
{
	struct hwtstamp_config config;
	int i;

	if (copy_from_user(&config, data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	/* Transmit HW Timestamp not supported */
	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		break;
	case HWTSTAMP_TX_ON:
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		vdev->rx_hwts = 0;
		config.rx_filter = HWTSTAMP_FILTER_NONE;
		break;

	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
	case HWTSTAMP_FILTER_NTP_ALL:
		if (vdev->devh->config.hwts_en != VXGE_HW_HWTS_ENABLE)
			return -EFAULT;

		vdev->rx_hwts = 1;
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;

	default:
		return -ERANGE;
	}

	for (i = 0; i < vdev->no_of_vpath; i++)
		vdev->vpaths[i].ring.rx_hwts = vdev->rx_hwts;

	if (copy_to_user(data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}

static int vxge_hwtstamp_get(struct vxgedev *vdev, void __user *data)
{
	struct hwtstamp_config config;

	config.flags = 0;
	config.tx_type = HWTSTAMP_TX_OFF;
	config.rx_filter = (vdev->rx_hwts ?
			    HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);

	if (copy_to_user(data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}
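/*
 * As the two hwtstamp helpers above show, the adapter timestamps
 * received frames only, and only in an all-or-nothing fashion: tx_type
 * is accepted solely as HWTSTAMP_TX_OFF, and every supported rx_filter
 * request is upgraded to HWTSTAMP_FILTER_ALL.
 */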
/**
 * vxge_ioctl
 * @dev: Device pointer.
 * @rq: An IOCTL specific structure, that can contain a pointer to
 *      a proprietary structure used to pass information to the driver.
 * @cmd: This is used to distinguish between the different commands that
 *      can be passed to the IOCTL functions.
 *
 * Entry point for the Ioctl.
 */
static int vxge_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct vxgedev *vdev = netdev_priv(dev);

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return vxge_hwtstamp_set(vdev, rq->ifr_data);
	case SIOCGHWTSTAMP:
		return vxge_hwtstamp_get(vdev, rq->ifr_data);
	default:
		return -EOPNOTSUPP;
	}
}
/**
 * vxge_tx_watchdog
 * @dev: pointer to net device structure
 * @txqueue: index of the stalled queue
 *
 * Watchdog for transmit side.
 * This function is triggered if the Tx Queue is stopped
 * for a pre-defined amount of time when the Interface is still up.
 */
static void vxge_tx_watchdog(struct net_device *dev, unsigned int txqueue)
{
	struct vxgedev *vdev;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	vdev = netdev_priv(dev);

	vdev->cric_err_event = VXGE_HW_EVENT_RESET_START;

	schedule_work(&vdev->reset_task);
	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d  Exiting...", __func__, __LINE__);
}

/**
 * vxge_vlan_rx_add_vid
 * @dev: net device pointer.
 * @proto: vlan protocol
 * @vid: vid value
 *
 * Add the vlan id to the devices vlan id table
 */
static int
vxge_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct vxgedev *vdev = netdev_priv(dev);
	struct vxge_vpath *vpath;
	int vp_id;

	/* Add these vlan to the vid table */
	for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
		vpath = &vdev->vpaths[vp_id];
		if (!vpath->is_open)
			continue;
		vxge_hw_vpath_vid_add(vpath->handle, vid);
	}
	set_bit(vid, vdev->active_vlans);
	return 0;
}

/**
 * vxge_vlan_rx_kill_vid
 * @dev: net device pointer.
 * @proto: vlan protocol
 * @vid: vid value
 *
 * Remove the vlan id from the device's vlan id table
 */
static int
vxge_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct vxgedev *vdev = netdev_priv(dev);
	struct vxge_vpath *vpath;
	int vp_id;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	/* Delete this vlan from the vid table */
	for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
		vpath = &vdev->vpaths[vp_id];
		if (!vpath->is_open)
			continue;
		vxge_hw_vpath_vid_delete(vpath->handle, vid);
	}
	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d  Exiting...", __func__, __LINE__);
	clear_bit(vid, vdev->active_vlans);
	return 0;
}

static const struct net_device_ops vxge_netdev_ops = {
	.ndo_open               = vxge_open,
	.ndo_stop               = vxge_close,
	.ndo_get_stats64        = vxge_get_stats64,
	.ndo_start_xmit         = vxge_xmit,
	.ndo_validate_addr      = eth_validate_addr,
	.ndo_set_rx_mode        = vxge_set_multicast,
	.ndo_do_ioctl           = vxge_ioctl,
	.ndo_set_mac_address    = vxge_set_mac_addr,
	.ndo_change_mtu         = vxge_change_mtu,
	.ndo_fix_features       = vxge_fix_features,
	.ndo_set_features       = vxge_set_features,
	.ndo_vlan_rx_kill_vid   = vxge_vlan_rx_kill_vid,
	.ndo_vlan_rx_add_vid    = vxge_vlan_rx_add_vid,
	.ndo_tx_timeout         = vxge_tx_watchdog,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller    = vxge_netpoll,
#endif
};
static int vxge_device_register(struct __vxge_hw_device *hldev,
				struct vxge_config *config, int high_dma,
				int no_of_vpath, struct vxgedev **vdev_out)
{
	struct net_device *ndev;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxgedev *vdev;
	int ret = 0, no_of_queue = 1;
	u64 stat;

	*vdev_out = NULL;
	if (config->tx_steering_type)
		no_of_queue = no_of_vpath;

	ndev = alloc_etherdev_mq(sizeof(struct vxgedev),
				 no_of_queue);
	if (ndev == NULL) {
		vxge_debug_init(
			vxge_hw_device_trace_level_get(hldev),
			"%s : device allocation failed", __func__);
		ret = -ENODEV;
		goto _out0;
	}

	vxge_debug_entryexit(
		vxge_hw_device_trace_level_get(hldev),
		"%s: %s:%d  Entering...",
		ndev->name, __func__, __LINE__);

	vdev = netdev_priv(ndev);
	memset(vdev, 0, sizeof(struct vxgedev));

	vdev->ndev = ndev;
	vdev->devh = hldev;
	vdev->pdev = hldev->pdev;
	memcpy(&vdev->config, config, sizeof(struct vxge_config));
	vdev->rx_hwts = 0;
	vdev->titan1 = (vdev->pdev->revision == VXGE_HW_TITAN1_PCI_REVISION);

	SET_NETDEV_DEV(ndev, &vdev->pdev->dev);

	ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_SG |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (vdev->config.rth_steering != NO_STEERING)
		ndev->hw_features |= NETIF_F_RXHASH;

	ndev->features |= ndev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	ndev->netdev_ops = &vxge_netdev_ops;

	ndev->watchdog_timeo = VXGE_LL_WATCH_DOG_TIMEOUT;
	INIT_WORK(&vdev->reset_task, vxge_reset);

	vxge_initialize_ethtool_ops(ndev);

	/* Allocate memory for vpath */
	vdev->vpaths = kcalloc(no_of_vpath, sizeof(struct vxge_vpath),
			       GFP_KERNEL);
	if (!vdev->vpaths) {
		vxge_debug_init(VXGE_ERR,
			"%s: vpath memory allocation failed",
			vdev->ndev->name);
		ret = -ENOMEM;
		goto _out1;
	}

	vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
		"%s : checksumming enabled", __func__);

	if (high_dma) {
		ndev->features |= NETIF_F_HIGHDMA;
		vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
			"%s : using High DMA", __func__);
	}

	/* MTU range: 68 - 9600 */
	ndev->min_mtu = VXGE_HW_MIN_MTU;
	ndev->max_mtu = VXGE_HW_MAX_MTU;

	ret = register_netdev(ndev);
	if (ret) {
		vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
			"%s: %s : device registration failed!",
			ndev->name, __func__);
		goto _out2;
	}

	/* Set the factory defined MAC address initially */
	ndev->addr_len = ETH_ALEN;

	/* Make Link state as off at this point, when the Link change
	 * interrupt comes the state will be automatically changed to
	 * the right state.
	 */
	netif_carrier_off(ndev);

	vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
		"%s: Ethernet device registered",
		ndev->name);

	hldev->ndev = ndev;
	*vdev_out = vdev;

	/* Resetting the Device stats */
	status = vxge_hw_mrpcim_stats_access(
			hldev,
			VXGE_HW_STATS_OP_CLEAR_ALL_STATS,
			0,
			0,
			&stat);

	if (status == VXGE_HW_ERR_PRIVILEGED_OPERATION)
		vxge_debug_init(
			vxge_hw_device_trace_level_get(hldev),
			"%s: device stats clear returns "
			"VXGE_HW_ERR_PRIVILEGED_OPERATION", ndev->name);

	vxge_debug_entryexit(vxge_hw_device_trace_level_get(hldev),
		"%s: %s:%d  Exiting...",
		ndev->name, __func__, __LINE__);

	return ret;
_out2:
	kfree(vdev->vpaths);
_out1:
	free_netdev(ndev);
_out0:
	return ret;
}
/*
 * vxge_device_unregister
 *
 * This function will unregister and free network device
 */
static void vxge_device_unregister(struct __vxge_hw_device *hldev)
{
	struct vxgedev *vdev;
	struct net_device *dev;
	char buf[IFNAMSIZ];
	u32 level_trace;

	dev = hldev->ndev;
	vdev = netdev_priv(dev);

	vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d", vdev->ndev->name,
			     __func__, __LINE__);

	strlcpy(buf, dev->name, IFNAMSIZ);
	/* cache the trace level: vdev lives inside the netdev, which is
	 * freed below, so it must not be dereferenced afterwards */
	level_trace = vdev->level_trace;

	flush_work(&vdev->reset_task);

	/* in 2.6 will call stop() if device is up */
	unregister_netdev(dev);

	kfree(vdev->vpaths);

	/* we are safe to free it now */
	free_netdev(dev);

	vxge_debug_init(level_trace, "%s: ethernet device unregistered",
			buf);
	vxge_debug_entryexit(level_trace, "%s: %s:%d  Exiting...", buf,
			     __func__, __LINE__);
}
/*
 * vxge_callback_crit_err
 *
 * This function is called by the alarm handler in interrupt context.
 * Driver must analyze it based on the event type.
 */
static void
vxge_callback_crit_err(struct __vxge_hw_device *hldev,
		       enum vxge_hw_event type, u64 vp_id)
{
	struct net_device *dev = hldev->ndev;
	struct vxgedev *vdev = netdev_priv(dev);
	struct vxge_vpath *vpath = NULL;
	int vpath_idx;

	vxge_debug_entryexit(vdev->level_trace,
		"%s: %s:%d", vdev->ndev->name, __func__, __LINE__);

	/* Note: This event type should be used for device wide
	 * indications only - Serious errors, Slot freeze and critical errors
	 */
	vdev->cric_err_event = type;

	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
		vpath = &vdev->vpaths[vpath_idx];
		if (vpath->device_id == vp_id)
			break;
	}

	if (!test_bit(__VXGE_STATE_RESET_CARD, &vdev->state)) {
		if (type == VXGE_HW_EVENT_SLOT_FREEZE) {
			vxge_debug_init(VXGE_ERR,
				"%s: Slot is frozen", vdev->ndev->name);
		} else if (type == VXGE_HW_EVENT_SERR) {
			vxge_debug_init(VXGE_ERR,
				"%s: Encountered Serious Error",
				vdev->ndev->name);
		} else if (type == VXGE_HW_EVENT_CRITICAL_ERR)
			vxge_debug_init(VXGE_ERR,
				"%s: Encountered Critical Error",
				vdev->ndev->name);
	}

	if ((type == VXGE_HW_EVENT_SERR) ||
	    (type == VXGE_HW_EVENT_SLOT_FREEZE)) {
		if (unlikely(vdev->exec_mode))
			clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
	} else if (type == VXGE_HW_EVENT_CRITICAL_ERR) {
		vxge_hw_device_mask_all(hldev);
		if (unlikely(vdev->exec_mode))
			clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
	} else if ((type == VXGE_HW_EVENT_FIFO_ERR) ||
		   (type == VXGE_HW_EVENT_VPATH_ERR)) {

		if (unlikely(vdev->exec_mode))
			clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
		else {
			/* check if this vpath is already set for reset */
			if (!test_and_set_bit(vpath_idx, &vdev->vp_reset)) {

				/* disable interrupts for this vpath */
				vxge_vpath_intr_disable(vdev, vpath_idx);

				/* stop the queue for this vpath */
				netif_tx_stop_queue(vpath->fifo.txq);
			}
		}
	}

	vxge_debug_entryexit(vdev->level_trace,
		"%s: %s:%d  Exiting...",
		vdev->ndev->name, __func__, __LINE__);
}
static void verify_bandwidth(void)
{
	int i, band_width, total = 0, equal_priority = 0;

	/* 1. If user enters 0 for some fifo, give equal priority to all */
	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (bw_percentage[i] == 0) {
			equal_priority = 1;
			break;
		}
	}

	if (!equal_priority) {
		/* 2. If sum exceeds 100, give equal priority to all */
		for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
			if (bw_percentage[i] == 0xFF)
				break;

			total += bw_percentage[i];
			if (total > VXGE_HW_VPATH_BANDWIDTH_MAX) {
				equal_priority = 1;
				break;
			}
		}
	}

	if (!equal_priority) {
		/* Is all the bandwidth consumed? */
		if (total < VXGE_HW_VPATH_BANDWIDTH_MAX) {
			if (i < VXGE_HW_MAX_VIRTUAL_PATHS) {
				/* Split rest of bw equally among next VPs */
				band_width =
				  (VXGE_HW_VPATH_BANDWIDTH_MAX - total) /
					(VXGE_HW_MAX_VIRTUAL_PATHS - i);
				if (band_width < 2) /* min of 2% */
					equal_priority = 1;
				else {
					for (; i < VXGE_HW_MAX_VIRTUAL_PATHS;
						i++)
						bw_percentage[i] = band_width;
				}
			}
		} else if (i < VXGE_HW_MAX_VIRTUAL_PATHS)
			equal_priority = 1;
	}

	if (equal_priority) {
		vxge_debug_init(VXGE_ERR,
			"%s: Assigning equal bandwidth to all the vpaths",
			VXGE_DRIVER_NAME);
		bw_percentage[0] = VXGE_HW_VPATH_BANDWIDTH_MAX /
					VXGE_HW_MAX_VIRTUAL_PATHS;
		for (i = 1; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
			bw_percentage[i] = bw_percentage[0];
	}
}
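/*
 * To summarize verify_bandwidth(): a zero entry or a sum above
 * VXGE_HW_VPATH_BANDWIDTH_MAX forces equal sharing across all vpaths;
 * otherwise any leftover bandwidth is split evenly across the
 * unspecified (0xFF) entries, subject to the 2% per-vpath minimum.
 */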
/*
 * Vpath configuration
 */
static int vxge_config_vpaths(struct vxge_hw_device_config *device_config,
			      u64 vpath_mask, struct vxge_config *config_param)
{
	int i, no_of_vpaths = 0, default_no_vpath = 0, temp;
	u32 txdl_size, txdl_per_memblock;

	temp = driver_config->vpath_per_dev;
	if ((driver_config->vpath_per_dev == VXGE_USE_DEFAULT) &&
	    (max_config_dev == VXGE_MAX_CONFIG_DEV)) {
		/* No more CPU. Return vpath number as zero. */
		if (driver_config->g_no_cpus == -1)
			return 0;

		if (!driver_config->g_no_cpus)
			driver_config->g_no_cpus =
				netif_get_num_default_rss_queues();

		driver_config->vpath_per_dev = driver_config->g_no_cpus >> 1;
		if (!driver_config->vpath_per_dev)
			driver_config->vpath_per_dev = 1;

		for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
			if (!vxge_bVALn(vpath_mask, i, 1))
				continue;
			else
				default_no_vpath++;

		if (default_no_vpath < driver_config->vpath_per_dev)
			driver_config->vpath_per_dev = default_no_vpath;

		driver_config->g_no_cpus = driver_config->g_no_cpus -
				(driver_config->vpath_per_dev * 2);
		if (driver_config->g_no_cpus <= 0)
			driver_config->g_no_cpus = -1;
	}

	if (driver_config->vpath_per_dev == 1) {
		vxge_debug_ll_config(VXGE_TRACE,
			"%s: Disable tx and rx steering, "
			"as single vpath is configured", VXGE_DRIVER_NAME);
		config_param->rth_steering = NO_STEERING;
		config_param->tx_steering_type = NO_STEERING;
		device_config->rth_en = 0;
	}

	/* configure bandwidth */
	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
		device_config->vp_config[i].min_bandwidth = bw_percentage[i];

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		device_config->vp_config[i].vp_id = i;
		device_config->vp_config[i].mtu = VXGE_HW_DEFAULT_MTU;
		if (no_of_vpaths < driver_config->vpath_per_dev) {
			if (!vxge_bVALn(vpath_mask, i, 1)) {
				vxge_debug_ll_config(VXGE_TRACE,
					"%s: vpath: %d is not available",
					VXGE_DRIVER_NAME, i);
				continue;
			} else {
				vxge_debug_ll_config(VXGE_TRACE,
					"%s: vpath: %d available",
					VXGE_DRIVER_NAME, i);
				no_of_vpaths++;
			}
		} else {
			vxge_debug_ll_config(VXGE_TRACE,
				"%s: vpath: %d is not configured, "
				"max_config_vpath exceeded",
				VXGE_DRIVER_NAME, i);
			break;
		}

		/* Configure Tx fifo's */
		device_config->vp_config[i].fifo.enable =
						VXGE_HW_FIFO_ENABLE;
		device_config->vp_config[i].fifo.max_frags =
						MAX_SKB_FRAGS + 1;
		device_config->vp_config[i].fifo.memblock_size =
						VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE;

		txdl_size = device_config->vp_config[i].fifo.max_frags *
				sizeof(struct vxge_hw_fifo_txd);
		txdl_per_memblock = VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE / txdl_size;

		device_config->vp_config[i].fifo.fifo_blocks =
			((VXGE_DEF_FIFO_LENGTH - 1) / txdl_per_memblock) + 1;

		device_config->vp_config[i].fifo.intr =
				VXGE_HW_FIFO_QUEUE_INTR_DISABLE;

		/* Configure tti properties */
		device_config->vp_config[i].tti.intr_enable =
					VXGE_HW_TIM_INTR_ENABLE;

		device_config->vp_config[i].tti.btimer_val =
			(VXGE_TTI_BTIMER_VAL * 1000) / 272;

		device_config->vp_config[i].tti.timer_ac_en =
				VXGE_HW_TIM_TIMER_AC_ENABLE;

		/* For msi-x with napi (each vector has a handler of its own) -
		 * Set CI to OFF for all vpaths
		 */
		device_config->vp_config[i].tti.timer_ci_en =
			VXGE_HW_TIM_TIMER_CI_DISABLE;

		device_config->vp_config[i].tti.timer_ri_en =
				VXGE_HW_TIM_TIMER_RI_DISABLE;

		device_config->vp_config[i].tti.util_sel =
			VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_NET_UTIL;

		device_config->vp_config[i].tti.ltimer_val =
			(VXGE_TTI_LTIMER_VAL * 1000) / 272;

		device_config->vp_config[i].tti.rtimer_val =
			(VXGE_TTI_RTIMER_VAL * 1000) / 272;

		device_config->vp_config[i].tti.urange_a = TTI_TX_URANGE_A;
		device_config->vp_config[i].tti.urange_b = TTI_TX_URANGE_B;
		device_config->vp_config[i].tti.urange_c = TTI_TX_URANGE_C;
		device_config->vp_config[i].tti.uec_a = TTI_TX_UFC_A;
		device_config->vp_config[i].tti.uec_b = TTI_TX_UFC_B;
		device_config->vp_config[i].tti.uec_c = TTI_TX_UFC_C;
		device_config->vp_config[i].tti.uec_d = TTI_TX_UFC_D;

		/* Configure Rx rings */
		device_config->vp_config[i].ring.enable =
						VXGE_HW_RING_ENABLE;

		device_config->vp_config[i].ring.ring_blocks =
						VXGE_HW_DEF_RING_BLOCKS;

		device_config->vp_config[i].ring.buffer_mode =
			VXGE_HW_RING_RXD_BUFFER_MODE_1;

		device_config->vp_config[i].ring.rxds_limit =
				VXGE_HW_DEF_RING_RXDS_LIMIT;

		device_config->vp_config[i].ring.scatter_mode =
					VXGE_HW_RING_SCATTER_MODE_A;

		/* Configure rti properties */
		device_config->vp_config[i].rti.intr_enable =
					VXGE_HW_TIM_INTR_ENABLE;

		device_config->vp_config[i].rti.btimer_val =
			(VXGE_RTI_BTIMER_VAL * 1000) / 272;

		device_config->vp_config[i].rti.timer_ac_en =
						VXGE_HW_TIM_TIMER_AC_ENABLE;

		device_config->vp_config[i].rti.timer_ci_en =
						VXGE_HW_TIM_TIMER_CI_DISABLE;

		device_config->vp_config[i].rti.timer_ri_en =
						VXGE_HW_TIM_TIMER_RI_DISABLE;

		device_config->vp_config[i].rti.util_sel =
				VXGE_HW_TIM_UTIL_SEL_LEGACY_RX_NET_UTIL;

		device_config->vp_config[i].rti.urange_a =
						RTI_RX_URANGE_A;
		device_config->vp_config[i].rti.urange_b =
						RTI_RX_URANGE_B;
		device_config->vp_config[i].rti.urange_c =
						RTI_RX_URANGE_C;
		device_config->vp_config[i].rti.uec_a = RTI_RX_UFC_A;
		device_config->vp_config[i].rti.uec_b = RTI_RX_UFC_B;
		device_config->vp_config[i].rti.uec_c = RTI_RX_UFC_C;
		device_config->vp_config[i].rti.uec_d = RTI_RX_UFC_D;

		device_config->vp_config[i].rti.rtimer_val =
			(VXGE_RTI_RTIMER_VAL * 1000) / 272;

		device_config->vp_config[i].rti.ltimer_val =
			(VXGE_RTI_LTIMER_VAL * 1000) / 272;

		device_config->vp_config[i].rpa_strip_vlan_tag =
			vlan_tag_strip;
	}

	driver_config->vpath_per_dev = temp;
	return no_of_vpaths;
}
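/*
 * The (VXGE_*TIMER_VAL * 1000) / 272 conversions above suggest that the
 * TIM timer registers count in 272 ns hardware ticks while the
 * VXGE_*TIMER_VAL constants are expressed in microseconds; treat the
 * exact tick width as an assumption inferred from the arithmetic.
 */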
/* initialize device configurations */
static void vxge_device_config_init(struct vxge_hw_device_config *device_config,
				    int *intr_type)
{
	/* Used for CQRQ/SRQ. */
	device_config->dma_blockpool_initial =
			VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE;

	device_config->dma_blockpool_max =
			VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE;

	if (max_mac_vpath > VXGE_MAX_MAC_ADDR_COUNT)
		max_mac_vpath = VXGE_MAX_MAC_ADDR_COUNT;

	if (!IS_ENABLED(CONFIG_PCI_MSI)) {
		vxge_debug_init(VXGE_ERR,
			"%s: This Kernel does not support "
			"MSI-X. Defaulting to INTA", VXGE_DRIVER_NAME);
		*intr_type = INTA;
	}

	/* Configure whether MSI-X or IRQL. */
	switch (*intr_type) {
	case INTA:
		device_config->intr_mode = VXGE_HW_INTR_MODE_IRQLINE;
		break;

	case MSI_X:
		device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX_ONE_SHOT;
		break;
	}

	/* Timer period between device poll */
	device_config->device_poll_millis = VXGE_TIMER_DELAY;

	/* Configure mac based steering. */
	device_config->rts_mac_en = addr_learn_en;

	/* Configure Vpaths */
	device_config->rth_it_type = VXGE_HW_RTH_IT_TYPE_MULTI_IT;

	vxge_debug_ll_config(VXGE_TRACE, "%s : Device Config Params ",
			__func__);
	vxge_debug_ll_config(VXGE_TRACE, "intr_mode : %d",
			device_config->intr_mode);
	vxge_debug_ll_config(VXGE_TRACE, "device_poll_millis : %d",
			device_config->device_poll_millis);
	vxge_debug_ll_config(VXGE_TRACE, "rth_en : %d",
			device_config->rth_en);
	vxge_debug_ll_config(VXGE_TRACE, "rth_it_type : %d",
			device_config->rth_it_type);
}
static void vxge_print_parm(struct vxgedev *vdev, u64 vpath_mask)
{
	int i;

	vxge_debug_init(VXGE_TRACE,
		"%s: %d Vpath(s) opened",
		vdev->ndev->name, vdev->no_of_vpath);

	switch (vdev->config.intr_type) {
	case INTA:
		vxge_debug_init(VXGE_TRACE,
			"%s: Interrupt type INTA", vdev->ndev->name);
		break;

	case MSI_X:
		vxge_debug_init(VXGE_TRACE,
			"%s: Interrupt type MSI-X", vdev->ndev->name);
		break;
	}

	if (vdev->config.rth_steering) {
		vxge_debug_init(VXGE_TRACE,
			"%s: RTH steering enabled for TCP_IPV4",
			vdev->ndev->name);
	} else {
		vxge_debug_init(VXGE_TRACE,
			"%s: RTH steering disabled", vdev->ndev->name);
	}

	switch (vdev->config.tx_steering_type) {
	case NO_STEERING:
		vxge_debug_init(VXGE_TRACE,
			"%s: Tx steering disabled", vdev->ndev->name);
		break;
	case TX_PRIORITY_STEERING:
		vxge_debug_init(VXGE_TRACE,
			"%s: Unsupported tx steering option",
			vdev->ndev->name);
		vxge_debug_init(VXGE_TRACE,
			"%s: Tx steering disabled", vdev->ndev->name);
		vdev->config.tx_steering_type = 0;
		break;
	case TX_VLAN_STEERING:
		vxge_debug_init(VXGE_TRACE,
			"%s: Unsupported tx steering option",
			vdev->ndev->name);
		vxge_debug_init(VXGE_TRACE,
			"%s: Tx steering disabled", vdev->ndev->name);
		vdev->config.tx_steering_type = 0;
		break;
	case TX_MULTIQ_STEERING:
		vxge_debug_init(VXGE_TRACE,
			"%s: Tx multiqueue steering enabled",
			vdev->ndev->name);
		break;
	case TX_PORT_STEERING:
		vxge_debug_init(VXGE_TRACE,
			"%s: Tx port steering enabled",
			vdev->ndev->name);
		break;
	default:
		vxge_debug_init(VXGE_ERR,
			"%s: Unsupported tx steering type",
			vdev->ndev->name);
		vxge_debug_init(VXGE_TRACE,
			"%s: Tx steering disabled", vdev->ndev->name);
		vdev->config.tx_steering_type = 0;
	}

	if (vdev->config.addr_learn_en)
		vxge_debug_init(VXGE_TRACE,
			"%s: MAC Address learning enabled", vdev->ndev->name);

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (!vxge_bVALn(vpath_mask, i, 1))
			continue;
		vxge_debug_ll_config(VXGE_TRACE,
			"%s: MTU size - %d", vdev->ndev->name,
			((struct __vxge_hw_device *)(vdev->devh))->
				config.vp_config[i].mtu);
		vxge_debug_init(VXGE_TRACE,
			"%s: VLAN tag stripping %s", vdev->ndev->name,
			((struct __vxge_hw_device *)(vdev->devh))->
				config.vp_config[i].rpa_strip_vlan_tag
			? "Enabled" : "Disabled");
		vxge_debug_ll_config(VXGE_TRACE,
			"%s: Max frags : %d", vdev->ndev->name,
			((struct __vxge_hw_device *)(vdev->devh))->
				config.vp_config[i].fifo.max_frags);
		break;
	}
}
/**
 * vxge_pm_suspend - vxge power management suspend entry point
 * @pdev: Pointer to PCI device
 * @state: PM message
 */
static int vxge_pm_suspend(struct pci_dev *pdev, pm_message_t state)
{
	return -ENOSYS;
}

/**
 * vxge_pm_resume - vxge power management resume entry point
 * @pdev: Pointer to PCI device
 */
static int vxge_pm_resume(struct pci_dev *pdev)
{
	return -ENOSYS;
}
/**
 * vxge_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t vxge_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
	struct net_device *netdev = hldev->ndev;

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev)) {
		/* Bring down the card, while avoiding PCI I/O */
		do_vxge_close(netdev, 0);
	}

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * vxge_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev)
{
	struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
	struct net_device *netdev = hldev->ndev;

	struct vxgedev *vdev = netdev_priv(netdev);

	if (pci_enable_device(pdev)) {
		netdev_err(netdev, "Cannot re-enable device after reset\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	do_vxge_reset(vdev, VXGE_LL_FULL_RESET);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * vxge_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void vxge_io_resume(struct pci_dev *pdev)
{
	struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
	struct net_device *netdev = hldev->ndev;

	if (netif_running(netdev)) {
		if (vxge_open(netdev)) {
			netdev_err(netdev,
				   "Can't bring device back up after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);
}
static inline u32 vxge_get_num_vfs(u64 function_mode)
{
	u32 num_functions = 0;

	switch (function_mode) {
	case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION:
	case VXGE_HW_FUNCTION_MODE_SRIOV_8:
		num_functions = 8;
		break;
	case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION:
		num_functions = 1;
		break;
	case VXGE_HW_FUNCTION_MODE_SRIOV:
	case VXGE_HW_FUNCTION_MODE_MRIOV:
	case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_17:
		num_functions = 17;
		break;
	case VXGE_HW_FUNCTION_MODE_SRIOV_4:
		num_functions = 4;
		break;
	case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_2:
		num_functions = 2;
		break;
	case VXGE_HW_FUNCTION_MODE_MRIOV_8:
		num_functions = 8; /* TODO */
		break;
	}
	return num_functions;
}
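/*
 * Callers subtract one from vxge_get_num_vfs() (see vxge_probe() below),
 * presumably because the per-mode function count includes the physical
 * function itself.
 */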
int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override)
{
	struct __vxge_hw_device *hldev = vdev->devh;
	u32 maj, min, bld, cmaj, cmin, cbld;
	enum vxge_hw_status status;
	const struct firmware *fw;
	int ret;

	ret = request_firmware(&fw, fw_name, &vdev->pdev->dev);
	if (ret) {
		vxge_debug_init(VXGE_ERR, "%s: Firmware file '%s' not found",
				VXGE_DRIVER_NAME, fw_name);
		goto out;
	}

	/* Load the new firmware onto the adapter */
	status = vxge_update_fw_image(hldev, fw->data, fw->size);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
				"%s: FW image download to adapter failed '%s'.",
				VXGE_DRIVER_NAME, fw_name);
		ret = -EIO;
		goto out;
	}

	/* Read the version of the new firmware */
	status = vxge_hw_upgrade_read_version(hldev, &maj, &min, &bld);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
				"%s: Upgrade read version failed '%s'.",
				VXGE_DRIVER_NAME, fw_name);
		ret = -EIO;
		goto out;
	}

	cmaj = vdev->config.device_hw_info.fw_version.major;
	cmin = vdev->config.device_hw_info.fw_version.minor;
	cbld = vdev->config.device_hw_info.fw_version.build;
	/* It's possible the version in /lib/firmware is not the latest version.
	 * If so, we could get into a loop of trying to upgrade to the latest
	 * and flashing the older version.
	 */
	if (VXGE_FW_VER(maj, min, bld) == VXGE_FW_VER(cmaj, cmin, cbld) &&
	    !override) {
		ret = -EINVAL;
		goto out;
	}

	printk(KERN_NOTICE "Upgrade to firmware version %d.%d.%d commencing\n",
	       maj, min, bld);

	/* Flash the adapter with the new firmware */
	status = vxge_hw_flash_fw(hldev);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR, "%s: Upgrade commit failed '%s'.",
				VXGE_DRIVER_NAME, fw_name);
		ret = -EIO;
		goto out;
	}

	printk(KERN_NOTICE "Upgrade of firmware successful! Adapter must be "
	       "hard reset before using, thus requiring a system reboot or a "
	       "hotplug event.\n");

out:
	release_firmware(fw);
	return ret;
}
static int vxge_probe_fw_update(struct vxgedev *vdev)
{
	u32 maj, min, bld;
	int ret, gpxe = 0;
	char *fw_name;

	maj = vdev->config.device_hw_info.fw_version.major;
	min = vdev->config.device_hw_info.fw_version.minor;
	bld = vdev->config.device_hw_info.fw_version.build;

	if (VXGE_FW_VER(maj, min, bld) == VXGE_CERT_FW_VER)
		return 0;

	/* Ignore the build number when determining if the current firmware is
	 * "too new" to load the driver
	 */
	if (VXGE_FW_VER(maj, min, 0) > VXGE_CERT_FW_VER) {
		vxge_debug_init(VXGE_ERR, "%s: Firmware newer than last known "
				"version, unable to load driver\n",
				VXGE_DRIVER_NAME);
		return -EINVAL;
	}

	/* Firmware 1.4.4 and older cannot be upgraded, and is too ancient to
	 * work with this driver.
	 */
	if (VXGE_FW_VER(maj, min, bld) <= VXGE_FW_DEAD_VER) {
		vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d cannot be "
				"upgraded\n", VXGE_DRIVER_NAME, maj, min, bld);
		return -EINVAL;
	}

	/* If file not specified, determine gPXE or not */
	if (VXGE_FW_VER(maj, min, bld) >= VXGE_EPROM_FW_VER) {
		int i;
		for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++)
			if (vdev->devh->eprom_versions[i]) {
				gpxe = 1;
				break;
			}
	}
	if (gpxe)
		fw_name = "vxge/X3fw-pxe.ncf";
	else
		fw_name = "vxge/X3fw.ncf";

	ret = vxge_fw_upgrade(vdev, fw_name, 0);
	/* -EINVAL and -ENOENT are not fatal errors for flashing firmware on
	 * probe, so ignore them
	 */
	if (ret != -EINVAL && ret != -ENOENT)
		return -EINVAL;
	else
		ret = 0;

	if (VXGE_FW_VER(VXGE_CERT_FW_VER_MAJOR, VXGE_CERT_FW_VER_MINOR, 0) >
	    VXGE_FW_VER(maj, min, 0)) {
		vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d is too old to"
				" be used with this driver.",
				VXGE_DRIVER_NAME, maj, min, bld);
		return -EINVAL;
	}

	return ret;
}
static int is_sriov_initialized(struct pci_dev *pdev)
{
	int pos;
	u16 ctrl;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos) {
		pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &ctrl);
		if (ctrl & PCI_SRIOV_CTRL_VFE)
			return 1;
	}
	return 0;
}

static const struct vxge_hw_uld_cbs vxge_callbacks = {
	.link_up = vxge_callback_link_up,
	.link_down = vxge_callback_link_down,
	.crit_err = vxge_callback_crit_err,
};
4288 * @pdev : structure containing the PCI related information of the device.
4289 * @pre: List of PCI devices supported by the driver listed in vxge_id_table.
4291 * This function is called when a new PCI device gets detected and initializes
4294 * returns 0 on success and negative on failure.
4298 vxge_probe(struct pci_dev
*pdev
, const struct pci_device_id
*pre
)
4300 struct __vxge_hw_device
*hldev
;
4301 enum vxge_hw_status status
;
4305 struct vxgedev
*vdev
;
4306 struct vxge_config
*ll_config
= NULL
;
4307 struct vxge_hw_device_config
*device_config
= NULL
;
4308 struct vxge_hw_device_attr attr
;
4309 int i
, j
, no_of_vpath
= 0, max_vpath_supported
= 0;
4311 struct vxge_mac_addrs
*entry
;
4312 static int bus
= -1, device
= -1;
4315 enum vxge_hw_status is_privileged
;
4319 vxge_debug_entryexit(VXGE_TRACE
, "%s:%d", __func__
, __LINE__
);
4322 /* In SRIOV-17 mode, functions of the same adapter
4323 * can be deployed on different buses
4325 if (((bus
!= pdev
->bus
->number
) || (device
!= PCI_SLOT(pdev
->devfn
))) &&
4329 bus
= pdev
->bus
->number
;
4330 device
= PCI_SLOT(pdev
->devfn
);
4333 if (driver_config
->config_dev_cnt
&&
4334 (driver_config
->config_dev_cnt
!=
4335 driver_config
->total_dev_cnt
))
4336 vxge_debug_init(VXGE_ERR
,
4337 "%s: Configured %d of %d devices",
4339 driver_config
->config_dev_cnt
,
4340 driver_config
->total_dev_cnt
);
4341 driver_config
->config_dev_cnt
= 0;
4342 driver_config
->total_dev_cnt
= 0;
4345 /* Now making the CPU based no of vpath calculation
4346 * applicable for individual functions as well.
4348 driver_config
->g_no_cpus
= 0;
4349 driver_config
->vpath_per_dev
= max_config_vpath
;
4351 driver_config
->total_dev_cnt
++;
4352 if (++driver_config
->config_dev_cnt
> max_config_dev
) {
    device_config = kzalloc(sizeof(struct vxge_hw_device_config),
                GFP_KERNEL);
    if (!device_config) {
        ret = -ENOMEM;
        vxge_debug_init(VXGE_ERR,
            "device_config : malloc failed %s %d",
            __FILE__, __LINE__);
        goto _exit0;
    }

    ll_config = kzalloc(sizeof(struct vxge_config), GFP_KERNEL);
    if (!ll_config) {
        ret = -ENOMEM;
        vxge_debug_init(VXGE_ERR,
            "ll_config : malloc failed %s %d",
            __FILE__, __LINE__);
        goto _exit0;
    }
    ll_config->tx_steering_type = TX_MULTIQ_STEERING;
    ll_config->intr_type = MSI_X;
    ll_config->napi_weight = NEW_NAPI_WEIGHT;
    ll_config->rth_steering = RTH_STEERING;

    /* get the default configuration parameters */
    vxge_hw_device_config_default_get(device_config);

    /* initialize configuration parameters */
    vxge_device_config_init(device_config, &ll_config->intr_type);
    ret = pci_enable_device(pdev);
    if (ret) {
        vxge_debug_init(VXGE_ERR,
            "%s : can not enable PCI device", __func__);
        goto _exit0;
    }
    if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
        vxge_debug_ll_config(VXGE_TRACE,
            "%s : using 64bit DMA", __func__);

        high_dma = 1;

        if (pci_set_consistent_dma_mask(pdev,
                        DMA_BIT_MASK(64))) {
            vxge_debug_init(VXGE_ERR,
                "%s : unable to obtain 64bit DMA for "
                "consistent allocations", __func__);
            ret = -ENOMEM;
            goto _exit1;
        }
    } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
        vxge_debug_ll_config(VXGE_TRACE,
            "%s : using 32bit DMA", __func__);
    } else {
        ret = -ENOMEM;
        goto _exit1;
    }
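/* Note: pci_set_dma_mask()/pci_set_consistent_dma_mask() are the legacy
 * DMA-mask API of this kernel generation; current kernels would collapse
 * both calls into dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)).
 */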
    ret = pci_request_region(pdev, 0, VXGE_DRIVER_NAME);
    if (ret) {
        vxge_debug_init(VXGE_ERR,
            "%s : request regions failed", __func__);
        goto _exit1;
    }

    pci_set_master(pdev);
    attr.bar0 = pci_ioremap_bar(pdev, 0);
    if (!attr.bar0) {
        vxge_debug_init(VXGE_ERR,
            "%s : cannot remap io memory bar0", __func__);
        ret = -ENODEV;
        goto _exit2;
    }
    vxge_debug_ll_config(VXGE_TRACE,
        "pci ioremap bar0: %p:0x%llx",
        attr.bar0,
        (unsigned long long)pci_resource_start(pdev, 0));
    status = vxge_hw_device_hw_info_get(attr.bar0,
            &ll_config->device_hw_info);
    if (status != VXGE_HW_OK) {
        vxge_debug_init(VXGE_ERR,
            "%s: Reading of hardware info failed. "
            "Please try upgrading the firmware.", VXGE_DRIVER_NAME);
        ret = -EINVAL;
        goto _exit3;
    }
    vpath_mask = ll_config->device_hw_info.vpath_mask;
    if (vpath_mask == 0) {
        vxge_debug_ll_config(VXGE_TRACE,
            "%s: No vpaths available in device", VXGE_DRIVER_NAME);
        ret = -EINVAL;
        goto _exit3;
    }

    vxge_debug_ll_config(VXGE_TRACE,
        "%s:%d Vpath mask = %llx", __func__, __LINE__,
        (unsigned long long)vpath_mask);
    function_mode = ll_config->device_hw_info.function_mode;
    host_type = ll_config->device_hw_info.host_type;
    is_privileged = __vxge_hw_device_is_privilaged(host_type,
        ll_config->device_hw_info.func_id);
    /* Check how many vpaths are available */
    for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
        if (!((vpath_mask) & vxge_mBIT(i)))
            continue;
        max_vpath_supported++;
    }
    if (new_device)
        num_vfs = vxge_get_num_vfs(function_mode) - 1;

    /* Enable SRIOV mode, if firmware has SRIOV support and if it is a PF */
    if (is_sriov(function_mode) && !is_sriov_initialized(pdev) &&
        (ll_config->intr_type != INTA)) {
        ret = pci_enable_sriov(pdev, num_vfs);
        if (ret)
            vxge_debug_ll_config(VXGE_ERR,
                "Failed in enabling SRIOV mode: %d\n", ret);
        /* No need to fail out, as an error here is non-fatal */
    }
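/* Once pci_enable_sriov() succeeds, the VFs appear as ordinary PCI
 * functions and are (presumably) probed through this same vxge_probe path
 * as unprivileged functions; the is_virtfn test above keeps them from
 * resetting the per-adapter device counters.
 */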
    /*
     * Configure vpaths and get driver configured number of vpaths
     * which is less than or equal to the maximum vpaths per function.
     */
    no_of_vpath = vxge_config_vpaths(device_config, vpath_mask, ll_config);
    if (!no_of_vpath) {
        vxge_debug_ll_config(VXGE_ERR,
            "%s: No more vpaths to configure", VXGE_DRIVER_NAME);
        ret = 0;
        goto _exit3;
    }
    /* Setting driver callbacks */
    attr.uld_callbacks = &vxge_callbacks;

    status = vxge_hw_device_initialize(&hldev, &attr, device_config);
    if (status != VXGE_HW_OK) {
        vxge_debug_init(VXGE_ERR,
            "Failed to initialize device (%d)", status);
        ret = -EINVAL;
        goto _exit3;
    }
    if (VXGE_FW_VER(ll_config->device_hw_info.fw_version.major,
            ll_config->device_hw_info.fw_version.minor,
            ll_config->device_hw_info.fw_version.build) >=
        VXGE_EPROM_FW_VER) {
        struct eprom_image img[VXGE_HW_MAX_ROM_IMAGES];

        status = vxge_hw_vpath_eprom_img_ver_get(hldev, img);
        if (status != VXGE_HW_OK) {
            vxge_debug_init(VXGE_ERR, "%s: Reading of EPROM failed",
                    VXGE_DRIVER_NAME);
            /* This is a non-fatal error, continue */
        }

        for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) {
            hldev->eprom_versions[i] = img[i].version;
            if (!img[i].is_valid)
                break;
            vxge_debug_init(VXGE_TRACE, "%s: EPROM %d, version "
                    "%d.%d.%d.%d", VXGE_DRIVER_NAME, i,
                    VXGE_EPROM_IMG_MAJOR(img[i].version),
                    VXGE_EPROM_IMG_MINOR(img[i].version),
                    VXGE_EPROM_IMG_FIX(img[i].version),
                    VXGE_EPROM_IMG_BUILD(img[i].version));
        }
    }
    /* if FCS stripping is not disabled in MAC, fail driver load */
    status = vxge_hw_vpath_strip_fcs_check(hldev, vpath_mask);
    if (status != VXGE_HW_OK) {
        vxge_debug_init(VXGE_ERR, "%s: FCS stripping is enabled in MAC"
                " failing driver load", VXGE_DRIVER_NAME);
        ret = -EINVAL;
        goto _exit4;
    }
    /* Always enable HWTS. This will always cause the FCS to be invalid,
     * due to the fact that HWTS is using the FCS as the location of the
     * timestamp. The HW FCS checking will still correctly determine if
     * there is a valid checksum, and the FCS is being removed by the
     * driver anyway. So no functionality is being lost. Since it is
     * always enabled, we now simply use the ioctl call to set whether or
     * not the driver should be paying attention to the HWTS.
     */
    if (is_privileged == VXGE_HW_OK) {
        status = vxge_timestamp_config(hldev);
        if (status != VXGE_HW_OK) {
            vxge_debug_init(VXGE_ERR, "%s: HWTS enable failed",
                    VXGE_DRIVER_NAME);
            ret = -EFAULT;
            goto _exit4;
        }
    }
    vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL);

    /* set private device info */
    pci_set_drvdata(pdev, hldev);

    ll_config->fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS;
    ll_config->addr_learn_en = addr_learn_en;
    ll_config->rth_algorithm = RTH_ALG_JENKINS;
    ll_config->rth_hash_type_tcpipv4 = 1;
    ll_config->rth_hash_type_ipv4 = 0;
    ll_config->rth_hash_type_tcpipv6 = 0;
    ll_config->rth_hash_type_ipv6 = 0;
    ll_config->rth_hash_type_tcpipv6ex = 0;
    ll_config->rth_hash_type_ipv6ex = 0;
    ll_config->rth_bkt_sz = RTH_BUCKET_SIZE;
    ll_config->tx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
    ll_config->rx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
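/* Receive Traffic Hashing (RTH) defaults: only the TCP/IPv4 hash input is
 * enabled, so received flows are spread across vpaths by a Jenkins hash of
 * the TCP/IPv4 headers; the IPv4-only and IPv6 variants stay disabled.
 */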
    ret = vxge_device_register(hldev, ll_config, high_dma, no_of_vpath,
                   &vdev);
    if (ret) {
        ret = -EINVAL;
        goto _exit4;
    }

    ret = vxge_probe_fw_update(vdev);
    if (ret)
        goto _exit5;
    vxge_hw_device_debug_set(hldev, VXGE_TRACE, VXGE_COMPONENT_LL);
    VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
        vxge_hw_device_trace_level_get(hldev));
    /* set private HW device info */
    vdev->mtu = VXGE_HW_DEFAULT_MTU;
    vdev->bar0 = attr.bar0;
    vdev->max_vpath_supported = max_vpath_supported;
    vdev->no_of_vpath = no_of_vpath;
    /* Virtual Path count */
    for (i = 0, j = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
        if (!vxge_bVALn(vpath_mask, i, 1))
            continue;
        if (j >= vdev->no_of_vpath)
            break;

        vdev->vpaths[j].is_configured = 1;
        vdev->vpaths[j].device_id = i;
        vdev->vpaths[j].ring.driver_id = j;
        vdev->vpaths[j].vdev = vdev;
        vdev->vpaths[j].max_mac_addr_cnt = max_mac_vpath;
        memcpy((u8 *)vdev->vpaths[j].macaddr,
                ll_config->device_hw_info.mac_addrs[i],
                ETH_ALEN);

        /* Initialize the mac address list header */
        INIT_LIST_HEAD(&vdev->vpaths[j].mac_addr_list);

        vdev->vpaths[j].mac_addr_cnt = 0;
        vdev->vpaths[j].mcast_addr_cnt = 0;
        j++;
    }
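/* Two indices run through the loop above: i walks the hardware vpath mask
 * while j fills the driver's dense vpaths[] array, so device_id records
 * which physical vpath backs each driver slot.
 */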
    vdev->exec_mode = VXGE_EXEC_MODE_DISABLE;
    vdev->max_config_port = max_config_port;

    vdev->vlan_tag_strip = vlan_tag_strip;
    /* map the hashing selector table to the configured vpaths */
    for (i = 0; i < vdev->no_of_vpath; i++)
        vdev->vpath_selector[i] = vpath_selector[i];

    macaddr = (u8 *)vdev->vpaths[0].macaddr;
    ll_config->device_hw_info.serial_number[VXGE_HW_INFO_LEN - 1] = '\0';
    ll_config->device_hw_info.product_desc[VXGE_HW_INFO_LEN - 1] = '\0';
    ll_config->device_hw_info.part_number[VXGE_HW_INFO_LEN - 1] = '\0';
    vxge_debug_init(VXGE_TRACE, "%s: SERIAL NUMBER: %s",
        vdev->ndev->name, ll_config->device_hw_info.serial_number);

    vxge_debug_init(VXGE_TRACE, "%s: PART NUMBER: %s",
        vdev->ndev->name, ll_config->device_hw_info.part_number);

    vxge_debug_init(VXGE_TRACE, "%s: Neterion %s Server Adapter",
        vdev->ndev->name, ll_config->device_hw_info.product_desc);

    vxge_debug_init(VXGE_TRACE, "%s: MAC ADDR: %pM",
        vdev->ndev->name, macaddr);

    vxge_debug_init(VXGE_TRACE, "%s: Link Width x%d",
        vdev->ndev->name, vxge_hw_device_link_width_get(hldev));

    vxge_debug_init(VXGE_TRACE,
        "%s: Firmware version : %s Date : %s", vdev->ndev->name,
        ll_config->device_hw_info.fw_version.version,
        ll_config->device_hw_info.fw_date.date);
    switch (ll_config->device_hw_info.function_mode) {
    case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION:
        vxge_debug_init(VXGE_TRACE,
            "%s: Single Function Mode Enabled", vdev->ndev->name);
        break;
    case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION:
        vxge_debug_init(VXGE_TRACE,
            "%s: Multi Function Mode Enabled", vdev->ndev->name);
        break;
    case VXGE_HW_FUNCTION_MODE_SRIOV:
        vxge_debug_init(VXGE_TRACE,
            "%s: Single Root IOV Mode Enabled", vdev->ndev->name);
        break;
    case VXGE_HW_FUNCTION_MODE_MRIOV:
        vxge_debug_init(VXGE_TRACE,
            "%s: Multi Root IOV Mode Enabled", vdev->ndev->name);
        break;
    }
    vxge_print_parm(vdev, vpath_mask);
    /* Store the fw version for ethtool option */
    strcpy(vdev->fw_version, ll_config->device_hw_info.fw_version.version);
    memcpy(vdev->ndev->dev_addr, (u8 *)vdev->vpaths[0].macaddr, ETH_ALEN);
    /* Copy the station mac address to the list */
    for (i = 0; i < vdev->no_of_vpath; i++) {
        entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_KERNEL);
        if (NULL == entry) {
            vxge_debug_init(VXGE_ERR,
                "%s: mac_addr_list : memory allocation failed",
                vdev->ndev->name);
            ret = -EPERM;
            goto _exit6;
        }
        macaddr = (u8 *)&entry->macaddr;
        memcpy(macaddr, vdev->ndev->dev_addr, ETH_ALEN);
        list_add(&entry->item, &vdev->vpaths[i].mac_addr_list);
        vdev->vpaths[i].mac_addr_cnt = 1;
    }
    kfree(device_config);
    /*
     * INTA is shared in multi-function mode. This is unlike the INTA
     * implementation in MR mode, where each VH has its own INTA message.
     * - INTA is masked (disabled) as long as at least one function sets
     *   its TITAN_MASK_ALL_INT.ALARM bit.
     * - INTA is unmasked (enabled) when all enabled functions have cleared
     *   their own TITAN_MASK_ALL_INT.ALARM bit.
     * The TITAN_MASK_ALL_INT ALARM & TRAFFIC bits are cleared on power up.
     * Though this driver leaves the top level interrupts unmasked while
     * leaving the required module interrupt bits masked on exit, there
     * could be a rogue driver around that does not follow this procedure,
     * resulting in a failure to generate interrupts. The following code is
     * present to prevent such a failure.
     */
    if (ll_config->device_hw_info.function_mode ==
        VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION)
        if (vdev->config.intr_type == INTA)
            vxge_hw_device_unmask_all(hldev);
    vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...",
        vdev->ndev->name, __func__, __LINE__);
    vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL);
    VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
        vxge_hw_device_trace_level_get(hldev));

    kfree(ll_config);
    return 0;
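/* Error unwind: each label below releases what was acquired after the
 * corresponding point in the probe sequence, in reverse order of setup.
 */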
_exit6:
    for (i = 0; i < vdev->no_of_vpath; i++)
        vxge_free_mac_add_list(&vdev->vpaths[i]);
_exit5:
    vxge_device_unregister(hldev);
_exit4:
    vxge_hw_device_terminate(hldev);
    pci_disable_sriov(pdev);
_exit3:
    iounmap(attr.bar0);
_exit2:
    pci_release_region(pdev, 0);
_exit1:
    pci_disable_device(pdev);
_exit0:
    kfree(ll_config);
    kfree(device_config);
    driver_config->config_dev_cnt--;
    driver_config->total_dev_cnt--;
    return ret;
}
/**
 * vxge_remove - Free the PCI device
 * @pdev: structure containing the PCI related information of the device.
 * Description: This function is called by the PCI subsystem to release a
 * PCI device and free up all resources held by the device.
 */
static void vxge_remove(struct pci_dev *pdev)
{
    struct __vxge_hw_device *hldev;
    struct vxgedev *vdev;
    int i;

    hldev = pci_get_drvdata(pdev);
    if (hldev == NULL)
        return;

    vdev = netdev_priv(hldev->ndev);
    vxge_debug_entryexit(vdev->level_trace, "%s:%d", __func__, __LINE__);
    vxge_debug_init(vdev->level_trace, "%s : removing PCI device...",
            __func__);
    for (i = 0; i < vdev->no_of_vpath; i++)
        vxge_free_mac_add_list(&vdev->vpaths[i]);

    vxge_device_unregister(hldev);
    /* Do not call pci_disable_sriov here, as it will break child devices */
    vxge_hw_device_terminate(hldev);
    iounmap(vdev->bar0);
    pci_release_region(pdev, 0);
    pci_disable_device(pdev);
    driver_config->config_dev_cnt--;
    driver_config->total_dev_cnt--;
    vxge_debug_init(vdev->level_trace, "%s:%d Device unregistered",
            __func__, __LINE__);
    vxge_debug_entryexit(vdev->level_trace, "%s:%d Exiting...", __func__,
            __LINE__);
}
static const struct pci_error_handlers vxge_err_handler = {
    .error_detected = vxge_io_error_detected,
    .slot_reset = vxge_io_slot_reset,
    .resume = vxge_io_resume,
};
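/* These hooks tie into PCI error recovery (AER): error_detected quiesces
 * the device when the PCI layer reports a fault, slot_reset reinitializes
 * it after the link/slot reset, and resume restarts traffic.
 */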
static struct pci_driver vxge_driver = {
    .name = VXGE_DRIVER_NAME,
    .id_table = vxge_id_table,
    .probe = vxge_probe,
    .remove = vxge_remove,
#ifdef CONFIG_PM
    .suspend = vxge_pm_suspend,
    .resume = vxge_pm_resume,
#endif
    .err_handler = &vxge_err_handler,
};
static int __init
vxge_starter(void)
{
    int ret = 0;

    pr_info("Copyright(c) 2002-2010 Exar Corp.\n");
    pr_info("Driver version: %s\n", DRV_VERSION);

    verify_bandwidth();

    driver_config = kzalloc(sizeof(struct vxge_drv_config), GFP_KERNEL);
    if (!driver_config)
        return -ENOMEM;

    ret = pci_register_driver(&vxge_driver);
    if (ret) {
        kfree(driver_config);
        goto err;
    }
    if (driver_config->config_dev_cnt &&
        (driver_config->config_dev_cnt != driver_config->total_dev_cnt))
        vxge_debug_init(VXGE_ERR,
            "%s: Configured %d of %d devices",
            VXGE_DRIVER_NAME, driver_config->config_dev_cnt,
            driver_config->total_dev_cnt);
err:
    return ret;
}
static void __exit
vxge_closer(void)
{
    pci_unregister_driver(&vxge_driver);
    kfree(driver_config);
}

module_init(vxge_starter);
module_exit(vxge_closer);