/******************************************************************************
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by reference.
 * Drivers based on or derived from this code fall under the GPL and must
 * retain the authorship, copyright and license notice.  This file is not
 * a complete program and may only be used when the entire operating
 * system is licensed under the GPL.
 * See the file COPYING in this distribution for more information.
 *
 * vxge-main.c: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O
 *              Virtualized Server Adapter.
 * Copyright(c) 2002-2009 Neterion Inc.
 *
 * The module loadable parameters that are supported by the driver and a brief
 * explanation of all the variables:
 * vlan_tag_strip:
 *	Strip VLAN Tag enable/disable. Instructs the device to remove
 *	the VLAN tag from all received tagged frames that are not
 *	replicated at the internal L2 switch.
 *		0 - Do not strip the VLAN tag.
 *		1 - Strip the VLAN tag.
 *
 * addr_learn_en:
 *	Enable learning the mac address of the guest OS interface in
 *	a virtualization environment.
 *
 * max_config_port:
 *	Maximum number of ports to be supported.
 *
 * max_config_vpath:
 *	This configures the maximum number of VPATHs for each
 *	device function.
 *		MIN - 1 and MAX - 17
 *
 * max_config_dev:
 *	This configures the maximum number of device functions to be enabled.
 *		MIN - 1 and MAX - 17
 ******************************************************************************/
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/tcp.h>
#include <net/ip.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include "vxge-main.h"
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Neterion's X3100 Series 10GbE PCIe I/O "
	"Virtualized Server Adapter");
static struct pci_device_id vxge_id_table[] __devinitdata = {
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_WIN, PCI_ANY_ID,
	PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_UNI, PCI_ANY_ID,
	PCI_ANY_ID},
	{0}
};

MODULE_DEVICE_TABLE(pci, vxge_id_table);
VXGE_MODULE_PARAM_INT(vlan_tag_strip, VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE);
VXGE_MODULE_PARAM_INT(addr_learn_en, VXGE_HW_MAC_ADDR_LEARN_DEFAULT);
VXGE_MODULE_PARAM_INT(max_config_port, VXGE_MAX_CONFIG_PORT);
VXGE_MODULE_PARAM_INT(max_config_vpath, VXGE_USE_DEFAULT);
VXGE_MODULE_PARAM_INT(max_mac_vpath, VXGE_MAX_MAC_ADDR_COUNT);
VXGE_MODULE_PARAM_INT(max_config_dev, VXGE_MAX_CONFIG_DEV);
static u16 vpath_selector[VXGE_HW_MAX_VIRTUAL_PATHS] =
		{0, 1, 3, 3, 7, 7, 7, 7, 15, 15, 15, 15, 15, 15, 15, 15, 31};
static unsigned int bw_percentage[VXGE_HW_MAX_VIRTUAL_PATHS] =
	{[0 ...(VXGE_HW_MAX_VIRTUAL_PATHS - 1)] = 0xFF};
module_param_array(bw_percentage, uint, NULL, 0);
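/*
 * Note: each vpath_selector[] entry is a power-of-two-minus-one mask.
 * With n vpaths configured, vpath_selector[n - 1] is ANDed with the sum
 * of the TCP source and destination ports in vxge_get_vpath_no() below
 * to pick a transmit vpath; e.g. with n = 4 the mask is 3, so
 * (sport + dport) & 3 selects one of vpaths 0-3.
 */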
static struct vxge_drv_config *driver_config;

static inline int is_vxge_card_up(struct vxgedev *vdev)
{
	return test_bit(__VXGE_STATE_CARD_UP, &vdev->state);
}
static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
{
	unsigned long flags = 0;
	struct sk_buff **skb_ptr = NULL;
	struct sk_buff **temp;
#define NR_SKB_COMPLETED 128
	struct sk_buff *completed[NR_SKB_COMPLETED];
	int more;

	do {
		more = 0;
		skb_ptr = completed;

		if (spin_trylock_irqsave(&fifo->tx_lock, flags)) {
			vxge_hw_vpath_poll_tx(fifo->handle, &skb_ptr,
						NR_SKB_COMPLETED, &more);
			spin_unlock_irqrestore(&fifo->tx_lock, flags);
		}
		/* free SKBs */
		for (temp = completed; temp != skb_ptr; temp++)
			dev_kfree_skb_irq(*temp);
	} while (more);
}
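/*
 * Tx completions are harvested in batches of up to NR_SKB_COMPLETED skbs
 * per trylock attempt: the poll fills completed[] under fifo->tx_lock,
 * and the skbs are freed with dev_kfree_skb_irq() only after the lock is
 * dropped, so the fifo lock is never held across the free path.
 */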
static inline void VXGE_COMPLETE_ALL_TX(struct vxgedev *vdev)
{
	int i;

	/* Complete all transmits */
	for (i = 0; i < vdev->no_of_vpath; i++)
		VXGE_COMPLETE_VPATH_TX(&vdev->vpaths[i].fifo);
}
static inline void VXGE_COMPLETE_ALL_RX(struct vxgedev *vdev)
{
	int i;
	struct vxge_ring *ring;

	/* Complete all receives */
	for (i = 0; i < vdev->no_of_vpath; i++) {
		ring = &vdev->vpaths[i].ring;
		vxge_hw_vpath_poll_rx(ring->handle);
	}
}
/*
 * MultiQ manipulation helper functions
 */
void vxge_stop_all_tx_queue(struct vxgedev *vdev)
{
	int i;
	struct net_device *dev = vdev->ndev;

	if (vdev->config.tx_steering_type != TX_MULTIQ_STEERING) {
		for (i = 0; i < vdev->no_of_vpath; i++)
			vdev->vpaths[i].fifo.queue_state = VPATH_QUEUE_STOP;
	}
	netif_tx_stop_all_queues(dev);
}
void vxge_stop_tx_queue(struct vxge_fifo *fifo)
{
	struct net_device *dev = fifo->ndev;
	struct netdev_queue *txq = NULL;

	if (fifo->tx_steering_type == TX_MULTIQ_STEERING)
		txq = netdev_get_tx_queue(dev, fifo->driver_id);
	else {
		txq = netdev_get_tx_queue(dev, 0);
		fifo->queue_state = VPATH_QUEUE_STOP;
	}

	netif_tx_stop_queue(txq);
}
void vxge_start_all_tx_queue(struct vxgedev *vdev)
{
	int i;
	struct net_device *dev = vdev->ndev;

	if (vdev->config.tx_steering_type != TX_MULTIQ_STEERING) {
		for (i = 0; i < vdev->no_of_vpath; i++)
			vdev->vpaths[i].fifo.queue_state = VPATH_QUEUE_START;
	}
	netif_tx_start_all_queues(dev);
}
static void vxge_wake_all_tx_queue(struct vxgedev *vdev)
{
	int i;
	struct net_device *dev = vdev->ndev;

	if (vdev->config.tx_steering_type != TX_MULTIQ_STEERING) {
		for (i = 0; i < vdev->no_of_vpath; i++)
			vdev->vpaths[i].fifo.queue_state = VPATH_QUEUE_START;
	}
	netif_tx_wake_all_queues(dev);
}
void vxge_wake_tx_queue(struct vxge_fifo *fifo, struct sk_buff *skb)
{
	struct net_device *dev = fifo->ndev;
	int vpath_no = fifo->driver_id;
	struct netdev_queue *txq = NULL;

	if (fifo->tx_steering_type == TX_MULTIQ_STEERING) {
		txq = netdev_get_tx_queue(dev, vpath_no);
		if (netif_tx_queue_stopped(txq))
			netif_tx_wake_queue(txq);
	} else {
		txq = netdev_get_tx_queue(dev, 0);
		if (fifo->queue_state == VPATH_QUEUE_STOP)
			if (netif_tx_queue_stopped(txq)) {
				fifo->queue_state = VPATH_QUEUE_START;
				netif_tx_wake_queue(txq);
			}
	}
}
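/*
 * With TX_MULTIQ_STEERING each fifo maps 1:1 onto a netdev tx queue, so
 * the stack's per-queue stopped/started state is authoritative.  In the
 * single-queue modes all fifos share queue 0, so the driver mirrors the
 * state in fifo->queue_state (VPATH_QUEUE_STOP/START) to know which fifo
 * actually requested the stop before waking queue 0.
 */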
/*
 * vxge_callback_link_up
 *
 * This function is called during interrupt context to notify link up state
 * change.
 */
void
vxge_callback_link_up(struct __vxge_hw_device *hldev)
{
	struct net_device *dev = hldev->ndev;
	struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		vdev->ndev->name, __func__, __LINE__);
	printk(KERN_NOTICE "%s: Link Up\n", vdev->ndev->name);
	vdev->stats.link_up++;

	netif_carrier_on(vdev->ndev);
	vxge_wake_all_tx_queue(vdev);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__);
}
/*
 * vxge_callback_link_down
 *
 * This function is called during interrupt context to notify link down state
 * change.
 */
void
vxge_callback_link_down(struct __vxge_hw_device *hldev)
{
	struct net_device *dev = hldev->ndev;
	struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
	printk(KERN_NOTICE "%s: Link Down\n", vdev->ndev->name);

	vdev->stats.link_down++;
	netif_carrier_off(vdev->ndev);
	vxge_stop_all_tx_queue(vdev);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__);
}
/*
 * vxge_rx_alloc
 *
 * Allocate SKB.
 */
static struct sk_buff *
vxge_rx_alloc(void *dtrh, struct vxge_ring *ring, const int skb_size)
{
	struct net_device *dev;
	struct sk_buff *skb;
	struct vxge_rx_priv *rx_priv;

	dev = ring->ndev;
	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);

	rx_priv = vxge_hw_ring_rxd_private_get(dtrh);

	/* try to allocate skb first. this one may fail */
	skb = netdev_alloc_skb(dev, skb_size +
		VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
	if (skb == NULL) {
		vxge_debug_mem(VXGE_ERR,
			"%s: out of memory to allocate SKB", dev->name);
		ring->stats.skb_alloc_fail++;
		return NULL;
	}

	vxge_debug_mem(VXGE_TRACE,
		"%s: %s:%d Skb : 0x%p", ring->ndev->name,
		__func__, __LINE__, skb);

	skb_reserve(skb, VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);

	rx_priv->skb = skb;
	rx_priv->skb_data = NULL;
	rx_priv->data_size = skb_size;
	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);

	return skb;
}
/*
 * vxge_rx_map
 */
static int vxge_rx_map(void *dtrh, struct vxge_ring *ring)
{
	struct vxge_rx_priv *rx_priv;
	dma_addr_t dma_addr;

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);
	rx_priv = vxge_hw_ring_rxd_private_get(dtrh);

	rx_priv->skb_data = rx_priv->skb->data;
	dma_addr = pci_map_single(ring->pdev, rx_priv->skb_data,
				rx_priv->data_size, PCI_DMA_FROMDEVICE);

	if (unlikely(pci_dma_mapping_error(ring->pdev, dma_addr))) {
		ring->stats.pci_map_fail++;
		return -EIO;
	}
	vxge_debug_mem(VXGE_TRACE,
		"%s: %s:%d 1 buffer mode dma_addr = 0x%llx",
		ring->ndev->name, __func__, __LINE__,
		(unsigned long long)dma_addr);
	vxge_hw_ring_rxd_1b_set(dtrh, dma_addr, rx_priv->data_size);

	rx_priv->data_dma = dma_addr;
	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);

	return 0;
}
/*
 * vxge_rx_initial_replenish
 * Allocation of RxD as an initial replenish procedure.
 */
static enum vxge_hw_status
vxge_rx_initial_replenish(void *dtrh, void *userdata)
{
	struct vxge_ring *ring = (struct vxge_ring *)userdata;
	struct vxge_rx_priv *rx_priv;

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);
	if (vxge_rx_alloc(dtrh, ring,
		VXGE_LL_MAX_FRAME_SIZE(ring->ndev)) == NULL)
		return VXGE_HW_FAIL;

	if (vxge_rx_map(dtrh, ring)) {
		rx_priv = vxge_hw_ring_rxd_private_get(dtrh);
		dev_kfree_skb(rx_priv->skb);

		return VXGE_HW_FAIL;
	}
	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);

	return VXGE_HW_OK;
}
/*
 * vxge_rx_complete
 */
static inline void
vxge_rx_complete(struct vxge_ring *ring, struct sk_buff *skb, u16 vlan,
		 int pkt_length, struct vxge_hw_ring_rxd_info *ext_info)
{
	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);
	skb_record_rx_queue(skb, ring->driver_id);
	skb->protocol = eth_type_trans(skb, ring->ndev);

	ring->stats.rx_frms++;
	ring->stats.rx_bytes += pkt_length;

	if (skb->pkt_type == PACKET_MULTICAST)
		ring->stats.rx_mcast++;

	vxge_debug_rx(VXGE_TRACE,
		"%s: %s:%d skb protocol = %d",
		ring->ndev->name, __func__, __LINE__, skb->protocol);

	if (ring->gro_enable) {
		if (ring->vlgrp && ext_info->vlan &&
			(ring->vlan_tag_strip ==
				VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE))
			vlan_gro_receive(ring->napi_p, ring->vlgrp,
					ext_info->vlan, skb);
		else
			napi_gro_receive(ring->napi_p, skb);
	} else {
		if (ring->vlgrp && vlan &&
			(ring->vlan_tag_strip ==
				VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE))
			vlan_hwaccel_receive_skb(skb, ring->vlgrp, vlan);
		else
			netif_receive_skb(skb);
	}
	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
}
static inline void vxge_re_pre_post(void *dtr, struct vxge_ring *ring,
				    struct vxge_rx_priv *rx_priv)
{
	pci_dma_sync_single_for_device(ring->pdev,
		rx_priv->data_dma, rx_priv->data_size, PCI_DMA_FROMDEVICE);

	vxge_hw_ring_rxd_1b_set(dtr, rx_priv->data_dma, rx_priv->data_size);
	vxge_hw_ring_rxd_pre_post(ring->handle, dtr);
}
static inline void vxge_post(int *dtr_cnt, void **first_dtr,
			     void *post_dtr, struct __vxge_hw_ring *ringh)
{
	int dtr_count = *dtr_cnt;
	if ((*dtr_cnt % VXGE_HW_RXSYNC_FREQ_CNT) == 0) {
		if (*first_dtr)
			vxge_hw_ring_rxd_post_post_wmb(ringh, *first_dtr);
		*first_dtr = post_dtr;
	} else
		vxge_hw_ring_rxd_post_post(ringh, post_dtr);
	dtr_count++;
	*dtr_cnt = dtr_count;
}
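/*
 * RxD post batching: every VXGE_HW_RXSYNC_FREQ_CNT descriptors the
 * previously accumulated batch head is posted with a write memory
 * barrier (vxge_hw_ring_rxd_post_post_wmb) and the current descriptor
 * becomes the new head; descriptors in between are posted without the
 * barrier, limiting ordered doorbell writes per completion run.
 */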
/*
 * vxge_rx_1b_compl
 *
 * If the interrupt is because of a received frame or if the receive ring
 * contains fresh as yet un-processed frames, this function is called.
 */
enum vxge_hw_status
vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
		 u8 t_code, void *userdata)
{
	struct vxge_ring *ring = (struct vxge_ring *)userdata;
	struct net_device *dev = ring->ndev;
	unsigned int dma_sizes;
	void *first_dtr = NULL;
	int dtr_cnt = 0;
	int data_size;
	dma_addr_t data_dma;
	int pkt_length;
	struct sk_buff *skb;
	struct vxge_rx_priv *rx_priv;
	struct vxge_hw_ring_rxd_info ext_info;
	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);
	ring->pkts_processed = 0;

	vxge_hw_ring_replenish(ringh, 0);

	do {
		prefetch((char *)dtr + L1_CACHE_BYTES);
		rx_priv = vxge_hw_ring_rxd_private_get(dtr);
		skb = rx_priv->skb;
		data_size = rx_priv->data_size;
		data_dma = rx_priv->data_dma;
		prefetch(rx_priv->skb_data);

		vxge_debug_rx(VXGE_TRACE,
			"%s: %s:%d skb = 0x%p",
			ring->ndev->name, __func__, __LINE__, skb);

		vxge_hw_ring_rxd_1b_get(ringh, dtr, &dma_sizes);
		pkt_length = dma_sizes;

		pkt_length -= ETH_FCS_LEN;

		vxge_debug_rx(VXGE_TRACE,
			"%s: %s:%d Packet Length = %d",
			ring->ndev->name, __func__, __LINE__, pkt_length);

		vxge_hw_ring_rxd_1b_info_get(ringh, dtr, &ext_info);

		/* check skb validity */
		vxge_assert(skb);

		prefetch((char *)skb + L1_CACHE_BYTES);
		if (unlikely(t_code)) {

			if (vxge_hw_ring_handle_tcode(ringh, dtr, t_code) !=
				VXGE_HW_OK) {

				ring->stats.rx_errors++;
				vxge_debug_rx(VXGE_TRACE,
					"%s: %s:%d Rx T_code is %d",
					ring->ndev->name, __func__,
					__LINE__, t_code);

				/* If the t_code is not supported and if the
				 * t_code is other than 0x5 (unparseable packet
				 * such as unknown IPv6 header), Drop it !!!
				 */
				vxge_re_pre_post(dtr, ring, rx_priv);

				vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
				ring->stats.rx_dropped++;
				continue;
			}
		}

		if (pkt_length > VXGE_LL_RX_COPY_THRESHOLD) {

			if (vxge_rx_alloc(dtr, ring, data_size) != NULL) {

				if (!vxge_rx_map(dtr, ring)) {
					skb_put(skb, pkt_length);

					pci_unmap_single(ring->pdev, data_dma,
						data_size, PCI_DMA_FROMDEVICE);

					vxge_hw_ring_rxd_pre_post(ringh, dtr);
					vxge_post(&dtr_cnt, &first_dtr, dtr,
						ringh);
				} else {
					dev_kfree_skb(rx_priv->skb);
					rx_priv->skb = skb;
					rx_priv->data_size = data_size;
					vxge_re_pre_post(dtr, ring, rx_priv);

					vxge_post(&dtr_cnt, &first_dtr, dtr,
						ringh);
					ring->stats.rx_dropped++;
					continue;
				}
			} else {
				vxge_re_pre_post(dtr, ring, rx_priv);

				vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
				ring->stats.rx_dropped++;
				continue;
			}
		} else {
			struct sk_buff *skb_up;

			skb_up = netdev_alloc_skb(dev, pkt_length +
				VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
			if (skb_up != NULL) {
				skb_reserve(skb_up,
					VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);

				pci_dma_sync_single_for_cpu(ring->pdev,
					data_dma, data_size,
					PCI_DMA_FROMDEVICE);

				vxge_debug_mem(VXGE_TRACE,
					"%s: %s:%d skb_up = %p",
					ring->ndev->name, __func__,
					__LINE__, skb_up);
				memcpy(skb_up->data, skb->data, pkt_length);

				vxge_re_pre_post(dtr, ring, rx_priv);

				vxge_post(&dtr_cnt, &first_dtr, dtr,
					ringh);
				/* will netif_rx small SKB instead */
				skb = skb_up;
				skb_put(skb, pkt_length);
			} else {
				vxge_re_pre_post(dtr, ring, rx_priv);

				vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
				vxge_debug_rx(VXGE_ERR,
					"%s: vxge_rx_1b_compl: out of "
					"memory", dev->name);
				ring->stats.skb_alloc_fail++;
				break;
			}
		}

		if ((ext_info.proto & VXGE_HW_FRAME_PROTO_TCP_OR_UDP) &&
		    !(ext_info.proto & VXGE_HW_FRAME_PROTO_IP_FRAG) &&
		    ring->rx_csum && /* Offload Rx side CSUM */
		    ext_info.l3_cksum == VXGE_HW_L3_CKSUM_OK &&
		    ext_info.l4_cksum == VXGE_HW_L4_CKSUM_OK)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		vxge_rx_complete(ring, skb, ext_info.vlan,
			pkt_length, &ext_info);

		ring->budget--;
		ring->pkts_processed++;
		if (!ring->budget)
			break;

	} while (vxge_hw_ring_rxd_next_completed(ringh, &dtr,
		&t_code) == VXGE_HW_OK);

	if (first_dtr)
		vxge_hw_ring_rxd_post_post_wmb(ringh, first_dtr);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...", __func__, __LINE__);
	return VXGE_HW_OK;
}
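/*
 * Receive copy-break: frames longer than VXGE_LL_RX_COPY_THRESHOLD keep
 * the original skb and a fresh buffer is allocated and mapped for the
 * descriptor; shorter frames are memcpy'd into a small new skb (skb_up)
 * and the original DMA buffer is recycled via vxge_re_pre_post(), which
 * avoids a full unmap/remap for small packets.
 */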
/*
 * vxge_xmit_compl
 *
 * If an interrupt was raised to indicate DMA complete of the Tx packet,
 * this function is called. It identifies the last TxD whose buffer was
 * freed and frees all skbs whose data have already DMA'ed into the NICs
 * internal memory.
 */
enum vxge_hw_status
vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
		enum vxge_hw_fifo_tcode t_code, void *userdata,
		struct sk_buff ***skb_ptr, int nr_skb, int *more)
{
	struct vxge_fifo *fifo = (struct vxge_fifo *)userdata;
	struct sk_buff *skb, **done_skb = *skb_ptr;
	int pkt_cnt = 0;

	vxge_debug_entryexit(VXGE_TRACE,
			"%s:%d Entered....", __func__, __LINE__);

	do {
		int frg_cnt;
		skb_frag_t *frag;
		int i = 0, j;
		struct vxge_tx_priv *txd_priv =
			vxge_hw_fifo_txdl_private_get(dtr);

		skb = txd_priv->skb;
		frg_cnt = skb_shinfo(skb)->nr_frags;
		frag = &skb_shinfo(skb)->frags[0];

		vxge_debug_tx(VXGE_TRACE,
				"%s: %s:%d fifo_hw = %p dtr = %p "
				"tcode = 0x%x", fifo->ndev->name, __func__,
				__LINE__, fifo_hw, dtr, t_code);
		/* check skb validity */
		vxge_assert(skb);
		vxge_debug_tx(VXGE_TRACE,
			"%s: %s:%d skb = %p itxd_priv = %p frg_cnt = %d",
			fifo->ndev->name, __func__, __LINE__,
			skb, txd_priv, frg_cnt);
		if (unlikely(t_code)) {
			fifo->stats.tx_errors++;
			vxge_debug_tx(VXGE_ERR,
				"%s: tx: dtr %p completed due to "
				"error t_code %01x", fifo->ndev->name,
				dtr, t_code);
			vxge_hw_fifo_handle_tcode(fifo_hw, dtr, t_code);
		}

		/* for unfragmented skb */
		pci_unmap_single(fifo->pdev, txd_priv->dma_buffers[i++],
				skb_headlen(skb), PCI_DMA_TODEVICE);

		for (j = 0; j < frg_cnt; j++) {
			pci_unmap_page(fifo->pdev,
					txd_priv->dma_buffers[i++],
					frag->size, PCI_DMA_TODEVICE);
			frag += 1;
		}

		vxge_hw_fifo_txdl_free(fifo_hw, dtr);

		/* Updating the statistics block */
		fifo->stats.tx_frms++;
		fifo->stats.tx_bytes += skb->len;

		*done_skb++ = skb;

		if (--nr_skb <= 0) {
			*more = 1;
			break;
		}

		pkt_cnt++;
		if (pkt_cnt > fifo->indicate_max_pkts)
			break;

	} while (vxge_hw_fifo_txdl_next_completed(fifo_hw,
				&dtr, &t_code) == VXGE_HW_OK);

	*skb_ptr = done_skb;
	vxge_wake_tx_queue(fifo, skb);

	vxge_debug_entryexit(VXGE_TRACE,
				"%s: %s:%d Exiting...",
				fifo->ndev->name, __func__, __LINE__);
	return VXGE_HW_OK;
}
/* select a vpath to transmit the packet */
static u32 vxge_get_vpath_no(struct vxgedev *vdev, struct sk_buff *skb,
	int *do_lock)
{
	u16 queue_len, counter = 0;
	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *ip;
		struct tcphdr *th;

		ip = ip_hdr(skb);

		if ((ip->frag_off & htons(IP_OFFSET|IP_MF)) == 0) {
			th = (struct tcphdr *)(((unsigned char *)ip) +
					ip->ihl*4);

			queue_len = vdev->no_of_vpath;
			counter = (ntohs(th->source) +
				ntohs(th->dest)) &
				vdev->vpath_selector[queue_len - 1];
			if (counter >= queue_len)
				counter = queue_len - 1;

			if (ip->protocol == IPPROTO_UDP)
				*do_lock = 0;
		}
	}
	return counter;
}
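/*
 * The steering above applies only to unfragmented IPv4: for a TCP or UDP
 * frame the vpath index is (sport + dport) & vpath_selector[n - 1],
 * clamped to n - 1.  For UDP flows *do_lock is also cleared so that
 * vxge_xmit() falls back to a trylock on the fifo lock.  Everything else
 * (fragments, non-IP) transmits on vpath 0, since counter stays 0.
 */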
static enum vxge_hw_status vxge_search_mac_addr_in_list(
	struct vxge_vpath *vpath, u64 del_mac)
{
	struct list_head *entry, *next;
	list_for_each_safe(entry, next, &vpath->mac_addr_list) {
		if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac)
			return TRUE;
	}
	return FALSE;
}
static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header)
{
	struct macInfo mac_info;
	u8 *mac_address = NULL;
	u64 mac_addr = 0, vpath_vector = 0;
	int vpath_idx = 0;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_vpath *vpath = NULL;
	struct __vxge_hw_device *hldev;

	hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);

	mac_address = (u8 *)&mac_addr;
	memcpy(mac_address, mac_header, ETH_ALEN);

	/* Is this mac address already in the list? */
	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
		vpath = &vdev->vpaths[vpath_idx];
		if (vxge_search_mac_addr_in_list(vpath, mac_addr))
			return vpath_idx;
	}

	memset(&mac_info, 0, sizeof(struct macInfo));
	memcpy(mac_info.macaddr, mac_header, ETH_ALEN);

	/* Any vpath has room to add mac address to its da table? */
	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
		vpath = &vdev->vpaths[vpath_idx];
		if (vpath->mac_addr_cnt < vpath->max_mac_addr_cnt) {
			/* Add this mac address to this vpath */
			mac_info.vpath_no = vpath_idx;
			mac_info.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
			status = vxge_add_mac_addr(vdev, &mac_info);
			if (status != VXGE_HW_OK)
				return -EPERM;
			return vpath_idx;
		}
	}

	mac_info.state = VXGE_LL_MAC_ADDR_IN_LIST;
	vpath_idx = 0;
	mac_info.vpath_no = vpath_idx;
	/* Is the first vpath already selected as catch-basin ? */
	vpath = &vdev->vpaths[vpath_idx];
	if (vpath->mac_addr_cnt > vpath->max_mac_addr_cnt) {
		/* Add this mac address to this vpath */
		if (FALSE == vxge_mac_list_add(vpath, &mac_info))
			return -EPERM;
		return vpath_idx;
	}

	/* Select first vpath as catch-basin */
	vpath_vector = vxge_mBIT(vpath->device_id);
	status = vxge_hw_mgmt_reg_write(vpath->vdev->devh,
				vxge_hw_mgmt_reg_type_mrpcim,
				0,
				(ulong)offsetof(
					struct vxge_hw_mrpcim_reg,
					rts_mgr_cbasin_cfg),
				vpath_vector);
	if (status != VXGE_HW_OK) {
		vxge_debug_tx(VXGE_ERR,
			"%s: Unable to set the vpath-%d in catch-basin mode",
			VXGE_DRIVER_NAME, vpath->device_id);
		return -EPERM;
	}

	if (FALSE == vxge_mac_list_add(vpath, &mac_info))
		return -EPERM;

	return vpath_idx;
}
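/*
 * Catch-basin fallback: when every vpath's DA table is full, the learned
 * address is kept only in the driver's software list and the first vpath
 * is switched to catch-basin mode through the catch-basin configuration
 * register in the mrpcim register space, so frames for addresses that no
 * DA table holds still land on vpath 0.
 */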
/**
 * vxge_xmit
 * @skb : the socket buffer containing the Tx data.
 * @dev : device pointer.
 *
 * This function is the Tx entry point of the driver. Neterion NIC supports
 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
 * NOTE: when the device can't queue the pkt, just the trans_start variable
 * will not be updated.
 */
static int
vxge_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vxge_fifo *fifo = NULL;
	void *dtr_priv;
	void *dtr = NULL;
	struct vxgedev *vdev = NULL;
	enum vxge_hw_status status;
	int frg_cnt, first_frg_len;
	skb_frag_t *frag;
	int i = 0, j = 0, avail;
	u64 dma_pointer;
	struct vxge_tx_priv *txdl_priv = NULL;
	struct __vxge_hw_fifo *fifo_hw;
	int offload_type;
	unsigned long flags = 0;
	int vpath_no = 0;
	int do_spin_tx_lock = 1;

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
			dev->name, __func__, __LINE__);

	/* A buffer with no data will be dropped */
	if (unlikely(skb->len <= 0)) {
		vxge_debug_tx(VXGE_ERR,
			"%s: Buffer has no data..", dev->name);
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	vdev = (struct vxgedev *)netdev_priv(dev);

	if (unlikely(!is_vxge_card_up(vdev))) {
		vxge_debug_tx(VXGE_ERR,
			"%s: vdev not initialized", dev->name);
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (vdev->config.addr_learn_en) {
		vpath_no = vxge_learn_mac(vdev, skb->data + ETH_ALEN);
		if (vpath_no == -EPERM) {
			vxge_debug_tx(VXGE_ERR,
				"%s: Failed to store the mac address",
				dev->name);
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
	}

	if (vdev->config.tx_steering_type == TX_MULTIQ_STEERING)
		vpath_no = skb_get_queue_mapping(skb);
	else if (vdev->config.tx_steering_type == TX_PORT_STEERING)
		vpath_no = vxge_get_vpath_no(vdev, skb, &do_spin_tx_lock);

	vxge_debug_tx(VXGE_TRACE, "%s: vpath_no= %d", dev->name, vpath_no);

	if (vpath_no >= vdev->no_of_vpath)
		vpath_no = 0;

	fifo = &vdev->vpaths[vpath_no].fifo;
	fifo_hw = fifo->handle;

	if (do_spin_tx_lock)
		spin_lock_irqsave(&fifo->tx_lock, flags);
	else {
		if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags)))
			return NETDEV_TX_LOCKED;
	}

	if (vdev->config.tx_steering_type == TX_MULTIQ_STEERING) {
		if (netif_subqueue_stopped(dev, skb)) {
			spin_unlock_irqrestore(&fifo->tx_lock, flags);
			return NETDEV_TX_BUSY;
		}
	} else if (unlikely(fifo->queue_state == VPATH_QUEUE_STOP)) {
		if (netif_queue_stopped(dev)) {
			spin_unlock_irqrestore(&fifo->tx_lock, flags);
			return NETDEV_TX_BUSY;
		}
	}
	avail = vxge_hw_fifo_free_txdl_count_get(fifo_hw);
	if (avail == 0) {
		vxge_debug_tx(VXGE_ERR,
			"%s: No free TXDs available", dev->name);
		fifo->stats.txd_not_free++;
		vxge_stop_tx_queue(fifo);
		goto _exit2;
	}

	/* Last TXD? Stop tx queue to avoid dropping packets. TX
	 * completion will resume the queue.
	 */
	if (avail == 1)
		vxge_stop_tx_queue(fifo);

	status = vxge_hw_fifo_txdl_reserve(fifo_hw, &dtr, &dtr_priv);
	if (unlikely(status != VXGE_HW_OK)) {
		vxge_debug_tx(VXGE_ERR,
			"%s: Out of descriptors.", dev->name);
		fifo->stats.txd_out_of_desc++;
		vxge_stop_tx_queue(fifo);
		goto _exit2;
	}

	vxge_debug_tx(VXGE_TRACE,
		"%s: %s:%d fifo_hw = %p dtr = %p dtr_priv = %p",
		dev->name, __func__, __LINE__,
		fifo_hw, dtr, dtr_priv);

	if (vdev->vlgrp && vlan_tx_tag_present(skb)) {
		u16 vlan_tag = vlan_tx_tag_get(skb);
		vxge_hw_fifo_txdl_vlan_set(dtr, vlan_tag);
	}

	first_frg_len = skb_headlen(skb);

	dma_pointer = pci_map_single(fifo->pdev, skb->data, first_frg_len,
				PCI_DMA_TODEVICE);

	if (unlikely(pci_dma_mapping_error(fifo->pdev, dma_pointer))) {
		vxge_hw_fifo_txdl_free(fifo_hw, dtr);
		vxge_stop_tx_queue(fifo);
		fifo->stats.pci_map_fail++;
		goto _exit2;
	}

	txdl_priv = vxge_hw_fifo_txdl_private_get(dtr);
	txdl_priv->skb = skb;
	txdl_priv->dma_buffers[j] = dma_pointer;

	frg_cnt = skb_shinfo(skb)->nr_frags;
	vxge_debug_tx(VXGE_TRACE,
			"%s: %s:%d skb = %p txdl_priv = %p "
			"frag_cnt = %d dma_pointer = 0x%llx", dev->name,
			__func__, __LINE__, skb, txdl_priv,
			frg_cnt, (unsigned long long)dma_pointer);

	vxge_hw_fifo_txdl_buffer_set(fifo_hw, dtr, j++, dma_pointer,
		first_frg_len);

	frag = &skb_shinfo(skb)->frags[0];
	for (i = 0; i < frg_cnt; i++) {
		/* ignore 0 length fragment */
		if (!frag->size)
			continue;

		dma_pointer =
			(u64)pci_map_page(fifo->pdev, frag->page,
				frag->page_offset, frag->size,
				PCI_DMA_TODEVICE);

		if (unlikely(pci_dma_mapping_error(fifo->pdev, dma_pointer)))
			goto _exit0;
		vxge_debug_tx(VXGE_TRACE,
			"%s: %s:%d frag = %d dma_pointer = 0x%llx",
				dev->name, __func__, __LINE__, i,
				(unsigned long long)dma_pointer);

		txdl_priv->dma_buffers[j] = dma_pointer;
		vxge_hw_fifo_txdl_buffer_set(fifo_hw, dtr, j++, dma_pointer,
					frag->size);
		frag += 1;
	}

	offload_type = vxge_offload_type(skb);

	if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {

		int mss = vxge_tcp_mss(skb);
		if (mss) {
			vxge_debug_tx(VXGE_TRACE,
				"%s: %s:%d mss = %d",
				dev->name, __func__, __LINE__, mss);
			vxge_hw_fifo_txdl_mss_set(dtr, mss);
		} else {
			vxge_assert(skb->len <=
				dev->mtu + VXGE_HW_MAC_HEADER_MAX_SIZE);
			vxge_assert(0);
			goto _exit1;
		}
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		vxge_hw_fifo_txdl_cksum_set_bits(dtr,
					VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN |
					VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN |
					VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN);

	vxge_hw_fifo_txdl_post(fifo_hw, dtr);

	dev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
	spin_unlock_irqrestore(&fifo->tx_lock, flags);

	VXGE_COMPLETE_VPATH_TX(fifo);
	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...",
		dev->name, __func__, __LINE__);
	return NETDEV_TX_OK;

_exit0:
	vxge_debug_tx(VXGE_TRACE, "%s: pci_map_page failed", dev->name);

_exit1:
	j = 0;
	frag = &skb_shinfo(skb)->frags[0];

	pci_unmap_single(fifo->pdev, txdl_priv->dma_buffers[j++],
			skb_headlen(skb), PCI_DMA_TODEVICE);

	for (; j < i; j++) {
		pci_unmap_page(fifo->pdev, txdl_priv->dma_buffers[j],
			frag->size, PCI_DMA_TODEVICE);
		frag += 1;
	}

	vxge_hw_fifo_txdl_free(fifo_hw, dtr);
_exit2:
	dev_kfree_skb(skb);
	spin_unlock_irqrestore(&fifo->tx_lock, flags);
	VXGE_COMPLETE_VPATH_TX(fifo);

	return NETDEV_TX_OK;
}
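/*
 * Locking in vxge_xmit(): the fifo tx_lock is normally taken with
 * spin_lock_irqsave(), but for UDP flows steered by vxge_get_vpath_no()
 * do_spin_tx_lock is cleared and a trylock is used instead, returning
 * NETDEV_TX_LOCKED on contention.  The driver runs as an NETIF_F_LLTX
 * device and therefore updates dev->trans_start itself.
 */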
/*
 * vxge_rx_term
 *
 * Function will be called by hw function to abort all outstanding receive
 * descriptors.
 */
static void
vxge_rx_term(void *dtrh, enum vxge_hw_rxd_state state, void *userdata)
{
	struct vxge_ring *ring = (struct vxge_ring *)userdata;
	struct vxge_rx_priv *rx_priv =
		vxge_hw_ring_rxd_private_get(dtrh);

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);
	if (state != VXGE_HW_RXD_STATE_POSTED)
		return;

	pci_unmap_single(ring->pdev, rx_priv->data_dma,
		rx_priv->data_size, PCI_DMA_FROMDEVICE);

	dev_kfree_skb(rx_priv->skb);
	rx_priv->skb_data = NULL;

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...",
		ring->ndev->name, __func__, __LINE__);
}
/*
 * vxge_tx_term
 *
 * Function will be called to abort all outstanding tx descriptors
 */
static void
vxge_tx_term(void *dtrh, enum vxge_hw_txdl_state state, void *userdata)
{
	struct vxge_fifo *fifo = (struct vxge_fifo *)userdata;
	skb_frag_t *frag;
	int i = 0, j, frg_cnt;
	struct vxge_tx_priv *txd_priv = vxge_hw_fifo_txdl_private_get(dtrh);
	struct sk_buff *skb = txd_priv->skb;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	if (state != VXGE_HW_TXDL_STATE_POSTED)
		return;

	/* check skb validity */
	vxge_assert(skb);
	frg_cnt = skb_shinfo(skb)->nr_frags;
	frag = &skb_shinfo(skb)->frags[0];

	/* for unfragmented skb */
	pci_unmap_single(fifo->pdev, txd_priv->dma_buffers[i++],
		skb_headlen(skb), PCI_DMA_TODEVICE);

	for (j = 0; j < frg_cnt; j++) {
		pci_unmap_page(fifo->pdev, txd_priv->dma_buffers[i++],
			       frag->size, PCI_DMA_TODEVICE);
		frag += 1;
	}

	dev_kfree_skb(skb);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...", __func__, __LINE__);
}
/**
 * vxge_set_multicast
 * @dev: pointer to the device structure
 *
 * Entry point for multicast address enable/disable
 * This function is a driver entry point which gets called by the kernel
 * whenever multicast addresses must be enabled/disabled. This also gets
 * called to set/reset promiscuous mode. Depending on the device flag, we
 * determine if multicast addresses must be enabled or if promiscuous mode
 * is to be disabled etc.
 */
static void vxge_set_multicast(struct net_device *dev)
{
	struct dev_mc_list *mclist;
	struct vxgedev *vdev;
	int i, mcast_cnt = 0;
	struct __vxge_hw_device *hldev;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct macInfo mac_info;
	int vpath_idx = 0;
	struct vxge_mac_addrs *mac_entry;
	struct list_head *list_head;
	struct list_head *entry, *next;
	u8 *mac_address = NULL;

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d", __func__, __LINE__);

	vdev = (struct vxgedev *)netdev_priv(dev);
	hldev = (struct __vxge_hw_device *)vdev->devh;

	if (unlikely(!is_vxge_card_up(vdev)))
		return;

	if ((dev->flags & IFF_ALLMULTI) && (!vdev->all_multi_flg)) {
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vxge_assert(vdev->vpaths[i].is_open);
			status = vxge_hw_vpath_mcast_enable(
						vdev->vpaths[i].handle);
			vdev->all_multi_flg = 1;
		}
	} else if (!(dev->flags & IFF_ALLMULTI) && (vdev->all_multi_flg)) {
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vxge_assert(vdev->vpaths[i].is_open);
			status = vxge_hw_vpath_mcast_disable(
						vdev->vpaths[i].handle);
			vdev->all_multi_flg = 0;
		}
	}

	if (status != VXGE_HW_OK)
		vxge_debug_init(VXGE_ERR,
			"failed to %s multicast, status %d",
			dev->flags & IFF_ALLMULTI ?
			"enable" : "disable", status);

	if (!vdev->config.addr_learn_en) {
		if (dev->flags & IFF_PROMISC) {
			for (i = 0; i < vdev->no_of_vpath; i++) {
				vxge_assert(vdev->vpaths[i].is_open);
				status = vxge_hw_vpath_promisc_enable(
						vdev->vpaths[i].handle);
			}
		} else {
			for (i = 0; i < vdev->no_of_vpath; i++) {
				vxge_assert(vdev->vpaths[i].is_open);
				status = vxge_hw_vpath_promisc_disable(
						vdev->vpaths[i].handle);
			}
		}
	}

	memset(&mac_info, 0, sizeof(struct macInfo));
	/* Update individual M_CAST address list */
	if ((!vdev->all_multi_flg) && dev->mc_count) {

		mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
		list_head = &vdev->vpaths[0].mac_addr_list;
		if ((dev->mc_count +
			(vdev->vpaths[0].mac_addr_cnt - mcast_cnt)) >
				vdev->vpaths[0].max_mac_addr_cnt)
			goto _set_all_mcast;

		/* Delete previous MC's */
		for (i = 0; i < mcast_cnt; i++) {
			if (!list_empty(list_head))
				mac_entry = (struct vxge_mac_addrs *)
					list_first_entry(list_head,
						struct vxge_mac_addrs,
						item);

			list_for_each_safe(entry, next, list_head) {

				mac_entry = (struct vxge_mac_addrs *) entry;
				/* Copy the mac address to delete */
				mac_address = (u8 *)&mac_entry->macaddr;
				memcpy(mac_info.macaddr, mac_address, ETH_ALEN);

				/* Is this a multicast address */
				if (0x01 & mac_info.macaddr[0]) {
					for (vpath_idx = 0; vpath_idx <
						vdev->no_of_vpath;
						vpath_idx++) {
						mac_info.vpath_no = vpath_idx;
						status = vxge_del_mac_addr(
								vdev,
								&mac_info);
					}
				}
			}
		}

		/* Add new ones */
		for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
			i++, mclist = mclist->next) {

			memcpy(mac_info.macaddr, mclist->dmi_addr, ETH_ALEN);
			for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;
					vpath_idx++) {
				mac_info.vpath_no = vpath_idx;
				mac_info.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
				status = vxge_add_mac_addr(vdev, &mac_info);
				if (status != VXGE_HW_OK) {
					vxge_debug_init(VXGE_ERR,
						"%s:%d Setting individual "
						"multicast address failed",
						__func__, __LINE__);
					goto _set_all_mcast;
				}
			}
		}

		return;
_set_all_mcast:
		mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
		/* Delete previous MC's */
		for (i = 0; i < mcast_cnt; i++) {

			list_for_each_safe(entry, next, list_head) {

				mac_entry = (struct vxge_mac_addrs *) entry;
				/* Copy the mac address to delete */
				mac_address = (u8 *)&mac_entry->macaddr;
				memcpy(mac_info.macaddr, mac_address, ETH_ALEN);

				/* Is this a multicast address */
				if (0x01 & mac_info.macaddr[0])
					break;
			}

			for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;
					vpath_idx++) {
				mac_info.vpath_no = vpath_idx;
				status = vxge_del_mac_addr(vdev, &mac_info);
			}
		}

		/* Enable all multicast */
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vxge_assert(vdev->vpaths[i].is_open);
			status = vxge_hw_vpath_mcast_enable(
						vdev->vpaths[i].handle);
			if (status != VXGE_HW_OK) {
				vxge_debug_init(VXGE_ERR,
					"%s:%d Enabling all multicasts failed",
					__func__, __LINE__);
			}
			vdev->all_multi_flg = 1;
		}
		dev->flags |= IFF_ALLMULTI;
	}

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...", __func__, __LINE__);
}
/**
 * vxge_set_mac_addr
 * @dev: pointer to the device structure
 *
 * Update entry "0" (default MAC addr)
 */
static int vxge_set_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct vxgedev *vdev;
	struct __vxge_hw_device *hldev;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct macInfo mac_info_new, mac_info_old;
	int vpath_idx = 0;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	vdev = (struct vxgedev *)netdev_priv(dev);
	hldev = vdev->devh;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memset(&mac_info_new, 0, sizeof(struct macInfo));
	memset(&mac_info_old, 0, sizeof(struct macInfo));

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d Exiting...",
		__func__, __LINE__);

	/* Get the old address */
	memcpy(mac_info_old.macaddr, dev->dev_addr, dev->addr_len);

	/* Copy the new address */
	memcpy(mac_info_new.macaddr, addr->sa_data, dev->addr_len);

	/* First delete the old mac address from all the vpaths
	as we can't specify the index while adding new mac address */
	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
		struct vxge_vpath *vpath = &vdev->vpaths[vpath_idx];
		if (!vpath->is_open) {
			/* This can happen when this interface is added/removed
			to the bonding interface. Delete this station address
			from the linked list */
			vxge_mac_list_del(vpath, &mac_info_old);

			/* Add this new address to the linked list
			for later restoring */
			vxge_mac_list_add(vpath, &mac_info_new);

			continue;
		}
		/* Delete the station address */
		mac_info_old.vpath_no = vpath_idx;
		status = vxge_del_mac_addr(vdev, &mac_info_old);
	}

	if (unlikely(!is_vxge_card_up(vdev))) {
		memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
		return VXGE_HW_OK;
	}

	/* Set this mac address to all the vpaths */
	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
		mac_info_new.vpath_no = vpath_idx;
		mac_info_new.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
		status = vxge_add_mac_addr(vdev, &mac_info_new);
		if (status != VXGE_HW_OK)
			return -EINVAL;
	}

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	return status;
}
/*
 * vxge_vpath_intr_enable
 * @vdev: pointer to vdev
 * @vp_id: vpath for which to enable the interrupts
 *
 * Enables the interrupts for the vpath
*/
void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
{
	struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
	int msix_id, alarm_msix_id;
	int tim_msix_id[4] = {[0 ...3] = 0};

	vxge_hw_vpath_intr_enable(vpath->handle);

	if (vdev->config.intr_type == INTA)
		vxge_hw_vpath_inta_unmask_tx_rx(vpath->handle);
	else {
		msix_id = vp_id * VXGE_HW_VPATH_MSIX_ACTIVE;
		alarm_msix_id =
			VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;

		tim_msix_id[0] = msix_id;
		tim_msix_id[1] = msix_id + 1;
		vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
			alarm_msix_id);

		vxge_hw_vpath_msix_unmask(vpath->handle, msix_id);
		vxge_hw_vpath_msix_unmask(vpath->handle, msix_id + 1);

		/* enable the alarm vector */
		vxge_hw_vpath_msix_unmask(vpath->handle, alarm_msix_id);
	}
}
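/*
 * MSI-X vector layout, as used above and in vxge_alloc_msix(): each
 * vpath owns a block of VXGE_HW_VPATH_MSIX_ACTIVE vectors starting at
 * vp_id * VXGE_HW_VPATH_MSIX_ACTIVE; vector 0 of the block serves the
 * fifo (Tx), vector 1 the ring (Rx), and a single shared alarm vector
 * sits at VXGE_HW_VPATH_MSIX_ACTIVE * no_of_vpath - 2.
 */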
/*
 * vxge_vpath_intr_disable
 * @vdev: pointer to vdev
 * @vp_id: vpath for which to disable the interrupts
 *
 * Disables the interrupts for the vpath
*/
void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
{
	struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
	int msix_id;

	vxge_hw_vpath_intr_disable(vpath->handle);

	if (vdev->config.intr_type == INTA)
		vxge_hw_vpath_inta_mask_tx_rx(vpath->handle);
	else {
		msix_id = vp_id * VXGE_HW_VPATH_MSIX_ACTIVE;
		vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
		vxge_hw_vpath_msix_mask(vpath->handle, msix_id + 1);

		/* disable the alarm vector */
		msix_id = VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;
		vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
	}
}
/*
 * vxge_reset_vpath
 * @vdev: pointer to vdev
 * @vp_id: vpath to reset
 *
 * Resets the vpath
*/
static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	int ret = 0;

	/* check if device is down already */
	if (unlikely(!is_vxge_card_up(vdev)))
		return 0;

	/* is device reset already scheduled */
	if (test_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
		return 0;

	if (vdev->vpaths[vp_id].handle) {
		if (vxge_hw_vpath_reset(vdev->vpaths[vp_id].handle)
				== VXGE_HW_OK) {
			if (is_vxge_card_up(vdev) &&
				vxge_hw_vpath_recover_from_reset(
					vdev->vpaths[vp_id].handle)
					!= VXGE_HW_OK) {
				vxge_debug_init(VXGE_ERR,
					"vxge_hw_vpath_recover_from_reset "
					"failed for vpath:%d", vp_id);
				return status;
			}
		} else {
			vxge_debug_init(VXGE_ERR,
				"vxge_hw_vpath_reset failed for "
				"vpath:%d", vp_id);
			return status;
		}
	} else
		return VXGE_HW_FAIL;

	vxge_restore_vpath_mac_addr(&vdev->vpaths[vp_id]);
	vxge_restore_vpath_vid_table(&vdev->vpaths[vp_id]);

	/* Enable all broadcast */
	vxge_hw_vpath_bcast_enable(vdev->vpaths[vp_id].handle);

	/* Enable the interrupts */
	vxge_vpath_intr_enable(vdev, vp_id);

	smp_wmb();

	/* Enable the flow of traffic through the vpath */
	vxge_hw_vpath_enable(vdev->vpaths[vp_id].handle);

	smp_wmb();
	vxge_hw_vpath_rx_doorbell_init(vdev->vpaths[vp_id].handle);
	vdev->vpaths[vp_id].ring.last_status = VXGE_HW_OK;

	/* Vpath reset done */
	clear_bit(vp_id, &vdev->vp_reset);

	/* Start the vpath queue */
	vxge_wake_tx_queue(&vdev->vpaths[vp_id].fifo, NULL);

	return ret;
}
static int do_vxge_reset(struct vxgedev *vdev, int event)
{
	enum vxge_hw_status status;
	int ret = 0, vp_id, i;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET)) {
		/* check if device is down already */
		if (unlikely(!is_vxge_card_up(vdev)))
			return 0;

		/* is reset already scheduled */
		if (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
			return 0;
	}

	if (event == VXGE_LL_FULL_RESET) {
		/* wait for all the vpath reset to complete */
		for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
			while (test_bit(vp_id, &vdev->vp_reset))
				msleep(50);
		}

		/* if execution mode is set to debug, don't reset the adapter */
		if (unlikely(vdev->exec_mode)) {
			vxge_debug_init(VXGE_ERR,
				"%s: execution mode is debug, returning..",
				vdev->ndev->name);
			clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
			vxge_stop_all_tx_queue(vdev);
			return 0;
		}
	}

	if (event == VXGE_LL_FULL_RESET) {
		vxge_hw_device_intr_disable(vdev->devh);

		switch (vdev->cric_err_event) {
		case VXGE_HW_EVENT_UNKNOWN:
			vxge_stop_all_tx_queue(vdev);
			vxge_debug_init(VXGE_ERR,
				"fatal: %s: Disabling device due to "
				"unknown error",
				vdev->ndev->name);
			ret = -EPERM;
			goto out;
		case VXGE_HW_EVENT_RESET_START:
			break;
		case VXGE_HW_EVENT_RESET_COMPLETE:
		case VXGE_HW_EVENT_LINK_DOWN:
		case VXGE_HW_EVENT_LINK_UP:
		case VXGE_HW_EVENT_ALARM_CLEARED:
		case VXGE_HW_EVENT_ECCERR:
		case VXGE_HW_EVENT_MRPCIM_ECCERR:
			ret = -EPERM;
			goto out;
		case VXGE_HW_EVENT_FIFO_ERR:
		case VXGE_HW_EVENT_VPATH_ERR:
			break;
		case VXGE_HW_EVENT_CRITICAL_ERR:
			vxge_stop_all_tx_queue(vdev);
			vxge_debug_init(VXGE_ERR,
				"fatal: %s: Disabling device due to "
				"serious error",
				vdev->ndev->name);
			/* SOP or device reset required */
			/* This event is not currently used */
			ret = -EPERM;
			goto out;
		case VXGE_HW_EVENT_SERR:
			vxge_stop_all_tx_queue(vdev);
			vxge_debug_init(VXGE_ERR,
				"fatal: %s: Disabling device due to "
				"serious error",
				vdev->ndev->name);
			ret = -EPERM;
			goto out;
		case VXGE_HW_EVENT_SRPCIM_SERR:
		case VXGE_HW_EVENT_MRPCIM_SERR:
			ret = -EPERM;
			goto out;
		case VXGE_HW_EVENT_SLOT_FREEZE:
			vxge_stop_all_tx_queue(vdev);
			vxge_debug_init(VXGE_ERR,
				"fatal: %s: Disabling device due to "
				"slot freeze",
				vdev->ndev->name);
			ret = -EPERM;
			goto out;
		default:
			break;
		}
	}

	if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET))
		vxge_stop_all_tx_queue(vdev);

	if (event == VXGE_LL_FULL_RESET) {
		status = vxge_reset_all_vpaths(vdev);
		if (status != VXGE_HW_OK) {
			vxge_debug_init(VXGE_ERR,
				"fatal: %s: can not reset vpaths",
				vdev->ndev->name);
			ret = -EPERM;
			goto out;
		}
	}

	if (event == VXGE_LL_COMPL_RESET) {
		for (i = 0; i < vdev->no_of_vpath; i++)
			if (vdev->vpaths[i].handle) {
				if (vxge_hw_vpath_recover_from_reset(
					vdev->vpaths[i].handle)
						!= VXGE_HW_OK) {
					vxge_debug_init(VXGE_ERR,
						"vxge_hw_vpath_recover_"
						"from_reset failed for vpath: "
						"%d", i);
					ret = -EPERM;
					goto out;
				}
			} else {
				vxge_debug_init(VXGE_ERR,
					"vxge_hw_vpath_reset failed for "
					"vpath:%d", i);
				ret = -EPERM;
				goto out;
			}
	}

	if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_COMPL_RESET)) {
		/* Reprogram the DA table with populated mac addresses */
		for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
			vxge_restore_vpath_mac_addr(&vdev->vpaths[vp_id]);
			vxge_restore_vpath_vid_table(&vdev->vpaths[vp_id]);
		}

		/* enable vpath interrupts */
		for (i = 0; i < vdev->no_of_vpath; i++)
			vxge_vpath_intr_enable(vdev, i);

		vxge_hw_device_intr_enable(vdev->devh);

		smp_wmb();

		/* Indicate card up */
		set_bit(__VXGE_STATE_CARD_UP, &vdev->state);

		/* Get the traffic to flow through the vpaths */
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vxge_hw_vpath_enable(vdev->vpaths[i].handle);
			smp_wmb();
			vxge_hw_vpath_rx_doorbell_init(vdev->vpaths[i].handle);
		}

		vxge_wake_all_tx_queue(vdev);
	}

out:
	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...", __func__, __LINE__);

	/* Indicate reset done */
	if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_COMPL_RESET))
		clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state);

	return ret;
}
/*
 * vxge_reset
 * @vdev: pointer to ll device
 *
 * driver may reset the chip on events of serr, eccerr, etc
 */
int vxge_reset(struct vxgedev *vdev)
{
	do_vxge_reset(vdev, VXGE_LL_FULL_RESET);

	return 0;
}
/**
 * vxge_poll - Receive handler when Receive Polling is used.
 * @dev: pointer to the device structure.
 * @budget: Number of packets budgeted to be processed in this iteration.
 *
 * This function comes into picture only if Receive side is being handled
 * through polling (called NAPI in linux). It mostly does what the normal
 * Rx interrupt handler does in terms of descriptor and packet processing
 * but not in an interrupt context. Also it will process a specified number
 * of packets at most in one iteration. This value is passed down by the
 * kernel as the function argument 'budget'.
 */
static int vxge_poll_msix(struct napi_struct *napi, int budget)
{
	struct vxge_ring *ring =
		container_of(napi, struct vxge_ring, napi);
	int budget_org = budget;
	ring->budget = budget;

	vxge_hw_vpath_poll_rx(ring->handle);

	if (ring->pkts_processed < budget_org) {
		napi_complete(napi);
		/* Re enable the Rx interrupts for the vpath */
		vxge_hw_channel_msix_unmask(
				(struct __vxge_hw_channel *)ring->handle,
				ring->rx_vector_no);
	}

	return ring->pkts_processed;
}
static int vxge_poll_inta(struct napi_struct *napi, int budget)
{
	struct vxgedev *vdev = container_of(napi, struct vxgedev, napi);
	int pkts_processed = 0;
	int i;
	int budget_org = budget;
	struct vxge_ring *ring;

	struct __vxge_hw_device *hldev = (struct __vxge_hw_device *)
		pci_get_drvdata(vdev->pdev);

	for (i = 0; i < vdev->no_of_vpath; i++) {
		ring = &vdev->vpaths[i].ring;
		ring->budget = budget;
		vxge_hw_vpath_poll_rx(ring->handle);
		pkts_processed += ring->pkts_processed;
		budget -= ring->pkts_processed;
		if (budget <= 0)
			break;
	}

	VXGE_COMPLETE_ALL_TX(vdev);

	if (pkts_processed < budget_org) {
		napi_complete(napi);
		/* Re enable the Rx interrupts for the ring */
		vxge_hw_device_unmask_all(hldev);
		vxge_hw_device_flush_io(hldev);
	}

	return pkts_processed;
}
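/*
 * In INTA mode a single napi context polls every vpath ring in turn,
 * handing each ring the remaining budget and stopping once it is
 * exhausted; only when all rings finish under budget_org is the
 * interrupt line unmasked again via vxge_hw_device_unmask_all().
 */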
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * vxge_netpoll - netpoll event handler entry point
 * @dev : pointer to the device structure.
 *
 * This function will be called by upper layer to check for events on the
 * interface in situations where interrupts are disabled. It is used for
 * specific in-kernel networking tasks, such as remote consoles and kernel
 * debugging over the network (example netdump in RedHat).
 */
static void vxge_netpoll(struct net_device *dev)
{
	struct __vxge_hw_device *hldev;
	struct vxgedev *vdev;

	vdev = (struct vxgedev *)netdev_priv(dev);
	hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	if (pci_channel_offline(vdev->pdev))
		return;

	disable_irq(dev->irq);

	vxge_hw_device_clear_tx_rx(hldev);
	VXGE_COMPLETE_ALL_RX(vdev);
	VXGE_COMPLETE_ALL_TX(vdev);

	enable_irq(dev->irq);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...", __func__, __LINE__);
}
#endif
/* RTH configuration */
static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_hw_rth_hash_types hash_types;
	u8 itable[256] = {0}; /* indirection table */
	u8 mtable[256] = {0}; /* CPU to vpath mapping */
	int index;

	/*
	 * Filling
	 *	- itable with bucket numbers
	 *	- mtable with bucket-to-vpath mapping
	 */
	for (index = 0; index < (1 << vdev->config.rth_bkt_sz); index++) {
		itable[index] = index;
		mtable[index] = index % vdev->no_of_vpath;
	}

	/* Fill RTH hash types */
	hash_types.hash_type_tcpipv4_en = vdev->config.rth_hash_type_tcpipv4;
	hash_types.hash_type_ipv4_en = vdev->config.rth_hash_type_ipv4;
	hash_types.hash_type_tcpipv6_en = vdev->config.rth_hash_type_tcpipv6;
	hash_types.hash_type_ipv6_en = vdev->config.rth_hash_type_ipv6;
	hash_types.hash_type_tcpipv6ex_en =
					vdev->config.rth_hash_type_tcpipv6ex;
	hash_types.hash_type_ipv6ex_en = vdev->config.rth_hash_type_ipv6ex;

	/* set indirection table, bucket-to-vpath mapping */
	status = vxge_hw_vpath_rts_rth_itable_set(vdev->vp_handles,
						vdev->no_of_vpath,
						mtable, itable,
						vdev->config.rth_bkt_sz);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"RTH indirection table configuration failed "
			"for vpath:%d", vdev->vpaths[0].device_id);
		return status;
	}

	/*
	 * Because the itable_set() method uses the active_table field
	 * for the target virtual path the RTH config should be updated
	 * for all VPATHs. The h/w only uses the lowest numbered VPATH
	 * when steering frames.
	 */
	for (index = 0; index < vdev->no_of_vpath; index++) {
		status = vxge_hw_vpath_rts_rth_set(
				vdev->vpaths[index].handle,
				vdev->config.rth_algorithm,
				&hash_types,
				vdev->config.rth_bkt_sz);

		if (status != VXGE_HW_OK) {
			vxge_debug_init(VXGE_ERR,
				"RTH configuration failed for vpath:%d",
				vdev->vpaths[index].device_id);
			return status;
		}
	}

	return status;
}
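/*
 * Example of the tables built above: with rth_bkt_sz = 2 (4 buckets) and
 * 3 vpaths, itable[] = {0, 1, 2, 3} and mtable[] = {0, 1, 2, 0}, so RTH
 * hash bucket 3 steers its flows back to vpath 0.
 */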
int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
{
	struct vxge_mac_addrs *new_mac_entry;
	u8 *mac_address = NULL;

	if (vpath->mac_addr_cnt >= VXGE_MAX_LEARN_MAC_ADDR_CNT)
		return TRUE;

	new_mac_entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_ATOMIC);
	if (!new_mac_entry) {
		vxge_debug_mem(VXGE_ERR,
			"%s: memory allocation failed",
			VXGE_DRIVER_NAME);
		return FALSE;
	}

	list_add(&new_mac_entry->item, &vpath->mac_addr_list);

	/* Copy the new mac address to the list */
	mac_address = (u8 *)&new_mac_entry->macaddr;
	memcpy(mac_address, mac->macaddr, ETH_ALEN);

	new_mac_entry->state = mac->state;
	vpath->mac_addr_cnt++;

	/* Is this a multicast address */
	if (0x01 & mac->macaddr[0])
		vpath->mcast_addr_cnt++;

	return TRUE;
}
/* Add a mac address to DA table */
enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
	struct macInfo *mac)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_vpath *vpath;
	enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode;

	if (0x01 & mac->macaddr[0]) /* multicast address */
		duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE;
	else
		duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE;

	vpath = &vdev->vpaths[mac->vpath_no];
	status = vxge_hw_vpath_mac_addr_add(vpath->handle, mac->macaddr,
						mac->macmask, duplicate_mode);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"DA config add entry failed for vpath:%d",
			vpath->device_id);
	} else
		if (FALSE == vxge_mac_list_add(vpath, mac))
			status = -EPERM;

	return status;
}
int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
{
	struct list_head *entry, *next;
	u64 del_mac = 0;
	u8 *mac_address = (u8 *) (&del_mac);

	/* Copy the mac address to delete from the list */
	memcpy(mac_address, mac->macaddr, ETH_ALEN);

	list_for_each_safe(entry, next, &vpath->mac_addr_list) {
		if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac) {
			list_del(entry);
			kfree((struct vxge_mac_addrs *)entry);
			vpath->mac_addr_cnt--;

			/* Is this a multicast address */
			if (0x01 & mac->macaddr[0])
				vpath->mcast_addr_cnt--;
			return TRUE;
		}
	}

	return FALSE;
}
/* delete a mac address from DA table */
enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev,
	struct macInfo *mac)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_vpath *vpath;

	vpath = &vdev->vpaths[mac->vpath_no];
	status = vxge_hw_vpath_mac_addr_delete(vpath->handle, mac->macaddr,
						mac->macmask);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"DA config delete entry failed for vpath:%d",
			vpath->device_id);
	} else
		vxge_mac_list_del(vpath, mac);
	return status;
}
/* list all mac addresses from DA table */
static enum vxge_hw_status
vxge_search_mac_addr_in_da_table(struct vxge_vpath *vpath,
	struct macInfo *mac)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	unsigned char macmask[ETH_ALEN];
	unsigned char macaddr[ETH_ALEN];

	status = vxge_hw_vpath_mac_addr_get(vpath->handle,
				macaddr, macmask);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"DA config list entry failed for vpath:%d",
			vpath->device_id);
		return status;
	}

	while (memcmp(mac->macaddr, macaddr, ETH_ALEN)) {

		status = vxge_hw_vpath_mac_addr_get_next(vpath->handle,
				macaddr, macmask);
		if (status != VXGE_HW_OK)
			break;
	}

	return status;
}
/* Store all vlan ids from the list to the vid table */
enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxgedev *vdev = vpath->vdev;
	u16 vid;

	if (vdev->vlgrp && vpath->is_open) {

		for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
			if (!vlan_group_get_device(vdev->vlgrp, vid))
				continue;
			/* Add these vlan to the vid table */
			status = vxge_hw_vpath_vid_add(vpath->handle, vid);
		}
	}

	return status;
}
/* Store all mac addresses from the list to the DA table */
enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct macInfo mac_info;
	u8 *mac_address = NULL;
	struct list_head *entry, *next;

	memset(&mac_info, 0, sizeof(struct macInfo));

	if (vpath->is_open) {

		list_for_each_safe(entry, next, &vpath->mac_addr_list) {
			mac_address =
				(u8 *)&
				((struct vxge_mac_addrs *)entry)->macaddr;
			memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
			((struct vxge_mac_addrs *)entry)->state =
				VXGE_LL_MAC_ADDR_IN_DA_TABLE;
			/* does this mac address already exist in da table? */
			status = vxge_search_mac_addr_in_da_table(vpath,
				&mac_info);
			if (status != VXGE_HW_OK) {
				/* Add this mac address to the DA table */
				status = vxge_hw_vpath_mac_addr_add(
					vpath->handle, mac_info.macaddr,
					mac_info.macmask,
					VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE);
				if (status != VXGE_HW_OK) {
					vxge_debug_init(VXGE_ERR,
						"DA add entry failed for vpath:%d",
						vpath->device_id);
					((struct vxge_mac_addrs *)entry)->state
						= VXGE_LL_MAC_ADDR_IN_LIST;
				}
			}
		}
	}

	return status;
}
/* reset vpaths */
enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
{
	int i;
	enum vxge_hw_status status = VXGE_HW_OK;

	for (i = 0; i < vdev->no_of_vpath; i++)
		if (vdev->vpaths[i].handle) {
			if (vxge_hw_vpath_reset(vdev->vpaths[i].handle)
					== VXGE_HW_OK) {
				if (is_vxge_card_up(vdev) &&
					vxge_hw_vpath_recover_from_reset(
						vdev->vpaths[i].handle)
						!= VXGE_HW_OK) {
					vxge_debug_init(VXGE_ERR,
						"vxge_hw_vpath_recover_"
						"from_reset failed for vpath: "
						"%d", i);
					return VXGE_HW_FAIL;
				}
			} else {
				vxge_debug_init(VXGE_ERR,
					"vxge_hw_vpath_reset failed for "
					"vpath:%d", i);
				return VXGE_HW_FAIL;
			}
		}

	return status;
}
/* close vpaths */
void vxge_close_vpaths(struct vxgedev *vdev, int index)
{
	int i;
	for (i = index; i < vdev->no_of_vpath; i++) {
		if (vdev->vpaths[i].handle && vdev->vpaths[i].is_open) {
			vxge_hw_vpath_close(vdev->vpaths[i].handle);
			vdev->stats.vpaths_open--;
		}
		vdev->vpaths[i].is_open = 0;
		vdev->vpaths[i].handle = NULL;
	}
}
/* open vpaths */
int vxge_open_vpaths(struct vxgedev *vdev)
{
	enum vxge_hw_status status;
	int i;
	u32 vp_id = 0;
	struct vxge_hw_vpath_attr attr;

	for (i = 0; i < vdev->no_of_vpath; i++) {
		vxge_assert(vdev->vpaths[i].is_configured);
		attr.vp_id = vdev->vpaths[i].device_id;
		attr.fifo_attr.callback = vxge_xmit_compl;
		attr.fifo_attr.txdl_term = vxge_tx_term;
		attr.fifo_attr.per_txdl_space = sizeof(struct vxge_tx_priv);
		attr.fifo_attr.userdata = (void *)&vdev->vpaths[i].fifo;

		attr.ring_attr.callback = vxge_rx_1b_compl;
		attr.ring_attr.rxd_init = vxge_rx_initial_replenish;
		attr.ring_attr.rxd_term = vxge_rx_term;
		attr.ring_attr.per_rxd_space = sizeof(struct vxge_rx_priv);
		attr.ring_attr.userdata = (void *)&vdev->vpaths[i].ring;

		vdev->vpaths[i].ring.ndev = vdev->ndev;
		vdev->vpaths[i].ring.pdev = vdev->pdev;
		status = vxge_hw_vpath_open(vdev->devh, &attr,
				&(vdev->vpaths[i].handle));
		if (status == VXGE_HW_OK) {
			vdev->vpaths[i].fifo.handle =
			    (struct __vxge_hw_fifo *)attr.fifo_attr.userdata;
			vdev->vpaths[i].ring.handle =
			    (struct __vxge_hw_ring *)attr.ring_attr.userdata;
			vdev->vpaths[i].fifo.tx_steering_type =
				vdev->config.tx_steering_type;
			vdev->vpaths[i].fifo.ndev = vdev->ndev;
			vdev->vpaths[i].fifo.pdev = vdev->pdev;
			vdev->vpaths[i].fifo.indicate_max_pkts =
				vdev->config.fifo_indicate_max_pkts;
			vdev->vpaths[i].ring.rx_vector_no = 0;
			vdev->vpaths[i].ring.rx_csum = vdev->rx_csum;
			vdev->vpaths[i].is_open = 1;
			vdev->vp_handles[i] = vdev->vpaths[i].handle;
			vdev->vpaths[i].ring.gro_enable =
						vdev->config.gro_enable;
			vdev->vpaths[i].ring.vlan_tag_strip =
						vdev->vlan_tag_strip;
			vdev->stats.vpaths_open++;
		} else {
			vdev->stats.vpath_open_fail++;
			vxge_debug_init(VXGE_ERR,
				"%s: vpath: %d failed to open "
				"with status: %d",
				vdev->ndev->name, vdev->vpaths[i].device_id,
				status);
			vxge_close_vpaths(vdev, 0);
			return -EPERM;
		}

		vp_id =
		  ((struct __vxge_hw_vpath_handle *)vdev->vpaths[i].handle)->
		  vpath->vp_id;
		vdev->vpaths_deployed |= vxge_mBIT(vp_id);
	}
	return VXGE_HW_OK;
}
/*
 * vxge_isr_napi
 * @irq: the irq of the device.
 * @dev_id: a void pointer to the hldev structure of the Titan device
 * @ptregs: pointer to the registers pushed on the stack.
 *
 * This function is the ISR handler of the device when napi is enabled. It
 * identifies the reason for the interrupt and calls the relevant service
 * routines.
 */
static irqreturn_t vxge_isr_napi(int irq, void *dev_id)
{
	struct net_device *dev;
	struct __vxge_hw_device *hldev;
	u64 reason;
	enum vxge_hw_status status;
	struct vxgedev *vdev = (struct vxgedev *) dev_id;

	vxge_debug_intr(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	dev = vdev->ndev;
	hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);

	if (pci_channel_offline(vdev->pdev))
		return IRQ_NONE;

	if (unlikely(!is_vxge_card_up(vdev)))
		return IRQ_NONE;

	status = vxge_hw_device_begin_irq(hldev, vdev->exec_mode,
			&reason);
	if (status == VXGE_HW_OK) {
		vxge_hw_device_mask_all(hldev);

		if (reason &
			VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(
			vdev->vpaths_deployed >>
			(64 - VXGE_HW_MAX_VIRTUAL_PATHS))) {

			vxge_hw_device_clear_tx_rx(hldev);
			napi_schedule(&vdev->napi);
			vxge_debug_intr(VXGE_TRACE,
				"%s:%d Exiting...", __func__, __LINE__);
			return IRQ_HANDLED;
		} else
			vxge_hw_device_unmask_all(hldev);
	} else if (unlikely((status == VXGE_HW_ERR_VPATH) ||
		(status == VXGE_HW_ERR_CRITICAL) ||
		(status == VXGE_HW_ERR_FIFO))) {
		vxge_hw_device_mask_all(hldev);
		vxge_hw_device_flush_io(hldev);
		return IRQ_HANDLED;
	} else if (unlikely(status == VXGE_HW_ERR_SLOT_FREEZE))
		return IRQ_HANDLED;

	vxge_debug_intr(VXGE_TRACE, "%s:%d Exiting...", __func__, __LINE__);
	return IRQ_NONE;
}
#ifdef CONFIG_PCI_MSI

static irqreturn_t
vxge_tx_msix_handle(int irq, void *dev_id)
{
	struct vxge_fifo *fifo = (struct vxge_fifo *)dev_id;

	VXGE_COMPLETE_VPATH_TX(fifo);

	return IRQ_HANDLED;
}
static irqreturn_t
vxge_rx_msix_napi_handle(int irq, void *dev_id)
{
	struct vxge_ring *ring = (struct vxge_ring *)dev_id;

	/* MSIX_IDX for Rx is 1 */
	vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)ring->handle,
					ring->rx_vector_no);

	napi_schedule(&ring->napi);
	return IRQ_HANDLED;
}
static irqreturn_t
vxge_alarm_msix_handle(int irq, void *dev_id)
{
	int i;
	enum vxge_hw_status status;
	struct vxge_vpath *vpath = (struct vxge_vpath *)dev_id;
	struct vxgedev *vdev = vpath->vdev;
	int alarm_msix_id =
		VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;

	for (i = 0; i < vdev->no_of_vpath; i++) {
		vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle,
			alarm_msix_id);

		status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle,
			vdev->exec_mode);
		if (status == VXGE_HW_OK) {

			vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle,
				alarm_msix_id);
			continue;
		}
		vxge_debug_intr(VXGE_ERR,
			"%s: vxge_hw_vpath_alarm_process failed %x ",
			VXGE_DRIVER_NAME, status);
	}
	return IRQ_HANDLED;
}
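/*
 * All vpaths share a single alarm MSI-X vector (the last entry in the
 * table).  The handler above therefore masks the alarm vector on every
 * open vpath, runs per-vpath alarm processing, and re-unmasks it only
 * where processing returned VXGE_HW_OK, so a faulted vpath stays masked
 * until it is reset.
 */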
static int vxge_alloc_msix(struct vxgedev *vdev)
{
	int j, i, ret = 0;
	int intr_cnt = 0;
	int alarm_msix_id = 0, msix_intr_vect = 0;
	vdev->intr_cnt = 0;

	/* Tx/Rx MSIX Vectors count */
	vdev->intr_cnt = vdev->no_of_vpath * 2;

	/* Alarm MSIX Vectors count */
	vdev->intr_cnt++;

	intr_cnt = (vdev->max_vpath_supported * 2) + 1;
	vdev->entries = kzalloc(intr_cnt * sizeof(struct msix_entry),
						GFP_KERNEL);
	if (!vdev->entries) {
		vxge_debug_init(VXGE_ERR,
			"%s: memory allocation failed",
			VXGE_DRIVER_NAME);
		return -ENOMEM;
	}

	vdev->vxge_entries = kzalloc(intr_cnt * sizeof(struct vxge_msix_entry),
							GFP_KERNEL);
	if (!vdev->vxge_entries) {
		vxge_debug_init(VXGE_ERR, "%s: memory allocation failed",
			VXGE_DRIVER_NAME);
		kfree(vdev->entries);
		return -ENOMEM;
	}

	/* Last vector in the list is used for alarm */
	alarm_msix_id = VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;
	for (i = 0, j = 0; i < vdev->max_vpath_supported; i++) {

		msix_intr_vect = i * VXGE_HW_VPATH_MSIX_ACTIVE;

		/* Initialize the fifo vector */
		vdev->entries[j].entry = msix_intr_vect;
		vdev->vxge_entries[j].entry = msix_intr_vect;
		vdev->vxge_entries[j].in_use = 0;
		j++;

		/* Initialize the ring vector */
		vdev->entries[j].entry = msix_intr_vect + 1;
		vdev->vxge_entries[j].entry = msix_intr_vect + 1;
		vdev->vxge_entries[j].in_use = 0;
		j++;
	}

	/* Initialize the alarm vector */
	vdev->entries[j].entry = alarm_msix_id;
	vdev->vxge_entries[j].entry = alarm_msix_id;
	vdev->vxge_entries[j].in_use = 0;

	ret = pci_enable_msix(vdev->pdev, vdev->entries, intr_cnt);
	/* if driver request exceeds available irq's, request with a small
	 * number.
	 */
	if (ret > 0) {
		vxge_debug_init(VXGE_ERR,
			"%s: MSI-X enable failed for %d vectors, available: %d",
			VXGE_DRIVER_NAME, intr_cnt, ret);
		vdev->max_vpath_supported = vdev->no_of_vpath;
		intr_cnt = (vdev->max_vpath_supported * 2) + 1;

		/* Reset the alarm vector setting */
		vdev->entries[j].entry = 0;
		vdev->vxge_entries[j].entry = 0;

		/* Initialize the alarm vector with new setting */
		vdev->entries[intr_cnt - 1].entry = alarm_msix_id;
		vdev->vxge_entries[intr_cnt - 1].entry = alarm_msix_id;
		vdev->vxge_entries[intr_cnt - 1].in_use = 0;

		ret = pci_enable_msix(vdev->pdev, vdev->entries, intr_cnt);
		if (!ret)
			vxge_debug_init(VXGE_ERR,
				"%s: MSI-X enabled for %d vectors",
				VXGE_DRIVER_NAME, intr_cnt);
	}

	if (ret) {
		vxge_debug_init(VXGE_ERR,
			"%s: MSI-X enable failed for %d vectors, ret: %d",
			VXGE_DRIVER_NAME, intr_cnt, ret);
		kfree(vdev->entries);
		kfree(vdev->vxge_entries);
		vdev->entries = NULL;
		vdev->vxge_entries = NULL;
		return -ENODEV;
	}
	return 0;
}
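/*
 * Resulting MSI-X table layout (two Tx/Rx vectors per vpath plus one
 * shared alarm vector at the end), illustrated for a 4-vpath function:
 *
 *   entries[0] = vpath0 Tx   entries[1] = vpath0 Rx
 *   entries[2] = vpath1 Tx   entries[3] = vpath1 Rx
 *   entries[4] = vpath2 Tx   entries[5] = vpath2 Rx
 *   entries[6] = vpath3 Tx   entries[7] = vpath3 Rx
 *   entries[8] = alarm (shared by all vpaths)
 *
 * Hardware-side vector ids advance in steps of VXGE_HW_VPATH_MSIX_ACTIVE
 * per vpath: Tx = base, Rx = base + 1.
 */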
static int vxge_enable_msix(struct vxgedev *vdev)
{
	int i, ret = 0;
	enum vxge_hw_status status;
	/* 0 - Tx, 1 - Rx  */
	int tim_msix_id[4];
	int alarm_msix_id = 0, msix_intr_vect = 0;
	vdev->intr_cnt = 0;

	/* allocate msix vectors */
	ret = vxge_alloc_msix(vdev);
	if (!ret) {
		/* Last vector in the list is used for alarm */
		alarm_msix_id =
			VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;
		for (i = 0; i < vdev->no_of_vpath; i++) {

			/* If fifo or ring are not enabled
			   the MSIX vector for that should be set to 0
			   Hence initializing this array to all 0s.
			*/
			memset(tim_msix_id, 0, sizeof(tim_msix_id));
			msix_intr_vect = i * VXGE_HW_VPATH_MSIX_ACTIVE;
			tim_msix_id[0] = msix_intr_vect;

			tim_msix_id[1] = msix_intr_vect + 1;
			vdev->vpaths[i].ring.rx_vector_no = tim_msix_id[1];

			status = vxge_hw_vpath_msix_set(
						vdev->vpaths[i].handle,
						tim_msix_id, alarm_msix_id);
			if (status != VXGE_HW_OK) {
				vxge_debug_init(VXGE_ERR,
					"vxge_hw_vpath_msix_set "
					"failed with status : %x", status);
				kfree(vdev->entries);
				kfree(vdev->vxge_entries);
				pci_disable_msix(vdev->pdev);
				return -ENODEV;
			}
		}
	}

	return ret;
}
static void vxge_rem_msix_isr(struct vxgedev *vdev)
{
	int intr_cnt;

	for (intr_cnt = 0; intr_cnt < (vdev->max_vpath_supported * 2 + 1);
		intr_cnt++) {
		if (vdev->vxge_entries[intr_cnt].in_use) {
			synchronize_irq(vdev->entries[intr_cnt].vector);
			free_irq(vdev->entries[intr_cnt].vector,
				vdev->vxge_entries[intr_cnt].arg);
			vdev->vxge_entries[intr_cnt].in_use = 0;
		}
	}

	kfree(vdev->entries);
	kfree(vdev->vxge_entries);
	vdev->entries = NULL;
	vdev->vxge_entries = NULL;

	if (vdev->config.intr_type == MSI_X)
		pci_disable_msix(vdev->pdev);
}
#endif
static void vxge_rem_isr(struct vxgedev *vdev)
{
	struct __vxge_hw_device *hldev;
	hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);

#ifdef CONFIG_PCI_MSI
	if (vdev->config.intr_type == MSI_X) {
		vxge_rem_msix_isr(vdev);
	} else
#endif
	if (vdev->config.intr_type == INTA) {
			synchronize_irq(vdev->pdev->irq);
			free_irq(vdev->pdev->irq, vdev);
	}
}
static int vxge_add_isr(struct vxgedev *vdev)
{
	int ret = 0;
#ifdef CONFIG_PCI_MSI
	int vp_idx = 0, intr_idx = 0, intr_cnt = 0, msix_idx = 0, irq_req = 0;
	u64 function_mode = vdev->config.device_hw_info.function_mode;
	int pci_fun = PCI_FUNC(vdev->pdev->devfn);

	if (vdev->config.intr_type == MSI_X)
		ret = vxge_enable_msix(vdev);

	if (ret) {
		vxge_debug_init(VXGE_ERR,
			"%s: Enabling MSI-X Failed", VXGE_DRIVER_NAME);
		if ((function_mode == VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION) &&
			test_and_set_bit(__VXGE_STATE_CARD_UP,
				&driver_config->inta_dev_open))
			return VXGE_HW_FAIL;
		else {
			vxge_debug_init(VXGE_ERR,
				"%s: Defaulting to INTA", VXGE_DRIVER_NAME);
			vdev->config.intr_type = INTA;
			vxge_hw_device_set_intr_type(vdev->devh,
				VXGE_HW_INTR_MODE_IRQLINE);
			vxge_close_vpaths(vdev, 1);
			vdev->no_of_vpath = 1;
			vdev->stats.vpaths_open = 1;
		}
	}

	if (vdev->config.intr_type == MSI_X) {
		for (intr_idx = 0;
		     intr_idx < (vdev->no_of_vpath *
			VXGE_HW_VPATH_MSIX_ACTIVE); intr_idx++) {

			msix_idx = intr_idx % VXGE_HW_VPATH_MSIX_ACTIVE;
			irq_req = 0;

			switch (msix_idx) {
			case 0:
				snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
					"%s:vxge fn: %d vpath: %d Tx MSI-X: %d",
					vdev->ndev->name, pci_fun, vp_idx,
					vdev->entries[intr_cnt].entry);
				/* Fifo - Tx */
				ret = request_irq(
					vdev->entries[intr_cnt].vector,
					vxge_tx_msix_handle, 0,
					vdev->desc[intr_cnt],
					&vdev->vpaths[vp_idx].fifo);
				vdev->vxge_entries[intr_cnt].arg =
						&vdev->vpaths[vp_idx].fifo;
				irq_req = 1;
				break;
			case 1:
				snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
					"%s:vxge fn: %d vpath: %d Rx MSI-X: %d",
					vdev->ndev->name, pci_fun, vp_idx,
					vdev->entries[intr_cnt].entry);
				/* Ring - Rx */
				ret = request_irq(
					vdev->entries[intr_cnt].vector,
					vxge_rx_msix_napi_handle,
					0,
					vdev->desc[intr_cnt],
					&vdev->vpaths[vp_idx].ring);
				vdev->vxge_entries[intr_cnt].arg =
						&vdev->vpaths[vp_idx].ring;
				irq_req = 1;
				break;
			}

			if (ret) {
				vxge_debug_init(VXGE_ERR,
					"%s: MSIX - %d Registration failed",
					vdev->ndev->name, intr_cnt);
				vxge_rem_msix_isr(vdev);
				if ((function_mode ==
					VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION) &&
					test_and_set_bit(__VXGE_STATE_CARD_UP,
						&driver_config->inta_dev_open))
					return VXGE_HW_FAIL;
				else {
					vxge_hw_device_set_intr_type(
						vdev->devh,
						VXGE_HW_INTR_MODE_IRQLINE);
					vdev->config.intr_type = INTA;
					vxge_debug_init(VXGE_ERR,
						"%s: Defaulting to INTA"
						, vdev->ndev->name);
					vxge_close_vpaths(vdev, 1);
					vdev->no_of_vpath = 1;
					vdev->stats.vpaths_open = 1;
					goto INTA_MODE;
				}
			}

			if (irq_req) {
				/* We requested for this msix interrupt */
				vdev->vxge_entries[intr_cnt].in_use = 1;
				vxge_hw_vpath_msix_unmask(
					vdev->vpaths[vp_idx].handle,
					intr_idx);
				intr_cnt++;
			}

			/* Point to next vpath handler */
			if (((intr_idx + 1) % VXGE_HW_VPATH_MSIX_ACTIVE == 0)
				&& (vp_idx < (vdev->no_of_vpath - 1)))
				vp_idx++;
		}

		intr_cnt = vdev->max_vpath_supported * 2;
		snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
			"%s:vxge Alarm fn: %d MSI-X: %d",
			vdev->ndev->name, pci_fun,
			vdev->entries[intr_cnt].entry);
		/* For Alarm interrupts */
		ret = request_irq(vdev->entries[intr_cnt].vector,
					vxge_alarm_msix_handle, 0,
					vdev->desc[intr_cnt],
					&vdev->vpaths[vp_idx]);
		if (ret) {
			vxge_debug_init(VXGE_ERR,
				"%s: MSIX - %d Registration failed",
				vdev->ndev->name, intr_cnt);
			vxge_rem_msix_isr(vdev);
			if ((function_mode ==
				VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION) &&
				test_and_set_bit(__VXGE_STATE_CARD_UP,
						&driver_config->inta_dev_open))
				return VXGE_HW_FAIL;
			else {
				vxge_hw_device_set_intr_type(vdev->devh,
					VXGE_HW_INTR_MODE_IRQLINE);
				vdev->config.intr_type = INTA;
				vxge_debug_init(VXGE_ERR,
					"%s: Defaulting to INTA",
					vdev->ndev->name);
				vxge_close_vpaths(vdev, 1);
				vdev->no_of_vpath = 1;
				vdev->stats.vpaths_open = 1;
				goto INTA_MODE;
			}
		}

		vxge_hw_vpath_msix_unmask(vdev->vpaths[vp_idx].handle,
					intr_idx - 2);
		vdev->vxge_entries[intr_cnt].in_use = 1;
		vdev->vxge_entries[intr_cnt].arg = &vdev->vpaths[vp_idx];
	}
INTA_MODE:
#endif
	snprintf(vdev->desc[0], VXGE_INTR_STRLEN, "%s:vxge", vdev->ndev->name);

	if (vdev->config.intr_type == INTA) {
		ret = request_irq((int) vdev->pdev->irq,
			vxge_isr_napi,
			IRQF_SHARED, vdev->desc[0], vdev);
		if (ret) {
			vxge_debug_init(VXGE_ERR,
				"%s %s-%d: ISR registration failed",
				VXGE_DRIVER_NAME, "IRQ", vdev->pdev->irq);
			return -ENODEV;
		}
		vxge_debug_init(VXGE_TRACE,
			"new %s-%d line allocated",
			"IRQ", vdev->pdev->irq);
	}

	return VXGE_HW_OK;
}
static void vxge_poll_vp_reset(unsigned long data)
{
	struct vxgedev *vdev = (struct vxgedev *)data;
	int i, j = 0;

	for (i = 0; i < vdev->no_of_vpath; i++) {
		if (test_bit(i, &vdev->vp_reset)) {
			vxge_reset_vpath(vdev, i);
			j++;
		}
	}
	if (j && (vdev->config.intr_type != MSI_X)) {
		vxge_hw_device_unmask_all(vdev->devh);
		vxge_hw_device_flush_io(vdev->devh);
	}

	mod_timer(&vdev->vp_reset_timer, jiffies + HZ / 2);
}
static void vxge_poll_vp_lockup(unsigned long data)
{
	struct vxgedev *vdev = (struct vxgedev *)data;
	int i;
	struct vxge_ring *ring;
	enum vxge_hw_status status = VXGE_HW_OK;

	for (i = 0; i < vdev->no_of_vpath; i++) {
		ring = &vdev->vpaths[i].ring;
		/* Did this vpath receive any packets */
		if (ring->stats.prev_rx_frms == ring->stats.rx_frms) {
			status = vxge_hw_vpath_check_leak(ring->handle);

			/* Did it receive any packets last time */
			if ((VXGE_HW_FAIL == status) &&
				(VXGE_HW_FAIL == ring->last_status)) {

				/* schedule vpath reset */
				if (!test_and_set_bit(i, &vdev->vp_reset)) {

					/* disable interrupts for this vpath */
					vxge_vpath_intr_disable(vdev, i);

					/* stop the queue for this vpath */
					vxge_stop_tx_queue(&vdev->vpaths[i].
								fifo);
					continue;
				}
			}
		}
		ring->stats.prev_rx_frms = ring->stats.rx_frms;
		ring->last_status = status;
	}

	/* Check every 1 milli second */
	mod_timer(&vdev->vp_lockup_timer, jiffies + HZ / 1000);
}
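/*
 * The lockup test above is deliberately conservative: a vpath is only
 * scheduled for reset after two consecutive polls in which rx_frms did
 * not advance *and* vxge_hw_vpath_check_leak() reported VXGE_HW_FAIL
 * both times (last_status remembers the previous verdict).
 */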
/**
 * vxge_open
 * @dev: pointer to the device structure.
 *
 * This function is the open entry point of the driver. It mainly calls a
 * function to allocate Rx buffers and inserts them into the buffer
 * descriptors and then enables the Rx part of the NIC.
 * Return value: '0' on success and an appropriate (-)ve integer as
 * defined in errno.h file on failure.
 */
int
vxge_open(struct net_device *dev)
{
	enum vxge_hw_status status;
	struct vxgedev *vdev;
	struct __vxge_hw_device *hldev;
	int ret = 0;
	int i;
	u64 val64, function_mode;
	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d", dev->name, __func__, __LINE__);

	vdev = (struct vxgedev *)netdev_priv(dev);
	hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);
	function_mode = vdev->config.device_hw_info.function_mode;

	/* make sure you have link off by default every time
	 * Nic is initialized */
	netif_carrier_off(dev);

	/* Check for another device already open with INTA */
	if ((function_mode == VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION) &&
		test_bit(__VXGE_STATE_CARD_UP, &driver_config->inta_dev_open)) {
		ret = -EPERM;
		goto out0;
	}

	/* Open VPATHs */
	status = vxge_open_vpaths(vdev);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"%s: fatal: Vpath open failed", vdev->ndev->name);
		ret = -EPERM;
		goto out0;
	}

	vdev->mtu = dev->mtu;

	status = vxge_add_isr(vdev);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"%s: fatal: ISR add failed", dev->name);
		ret = -EPERM;
		goto out1;
	}

	if (vdev->config.intr_type != MSI_X) {
		netif_napi_add(dev, &vdev->napi, vxge_poll_inta,
			vdev->config.napi_weight);
		napi_enable(&vdev->napi);
		for (i = 0; i < vdev->no_of_vpath; i++)
			vdev->vpaths[i].ring.napi_p = &vdev->napi;
	} else {
		for (i = 0; i < vdev->no_of_vpath; i++) {
			netif_napi_add(dev, &vdev->vpaths[i].ring.napi,
			    vxge_poll_msix, vdev->config.napi_weight);
			napi_enable(&vdev->vpaths[i].ring.napi);
			vdev->vpaths[i].ring.napi_p =
				&vdev->vpaths[i].ring.napi;
		}
	}

	/* configure RTH */
	if (vdev->config.rth_steering) {
		status = vxge_rth_configure(vdev);
		if (status != VXGE_HW_OK) {
			vxge_debug_init(VXGE_ERR,
				"%s: fatal: RTH configuration failed",
				dev->name);
			ret = -EPERM;
			goto out2;
		}
	}

	for (i = 0; i < vdev->no_of_vpath; i++) {
		/* set initial mtu before enabling the device */
		status = vxge_hw_vpath_mtu_set(vdev->vpaths[i].handle,
						vdev->mtu);
		if (status != VXGE_HW_OK) {
			vxge_debug_init(VXGE_ERR,
				"%s: fatal: can not set new MTU", dev->name);
			ret = -EPERM;
			goto out2;
		}
	}

	VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_TRACE, VXGE_COMPONENT_LL, vdev);
	vxge_debug_init(vdev->level_trace,
		"%s: MTU is %d", vdev->ndev->name, vdev->mtu);
	VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_ERR, VXGE_COMPONENT_LL, vdev);

	/* Reprogram the DA table with populated mac addresses */
	for (i = 0; i < vdev->no_of_vpath; i++) {
		vxge_restore_vpath_mac_addr(&vdev->vpaths[i]);
		vxge_restore_vpath_vid_table(&vdev->vpaths[i]);
	}

	/* Enable vpath to sniff all unicast/multicast traffic that is not
	 * addressed to them. We allow promiscuous mode for PF only
	 */
	val64 = 0;
	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
		val64 |= VXGE_HW_RXMAC_AUTHORIZE_ALL_ADDR_VP(i);

	vxge_hw_mgmt_reg_write(vdev->devh,
		vxge_hw_mgmt_reg_type_mrpcim,
		0,
		(ulong)offsetof(struct vxge_hw_mrpcim_reg,
			rxmac_authorize_all_addr),
		val64);

	vxge_hw_mgmt_reg_write(vdev->devh,
		vxge_hw_mgmt_reg_type_mrpcim,
		0,
		(ulong)offsetof(struct vxge_hw_mrpcim_reg,
			rxmac_authorize_all_vid),
		val64);

	vxge_set_multicast(dev);

	/* Enabling Bcast and mcast for all vpath */
	for (i = 0; i < vdev->no_of_vpath; i++) {
		status = vxge_hw_vpath_bcast_enable(vdev->vpaths[i].handle);
		if (status != VXGE_HW_OK)
			vxge_debug_init(VXGE_ERR,
				"%s : Can not enable bcast for vpath "
				"id %d", dev->name, i);
		if (vdev->config.addr_learn_en) {
			status =
			    vxge_hw_vpath_mcast_enable(vdev->vpaths[i].handle);
			if (status != VXGE_HW_OK)
				vxge_debug_init(VXGE_ERR,
					"%s : Can not enable mcast for vpath "
					"id %d", dev->name, i);
		}
	}

	vxge_hw_device_setpause_data(vdev->devh, 0,
		vdev->config.tx_pause_enable,
		vdev->config.rx_pause_enable);

	if (vdev->vp_reset_timer.function == NULL)
		vxge_os_timer(vdev->vp_reset_timer,
			vxge_poll_vp_reset, vdev, (HZ/2));

	if (vdev->vp_lockup_timer.function == NULL)
		vxge_os_timer(vdev->vp_lockup_timer,
			vxge_poll_vp_lockup, vdev, (HZ/2));

	set_bit(__VXGE_STATE_CARD_UP, &vdev->state);

	smp_wmb();

	if (vxge_hw_device_link_state_get(vdev->devh) == VXGE_HW_LINK_UP) {
		netif_carrier_on(vdev->ndev);
		printk(KERN_NOTICE "%s: Link Up\n", vdev->ndev->name);
		vdev->stats.link_up++;
	}

	vxge_hw_device_intr_enable(vdev->devh);

	smp_wmb();

	for (i = 0; i < vdev->no_of_vpath; i++) {
		vxge_hw_vpath_enable(vdev->vpaths[i].handle);
		smp_wmb();
		vxge_hw_vpath_rx_doorbell_init(vdev->vpaths[i].handle);
	}

	vxge_start_all_tx_queue(vdev);
	goto out0;

out2:
	vxge_rem_isr(vdev);

	/* Disable napi */
	if (vdev->config.intr_type != MSI_X)
		napi_disable(&vdev->napi);
	else {
		for (i = 0; i < vdev->no_of_vpath; i++)
			napi_disable(&vdev->vpaths[i].ring.napi);
	}

out1:
	vxge_close_vpaths(vdev, 0);
out0:
	vxge_debug_entryexit(VXGE_TRACE,
				"%s: %s:%d  Exiting...",
				dev->name, __func__, __LINE__);
	return ret;
}
/* Loop through the mac address list and delete all the entries */
void vxge_free_mac_add_list(struct vxge_vpath *vpath)
{
	struct list_head *entry, *next;
	if (list_empty(&vpath->mac_addr_list))
		return;

	list_for_each_safe(entry, next, &vpath->mac_addr_list) {
		list_del(entry);
		kfree((struct vxge_mac_addrs *)entry);
	}
}
static void vxge_napi_del_all(struct vxgedev *vdev)
{
	int i;
	if (vdev->config.intr_type != MSI_X)
		netif_napi_del(&vdev->napi);
	else {
		for (i = 0; i < vdev->no_of_vpath; i++)
			netif_napi_del(&vdev->vpaths[i].ring.napi);
	}
}
int do_vxge_close(struct net_device *dev, int do_io)
{
	enum vxge_hw_status status;
	struct vxgedev *vdev;
	struct __vxge_hw_device *hldev;
	int i;
	u64 val64, vpath_vector;
	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		dev->name, __func__, __LINE__);

	vdev = (struct vxgedev *)netdev_priv(dev);
	hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);

	if (unlikely(!is_vxge_card_up(vdev)))
		return 0;

	/* If vxge_handle_crit_err task is executing,
	 * wait till it completes. */
	while (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
		msleep(50);

	clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
	if (do_io) {
		/* Put the vpath back in normal mode */
		vpath_vector = vxge_mBIT(vdev->vpaths[0].device_id);
		status = vxge_hw_mgmt_reg_read(vdev->devh,
				vxge_hw_mgmt_reg_type_mrpcim,
				0,
				(ulong)offsetof(
					struct vxge_hw_mrpcim_reg,
					rts_mgr_cbasin_cfg),
				&val64);

		if (status == VXGE_HW_OK) {
			val64 &= ~vpath_vector;
			status = vxge_hw_mgmt_reg_write(vdev->devh,
					vxge_hw_mgmt_reg_type_mrpcim,
					0,
					(ulong)offsetof(
						struct vxge_hw_mrpcim_reg,
						rts_mgr_cbasin_cfg),
					val64);
		}

		/* Remove the function 0 from promiscuous mode */
		vxge_hw_mgmt_reg_write(vdev->devh,
			vxge_hw_mgmt_reg_type_mrpcim,
			0,
			(ulong)offsetof(struct vxge_hw_mrpcim_reg,
				rxmac_authorize_all_addr),
			0);

		vxge_hw_mgmt_reg_write(vdev->devh,
			vxge_hw_mgmt_reg_type_mrpcim,
			0,
			(ulong)offsetof(struct vxge_hw_mrpcim_reg,
				rxmac_authorize_all_vid),
			0);

		smp_wmb();
	}
	del_timer_sync(&vdev->vp_lockup_timer);

	del_timer_sync(&vdev->vp_reset_timer);

	/* Disable napi */
	if (vdev->config.intr_type != MSI_X)
		napi_disable(&vdev->napi);
	else {
		for (i = 0; i < vdev->no_of_vpath; i++)
			napi_disable(&vdev->vpaths[i].ring.napi);
	}

	netif_carrier_off(vdev->ndev);
	printk(KERN_NOTICE "%s: Link Down\n", vdev->ndev->name);
	vxge_stop_all_tx_queue(vdev);

	/* Note that at this point xmit() is stopped by upper layer */
	if (do_io)
		vxge_hw_device_intr_disable(vdev->devh);

	mdelay(1000);

	vxge_rem_isr(vdev);

	vxge_napi_del_all(vdev);

	if (do_io)
		vxge_reset_all_vpaths(vdev);

	vxge_close_vpaths(vdev, 0);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d  Exiting...", dev->name, __func__, __LINE__);

	clear_bit(__VXGE_STATE_CARD_UP, &driver_config->inta_dev_open);
	clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state);

	return 0;
}
/**
 * vxge_close
 * @dev: device pointer.
 *
 * This is the stop entry point of the driver. It needs to undo exactly
 * whatever was done by the open entry point, thus it's usually referred to
 * as the close function. Among other things this function mainly stops the
 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
 * Return value: '0' on success and an appropriate (-)ve integer as
 * defined in errno.h file on failure.
 */
int
vxge_close(struct net_device *dev)
{
	do_vxge_close(dev, 1);
	return 0;
}
/**
 * vxge_change_mtu
 * @dev: net device pointer.
 * @new_mtu: the new MTU size for the device.
 *
 * A driver entry point to change MTU size for the device. Before changing
 * the MTU the device must be stopped.
 */
static int vxge_change_mtu(struct net_device *dev, int new_mtu)
{
	struct vxgedev *vdev = netdev_priv(dev);

	vxge_debug_entryexit(vdev->level_trace,
		"%s:%d", __func__, __LINE__);
	if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > VXGE_HW_MAX_MTU)) {
		vxge_debug_init(vdev->level_err,
			"%s: mtu size is invalid", dev->name);
		return -EPERM;
	}

	/* check if device is down already */
	if (unlikely(!is_vxge_card_up(vdev))) {
		/* just store new value, will use later on open() */
		dev->mtu = new_mtu;
		vxge_debug_init(vdev->level_err,
			"%s", "device is down on MTU change");
		return 0;
	}

	vxge_debug_init(vdev->level_trace,
		"trying to apply new MTU %d", new_mtu);

	if (vxge_close(dev))
		return -EIO;

	dev->mtu = new_mtu;
	vdev->mtu = new_mtu;

	if (vxge_open(dev))
		return -EIO;

	vxge_debug_init(vdev->level_trace,
		"%s: MTU changed to %d", vdev->ndev->name, new_mtu);

	vxge_debug_entryexit(vdev->level_trace,
		"%s:%d  Exiting...", __func__, __LINE__);

	return 0;
}
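/*
 * An MTU change on a running interface is applied by a full close/open
 * cycle rather than by reprogramming live vpaths: vxge_open() pushes
 * the stored vdev->mtu to every vpath through vxge_hw_vpath_mtu_set().
 */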
/**
 * vxge_get_stats
 * @dev: pointer to the device structure
 *
 * Updates the device statistics structure. This function updates the device
 * statistics structure in the net_device structure and returns a pointer
 * to it.
 */
static struct net_device_stats *
vxge_get_stats(struct net_device *dev)
{
	struct vxgedev *vdev;
	struct net_device_stats *net_stats;
	int k;

	vdev = netdev_priv(dev);

	net_stats = &vdev->stats.net_stats;

	memset(net_stats, 0, sizeof(struct net_device_stats));

	for (k = 0; k < vdev->no_of_vpath; k++) {
		net_stats->rx_packets += vdev->vpaths[k].ring.stats.rx_frms;
		net_stats->rx_bytes += vdev->vpaths[k].ring.stats.rx_bytes;
		net_stats->rx_errors += vdev->vpaths[k].ring.stats.rx_errors;
		net_stats->multicast += vdev->vpaths[k].ring.stats.rx_mcast;
		net_stats->rx_dropped +=
			vdev->vpaths[k].ring.stats.rx_dropped;

		net_stats->tx_packets += vdev->vpaths[k].fifo.stats.tx_frms;
		net_stats->tx_bytes += vdev->vpaths[k].fifo.stats.tx_bytes;
		net_stats->tx_errors += vdev->vpaths[k].fifo.stats.tx_errors;
	}

	return net_stats;
}
/**
 * vxge_ioctl
 * @dev: Device pointer.
 * @ifr: An IOCTL specific structure, that can contain a pointer to
 *       a proprietary structure used to pass information to the driver.
 * @cmd: This is used to distinguish between the different commands that
 *       can be passed to the IOCTL functions.
 *
 * Entry point for the Ioctl.
 */
static int vxge_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	return -EOPNOTSUPP;
}
/**
 * vxge_tx_watchdog
 * @dev: pointer to net device structure
 *
 * Watchdog for transmit side.
 * This function is triggered if the Tx Queue is stopped
 * for a pre-defined amount of time when the Interface is still up.
 */
static void
vxge_tx_watchdog(struct net_device *dev)
{
	struct vxgedev *vdev;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	vdev = (struct vxgedev *)netdev_priv(dev);

	vdev->cric_err_event = VXGE_HW_EVENT_RESET_START;

	vxge_reset(vdev);
	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d  Exiting...", __func__, __LINE__);
}
/*
 * vxge_vlan_rx_register
 * @dev: net device pointer.
 * @grp: vlan group
 *
 * Vlan group registration
 */
static void
vxge_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct vxgedev *vdev;
	struct vxge_vpath *vpath;
	int vp;
	u64 vid;
	enum vxge_hw_status status;
	int i;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	vdev = (struct vxgedev *)netdev_priv(dev);

	vpath = &vdev->vpaths[0];
	if ((NULL == grp) && (vpath->is_open)) {
		/* Get the first vlan */
		status = vxge_hw_vpath_vid_get(vpath->handle, &vid);

		while (status == VXGE_HW_OK) {

			/* Delete this vlan from the vid table */
			for (vp = 0; vp < vdev->no_of_vpath; vp++) {
				vpath = &vdev->vpaths[vp];
				if (!vpath->is_open)
					continue;

				vxge_hw_vpath_vid_delete(vpath->handle, vid);
			}

			/* Get the next vlan to be deleted */
			vpath = &vdev->vpaths[0];
			status = vxge_hw_vpath_vid_get(vpath->handle, &vid);
		}
	}

	vdev->vlgrp = grp;

	for (i = 0; i < vdev->no_of_vpath; i++) {
		if (vdev->vpaths[i].is_configured)
			vdev->vpaths[i].ring.vlgrp = grp;
	}

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d  Exiting...", __func__, __LINE__);
}
/*
 * vxge_vlan_rx_add_vid
 * @dev: net device pointer.
 * @vid: vid
 *
 * Add the vlan id to the devices vlan id table
 */
static void
vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
	struct vxgedev *vdev;
	struct vxge_vpath *vpath;
	int vp_id;

	vdev = (struct vxgedev *)netdev_priv(dev);

	/* Add these vlan to the vid table */
	for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
		vpath = &vdev->vpaths[vp_id];
		if (!vpath->is_open)
			continue;
		vxge_hw_vpath_vid_add(vpath->handle, vid);
	}
}
/*
 * vxge_vlan_rx_kill_vid
 * @dev: net device pointer.
 * @vid: vid
 *
 * Remove the vlan id from the device's vlan id table
 */
static void
vxge_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct vxgedev *vdev;
	struct vxge_vpath *vpath;
	int vp_id;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	vdev = (struct vxgedev *)netdev_priv(dev);

	vlan_group_set_device(vdev->vlgrp, vid, NULL);

	/* Delete this vlan from the vid table */
	for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
		vpath = &vdev->vpaths[vp_id];
		if (!vpath->is_open)
			continue;
		vxge_hw_vpath_vid_delete(vpath->handle, vid);
	}
	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d  Exiting...", __func__, __LINE__);
}
static const struct net_device_ops vxge_netdev_ops = {
	.ndo_open               = vxge_open,
	.ndo_stop               = vxge_close,
	.ndo_get_stats          = vxge_get_stats,
	.ndo_start_xmit         = vxge_xmit,
	.ndo_validate_addr      = eth_validate_addr,
	.ndo_set_multicast_list = vxge_set_multicast,

	.ndo_do_ioctl           = vxge_ioctl,

	.ndo_set_mac_address    = vxge_set_mac_addr,
	.ndo_change_mtu         = vxge_change_mtu,
	.ndo_vlan_rx_register   = vxge_vlan_rx_register,
	.ndo_vlan_rx_kill_vid   = vxge_vlan_rx_kill_vid,
	.ndo_vlan_rx_add_vid    = vxge_vlan_rx_add_vid,

	.ndo_tx_timeout         = vxge_tx_watchdog,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller    = vxge_netpoll,
#endif
};
int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
				   struct vxge_config *config,
				   int high_dma, int no_of_vpath,
				   struct vxgedev **vdev_out)
{
	struct net_device *ndev;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxgedev *vdev;
	int i, ret = 0, no_of_queue = 1;
	u64 stat;

	*vdev_out = NULL;
	if (config->tx_steering_type == TX_MULTIQ_STEERING)
		no_of_queue = no_of_vpath;

	ndev = alloc_etherdev_mq(sizeof(struct vxgedev),
			no_of_queue);
	if (ndev == NULL) {
		vxge_debug_init(
			vxge_hw_device_trace_level_get(hldev),
			"%s : device allocation failed", __func__);
		ret = -ENODEV;
		goto _out0;
	}

	vxge_debug_entryexit(
		vxge_hw_device_trace_level_get(hldev),
		"%s: %s:%d  Entering...",
		ndev->name, __func__, __LINE__);

	vdev = netdev_priv(ndev);
	memset(vdev, 0, sizeof(struct vxgedev));

	vdev->ndev = ndev;
	vdev->devh = hldev;
	vdev->pdev = hldev->pdev;
	memcpy(&vdev->config, config, sizeof(struct vxge_config));
	vdev->rx_csum = 1;	/* Enable Rx CSUM by default. */

	SET_NETDEV_DEV(ndev, &vdev->pdev->dev);

	ndev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
				NETIF_F_HW_VLAN_FILTER;
	/*  Driver entry points */
	ndev->irq = vdev->pdev->irq;
	ndev->base_addr = (unsigned long) hldev->bar0;

	ndev->netdev_ops = &vxge_netdev_ops;

	ndev->watchdog_timeo = VXGE_LL_WATCH_DOG_TIMEOUT;

	initialize_ethtool_ops(ndev);

	/* Allocate memory for vpath */
	vdev->vpaths = kzalloc((sizeof(struct vxge_vpath)) *
				no_of_vpath, GFP_KERNEL);
	if (!vdev->vpaths) {
		vxge_debug_init(VXGE_ERR,
			"%s: vpath memory allocation failed",
			vdev->ndev->name);
		ret = -ENODEV;
		goto _out1;
	}

	ndev->features |= NETIF_F_SG;

	ndev->features |= NETIF_F_HW_CSUM;
	vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
		"%s : checksumming enabled", __func__);

	if (high_dma) {
		ndev->features |= NETIF_F_HIGHDMA;
		vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
			"%s : using High DMA", __func__);
	}

	ndev->features |= NETIF_F_TSO | NETIF_F_TSO6;

	if (vdev->config.gro_enable)
		ndev->features |= NETIF_F_GRO;

	if (vdev->config.tx_steering_type == TX_MULTIQ_STEERING)
		ndev->real_num_tx_queues = no_of_vpath;

#ifdef NETIF_F_LLTX
	ndev->features |= NETIF_F_LLTX;
#endif

	for (i = 0; i < no_of_vpath; i++)
		spin_lock_init(&vdev->vpaths[i].fifo.tx_lock);

	if (register_netdev(ndev)) {
		vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
			"%s: %s : device registration failed!",
			ndev->name, __func__);
		ret = -ENODEV;
		goto _out2;
	}

	/*  Set the factory defined MAC address initially */
	ndev->addr_len = ETH_ALEN;

	/* Make Link state as off at this point, when the Link change
	 * interrupt comes the state will be automatically changed to
	 * the right state.
	 */
	netif_carrier_off(ndev);

	vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
		"%s: Ethernet device registered",
		ndev->name);

	*vdev_out = vdev;

	/* Resetting the Device stats */
	status = vxge_hw_mrpcim_stats_access(
		hldev,
		VXGE_HW_STATS_OP_CLEAR_ALL_STATS,
		0,
		0,
		&stat);

	if (status == VXGE_HW_ERR_PRIVILAGED_OPEARATION)
		vxge_debug_init(
			vxge_hw_device_trace_level_get(hldev),
			"%s: device stats clear returns "
			"VXGE_HW_ERR_PRIVILAGED_OPEARATION", ndev->name);

	vxge_debug_entryexit(vxge_hw_device_trace_level_get(hldev),
		"%s: %s:%d  Exiting...",
		ndev->name, __func__, __LINE__);

	return ret;
_out2:
	kfree(vdev->vpaths);
_out1:
	free_netdev(ndev);
_out0:
	return ret;
}
/*
 * vxge_device_unregister
 *
 * This function will unregister and free network device
 */
void
vxge_device_unregister(struct __vxge_hw_device *hldev)
{
	struct vxgedev *vdev;
	struct net_device *dev;
	char buf[IFNAMSIZ];
#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
	(VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
	u32 level_trace;
#endif

	dev = hldev->ndev;
	vdev = netdev_priv(dev);
#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
	(VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
	level_trace = vdev->level_trace;
#endif
	vxge_debug_entryexit(level_trace,
		"%s: %s:%d", vdev->ndev->name, __func__, __LINE__);

	memcpy(buf, vdev->ndev->name, IFNAMSIZ);

	/* in 2.6 will call stop() if device is up */
	unregister_netdev(dev);

	flush_scheduled_work();

	vxge_debug_init(level_trace, "%s: ethernet device unregistered", buf);
	vxge_debug_entryexit(level_trace,
		"%s: %s:%d  Exiting...", buf, __func__, __LINE__);
}
/*
 * vxge_callback_crit_err
 *
 * This function is called by the alarm handler in interrupt context.
 * Driver must analyze it based on the event type.
 */
static void
vxge_callback_crit_err(struct __vxge_hw_device *hldev,
			enum vxge_hw_event type, u64 vp_id)
{
	struct net_device *dev = hldev->ndev;
	struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
	int vpath_idx;

	vxge_debug_entryexit(vdev->level_trace,
		"%s: %s:%d", vdev->ndev->name, __func__, __LINE__);

	/* Note: This event type should be used for device wide
	 * indications only - Serious errors, Slot freeze and critical errors
	 */
	vdev->cric_err_event = type;

	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++)
		if (vdev->vpaths[vpath_idx].device_id == vp_id)
			break;

	if (!test_bit(__VXGE_STATE_RESET_CARD, &vdev->state)) {
		if (type == VXGE_HW_EVENT_SLOT_FREEZE) {
			vxge_debug_init(VXGE_ERR,
				"%s: Slot is frozen", vdev->ndev->name);
		} else if (type == VXGE_HW_EVENT_SERR) {
			vxge_debug_init(VXGE_ERR,
				"%s: Encountered Serious Error",
				vdev->ndev->name);
		} else if (type == VXGE_HW_EVENT_CRITICAL_ERR)
			vxge_debug_init(VXGE_ERR,
				"%s: Encountered Critical Error",
				vdev->ndev->name);
	}

	if ((type == VXGE_HW_EVENT_SERR) ||
		(type == VXGE_HW_EVENT_SLOT_FREEZE)) {
		if (unlikely(vdev->exec_mode))
			clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
	} else if (type == VXGE_HW_EVENT_CRITICAL_ERR) {
		vxge_hw_device_mask_all(hldev);
		if (unlikely(vdev->exec_mode))
			clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
	} else if ((type == VXGE_HW_EVENT_FIFO_ERR) ||
		  (type == VXGE_HW_EVENT_VPATH_ERR)) {

		if (unlikely(vdev->exec_mode))
			clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
		else {
			/* check if this vpath is already set for reset */
			if (!test_and_set_bit(vpath_idx, &vdev->vp_reset)) {

				/* disable interrupts for this vpath */
				vxge_vpath_intr_disable(vdev, vpath_idx);

				/* stop the queue for this vpath */
				vxge_stop_tx_queue(&vdev->vpaths[vpath_idx].
							fifo);
			}
		}
	}

	vxge_debug_entryexit(vdev->level_trace,
		"%s: %s:%d  Exiting...",
		vdev->ndev->name, __func__, __LINE__);
}
static void verify_bandwidth(void)
{
	int i, band_width, total = 0, equal_priority = 0;

	/* 1. If user enters 0 for some fifo, give equal priority to all */
	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (bw_percentage[i] == 0) {
			equal_priority = 1;
			break;
		}
	}

	if (!equal_priority) {
		/* 2. If sum exceeds 100, give equal priority to all */
		for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
			if (bw_percentage[i] == 0xFF)
				break;

			total += bw_percentage[i];
			if (total > VXGE_HW_VPATH_BANDWIDTH_MAX) {
				equal_priority = 1;
				break;
			}
		}
	}

	if (!equal_priority) {
		/* Is all the bandwidth consumed? */
		if (total < VXGE_HW_VPATH_BANDWIDTH_MAX) {
			if (i < VXGE_HW_MAX_VIRTUAL_PATHS) {
				/* Split rest of bw equally among next VPs*/
				band_width =
				  (VXGE_HW_VPATH_BANDWIDTH_MAX - total) /
					(VXGE_HW_MAX_VIRTUAL_PATHS - i);
				if (band_width < 2) /* min of 2% */
					equal_priority = 1;
				else {
					for (; i < VXGE_HW_MAX_VIRTUAL_PATHS;
						i++)
						bw_percentage[i] =
							band_width;
				}
			}
		} else if (i < VXGE_HW_MAX_VIRTUAL_PATHS)
			equal_priority = 1;
	}

	if (equal_priority) {
		vxge_debug_init(VXGE_ERR,
			"%s: Assigning equal bandwidth to all the vpaths",
			VXGE_DRIVER_NAME);
		bw_percentage[0] = VXGE_HW_VPATH_BANDWIDTH_MAX /
					VXGE_HW_MAX_VIRTUAL_PATHS;
		for (i = 1; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
			bw_percentage[i] = bw_percentage[0];
	}
}
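/*
 * Worked example (assuming VXGE_HW_VPATH_BANDWIDTH_MAX is 100): with
 * bw_percentage = {40, 40, 0xFF, ...} the explicit entries total 80,
 * leaving 20% for the 15 remaining vpaths.  20 / 15 rounds down to 1%,
 * which is under the 2% floor, so the whole table falls back to the
 * equal split of 100 / VXGE_HW_MAX_VIRTUAL_PATHS = 5% per vpath.
 */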
/*
 * Vpath configuration
 */
static int __devinit vxge_config_vpaths(
			struct vxge_hw_device_config *device_config,
			u64 vpath_mask, struct vxge_config *config_param)
{
	int i, no_of_vpaths = 0, default_no_vpath = 0, temp;
	u32 txdl_size, txdl_per_memblock;

	temp = driver_config->vpath_per_dev;
	if ((driver_config->vpath_per_dev == VXGE_USE_DEFAULT) &&
		(max_config_dev == VXGE_MAX_CONFIG_DEV)) {
		/* No more CPU. Return vpath number as zero.*/
		if (driver_config->g_no_cpus == -1)
			return 0;

		if (!driver_config->g_no_cpus)
			driver_config->g_no_cpus = num_online_cpus();

		driver_config->vpath_per_dev = driver_config->g_no_cpus >> 1;
		if (!driver_config->vpath_per_dev)
			driver_config->vpath_per_dev = 1;

		for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
			if (!vxge_bVALn(vpath_mask, i, 1))
				continue;
			else
				default_no_vpath++;
		if (default_no_vpath < driver_config->vpath_per_dev)
			driver_config->vpath_per_dev = default_no_vpath;

		driver_config->g_no_cpus = driver_config->g_no_cpus -
				(driver_config->vpath_per_dev * 2);
		if (driver_config->g_no_cpus <= 0)
			driver_config->g_no_cpus = -1;
	}

	if (driver_config->vpath_per_dev == 1) {
		vxge_debug_ll_config(VXGE_TRACE,
			"%s: Disable tx and rx steering, "
			"as single vpath is configured", VXGE_DRIVER_NAME);
		config_param->rth_steering = NO_STEERING;
		config_param->tx_steering_type = NO_STEERING;
		device_config->rth_en = 0;
	}

	/* configure bandwidth */
	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
		device_config->vp_config[i].min_bandwidth = bw_percentage[i];

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		device_config->vp_config[i].vp_id = i;
		device_config->vp_config[i].mtu = VXGE_HW_DEFAULT_MTU;
		if (no_of_vpaths < driver_config->vpath_per_dev) {
			if (!vxge_bVALn(vpath_mask, i, 1)) {
				vxge_debug_ll_config(VXGE_TRACE,
					"%s: vpath: %d is not available",
					VXGE_DRIVER_NAME, i);
				continue;
			} else {
				vxge_debug_ll_config(VXGE_TRACE,
					"%s: vpath: %d available",
					VXGE_DRIVER_NAME, i);
				no_of_vpaths++;
			}
		} else {
			vxge_debug_ll_config(VXGE_TRACE,
				"%s: vpath: %d is not configured, "
				"max_config_vpath exceeded",
				VXGE_DRIVER_NAME, i);
			break;
		}

		/* Configure Tx fifo's */
		device_config->vp_config[i].fifo.enable =
						VXGE_HW_FIFO_ENABLE;
		device_config->vp_config[i].fifo.max_frags =
				MAX_SKB_FRAGS;
		device_config->vp_config[i].fifo.memblock_size =
			VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE;

		txdl_size = MAX_SKB_FRAGS * sizeof(struct vxge_hw_fifo_txd);
		txdl_per_memblock = VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE / txdl_size;

		device_config->vp_config[i].fifo.fifo_blocks =
			((VXGE_DEF_FIFO_LENGTH - 1) / txdl_per_memblock) + 1;

		device_config->vp_config[i].fifo.intr =
				VXGE_HW_FIFO_QUEUE_INTR_DISABLE;

		/* Configure tti properties */
		device_config->vp_config[i].tti.intr_enable =
					VXGE_HW_TIM_INTR_ENABLE;

		device_config->vp_config[i].tti.btimer_val =
			(VXGE_TTI_BTIMER_VAL * 1000) / 272;

		device_config->vp_config[i].tti.timer_ac_en =
				VXGE_HW_TIM_TIMER_AC_ENABLE;

		/* For msi-x with napi (each vector
		has a handler of its own) -
		Set CI to OFF for all vpaths */
		device_config->vp_config[i].tti.timer_ci_en =
			VXGE_HW_TIM_TIMER_CI_DISABLE;

		device_config->vp_config[i].tti.timer_ri_en =
				VXGE_HW_TIM_TIMER_RI_DISABLE;

		device_config->vp_config[i].tti.util_sel =
			VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_NET_UTIL;

		device_config->vp_config[i].tti.ltimer_val =
			(VXGE_TTI_LTIMER_VAL * 1000) / 272;

		device_config->vp_config[i].tti.rtimer_val =
			(VXGE_TTI_RTIMER_VAL * 1000) / 272;

		device_config->vp_config[i].tti.urange_a = TTI_TX_URANGE_A;
		device_config->vp_config[i].tti.urange_b = TTI_TX_URANGE_B;
		device_config->vp_config[i].tti.urange_c = TTI_TX_URANGE_C;
		device_config->vp_config[i].tti.uec_a = TTI_TX_UFC_A;
		device_config->vp_config[i].tti.uec_b = TTI_TX_UFC_B;
		device_config->vp_config[i].tti.uec_c = TTI_TX_UFC_C;
		device_config->vp_config[i].tti.uec_d = TTI_TX_UFC_D;

		/* Configure Rx rings */
		device_config->vp_config[i].ring.enable =
						VXGE_HW_RING_ENABLE;

		device_config->vp_config[i].ring.ring_blocks =
						VXGE_HW_DEF_RING_BLOCKS;
		device_config->vp_config[i].ring.buffer_mode =
			VXGE_HW_RING_RXD_BUFFER_MODE_1;
		device_config->vp_config[i].ring.rxds_limit =
				VXGE_HW_DEF_RING_RXDS_LIMIT;
		device_config->vp_config[i].ring.scatter_mode =
					VXGE_HW_RING_SCATTER_MODE_A;

		/* Configure rti properties */
		device_config->vp_config[i].rti.intr_enable =
				VXGE_HW_TIM_INTR_ENABLE;

		device_config->vp_config[i].rti.btimer_val =
			(VXGE_RTI_BTIMER_VAL * 1000)/272;

		device_config->vp_config[i].rti.timer_ac_en =
						VXGE_HW_TIM_TIMER_AC_ENABLE;

		device_config->vp_config[i].rti.timer_ci_en =
						VXGE_HW_TIM_TIMER_CI_DISABLE;

		device_config->vp_config[i].rti.timer_ri_en =
						VXGE_HW_TIM_TIMER_RI_DISABLE;

		device_config->vp_config[i].rti.util_sel =
				VXGE_HW_TIM_UTIL_SEL_LEGACY_RX_NET_UTIL;

		device_config->vp_config[i].rti.urange_a =
						RTI_RX_URANGE_A;
		device_config->vp_config[i].rti.urange_b =
						RTI_RX_URANGE_B;
		device_config->vp_config[i].rti.urange_c =
						RTI_RX_URANGE_C;
		device_config->vp_config[i].rti.uec_a = RTI_RX_UFC_A;
		device_config->vp_config[i].rti.uec_b = RTI_RX_UFC_B;
		device_config->vp_config[i].rti.uec_c = RTI_RX_UFC_C;
		device_config->vp_config[i].rti.uec_d = RTI_RX_UFC_D;

		device_config->vp_config[i].rti.rtimer_val =
			(VXGE_RTI_RTIMER_VAL * 1000) / 272;

		device_config->vp_config[i].rti.ltimer_val =
			(VXGE_RTI_LTIMER_VAL * 1000) / 272;

		device_config->vp_config[i].rpa_strip_vlan_tag =
			vlan_tag_strip;
	}

	driver_config->vpath_per_dev = temp;
	return no_of_vpaths;
}
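/*
 * On the (VXGE_*TIMER_VAL * 1000) / 272 expressions above: the timer
 * module parameters are specified in microseconds, while the TIM block
 * appears to count in ~272 ns hardware ticks, hence the scaling from
 * us * 1000 (ns) down by 272.  (Editorial assumption; the tick width is
 * not documented in this file.)
 */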
/* initialize device configurations */
static void __devinit vxge_device_config_init(
				struct vxge_hw_device_config *device_config,
				int *intr_type)
{
	/* Used for CQRQ/SRQ. */
	device_config->dma_blockpool_initial =
			VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE;

	device_config->dma_blockpool_max =
			VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE;

	if (max_mac_vpath > VXGE_MAX_MAC_ADDR_COUNT)
		max_mac_vpath = VXGE_MAX_MAC_ADDR_COUNT;

#ifndef CONFIG_PCI_MSI
	vxge_debug_init(VXGE_ERR,
		"%s: This Kernel does not support "
		"MSI-X. Defaulting to INTA", VXGE_DRIVER_NAME);
	*intr_type = INTA;
#endif

	/* Configure whether MSI-X or IRQL. */
	switch (*intr_type) {
	case INTA:
		device_config->intr_mode = VXGE_HW_INTR_MODE_IRQLINE;
		break;

	case MSI_X:
		device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX;
		break;
	}
	/* Timer period between device poll */
	device_config->device_poll_millis = VXGE_TIMER_DELAY;

	/* Configure mac based steering. */
	device_config->rts_mac_en = addr_learn_en;

	/* Configure Vpaths */
	device_config->rth_it_type = VXGE_HW_RTH_IT_TYPE_MULTI_IT;

	vxge_debug_ll_config(VXGE_TRACE, "%s : Device Config Params ",
			__func__);
	vxge_debug_ll_config(VXGE_TRACE, "dma_blockpool_initial : %d",
			device_config->dma_blockpool_initial);
	vxge_debug_ll_config(VXGE_TRACE, "dma_blockpool_max : %d",
			device_config->dma_blockpool_max);
	vxge_debug_ll_config(VXGE_TRACE, "intr_mode : %d",
			device_config->intr_mode);
	vxge_debug_ll_config(VXGE_TRACE, "device_poll_millis : %d",
			device_config->device_poll_millis);
	vxge_debug_ll_config(VXGE_TRACE, "rts_mac_en : %d",
			device_config->rts_mac_en);
	vxge_debug_ll_config(VXGE_TRACE, "rth_en : %d",
			device_config->rth_en);
	vxge_debug_ll_config(VXGE_TRACE, "rth_it_type : %d",
			device_config->rth_it_type);
}
static void __devinit vxge_print_parm(struct vxgedev *vdev, u64 vpath_mask)
{
	int i;

	vxge_debug_init(VXGE_TRACE,
		"%s: %d Vpath(s) opened",
		vdev->ndev->name, vdev->no_of_vpath);

	switch (vdev->config.intr_type) {
	case INTA:
		vxge_debug_init(VXGE_TRACE,
			"%s: Interrupt type INTA", vdev->ndev->name);
		break;

	case MSI_X:
		vxge_debug_init(VXGE_TRACE,
			"%s: Interrupt type MSI-X", vdev->ndev->name);
		break;
	}

	if (vdev->config.rth_steering) {
		vxge_debug_init(VXGE_TRACE,
			"%s: RTH steering enabled for TCP_IPV4",
			vdev->ndev->name);
	} else {
		vxge_debug_init(VXGE_TRACE,
			"%s: RTH steering disabled", vdev->ndev->name);
	}

	switch (vdev->config.tx_steering_type) {
	case NO_STEERING:
		vxge_debug_init(VXGE_TRACE,
			"%s: Tx steering disabled", vdev->ndev->name);
		break;
	case TX_PRIORITY_STEERING:
		vxge_debug_init(VXGE_TRACE,
			"%s: Unsupported tx steering option",
			vdev->ndev->name);
		vxge_debug_init(VXGE_TRACE,
			"%s: Tx steering disabled", vdev->ndev->name);
		vdev->config.tx_steering_type = 0;
		break;
	case TX_VLAN_STEERING:
		vxge_debug_init(VXGE_TRACE,
			"%s: Unsupported tx steering option",
			vdev->ndev->name);
		vxge_debug_init(VXGE_TRACE,
			"%s: Tx steering disabled", vdev->ndev->name);
		vdev->config.tx_steering_type = 0;
		break;
	case TX_MULTIQ_STEERING:
		vxge_debug_init(VXGE_TRACE,
			"%s: Tx multiqueue steering enabled",
			vdev->ndev->name);
		break;
	case TX_PORT_STEERING:
		vxge_debug_init(VXGE_TRACE,
			"%s: Tx port steering enabled",
			vdev->ndev->name);
		break;
	default:
		vxge_debug_init(VXGE_ERR,
			"%s: Unsupported tx steering type",
			vdev->ndev->name);
		vxge_debug_init(VXGE_TRACE,
			"%s: Tx steering disabled", vdev->ndev->name);
		vdev->config.tx_steering_type = 0;
	}

	if (vdev->config.gro_enable) {
		vxge_debug_init(VXGE_ERR,
			"%s: Generic receive offload enabled",
			vdev->ndev->name);
	} else
		vxge_debug_init(VXGE_TRACE,
			"%s: Generic receive offload disabled",
			vdev->ndev->name);

	if (vdev->config.addr_learn_en)
		vxge_debug_init(VXGE_TRACE,
			"%s: MAC Address learning enabled", vdev->ndev->name);

	vxge_debug_init(VXGE_TRACE,
		"%s: Rx doorbell mode enabled", vdev->ndev->name);

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (!vxge_bVALn(vpath_mask, i, 1))
			continue;
		vxge_debug_ll_config(VXGE_TRACE,
			"%s: MTU size - %d", vdev->ndev->name,
			((struct __vxge_hw_device *)(vdev->devh))->
				config.vp_config[i].mtu);
		vxge_debug_init(VXGE_TRACE,
			"%s: VLAN tag stripping %s", vdev->ndev->name,
			((struct __vxge_hw_device *)(vdev->devh))->
				config.vp_config[i].rpa_strip_vlan_tag
			? "Enabled" : "Disabled");
		vxge_debug_init(VXGE_TRACE,
			"%s: Ring blocks : %d", vdev->ndev->name,
			((struct __vxge_hw_device *)(vdev->devh))->
				config.vp_config[i].ring.ring_blocks);
		vxge_debug_init(VXGE_TRACE,
			"%s: Fifo blocks : %d", vdev->ndev->name,
			((struct __vxge_hw_device *)(vdev->devh))->
				config.vp_config[i].fifo.fifo_blocks);
		vxge_debug_ll_config(VXGE_TRACE,
			"%s: Max frags : %d", vdev->ndev->name,
			((struct __vxge_hw_device *)(vdev->devh))->
				config.vp_config[i].fifo.max_frags);
	}
}
#ifdef CONFIG_PM
/**
 * vxge_pm_suspend - vxge power management suspend entry point
 *
 */
static int vxge_pm_suspend(struct pci_dev *pdev, pm_message_t state)
{
	return -ENOSYS;
}
/**
 * vxge_pm_resume - vxge power management resume entry point
 *
 */
static int vxge_pm_resume(struct pci_dev *pdev)
{
	return -ENOSYS;
}

#endif
/**
 * vxge_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t vxge_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct __vxge_hw_device *hldev =
		(struct __vxge_hw_device *) pci_get_drvdata(pdev);
	struct net_device *netdev = hldev->ndev;

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev)) {
		/* Bring down the card, while avoiding PCI I/O */
		do_vxge_close(netdev, 0);
	}

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}
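/*
 * Together with vxge_io_slot_reset() and vxge_io_resume() below, this
 * implements the standard PCI AER recovery sequence: error_detected()
 * detaches and quiesces the device without touching the bus,
 * slot_reset() re-enables and resets it, and resume() reopens the
 * interface and reattaches it.
 */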
/**
 * vxge_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev)
{
	struct __vxge_hw_device *hldev =
		(struct __vxge_hw_device *) pci_get_drvdata(pdev);
	struct net_device *netdev = hldev->ndev;

	struct vxgedev *vdev = netdev_priv(netdev);

	if (pci_enable_device(pdev)) {
		printk(KERN_ERR "%s: "
			"Cannot re-enable device after reset\n",
			VXGE_DRIVER_NAME);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	vxge_reset(vdev);

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * vxge_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void vxge_io_resume(struct pci_dev *pdev)
{
	struct __vxge_hw_device *hldev =
		(struct __vxge_hw_device *) pci_get_drvdata(pdev);
	struct net_device *netdev = hldev->ndev;

	if (netif_running(netdev)) {
		if (vxge_open(netdev)) {
			printk(KERN_ERR "%s: "
				"Can't bring device back up after reset\n",
				VXGE_DRIVER_NAME);
			return;
		}
	}

	netif_device_attach(netdev);
}
4041 * @pdev : structure containing the PCI related information of the device.
4042 * @pre: List of PCI devices supported by the driver listed in vxge_id_table.
4044 * This function is called when a new PCI device gets detected and initializes
4047 * returns 0 on success and negative on failure.
4050 static int __devinit
4051 vxge_probe(struct pci_dev
*pdev
, const struct pci_device_id
*pre
)
4053 struct __vxge_hw_device
*hldev
;
4054 enum vxge_hw_status status
;
4058 struct vxgedev
*vdev
;
4059 struct vxge_config ll_config
;
4060 struct vxge_hw_device_config
*device_config
= NULL
;
4061 struct vxge_hw_device_attr attr
;
4062 int i
, j
, no_of_vpath
= 0, max_vpath_supported
= 0;
4064 struct vxge_mac_addrs
*entry
;
4065 static int bus
= -1, device
= -1;
4068 vxge_debug_entryexit(VXGE_TRACE
, "%s:%d", __func__
, __LINE__
);
4071 if (bus
!= pdev
->bus
->number
)
4073 if (device
!= PCI_SLOT(pdev
->devfn
))
4076 bus
= pdev
->bus
->number
;
4077 device
= PCI_SLOT(pdev
->devfn
);
4080 if (driver_config
->config_dev_cnt
&&
4081 (driver_config
->config_dev_cnt
!=
4082 driver_config
->total_dev_cnt
))
4083 vxge_debug_init(VXGE_ERR
,
4084 "%s: Configured %d of %d devices",
4086 driver_config
->config_dev_cnt
,
4087 driver_config
->total_dev_cnt
);
4088 driver_config
->config_dev_cnt
= 0;
4089 driver_config
->total_dev_cnt
= 0;
4090 driver_config
->g_no_cpus
= 0;
4091 driver_config
->vpath_per_dev
= max_config_vpath
;
4094 driver_config
->total_dev_cnt
++;
4095 if (++driver_config
->config_dev_cnt
> max_config_dev
) {
4100 device_config
= kzalloc(sizeof(struct vxge_hw_device_config
),
4102 if (!device_config
) {
4104 vxge_debug_init(VXGE_ERR
,
4105 "device_config : malloc failed %s %d",
4106 __FILE__
, __LINE__
);
4110 memset(&ll_config
, 0, sizeof(struct vxge_config
));
4111 ll_config
.tx_steering_type
= TX_MULTIQ_STEERING
;
4112 ll_config
.intr_type
= MSI_X
;
4113 ll_config
.napi_weight
= NEW_NAPI_WEIGHT
;
4114 ll_config
.rth_steering
= RTH_STEERING
;
4116 /* get the default configuration parameters */
4117 vxge_hw_device_config_default_get(device_config
);
4119 /* initialize configuration parameters */
4120 vxge_device_config_init(device_config
, &ll_config
.intr_type
);
4122 ret
= pci_enable_device(pdev
);
4124 vxge_debug_init(VXGE_ERR
,
4125 "%s : can not enable PCI device", __func__
);
4129 if (!pci_set_dma_mask(pdev
, 0xffffffffffffffffULL
)) {
4130 vxge_debug_ll_config(VXGE_TRACE
,
4131 "%s : using 64bit DMA", __func__
);
4135 if (pci_set_consistent_dma_mask(pdev
,
4136 0xffffffffffffffffULL
)) {
4137 vxge_debug_init(VXGE_ERR
,
4138 "%s : unable to obtain 64bit DMA for "
4139 "consistent allocations", __func__
);
4143 } else if (!pci_set_dma_mask(pdev
, 0xffffffffUL
)) {
4144 vxge_debug_ll_config(VXGE_TRACE
,
4145 "%s : using 32bit DMA", __func__
);
4151 if (pci_request_regions(pdev
, VXGE_DRIVER_NAME
)) {
4152 vxge_debug_init(VXGE_ERR
,
4153 "%s : request regions failed", __func__
);
4158 pci_set_master(pdev
);
4160 attr
.bar0
= pci_ioremap_bar(pdev
, 0);
4162 vxge_debug_init(VXGE_ERR
,
4163 "%s : cannot remap io memory bar0", __func__
);
4167 vxge_debug_ll_config(VXGE_TRACE
,
4168 "pci ioremap bar0: %p:0x%llx",
4170 (unsigned long long)pci_resource_start(pdev
, 0));
4172 status
= vxge_hw_device_hw_info_get(attr
.bar0
,
4173 &ll_config
.device_hw_info
);
4174 if (status
!= VXGE_HW_OK
) {
4175 vxge_debug_init(VXGE_ERR
,
4176 "%s: Reading of hardware info failed."
4177 "Please try upgrading the firmware.", VXGE_DRIVER_NAME
);
4182 if (ll_config
.device_hw_info
.fw_version
.major
!=
4183 VXGE_DRIVER_FW_VERSION_MAJOR
) {
4184 vxge_debug_init(VXGE_ERR
,
4185 "%s: Incorrect firmware version."
4186 "Please upgrade the firmware to version 1.x.x",
4192 vpath_mask
= ll_config
.device_hw_info
.vpath_mask
;
4193 if (vpath_mask
== 0) {
4194 vxge_debug_ll_config(VXGE_TRACE
,
4195 "%s: No vpaths available in device", VXGE_DRIVER_NAME
);
4200 vxge_debug_ll_config(VXGE_TRACE
,
4201 "%s:%d Vpath mask = %llx", __func__
, __LINE__
,
4202 (unsigned long long)vpath_mask
);
4204 /* Check how many vpaths are available */
4205 for (i
= 0; i
< VXGE_HW_MAX_VIRTUAL_PATHS
; i
++) {
4206 if (!((vpath_mask
) & vxge_mBIT(i
)))
4208 max_vpath_supported
++;
4211 /* Enable SRIOV mode, if firmware has SRIOV support and if it is a PF */
4212 if ((VXGE_HW_FUNCTION_MODE_SRIOV
==
4213 ll_config
.device_hw_info
.function_mode
) &&
4214 (max_config_dev
> 1) && (pdev
->is_physfn
)) {
4215 ret
= pci_enable_sriov(pdev
, max_config_dev
- 1);
4217 vxge_debug_ll_config(VXGE_ERR
,
4218 "Failed to enable SRIOV: %d \n", ret
);
4222 * Configure vpaths and get driver configured number of vpaths
4223 * which is less than or equal to the maximum vpaths per function.
4225 no_of_vpath
= vxge_config_vpaths(device_config
, vpath_mask
, &ll_config
);
4227 vxge_debug_ll_config(VXGE_ERR
,
4228 "%s: No more vpaths to configure", VXGE_DRIVER_NAME
);
4233 /* Setting driver callbacks */
4234 attr
.uld_callbacks
.link_up
= vxge_callback_link_up
;
4235 attr
.uld_callbacks
.link_down
= vxge_callback_link_down
;
4236 attr
.uld_callbacks
.crit_err
= vxge_callback_crit_err
;
4238 status
= vxge_hw_device_initialize(&hldev
, &attr
, device_config
);
4239 if (status
!= VXGE_HW_OK
) {
4240 vxge_debug_init(VXGE_ERR
,
4241 "Failed to initialize device (%d)", status
);
4246 vxge_hw_device_debug_set(hldev
, VXGE_ERR
, VXGE_COMPONENT_LL
);
4248 /* set private device info */
4249 pci_set_drvdata(pdev
, hldev
);
4251 ll_config
.gro_enable
= VXGE_GRO_ALWAYS_AGGREGATE
;
4252 ll_config
.fifo_indicate_max_pkts
= VXGE_FIFO_INDICATE_MAX_PKTS
;
4253 ll_config
.addr_learn_en
= addr_learn_en
;
4254 ll_config
.rth_algorithm
= RTH_ALG_JENKINS
;
4255 ll_config
.rth_hash_type_tcpipv4
= VXGE_HW_RING_HASH_TYPE_TCP_IPV4
;
4256 ll_config
.rth_hash_type_ipv4
= VXGE_HW_RING_HASH_TYPE_NONE
;
4257 ll_config
.rth_hash_type_tcpipv6
= VXGE_HW_RING_HASH_TYPE_NONE
;
4258 ll_config
.rth_hash_type_ipv6
= VXGE_HW_RING_HASH_TYPE_NONE
;
4259 ll_config
.rth_hash_type_tcpipv6ex
= VXGE_HW_RING_HASH_TYPE_NONE
;
4260 ll_config
.rth_hash_type_ipv6ex
= VXGE_HW_RING_HASH_TYPE_NONE
;
4261 ll_config
.rth_bkt_sz
= RTH_BUCKET_SIZE
;
4262 ll_config
.tx_pause_enable
= VXGE_PAUSE_CTRL_ENABLE
;
4263 ll_config
.rx_pause_enable
= VXGE_PAUSE_CTRL_ENABLE
;
4265 if (vxge_device_register(hldev
, &ll_config
, high_dma
, no_of_vpath
,
4271 vxge_hw_device_debug_set(hldev
, VXGE_TRACE
, VXGE_COMPONENT_LL
);
4272 VXGE_COPY_DEBUG_INFO_TO_LL(vdev
, vxge_hw_device_error_level_get(hldev
),
4273 vxge_hw_device_trace_level_get(hldev
));
4275 /* set private HW device info */
4276 hldev
->ndev
= vdev
->ndev
;
4277 vdev
->mtu
= VXGE_HW_DEFAULT_MTU
;
4278 vdev
->bar0
= attr
.bar0
;
4279 vdev
->max_vpath_supported
= max_vpath_supported
;
4280 vdev
->no_of_vpath
= no_of_vpath
;
4282 /* Virtual Path count */
4283 for (i
= 0, j
= 0; i
< VXGE_HW_MAX_VIRTUAL_PATHS
; i
++) {
4284 if (!vxge_bVALn(vpath_mask
, i
, 1))
4286 if (j
>= vdev
->no_of_vpath
)
4289 vdev
->vpaths
[j
].is_configured
= 1;
4290 vdev
->vpaths
[j
].device_id
= i
;
4291 vdev
->vpaths
[j
].fifo
.driver_id
= j
;
4292 vdev
->vpaths
[j
].ring
.driver_id
= j
;
4293 vdev
->vpaths
[j
].vdev
= vdev
;
4294 vdev
->vpaths
[j
].max_mac_addr_cnt
= max_mac_vpath
;
4295 memcpy((u8
*)vdev
->vpaths
[j
].macaddr
,
4296 (u8
*)ll_config
.device_hw_info
.mac_addrs
[i
],
4299 /* Initialize the mac address list header */
4300 INIT_LIST_HEAD(&vdev
->vpaths
[j
].mac_addr_list
);
4302 vdev
->vpaths
[j
].mac_addr_cnt
= 0;
4303 vdev
->vpaths
[j
].mcast_addr_cnt
= 0;
4306 vdev
->exec_mode
= VXGE_EXEC_MODE_DISABLE
;
4307 vdev
->max_config_port
= max_config_port
;
4309 vdev
->vlan_tag_strip
= vlan_tag_strip
;
4311 /* map the hashing selector table to the configured vpaths */
4312 for (i
= 0; i
< vdev
->no_of_vpath
; i
++)
4313 vdev
->vpath_selector
[i
] = vpath_selector
[i
];
4315 macaddr
= (u8
*)vdev
->vpaths
[0].macaddr
;
4317 ll_config
.device_hw_info
.serial_number
[VXGE_HW_INFO_LEN
- 1] = '\0';
4318 ll_config
.device_hw_info
.product_desc
[VXGE_HW_INFO_LEN
- 1] = '\0';
4319 ll_config
.device_hw_info
.part_number
[VXGE_HW_INFO_LEN
- 1] = '\0';

	vxge_debug_init(VXGE_TRACE, "%s: SERIAL NUMBER: %s",
		vdev->ndev->name, ll_config.device_hw_info.serial_number);

	vxge_debug_init(VXGE_TRACE, "%s: PART NUMBER: %s",
		vdev->ndev->name, ll_config.device_hw_info.part_number);

	vxge_debug_init(VXGE_TRACE, "%s: Neterion %s Server Adapter",
		vdev->ndev->name, ll_config.device_hw_info.product_desc);

	vxge_debug_init(VXGE_TRACE,
		"%s: MAC ADDR: %02X:%02X:%02X:%02X:%02X:%02X",
		vdev->ndev->name, macaddr[0], macaddr[1], macaddr[2],
		macaddr[3], macaddr[4], macaddr[5]);

	vxge_debug_init(VXGE_TRACE, "%s: Link Width x%d",
		vdev->ndev->name, vxge_hw_device_link_width_get(hldev));

	vxge_debug_init(VXGE_TRACE,
		"%s: Firmware version : %s Date : %s", vdev->ndev->name,
		ll_config.device_hw_info.fw_version.version,
		ll_config.device_hw_info.fw_date.date);
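
	/* Report which multi-function personality the adapter came up in. */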
	switch (ll_config.device_hw_info.function_mode) {
	case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION:
		vxge_debug_init(VXGE_TRACE,
			"%s: Single Function Mode Enabled", vdev->ndev->name);
		break;
	case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION:
		vxge_debug_init(VXGE_TRACE,
			"%s: Multi Function Mode Enabled", vdev->ndev->name);
		break;
	case VXGE_HW_FUNCTION_MODE_SRIOV:
		vxge_debug_init(VXGE_TRACE,
			"%s: Single Root IOV Mode Enabled", vdev->ndev->name);
		break;
	case VXGE_HW_FUNCTION_MODE_MRIOV:
		vxge_debug_init(VXGE_TRACE,
			"%s: Multi Root IOV Mode Enabled", vdev->ndev->name);
		break;
	}

	vxge_print_parm(vdev, vpath_mask);

	/* Store the fw version for ethtool option */
	strcpy(vdev->fw_version, ll_config.device_hw_info.fw_version.version);
	memcpy(vdev->ndev->dev_addr, (u8 *)vdev->vpaths[0].macaddr, ETH_ALEN);
	memcpy(vdev->ndev->perm_addr, vdev->ndev->dev_addr, ETH_ALEN);
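
	/*
	 * Seed each vpath's address list with the station MAC so the address
	 * can be re-programmed into the hardware after a vpath reset.
	 */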
	/* Copy the station mac address to the list */
	for (i = 0; i < vdev->no_of_vpath; i++) {
		entry = (struct vxge_mac_addrs *)
				kzalloc(sizeof(struct vxge_mac_addrs),
					GFP_KERNEL);
		if (NULL == entry) {
			vxge_debug_init(VXGE_ERR,
				"%s: mac_addr_list : memory allocation failed",
				vdev->ndev->name);
			ret = -EPERM;
			goto _exit5;
		}
		macaddr = (u8 *)&entry->macaddr;
		memcpy(macaddr, vdev->ndev->dev_addr, ETH_ALEN);
		list_add(&entry->item, &vdev->vpaths[i].mac_addr_list);
		vdev->vpaths[i].mac_addr_cnt = 1;
	}

	kfree(device_config);
	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...",
		vdev->ndev->name, __func__, __LINE__);

	vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL);
	VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
		vxge_hw_device_trace_level_get(hldev));

	return 0;
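
	/*
	 * Error unwind: each label below releases everything acquired after
	 * the point in probe that jumps to it.
	 */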
_exit5:
	for (i = 0; i < vdev->no_of_vpath; i++)
		vxge_free_mac_add_list(&vdev->vpaths[i]);

	vxge_device_unregister(hldev);
_exit4:
	pci_disable_sriov(pdev);
	vxge_hw_device_terminate(hldev);
_exit3:
	iounmap(attr.bar0);
_exit2:
	pci_release_regions(pdev);
_exit1:
	pci_disable_device(pdev);
_exit0:
	kfree(device_config);
	driver_config->config_dev_cnt--;
	pci_set_drvdata(pdev, NULL);
	return ret;
}

/**
 * vxge_remove - Free the PCI device
 * @pdev: structure containing the PCI related information of the device.
 * Description: This function is called by the PCI subsystem to release a
 * PCI device and free up all resources held by the device.
 */
static void __devexit
vxge_remove(struct pci_dev *pdev)
{
	struct __vxge_hw_device *hldev;
	struct vxgedev *vdev = NULL;
	struct net_device *dev;
	int i = 0;
#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
	(VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
	u32 level_trace;
#endif

	hldev = (struct __vxge_hw_device *) pci_get_drvdata(pdev);
	if (hldev == NULL)
		return;
	dev = hldev->ndev;
	vdev = netdev_priv(dev);

#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
	(VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
	level_trace = vdev->level_trace;
#endif
	vxge_debug_entryexit(level_trace,
		"%s:%d", __func__, __LINE__);

	vxge_debug_init(level_trace,
		"%s : removing PCI device...", __func__);
	vxge_device_unregister(hldev);
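
	/* Drop the per-vpath MAC address lists built up since probe. */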
	for (i = 0; i < vdev->no_of_vpath; i++) {
		vxge_free_mac_add_list(&vdev->vpaths[i]);
		vdev->vpaths[i].mcast_addr_cnt = 0;
		vdev->vpaths[i].mac_addr_cnt = 0;
	}

	kfree(vdev->vpaths);

	iounmap(vdev->bar0);

	pci_disable_sriov(pdev);

	/* we are safe to free it now */
	free_netdev(dev);

	vxge_debug_init(level_trace,
		"%s:%d Device unregistered", __func__, __LINE__);

	vxge_hw_device_terminate(hldev);

	pci_disable_device(pdev);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	vxge_debug_entryexit(level_trace,
		"%s:%d Exiting...", __func__, __LINE__);
}
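
/*
 * AER hooks: the PCI error-recovery core invokes these to quiesce the
 * device after a fatal PCI error, reset the slot, and resume traffic.
 */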
static struct pci_error_handlers vxge_err_handler = {
	.error_detected = vxge_io_error_detected,
	.slot_reset = vxge_io_slot_reset,
	.resume = vxge_io_resume,
};

static struct pci_driver vxge_driver = {
	.name = VXGE_DRIVER_NAME,
	.id_table = vxge_id_table,
	.probe = vxge_probe,
	.remove = __devexit_p(vxge_remove),
#ifdef CONFIG_PM
	.suspend = vxge_pm_suspend,
	.resume = vxge_pm_resume,
#endif
	.err_handler = &vxge_err_handler,
};

static int __init
vxge_starter(void)
{
	int ret = 0;
	char version[32];
	snprintf(version, 32, "%s", DRV_VERSION);

	printk(KERN_CRIT "%s: Copyright(c) 2002-2009 Neterion Inc\n",
		VXGE_DRIVER_NAME);
	printk(KERN_CRIT "%s: Driver version: %s\n",
		VXGE_DRIVER_NAME, version);

	verify_bandwidth();

	driver_config = kzalloc(sizeof(struct vxge_drv_config), GFP_KERNEL);
	if (!driver_config)
		return -ENOMEM;

	ret = pci_register_driver(&vxge_driver);
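
	/*
	 * Warn if some of the adapters discovered during probe could not be
	 * fully configured.
	 */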
	if (driver_config->config_dev_cnt &&
	   (driver_config->config_dev_cnt != driver_config->total_dev_cnt))
		vxge_debug_init(VXGE_ERR,
			"%s: Configured %d of %d devices",
			VXGE_DRIVER_NAME, driver_config->config_dev_cnt,
			driver_config->total_dev_cnt);

	if (ret)
		kfree(driver_config);

	return ret;
}

static void __exit
vxge_closer(void)
{
	pci_unregister_driver(&vxge_driver);
	kfree(driver_config);
}
module_init(vxge_starter);
module_exit(vxge_closer);