/*
 * Copyright (C) 2005 - 2011 Emulex
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 * Costa Mesa, CA 92626
 */
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <net/tcp.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <linux/if_vlan.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/u64_stats_sync.h>

#include "be_hw.h"
#define DRV_VER			"4.0.100u"
#define DRV_NAME		"be2net"
#define BE_NAME			"ServerEngines BladeEngine2 10Gbps NIC"
#define BE3_NAME		"ServerEngines BladeEngine3 10Gbps NIC"
#define OC_NAME			"Emulex OneConnect 10Gbps NIC"
#define OC_NAME_BE		OC_NAME	"(be3)"
#define OC_NAME_LANCER		OC_NAME "(Lancer)"
#define OC_NAME_SH		OC_NAME "(Skyhawk)"
#define DRV_DESC		"ServerEngines BladeEngine 10Gbps NIC Driver"
#define BE_VENDOR_ID		0x19a2
#define EMULEX_VENDOR_ID	0x10df
#define BE_DEVICE_ID1		0x211
#define BE_DEVICE_ID2		0x221
#define OC_DEVICE_ID1		0x700	/* Device Id for BE2 cards */
#define OC_DEVICE_ID2		0x710	/* Device Id for BE3 cards */
#define OC_DEVICE_ID3		0xe220	/* Device Id for Lancer cards */
#define OC_DEVICE_ID4		0xe228	/* Device Id for VF in Lancer */
#define OC_DEVICE_ID5		0x720	/* Device Id for Skyhawk cards */
static inline char *nic_name(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		return OC_NAME_LANCER;
	default:
		return BE_NAME;
	}
}
/* Number of bytes of an RX frame that are copied to skb->data */
#define BE_HDR_LEN		((u16) 64)
#define BE_MAX_JUMBO_FRAME_SIZE	9018
#define BE_MIN_MTU		256
#define BE_NUM_VLANS_SUPPORTED	64
#define BE_MAX_TX_FRAG_COUNT	30

#define EVNT_Q_LEN		1024
#define TX_Q_LEN		2048	/* sizes sent_skb_list below */
#define TX_CQ_LEN		1024
#define RX_Q_LEN		1024	/* Does not support any other value */
#define RX_CQ_LEN		1024
#define MCC_Q_LEN		128	/* total size not to exceed 8 pages */
#define MCC_CQ_LEN		256
#define MAX_RSS_QS		4	/* BE limit is 4 queues/port */
#define MAX_RX_QS		(MAX_RSS_QS + 1) /* RSS qs + 1 def Rx */
#define MAX_TX_QS		8	/* sizes tx_obj[] below */
#define BE_MAX_MSIX_VECTORS	(MAX_RX_QS + 1)	/* RX + TX */
#define BE_NAPI_WEIGHT		64
#define MAX_RX_POST		BE_NAPI_WEIGHT	/* Frags posted at a time */
#define RX_FRAGS_REFILL_WM	(RX_Q_LEN - MAX_RX_POST)
#define FW_VER_LEN		32
struct be_dma_mem {
	void *va;
	dma_addr_t dma;
	u32 size;
};

struct be_queue_info {
	struct be_dma_mem dma_mem;
	u16 len;
	u16 entry_size;	/* Size of an element in the queue */
	u16 tail, head;
	atomic_t used;	/* Number of valid elements in the queue */
};
static inline u32 MODULO(u16 val, u16 limit)
{
	BUG_ON(limit & (limit - 1));
	return val & (limit - 1);
}
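/*
 * Illustrative numbers (not part of the driver): because every ring
 * length used here is a power of two, MODULO() reduces wrap-around to
 * a single AND instead of a divide:
 *
 *	MODULO(5, 4)       == 1    (5 & 3)
 *	MODULO(1023, 1024) == 1023
 *	MODULO(1024, 1024) == 0    (index wraps to ring start)
 *
 * The BUG_ON() above enforces the power-of-two precondition; a limit
 * such as 100 would trip it.
 */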
static inline void index_adv(u16 *index, u16 val, u16 limit)
{
	*index = MODULO((*index + val), limit);
}

static inline void index_inc(u16 *index, u16 limit)
{
	*index = MODULO((*index + 1), limit);
}
static inline void *queue_head_node(struct be_queue_info *q)
{
	return q->dma_mem.va + q->head * q->entry_size;
}

static inline void *queue_tail_node(struct be_queue_info *q)
{
	return q->dma_mem.va + q->tail * q->entry_size;
}

static inline void *queue_index_node(struct be_queue_info *q, u16 index)
{
	return q->dma_mem.va + index * q->entry_size;
}

static inline void queue_head_inc(struct be_queue_info *q)
{
	index_inc(&q->head, q->len);
}

static inline void queue_tail_inc(struct be_queue_info *q)
{
	index_inc(&q->tail, q->len);
}
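/*
 * Minimal sketch (example_queue_produce is hypothetical, not a driver
 * function) of how a producer is expected to use the accessors above:
 * fill the slot at head, then advance head and the used count.
 */
static inline void example_queue_produce(struct be_queue_info *q,
					 const void *entry)
{
	/* Copy one entry into the current head slot of the DMA ring */
	memcpy(queue_head_node(q), entry, q->entry_size);
	queue_head_inc(q);	/* head wraps via MODULO(.., q->len) */
	atomic_inc(&q->used);	/* one more valid element for the consumer */
}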
struct be_eq_obj {
	struct be_queue_info q;

	/* Adaptive interrupt coalescing (AIC) info */
	u16 min_eqd;		/* in usecs */
	u16 max_eqd;		/* in usecs */
	u16 cur_eqd;		/* in usecs */

	struct napi_struct napi;
};
struct be_mcc_obj {
	struct be_queue_info q;
	struct be_queue_info cq;
};
struct be_tx_stats {
	struct u64_stats_sync sync;
	struct u64_stats_sync sync_compl;
};
struct be_tx_obj {
	struct be_queue_info q;
	struct be_queue_info cq;
	/* Remember the skbs that were transmitted */
	struct sk_buff *sent_skb_list[TX_Q_LEN];
	struct be_tx_stats stats;
};
/* Struct to remember the pages posted for rx frags */
struct be_rx_page_info {
	struct page *page;
	DEFINE_DMA_UNMAP_ADDR(bus);
};
struct be_rx_stats {
	u32 rx_drops_no_skbs;	/* skb allocation errors */
	u32 rx_drops_no_frags;	/* HW has no fetched frags */
	u32 rx_post_fail;	/* page post alloc failures */
	u32 rx_polls;		/* NAPI calls */
	u32 rx_compl_err;	/* completions with err set */
	u32 rx_pps;		/* pkts per second */
	struct u64_stats_sync sync;
};
struct be_rx_compl_info {
	u16 vlan_tag;
	u16 pkt_size;
	u8 err;
};
struct be_rx_obj {
	struct be_adapter *adapter;
	struct be_queue_info q;
	struct be_queue_info cq;
	struct be_rx_compl_info rxcp;
	struct be_rx_page_info page_info_tbl[RX_Q_LEN];
	struct be_eq_obj rx_eq;
	struct be_rx_stats stats;
	bool rx_post_starved;	/* Zero rx frags have been posted to BE */
	u32 cache_line_barrier[16];
};
struct be_drv_stats {
	u32 be_on_die_temperature;
	u32 rx_drops_no_pbuf;
	u32 rx_drops_no_txpb;
	u32 rx_drops_no_erx_descr;
	u32 rx_drops_no_tpre_descr;
	u32 rx_drops_too_many_frags;
	u32 rx_drops_invalid_ring;
	u32 forwarded_packets;
	u32 rx_alignment_symbol_errors;
	u32 rx_priority_pause_frames;
	u32 rx_control_frames;
	u32 rx_in_range_errors;
	u32 rx_out_range_errors;
	u32 rx_frame_too_long;
	u32 rx_address_match_errors;
	u32 rx_dropped_too_small;
	u32 rx_dropped_too_short;
	u32 rx_dropped_header_too_small;
	u32 rx_dropped_tcp_length;
	u32 rx_ip_checksum_errs;
	u32 rx_tcp_checksum_errs;
	u32 rx_udp_checksum_errs;
	u32 tx_priority_pauseframes;
	u32 tx_controlframes;
	u32 rxpp_fifo_overflow_drop;
	u32 rx_input_fifo_overflow_drop;
	u32 pmem_fifo_overflow_drop;
};
struct be_vf_cfg {
	unsigned char mac_addr[ETH_ALEN];
};
#define BE_FLAGS_LINK_STATUS_INIT	1
struct be_adapter {
	struct pci_dev *pdev;
	struct net_device *netdev;

	u8 __iomem *db;		/* Door Bell */

	struct mutex mbox_lock;	/* For serializing mbox cmds to BE card */
	struct be_dma_mem mbox_mem;
	/* Mbox mem is adjusted to align to 16 bytes. The allocated addr
	 * is stored for freeing purpose */
	struct be_dma_mem mbox_mem_alloced;

	struct be_mcc_obj mcc_obj;
	spinlock_t mcc_lock;	/* For serializing mcc cmds to BE card */
	spinlock_t mcc_cq_lock;

	struct msix_entry msix_entries[BE_MAX_MSIX_VECTORS];
	u32 num_msix_vec;

	/* TX Rings */
	struct be_eq_obj tx_eq;
	struct be_tx_obj tx_obj[MAX_TX_QS];
	u8 num_tx_qs;

	u32 cache_line_break[8];

	/* Rx rings */
	struct be_rx_obj rx_obj[MAX_RX_QS];
	u32 num_rx_qs;
	u32 big_page_size;	/* Compounded page size shared by rx wrbs */

	struct be_drv_stats drv_stats;

	u16 max_vlans;		/* Number of vlans supported */
	u8 vlan_tag[VLAN_N_VID];
	u8 vlan_prio_bmap;	/* Available Priority BitMap */
	u16 recommended_prio;	/* Recommended Priority */
	struct be_dma_mem rx_filter;	/* Cmd DMA mem for rx-filter */

	struct be_dma_mem stats_cmd;
	/* Work queue used to perform periodic tasks like getting statistics */
	struct delayed_work work;

	/* Ethtool knobs and info */
	char fw_ver[FW_VER_LEN];
	int if_handle;		/* Used to configure filtering */
	u32 pmac_id;		/* MAC addr handle used by BE card */
	u32 beacon_state;	/* for set_phys_id */

	bool eeh_err;
	bool ue_detected;
	bool fw_timeout;

	u32 rx_fc;		/* Rx flow control */
	u32 tx_fc;		/* Tx flow control */
	u8 generation;		/* BladeEngine ASIC generation */
	struct completion flash_compl;

	u32 num_vfs;
	u8 is_virtfn;
	struct be_vf_cfg *vf_cfg;
};
#define be_physfn(adapter)		(!adapter->is_virtfn)
#define sriov_enabled(adapter)		(adapter->num_vfs > 0)
#define for_all_vfs(adapter, vf_cfg, i)					\
	for (i = 0, vf_cfg = &adapter->vf_cfg[i]; i < adapter->num_vfs;	\
		i++, vf_cfg++)
/* BladeEngine Generation numbers */
#define BE_GEN2 2
#define BE_GEN3 3

#define lancer_chip(adapter)	((adapter->pdev->device == OC_DEVICE_ID3) || \
				 (adapter->pdev->device == OC_DEVICE_ID4))
extern const struct ethtool_ops be_ethtool_ops;
#define msix_enabled(adapter)		(adapter->num_msix_vec > 0)
#define tx_stats(txo)			(&txo->stats)
#define rx_stats(rxo)			(&rxo->stats)

#define BE_SET_NETDEV_OPS(netdev, ops)	(netdev->netdev_ops = ops)
#define for_all_rx_queues(adapter, rxo, i)				\
	for (i = 0, rxo = &adapter->rx_obj[i]; i < adapter->num_rx_qs;	\
		i++, rxo++)

/* Just skip the first default non-rss queue */
#define for_all_rss_queues(adapter, rxo, i)				\
	for (i = 0, rxo = &adapter->rx_obj[i+1]; i < (adapter->num_rx_qs - 1);\
		i++, rxo++)

#define for_all_tx_queues(adapter, txo, i)				\
	for (i = 0, txo = &adapter->tx_obj[i]; i < adapter->num_tx_qs;	\
		i++, txo++)
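/*
 * Illustrative use of the iterator macros above; example_clear_tx_stats
 * is hypothetical and only shows that the caller supplies both the
 * cursor and the index variable.
 */
static inline void example_clear_tx_stats(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	int i;

	for_all_tx_queues(adapter, txo, i)
		memset(&txo->stats, 0, sizeof(txo->stats));
}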
#define PAGE_SHIFT_4K		12
#define PAGE_SIZE_4K		(1 << PAGE_SHIFT_4K)

/* Returns number of pages spanned by the data starting at the given addr */
#define PAGES_4K_SPANNED(_address, size)				\
		((u32)((((size_t)(_address) & (PAGE_SIZE_4K - 1)) +	\
			(size) + (PAGE_SIZE_4K - 1)) >> PAGE_SHIFT_4K))

/* Byte offset into the page corresponding to given address */
#define OFFSET_IN_PAGE(addr)						\
		((size_t)(addr) & (PAGE_SIZE_4K - 1))
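/*
 * Worked example: an 8000-byte buffer starting 0x800 bytes into a 4K
 * page occupies bytes 0x800..0x273f relative to the page base, so
 * PAGES_4K_SPANNED(addr, 8000) == (0x800 + 8000 + 4095) >> 12 == 3
 * pages, and OFFSET_IN_PAGE(addr) == 0x800.
 */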
/* Returns bit offset within a DWORD of a bitfield */
#define AMAP_BIT_OFFSET(_struct, field)					\
		(((size_t)&(((_struct *)0)->field))%32)

/* Returns the bit mask of the field that is NOT shifted into location. */
static inline u32 amap_mask(u32 bitsize)
{
	return (bitsize == 32 ? 0xFFFFFFFF : (1 << bitsize) - 1);
}
static inline void
amap_set(void *ptr, u32 dw_offset, u32 mask, u32 offset, u32 value)
{
	u32 *dw = (u32 *) ptr + dw_offset;
	*dw &= ~(mask << offset);
	*dw |= (mask & value) << offset;
}
#define AMAP_SET_BITS(_struct, field, ptr, val)				\
		amap_set(ptr,						\
			offsetof(_struct, field)/32,			\
			amap_mask(sizeof(((_struct *)0)->field)),	\
			AMAP_BIT_OFFSET(_struct, field),		\
			val)
static inline u32 amap_get(void *ptr, u32 dw_offset, u32 mask, u32 offset)
{
	u32 *dw = (u32 *) ptr;
	return mask & (*(dw + dw_offset) >> offset);
}
#define AMAP_GET_BITS(_struct, field, ptr)				\
		amap_get(ptr,						\
			offsetof(_struct, field)/32,			\
			amap_mask(sizeof(((_struct *)0)->field)),	\
			AMAP_BIT_OFFSET(_struct, field))
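/*
 * Illustrative AMAP usage; amap_example_desc is made up for this
 * sketch (real descriptor layouts live in be_hw.h). Each field is a
 * byte array whose length encodes the field width in bits, so
 * sizeof() yields the width and offsetof()/32 the DWORD index.
 */
struct amap_example_desc {
	u8 len[16];	/* bits 0..15 of DWORD 0 */
	u8 rsvd0[15];	/* bits 16..30 */
	u8 valid[1];	/* bit 31 */
};

static inline void amap_example(u32 *desc)
{
	AMAP_SET_BITS(struct amap_example_desc, len, desc, 1514);
	AMAP_SET_BITS(struct amap_example_desc, valid, desc, 1);
	/* Reads back 1514 from bits 0..15 of desc[0] */
	(void)AMAP_GET_BITS(struct amap_example_desc, len, desc);
}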
#define be_dws_cpu_to_le(wrb, len)	swap_dws(wrb, len)
#define be_dws_le_to_cpu(wrb, len)	swap_dws(wrb, len)
static inline void swap_dws(void *wrb, int len)
{
#ifdef __BIG_ENDIAN
	u32 *dw = (u32 *) wrb;
	while (len) {
		*dw = cpu_to_le32(*dw);
		dw++;
		len -= 4;
	}
#endif		/* __BIG_ENDIAN */
}
static inline u8 is_tcp_pkt(struct sk_buff *skb)
{
	u8 val = 0;

	if (ip_hdr(skb)->version == 4)
		val = (ip_hdr(skb)->protocol == IPPROTO_TCP);
	else if (ip_hdr(skb)->version == 6)
		val = (ipv6_hdr(skb)->nexthdr == NEXTHDR_TCP);

	return val;
}
static inline u8 is_udp_pkt(struct sk_buff *skb)
{
	u8 val = 0;

	if (ip_hdr(skb)->version == 4)
		val = (ip_hdr(skb)->protocol == IPPROTO_UDP);
	else if (ip_hdr(skb)->version == 6)
		val = (ipv6_hdr(skb)->nexthdr == NEXTHDR_UDP);

	return val;
}
static inline void be_check_sriov_fn_type(struct be_adapter *adapter)
{
	u32 sli_intf;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->is_virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
}
static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
{
	u32 addr;

	addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);

	mac[5] = (u8)(addr & 0xFF);
	mac[4] = (u8)((addr >> 8) & 0xFF);
	mac[3] = (u8)((addr >> 16) & 0xFF);
	/* Use the OUI from the current MAC address */
	memcpy(mac, adapter->netdev->dev_addr, 3);
}
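/*
 * Illustrative result: if the PF MAC is 00:00:c9:ab:cd:ef and the
 * jhash() above returns 0x00123456, the generated VF MAC is
 * 00:00:c9:12:34:56: bytes 0-2 keep the adapter's OUI, bytes 3-5
 * come from the low 24 bits of the hash.
 */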
static inline bool be_multi_rxq(const struct be_adapter *adapter)
{
	return adapter->num_rx_qs > 1;
}
static inline bool be_error(struct be_adapter *adapter)
{
	return adapter->eeh_err || adapter->ue_detected || adapter->fw_timeout;
}
extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
		u16 num_popped);
extern void be_link_status_update(struct be_adapter *adapter, u8 link_status);
extern void be_parse_stats(struct be_adapter *adapter);
extern int be_load_fw(struct be_adapter *adapter, u8 *func);