/*
 * QLogic qlcnic NIC Driver
 * Copyright (c) 2009-2013 QLogic Corporation
 *
 * See LICENSE.qlcnic for copyright and licensing details.
 */
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <net/checksum.h>
#include <linux/printk.h>

#include "qlcnic.h"
#define QLCNIC_TX_ETHER_PKT	0x01
#define QLCNIC_TX_TCP_PKT	0x02
#define QLCNIC_TX_UDP_PKT	0x03
#define QLCNIC_TX_IP_PKT	0x04
#define QLCNIC_TX_TCP_LSO	0x05
#define QLCNIC_TX_TCP_LSO6	0x06
#define QLCNIC_TX_ENCAP_PKT	0x07
#define QLCNIC_TX_ENCAP_LSO	0x08
#define QLCNIC_TX_TCPV6_PKT	0x0b
#define QLCNIC_TX_UDPV6_PKT	0x0c

#define QLCNIC_FLAGS_VLAN_TAGGED	0x10
#define QLCNIC_FLAGS_VLAN_OOB		0x40

#define qlcnic_set_tx_vlan_tci(cmd_desc, v)	\
	(cmd_desc)->vlan_TCI = cpu_to_le16(v);
#define qlcnic_set_cmd_desc_port(cmd_desc, var)	\
	((cmd_desc)->port_ctxid |= ((var) & 0x0F))
#define qlcnic_set_cmd_desc_ctxid(cmd_desc, var)	\
	((cmd_desc)->port_ctxid |= ((var) << 4 & 0xF0))

#define qlcnic_set_tx_port(_desc, _port) \
	((_desc)->port_ctxid = ((_port) & 0xf) | (((_port) << 4) & 0xf0))

#define qlcnic_set_tx_flags_opcode(_desc, _flags, _opcode) \
	((_desc)->flags_opcode |= \
	cpu_to_le16(((_flags) & 0x7f) | (((_opcode) & 0x3f) << 7)))

#define qlcnic_set_tx_frags_len(_desc, _frags, _len) \
	((_desc)->nfrags__length = \
	cpu_to_le32(((_frags) & 0xff) | (((_len) & 0xffffff) << 8)))
/* owner bits of status_desc */
#define STATUS_OWNER_HOST	(0x1ULL << 56)
#define STATUS_OWNER_PHANTOM	(0x2ULL << 56)
/* Status descriptor:
 *  0-3 port, 4-7 status, 8-11 type, 12-27 total_length
 * 28-43 reference_handle, 44-47 protocol, 48-52 pkt_offset
 * 53-55 desc_cnt, 56-57 owner, 58-63 opcode
 */
#define qlcnic_get_sts_port(sts_data)	\
	((sts_data) & 0x0F)
#define qlcnic_get_sts_status(sts_data)	\
	(((sts_data) >> 4) & 0x0F)
#define qlcnic_get_sts_type(sts_data)	\
	(((sts_data) >> 8) & 0x0F)
#define qlcnic_get_sts_totallength(sts_data)	\
	(((sts_data) >> 12) & 0xFFFF)
#define qlcnic_get_sts_refhandle(sts_data)	\
	(((sts_data) >> 28) & 0xFFFF)
#define qlcnic_get_sts_prot(sts_data)	\
	(((sts_data) >> 44) & 0x0F)
#define qlcnic_get_sts_pkt_offset(sts_data)	\
	(((sts_data) >> 48) & 0x1F)
#define qlcnic_get_sts_desc_cnt(sts_data)	\
	(((sts_data) >> 53) & 0x7)
#define qlcnic_get_sts_opcode(sts_data)	\
	(((sts_data) >> 58) & 0x03F)
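
/* Editor's illustrative sketch (not part of the original driver): a
 * minimal helper showing how a completed 82xx status word decodes with
 * the accessors above. The name qlcnic_dbg_decode_sts is hypothetical.
 */
static inline void qlcnic_dbg_decode_sts(u64 sts_data)
{
	pr_debug("sts: opcode 0x%llx status 0x%llx len %llu handle 0x%llx\n",
		 (unsigned long long)qlcnic_get_sts_opcode(sts_data),
		 (unsigned long long)qlcnic_get_sts_status(sts_data),
		 (unsigned long long)qlcnic_get_sts_totallength(sts_data),
		 (unsigned long long)qlcnic_get_sts_refhandle(sts_data));
}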
#define qlcnic_get_lro_sts_refhandle(sts_data)	\
	((sts_data) & 0x07FFF)
#define qlcnic_get_lro_sts_length(sts_data)	\
	(((sts_data) >> 16) & 0x0FFFF)
#define qlcnic_get_lro_sts_l2_hdr_offset(sts_data)	\
	(((sts_data) >> 32) & 0x0FF)
#define qlcnic_get_lro_sts_l4_hdr_offset(sts_data)	\
	(((sts_data) >> 40) & 0x0FF)
#define qlcnic_get_lro_sts_timestamp(sts_data)	\
	(((sts_data) >> 48) & 0x1)
#define qlcnic_get_lro_sts_type(sts_data)	\
	(((sts_data) >> 49) & 0x7)
#define qlcnic_get_lro_sts_push_flag(sts_data)	\
	(((sts_data) >> 52) & 0x1)
#define qlcnic_get_lro_sts_seq_number(sts_data)	\
	((sts_data) & 0x0FFFFFFFF)
#define qlcnic_get_lro_sts_mss(sts_data1)	\
	((sts_data1 >> 32) & 0x0FFFF)

#define qlcnic_83xx_get_lro_sts_mss(sts)	((sts) & 0xffff)

/* opcode field in status_desc */
#define QLCNIC_SYN_OFFLOAD	0x03
#define QLCNIC_RXPKT_DESC	0x04
#define QLCNIC_OLD_RXPKT_DESC	0x3f
#define QLCNIC_RESPONSE_DESC	0x05
#define QLCNIC_LRO_DESC		0x12

#define QLCNIC_TCP_HDR_SIZE		20
#define QLCNIC_TCP_TS_OPTION_SIZE	12
#define QLCNIC_FETCH_RING_ID(handle)	((handle) >> 63)
#define QLCNIC_DESC_OWNER_FW		cpu_to_le64(STATUS_OWNER_PHANTOM)

#define QLCNIC_TCP_TS_HDR_SIZE (QLCNIC_TCP_HDR_SIZE + QLCNIC_TCP_TS_OPTION_SIZE)

/* for status field in status_desc */
#define STATUS_CKSUM_LOOP	0
#define STATUS_CKSUM_OK		2
#define qlcnic_83xx_pktln(sts)		((sts >> 32) & 0x3FFF)
#define qlcnic_83xx_hndl(sts)		((sts >> 48) & 0x7FFF)
#define qlcnic_83xx_csum_status(sts)	((sts >> 39) & 7)
#define qlcnic_83xx_opcode(sts)		((sts >> 42) & 0xF)
#define qlcnic_83xx_vlan_tag(sts)	(((sts) >> 48) & 0xFFFF)
#define qlcnic_83xx_lro_pktln(sts)	(((sts) >> 32) & 0x3FFF)
#define qlcnic_83xx_l2_hdr_off(sts)	(((sts) >> 16) & 0xFF)
#define qlcnic_83xx_l4_hdr_off(sts)	(((sts) >> 24) & 0xFF)
#define qlcnic_83xx_pkt_cnt(sts)	(((sts) >> 16) & 0x7)
#define qlcnic_83xx_is_tstamp(sts)	(((sts) >> 40) & 1)
#define qlcnic_83xx_is_psh_bit(sts)	(((sts) >> 41) & 1)
#define qlcnic_83xx_is_ip_align(sts)	(((sts) >> 46) & 1)
#define qlcnic_83xx_has_vlan_tag(sts)	(((sts) >> 47) & 1)
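
/* The 82xx status ring packs everything into the single 64-bit word
 * described above; the 83xx ring spreads the same information over two
 * 64-bit words: sts_data[0] carries the handle and packet length
 * (qlcnic_83xx_hndl(), qlcnic_83xx_pktln()) while sts_data[1] carries
 * the opcode, checksum status and VLAN information.
 */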
static int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring,
				   int max);

static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *,
					    struct qlcnic_host_rds_ring *,
					    u16, u16);
static inline u8 qlcnic_mac_hash(u64 mac, u16 vlan)
{
	return (u8)((mac & 0xff) ^ ((mac >> 40) & 0xff) ^ (vlan & 0xff));
}
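
/* The hash folds the first and last octets of the 6-byte MAC address
 * (as copied into the low bytes of a u64) with the low byte of the VLAN
 * ID; callers then mask the result down to the bucket count, e.g.
 * hash & (adapter->fhash.fbucket_size - 1).
 */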
static inline u32 qlcnic_get_ref_handle(struct qlcnic_adapter *adapter,
					u16 handle, u8 ring_id)
{
	if (qlcnic_83xx_check(adapter))
		return handle | (ring_id << 15);
	else
		return handle;
}

static inline int qlcnic_82xx_is_lb_pkt(u64 sts_data)
{
	return (qlcnic_get_sts_status(sts_data) == STATUS_CKSUM_LOOP) ? 1 : 0;
}
static void qlcnic_delete_rx_list_mac(struct qlcnic_adapter *adapter,
				      struct qlcnic_filter *fil,
				      void *addr, u16 vlan_id)
{
	int ret;
	u8 op;

	op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
	ret = qlcnic_sre_macaddr_change(adapter, addr, vlan_id, op);
	if (ret)
		return;

	op = vlan_id ? QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL;
	ret = qlcnic_sre_macaddr_change(adapter, addr, vlan_id, op);
	if (!ret) {
		hlist_del(&fil->fnode);
		adapter->rx_fhash.fnum--;
	}
}
static struct qlcnic_filter *qlcnic_find_mac_filter(struct hlist_head *head,
						    void *addr, u16 vlan_id)
{
	struct qlcnic_filter *tmp_fil = NULL;
	struct hlist_node *n;

	hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
		if (ether_addr_equal(tmp_fil->faddr, addr) &&
		    tmp_fil->vlan_id == vlan_id)
			return tmp_fil;
	}

	return NULL;
}
static void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter,
				 struct sk_buff *skb, int loopback_pkt,
				 u16 vlan_id)
{
	struct ethhdr *phdr = (struct ethhdr *)(skb->data);
	struct qlcnic_filter *fil, *tmp_fil;
	struct hlist_head *head;
	unsigned long time;
	u64 src_addr = 0;
	u8 hindex, op;
	int ret;

	if (!qlcnic_sriov_pf_check(adapter) || (vlan_id == 0xffff))
		vlan_id = 0;

	memcpy(&src_addr, phdr->h_source, ETH_ALEN);
	hindex = qlcnic_mac_hash(src_addr, vlan_id) &
		 (adapter->fhash.fbucket_size - 1);

	if (loopback_pkt) {
		if (adapter->rx_fhash.fnum >= adapter->rx_fhash.fmax)
			return;

		head = &(adapter->rx_fhash.fhead[hindex]);

		tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id);
		if (tmp_fil) {
			time = tmp_fil->ftime;
			if (time_after(jiffies, QLCNIC_READD_AGE * HZ + time))
				tmp_fil->ftime = jiffies;
			return;
		}

		fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
		if (!fil)
			return;

		fil->ftime = jiffies;
		memcpy(fil->faddr, &src_addr, ETH_ALEN);
		fil->vlan_id = vlan_id;
		spin_lock(&adapter->rx_mac_learn_lock);
		hlist_add_head(&(fil->fnode), head);
		adapter->rx_fhash.fnum++;
		spin_unlock(&adapter->rx_mac_learn_lock);
	} else {
		head = &adapter->fhash.fhead[hindex];

		spin_lock(&adapter->mac_learn_lock);

		tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id);
		if (tmp_fil) {
			op = vlan_id ? QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL;
			ret = qlcnic_sre_macaddr_change(adapter,
							(u8 *)&src_addr,
							vlan_id, op);
			if (!ret) {
				hlist_del(&tmp_fil->fnode);
				adapter->fhash.fnum--;
			}

			spin_unlock(&adapter->mac_learn_lock);

			return;
		}

		spin_unlock(&adapter->mac_learn_lock);

		head = &adapter->rx_fhash.fhead[hindex];

		spin_lock(&adapter->rx_mac_learn_lock);

		tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id);
		if (tmp_fil)
			qlcnic_delete_rx_list_mac(adapter, tmp_fil, &src_addr,
						  vlan_id);

		spin_unlock(&adapter->rx_mac_learn_lock);
	}
}
void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
			       u16 vlan_id)
{
	struct cmd_desc_type0 *hwdesc;
	struct qlcnic_nic_req *req;
	struct qlcnic_mac_req *mac_req;
	struct qlcnic_vlan_req *vlan_req;
	struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
	u32 producer;
	u64 word;

	producer = tx_ring->producer;
	hwdesc = &tx_ring->desc_head[tx_ring->producer];

	req = (struct qlcnic_nic_req *)hwdesc;
	memset(req, 0, sizeof(struct qlcnic_nic_req));
	req->qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);

	word = QLCNIC_MAC_EVENT | ((u64)(adapter->portnum) << 16);
	req->req_hdr = cpu_to_le64(word);

	mac_req = (struct qlcnic_mac_req *)&(req->words[0]);
	mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
	memcpy(mac_req->mac_addr, uaddr, ETH_ALEN);

	vlan_req = (struct qlcnic_vlan_req *)&req->words[1];
	vlan_req->vlan_id = cpu_to_le16(vlan_id);

	tx_ring->producer = get_next_index(producer, tx_ring->num_desc);
	smp_mb();
}
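
/* Note that the filter update above is not a register write: it is
 * queued as a QLCNIC_REQUEST descriptor on the regular Tx ring and
 * consumed by firmware, so it shares producer accounting (and the
 * smp_mb() visibility guarantee) with normal transmits.
 */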
static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
			       struct cmd_desc_type0 *first_desc,
			       struct sk_buff *skb)
{
	struct vlan_ethhdr *vh = (struct vlan_ethhdr *)(skb->data);
	struct ethhdr *phdr = (struct ethhdr *)(skb->data);
	u16 protocol = ntohs(skb->protocol);
	struct qlcnic_filter *fil, *tmp_fil;
	struct hlist_head *head;
	struct hlist_node *n;
	u64 src_addr = 0;
	u16 vlan_id = 0;
	u8 hindex, hval;

	if (ether_addr_equal(phdr->h_source, adapter->mac_addr))
		return;

	if (adapter->flags & QLCNIC_VLAN_FILTERING) {
		if (protocol == ETH_P_8021Q) {
			vh = (struct vlan_ethhdr *)skb->data;
			vlan_id = ntohs(vh->h_vlan_TCI);
		} else if (skb_vlan_tag_present(skb)) {
			vlan_id = skb_vlan_tag_get(skb);
		}
	}

	memcpy(&src_addr, phdr->h_source, ETH_ALEN);
	hval = qlcnic_mac_hash(src_addr, vlan_id);
	hindex = hval & (adapter->fhash.fbucket_size - 1);
	head = &(adapter->fhash.fhead[hindex]);

	hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
		if (ether_addr_equal(tmp_fil->faddr, (u8 *)&src_addr) &&
		    tmp_fil->vlan_id == vlan_id) {
			if (jiffies > (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
				qlcnic_change_filter(adapter, &src_addr,
						     vlan_id);
			tmp_fil->ftime = jiffies;
			return;
		}
	}

	if (unlikely(adapter->fhash.fnum >= adapter->fhash.fmax)) {
		adapter->stats.mac_filter_limit_overrun++;
		return;
	}

	fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
	if (!fil)
		return;

	qlcnic_change_filter(adapter, &src_addr, vlan_id);
	fil->ftime = jiffies;
	fil->vlan_id = vlan_id;
	memcpy(fil->faddr, &src_addr, ETH_ALEN);
	spin_lock(&adapter->mac_learn_lock);
	hlist_add_head(&(fil->fnode), head);
	adapter->fhash.fnum++;
	spin_unlock(&adapter->mac_learn_lock);
}
#define QLCNIC_ENCAP_VXLAN_PKT		BIT_0
#define QLCNIC_ENCAP_OUTER_L3_IP6	BIT_1
#define QLCNIC_ENCAP_INNER_L3_IP6	BIT_2
#define QLCNIC_ENCAP_INNER_L4_UDP	BIT_3
#define QLCNIC_ENCAP_DO_L3_CSUM	BIT_4
#define QLCNIC_ENCAP_DO_L4_CSUM	BIT_5
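
/* encap_descr layout as built by qlcnic_tx_encap_pkt() below: bits 0-5
 * are the QLCNIC_ENCAP_* flags above, bits 6-9 hold the outer IP header
 * length in 32-bit words and bits 10-15 the outer IP header offset
 * within the frame.
 */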
static int qlcnic_tx_encap_pkt(struct qlcnic_adapter *adapter,
			       struct cmd_desc_type0 *first_desc,
			       struct sk_buff *skb,
			       struct qlcnic_host_tx_ring *tx_ring)
{
	u8 opcode = 0, inner_hdr_len = 0, outer_hdr_len = 0, total_hdr_len = 0;
	int copied, copy_len, descr_size;
	u32 producer = tx_ring->producer;
	struct cmd_desc_type0 *hwdesc;
	u16 flags = 0, encap_descr = 0;

	opcode = QLCNIC_TX_ETHER_PKT;
	encap_descr = QLCNIC_ENCAP_VXLAN_PKT;

	if (skb_is_gso(skb)) {
		inner_hdr_len = skb_inner_transport_header(skb) +
				inner_tcp_hdrlen(skb) -
				skb_inner_mac_header(skb);

		/* VXLAN header size = 8 */
		outer_hdr_len = skb_transport_offset(skb) + 8 +
				sizeof(struct udphdr);
		first_desc->outer_hdr_length = outer_hdr_len;
		total_hdr_len = inner_hdr_len + outer_hdr_len;
		encap_descr |= QLCNIC_ENCAP_DO_L3_CSUM |
			       QLCNIC_ENCAP_DO_L4_CSUM;
		first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		first_desc->hdr_length = inner_hdr_len;

		/* Copy inner and outer headers in Tx descriptor(s)
		 * If total_hdr_len > cmd_desc_type0, use multiple
		 * descriptors
		 */
		copied = 0;
		descr_size = (int)sizeof(struct cmd_desc_type0);
		while (copied < total_hdr_len) {
			copy_len = min(descr_size, (total_hdr_len - copied));
			hwdesc = &tx_ring->desc_head[producer];
			tx_ring->cmd_buf_arr[producer].skb = NULL;
			skb_copy_from_linear_data_offset(skb, copied,
							 (char *)hwdesc,
							 copy_len);
			copied += copy_len;
			producer = get_next_index(producer, tx_ring->num_desc);
		}

		tx_ring->producer = producer;

		/* Make sure updated tx_ring->producer is visible
		 * for qlcnic_tx_avail()
		 */
		smp_mb();
		adapter->stats.encap_lso_frames++;

		opcode = QLCNIC_TX_ENCAP_LSO;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (inner_ip_hdr(skb)->version == 6) {
			if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
				encap_descr |= QLCNIC_ENCAP_INNER_L4_UDP;
		} else {
			if (inner_ip_hdr(skb)->protocol == IPPROTO_UDP)
				encap_descr |= QLCNIC_ENCAP_INNER_L4_UDP;
		}

		adapter->stats.encap_tx_csummed++;
		opcode = QLCNIC_TX_ENCAP_PKT;
	}

	/* Prepare first 16 bits of byte offset 16 of Tx descriptor */
	if (ip_hdr(skb)->version == 6)
		encap_descr |= QLCNIC_ENCAP_OUTER_L3_IP6;

	/* outer IP header's size in 32bit words size*/
	encap_descr |= (skb_network_header_len(skb) >> 2) << 6;

	/* outer IP header offset */
	encap_descr |= skb_network_offset(skb) << 10;
	first_desc->encap_descr = cpu_to_le16(encap_descr);

	first_desc->tcp_hdr_offset = skb_inner_transport_header(skb) -
				     skb->data;
	first_desc->ip_hdr_offset = skb_inner_network_offset(skb);

	qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);

	return 0;
}
static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
			 struct cmd_desc_type0 *first_desc, struct sk_buff *skb,
			 struct qlcnic_host_tx_ring *tx_ring)
{
	u8 l4proto, opcode = 0, hdr_len = 0;
	u16 flags = 0, vlan_tci = 0;
	int copied, offset, copy_len, size;
	struct cmd_desc_type0 *hwdesc;
	struct vlan_ethhdr *vh;
	u16 protocol = ntohs(skb->protocol);
	u32 producer = tx_ring->producer;

	if (protocol == ETH_P_8021Q) {
		vh = (struct vlan_ethhdr *)skb->data;
		flags = QLCNIC_FLAGS_VLAN_TAGGED;
		vlan_tci = ntohs(vh->h_vlan_TCI);
		protocol = ntohs(vh->h_vlan_encapsulated_proto);
	} else if (skb_vlan_tag_present(skb)) {
		flags = QLCNIC_FLAGS_VLAN_OOB;
		vlan_tci = skb_vlan_tag_get(skb);
	}
	if (unlikely(adapter->tx_pvid)) {
		if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
			return -EIO;
		if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED))
			goto set_flags;

		flags = QLCNIC_FLAGS_VLAN_OOB;
		vlan_tci = adapter->tx_pvid;
	}
set_flags:
	qlcnic_set_tx_vlan_tci(first_desc, vlan_tci);
	qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);

	if (*(skb->data) & BIT_0) {
		flags |= BIT_0;
		memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
	}
	opcode = QLCNIC_TX_ETHER_PKT;
	if (skb_is_gso(skb)) {
		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		first_desc->hdr_length = hdr_len;
		opcode = (protocol == ETH_P_IPV6) ? QLCNIC_TX_TCP_LSO6 :
						    QLCNIC_TX_TCP_LSO;

		/* For LSO, we need to copy the MAC/IP/TCP headers into
		 * the descriptor ring */
		copied = 0;
		offset = 2;

		if (flags & QLCNIC_FLAGS_VLAN_OOB) {
			first_desc->hdr_length += VLAN_HLEN;
			first_desc->tcp_hdr_offset = VLAN_HLEN;
			first_desc->ip_hdr_offset = VLAN_HLEN;

			/* Only in case of TSO on vlan device */
			flags |= QLCNIC_FLAGS_VLAN_TAGGED;

			/* Create a TSO vlan header template for firmware */
			hwdesc = &tx_ring->desc_head[producer];
			tx_ring->cmd_buf_arr[producer].skb = NULL;

			copy_len = min((int)sizeof(struct cmd_desc_type0) -
				       offset, hdr_len + VLAN_HLEN);

			vh = (struct vlan_ethhdr *)((char *) hwdesc + 2);
			skb_copy_from_linear_data(skb, vh, 12);
			vh->h_vlan_proto = htons(ETH_P_8021Q);
			vh->h_vlan_TCI = htons(vlan_tci);

			skb_copy_from_linear_data_offset(skb, 12,
							 (char *)vh + 16,
							 copy_len - 16);

			copied = copy_len - VLAN_HLEN;
			offset = 0;
			producer = get_next_index(producer, tx_ring->num_desc);
		}

		while (copied < hdr_len) {
			size = (int)sizeof(struct cmd_desc_type0) - offset;
			copy_len = min(size, (hdr_len - copied));
			hwdesc = &tx_ring->desc_head[producer];
			tx_ring->cmd_buf_arr[producer].skb = NULL;
			skb_copy_from_linear_data_offset(skb, copied,
							 (char *)hwdesc +
							 offset, copy_len);
			copied += copy_len;
			offset = 0;
			producer = get_next_index(producer, tx_ring->num_desc);
		}

		tx_ring->producer = producer;
		smp_mb();
		adapter->stats.lso_frames++;

	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (protocol == ETH_P_IP) {
			l4proto = ip_hdr(skb)->protocol;

			if (l4proto == IPPROTO_TCP)
				opcode = QLCNIC_TX_TCP_PKT;
			else if (l4proto == IPPROTO_UDP)
				opcode = QLCNIC_TX_UDP_PKT;
		} else if (protocol == ETH_P_IPV6) {
			l4proto = ipv6_hdr(skb)->nexthdr;

			if (l4proto == IPPROTO_TCP)
				opcode = QLCNIC_TX_TCPV6_PKT;
			else if (l4proto == IPPROTO_UDP)
				opcode = QLCNIC_TX_UDPV6_PKT;
		}
	}
	first_desc->tcp_hdr_offset += skb_transport_offset(skb);
	first_desc->ip_hdr_offset += skb_network_offset(skb);
	qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);

	return 0;
}
static int qlcnic_map_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
			     struct qlcnic_cmd_buffer *pbuf)
{
	struct qlcnic_skb_frag *nf;
	struct skb_frag_struct *frag;
	int i, nr_frags;
	dma_addr_t map;

	nr_frags = skb_shinfo(skb)->nr_frags;
	nf = &pbuf->frag_array[0];

	map = pci_map_single(pdev, skb->data, skb_headlen(skb),
			     PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(pdev, map))
		goto out_err;

	nf->dma = map;
	nf->length = skb_headlen(skb);

	for (i = 0; i < nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		nf = &pbuf->frag_array[i+1];
		map = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
				       DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, map))
			goto unwind;

		nf->dma = map;
		nf->length = skb_frag_size(frag);
	}

	return 0;

unwind:
	while (--i >= 0) {
		nf = &pbuf->frag_array[i+1];
		pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
	}

	nf = &pbuf->frag_array[0];
	pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);

out_err:
	return -ENOMEM;
}
static void qlcnic_unmap_buffers(struct pci_dev *pdev, struct sk_buff *skb,
				 struct qlcnic_cmd_buffer *pbuf)
{
	struct qlcnic_skb_frag *nf = &pbuf->frag_array[0];
	int i, nr_frags = skb_shinfo(skb)->nr_frags;

	for (i = 0; i < nr_frags; i++) {
		nf = &pbuf->frag_array[i+1];
		pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
	}

	nf = &pbuf->frag_array[0];
	pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
	pbuf->skb = NULL;
}
static inline void qlcnic_clear_cmddesc(u64 *desc)
{
	desc[0] = 0ULL;
	desc[2] = 0ULL;
	desc[7] = 0ULL;
}
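
/* Editor's note: the body above is restored from context; only these
 * quadwords of the command descriptor need clearing between uses, as
 * the buffer address and length words are unconditionally rewritten
 * when the descriptor is filled in qlcnic_xmit_frame().
 */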
netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	struct qlcnic_host_tx_ring *tx_ring;
	struct qlcnic_cmd_buffer *pbuf;
	struct qlcnic_skb_frag *buffrag;
	struct cmd_desc_type0 *hwdesc, *first_desc;
	struct pci_dev *pdev;
	struct ethhdr *phdr;
	int i, k, frag_count, delta = 0;
	u32 producer, num_txd;
	u16 protocol;
	bool l4_is_udp = false;

	if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
		netif_tx_stop_all_queues(netdev);
		return NETDEV_TX_BUSY;
	}

	if (adapter->flags & QLCNIC_MACSPOOF) {
		phdr = (struct ethhdr *)skb->data;
		if (!ether_addr_equal(phdr->h_source, adapter->mac_addr))
			goto drop_packet;
	}

	tx_ring = &adapter->tx_ring[skb_get_queue_mapping(skb)];
	num_txd = tx_ring->num_desc;

	frag_count = skb_shinfo(skb)->nr_frags + 1;

	/* 14 frags supported for normal packet and
	 * 32 frags supported for TSO packet
	 */
	if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) {
		for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++)
			delta += skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (!__pskb_pull_tail(skb, delta))
			goto drop_packet;

		frag_count = 1 + skb_shinfo(skb)->nr_frags;
	}

	if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
		netif_tx_stop_queue(tx_ring->txq);
		if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
			netif_tx_start_queue(tx_ring->txq);
		} else {
			tx_ring->tx_stats.xmit_off++;
			return NETDEV_TX_BUSY;
		}
	}

	producer = tx_ring->producer;
	pbuf = &tx_ring->cmd_buf_arr[producer];
	pdev = adapter->pdev;
	first_desc = &tx_ring->desc_head[producer];
	hwdesc = &tx_ring->desc_head[producer];
	qlcnic_clear_cmddesc((u64 *)hwdesc);

	if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
		adapter->stats.tx_dma_map_error++;
		goto drop_packet;
	}

	pbuf->skb = skb;
	pbuf->frag_count = frag_count;

	qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
	qlcnic_set_tx_port(first_desc, adapter->portnum);

	for (i = 0; i < frag_count; i++) {
		k = i % 4;

		if ((k == 0) && (i > 0)) {
			/* move to next desc.*/
			producer = get_next_index(producer, num_txd);
			hwdesc = &tx_ring->desc_head[producer];
			qlcnic_clear_cmddesc((u64 *)hwdesc);
			tx_ring->cmd_buf_arr[producer].skb = NULL;
		}

		buffrag = &pbuf->frag_array[i];
		hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
		switch (k) {
		case 0:
			hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
			break;
		case 1:
			hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
			break;
		case 2:
			hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
			break;
		case 3:
			hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
			break;
		}
	}

	tx_ring->producer = get_next_index(producer, num_txd);
	smp_mb();

	protocol = ntohs(skb->protocol);
	if (protocol == ETH_P_IP)
		l4_is_udp = ip_hdr(skb)->protocol == IPPROTO_UDP;
	else if (protocol == ETH_P_IPV6)
		l4_is_udp = ipv6_hdr(skb)->nexthdr == IPPROTO_UDP;

	/* Check if it is a VXLAN packet */
	if (!skb->encapsulation || !l4_is_udp ||
	    !qlcnic_encap_tx_offload(adapter)) {
		if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb,
					   tx_ring)))
			goto unwind_buff;
	} else {
		if (unlikely(qlcnic_tx_encap_pkt(adapter, first_desc,
						 skb, tx_ring)))
			goto unwind_buff;
	}

	if (adapter->drv_mac_learn)
		qlcnic_send_filter(adapter, first_desc, skb);

	tx_ring->tx_stats.tx_bytes += skb->len;
	tx_ring->tx_stats.xmit_called++;

	/* Ensure writes are complete before HW fetches Tx descriptors */
	wmb();
	qlcnic_update_cmd_producer(tx_ring);

	return NETDEV_TX_OK;

unwind_buff:
	qlcnic_unmap_buffers(pdev, skb, pbuf);
drop_packet:
	adapter->stats.txdropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
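
/* Worst-case descriptor accounting for the path above: each command
 * descriptor carries up to four buffer fragments (the k = i % 4 loop),
 * and LSO additionally copies the MAC/IP/TCP headers into one or more
 * extra descriptors, which is why qlcnic_tx_avail() is checked against
 * TX_STOP_THRESH before the ring is filled.
 */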
void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->ahw->linkup && !linkup) {
		netdev_info(netdev, "NIC Link is down\n");
		adapter->ahw->linkup = 0;
		netif_carrier_off(netdev);
	} else if (!adapter->ahw->linkup && linkup) {
		adapter->ahw->linkup = 1;

		/* Do not advertise Link up to the stack if device
		 * is in loopback mode
		 */
		if (qlcnic_83xx_check(adapter) && adapter->ahw->lb_mode) {
			netdev_info(netdev, "NIC Link is up for loopback test\n");
			return;
		}

		netdev_info(netdev, "NIC Link is up\n");
		netif_carrier_on(netdev);
	}
}
*adapter
,
813 struct qlcnic_host_rds_ring
*rds_ring
,
814 struct qlcnic_rx_buffer
*buffer
)
818 struct pci_dev
*pdev
= adapter
->pdev
;
820 skb
= netdev_alloc_skb(adapter
->netdev
, rds_ring
->skb_size
);
822 adapter
->stats
.skb_alloc_failure
++;
826 skb_reserve(skb
, NET_IP_ALIGN
);
827 dma
= pci_map_single(pdev
, skb
->data
,
828 rds_ring
->dma_size
, PCI_DMA_FROMDEVICE
);
830 if (pci_dma_mapping_error(pdev
, dma
)) {
831 adapter
->stats
.rx_dma_map_error
++;
832 dev_kfree_skb_any(skb
);
static void qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
					struct qlcnic_host_rds_ring *rds_ring,
					u8 ring_id)
{
	struct rcv_desc *pdesc;
	struct qlcnic_rx_buffer *buffer;
	int count = 0;
	uint32_t producer, handle;
	struct list_head *head;

	if (!spin_trylock(&rds_ring->lock))
		return;

	producer = rds_ring->producer;
	head = &rds_ring->free_list;
	while (!list_empty(head)) {
		buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);

		if (!buffer->skb) {
			if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
				break;
		}
		count++;
		list_del(&buffer->list);

		/* make a rcv descriptor */
		pdesc = &rds_ring->desc_head[producer];
		handle = qlcnic_get_ref_handle(adapter,
					       buffer->ref_handle, ring_id);
		pdesc->reference_handle = cpu_to_le16(handle);
		pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
		pdesc->addr_buffer = cpu_to_le64(buffer->dma);
		producer = get_next_index(producer, rds_ring->num_desc);
	}
	if (count) {
		rds_ring->producer = producer;
		writel((producer - 1) & (rds_ring->num_desc - 1),
		       rds_ring->crb_rcv_producer);
	}
	spin_unlock(&rds_ring->lock);
}
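
/* Note on the doorbell above: hardware expects the index of the last
 * posted receive descriptor rather than the next free slot, hence
 * (producer - 1) masked to the ring size.
 */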
static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
				   struct qlcnic_host_tx_ring *tx_ring,
				   int budget)
{
	u32 sw_consumer, hw_consumer;
	int i, done, count = 0;
	struct qlcnic_cmd_buffer *buffer;
	struct pci_dev *pdev = adapter->pdev;
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_skb_frag *frag;

	if (!spin_trylock(&tx_ring->tx_clean_lock))
		return 1;

	sw_consumer = tx_ring->sw_consumer;
	hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));

	while (sw_consumer != hw_consumer) {
		buffer = &tx_ring->cmd_buf_arr[sw_consumer];
		if (buffer->skb) {
			frag = &buffer->frag_array[0];
			pci_unmap_single(pdev, frag->dma, frag->length,
					 PCI_DMA_TODEVICE);
			frag->dma = 0ULL;
			for (i = 1; i < buffer->frag_count; i++) {
				frag++;
				pci_unmap_page(pdev, frag->dma, frag->length,
					       PCI_DMA_TODEVICE);
				frag->dma = 0ULL;
			}
			tx_ring->tx_stats.xmit_finished++;
			dev_kfree_skb_any(buffer->skb);
			buffer->skb = NULL;
		}

		sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
		if (++count >= budget)
			break;
	}

	tx_ring->sw_consumer = sw_consumer;

	if (count && netif_running(netdev)) {
		smp_mb();
		if (netif_tx_queue_stopped(tx_ring->txq) &&
		    netif_carrier_ok(netdev)) {
			if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
				netif_tx_wake_queue(tx_ring->txq);
				tx_ring->tx_stats.xmit_on++;
			}
		}
		adapter->tx_timeo_cnt = 0;
	}
	/*
	 * If everything is freed up to consumer then check if the ring is full
	 * If the ring is full then check if more needs to be freed and
	 * schedule the call back again.
	 *
	 * This happens when there are 2 CPUs. One could be freeing and the
	 * other filling it. If the ring is full when we get out of here and
	 * the card has already interrupted the host then the host can miss the
	 * interrupt.
	 *
	 * There is still a possible race condition and the host could miss an
	 * interrupt. The card has to take care of this.
	 */
	hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
	done = (sw_consumer == hw_consumer);

	spin_unlock(&tx_ring->tx_clean_lock);

	return done;
}
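
/* qlcnic_process_cmd_ring() returns nonzero ("done") only when the
 * software consumer has caught up with the hardware consumer. The NAPI
 * handlers below treat a zero return as Tx work still pending and
 * report the full budget so the core schedules a repoll.
 */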
static int qlcnic_poll(struct napi_struct *napi, int budget)
{
	int tx_complete, work_done;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_adapter *adapter;
	struct qlcnic_host_tx_ring *tx_ring;

	sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
	adapter = sds_ring->adapter;
	tx_ring = sds_ring->tx_ring;

	tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring,
					      budget);
	work_done = qlcnic_process_rcv_ring(sds_ring, budget);

	/* Check if we need a repoll */
	if (!tx_complete)
		work_done = budget;

	if (work_done < budget) {
		napi_complete_done(&sds_ring->napi, work_done);
		if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
			qlcnic_enable_sds_intr(adapter, sds_ring);
			qlcnic_enable_tx_intr(adapter, tx_ring);
		}
	}

	return work_done;
}

static int qlcnic_tx_poll(struct napi_struct *napi, int budget)
{
	struct qlcnic_host_tx_ring *tx_ring;
	struct qlcnic_adapter *adapter;
	int work_done;

	tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi);
	adapter = tx_ring->adapter;

	work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
	if (work_done) {
		napi_complete(&tx_ring->napi);
		if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
			qlcnic_enable_tx_intr(adapter, tx_ring);
	} else {
		/* As qlcnic_process_cmd_ring() returned 0, we need a repoll*/
		work_done = budget;
	}

	return work_done;
}

static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
{
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_adapter *adapter;
	int work_done;

	sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
	adapter = sds_ring->adapter;

	work_done = qlcnic_process_rcv_ring(sds_ring, budget);

	if (work_done < budget) {
		napi_complete_done(&sds_ring->napi, work_done);
		if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
			qlcnic_enable_sds_intr(adapter, sds_ring);
	}

	return work_done;
}
static void qlcnic_handle_linkevent(struct qlcnic_adapter *adapter,
				    struct qlcnic_fw_msg *msg)
{
	u32 cable_OUI;
	u16 cable_len, link_speed;
	u8 link_status, module, duplex, autoneg, lb_status = 0;
	struct net_device *netdev = adapter->netdev;

	adapter->ahw->has_link_events = 1;

	cable_OUI = msg->body[1] & 0xffffffff;
	cable_len = (msg->body[1] >> 32) & 0xffff;
	link_speed = (msg->body[1] >> 48) & 0xffff;

	link_status = msg->body[2] & 0xff;
	duplex = (msg->body[2] >> 16) & 0xff;
	autoneg = (msg->body[2] >> 24) & 0xff;
	lb_status = (msg->body[2] >> 32) & 0x3;

	module = (msg->body[2] >> 8) & 0xff;
	if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE)
		dev_info(&netdev->dev,
			 "unsupported cable: OUI 0x%x, length %d\n",
			 cable_OUI, cable_len);
	else if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN)
		dev_info(&netdev->dev, "unsupported cable length %d\n",
			 cable_len);

	if (!link_status && (lb_status == QLCNIC_ILB_MODE ||
			     lb_status == QLCNIC_ELB_MODE))
		adapter->ahw->loopback_state |= QLCNIC_LINKEVENT;

	qlcnic_advert_link_change(adapter, link_status);

	if (duplex == LINKEVENT_FULL_DUPLEX)
		adapter->ahw->link_duplex = DUPLEX_FULL;
	else
		adapter->ahw->link_duplex = DUPLEX_HALF;

	adapter->ahw->module_type = module;
	adapter->ahw->link_autoneg = autoneg;

	if (link_status) {
		adapter->ahw->link_speed = link_speed;
	} else {
		adapter->ahw->link_speed = SPEED_UNKNOWN;
		adapter->ahw->link_duplex = DUPLEX_UNKNOWN;
	}
}
static void qlcnic_handle_fw_message(int desc_cnt, int index,
				     struct qlcnic_host_sds_ring *sds_ring)
{
	struct qlcnic_fw_msg msg;
	struct status_desc *desc;
	struct qlcnic_adapter *adapter;
	struct device *dev;
	int i = 0, opcode, ret;

	while (desc_cnt > 0 && i < 8) {
		desc = &sds_ring->desc_head[index];
		msg.words[i++] = le64_to_cpu(desc->status_desc_data[0]);
		msg.words[i++] = le64_to_cpu(desc->status_desc_data[1]);

		index = get_next_index(index, sds_ring->num_desc);
		desc_cnt--;
	}

	adapter = sds_ring->adapter;
	dev = &adapter->pdev->dev;
	opcode = qlcnic_get_nic_msg_opcode(msg.body[0]);

	switch (opcode) {
	case QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE:
		qlcnic_handle_linkevent(adapter, &msg);
		break;
	case QLCNIC_C2H_OPCODE_CONFIG_LOOPBACK:
		ret = (u32)(msg.body[1]);
		switch (ret) {
		case 0:
			adapter->ahw->loopback_state |= QLCNIC_LB_RESPONSE;
			break;
		case 1:
			dev_info(dev, "loopback already in progress\n");
			adapter->ahw->diag_cnt = -EINPROGRESS;
			break;
		case 2:
			dev_info(dev, "loopback cable is not connected\n");
			adapter->ahw->diag_cnt = -ENODEV;
			break;
		default:
			dev_info(dev,
				 "loopback configure request failed, err %x\n",
				 ret);
			adapter->ahw->diag_cnt = -EIO;
			break;
		}
		break;
	case QLCNIC_C2H_OPCODE_GET_DCB_AEN:
		qlcnic_dcb_aen_handler(adapter->dcb, (void *)&msg);
		break;
	default:
		break;
	}
}
static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
					    struct qlcnic_host_rds_ring *ring,
					    u16 index, u16 cksum)
{
	struct qlcnic_rx_buffer *buffer;
	struct sk_buff *skb;

	buffer = &ring->rx_buf_arr[index];
	if (unlikely(buffer->skb == NULL)) {
		WARN_ON(1);
		return NULL;
	}

	pci_unmap_single(adapter->pdev, buffer->dma, ring->dma_size,
			 PCI_DMA_FROMDEVICE);

	skb = buffer->skb;
	if (likely((adapter->netdev->features & NETIF_F_RXCSUM) &&
		   (cksum == STATUS_CKSUM_OK || cksum == STATUS_CKSUM_LOOP))) {
		adapter->stats.csummed++;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else {
		skb_checksum_none_assert(skb);
	}

	buffer->skb = NULL;

	return skb;
}
static inline int qlcnic_check_rx_tagging(struct qlcnic_adapter *adapter,
					  struct sk_buff *skb, u16 *vlan_tag)
{
	struct ethhdr *eth_hdr;

	if (!__vlan_get_tag(skb, vlan_tag)) {
		eth_hdr = (struct ethhdr *)skb->data;
		memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2);
		skb_pull(skb, VLAN_HLEN);
	}
	if (!adapter->rx_pvid)
		return 0;

	if (*vlan_tag == adapter->rx_pvid) {
		/* Outer vlan tag. Packet should follow non-vlan path */
		*vlan_tag = 0xffff;
		return 0;
	}
	if (adapter->flags & QLCNIC_TAGGING_ENABLED)
		return 0;

	return -EINVAL;
}
static struct qlcnic_rx_buffer *
qlcnic_process_rcv(struct qlcnic_adapter *adapter,
		   struct qlcnic_host_sds_ring *sds_ring, int ring,
		   u64 sts_data0)
{
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_rx_buffer *buffer;
	struct sk_buff *skb;
	struct qlcnic_host_rds_ring *rds_ring;
	int index, length, cksum, pkt_offset, is_lb_pkt;
	u16 vid = 0xffff, t_vid;

	if (unlikely(ring >= adapter->max_rds_rings))
		return NULL;

	rds_ring = &recv_ctx->rds_rings[ring];

	index = qlcnic_get_sts_refhandle(sts_data0);
	if (unlikely(index >= rds_ring->num_desc))
		return NULL;

	buffer = &rds_ring->rx_buf_arr[index];
	length = qlcnic_get_sts_totallength(sts_data0);
	cksum  = qlcnic_get_sts_status(sts_data0);
	pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);

	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
	if (!skb)
		return buffer;

	if (adapter->rx_mac_learn) {
		t_vid = 0;
		is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0);
		qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
	}

	if (length > rds_ring->skb_size)
		skb_put(skb, rds_ring->skb_size);
	else
		skb_put(skb, length);

	if (pkt_offset)
		skb_pull(skb, pkt_offset);

	if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
		adapter->stats.rxdropped++;
		dev_kfree_skb(skb);
		return buffer;
	}

	skb->protocol = eth_type_trans(skb, netdev);

	if (vid != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);

	napi_gro_receive(&sds_ring->napi, skb);

	adapter->stats.rx_pkts++;
	adapter->stats.rxbytes += length;

	return buffer;
}
#define QLC_TCP_HDR_SIZE	20
#define QLC_TCP_TS_OPTION_SIZE	12
#define QLC_TCP_TS_HDR_SIZE	(QLC_TCP_HDR_SIZE + QLC_TCP_TS_OPTION_SIZE)
static struct qlcnic_rx_buffer *
qlcnic_process_lro(struct qlcnic_adapter *adapter,
		   int ring, u64 sts_data0, u64 sts_data1)
{
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_rx_buffer *buffer;
	struct sk_buff *skb;
	struct qlcnic_host_rds_ring *rds_ring;
	struct iphdr *iph;
	struct ipv6hdr *ipv6h;
	struct tcphdr *th;
	bool push, timestamp;
	int index, l2_hdr_offset, l4_hdr_offset, is_lb_pkt;
	u16 lro_length, length, data_offset, t_vid, vid = 0xffff;
	u32 seq_number;

	if (unlikely(ring >= adapter->max_rds_rings))
		return NULL;

	rds_ring = &recv_ctx->rds_rings[ring];

	index = qlcnic_get_lro_sts_refhandle(sts_data0);
	if (unlikely(index >= rds_ring->num_desc))
		return NULL;

	buffer = &rds_ring->rx_buf_arr[index];

	timestamp = qlcnic_get_lro_sts_timestamp(sts_data0);
	lro_length = qlcnic_get_lro_sts_length(sts_data0);
	l2_hdr_offset = qlcnic_get_lro_sts_l2_hdr_offset(sts_data0);
	l4_hdr_offset = qlcnic_get_lro_sts_l4_hdr_offset(sts_data0);
	push = qlcnic_get_lro_sts_push_flag(sts_data0);
	seq_number = qlcnic_get_lro_sts_seq_number(sts_data1);

	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
	if (!skb)
		return buffer;

	if (adapter->rx_mac_learn) {
		t_vid = 0;
		is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0);
		qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
	}

	if (timestamp)
		data_offset = l4_hdr_offset + QLC_TCP_TS_HDR_SIZE;
	else
		data_offset = l4_hdr_offset + QLC_TCP_HDR_SIZE;

	skb_put(skb, lro_length + data_offset);
	skb_pull(skb, l2_hdr_offset);

	if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
		adapter->stats.rxdropped++;
		dev_kfree_skb(skb);
		return buffer;
	}

	skb->protocol = eth_type_trans(skb, netdev);

	if (ntohs(skb->protocol) == ETH_P_IPV6) {
		ipv6h = (struct ipv6hdr *)skb->data;
		th = (struct tcphdr *)(skb->data + sizeof(struct ipv6hdr));
		length = (th->doff << 2) + lro_length;
		ipv6h->payload_len = htons(length);
	} else {
		iph = (struct iphdr *)skb->data;
		th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
		length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
		csum_replace2(&iph->check, iph->tot_len, htons(length));
		iph->tot_len = htons(length);
	}

	th->psh = push;
	th->seq = htonl(seq_number);
	length = skb->len;

	if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP) {
		skb_shinfo(skb)->gso_size = qlcnic_get_lro_sts_mss(sts_data1);
		if (skb->protocol == htons(ETH_P_IPV6))
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
		else
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
	}

	if (vid != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
	netif_receive_skb(skb);

	adapter->stats.lro_pkts++;
	adapter->stats.lrobytes += length;

	return buffer;
}
static int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max)
{
	struct qlcnic_host_rds_ring *rds_ring;
	struct qlcnic_adapter *adapter = sds_ring->adapter;
	struct list_head *cur;
	struct status_desc *desc;
	struct qlcnic_rx_buffer *rxbuf;
	int opcode, desc_cnt, count = 0;
	u64 sts_data0, sts_data1;
	u8 ring;
	u32 consumer = sds_ring->consumer;

	while (count < max) {
		desc = &sds_ring->desc_head[consumer];
		sts_data0 = le64_to_cpu(desc->status_desc_data[0]);

		if (!(sts_data0 & STATUS_OWNER_HOST))
			break;

		desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
		opcode = qlcnic_get_sts_opcode(sts_data0);

		switch (opcode) {
		case QLCNIC_RXPKT_DESC:
		case QLCNIC_OLD_RXPKT_DESC:
		case QLCNIC_SYN_OFFLOAD:
			ring = qlcnic_get_sts_type(sts_data0);
			rxbuf = qlcnic_process_rcv(adapter, sds_ring, ring,
						   sts_data0);
			break;
		case QLCNIC_LRO_DESC:
			ring = qlcnic_get_lro_sts_type(sts_data0);
			sts_data1 = le64_to_cpu(desc->status_desc_data[1]);
			rxbuf = qlcnic_process_lro(adapter, ring, sts_data0,
						   sts_data1);
			break;
		case QLCNIC_RESPONSE_DESC:
			qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
			goto skip;
		default:
			goto skip;
		}
		WARN_ON(desc_cnt > 1);

		if (likely(rxbuf))
			list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
		else
			adapter->stats.null_rxbuf++;
skip:
		for (; desc_cnt > 0; desc_cnt--) {
			desc = &sds_ring->desc_head[consumer];
			desc->status_desc_data[0] = QLCNIC_DESC_OWNER_FW;
			consumer = get_next_index(consumer, sds_ring->num_desc);
		}
		count++;
	}

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &adapter->recv_ctx->rds_rings[ring];
		if (!list_empty(&sds_ring->free_list[ring])) {
			list_for_each(cur, &sds_ring->free_list[ring]) {
				rxbuf = list_entry(cur, struct qlcnic_rx_buffer,
						   list);
				qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf);
			}
			spin_lock(&rds_ring->lock);
			list_splice_tail_init(&sds_ring->free_list[ring],
					      &rds_ring->free_list);
			spin_unlock(&rds_ring->lock);
		}

		qlcnic_post_rx_buffers_nodb(adapter, rds_ring, ring);
	}

	if (count) {
		sds_ring->consumer = consumer;
		writel(consumer, sds_ring->crb_sts_consumer);
	}

	return count;
}
void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
			    struct qlcnic_host_rds_ring *rds_ring, u8 ring_id)
{
	struct rcv_desc *pdesc;
	struct qlcnic_rx_buffer *buffer;
	int count = 0;
	u32 producer, handle;
	struct list_head *head;

	producer = rds_ring->producer;
	head = &rds_ring->free_list;

	while (!list_empty(head)) {
		buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);

		if (!buffer->skb) {
			if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
				break;
		}

		count++;
		list_del(&buffer->list);

		/* make a rcv descriptor */
		pdesc = &rds_ring->desc_head[producer];
		pdesc->addr_buffer = cpu_to_le64(buffer->dma);
		handle = qlcnic_get_ref_handle(adapter, buffer->ref_handle,
					       ring_id);
		pdesc->reference_handle = cpu_to_le16(handle);
		pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
		producer = get_next_index(producer, rds_ring->num_desc);
	}

	if (count) {
		rds_ring->producer = producer;
		writel((producer - 1) & (rds_ring->num_desc - 1),
		       rds_ring->crb_rcv_producer);
	}
}
static void dump_skb(struct sk_buff *skb, struct qlcnic_adapter *adapter)
{
	if (adapter->ahw->msg_enable & NETIF_MSG_DRV) {
		char prefix[30];

		scnprintf(prefix, sizeof(prefix), "%s: %s: ",
			  dev_name(&adapter->pdev->dev), __func__);

		print_hex_dump_debug(prefix, DUMP_PREFIX_NONE, 16, 1,
				     skb->data, skb->len, true);
	}
}
static void qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter, int ring,
				    u64 sts_data0)
{
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct sk_buff *skb;
	struct qlcnic_host_rds_ring *rds_ring;
	int index, length, cksum, pkt_offset;

	if (unlikely(ring >= adapter->max_rds_rings))
		return;

	rds_ring = &recv_ctx->rds_rings[ring];

	index = qlcnic_get_sts_refhandle(sts_data0);
	length = qlcnic_get_sts_totallength(sts_data0);
	if (unlikely(index >= rds_ring->num_desc))
		return;

	cksum  = qlcnic_get_sts_status(sts_data0);
	pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);

	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
	if (!skb)
		return;

	if (length > rds_ring->skb_size)
		skb_put(skb, rds_ring->skb_size);
	else
		skb_put(skb, length);

	if (pkt_offset)
		skb_pull(skb, pkt_offset);

	if (!qlcnic_check_loopback_buff(skb->data, adapter->mac_addr))
		adapter->ahw->diag_cnt++;
	else
		dump_skb(skb, adapter);

	dev_kfree_skb_any(skb);
	adapter->stats.rx_pkts++;
	adapter->stats.rxbytes += length;
}

void qlcnic_82xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
{
	struct qlcnic_adapter *adapter = sds_ring->adapter;
	struct status_desc *desc;
	u64 sts_data0;
	int ring, opcode, desc_cnt;
	u32 consumer = sds_ring->consumer;

	desc = &sds_ring->desc_head[consumer];
	sts_data0 = le64_to_cpu(desc->status_desc_data[0]);

	if (!(sts_data0 & STATUS_OWNER_HOST))
		return;

	desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
	opcode = qlcnic_get_sts_opcode(sts_data0);
	switch (opcode) {
	case QLCNIC_RESPONSE_DESC:
		qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
		break;
	default:
		ring = qlcnic_get_sts_type(sts_data0);
		qlcnic_process_rcv_diag(adapter, ring, sts_data0);
		break;
	}

	for (; desc_cnt > 0; desc_cnt--) {
		desc = &sds_ring->desc_head[consumer];
		desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
		consumer = get_next_index(consumer, sds_ring->num_desc);
	}

	sds_ring->consumer = consumer;
	writel(consumer, sds_ring->crb_sts_consumer);
}
int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
			 struct net_device *netdev)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_host_tx_ring *tx_ring;

	if (qlcnic_alloc_sds_rings(recv_ctx, adapter->drv_sds_rings))
		return -ENOMEM;

	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		if (qlcnic_check_multi_tx(adapter) &&
		    !adapter->ahw->diag_test) {
			netif_napi_add(netdev, &sds_ring->napi, qlcnic_rx_poll,
				       NAPI_POLL_WEIGHT);
		} else {
			if (ring == (adapter->drv_sds_rings - 1))
				netif_napi_add(netdev, &sds_ring->napi,
					       qlcnic_poll,
					       NAPI_POLL_WEIGHT);
			else
				netif_napi_add(netdev, &sds_ring->napi,
					       qlcnic_rx_poll,
					       NAPI_POLL_WEIGHT);
		}
	}

	if (qlcnic_alloc_tx_rings(adapter, netdev)) {
		qlcnic_free_sds_rings(recv_ctx);
		return -ENOMEM;
	}

	if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) {
		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
			tx_ring = &adapter->tx_ring[ring];
			netif_tx_napi_add(netdev, &tx_ring->napi, qlcnic_tx_poll,
					  NAPI_POLL_WEIGHT);
		}
	}

	return 0;
}

void qlcnic_82xx_napi_del(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_host_tx_ring *tx_ring;

	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		netif_napi_del(&sds_ring->napi);
	}

	qlcnic_free_sds_rings(adapter->recv_ctx);

	if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) {
		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
			tx_ring = &adapter->tx_ring[ring];
			netif_napi_del(&tx_ring->napi);
		}
	}

	qlcnic_free_tx_rings(adapter);
}
void qlcnic_82xx_napi_enable(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_host_tx_ring *tx_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return;

	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		napi_enable(&sds_ring->napi);
		qlcnic_enable_sds_intr(adapter, sds_ring);
	}

	if (qlcnic_check_multi_tx(adapter) &&
	    (adapter->flags & QLCNIC_MSIX_ENABLED) &&
	    !adapter->ahw->diag_test) {
		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
			tx_ring = &adapter->tx_ring[ring];
			napi_enable(&tx_ring->napi);
			qlcnic_enable_tx_intr(adapter, tx_ring);
		}
	}
}

void qlcnic_82xx_napi_disable(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_host_tx_ring *tx_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return;

	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		qlcnic_disable_sds_intr(adapter, sds_ring);
		napi_synchronize(&sds_ring->napi);
		napi_disable(&sds_ring->napi);
	}

	if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
	    !adapter->ahw->diag_test &&
	    qlcnic_check_multi_tx(adapter)) {
		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
			tx_ring = &adapter->tx_ring[ring];
			qlcnic_disable_tx_intr(adapter, tx_ring);
			napi_synchronize(&tx_ring->napi);
			napi_disable(&tx_ring->napi);
		}
	}
}
#define QLC_83XX_NORMAL_LB_PKT	(1ULL << 36)
#define QLC_83XX_LRO_LB_PKT	(1ULL << 46)

static inline int qlcnic_83xx_is_lb_pkt(u64 sts_data, int lro_pkt)
{
	if (lro_pkt)
		return (sts_data & QLC_83XX_LRO_LB_PKT) ? 1 : 0;
	else
		return (sts_data & QLC_83XX_NORMAL_LB_PKT) ? 1 : 0;
}
#define QLCNIC_ENCAP_LENGTH_MASK	0x7f

static inline u8 qlcnic_encap_length(u64 sts_data)
{
	return sts_data & QLCNIC_ENCAP_LENGTH_MASK;
}
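
/* On 83xx, a nonzero encapsulation length in sts_data[1] means firmware
 * also validated the inner (encapsulated) checksum; the Rx path below
 * then raises skb->csum_level on top of CHECKSUM_UNNECESSARY.
 */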
static struct qlcnic_rx_buffer *
qlcnic_83xx_process_rcv(struct qlcnic_adapter *adapter,
			struct qlcnic_host_sds_ring *sds_ring,
			u8 ring, u64 sts_data[])
{
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_rx_buffer *buffer;
	struct sk_buff *skb;
	struct qlcnic_host_rds_ring *rds_ring;
	int index, length, cksum, is_lb_pkt;
	u16 vid = 0xffff;
	int err;

	if (unlikely(ring >= adapter->max_rds_rings))
		return NULL;

	rds_ring = &recv_ctx->rds_rings[ring];

	index = qlcnic_83xx_hndl(sts_data[0]);
	if (unlikely(index >= rds_ring->num_desc))
		return NULL;

	buffer = &rds_ring->rx_buf_arr[index];
	length = qlcnic_83xx_pktln(sts_data[0]);
	cksum  = qlcnic_83xx_csum_status(sts_data[1]);
	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
	if (!skb)
		return buffer;

	if (length > rds_ring->skb_size)
		skb_put(skb, rds_ring->skb_size);
	else
		skb_put(skb, length);

	err = qlcnic_check_rx_tagging(adapter, skb, &vid);

	if (adapter->rx_mac_learn) {
		is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 0);
		qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, vid);
	}

	if (unlikely(err)) {
		adapter->stats.rxdropped++;
		dev_kfree_skb(skb);
		return buffer;
	}

	skb->protocol = eth_type_trans(skb, netdev);

	if (qlcnic_encap_length(sts_data[1]) &&
	    skb->ip_summed == CHECKSUM_UNNECESSARY) {
		skb->csum_level = 1;
		adapter->stats.encap_rx_csummed++;
	}

	if (vid != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);

	napi_gro_receive(&sds_ring->napi, skb);

	adapter->stats.rx_pkts++;
	adapter->stats.rxbytes += length;

	return buffer;
}
static struct qlcnic_rx_buffer *
qlcnic_83xx_process_lro(struct qlcnic_adapter *adapter,
			u8 ring, u64 sts_data[])
{
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_rx_buffer *buffer;
	struct sk_buff *skb;
	struct qlcnic_host_rds_ring *rds_ring;
	struct iphdr *iph;
	struct ipv6hdr *ipv6h;
	struct tcphdr *th;
	bool push;
	int l2_hdr_offset, l4_hdr_offset;
	int index, is_lb_pkt;
	u16 lro_length, length, data_offset, gso_size;
	u16 vid = 0xffff;
	int err;

	if (unlikely(ring >= adapter->max_rds_rings))
		return NULL;

	rds_ring = &recv_ctx->rds_rings[ring];

	index = qlcnic_83xx_hndl(sts_data[0]);
	if (unlikely(index >= rds_ring->num_desc))
		return NULL;

	buffer = &rds_ring->rx_buf_arr[index];

	lro_length = qlcnic_83xx_lro_pktln(sts_data[0]);
	l2_hdr_offset = qlcnic_83xx_l2_hdr_off(sts_data[1]);
	l4_hdr_offset = qlcnic_83xx_l4_hdr_off(sts_data[1]);
	push = qlcnic_83xx_is_psh_bit(sts_data[1]);

	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
	if (!skb)
		return buffer;

	if (qlcnic_83xx_is_tstamp(sts_data[1]))
		data_offset = l4_hdr_offset + QLCNIC_TCP_TS_HDR_SIZE;
	else
		data_offset = l4_hdr_offset + QLCNIC_TCP_HDR_SIZE;

	skb_put(skb, lro_length + data_offset);
	skb_pull(skb, l2_hdr_offset);

	err = qlcnic_check_rx_tagging(adapter, skb, &vid);

	if (adapter->rx_mac_learn) {
		is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 1);
		qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, vid);
	}

	if (unlikely(err)) {
		adapter->stats.rxdropped++;
		dev_kfree_skb(skb);
		return buffer;
	}

	skb->protocol = eth_type_trans(skb, netdev);
	if (ntohs(skb->protocol) == ETH_P_IPV6) {
		ipv6h = (struct ipv6hdr *)skb->data;
		th = (struct tcphdr *)(skb->data + sizeof(struct ipv6hdr));

		length = (th->doff << 2) + lro_length;
		ipv6h->payload_len = htons(length);
	} else {
		iph = (struct iphdr *)skb->data;
		th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
		length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
		csum_replace2(&iph->check, iph->tot_len, htons(length));
		iph->tot_len = htons(length);
	}

	th->psh = push;
	length = skb->len;

	if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP) {
		gso_size = qlcnic_83xx_get_lro_sts_mss(sts_data[0]);
		skb_shinfo(skb)->gso_size = gso_size;
		if (skb->protocol == htons(ETH_P_IPV6))
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
		else
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
	}

	if (vid != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);

	netif_receive_skb(skb);

	adapter->stats.lro_pkts++;
	adapter->stats.lrobytes += length;

	return buffer;
}
static int qlcnic_83xx_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring,
					int max)
{
	struct qlcnic_host_rds_ring *rds_ring;
	struct qlcnic_adapter *adapter = sds_ring->adapter;
	struct list_head *cur;
	struct status_desc *desc;
	struct qlcnic_rx_buffer *rxbuf = NULL;
	u8 ring;
	u64 sts_data[2];
	int count = 0, opcode;
	u32 consumer = sds_ring->consumer;

	while (count < max) {
		desc = &sds_ring->desc_head[consumer];
		sts_data[1] = le64_to_cpu(desc->status_desc_data[1]);
		opcode = qlcnic_83xx_opcode(sts_data[1]);
		if (!opcode)
			break;
		sts_data[0] = le64_to_cpu(desc->status_desc_data[0]);
		ring = QLCNIC_FETCH_RING_ID(sts_data[0]);

		switch (opcode) {
		case QLC_83XX_REG_DESC:
			rxbuf = qlcnic_83xx_process_rcv(adapter, sds_ring,
							ring, sts_data);
			break;
		case QLC_83XX_LRO_DESC:
			rxbuf = qlcnic_83xx_process_lro(adapter, ring,
							sts_data);
			break;
		default:
			dev_info(&adapter->pdev->dev,
				 "Unknown opcode: 0x%x\n", opcode);
			goto skip;
		}

		if (likely(rxbuf))
			list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
		else
			adapter->stats.null_rxbuf++;
skip:
		desc = &sds_ring->desc_head[consumer];
		/* Reset the descriptor */
		desc->status_desc_data[1] = 0;
		consumer = get_next_index(consumer, sds_ring->num_desc);
		count++;
	}
	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &adapter->recv_ctx->rds_rings[ring];
		if (!list_empty(&sds_ring->free_list[ring])) {
			list_for_each(cur, &sds_ring->free_list[ring]) {
				rxbuf = list_entry(cur, struct qlcnic_rx_buffer,
						   list);
				qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf);
			}
			spin_lock(&rds_ring->lock);
			list_splice_tail_init(&sds_ring->free_list[ring],
					      &rds_ring->free_list);
			spin_unlock(&rds_ring->lock);
		}
		qlcnic_post_rx_buffers_nodb(adapter, rds_ring, ring);
	}
	if (count) {
		sds_ring->consumer = consumer;
		writel(consumer, sds_ring->crb_sts_consumer);
	}
	return count;
}
static int qlcnic_83xx_msix_sriov_vf_poll(struct napi_struct *napi, int budget)
{
	int tx_complete;
	int work_done;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_adapter *adapter;
	struct qlcnic_host_tx_ring *tx_ring;

	sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
	adapter = sds_ring->adapter;
	/* tx ring count = 1 */
	tx_ring = adapter->tx_ring;

	tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
	work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);

	/* Check if we need a repoll */
	if (!tx_complete)
		work_done = budget;

	if (work_done < budget) {
		napi_complete_done(&sds_ring->napi, work_done);
		qlcnic_enable_sds_intr(adapter, sds_ring);
	}

	return work_done;
}

static int qlcnic_83xx_poll(struct napi_struct *napi, int budget)
{
	int tx_complete;
	int work_done;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_adapter *adapter;
	struct qlcnic_host_tx_ring *tx_ring;

	sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
	adapter = sds_ring->adapter;
	/* tx ring count = 1 */
	tx_ring = adapter->tx_ring;

	tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
	work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);

	/* Check if we need a repoll */
	if (!tx_complete)
		work_done = budget;

	if (work_done < budget) {
		napi_complete_done(&sds_ring->napi, work_done);
		qlcnic_enable_sds_intr(adapter, sds_ring);
	}

	return work_done;
}
static int qlcnic_83xx_msix_tx_poll(struct napi_struct *napi, int budget)
{
	int work_done;
	struct qlcnic_host_tx_ring *tx_ring;
	struct qlcnic_adapter *adapter;

	tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi);
	adapter = tx_ring->adapter;
	work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
	if (work_done) {
		napi_complete(&tx_ring->napi);
		if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
			qlcnic_enable_tx_intr(adapter, tx_ring);
	} else {
		/* need a repoll */
		work_done = budget;
	}

	return work_done;
}

static int qlcnic_83xx_rx_poll(struct napi_struct *napi, int budget)
{
	int work_done;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_adapter *adapter;

	sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
	adapter = sds_ring->adapter;
	work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
	if (work_done < budget) {
		napi_complete_done(&sds_ring->napi, work_done);
		if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
			qlcnic_enable_sds_intr(adapter, sds_ring);
	}

	return work_done;
}
void qlcnic_83xx_napi_enable(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_host_tx_ring *tx_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return;

	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		napi_enable(&sds_ring->napi);
		if (adapter->flags & QLCNIC_MSIX_ENABLED)
			qlcnic_enable_sds_intr(adapter, sds_ring);
	}

	if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
	    !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
			tx_ring = &adapter->tx_ring[ring];
			napi_enable(&tx_ring->napi);
			qlcnic_enable_tx_intr(adapter, tx_ring);
		}
	}
}

void qlcnic_83xx_napi_disable(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_host_tx_ring *tx_ring;

	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return;

	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		if (adapter->flags & QLCNIC_MSIX_ENABLED)
			qlcnic_disable_sds_intr(adapter, sds_ring);
		napi_synchronize(&sds_ring->napi);
		napi_disable(&sds_ring->napi);
	}

	if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
	    !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
			tx_ring = &adapter->tx_ring[ring];
			qlcnic_disable_tx_intr(adapter, tx_ring);
			napi_synchronize(&tx_ring->napi);
			napi_disable(&tx_ring->napi);
		}
	}
}
int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter,
			 struct net_device *netdev)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_host_tx_ring *tx_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	if (qlcnic_alloc_sds_rings(recv_ctx, adapter->drv_sds_rings))
		return -ENOMEM;

	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		if (adapter->flags & QLCNIC_MSIX_ENABLED) {
			if (!(adapter->flags & QLCNIC_TX_INTR_SHARED))
				netif_napi_add(netdev, &sds_ring->napi,
					       qlcnic_83xx_rx_poll,
					       NAPI_POLL_WEIGHT);
			else
				netif_napi_add(netdev, &sds_ring->napi,
					       qlcnic_83xx_msix_sriov_vf_poll,
					       NAPI_POLL_WEIGHT);

		} else {
			netif_napi_add(netdev, &sds_ring->napi,
				       qlcnic_83xx_poll,
				       NAPI_POLL_WEIGHT);
		}
	}

	if (qlcnic_alloc_tx_rings(adapter, netdev)) {
		qlcnic_free_sds_rings(recv_ctx);
		return -ENOMEM;
	}

	if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
	    !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
			tx_ring = &adapter->tx_ring[ring];
			netif_tx_napi_add(netdev, &tx_ring->napi,
					  qlcnic_83xx_msix_tx_poll,
					  NAPI_POLL_WEIGHT);
		}
	}

	return 0;
}

void qlcnic_83xx_napi_del(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_host_tx_ring *tx_ring;

	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		netif_napi_del(&sds_ring->napi);
	}

	qlcnic_free_sds_rings(adapter->recv_ctx);

	if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
	    !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
			tx_ring = &adapter->tx_ring[ring];
			netif_napi_del(&tx_ring->napi);
		}
	}

	qlcnic_free_tx_rings(adapter);
}
static void qlcnic_83xx_process_rcv_diag(struct qlcnic_adapter *adapter,
					 int ring, u64 sts_data[])
{
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct sk_buff *skb;
	struct qlcnic_host_rds_ring *rds_ring;
	int index, length;

	if (unlikely(ring >= adapter->max_rds_rings))
		return;

	rds_ring = &recv_ctx->rds_rings[ring];
	index = qlcnic_83xx_hndl(sts_data[0]);
	if (unlikely(index >= rds_ring->num_desc))
		return;

	length = qlcnic_83xx_pktln(sts_data[0]);

	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
	if (!skb)
		return;

	if (length > rds_ring->skb_size)
		skb_put(skb, rds_ring->skb_size);
	else
		skb_put(skb, length);

	if (!qlcnic_check_loopback_buff(skb->data, adapter->mac_addr))
		adapter->ahw->diag_cnt++;
	else
		dump_skb(skb, adapter);

	dev_kfree_skb_any(skb);
}
void qlcnic_83xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
{
	struct qlcnic_adapter *adapter = sds_ring->adapter;
	struct status_desc *desc;
	u64 sts_data[2];
	int ring, opcode;
	u32 consumer = sds_ring->consumer;

	desc = &sds_ring->desc_head[consumer];
	sts_data[0] = le64_to_cpu(desc->status_desc_data[0]);
	sts_data[1] = le64_to_cpu(desc->status_desc_data[1]);
	opcode = qlcnic_83xx_opcode(sts_data[1]);
	if (!opcode)
		return;

	ring = QLCNIC_FETCH_RING_ID(sts_data[0]);
	qlcnic_83xx_process_rcv_diag(adapter, ring, sts_data);
	desc = &sds_ring->desc_head[consumer];
	desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
	consumer = get_next_index(consumer, sds_ring->num_desc);
	sds_ring->consumer = consumer;
	writel(consumer, sds_ring->crb_sts_consumer);
}