/*
 * QLogic qlcnic NIC Driver
 * Copyright (c) 2009-2013 QLogic Corporation
 *
 * See LICENSE.qlcnic for copyright and licensing details.
 */

#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <net/checksum.h>
#include <linux/printk.h>

#include "qlcnic.h"

#define QLCNIC_TX_ETHER_PKT	0x01
#define QLCNIC_TX_TCP_PKT	0x02
#define QLCNIC_TX_UDP_PKT	0x03
#define QLCNIC_TX_IP_PKT	0x04
#define QLCNIC_TX_TCP_LSO	0x05
#define QLCNIC_TX_TCP_LSO6	0x06
#define QLCNIC_TX_ENCAP_PKT	0x07
#define QLCNIC_TX_ENCAP_LSO	0x08
#define QLCNIC_TX_TCPV6_PKT	0x0b
#define QLCNIC_TX_UDPV6_PKT	0x0c

#define QLCNIC_FLAGS_VLAN_TAGGED	0x10
#define QLCNIC_FLAGS_VLAN_OOB		0x40

#define qlcnic_set_tx_vlan_tci(cmd_desc, v)	\
	((cmd_desc)->vlan_TCI = cpu_to_le16(v))
#define qlcnic_set_cmd_desc_port(cmd_desc, var)	\
	((cmd_desc)->port_ctxid |= ((var) & 0x0F))
#define qlcnic_set_cmd_desc_ctxid(cmd_desc, var)	\
	((cmd_desc)->port_ctxid |= ((var) << 4 & 0xF0))

#define qlcnic_set_tx_port(_desc, _port) \
	((_desc)->port_ctxid = ((_port) & 0xf) | (((_port) << 4) & 0xf0))

#define qlcnic_set_tx_flags_opcode(_desc, _flags, _opcode) \
	((_desc)->flags_opcode |= \
	 cpu_to_le16(((_flags) & 0x7f) | (((_opcode) & 0x3f) << 7)))

#define qlcnic_set_tx_frags_len(_desc, _frags, _len) \
	((_desc)->nfrags__length = \
	 cpu_to_le32(((_frags) & 0xff) | (((_len) & 0xffffff) << 8)))

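/*
 * Worked example (illustrative, not from the original source): for a
 * 1514-byte TCP LSO frame carried in three fragments with an
 * out-of-band VLAN tag,
 *
 *	qlcnic_set_tx_flags_opcode(desc, QLCNIC_FLAGS_VLAN_OOB,
 *				   QLCNIC_TX_TCP_LSO)
 *
 * ORs (0x40 & 0x7f) | ((0x05 & 0x3f) << 7) = 0x02c0 into flags_opcode,
 * and
 *
 *	qlcnic_set_tx_frags_len(desc, 3, 1514)
 *
 * stores (3 & 0xff) | ((1514 & 0xffffff) << 8) = 0x0005ea03 in
 * nfrags__length.
 */
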
/* owner bits of status_desc */
#define STATUS_OWNER_HOST	(0x1ULL << 56)
#define STATUS_OWNER_PHANTOM	(0x2ULL << 56)

/* Status descriptor:
   0-3 port, 4-7 status, 8-11 type, 12-27 total_length
   28-43 reference_handle, 44-47 protocol, 48-52 pkt_offset
   53-55 desc_cnt, 56-57 owner, 58-63 opcode
 */
#define qlcnic_get_sts_port(sts_data)	\
	((sts_data) & 0x0F)
#define qlcnic_get_sts_status(sts_data)	\
	(((sts_data) >> 4) & 0x0F)
#define qlcnic_get_sts_type(sts_data)	\
	(((sts_data) >> 8) & 0x0F)
#define qlcnic_get_sts_totallength(sts_data)	\
	(((sts_data) >> 12) & 0xFFFF)
#define qlcnic_get_sts_refhandle(sts_data)	\
	(((sts_data) >> 28) & 0xFFFF)
#define qlcnic_get_sts_prot(sts_data)	\
	(((sts_data) >> 44) & 0x0F)
#define qlcnic_get_sts_pkt_offset(sts_data)	\
	(((sts_data) >> 48) & 0x1F)
#define qlcnic_get_sts_desc_cnt(sts_data)	\
	(((sts_data) >> 53) & 0x7)
#define qlcnic_get_sts_opcode(sts_data)	\
	(((sts_data) >> 58) & 0x03F)

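/*
 * Worked example (illustrative value, not from the original source):
 * decoding sts_data = 0x1120000050040122ULL with the accessors above:
 *
 *	qlcnic_get_sts_port()		-> 0x2
 *	qlcnic_get_sts_status()		-> 0x2	(STATUS_CKSUM_OK)
 *	qlcnic_get_sts_type()		-> 0x1
 *	qlcnic_get_sts_totallength()	-> 0x40	(64-byte frame)
 *	qlcnic_get_sts_refhandle()	-> 0x5
 *	qlcnic_get_sts_desc_cnt()	-> 0x1
 *	qlcnic_get_sts_opcode()		-> 0x4	(QLCNIC_RXPKT_DESC)
 *
 * Bit 56 is also set, i.e. (sts_data & STATUS_OWNER_HOST) is non-zero,
 * so the descriptor is owned by the host.
 */
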
#define qlcnic_get_lro_sts_refhandle(sts_data)	\
	((sts_data) & 0x07FFF)
#define qlcnic_get_lro_sts_length(sts_data)	\
	(((sts_data) >> 16) & 0x0FFFF)
#define qlcnic_get_lro_sts_l2_hdr_offset(sts_data)	\
	(((sts_data) >> 32) & 0x0FF)
#define qlcnic_get_lro_sts_l4_hdr_offset(sts_data)	\
	(((sts_data) >> 40) & 0x0FF)
#define qlcnic_get_lro_sts_timestamp(sts_data)	\
	(((sts_data) >> 48) & 0x1)
#define qlcnic_get_lro_sts_type(sts_data)	\
	(((sts_data) >> 49) & 0x7)
#define qlcnic_get_lro_sts_push_flag(sts_data)	\
	(((sts_data) >> 52) & 0x1)
#define qlcnic_get_lro_sts_seq_number(sts_data)	\
	((sts_data) & 0x0FFFFFFFF)
#define qlcnic_get_lro_sts_mss(sts_data1)	\
	((sts_data1 >> 32) & 0x0FFFF)

#define qlcnic_83xx_get_lro_sts_mss(sts)	((sts) & 0xffff)

/* opcode field in status_desc */
#define QLCNIC_SYN_OFFLOAD	0x03
#define QLCNIC_RXPKT_DESC	0x04
#define QLCNIC_OLD_RXPKT_DESC	0x3f
#define QLCNIC_RESPONSE_DESC	0x05
#define QLCNIC_LRO_DESC		0x12

#define QLCNIC_TCP_HDR_SIZE		20
#define QLCNIC_TCP_TS_OPTION_SIZE	12
#define QLCNIC_FETCH_RING_ID(handle)	((handle) >> 63)
#define QLCNIC_DESC_OWNER_FW		cpu_to_le64(STATUS_OWNER_PHANTOM)

#define QLCNIC_TCP_TS_HDR_SIZE (QLCNIC_TCP_HDR_SIZE + QLCNIC_TCP_TS_OPTION_SIZE)

/* for status field in status_desc */
#define STATUS_CKSUM_LOOP	0
#define STATUS_CKSUM_OK		2

#define qlcnic_83xx_pktln(sts)		((sts >> 32) & 0x3FFF)
#define qlcnic_83xx_hndl(sts)		((sts >> 48) & 0x7FFF)
#define qlcnic_83xx_csum_status(sts)	((sts >> 39) & 7)
#define qlcnic_83xx_opcode(sts)		((sts >> 42) & 0xF)
#define qlcnic_83xx_vlan_tag(sts)	(((sts) >> 48) & 0xFFFF)
#define qlcnic_83xx_lro_pktln(sts)	(((sts) >> 32) & 0x3FFF)
#define qlcnic_83xx_l2_hdr_off(sts)	(((sts) >> 16) & 0xFF)
#define qlcnic_83xx_l4_hdr_off(sts)	(((sts) >> 24) & 0xFF)
#define qlcnic_83xx_pkt_cnt(sts)	(((sts) >> 16) & 0x7)
#define qlcnic_83xx_is_tstamp(sts)	(((sts) >> 40) & 1)
#define qlcnic_83xx_is_psh_bit(sts)	(((sts) >> 41) & 1)
#define qlcnic_83xx_is_ip_align(sts)	(((sts) >> 46) & 1)
#define qlcnic_83xx_has_vlan_tag(sts)	(((sts) >> 47) & 1)

static int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring,
				   int max);

static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *,
					    struct qlcnic_host_rds_ring *,
					    u16, u16);

static inline u8 qlcnic_mac_hash(u64 mac, u16 vlan)
{
	return (u8)((mac & 0xff) ^ ((mac >> 40) & 0xff) ^ (vlan & 0xff));
}

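/*
 * Example (hypothetical addresses): with h_source 00:0e:1e:11:22:33
 * memcpy()'d into a u64, (mac & 0xff) is the first octet (0x00) and
 * ((mac >> 40) & 0xff) the last (0x33); with vlan = 0x000a the hash is
 * 0x00 ^ 0x33 ^ 0x0a = 0x39. Callers mask the result with
 * (fbucket_size - 1) to pick a filter hash bucket.
 */
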
static inline u32 qlcnic_get_ref_handle(struct qlcnic_adapter *adapter,
					u16 handle, u8 ring_id)
{
	if (qlcnic_83xx_check(adapter))
		return handle | (ring_id << 15);
	else
		return handle;
}

static inline int qlcnic_82xx_is_lb_pkt(u64 sts_data)
{
	return (qlcnic_get_sts_status(sts_data) == STATUS_CKSUM_LOOP) ? 1 : 0;
}

static void qlcnic_delete_rx_list_mac(struct qlcnic_adapter *adapter,
				      struct qlcnic_filter *fil,
				      void *addr, u16 vlan_id)
{
	int ret;
	u8 op;

	op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
	ret = qlcnic_sre_macaddr_change(adapter, addr, vlan_id, op);
	if (ret)
		return;

	op = vlan_id ? QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL;
	ret = qlcnic_sre_macaddr_change(adapter, addr, vlan_id, op);
	if (!ret) {
		hlist_del(&fil->fnode);
		adapter->rx_fhash.fnum--;
	}
}

static struct qlcnic_filter *qlcnic_find_mac_filter(struct hlist_head *head,
						    void *addr, u16 vlan_id)
{
	struct qlcnic_filter *tmp_fil = NULL;
	struct hlist_node *n;

	hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
		if (ether_addr_equal(tmp_fil->faddr, addr) &&
		    tmp_fil->vlan_id == vlan_id)
			return tmp_fil;
	}

	return NULL;
}

static void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter,
				 struct sk_buff *skb, int loopback_pkt, u16 vlan_id)
{
	struct ethhdr *phdr = (struct ethhdr *)(skb->data);
	struct qlcnic_filter *fil, *tmp_fil;
	struct hlist_head *head;
	unsigned long time;
	u64 src_addr = 0;
	u8 hindex, op;
	int ret;

	if (!qlcnic_sriov_pf_check(adapter) || (vlan_id == 0xffff))
		vlan_id = 0;

	memcpy(&src_addr, phdr->h_source, ETH_ALEN);
	hindex = qlcnic_mac_hash(src_addr, vlan_id) &
		 (adapter->fhash.fbucket_size - 1);

	if (loopback_pkt) {
		if (adapter->rx_fhash.fnum >= adapter->rx_fhash.fmax)
			return;

		head = &(adapter->rx_fhash.fhead[hindex]);

		tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id);
		if (tmp_fil) {
			time = tmp_fil->ftime;
			if (time_after(jiffies, QLCNIC_READD_AGE * HZ + time))
				tmp_fil->ftime = jiffies;
			return;
		}

		fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
		if (!fil)
			return;

		fil->ftime = jiffies;
		memcpy(fil->faddr, &src_addr, ETH_ALEN);
		fil->vlan_id = vlan_id;
		spin_lock(&adapter->rx_mac_learn_lock);
		hlist_add_head(&(fil->fnode), head);
		adapter->rx_fhash.fnum++;
		spin_unlock(&adapter->rx_mac_learn_lock);
	} else {
		head = &adapter->fhash.fhead[hindex];

		spin_lock(&adapter->mac_learn_lock);

		tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id);
		if (tmp_fil) {
			op = vlan_id ? QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL;
			ret = qlcnic_sre_macaddr_change(adapter,
							(u8 *)&src_addr,
							vlan_id, op);
			if (!ret) {
				hlist_del(&tmp_fil->fnode);
				adapter->fhash.fnum--;
			}

			spin_unlock(&adapter->mac_learn_lock);

			return;
		}

		spin_unlock(&adapter->mac_learn_lock);

		head = &adapter->rx_fhash.fhead[hindex];

		spin_lock(&adapter->rx_mac_learn_lock);

		tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id);
		if (tmp_fil)
			qlcnic_delete_rx_list_mac(adapter, tmp_fil, &src_addr,
						  vlan_id);

		spin_unlock(&adapter->rx_mac_learn_lock);
	}
}

void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
			       u16 vlan_id, struct qlcnic_host_tx_ring *tx_ring)
{
	struct cmd_desc_type0 *hwdesc;
	struct qlcnic_nic_req *req;
	struct qlcnic_mac_req *mac_req;
	struct qlcnic_vlan_req *vlan_req;
	u32 producer;
	u64 word;

	producer = tx_ring->producer;
	hwdesc = &tx_ring->desc_head[tx_ring->producer];

	req = (struct qlcnic_nic_req *)hwdesc;
	memset(req, 0, sizeof(struct qlcnic_nic_req));
	req->qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);

	word = QLCNIC_MAC_EVENT | ((u64)(adapter->portnum) << 16);
	req->req_hdr = cpu_to_le64(word);

	mac_req = (struct qlcnic_mac_req *)&(req->words[0]);
	mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
	memcpy(mac_req->mac_addr, uaddr, ETH_ALEN);

	vlan_req = (struct qlcnic_vlan_req *)&req->words[1];
	vlan_req->vlan_id = cpu_to_le16(vlan_id);

	tx_ring->producer = get_next_index(producer, tx_ring->num_desc);
	smp_mb();
}

static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
			       struct cmd_desc_type0 *first_desc,
			       struct sk_buff *skb,
			       struct qlcnic_host_tx_ring *tx_ring)
{
	struct vlan_ethhdr *vh = (struct vlan_ethhdr *)(skb->data);
	struct ethhdr *phdr = (struct ethhdr *)(skb->data);
	u16 protocol = ntohs(skb->protocol);
	struct qlcnic_filter *fil, *tmp_fil;
	struct hlist_head *head;
	struct hlist_node *n;
	u64 src_addr = 0;
	u16 vlan_id = 0;
	u8 hindex, hval;

	if (ether_addr_equal(phdr->h_source, adapter->mac_addr))
		return;

	if (adapter->flags & QLCNIC_VLAN_FILTERING) {
		if (protocol == ETH_P_8021Q) {
			vh = (struct vlan_ethhdr *)skb->data;
			vlan_id = ntohs(vh->h_vlan_TCI);
		} else if (skb_vlan_tag_present(skb)) {
			vlan_id = skb_vlan_tag_get(skb);
		}
	}

	memcpy(&src_addr, phdr->h_source, ETH_ALEN);
	hval = qlcnic_mac_hash(src_addr, vlan_id);
	hindex = hval & (adapter->fhash.fbucket_size - 1);
	head = &(adapter->fhash.fhead[hindex]);

	hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
		if (ether_addr_equal(tmp_fil->faddr, (u8 *)&src_addr) &&
		    tmp_fil->vlan_id == vlan_id) {
			if (jiffies > (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
				qlcnic_change_filter(adapter, &src_addr,
						     vlan_id, tx_ring);
			tmp_fil->ftime = jiffies;
			return;
		}
	}

	if (unlikely(adapter->fhash.fnum >= adapter->fhash.fmax)) {
		adapter->stats.mac_filter_limit_overrun++;
		return;
	}

	fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
	if (!fil)
		return;

	qlcnic_change_filter(adapter, &src_addr, vlan_id, tx_ring);
	fil->ftime = jiffies;
	fil->vlan_id = vlan_id;
	memcpy(fil->faddr, &src_addr, ETH_ALEN);
	spin_lock(&adapter->mac_learn_lock);
	hlist_add_head(&(fil->fnode), head);
	adapter->fhash.fnum++;
	spin_unlock(&adapter->mac_learn_lock);
}

#define QLCNIC_ENCAP_VXLAN_PKT		BIT_0
#define QLCNIC_ENCAP_OUTER_L3_IP6	BIT_1
#define QLCNIC_ENCAP_INNER_L3_IP6	BIT_2
#define QLCNIC_ENCAP_INNER_L4_UDP	BIT_3
#define QLCNIC_ENCAP_DO_L3_CSUM	BIT_4
#define QLCNIC_ENCAP_DO_L4_CSUM	BIT_5

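/*
 * Worked example (illustrative, not from the original source): for a
 * VXLAN frame with a 20-byte outer IPv4 header at the usual Ethernet
 * payload offset of 14, the header-description bits assembled below in
 * qlcnic_tx_encap_pkt() come out as
 *
 *	QLCNIC_ENCAP_VXLAN_PKT			-> 0x0001
 *	(20 >> 2) << 6	(header size in words)	-> 0x0140
 *	14 << 10	(header offset)		-> 0x3800
 *						   ------
 *	encap_descr				=  0x3941
 */
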
static int qlcnic_tx_encap_pkt(struct qlcnic_adapter *adapter,
			       struct cmd_desc_type0 *first_desc,
			       struct sk_buff *skb,
			       struct qlcnic_host_tx_ring *tx_ring)
{
	u8 opcode = 0, inner_hdr_len = 0, outer_hdr_len = 0, total_hdr_len = 0;
	int copied, copy_len, descr_size;
	u32 producer = tx_ring->producer;
	struct cmd_desc_type0 *hwdesc;
	u16 flags = 0, encap_descr = 0;

	opcode = QLCNIC_TX_ETHER_PKT;
	encap_descr = QLCNIC_ENCAP_VXLAN_PKT;

	if (skb_is_gso(skb)) {
		inner_hdr_len = skb_inner_transport_header(skb) +
				inner_tcp_hdrlen(skb) -
				skb_inner_mac_header(skb);

		/* VXLAN header size = 8 */
		outer_hdr_len = skb_transport_offset(skb) + 8 +
				sizeof(struct udphdr);
		first_desc->outer_hdr_length = outer_hdr_len;
		total_hdr_len = inner_hdr_len + outer_hdr_len;
		encap_descr |= QLCNIC_ENCAP_DO_L3_CSUM |
			       QLCNIC_ENCAP_DO_L4_CSUM;
		first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		first_desc->hdr_length = inner_hdr_len;

		/* Copy inner and outer headers in Tx descriptor(s)
		 * If total_hdr_len > cmd_desc_type0, use multiple
		 * descriptors
		 */
		copied = 0;
		descr_size = (int)sizeof(struct cmd_desc_type0);
		while (copied < total_hdr_len) {
			copy_len = min(descr_size, (total_hdr_len - copied));
			hwdesc = &tx_ring->desc_head[producer];
			tx_ring->cmd_buf_arr[producer].skb = NULL;
			skb_copy_from_linear_data_offset(skb, copied,
							 (char *)hwdesc,
							 copy_len);
			copied += copy_len;
			producer = get_next_index(producer, tx_ring->num_desc);
		}

		tx_ring->producer = producer;

		/* Make sure updated tx_ring->producer is visible
		 * for qlcnic_tx_avail()
		 */
		smp_mb();
		adapter->stats.encap_lso_frames++;

		opcode = QLCNIC_TX_ENCAP_LSO;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (inner_ip_hdr(skb)->version == 6) {
			if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
				encap_descr |= QLCNIC_ENCAP_INNER_L4_UDP;
		} else {
			if (inner_ip_hdr(skb)->protocol == IPPROTO_UDP)
				encap_descr |= QLCNIC_ENCAP_INNER_L4_UDP;
		}

		adapter->stats.encap_tx_csummed++;
		opcode = QLCNIC_TX_ENCAP_PKT;
	}

	/* Prepare first 16 bits of byte offset 16 of Tx descriptor */
	if (ip_hdr(skb)->version == 6)
		encap_descr |= QLCNIC_ENCAP_OUTER_L3_IP6;

	/* outer IP header's size in 32-bit words */
	encap_descr |= (skb_network_header_len(skb) >> 2) << 6;

	/* outer IP header offset */
	encap_descr |= skb_network_offset(skb) << 10;
	first_desc->encap_descr = cpu_to_le16(encap_descr);

	first_desc->tcp_hdr_offset = skb_inner_transport_header(skb) -
				     skb->data;
	first_desc->ip_hdr_offset = skb_inner_network_offset(skb);

	qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);

	return 0;
}

static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
			 struct cmd_desc_type0 *first_desc, struct sk_buff *skb,
			 struct qlcnic_host_tx_ring *tx_ring)
{
	u8 l4proto, opcode = 0, hdr_len = 0, tag_vlan = 0;
	u16 flags = 0, vlan_tci = 0;
	int copied, offset, copy_len, size;
	struct cmd_desc_type0 *hwdesc;
	struct vlan_ethhdr *vh;
	u16 protocol = ntohs(skb->protocol);
	u32 producer = tx_ring->producer;

	if (protocol == ETH_P_8021Q) {
		vh = (struct vlan_ethhdr *)skb->data;
		flags = QLCNIC_FLAGS_VLAN_TAGGED;
		vlan_tci = ntohs(vh->h_vlan_TCI);
		protocol = ntohs(vh->h_vlan_encapsulated_proto);
		tag_vlan = 1;
	} else if (skb_vlan_tag_present(skb)) {
		flags = QLCNIC_FLAGS_VLAN_OOB;
		vlan_tci = skb_vlan_tag_get(skb);
		tag_vlan = 1;
	}

	if (unlikely(adapter->tx_pvid)) {
		if (tag_vlan && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
			return -EIO;
		if (tag_vlan && (adapter->flags & QLCNIC_TAGGING_ENABLED))
			goto set_flags;

		flags = QLCNIC_FLAGS_VLAN_OOB;
		vlan_tci = adapter->tx_pvid;
	}
set_flags:
	qlcnic_set_tx_vlan_tci(first_desc, vlan_tci);
	qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);

	if (*(skb->data) & BIT_0) {
		flags |= BIT_0;
		memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
	}
	opcode = QLCNIC_TX_ETHER_PKT;
	if (skb_is_gso(skb)) {
		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		first_desc->hdr_length = hdr_len;
		opcode = (protocol == ETH_P_IPV6) ? QLCNIC_TX_TCP_LSO6 :
						    QLCNIC_TX_TCP_LSO;

		/* For LSO, we need to copy the MAC/IP/TCP headers into
		 * the descriptor ring */
		copied = 0;
		offset = 2;

		if (flags & QLCNIC_FLAGS_VLAN_OOB) {
			first_desc->hdr_length += VLAN_HLEN;
			first_desc->tcp_hdr_offset = VLAN_HLEN;
			first_desc->ip_hdr_offset = VLAN_HLEN;

			/* Only in case of TSO on vlan device */
			flags |= QLCNIC_FLAGS_VLAN_TAGGED;

			/* Create a TSO vlan header template for firmware */
			hwdesc = &tx_ring->desc_head[producer];
			tx_ring->cmd_buf_arr[producer].skb = NULL;

			copy_len = min((int)sizeof(struct cmd_desc_type0) -
				       offset, hdr_len + VLAN_HLEN);

			vh = (struct vlan_ethhdr *)((char *) hwdesc + 2);
			skb_copy_from_linear_data(skb, vh, 12);
			vh->h_vlan_proto = htons(ETH_P_8021Q);
			vh->h_vlan_TCI = htons(vlan_tci);

			skb_copy_from_linear_data_offset(skb, 12,
							 (char *)vh + 16,
							 copy_len - 16);

			copied = copy_len - VLAN_HLEN;
			offset = 0;
			producer = get_next_index(producer, tx_ring->num_desc);
		}

		while (copied < hdr_len) {
			size = (int)sizeof(struct cmd_desc_type0) - offset;
			copy_len = min(size, (hdr_len - copied));
			hwdesc = &tx_ring->desc_head[producer];
			tx_ring->cmd_buf_arr[producer].skb = NULL;
			skb_copy_from_linear_data_offset(skb, copied,
							 (char *)hwdesc +
							 offset, copy_len);
			copied += copy_len;
			offset = 0;
			producer = get_next_index(producer, tx_ring->num_desc);
		}

		tx_ring->producer = producer;
		smp_mb();
		adapter->stats.lso_frames++;

	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (protocol == ETH_P_IP) {
			l4proto = ip_hdr(skb)->protocol;

			if (l4proto == IPPROTO_TCP)
				opcode = QLCNIC_TX_TCP_PKT;
			else if (l4proto == IPPROTO_UDP)
				opcode = QLCNIC_TX_UDP_PKT;
		} else if (protocol == ETH_P_IPV6) {
			l4proto = ipv6_hdr(skb)->nexthdr;

			if (l4proto == IPPROTO_TCP)
				opcode = QLCNIC_TX_TCPV6_PKT;
			else if (l4proto == IPPROTO_UDP)
				opcode = QLCNIC_TX_UDPV6_PKT;
		}
	}
	first_desc->tcp_hdr_offset += skb_transport_offset(skb);
	first_desc->ip_hdr_offset += skb_network_offset(skb);
	qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);

	return 0;
}

static int qlcnic_map_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
			     struct qlcnic_cmd_buffer *pbuf)
{
	struct qlcnic_skb_frag *nf;
	skb_frag_t *frag;
	int i, nr_frags;
	dma_addr_t map;

	nr_frags = skb_shinfo(skb)->nr_frags;
	nf = &pbuf->frag_array[0];

	map = pci_map_single(pdev, skb->data, skb_headlen(skb),
			     PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(pdev, map))
		goto out_err;

	nf->dma = map;
	nf->length = skb_headlen(skb);

	for (i = 0; i < nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		nf = &pbuf->frag_array[i+1];
		map = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
				       DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, map))
			goto unwind;

		nf->dma = map;
		nf->length = skb_frag_size(frag);
	}

	return 0;

unwind:
	while (--i >= 0) {
		nf = &pbuf->frag_array[i+1];
		pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
	}

	nf = &pbuf->frag_array[0];
	pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);

out_err:
	return -ENOMEM;
}

static void qlcnic_unmap_buffers(struct pci_dev *pdev, struct sk_buff *skb,
				 struct qlcnic_cmd_buffer *pbuf)
{
	struct qlcnic_skb_frag *nf = &pbuf->frag_array[0];
	int i, nr_frags = skb_shinfo(skb)->nr_frags;

	for (i = 0; i < nr_frags; i++) {
		nf = &pbuf->frag_array[i+1];
		pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
	}

	nf = &pbuf->frag_array[0];
	pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
	pbuf->skb = NULL;
}

static inline void qlcnic_clear_cmddesc(u64 *desc)
{
	desc[0] = 0ULL;
	desc[2] = 0ULL;
	desc[7] = 0ULL;
}

netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	struct qlcnic_host_tx_ring *tx_ring;
	struct qlcnic_cmd_buffer *pbuf;
	struct qlcnic_skb_frag *buffrag;
	struct cmd_desc_type0 *hwdesc, *first_desc;
	struct pci_dev *pdev;
	struct ethhdr *phdr;
	int i, k, frag_count, delta = 0;
	u32 producer, num_txd;
	u16 protocol;
	bool l4_is_udp = false;

	if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
		netif_tx_stop_all_queues(netdev);
		return NETDEV_TX_BUSY;
	}

	if (adapter->flags & QLCNIC_MACSPOOF) {
		phdr = (struct ethhdr *)skb->data;
		if (!ether_addr_equal(phdr->h_source, adapter->mac_addr))
			goto drop_packet;
	}

	tx_ring = &adapter->tx_ring[skb_get_queue_mapping(skb)];
	num_txd = tx_ring->num_desc;

	frag_count = skb_shinfo(skb)->nr_frags + 1;

	/* 14 frags supported for normal packet and
	 * 32 frags supported for TSO packet
	 */
	if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) {
		for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++)
			delta += skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (!__pskb_pull_tail(skb, delta))
			goto drop_packet;

		frag_count = 1 + skb_shinfo(skb)->nr_frags;
	}

	if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
		netif_tx_stop_queue(tx_ring->txq);
		if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
			netif_tx_start_queue(tx_ring->txq);
		} else {
			tx_ring->tx_stats.xmit_off++;
			return NETDEV_TX_BUSY;
		}
	}

	producer = tx_ring->producer;
	pbuf = &tx_ring->cmd_buf_arr[producer];
	pdev = adapter->pdev;
	first_desc = &tx_ring->desc_head[producer];
	hwdesc = &tx_ring->desc_head[producer];
	qlcnic_clear_cmddesc((u64 *)hwdesc);

	if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
		adapter->stats.tx_dma_map_error++;
		goto drop_packet;
	}

	pbuf->skb = skb;
	pbuf->frag_count = frag_count;

	qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
	qlcnic_set_tx_port(first_desc, adapter->portnum);

	for (i = 0; i < frag_count; i++) {
		k = i % 4;

		if ((k == 0) && (i > 0)) {
			/* move to next desc.*/
			producer = get_next_index(producer, num_txd);
			hwdesc = &tx_ring->desc_head[producer];
			qlcnic_clear_cmddesc((u64 *)hwdesc);
			tx_ring->cmd_buf_arr[producer].skb = NULL;
		}

		buffrag = &pbuf->frag_array[i];
		hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
		switch (k) {
		case 0:
			hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
			break;
		case 1:
			hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
			break;
		case 2:
			hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
			break;
		case 3:
			hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
			break;
		}
	}

	tx_ring->producer = get_next_index(producer, num_txd);
	smp_mb();

	protocol = ntohs(skb->protocol);
	if (protocol == ETH_P_IP)
		l4_is_udp = ip_hdr(skb)->protocol == IPPROTO_UDP;
	else if (protocol == ETH_P_IPV6)
		l4_is_udp = ipv6_hdr(skb)->nexthdr == IPPROTO_UDP;

	/* Check if it is a VXLAN packet */
	if (!skb->encapsulation || !l4_is_udp ||
	    !qlcnic_encap_tx_offload(adapter)) {
		if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb,
					   tx_ring)))
			goto unwind_buff;
	} else {
		if (unlikely(qlcnic_tx_encap_pkt(adapter, first_desc,
						 skb, tx_ring)))
			goto unwind_buff;
	}

	if (adapter->drv_mac_learn)
		qlcnic_send_filter(adapter, first_desc, skb, tx_ring);

	tx_ring->tx_stats.tx_bytes += skb->len;
	tx_ring->tx_stats.xmit_called++;

	/* Ensure writes are complete before HW fetches Tx descriptors */
	wmb();
	qlcnic_update_cmd_producer(tx_ring);

	return NETDEV_TX_OK;

unwind_buff:
	qlcnic_unmap_buffers(pdev, skb, pbuf);

	adapter->stats.txdropped++;
drop_packet:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->ahw->linkup && !linkup) {
		netdev_info(netdev, "NIC Link is down\n");
		adapter->ahw->linkup = 0;
		netif_carrier_off(netdev);
	} else if (!adapter->ahw->linkup && linkup) {
		adapter->ahw->linkup = 1;

		/* Do not advertise Link up to the stack if device
		 * is in loopback mode
		 */
		if (qlcnic_83xx_check(adapter) && adapter->ahw->lb_mode) {
			netdev_info(netdev, "NIC Link is up for loopback test\n");
			return;
		}

		netdev_info(netdev, "NIC Link is up\n");
		netif_carrier_on(netdev);
	}
}

static int qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
			       struct qlcnic_host_rds_ring *rds_ring,
			       struct qlcnic_rx_buffer *buffer)
{
	struct sk_buff *skb;
	dma_addr_t dma;
	struct pci_dev *pdev = adapter->pdev;

	skb = netdev_alloc_skb(adapter->netdev, rds_ring->skb_size);
	if (!skb) {
		adapter->stats.skb_alloc_failure++;
		return -ENOMEM;
	}

	skb_reserve(skb, NET_IP_ALIGN);
	dma = pci_map_single(pdev, skb->data,
			     rds_ring->dma_size, PCI_DMA_FROMDEVICE);

	if (pci_dma_mapping_error(pdev, dma)) {
		adapter->stats.rx_dma_map_error++;
		dev_kfree_skb_any(skb);
		return -ENOMEM;
	}

	buffer->skb = skb;
	buffer->dma = dma;

	return 0;
}

static void qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
					struct qlcnic_host_rds_ring *rds_ring,
					u8 ring_id)
{
	struct rcv_desc *pdesc;
	struct qlcnic_rx_buffer *buffer;
	int count = 0;
	uint32_t producer, handle;
	struct list_head *head;

	if (!spin_trylock(&rds_ring->lock))
		return;

	producer = rds_ring->producer;
	head = &rds_ring->free_list;
	while (!list_empty(head)) {
		buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);

		if (!buffer->skb) {
			if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
				break;
		}

		count++;
		list_del(&buffer->list);

		/* make a rcv descriptor */
		pdesc = &rds_ring->desc_head[producer];
		handle = qlcnic_get_ref_handle(adapter,
					       buffer->ref_handle, ring_id);
		pdesc->reference_handle = cpu_to_le16(handle);
		pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
		pdesc->addr_buffer = cpu_to_le64(buffer->dma);
		producer = get_next_index(producer, rds_ring->num_desc);
	}

	if (count) {
		rds_ring->producer = producer;
		writel((producer - 1) & (rds_ring->num_desc - 1),
		       rds_ring->crb_rcv_producer);
	}

	spin_unlock(&rds_ring->lock);
}

static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
				   struct qlcnic_host_tx_ring *tx_ring,
				   int budget)
{
	u32 sw_consumer, hw_consumer;
	int i, done, count = 0;
	struct qlcnic_cmd_buffer *buffer;
	struct pci_dev *pdev = adapter->pdev;
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_skb_frag *frag;

	if (!spin_trylock(&tx_ring->tx_clean_lock))
		return 1;

	sw_consumer = tx_ring->sw_consumer;
	hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));

	while (sw_consumer != hw_consumer) {
		buffer = &tx_ring->cmd_buf_arr[sw_consumer];
		if (buffer->skb) {
			frag = &buffer->frag_array[0];
			pci_unmap_single(pdev, frag->dma, frag->length,
					 PCI_DMA_TODEVICE);
			frag->dma = 0ULL;
			for (i = 1; i < buffer->frag_count; i++) {
				frag++;
				pci_unmap_page(pdev, frag->dma, frag->length,
					       PCI_DMA_TODEVICE);
				frag->dma = 0ULL;
			}
			tx_ring->tx_stats.xmit_finished++;
			dev_kfree_skb_any(buffer->skb);
			buffer->skb = NULL;
		}

		sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
		if (++count >= budget)
			break;
	}

	tx_ring->sw_consumer = sw_consumer;

	if (count && netif_running(netdev)) {
		smp_mb();
		if (netif_tx_queue_stopped(tx_ring->txq) &&
		    netif_carrier_ok(netdev)) {
			if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
				netif_tx_wake_queue(tx_ring->txq);
				tx_ring->tx_stats.xmit_on++;
			}
		}
		adapter->tx_timeo_cnt = 0;
	}
	/*
	 * If everything is freed up to consumer then check if the ring is full
	 * If the ring is full then check if more needs to be freed and
	 * schedule the call back again.
	 *
	 * This happens when there are 2 CPUs. One could be freeing and the
	 * other filling it. If the ring is full when we get out of here and
	 * the card has already interrupted the host then the host can miss the
	 * interrupt.
	 *
	 * There is still a possible race condition and the host could miss an
	 * interrupt. The card has to take care of this.
	 */
	hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
	done = (sw_consumer == hw_consumer);

	spin_unlock(&tx_ring->tx_clean_lock);

	return done;
}

static int qlcnic_poll(struct napi_struct *napi, int budget)
{
	int tx_complete, work_done;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_adapter *adapter;
	struct qlcnic_host_tx_ring *tx_ring;

	sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
	adapter = sds_ring->adapter;
	tx_ring = sds_ring->tx_ring;

	tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring,
					      budget);
	work_done = qlcnic_process_rcv_ring(sds_ring, budget);

	/* Check if we need a repoll */
	if (!tx_complete)
		work_done = budget;

	if (work_done < budget) {
		napi_complete_done(&sds_ring->napi, work_done);
		if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
			qlcnic_enable_sds_intr(adapter, sds_ring);
			qlcnic_enable_tx_intr(adapter, tx_ring);
		}
	}

	return work_done;
}

static int qlcnic_tx_poll(struct napi_struct *napi, int budget)
{
	struct qlcnic_host_tx_ring *tx_ring;
	struct qlcnic_adapter *adapter;
	int work_done;

	tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi);
	adapter = tx_ring->adapter;

	work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
	if (work_done) {
		napi_complete(&tx_ring->napi);
		if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
			qlcnic_enable_tx_intr(adapter, tx_ring);
	} else {
		/* As qlcnic_process_cmd_ring() returned 0, we need a repoll*/
		work_done = budget;
	}

	return work_done;
}

static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
{
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_adapter *adapter;
	int work_done;

	sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
	adapter = sds_ring->adapter;

	work_done = qlcnic_process_rcv_ring(sds_ring, budget);

	if (work_done < budget) {
		napi_complete_done(&sds_ring->napi, work_done);
		if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
			qlcnic_enable_sds_intr(adapter, sds_ring);
	}

	return work_done;
}

static void qlcnic_handle_linkevent(struct qlcnic_adapter *adapter,
				    struct qlcnic_fw_msg *msg)
{
	u32 cable_OUI;
	u16 cable_len, link_speed;
	u8 link_status, module, duplex, autoneg, lb_status = 0;
	struct net_device *netdev = adapter->netdev;

	adapter->ahw->has_link_events = 1;

	cable_OUI = msg->body[1] & 0xffffffff;
	cable_len = (msg->body[1] >> 32) & 0xffff;
	link_speed = (msg->body[1] >> 48) & 0xffff;

	link_status = msg->body[2] & 0xff;
	duplex = (msg->body[2] >> 16) & 0xff;
	autoneg = (msg->body[2] >> 24) & 0xff;
	lb_status = (msg->body[2] >> 32) & 0x3;

	module = (msg->body[2] >> 8) & 0xff;
	if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE)
		dev_info(&netdev->dev,
			 "unsupported cable: OUI 0x%x, length %d\n",
			 cable_OUI, cable_len);
	else if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN)
		dev_info(&netdev->dev, "unsupported cable length %d\n",
			 cable_len);

	if (!link_status && (lb_status == QLCNIC_ILB_MODE ||
	    lb_status == QLCNIC_ELB_MODE))
		adapter->ahw->loopback_state |= QLCNIC_LINKEVENT;

	qlcnic_advert_link_change(adapter, link_status);

	if (duplex == LINKEVENT_FULL_DUPLEX)
		adapter->ahw->link_duplex = DUPLEX_FULL;
	else
		adapter->ahw->link_duplex = DUPLEX_HALF;

	adapter->ahw->module_type = module;
	adapter->ahw->link_autoneg = autoneg;

	if (link_status) {
		adapter->ahw->link_speed = link_speed;
	} else {
		adapter->ahw->link_speed = SPEED_UNKNOWN;
		adapter->ahw->link_duplex = DUPLEX_UNKNOWN;
	}
}

static void qlcnic_handle_fw_message(int desc_cnt, int index,
				     struct qlcnic_host_sds_ring *sds_ring)
{
	struct qlcnic_fw_msg msg;
	struct status_desc *desc;
	struct qlcnic_adapter *adapter;
	struct device *dev;
	int i = 0, opcode, ret;

	while (desc_cnt > 0 && i < 8) {
		desc = &sds_ring->desc_head[index];
		msg.words[i++] = le64_to_cpu(desc->status_desc_data[0]);
		msg.words[i++] = le64_to_cpu(desc->status_desc_data[1]);

		index = get_next_index(index, sds_ring->num_desc);
		desc_cnt--;
	}

	adapter = sds_ring->adapter;
	dev = &adapter->pdev->dev;
	opcode = qlcnic_get_nic_msg_opcode(msg.body[0]);

	switch (opcode) {
	case QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE:
		qlcnic_handle_linkevent(adapter, &msg);
		break;
	case QLCNIC_C2H_OPCODE_CONFIG_LOOPBACK:
		ret = (u32)(msg.body[1]);
		switch (ret) {
		case 0:
			adapter->ahw->loopback_state |= QLCNIC_LB_RESPONSE;
			break;
		case 1:
			dev_info(dev, "loopback already in progress\n");
			adapter->ahw->diag_cnt = -EINPROGRESS;
			break;
		case 2:
			dev_info(dev, "loopback cable is not connected\n");
			adapter->ahw->diag_cnt = -ENODEV;
			break;
		default:
			dev_info(dev,
				 "loopback configure request failed, err %x\n",
				 ret);
			adapter->ahw->diag_cnt = -EIO;
			break;
		}
		break;
	case QLCNIC_C2H_OPCODE_GET_DCB_AEN:
		qlcnic_dcb_aen_handler(adapter->dcb, (void *)&msg);
		break;
	default:
		break;
	}
}

static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
					    struct qlcnic_host_rds_ring *ring,
					    u16 index, u16 cksum)
{
	struct qlcnic_rx_buffer *buffer;
	struct sk_buff *skb;

	buffer = &ring->rx_buf_arr[index];
	if (unlikely(buffer->skb == NULL)) {
		WARN_ON(1);
		return NULL;
	}

	pci_unmap_single(adapter->pdev, buffer->dma, ring->dma_size,
			 PCI_DMA_FROMDEVICE);

	skb = buffer->skb;
	if (likely((adapter->netdev->features & NETIF_F_RXCSUM) &&
		   (cksum == STATUS_CKSUM_OK || cksum == STATUS_CKSUM_LOOP))) {
		adapter->stats.csummed++;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else {
		skb_checksum_none_assert(skb);
	}

	buffer->skb = NULL;

	return skb;
}

static inline int qlcnic_check_rx_tagging(struct qlcnic_adapter *adapter,
					  struct sk_buff *skb, u16 *vlan_tag)
{
	struct ethhdr *eth_hdr;

	if (!__vlan_get_tag(skb, vlan_tag)) {
		eth_hdr = (struct ethhdr *)skb->data;
		memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2);
		skb_pull(skb, VLAN_HLEN);
	}
	if (!adapter->rx_pvid)
		return 0;

	if (*vlan_tag == adapter->rx_pvid) {
		/* Outer vlan tag. Packet should follow non-vlan path */
		*vlan_tag = 0xffff;
		return 0;
	}
	if (adapter->flags & QLCNIC_TAGGING_ENABLED)
		return 0;

	return -EINVAL;
}

static struct qlcnic_rx_buffer *
qlcnic_process_rcv(struct qlcnic_adapter *adapter,
		   struct qlcnic_host_sds_ring *sds_ring, int ring,
		   u64 sts_data0)
{
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_rx_buffer *buffer;
	struct sk_buff *skb;
	struct qlcnic_host_rds_ring *rds_ring;
	int index, length, cksum, pkt_offset, is_lb_pkt;
	u16 vid = 0xffff, t_vid;

	if (unlikely(ring >= adapter->max_rds_rings))
		return NULL;

	rds_ring = &recv_ctx->rds_rings[ring];

	index = qlcnic_get_sts_refhandle(sts_data0);
	if (unlikely(index >= rds_ring->num_desc))
		return NULL;

	buffer = &rds_ring->rx_buf_arr[index];
	length = qlcnic_get_sts_totallength(sts_data0);
	cksum  = qlcnic_get_sts_status(sts_data0);
	pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);

	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
	if (!skb)
		return buffer;

	if (adapter->rx_mac_learn) {
		t_vid = 0;
		is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0);
		qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
	}

	if (length > rds_ring->skb_size)
		skb_put(skb, rds_ring->skb_size);
	else
		skb_put(skb, length);

	if (pkt_offset)
		skb_pull(skb, pkt_offset);

	if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
		adapter->stats.rxdropped++;
		dev_kfree_skb(skb);
		return buffer;
	}

	skb->protocol = eth_type_trans(skb, netdev);

	if (vid != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);

	napi_gro_receive(&sds_ring->napi, skb);

	adapter->stats.rx_pkts++;
	adapter->stats.rxbytes += length;

	return buffer;
}

#define QLC_TCP_HDR_SIZE	20
#define QLC_TCP_TS_OPTION_SIZE	12
#define QLC_TCP_TS_HDR_SIZE	(QLC_TCP_HDR_SIZE + QLC_TCP_TS_OPTION_SIZE)

static struct qlcnic_rx_buffer *
qlcnic_process_lro(struct qlcnic_adapter *adapter,
		   int ring, u64 sts_data0, u64 sts_data1)
{
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_rx_buffer *buffer;
	struct sk_buff *skb;
	struct qlcnic_host_rds_ring *rds_ring;
	struct iphdr *iph;
	struct ipv6hdr *ipv6h;
	struct tcphdr *th;
	bool push, timestamp;
	int index, l2_hdr_offset, l4_hdr_offset, is_lb_pkt;
	u16 lro_length, length, data_offset, t_vid, vid = 0xffff;
	u32 seq_number;

	if (unlikely(ring >= adapter->max_rds_rings))
		return NULL;

	rds_ring = &recv_ctx->rds_rings[ring];

	index = qlcnic_get_lro_sts_refhandle(sts_data0);
	if (unlikely(index >= rds_ring->num_desc))
		return NULL;

	buffer = &rds_ring->rx_buf_arr[index];

	timestamp = qlcnic_get_lro_sts_timestamp(sts_data0);
	lro_length = qlcnic_get_lro_sts_length(sts_data0);
	l2_hdr_offset = qlcnic_get_lro_sts_l2_hdr_offset(sts_data0);
	l4_hdr_offset = qlcnic_get_lro_sts_l4_hdr_offset(sts_data0);
	push = qlcnic_get_lro_sts_push_flag(sts_data0);
	seq_number = qlcnic_get_lro_sts_seq_number(sts_data1);

	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
	if (!skb)
		return buffer;

	if (adapter->rx_mac_learn) {
		t_vid = 0;
		is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0);
		qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
	}

	if (timestamp)
		data_offset = l4_hdr_offset + QLC_TCP_TS_HDR_SIZE;
	else
		data_offset = l4_hdr_offset + QLC_TCP_HDR_SIZE;

	skb_put(skb, lro_length + data_offset);
	skb_pull(skb, l2_hdr_offset);

	if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
		adapter->stats.rxdropped++;
		dev_kfree_skb(skb);
		return buffer;
	}

	skb->protocol = eth_type_trans(skb, netdev);

	if (ntohs(skb->protocol) == ETH_P_IPV6) {
		ipv6h = (struct ipv6hdr *)skb->data;
		th = (struct tcphdr *)(skb->data + sizeof(struct ipv6hdr));
		length = (th->doff << 2) + lro_length;
		ipv6h->payload_len = htons(length);
	} else {
		iph = (struct iphdr *)skb->data;
		th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
		length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
		csum_replace2(&iph->check, iph->tot_len, htons(length));
		iph->tot_len = htons(length);
	}

	th->psh = push;
	th->seq = htonl(seq_number);
	length = skb->len;

	if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP) {
		skb_shinfo(skb)->gso_size = qlcnic_get_lro_sts_mss(sts_data1);
		if (skb->protocol == htons(ETH_P_IPV6))
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
		else
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
	}

	if (vid != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
	netif_receive_skb(skb);

	adapter->stats.lro_pkts++;
	adapter->stats.lrobytes += length;

	return buffer;
}

static int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max)
{
	struct qlcnic_host_rds_ring *rds_ring;
	struct qlcnic_adapter *adapter = sds_ring->adapter;
	struct list_head *cur;
	struct status_desc *desc;
	struct qlcnic_rx_buffer *rxbuf;
	int opcode, desc_cnt, count = 0;
	u64 sts_data0, sts_data1;
	u8 ring;
	u32 consumer = sds_ring->consumer;

	while (count < max) {
		desc = &sds_ring->desc_head[consumer];
		sts_data0 = le64_to_cpu(desc->status_desc_data[0]);

		if (!(sts_data0 & STATUS_OWNER_HOST))
			break;

		desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
		opcode = qlcnic_get_sts_opcode(sts_data0);

		switch (opcode) {
		case QLCNIC_RXPKT_DESC:
		case QLCNIC_OLD_RXPKT_DESC:
		case QLCNIC_SYN_OFFLOAD:
			ring = qlcnic_get_sts_type(sts_data0);
			rxbuf = qlcnic_process_rcv(adapter, sds_ring, ring,
						   sts_data0);
			break;
		case QLCNIC_LRO_DESC:
			ring = qlcnic_get_lro_sts_type(sts_data0);
			sts_data1 = le64_to_cpu(desc->status_desc_data[1]);
			rxbuf = qlcnic_process_lro(adapter, ring, sts_data0,
						   sts_data1);
			break;
		case QLCNIC_RESPONSE_DESC:
			qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
			goto skip;
		default:
			goto skip;
		}
		WARN_ON(desc_cnt > 1);

		if (likely(rxbuf))
			list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
		else
			adapter->stats.null_rxbuf++;
skip:
		for (; desc_cnt > 0; desc_cnt--) {
			desc = &sds_ring->desc_head[consumer];
			desc->status_desc_data[0] = QLCNIC_DESC_OWNER_FW;
			consumer = get_next_index(consumer, sds_ring->num_desc);
		}
		count++;
	}

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &adapter->recv_ctx->rds_rings[ring];
		if (!list_empty(&sds_ring->free_list[ring])) {
			list_for_each(cur, &sds_ring->free_list[ring]) {
				rxbuf = list_entry(cur, struct qlcnic_rx_buffer,
						   list);
				qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf);
			}
			spin_lock(&rds_ring->lock);
			list_splice_tail_init(&sds_ring->free_list[ring],
					      &rds_ring->free_list);
			spin_unlock(&rds_ring->lock);
		}

		qlcnic_post_rx_buffers_nodb(adapter, rds_ring, ring);
	}

	if (count) {
		sds_ring->consumer = consumer;
		writel(consumer, sds_ring->crb_sts_consumer);
	}

	return count;
}

void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
			    struct qlcnic_host_rds_ring *rds_ring, u8 ring_id)
{
	struct rcv_desc *pdesc;
	struct qlcnic_rx_buffer *buffer;
	int count = 0;
	u32 producer, handle;
	struct list_head *head;

	producer = rds_ring->producer;
	head = &rds_ring->free_list;

	while (!list_empty(head)) {
		buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);

		if (!buffer->skb) {
			if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
				break;
		}

		count++;
		list_del(&buffer->list);

		/* make a rcv descriptor */
		pdesc = &rds_ring->desc_head[producer];
		pdesc->addr_buffer = cpu_to_le64(buffer->dma);
		handle = qlcnic_get_ref_handle(adapter, buffer->ref_handle,
					       ring_id);
		pdesc->reference_handle = cpu_to_le16(handle);
		pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
		producer = get_next_index(producer, rds_ring->num_desc);
	}

	if (count) {
		rds_ring->producer = producer;
		writel((producer - 1) & (rds_ring->num_desc - 1),
		       rds_ring->crb_rcv_producer);
	}
}

static void dump_skb(struct sk_buff *skb, struct qlcnic_adapter *adapter)
{
	if (adapter->ahw->msg_enable & NETIF_MSG_DRV) {
		char prefix[30];

		scnprintf(prefix, sizeof(prefix), "%s: %s: ",
			  dev_name(&adapter->pdev->dev), __func__);

		print_hex_dump_debug(prefix, DUMP_PREFIX_NONE, 16, 1,
				     skb->data, skb->len, true);
	}
}

static void qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter, int ring,
				    u64 sts_data0)
{
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct sk_buff *skb;
	struct qlcnic_host_rds_ring *rds_ring;
	int index, length, cksum, pkt_offset;

	if (unlikely(ring >= adapter->max_rds_rings))
		return;

	rds_ring = &recv_ctx->rds_rings[ring];

	index = qlcnic_get_sts_refhandle(sts_data0);
	length = qlcnic_get_sts_totallength(sts_data0);
	if (unlikely(index >= rds_ring->num_desc))
		return;

	cksum  = qlcnic_get_sts_status(sts_data0);
	pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);

	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
	if (!skb)
		return;

	if (length > rds_ring->skb_size)
		skb_put(skb, rds_ring->skb_size);
	else
		skb_put(skb, length);

	if (pkt_offset)
		skb_pull(skb, pkt_offset);

	if (!qlcnic_check_loopback_buff(skb->data, adapter->mac_addr))
		adapter->ahw->diag_cnt++;
	else
		dump_skb(skb, adapter);

	dev_kfree_skb_any(skb);
	adapter->stats.rx_pkts++;
	adapter->stats.rxbytes += length;
}

void qlcnic_82xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
{
	struct qlcnic_adapter *adapter = sds_ring->adapter;
	struct status_desc *desc;
	u64 sts_data0;
	int ring, opcode, desc_cnt;
	u32 consumer = sds_ring->consumer;

	desc = &sds_ring->desc_head[consumer];
	sts_data0 = le64_to_cpu(desc->status_desc_data[0]);

	if (!(sts_data0 & STATUS_OWNER_HOST))
		return;

	desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
	opcode = qlcnic_get_sts_opcode(sts_data0);
	switch (opcode) {
	case QLCNIC_RESPONSE_DESC:
		qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
		break;
	default:
		ring = qlcnic_get_sts_type(sts_data0);
		qlcnic_process_rcv_diag(adapter, ring, sts_data0);
		break;
	}

	for (; desc_cnt > 0; desc_cnt--) {
		desc = &sds_ring->desc_head[consumer];
		desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
		consumer = get_next_index(consumer, sds_ring->num_desc);
	}

	sds_ring->consumer = consumer;
	writel(consumer, sds_ring->crb_sts_consumer);
}

int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
			 struct net_device *netdev)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_host_tx_ring *tx_ring;

	if (qlcnic_alloc_sds_rings(recv_ctx, adapter->drv_sds_rings))
		return -ENOMEM;

	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		if (qlcnic_check_multi_tx(adapter) &&
		    !adapter->ahw->diag_test) {
			netif_napi_add(netdev, &sds_ring->napi, qlcnic_rx_poll,
				       NAPI_POLL_WEIGHT);
		} else {
			if (ring == (adapter->drv_sds_rings - 1))
				netif_napi_add(netdev, &sds_ring->napi,
					       qlcnic_poll,
					       NAPI_POLL_WEIGHT);
			else
				netif_napi_add(netdev, &sds_ring->napi,
					       qlcnic_rx_poll,
					       NAPI_POLL_WEIGHT);
		}
	}

	if (qlcnic_alloc_tx_rings(adapter, netdev)) {
		qlcnic_free_sds_rings(recv_ctx);
		return -ENOMEM;
	}

	if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) {
		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
			tx_ring = &adapter->tx_ring[ring];
			netif_tx_napi_add(netdev, &tx_ring->napi, qlcnic_tx_poll,
					  NAPI_POLL_WEIGHT);
		}
	}

	return 0;
}

void qlcnic_82xx_napi_del(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_host_tx_ring *tx_ring;

	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		netif_napi_del(&sds_ring->napi);
	}

	qlcnic_free_sds_rings(adapter->recv_ctx);

	if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) {
		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
			tx_ring = &adapter->tx_ring[ring];
			netif_napi_del(&tx_ring->napi);
		}
	}

	qlcnic_free_tx_rings(adapter);
}

void qlcnic_82xx_napi_enable(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_host_tx_ring *tx_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return;

	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		napi_enable(&sds_ring->napi);
		qlcnic_enable_sds_intr(adapter, sds_ring);
	}

	if (qlcnic_check_multi_tx(adapter) &&
	    (adapter->flags & QLCNIC_MSIX_ENABLED) &&
	    !adapter->ahw->diag_test) {
		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
			tx_ring = &adapter->tx_ring[ring];
			napi_enable(&tx_ring->napi);
			qlcnic_enable_tx_intr(adapter, tx_ring);
		}
	}
}

void qlcnic_82xx_napi_disable(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_host_tx_ring *tx_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return;

	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		qlcnic_disable_sds_intr(adapter, sds_ring);
		napi_synchronize(&sds_ring->napi);
		napi_disable(&sds_ring->napi);
	}

	if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
	    !adapter->ahw->diag_test &&
	    qlcnic_check_multi_tx(adapter)) {
		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
			tx_ring = &adapter->tx_ring[ring];
			qlcnic_disable_tx_intr(adapter, tx_ring);
			napi_synchronize(&tx_ring->napi);
			napi_disable(&tx_ring->napi);
		}
	}
}

#define QLC_83XX_NORMAL_LB_PKT	(1ULL << 36)
#define QLC_83XX_LRO_LB_PKT	(1ULL << 46)

static inline int qlcnic_83xx_is_lb_pkt(u64 sts_data, int lro_pkt)
{
	if (lro_pkt)
		return (sts_data & QLC_83XX_LRO_LB_PKT) ? 1 : 0;
	else
		return (sts_data & QLC_83XX_NORMAL_LB_PKT) ? 1 : 0;
}

#define QLCNIC_ENCAP_LENGTH_MASK	0x7f

static inline u8 qlcnic_encap_length(u64 sts_data)
{
	return sts_data & QLCNIC_ENCAP_LENGTH_MASK;
}

static struct qlcnic_rx_buffer *
qlcnic_83xx_process_rcv(struct qlcnic_adapter *adapter,
			struct qlcnic_host_sds_ring *sds_ring,
			u8 ring, u64 sts_data[])
{
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_rx_buffer *buffer;
	struct sk_buff *skb;
	struct qlcnic_host_rds_ring *rds_ring;
	int index, length, cksum, is_lb_pkt;
	u16 vid = 0xffff;
	int err;

	if (unlikely(ring >= adapter->max_rds_rings))
		return NULL;

	rds_ring = &recv_ctx->rds_rings[ring];

	index = qlcnic_83xx_hndl(sts_data[0]);
	if (unlikely(index >= rds_ring->num_desc))
		return NULL;

	buffer = &rds_ring->rx_buf_arr[index];
	length = qlcnic_83xx_pktln(sts_data[0]);
	cksum  = qlcnic_83xx_csum_status(sts_data[1]);
	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
	if (!skb)
		return buffer;

	if (length > rds_ring->skb_size)
		skb_put(skb, rds_ring->skb_size);
	else
		skb_put(skb, length);

	err = qlcnic_check_rx_tagging(adapter, skb, &vid);

	if (adapter->rx_mac_learn) {
		is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 0);
		qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, vid);
	}

	if (unlikely(err)) {
		adapter->stats.rxdropped++;
		dev_kfree_skb(skb);
		return buffer;
	}

	skb->protocol = eth_type_trans(skb, netdev);

	if (qlcnic_encap_length(sts_data[1]) &&
	    skb->ip_summed == CHECKSUM_UNNECESSARY) {
		skb->csum_level = 1;
		adapter->stats.encap_rx_csummed++;
	}

	if (vid != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);

	napi_gro_receive(&sds_ring->napi, skb);

	adapter->stats.rx_pkts++;
	adapter->stats.rxbytes += length;

	return buffer;
}

static struct qlcnic_rx_buffer *
qlcnic_83xx_process_lro(struct qlcnic_adapter *adapter,
			u8 ring, u64 sts_data[])
{
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_rx_buffer *buffer;
	struct sk_buff *skb;
	struct qlcnic_host_rds_ring *rds_ring;
	struct iphdr *iph;
	struct ipv6hdr *ipv6h;
	struct tcphdr *th;
	bool push;
	int l2_hdr_offset, l4_hdr_offset;
	int index, is_lb_pkt;
	u16 lro_length, length, data_offset, gso_size;
	u16 vid = 0xffff;
	int err;

	if (unlikely(ring >= adapter->max_rds_rings))
		return NULL;

	rds_ring = &recv_ctx->rds_rings[ring];

	index = qlcnic_83xx_hndl(sts_data[0]);
	if (unlikely(index >= rds_ring->num_desc))
		return NULL;

	buffer = &rds_ring->rx_buf_arr[index];

	lro_length = qlcnic_83xx_lro_pktln(sts_data[0]);
	l2_hdr_offset = qlcnic_83xx_l2_hdr_off(sts_data[1]);
	l4_hdr_offset = qlcnic_83xx_l4_hdr_off(sts_data[1]);
	push = qlcnic_83xx_is_psh_bit(sts_data[1]);

	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
	if (!skb)
		return buffer;

	if (qlcnic_83xx_is_tstamp(sts_data[1]))
		data_offset = l4_hdr_offset + QLCNIC_TCP_TS_HDR_SIZE;
	else
		data_offset = l4_hdr_offset + QLCNIC_TCP_HDR_SIZE;

	skb_put(skb, lro_length + data_offset);
	skb_pull(skb, l2_hdr_offset);

	err = qlcnic_check_rx_tagging(adapter, skb, &vid);

	if (adapter->rx_mac_learn) {
		is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 1);
		qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, vid);
	}

	if (unlikely(err)) {
		adapter->stats.rxdropped++;
		dev_kfree_skb(skb);
		return buffer;
	}

	skb->protocol = eth_type_trans(skb, netdev);
	if (ntohs(skb->protocol) == ETH_P_IPV6) {
		ipv6h = (struct ipv6hdr *)skb->data;
		th = (struct tcphdr *)(skb->data + sizeof(struct ipv6hdr));

		length = (th->doff << 2) + lro_length;
		ipv6h->payload_len = htons(length);
	} else {
		iph = (struct iphdr *)skb->data;
		th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
		length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
		csum_replace2(&iph->check, iph->tot_len, htons(length));
		iph->tot_len = htons(length);
	}

	th->psh = push;
	length = skb->len;

	if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP) {
		gso_size = qlcnic_83xx_get_lro_sts_mss(sts_data[0]);
		skb_shinfo(skb)->gso_size = gso_size;
		if (skb->protocol == htons(ETH_P_IPV6))
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
		else
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
	}

	if (vid != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);

	netif_receive_skb(skb);

	adapter->stats.lro_pkts++;
	adapter->stats.lrobytes += length;
	return buffer;
}

static int qlcnic_83xx_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring,
					int max)
{
	struct qlcnic_host_rds_ring *rds_ring;
	struct qlcnic_adapter *adapter = sds_ring->adapter;
	struct list_head *cur;
	struct status_desc *desc;
	struct qlcnic_rx_buffer *rxbuf = NULL;
	u8 ring;
	u64 sts_data[2];
	int count = 0, opcode;
	u32 consumer = sds_ring->consumer;

	while (count < max) {
		desc = &sds_ring->desc_head[consumer];
		sts_data[1] = le64_to_cpu(desc->status_desc_data[1]);
		opcode = qlcnic_83xx_opcode(sts_data[1]);
		if (!opcode)
			break;
		sts_data[0] = le64_to_cpu(desc->status_desc_data[0]);
		ring = QLCNIC_FETCH_RING_ID(sts_data[0]);

		switch (opcode) {
		case QLC_83XX_REG_DESC:
			rxbuf = qlcnic_83xx_process_rcv(adapter, sds_ring,
							ring, sts_data);
			break;
		case QLC_83XX_LRO_DESC:
			rxbuf = qlcnic_83xx_process_lro(adapter, ring,
							sts_data);
			break;
		default:
			dev_info(&adapter->pdev->dev,
				 "Unknown opcode: 0x%x\n", opcode);
			goto skip;
		}

		if (likely(rxbuf))
			list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
		else
			adapter->stats.null_rxbuf++;
skip:
		desc = &sds_ring->desc_head[consumer];
		/* Reset the descriptor */
		desc->status_desc_data[1] = 0;
		consumer = get_next_index(consumer, sds_ring->num_desc);
		count++;
	}

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &adapter->recv_ctx->rds_rings[ring];
		if (!list_empty(&sds_ring->free_list[ring])) {
			list_for_each(cur, &sds_ring->free_list[ring]) {
				rxbuf = list_entry(cur, struct qlcnic_rx_buffer,
						   list);
				qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf);
			}
			spin_lock(&rds_ring->lock);
			list_splice_tail_init(&sds_ring->free_list[ring],
					      &rds_ring->free_list);
			spin_unlock(&rds_ring->lock);
		}

		qlcnic_post_rx_buffers_nodb(adapter, rds_ring, ring);
	}

	if (count) {
		sds_ring->consumer = consumer;
		writel(consumer, sds_ring->crb_sts_consumer);
	}

	return count;
}

static int qlcnic_83xx_msix_sriov_vf_poll(struct napi_struct *napi, int budget)
{
	int tx_complete;
	int work_done;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_adapter *adapter;
	struct qlcnic_host_tx_ring *tx_ring;

	sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
	adapter = sds_ring->adapter;
	/* tx ring count = 1 */
	tx_ring = adapter->tx_ring;

	tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
	work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);

	/* Check if we need a repoll */
	if (!tx_complete)
		work_done = budget;

	if (work_done < budget) {
		napi_complete_done(&sds_ring->napi, work_done);
		qlcnic_enable_sds_intr(adapter, sds_ring);
	}

	return work_done;
}

static int qlcnic_83xx_poll(struct napi_struct *napi, int budget)
{
	int tx_complete;
	int work_done;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_adapter *adapter;
	struct qlcnic_host_tx_ring *tx_ring;

	sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
	adapter = sds_ring->adapter;
	/* tx ring count = 1 */
	tx_ring = adapter->tx_ring;

	tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
	work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);

	/* Check if we need a repoll */
	if (!tx_complete)
		work_done = budget;

	if (work_done < budget) {
		napi_complete_done(&sds_ring->napi, work_done);
		qlcnic_enable_sds_intr(adapter, sds_ring);
	}

	return work_done;
}

static int qlcnic_83xx_msix_tx_poll(struct napi_struct *napi, int budget)
{
	int work_done;
	struct qlcnic_host_tx_ring *tx_ring;
	struct qlcnic_adapter *adapter;

	tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi);
	adapter = tx_ring->adapter;
	work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
	if (work_done) {
		napi_complete(&tx_ring->napi);
		if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
			qlcnic_enable_tx_intr(adapter, tx_ring);
	} else {
		/* need a repoll */
		work_done = budget;
	}

	return work_done;
}

static int qlcnic_83xx_rx_poll(struct napi_struct *napi, int budget)
{
	int work_done;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_adapter *adapter;

	sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
	adapter = sds_ring->adapter;
	work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
	if (work_done < budget) {
		napi_complete_done(&sds_ring->napi, work_done);
		if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
			qlcnic_enable_sds_intr(adapter, sds_ring);
	}

	return work_done;
}

void qlcnic_83xx_napi_enable(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_host_tx_ring *tx_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return;

	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		napi_enable(&sds_ring->napi);
		if (adapter->flags & QLCNIC_MSIX_ENABLED)
			qlcnic_enable_sds_intr(adapter, sds_ring);
	}

	if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
	    !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
			tx_ring = &adapter->tx_ring[ring];
			napi_enable(&tx_ring->napi);
			qlcnic_enable_tx_intr(adapter, tx_ring);
		}
	}
}

void qlcnic_83xx_napi_disable(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_host_tx_ring *tx_ring;

	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return;

	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		if (adapter->flags & QLCNIC_MSIX_ENABLED)
			qlcnic_disable_sds_intr(adapter, sds_ring);
		napi_synchronize(&sds_ring->napi);
		napi_disable(&sds_ring->napi);
	}

	if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
	    !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
			tx_ring = &adapter->tx_ring[ring];
			qlcnic_disable_tx_intr(adapter, tx_ring);
			napi_synchronize(&tx_ring->napi);
			napi_disable(&tx_ring->napi);
		}
	}
}

int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter,
			 struct net_device *netdev)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_host_tx_ring *tx_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	if (qlcnic_alloc_sds_rings(recv_ctx, adapter->drv_sds_rings))
		return -ENOMEM;

	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		if (adapter->flags & QLCNIC_MSIX_ENABLED) {
			if (!(adapter->flags & QLCNIC_TX_INTR_SHARED))
				netif_napi_add(netdev, &sds_ring->napi,
					       qlcnic_83xx_rx_poll,
					       NAPI_POLL_WEIGHT);
			else
				netif_napi_add(netdev, &sds_ring->napi,
					       qlcnic_83xx_msix_sriov_vf_poll,
					       NAPI_POLL_WEIGHT);

		} else {
			netif_napi_add(netdev, &sds_ring->napi,
				       qlcnic_83xx_poll,
				       NAPI_POLL_WEIGHT);
		}
	}

	if (qlcnic_alloc_tx_rings(adapter, netdev)) {
		qlcnic_free_sds_rings(recv_ctx);
		return -ENOMEM;
	}

	if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
	    !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
			tx_ring = &adapter->tx_ring[ring];
			netif_tx_napi_add(netdev, &tx_ring->napi,
					  qlcnic_83xx_msix_tx_poll,
					  NAPI_POLL_WEIGHT);
		}
	}

	return 0;
}

void qlcnic_83xx_napi_del(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_host_tx_ring *tx_ring;

	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		netif_napi_del(&sds_ring->napi);
	}

	qlcnic_free_sds_rings(adapter->recv_ctx);

	if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
	    !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
			tx_ring = &adapter->tx_ring[ring];
			netif_napi_del(&tx_ring->napi);
		}
	}

	qlcnic_free_tx_rings(adapter);
}

static void qlcnic_83xx_process_rcv_diag(struct qlcnic_adapter *adapter,
					 int ring, u64 sts_data[])
{
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct sk_buff *skb;
	struct qlcnic_host_rds_ring *rds_ring;
	int index, length;

	if (unlikely(ring >= adapter->max_rds_rings))
		return;

	rds_ring = &recv_ctx->rds_rings[ring];
	index = qlcnic_83xx_hndl(sts_data[0]);
	if (unlikely(index >= rds_ring->num_desc))
		return;

	length = qlcnic_83xx_pktln(sts_data[0]);

	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
	if (!skb)
		return;

	if (length > rds_ring->skb_size)
		skb_put(skb, rds_ring->skb_size);
	else
		skb_put(skb, length);

	if (!qlcnic_check_loopback_buff(skb->data, adapter->mac_addr))
		adapter->ahw->diag_cnt++;
	else
		dump_skb(skb, adapter);

	dev_kfree_skb_any(skb);
}

void qlcnic_83xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
{
	struct qlcnic_adapter *adapter = sds_ring->adapter;
	struct status_desc *desc;
	u64 sts_data[2];
	int ring, opcode;
	u32 consumer = sds_ring->consumer;

	desc = &sds_ring->desc_head[consumer];
	sts_data[0] = le64_to_cpu(desc->status_desc_data[0]);
	sts_data[1] = le64_to_cpu(desc->status_desc_data[1]);
	opcode = qlcnic_83xx_opcode(sts_data[1]);
	if (!opcode)
		return;

	ring = QLCNIC_FETCH_RING_ID(sts_data[0]);
	qlcnic_83xx_process_rcv_diag(adapter, ring, sts_data);
	desc = &sds_ring->desc_head[consumer];
	desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
	consumer = get_next_index(consumer, sds_ring->num_desc);
	sds_ring->consumer = consumer;
	writel(consumer, sds_ring->crb_sts_consumer);
}