/*
 * QLogic qlcnic NIC Driver
 * Copyright (c) 2009-2013 QLogic Corporation
 *
 * See LICENSE.qlcnic for copyright and licensing details.
 */

#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <net/checksum.h>

#include "qlcnic.h"

#define TX_ETHER_PKT		0x01
#define TX_TCP_PKT		0x02
#define TX_UDP_PKT		0x03
#define TX_IP_PKT		0x04
#define TX_TCP_LSO		0x05
#define TX_TCP_LSO6		0x06
#define TX_TCPV6_PKT		0x0b
#define TX_UDPV6_PKT		0x0c
#define FLAGS_VLAN_TAGGED	0x10
#define FLAGS_VLAN_OOB		0x40

#define qlcnic_set_tx_vlan_tci(cmd_desc, v)	\
	((cmd_desc)->vlan_TCI = cpu_to_le16(v))
#define qlcnic_set_cmd_desc_port(cmd_desc, var)	\
	((cmd_desc)->port_ctxid |= ((var) & 0x0F))
#define qlcnic_set_cmd_desc_ctxid(cmd_desc, var)	\
	((cmd_desc)->port_ctxid |= (((var) << 4) & 0xF0))

#define qlcnic_set_tx_port(_desc, _port) \
	((_desc)->port_ctxid = ((_port) & 0xf) | (((_port) << 4) & 0xf0))

#define qlcnic_set_tx_flags_opcode(_desc, _flags, _opcode) \
	((_desc)->flags_opcode |= \
	 cpu_to_le16(((_flags) & 0x7f) | (((_opcode) & 0x3f) << 7)))

#define qlcnic_set_tx_frags_len(_desc, _frags, _len) \
	((_desc)->nfrags__length = \
	 cpu_to_le32(((_frags) & 0xff) | (((_len) & 0xffffff) << 8)))

/* owner bits of status_desc */
#define STATUS_OWNER_HOST	(0x1ULL << 56)
#define STATUS_OWNER_PHANTOM	(0x2ULL << 56)

/* Status descriptor bit layout:
 *   0-3 port, 4-7 status, 8-11 type, 12-27 total_length
 *   28-43 reference_handle, 44-47 protocol, 48-52 pkt_offset
 *   53-55 desc_cnt, 56-57 owner, 58-63 opcode
 */
#define qlcnic_get_sts_port(sts_data)	\
	((sts_data) & 0x0F)
#define qlcnic_get_sts_status(sts_data)	\
	(((sts_data) >> 4) & 0x0F)
#define qlcnic_get_sts_type(sts_data)	\
	(((sts_data) >> 8) & 0x0F)
#define qlcnic_get_sts_totallength(sts_data)	\
	(((sts_data) >> 12) & 0xFFFF)
#define qlcnic_get_sts_refhandle(sts_data)	\
	(((sts_data) >> 28) & 0xFFFF)
#define qlcnic_get_sts_prot(sts_data)	\
	(((sts_data) >> 44) & 0x0F)
#define qlcnic_get_sts_pkt_offset(sts_data)	\
	(((sts_data) >> 48) & 0x1F)
#define qlcnic_get_sts_desc_cnt(sts_data)	\
	(((sts_data) >> 53) & 0x7)
#define qlcnic_get_sts_opcode(sts_data)	\
	(((sts_data) >> 58) & 0x03F)
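/*
 * Worked example (illustrative only, not used by the driver): with the
 * bit layout above, a status word of 0x11200001005EA221ULL decodes as
 *
 *   qlcnic_get_sts_port()        -> 0x1
 *   qlcnic_get_sts_status()      -> 0x2   (STATUS_CKSUM_OK)
 *   qlcnic_get_sts_type()        -> 0x2
 *   qlcnic_get_sts_totallength() -> 0x5EA (1514 bytes)
 *   qlcnic_get_sts_refhandle()   -> 0x10
 *   qlcnic_get_sts_desc_cnt()    -> 1
 *   owner bits (56-57)           -> 0x1   (STATUS_OWNER_HOST)
 *   qlcnic_get_sts_opcode()      -> 0x04  (QLCNIC_RXPKT_DESC)
 */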
#define qlcnic_get_lro_sts_refhandle(sts_data)	\
	((sts_data) & 0x07FFF)
#define qlcnic_get_lro_sts_length(sts_data)	\
	(((sts_data) >> 16) & 0x0FFFF)
#define qlcnic_get_lro_sts_l2_hdr_offset(sts_data)	\
	(((sts_data) >> 32) & 0x0FF)
#define qlcnic_get_lro_sts_l4_hdr_offset(sts_data)	\
	(((sts_data) >> 40) & 0x0FF)
#define qlcnic_get_lro_sts_timestamp(sts_data)	\
	(((sts_data) >> 48) & 0x1)
#define qlcnic_get_lro_sts_type(sts_data)	\
	(((sts_data) >> 49) & 0x7)
#define qlcnic_get_lro_sts_push_flag(sts_data)	\
	(((sts_data) >> 52) & 0x1)
#define qlcnic_get_lro_sts_seq_number(sts_data)	\
	((sts_data) & 0x0FFFFFFFF)
#define qlcnic_get_lro_sts_mss(sts_data1)	\
	(((sts_data1) >> 32) & 0x0FFFF)

#define qlcnic_83xx_get_lro_sts_mss(sts)	((sts) & 0xffff)

/* opcode field in status_desc */
#define QLCNIC_SYN_OFFLOAD	0x03
#define QLCNIC_RXPKT_DESC	0x04
#define QLCNIC_OLD_RXPKT_DESC	0x3f
#define QLCNIC_RESPONSE_DESC	0x05
#define QLCNIC_LRO_DESC		0x12

#define QLCNIC_TX_POLL_BUDGET		128
#define QLCNIC_TCP_HDR_SIZE		20
#define QLCNIC_TCP_TS_OPTION_SIZE	12
#define QLCNIC_FETCH_RING_ID(handle)	((handle) >> 63)
#define QLCNIC_DESC_OWNER_FW		cpu_to_le64(STATUS_OWNER_PHANTOM)

#define QLCNIC_TCP_TS_HDR_SIZE (QLCNIC_TCP_HDR_SIZE + QLCNIC_TCP_TS_OPTION_SIZE)

/* for status field in status_desc */
#define STATUS_CKSUM_LOOP	0
#define STATUS_CKSUM_OK		2

#define qlcnic_83xx_pktln(sts)		(((sts) >> 32) & 0x3FFF)
#define qlcnic_83xx_hndl(sts)		(((sts) >> 48) & 0x7FFF)
#define qlcnic_83xx_csum_status(sts)	(((sts) >> 39) & 7)
#define qlcnic_83xx_opcode(sts)		(((sts) >> 42) & 0xF)
#define qlcnic_83xx_vlan_tag(sts)	(((sts) >> 48) & 0xFFFF)
#define qlcnic_83xx_lro_pktln(sts)	(((sts) >> 32) & 0x3FFF)
#define qlcnic_83xx_l2_hdr_off(sts)	(((sts) >> 16) & 0xFF)
#define qlcnic_83xx_l4_hdr_off(sts)	(((sts) >> 24) & 0xFF)
#define qlcnic_83xx_pkt_cnt(sts)	(((sts) >> 16) & 0x7)
#define qlcnic_83xx_is_tstamp(sts)	(((sts) >> 40) & 1)
#define qlcnic_83xx_is_psh_bit(sts)	(((sts) >> 41) & 1)
#define qlcnic_83xx_is_ip_align(sts)	(((sts) >> 46) & 1)
#define qlcnic_83xx_has_vlan_tag(sts)	(((sts) >> 47) & 1)

struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *,
				     struct qlcnic_host_rds_ring *, u16, u16);
inline void qlcnic_enable_tx_intr(struct qlcnic_adapter *adapter,
				  struct qlcnic_host_tx_ring *tx_ring)
{
	if (qlcnic_check_multi_tx(adapter) &&
	    !adapter->ahw->diag_test)
		writel(0x0, tx_ring->crb_intr_mask);
}

static inline void qlcnic_disable_tx_int(struct qlcnic_adapter *adapter,
					 struct qlcnic_host_tx_ring *tx_ring)
{
	if (qlcnic_check_multi_tx(adapter) &&
	    !adapter->ahw->diag_test)
		writel(1, tx_ring->crb_intr_mask);
}

inline void qlcnic_83xx_enable_tx_intr(struct qlcnic_adapter *adapter,
				       struct qlcnic_host_tx_ring *tx_ring)
{
	writel(0, tx_ring->crb_intr_mask);
}

inline void qlcnic_83xx_disable_tx_intr(struct qlcnic_adapter *adapter,
					struct qlcnic_host_tx_ring *tx_ring)
{
	writel(1, tx_ring->crb_intr_mask);
}

static inline u8 qlcnic_mac_hash(u64 mac)
{
	return (u8)((mac & 0xff) ^ ((mac >> 40) & 0xff));
}
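/*
 * Worked example (illustrative only): the callers below memcpy() the six
 * source-MAC bytes into a zeroed u64, so on a little-endian host byte 0
 * of the MAC sits in bits 0-7 and byte 5 in bits 40-47.  For the MAC
 * 00:0e:1e:aa:bb:cc the hash is therefore 0x00 ^ 0xcc = 0xcc, and the
 * bucket index is that value masked with (fbucket_size - 1).
 */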
static inline u32 qlcnic_get_ref_handle(struct qlcnic_adapter *adapter,
					u16 handle, u8 ring_id)
{
	if (qlcnic_83xx_check(adapter))
		return handle | (ring_id << 15);
	else
		return handle;
}

static inline int qlcnic_82xx_is_lb_pkt(u64 sts_data)
{
	return (qlcnic_get_sts_status(sts_data) == STATUS_CKSUM_LOOP) ? 1 : 0;
}

static void qlcnic_delete_rx_list_mac(struct qlcnic_adapter *adapter,
				      struct qlcnic_filter *fil,
				      void *addr, u16 vlan_id)
{
	int ret;
	u8 op;

	op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
	ret = qlcnic_sre_macaddr_change(adapter, addr, vlan_id, op);
	if (ret)
		return;

	op = vlan_id ? QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL;
	ret = qlcnic_sre_macaddr_change(adapter, addr, vlan_id, op);
	if (!ret) {
		hlist_del(&fil->fnode);
		adapter->rx_fhash.fnum--;
	}
}

static struct qlcnic_filter *qlcnic_find_mac_filter(struct hlist_head *head,
						    void *addr, u16 vlan_id)
{
	struct qlcnic_filter *tmp_fil = NULL;
	struct hlist_node *n;

	hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
		if (!memcmp(tmp_fil->faddr, addr, ETH_ALEN) &&
		    tmp_fil->vlan_id == vlan_id)
			return tmp_fil;
	}

	return NULL;
}
void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter, struct sk_buff *skb,
			  int loopback_pkt, u16 vlan_id)
{
	struct ethhdr *phdr = (struct ethhdr *)(skb->data);
	struct qlcnic_filter *fil, *tmp_fil;
	struct hlist_head *head;
	unsigned long time;
	u64 src_addr = 0;
	u8 hindex, op;
	int ret;

	memcpy(&src_addr, phdr->h_source, ETH_ALEN);
	hindex = qlcnic_mac_hash(src_addr) &
		 (adapter->fhash.fbucket_size - 1);

	if (loopback_pkt) {
		if (adapter->rx_fhash.fnum >= adapter->rx_fhash.fmax)
			return;

		head = &(adapter->rx_fhash.fhead[hindex]);

		tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id);
		if (tmp_fil) {
			time = tmp_fil->ftime;
			if (time_after(jiffies, QLCNIC_READD_AGE * HZ + time))
				tmp_fil->ftime = jiffies;
			return;
		}

		fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
		if (!fil)
			return;

		fil->ftime = jiffies;
		memcpy(fil->faddr, &src_addr, ETH_ALEN);
		fil->vlan_id = vlan_id;
		spin_lock(&adapter->rx_mac_learn_lock);
		hlist_add_head(&(fil->fnode), head);
		adapter->rx_fhash.fnum++;
		spin_unlock(&adapter->rx_mac_learn_lock);
	} else {
		head = &adapter->fhash.fhead[hindex];

		spin_lock(&adapter->mac_learn_lock);

		tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id);
		if (tmp_fil) {
			op = vlan_id ? QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL;
			ret = qlcnic_sre_macaddr_change(adapter,
							(u8 *)&src_addr,
							vlan_id, op);
			if (!ret) {
				hlist_del(&tmp_fil->fnode);
				adapter->fhash.fnum--;
			}

			spin_unlock(&adapter->mac_learn_lock);

			return;
		}

		spin_unlock(&adapter->mac_learn_lock);

		head = &adapter->rx_fhash.fhead[hindex];

		spin_lock(&adapter->rx_mac_learn_lock);

		tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id);
		if (tmp_fil)
			qlcnic_delete_rx_list_mac(adapter, tmp_fil, &src_addr,
						  vlan_id);

		spin_unlock(&adapter->rx_mac_learn_lock);
	}
}
void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
			       u16 vlan_id)
{
	struct cmd_desc_type0 *hwdesc;
	struct qlcnic_nic_req *req;
	struct qlcnic_mac_req *mac_req;
	struct qlcnic_vlan_req *vlan_req;
	struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
	u32 producer;
	u64 word;

	producer = tx_ring->producer;
	hwdesc = &tx_ring->desc_head[tx_ring->producer];

	req = (struct qlcnic_nic_req *)hwdesc;
	memset(req, 0, sizeof(struct qlcnic_nic_req));
	req->qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);

	word = QLCNIC_MAC_EVENT | ((u64)(adapter->portnum) << 16);
	req->req_hdr = cpu_to_le64(word);

	mac_req = (struct qlcnic_mac_req *)&(req->words[0]);
	mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
	memcpy(mac_req->mac_addr, uaddr, ETH_ALEN);

	vlan_req = (struct qlcnic_vlan_req *)&req->words[1];
	vlan_req->vlan_id = cpu_to_le16(vlan_id);

	tx_ring->producer = get_next_index(producer, tx_ring->num_desc);
	smp_mb();
}
static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
			       struct cmd_desc_type0 *first_desc,
			       struct sk_buff *skb)
{
	struct qlcnic_filter *fil, *tmp_fil;
	struct hlist_node *n;
	struct hlist_head *head;
	struct net_device *netdev = adapter->netdev;
	struct ethhdr *phdr = (struct ethhdr *)(skb->data);
	u64 src_addr = 0;
	u16 vlan_id = 0;
	u8 hindex;

	if (ether_addr_equal(phdr->h_source, adapter->mac_addr))
		return;

	if (adapter->fhash.fnum >= adapter->fhash.fmax) {
		adapter->stats.mac_filter_limit_overrun++;
		netdev_info(netdev, "Can not add more than %d mac addresses\n",
			    adapter->fhash.fmax);
		return;
	}

	memcpy(&src_addr, phdr->h_source, ETH_ALEN);
	hindex = qlcnic_mac_hash(src_addr) & (adapter->fhash.fbucket_size - 1);
	head = &(adapter->fhash.fhead[hindex]);

	hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
		if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
		    tmp_fil->vlan_id == vlan_id) {
			if (jiffies > (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
				qlcnic_change_filter(adapter, &src_addr,
						     vlan_id);
			tmp_fil->ftime = jiffies;
			return;
		}
	}

	fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
	if (!fil)
		return;

	qlcnic_change_filter(adapter, &src_addr, vlan_id);
	fil->ftime = jiffies;
	fil->vlan_id = vlan_id;
	memcpy(fil->faddr, &src_addr, ETH_ALEN);
	spin_lock(&adapter->mac_learn_lock);
	hlist_add_head(&(fil->fnode), head);
	adapter->fhash.fnum++;
	spin_unlock(&adapter->mac_learn_lock);
}
static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
			 struct cmd_desc_type0 *first_desc, struct sk_buff *skb,
			 struct qlcnic_host_tx_ring *tx_ring)
{
	u8 l4proto, opcode = 0, hdr_len = 0;
	u16 flags = 0, vlan_tci = 0;
	int copied, offset, copy_len, size;
	struct cmd_desc_type0 *hwdesc;
	struct vlan_ethhdr *vh;
	u16 protocol = ntohs(skb->protocol);
	u32 producer = tx_ring->producer;

	if (protocol == ETH_P_8021Q) {
		vh = (struct vlan_ethhdr *)skb->data;
		flags = FLAGS_VLAN_TAGGED;
		vlan_tci = ntohs(vh->h_vlan_TCI);
		protocol = ntohs(vh->h_vlan_encapsulated_proto);
	} else if (vlan_tx_tag_present(skb)) {
		flags = FLAGS_VLAN_OOB;
		vlan_tci = vlan_tx_tag_get(skb);
	}
	if (unlikely(adapter->tx_pvid)) {
		if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
			return -EIO;
		if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED))
			goto set_flags;

		flags = FLAGS_VLAN_OOB;
		vlan_tci = adapter->tx_pvid;
	}
set_flags:
	qlcnic_set_tx_vlan_tci(first_desc, vlan_tci);
	qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);

	if (*(skb->data) & BIT_0) {
		flags |= BIT_0;
		memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
	}
	opcode = TX_ETHER_PKT;
	if (skb_is_gso(skb)) {
		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		first_desc->total_hdr_length = hdr_len;
		opcode = (protocol == ETH_P_IPV6) ? TX_TCP_LSO6 : TX_TCP_LSO;

		/* For LSO, we need to copy the MAC/IP/TCP headers into
		 * the descriptor ring
		 */
		copied = 0;
		offset = 2;

		if (flags & FLAGS_VLAN_OOB) {
			first_desc->total_hdr_length += VLAN_HLEN;
			first_desc->tcp_hdr_offset = VLAN_HLEN;
			first_desc->ip_hdr_offset = VLAN_HLEN;

			/* Only in case of TSO on vlan device */
			flags |= FLAGS_VLAN_TAGGED;

			/* Create a TSO vlan header template for firmware */
			hwdesc = &tx_ring->desc_head[producer];
			tx_ring->cmd_buf_arr[producer].skb = NULL;

			copy_len = min((int)sizeof(struct cmd_desc_type0) -
				       offset, hdr_len + VLAN_HLEN);

			vh = (struct vlan_ethhdr *)((char *)hwdesc + 2);
			skb_copy_from_linear_data(skb, vh, 12);
			vh->h_vlan_proto = htons(ETH_P_8021Q);
			vh->h_vlan_TCI = htons(vlan_tci);

			skb_copy_from_linear_data_offset(skb, 12,
							 (char *)vh + 16,
							 copy_len - 16);

			copied = copy_len - VLAN_HLEN;
			offset = 0;
			producer = get_next_index(producer, tx_ring->num_desc);
		}

		while (copied < hdr_len) {
			size = (int)sizeof(struct cmd_desc_type0) - offset;
			copy_len = min(size, (hdr_len - copied));
			hwdesc = &tx_ring->desc_head[producer];
			tx_ring->cmd_buf_arr[producer].skb = NULL;
			skb_copy_from_linear_data_offset(skb, copied,
							 (char *)hwdesc +
							 offset, copy_len);
			copied += copy_len;
			offset = 0;
			producer = get_next_index(producer, tx_ring->num_desc);
		}

		tx_ring->producer = producer;
		smp_mb();
		adapter->stats.lso_frames++;

	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (protocol == ETH_P_IP) {
			l4proto = ip_hdr(skb)->protocol;

			if (l4proto == IPPROTO_TCP)
				opcode = TX_TCP_PKT;
			else if (l4proto == IPPROTO_UDP)
				opcode = TX_UDP_PKT;
		} else if (protocol == ETH_P_IPV6) {
			l4proto = ipv6_hdr(skb)->nexthdr;

			if (l4proto == IPPROTO_TCP)
				opcode = TX_TCPV6_PKT;
			else if (l4proto == IPPROTO_UDP)
				opcode = TX_UDPV6_PKT;
		}
	}
	first_desc->tcp_hdr_offset += skb_transport_offset(skb);
	first_desc->ip_hdr_offset += skb_network_offset(skb);
	qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);

	return 0;
}
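/*
 * Worked example for the LSO header copy above (illustrative only,
 * assuming sizeof(struct cmd_desc_type0) == 64): for hdr_len = 66
 * (14 byte Ethernet + 20 byte IP + 32 byte TCP with options) and an
 * out-of-band VLAN tag, the first copy descriptor takes
 * min(64 - 2, 66 + VLAN_HLEN) = 62 bytes starting at offset 2 (the
 * rebuilt 802.1Q header plus the first 58 original header bytes), and
 * the remaining 66 - 58 = 8 bytes land in one more descriptor, so the
 * header template consumes two extra ring entries.
 */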
static int qlcnic_map_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
			     struct qlcnic_cmd_buffer *pbuf)
{
	struct qlcnic_skb_frag *nf;
	struct skb_frag_struct *frag;
	int i, nr_frags;
	dma_addr_t map;

	nr_frags = skb_shinfo(skb)->nr_frags;
	nf = &pbuf->frag_array[0];

	map = pci_map_single(pdev, skb->data, skb_headlen(skb),
			     PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(pdev, map))
		goto out_err;

	nf->dma = map;
	nf->length = skb_headlen(skb);

	for (i = 0; i < nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		nf = &pbuf->frag_array[i+1];
		map = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
				       DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, map))
			goto unwind;

		nf->dma = map;
		nf->length = skb_frag_size(frag);
	}

	return 0;

unwind:
	while (--i >= 0) {
		nf = &pbuf->frag_array[i+1];
		pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
	}

	nf = &pbuf->frag_array[0];
	pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);

out_err:
	return -ENOMEM;
}

static void qlcnic_unmap_buffers(struct pci_dev *pdev, struct sk_buff *skb,
				 struct qlcnic_cmd_buffer *pbuf)
{
	struct qlcnic_skb_frag *nf = &pbuf->frag_array[0];
	int i, nr_frags = skb_shinfo(skb)->nr_frags;

	for (i = 0; i < nr_frags; i++) {
		nf = &pbuf->frag_array[i+1];
		pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
	}

	nf = &pbuf->frag_array[0];
	pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
	pbuf->skb = NULL;
}

static inline void qlcnic_clear_cmddesc(u64 *desc)
{
	desc[0] = 0ULL;
	desc[2] = 0ULL;
	desc[7] = 0ULL;
}
netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	struct qlcnic_host_tx_ring *tx_ring;
	struct qlcnic_cmd_buffer *pbuf;
	struct qlcnic_skb_frag *buffrag;
	struct cmd_desc_type0 *hwdesc, *first_desc;
	struct pci_dev *pdev;
	struct ethhdr *phdr;
	int i, k, frag_count, delta = 0;
	u32 producer, num_txd;

	if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
		netif_tx_stop_all_queues(netdev);
		return NETDEV_TX_BUSY;
	}

	if (adapter->flags & QLCNIC_MACSPOOF) {
		phdr = (struct ethhdr *)skb->data;
		if (!ether_addr_equal(phdr->h_source, adapter->mac_addr))
			goto drop_packet;
	}

	if (qlcnic_check_multi_tx(adapter))
		tx_ring = &adapter->tx_ring[skb_get_queue_mapping(skb)];
	else
		tx_ring = &adapter->tx_ring[0];
	num_txd = tx_ring->num_desc;

	frag_count = skb_shinfo(skb)->nr_frags + 1;

	/* 14 frags supported for normal packet and
	 * 32 frags supported for TSO packet
	 */
	if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) {
		for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++)
			delta += skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (!__pskb_pull_tail(skb, delta))
			goto drop_packet;

		frag_count = 1 + skb_shinfo(skb)->nr_frags;
	}

	if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
		netif_tx_stop_queue(tx_ring->txq);
		if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
			netif_tx_start_queue(tx_ring->txq);
		} else {
			adapter->stats.xmit_off++;
			tx_ring->xmit_off++;
			return NETDEV_TX_BUSY;
		}
	}

	producer = tx_ring->producer;
	pbuf = &tx_ring->cmd_buf_arr[producer];
	pdev = adapter->pdev;
	first_desc = &tx_ring->desc_head[producer];
	hwdesc = &tx_ring->desc_head[producer];
	qlcnic_clear_cmddesc((u64 *)hwdesc);

	if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
		adapter->stats.tx_dma_map_error++;
		goto drop_packet;
	}

	pbuf->skb = skb;
	pbuf->frag_count = frag_count;

	qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
	qlcnic_set_tx_port(first_desc, adapter->portnum);

	for (i = 0; i < frag_count; i++) {
		k = i % 4;

		if ((k == 0) && (i > 0)) {
			/* move to next desc. */
			producer = get_next_index(producer, num_txd);
			hwdesc = &tx_ring->desc_head[producer];
			qlcnic_clear_cmddesc((u64 *)hwdesc);
			tx_ring->cmd_buf_arr[producer].skb = NULL;
		}

		buffrag = &pbuf->frag_array[i];
		hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
		switch (k) {
		case 0:
			hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
			break;
		case 1:
			hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
			break;
		case 2:
			hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
			break;
		case 3:
			hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
			break;
		}
	}

	tx_ring->producer = get_next_index(producer, num_txd);
	smp_mb();

	if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb, tx_ring)))
		goto unwind_buff;

	if (adapter->drv_mac_learn)
		qlcnic_send_filter(adapter, first_desc, skb);

	adapter->stats.txbytes += skb->len;
	adapter->stats.xmitcalled++;
	tx_ring->xmit_called++;

	qlcnic_update_cmd_producer(tx_ring);

	return NETDEV_TX_OK;

unwind_buff:
	qlcnic_unmap_buffers(pdev, skb, pbuf);
drop_packet:
	adapter->stats.txdropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
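/*
 * Worked example for the fragment-limit handling in qlcnic_xmit_frame()
 * (illustrative only): a non-TSO skb with 17 fragments gives
 * frag_count = 18, which exceeds QLCNIC_MAX_FRAGS_PER_TX (14).  The
 * driver sums the sizes of the first 18 - 14 = 4 fragments into 'delta'
 * and pulls that many bytes into the linear data area with
 * __pskb_pull_tail(), leaving 13 fragments so the frame again fits in
 * frag_count = 14 buffer descriptors.
 */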
void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->ahw->linkup && !linkup) {
		netdev_info(netdev, "NIC Link is down\n");
		adapter->ahw->linkup = 0;
		if (netif_running(netdev)) {
			netif_carrier_off(netdev);
			netif_tx_stop_all_queues(netdev);
		}
	} else if (!adapter->ahw->linkup && linkup) {
		netdev_info(netdev, "NIC Link is up\n");
		adapter->ahw->linkup = 1;
		if (netif_running(netdev)) {
			netif_carrier_on(netdev);
			netif_wake_queue(netdev);
		}
	}
}

static int qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
			       struct qlcnic_host_rds_ring *rds_ring,
			       struct qlcnic_rx_buffer *buffer)
{
	struct sk_buff *skb;
	dma_addr_t dma;
	struct pci_dev *pdev = adapter->pdev;

	skb = netdev_alloc_skb(adapter->netdev, rds_ring->skb_size);
	if (!skb) {
		adapter->stats.skb_alloc_failure++;
		return -ENOMEM;
	}

	skb_reserve(skb, NET_IP_ALIGN);
	dma = pci_map_single(pdev, skb->data,
			     rds_ring->dma_size, PCI_DMA_FROMDEVICE);

	if (pci_dma_mapping_error(pdev, dma)) {
		adapter->stats.rx_dma_map_error++;
		dev_kfree_skb_any(skb);
		return -ENOMEM;
	}

	buffer->skb = skb;
	buffer->dma = dma;

	return 0;
}

static void qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
					struct qlcnic_host_rds_ring *rds_ring,
					u8 ring_id)
{
	struct rcv_desc *pdesc;
	struct qlcnic_rx_buffer *buffer;
	int count = 0;
	uint32_t producer, handle;
	struct list_head *head;

	if (!spin_trylock(&rds_ring->lock))
		return;

	producer = rds_ring->producer;
	head = &rds_ring->free_list;
	while (!list_empty(head)) {
		buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);

		if (!buffer->skb) {
			if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
				break;
		}
		count++;
		list_del(&buffer->list);

		/* make a rcv descriptor */
		pdesc = &rds_ring->desc_head[producer];
		handle = qlcnic_get_ref_handle(adapter,
					       buffer->ref_handle, ring_id);
		pdesc->reference_handle = cpu_to_le16(handle);
		pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
		pdesc->addr_buffer = cpu_to_le64(buffer->dma);
		producer = get_next_index(producer, rds_ring->num_desc);
	}
	if (count) {
		rds_ring->producer = producer;
		writel((producer - 1) & (rds_ring->num_desc - 1),
		       rds_ring->crb_rcv_producer);
	}
	spin_unlock(&rds_ring->lock);
}
static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
				   struct qlcnic_host_tx_ring *tx_ring,
				   int budget)
{
	u32 sw_consumer, hw_consumer;
	int i, done, count = 0;
	struct qlcnic_cmd_buffer *buffer;
	struct pci_dev *pdev = adapter->pdev;
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_skb_frag *frag;

	sw_consumer = tx_ring->sw_consumer;
	hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));

	while (sw_consumer != hw_consumer) {
		buffer = &tx_ring->cmd_buf_arr[sw_consumer];
		if (buffer->skb) {
			frag = &buffer->frag_array[0];
			pci_unmap_single(pdev, frag->dma, frag->length,
					 PCI_DMA_TODEVICE);
			frag->dma = 0ULL;
			for (i = 1; i < buffer->frag_count; i++) {
				frag++;
				pci_unmap_page(pdev, frag->dma, frag->length,
					       PCI_DMA_TODEVICE);
				frag->dma = 0ULL;
			}
			adapter->stats.xmitfinished++;
			tx_ring->xmit_finished++;
			dev_kfree_skb_any(buffer->skb);
			buffer->skb = NULL;
		}

		sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
		if (++count >= budget)
			break;
	}

	if (count && netif_running(netdev)) {
		tx_ring->sw_consumer = sw_consumer;
		smp_mb();
		if (netif_tx_queue_stopped(tx_ring->txq) &&
		    netif_carrier_ok(netdev)) {
			if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
				netif_tx_wake_queue(tx_ring->txq);
				adapter->stats.xmit_on++;
				tx_ring->xmit_on++;
			}
		}
		adapter->tx_timeo_cnt = 0;
	}
	/*
	 * If everything is freed up to consumer then check if the ring is full
	 * If the ring is full then check if more needs to be freed and
	 * schedule the call back again.
	 *
	 * This happens when there are 2 CPUs. One could be freeing and the
	 * other filling it. If the ring is full when we get out of here and
	 * the card has already interrupted the host then the host can miss the
	 * interrupt.
	 *
	 * There is still a possible race condition and the host could miss an
	 * interrupt. The card has to take care of this.
	 */
	hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
	done = (sw_consumer == hw_consumer);

	return done;
}
static int qlcnic_poll(struct napi_struct *napi, int budget)
{
	int tx_complete, work_done;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_adapter *adapter;
	struct qlcnic_host_tx_ring *tx_ring;

	sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
	adapter = sds_ring->adapter;
	tx_ring = sds_ring->tx_ring;

	tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring,
					      budget);
	work_done = qlcnic_process_rcv_ring(sds_ring, budget);
	if ((work_done < budget) && tx_complete) {
		napi_complete(&sds_ring->napi);
		if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
			qlcnic_enable_int(sds_ring);
			qlcnic_enable_tx_intr(adapter, tx_ring);
		}
	}

	return work_done;
}

static int qlcnic_tx_poll(struct napi_struct *napi, int budget)
{
	struct qlcnic_host_tx_ring *tx_ring;
	struct qlcnic_adapter *adapter;
	int work_done;

	tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi);
	adapter = tx_ring->adapter;

	work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
	if (work_done) {
		napi_complete(&tx_ring->napi);
		if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
			qlcnic_enable_tx_intr(adapter, tx_ring);
	}

	return work_done;
}

static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
{
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_adapter *adapter;
	int work_done;

	sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
	adapter = sds_ring->adapter;

	work_done = qlcnic_process_rcv_ring(sds_ring, budget);

	if (work_done < budget) {
		napi_complete(&sds_ring->napi);
		if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
			qlcnic_enable_int(sds_ring);
	}

	return work_done;
}
*adapter
,
916 struct qlcnic_fw_msg
*msg
)
919 u16 cable_len
, link_speed
;
920 u8 link_status
, module
, duplex
, autoneg
, lb_status
= 0;
921 struct net_device
*netdev
= adapter
->netdev
;
923 adapter
->ahw
->has_link_events
= 1;
925 cable_OUI
= msg
->body
[1] & 0xffffffff;
926 cable_len
= (msg
->body
[1] >> 32) & 0xffff;
927 link_speed
= (msg
->body
[1] >> 48) & 0xffff;
929 link_status
= msg
->body
[2] & 0xff;
930 duplex
= (msg
->body
[2] >> 16) & 0xff;
931 autoneg
= (msg
->body
[2] >> 24) & 0xff;
932 lb_status
= (msg
->body
[2] >> 32) & 0x3;
934 module
= (msg
->body
[2] >> 8) & 0xff;
935 if (module
== LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE
)
936 dev_info(&netdev
->dev
,
937 "unsupported cable: OUI 0x%x, length %d\n",
938 cable_OUI
, cable_len
);
939 else if (module
== LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN
)
940 dev_info(&netdev
->dev
, "unsupported cable length %d\n",
943 if (!link_status
&& (lb_status
== QLCNIC_ILB_MODE
||
944 lb_status
== QLCNIC_ELB_MODE
))
945 adapter
->ahw
->loopback_state
|= QLCNIC_LINKEVENT
;
947 qlcnic_advert_link_change(adapter
, link_status
);
949 if (duplex
== LINKEVENT_FULL_DUPLEX
)
950 adapter
->ahw
->link_duplex
= DUPLEX_FULL
;
952 adapter
->ahw
->link_duplex
= DUPLEX_HALF
;
954 adapter
->ahw
->module_type
= module
;
955 adapter
->ahw
->link_autoneg
= autoneg
;
958 adapter
->ahw
->link_speed
= link_speed
;
960 adapter
->ahw
->link_speed
= SPEED_UNKNOWN
;
961 adapter
->ahw
->link_duplex
= DUPLEX_UNKNOWN
;
965 static void qlcnic_handle_fw_message(int desc_cnt
, int index
,
966 struct qlcnic_host_sds_ring
*sds_ring
)
968 struct qlcnic_fw_msg msg
;
969 struct status_desc
*desc
;
970 struct qlcnic_adapter
*adapter
;
972 int i
= 0, opcode
, ret
;
974 while (desc_cnt
> 0 && i
< 8) {
975 desc
= &sds_ring
->desc_head
[index
];
976 msg
.words
[i
++] = le64_to_cpu(desc
->status_desc_data
[0]);
977 msg
.words
[i
++] = le64_to_cpu(desc
->status_desc_data
[1]);
979 index
= get_next_index(index
, sds_ring
->num_desc
);
983 adapter
= sds_ring
->adapter
;
984 dev
= &adapter
->pdev
->dev
;
985 opcode
= qlcnic_get_nic_msg_opcode(msg
.body
[0]);
988 case QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE
:
989 qlcnic_handle_linkevent(adapter
, &msg
);
991 case QLCNIC_C2H_OPCODE_CONFIG_LOOPBACK
:
992 ret
= (u32
)(msg
.body
[1]);
995 adapter
->ahw
->loopback_state
|= QLCNIC_LB_RESPONSE
;
998 dev_info(dev
, "loopback already in progress\n");
999 adapter
->ahw
->diag_cnt
= -EINPROGRESS
;
1002 dev_info(dev
, "loopback cable is not connected\n");
1003 adapter
->ahw
->diag_cnt
= -ENODEV
;
1007 "loopback configure request failed, err %x\n",
1009 adapter
->ahw
->diag_cnt
= -EIO
;
1013 case QLCNIC_C2H_OPCODE_GET_DCB_AEN
:
1014 qlcnic_dcb_handle_aen(adapter
, (void *)&msg
);
1021 struct sk_buff
*qlcnic_process_rxbuf(struct qlcnic_adapter
*adapter
,
1022 struct qlcnic_host_rds_ring
*ring
,
1023 u16 index
, u16 cksum
)
1025 struct qlcnic_rx_buffer
*buffer
;
1026 struct sk_buff
*skb
;
1028 buffer
= &ring
->rx_buf_arr
[index
];
1029 if (unlikely(buffer
->skb
== NULL
)) {
1034 pci_unmap_single(adapter
->pdev
, buffer
->dma
, ring
->dma_size
,
1035 PCI_DMA_FROMDEVICE
);
1038 if (likely((adapter
->netdev
->features
& NETIF_F_RXCSUM
) &&
1039 (cksum
== STATUS_CKSUM_OK
|| cksum
== STATUS_CKSUM_LOOP
))) {
1040 adapter
->stats
.csummed
++;
1041 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1043 skb_checksum_none_assert(skb
);
1052 static inline int qlcnic_check_rx_tagging(struct qlcnic_adapter
*adapter
,
1053 struct sk_buff
*skb
, u16
*vlan_tag
)
1055 struct ethhdr
*eth_hdr
;
1057 if (!__vlan_get_tag(skb
, vlan_tag
)) {
1058 eth_hdr
= (struct ethhdr
*)skb
->data
;
1059 memmove(skb
->data
+ VLAN_HLEN
, eth_hdr
, ETH_ALEN
* 2);
1060 skb_pull(skb
, VLAN_HLEN
);
1062 if (!adapter
->rx_pvid
)
1065 if (*vlan_tag
== adapter
->rx_pvid
) {
1066 /* Outer vlan tag. Packet should follow non-vlan path */
1070 if (adapter
->flags
& QLCNIC_TAGGING_ENABLED
)
1076 static struct qlcnic_rx_buffer
*
1077 qlcnic_process_rcv(struct qlcnic_adapter
*adapter
,
1078 struct qlcnic_host_sds_ring
*sds_ring
, int ring
,
1081 struct net_device
*netdev
= adapter
->netdev
;
1082 struct qlcnic_recv_context
*recv_ctx
= adapter
->recv_ctx
;
1083 struct qlcnic_rx_buffer
*buffer
;
1084 struct sk_buff
*skb
;
1085 struct qlcnic_host_rds_ring
*rds_ring
;
1086 int index
, length
, cksum
, pkt_offset
, is_lb_pkt
;
1087 u16 vid
= 0xffff, t_vid
;
1089 if (unlikely(ring
>= adapter
->max_rds_rings
))
1092 rds_ring
= &recv_ctx
->rds_rings
[ring
];
1094 index
= qlcnic_get_sts_refhandle(sts_data0
);
1095 if (unlikely(index
>= rds_ring
->num_desc
))
1098 buffer
= &rds_ring
->rx_buf_arr
[index
];
1099 length
= qlcnic_get_sts_totallength(sts_data0
);
1100 cksum
= qlcnic_get_sts_status(sts_data0
);
1101 pkt_offset
= qlcnic_get_sts_pkt_offset(sts_data0
);
1103 skb
= qlcnic_process_rxbuf(adapter
, rds_ring
, index
, cksum
);
1107 if (adapter
->drv_mac_learn
&&
1108 (adapter
->flags
& QLCNIC_ESWITCH_ENABLED
)) {
1110 is_lb_pkt
= qlcnic_82xx_is_lb_pkt(sts_data0
);
1111 qlcnic_add_lb_filter(adapter
, skb
, is_lb_pkt
, t_vid
);
1114 if (length
> rds_ring
->skb_size
)
1115 skb_put(skb
, rds_ring
->skb_size
);
1117 skb_put(skb
, length
);
1120 skb_pull(skb
, pkt_offset
);
1122 if (unlikely(qlcnic_check_rx_tagging(adapter
, skb
, &vid
))) {
1123 adapter
->stats
.rxdropped
++;
1128 skb
->protocol
= eth_type_trans(skb
, netdev
);
1131 __vlan_hwaccel_put_tag(skb
, htons(ETH_P_8021Q
), vid
);
1133 napi_gro_receive(&sds_ring
->napi
, skb
);
1135 adapter
->stats
.rx_pkts
++;
1136 adapter
->stats
.rxbytes
+= length
;
1141 #define QLC_TCP_HDR_SIZE 20
1142 #define QLC_TCP_TS_OPTION_SIZE 12
1143 #define QLC_TCP_TS_HDR_SIZE (QLC_TCP_HDR_SIZE + QLC_TCP_TS_OPTION_SIZE)
static struct qlcnic_rx_buffer *
qlcnic_process_lro(struct qlcnic_adapter *adapter,
		   int ring, u64 sts_data0, u64 sts_data1)
{
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_rx_buffer *buffer;
	struct sk_buff *skb;
	struct qlcnic_host_rds_ring *rds_ring;
	struct iphdr *iph;
	struct ipv6hdr *ipv6h;
	struct tcphdr *th;
	bool push, timestamp;
	int index, l2_hdr_offset, l4_hdr_offset, is_lb_pkt;
	u16 lro_length, length, data_offset, t_vid, vid = 0xffff;
	u32 seq_number;

	if (unlikely(ring > adapter->max_rds_rings))
		return NULL;

	rds_ring = &recv_ctx->rds_rings[ring];

	index = qlcnic_get_lro_sts_refhandle(sts_data0);
	if (unlikely(index > rds_ring->num_desc))
		return NULL;

	buffer = &rds_ring->rx_buf_arr[index];

	timestamp = qlcnic_get_lro_sts_timestamp(sts_data0);
	lro_length = qlcnic_get_lro_sts_length(sts_data0);
	l2_hdr_offset = qlcnic_get_lro_sts_l2_hdr_offset(sts_data0);
	l4_hdr_offset = qlcnic_get_lro_sts_l4_hdr_offset(sts_data0);
	push = qlcnic_get_lro_sts_push_flag(sts_data0);
	seq_number = qlcnic_get_lro_sts_seq_number(sts_data1);

	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
	if (!skb)
		return buffer;

	if (adapter->drv_mac_learn &&
	    (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
		t_vid = 0;
		is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0);
		qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
	}

	if (timestamp)
		data_offset = l4_hdr_offset + QLC_TCP_TS_HDR_SIZE;
	else
		data_offset = l4_hdr_offset + QLC_TCP_HDR_SIZE;

	skb_put(skb, lro_length + data_offset);
	skb_pull(skb, l2_hdr_offset);

	if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
		adapter->stats.rxdropped++;
		dev_kfree_skb(skb);
		return buffer;
	}

	skb->protocol = eth_type_trans(skb, netdev);

	if (ntohs(skb->protocol) == ETH_P_IPV6) {
		ipv6h = (struct ipv6hdr *)skb->data;
		th = (struct tcphdr *)(skb->data + sizeof(struct ipv6hdr));
		length = (th->doff << 2) + lro_length;
		ipv6h->payload_len = htons(length);
	} else {
		iph = (struct iphdr *)skb->data;
		th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
		length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
		csum_replace2(&iph->check, iph->tot_len, htons(length));
		iph->tot_len = htons(length);
	}

	th->psh = push;
	th->seq = htonl(seq_number);
	length = skb->len;

	if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP) {
		skb_shinfo(skb)->gso_size = qlcnic_get_lro_sts_mss(sts_data1);
		if (skb->protocol == htons(ETH_P_IPV6))
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
		else
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
	}

	if (vid != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
	netif_receive_skb(skb);

	adapter->stats.lro_pkts++;
	adapter->stats.lrobytes += length;

	return buffer;
}
*sds_ring
, int max
)
1244 struct qlcnic_host_rds_ring
*rds_ring
;
1245 struct qlcnic_adapter
*adapter
= sds_ring
->adapter
;
1246 struct list_head
*cur
;
1247 struct status_desc
*desc
;
1248 struct qlcnic_rx_buffer
*rxbuf
;
1249 int opcode
, desc_cnt
, count
= 0;
1250 u64 sts_data0
, sts_data1
;
1252 u32 consumer
= sds_ring
->consumer
;
1254 while (count
< max
) {
1255 desc
= &sds_ring
->desc_head
[consumer
];
1256 sts_data0
= le64_to_cpu(desc
->status_desc_data
[0]);
1258 if (!(sts_data0
& STATUS_OWNER_HOST
))
1261 desc_cnt
= qlcnic_get_sts_desc_cnt(sts_data0
);
1262 opcode
= qlcnic_get_sts_opcode(sts_data0
);
1264 case QLCNIC_RXPKT_DESC
:
1265 case QLCNIC_OLD_RXPKT_DESC
:
1266 case QLCNIC_SYN_OFFLOAD
:
1267 ring
= qlcnic_get_sts_type(sts_data0
);
1268 rxbuf
= qlcnic_process_rcv(adapter
, sds_ring
, ring
,
1271 case QLCNIC_LRO_DESC
:
1272 ring
= qlcnic_get_lro_sts_type(sts_data0
);
1273 sts_data1
= le64_to_cpu(desc
->status_desc_data
[1]);
1274 rxbuf
= qlcnic_process_lro(adapter
, ring
, sts_data0
,
1277 case QLCNIC_RESPONSE_DESC
:
1278 qlcnic_handle_fw_message(desc_cnt
, consumer
, sds_ring
);
1282 WARN_ON(desc_cnt
> 1);
1285 list_add_tail(&rxbuf
->list
, &sds_ring
->free_list
[ring
]);
1287 adapter
->stats
.null_rxbuf
++;
1289 for (; desc_cnt
> 0; desc_cnt
--) {
1290 desc
= &sds_ring
->desc_head
[consumer
];
1291 desc
->status_desc_data
[0] = QLCNIC_DESC_OWNER_FW
;
1292 consumer
= get_next_index(consumer
, sds_ring
->num_desc
);
1297 for (ring
= 0; ring
< adapter
->max_rds_rings
; ring
++) {
1298 rds_ring
= &adapter
->recv_ctx
->rds_rings
[ring
];
1299 if (!list_empty(&sds_ring
->free_list
[ring
])) {
1300 list_for_each(cur
, &sds_ring
->free_list
[ring
]) {
1301 rxbuf
= list_entry(cur
, struct qlcnic_rx_buffer
,
1303 qlcnic_alloc_rx_skb(adapter
, rds_ring
, rxbuf
);
1305 spin_lock(&rds_ring
->lock
);
1306 list_splice_tail_init(&sds_ring
->free_list
[ring
],
1307 &rds_ring
->free_list
);
1308 spin_unlock(&rds_ring
->lock
);
1311 qlcnic_post_rx_buffers_nodb(adapter
, rds_ring
, ring
);
1315 sds_ring
->consumer
= consumer
;
1316 writel(consumer
, sds_ring
->crb_sts_consumer
);
void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
			    struct qlcnic_host_rds_ring *rds_ring, u8 ring_id)
{
	struct rcv_desc *pdesc;
	struct qlcnic_rx_buffer *buffer;
	int count = 0;
	u32 producer, handle;
	struct list_head *head;

	producer = rds_ring->producer;
	head = &rds_ring->free_list;

	while (!list_empty(head)) {

		buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);

		if (!buffer->skb) {
			if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
				break;
		}

		count++;
		list_del(&buffer->list);

		/* make a rcv descriptor */
		pdesc = &rds_ring->desc_head[producer];
		pdesc->addr_buffer = cpu_to_le64(buffer->dma);
		handle = qlcnic_get_ref_handle(adapter, buffer->ref_handle,
					       ring_id);
		pdesc->reference_handle = cpu_to_le16(handle);
		pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
		producer = get_next_index(producer, rds_ring->num_desc);
	}

	if (count) {
		rds_ring->producer = producer;
		writel((producer - 1) & (rds_ring->num_desc - 1),
		       rds_ring->crb_rcv_producer);
	}
}

static void dump_skb(struct sk_buff *skb, struct qlcnic_adapter *adapter)
{
	int i;
	unsigned char *data = skb->data;

	pr_info("\n");
	for (i = 0; i < skb->len; i++) {
		QLCDB(adapter, DRV, "%02x ", data[i]);
		if ((i & 0x0f) == 8)
			pr_info("\n");
	}
}
static void qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter, int ring,
				    u64 sts_data0)
{
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct sk_buff *skb;
	struct qlcnic_host_rds_ring *rds_ring;
	int index, length, cksum, pkt_offset;

	if (unlikely(ring >= adapter->max_rds_rings))
		return;

	rds_ring = &recv_ctx->rds_rings[ring];

	index = qlcnic_get_sts_refhandle(sts_data0);
	length = qlcnic_get_sts_totallength(sts_data0);
	if (unlikely(index >= rds_ring->num_desc))
		return;

	cksum = qlcnic_get_sts_status(sts_data0);
	pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);

	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
	if (!skb)
		return;

	if (length > rds_ring->skb_size)
		skb_put(skb, rds_ring->skb_size);
	else
		skb_put(skb, length);

	if (pkt_offset)
		skb_pull(skb, pkt_offset);

	if (!qlcnic_check_loopback_buff(skb->data, adapter->mac_addr))
		adapter->ahw->diag_cnt++;
	else
		dump_skb(skb, adapter);

	dev_kfree_skb_any(skb);
	adapter->stats.rx_pkts++;
	adapter->stats.rxbytes += length;
}

void qlcnic_82xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
{
	struct qlcnic_adapter *adapter = sds_ring->adapter;
	struct status_desc *desc;
	u64 sts_data0;
	int ring, opcode, desc_cnt;
	u32 consumer = sds_ring->consumer;

	desc = &sds_ring->desc_head[consumer];
	sts_data0 = le64_to_cpu(desc->status_desc_data[0]);

	if (!(sts_data0 & STATUS_OWNER_HOST))
		return;

	desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
	opcode = qlcnic_get_sts_opcode(sts_data0);
	switch (opcode) {
	case QLCNIC_RESPONSE_DESC:
		qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
		break;
	default:
		ring = qlcnic_get_sts_type(sts_data0);
		qlcnic_process_rcv_diag(adapter, ring, sts_data0);
		break;
	}

	for (; desc_cnt > 0; desc_cnt--) {
		desc = &sds_ring->desc_head[consumer];
		desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
		consumer = get_next_index(consumer, sds_ring->num_desc);
	}

	sds_ring->consumer = consumer;
	writel(consumer, sds_ring->crb_sts_consumer);
}
int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
			 struct net_device *netdev)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_host_tx_ring *tx_ring;

	if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
		return -ENOMEM;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		if (qlcnic_check_multi_tx(adapter) &&
		    !adapter->ahw->diag_test &&
		    (adapter->max_drv_tx_rings > 1)) {
			netif_napi_add(netdev, &sds_ring->napi, qlcnic_rx_poll,
				       NAPI_POLL_WEIGHT);
		} else {
			if (ring == (adapter->max_sds_rings - 1))
				netif_napi_add(netdev, &sds_ring->napi,
					       qlcnic_poll,
					       NAPI_POLL_WEIGHT);
			else
				netif_napi_add(netdev, &sds_ring->napi,
					       qlcnic_rx_poll,
					       NAPI_POLL_WEIGHT);
		}
	}

	if (qlcnic_alloc_tx_rings(adapter, netdev)) {
		qlcnic_free_sds_rings(recv_ctx);
		return -ENOMEM;
	}

	if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) {
		for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
			tx_ring = &adapter->tx_ring[ring];
			netif_napi_add(netdev, &tx_ring->napi, qlcnic_tx_poll,
				       NAPI_POLL_WEIGHT);
		}
	}

	return 0;
}

void qlcnic_82xx_napi_del(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_host_tx_ring *tx_ring;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		netif_napi_del(&sds_ring->napi);
	}

	qlcnic_free_sds_rings(adapter->recv_ctx);

	if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) {
		for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
			tx_ring = &adapter->tx_ring[ring];
			netif_napi_del(&tx_ring->napi);
		}
	}

	qlcnic_free_tx_rings(adapter);
}

void qlcnic_82xx_napi_enable(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_host_tx_ring *tx_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		napi_enable(&sds_ring->napi);
		qlcnic_enable_int(sds_ring);
	}

	if (qlcnic_check_multi_tx(adapter) &&
	    (adapter->flags & QLCNIC_MSIX_ENABLED) &&
	    !adapter->ahw->diag_test &&
	    (adapter->max_drv_tx_rings > 1)) {
		for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
			tx_ring = &adapter->tx_ring[ring];
			napi_enable(&tx_ring->napi);
			qlcnic_enable_tx_intr(adapter, tx_ring);
		}
	}
}

void qlcnic_82xx_napi_disable(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_host_tx_ring *tx_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		qlcnic_disable_int(sds_ring);
		napi_synchronize(&sds_ring->napi);
		napi_disable(&sds_ring->napi);
	}

	if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
	    !adapter->ahw->diag_test &&
	    qlcnic_check_multi_tx(adapter)) {
		for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
			tx_ring = &adapter->tx_ring[ring];
			qlcnic_disable_tx_int(adapter, tx_ring);
			napi_synchronize(&tx_ring->napi);
			napi_disable(&tx_ring->napi);
		}
	}
}
#define QLC_83XX_NORMAL_LB_PKT	(1ULL << 36)
#define QLC_83XX_LRO_LB_PKT	(1ULL << 46)

static inline int qlcnic_83xx_is_lb_pkt(u64 sts_data, int lro_pkt)
{
	if (lro_pkt)
		return (sts_data & QLC_83XX_LRO_LB_PKT) ? 1 : 0;
	else
		return (sts_data & QLC_83XX_NORMAL_LB_PKT) ? 1 : 0;
}

static struct qlcnic_rx_buffer *
qlcnic_83xx_process_rcv(struct qlcnic_adapter *adapter,
			struct qlcnic_host_sds_ring *sds_ring,
			u8 ring, u64 sts_data[])
{
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_rx_buffer *buffer;
	struct sk_buff *skb;
	struct qlcnic_host_rds_ring *rds_ring;
	int index, length, cksum, is_lb_pkt;
	u16 vid = 0xffff, t_vid;

	if (unlikely(ring >= adapter->max_rds_rings))
		return NULL;

	rds_ring = &recv_ctx->rds_rings[ring];

	index = qlcnic_83xx_hndl(sts_data[0]);
	if (unlikely(index >= rds_ring->num_desc))
		return NULL;

	buffer = &rds_ring->rx_buf_arr[index];
	length = qlcnic_83xx_pktln(sts_data[0]);
	cksum = qlcnic_83xx_csum_status(sts_data[1]);
	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
	if (!skb)
		return buffer;

	if (adapter->drv_mac_learn &&
	    (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
		t_vid = 0;
		is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 0);
		qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
	}

	if (length > rds_ring->skb_size)
		skb_put(skb, rds_ring->skb_size);
	else
		skb_put(skb, length);

	if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
		adapter->stats.rxdropped++;
		dev_kfree_skb(skb);
		return buffer;
	}

	skb->protocol = eth_type_trans(skb, netdev);

	if (vid != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);

	napi_gro_receive(&sds_ring->napi, skb);

	adapter->stats.rx_pkts++;
	adapter->stats.rxbytes += length;

	return buffer;
}
static struct qlcnic_rx_buffer *
qlcnic_83xx_process_lro(struct qlcnic_adapter *adapter,
			u8 ring, u64 sts_data[])
{
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_rx_buffer *buffer;
	struct sk_buff *skb;
	struct qlcnic_host_rds_ring *rds_ring;
	struct iphdr *iph;
	struct ipv6hdr *ipv6h;
	struct tcphdr *th;
	bool push;
	int l2_hdr_offset, l4_hdr_offset;
	int index, is_lb_pkt;
	u16 lro_length, length, data_offset, gso_size;
	u16 vid = 0xffff, t_vid;

	if (unlikely(ring > adapter->max_rds_rings))
		return NULL;

	rds_ring = &recv_ctx->rds_rings[ring];

	index = qlcnic_83xx_hndl(sts_data[0]);
	if (unlikely(index > rds_ring->num_desc))
		return NULL;

	buffer = &rds_ring->rx_buf_arr[index];

	lro_length = qlcnic_83xx_lro_pktln(sts_data[0]);
	l2_hdr_offset = qlcnic_83xx_l2_hdr_off(sts_data[1]);
	l4_hdr_offset = qlcnic_83xx_l4_hdr_off(sts_data[1]);
	push = qlcnic_83xx_is_psh_bit(sts_data[1]);

	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
	if (!skb)
		return buffer;

	if (adapter->drv_mac_learn &&
	    (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
		t_vid = 0;
		is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 1);
		qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
	}
	if (qlcnic_83xx_is_tstamp(sts_data[1]))
		data_offset = l4_hdr_offset + QLCNIC_TCP_TS_HDR_SIZE;
	else
		data_offset = l4_hdr_offset + QLCNIC_TCP_HDR_SIZE;

	skb_put(skb, lro_length + data_offset);
	skb_pull(skb, l2_hdr_offset);

	if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
		adapter->stats.rxdropped++;
		dev_kfree_skb(skb);
		return buffer;
	}

	skb->protocol = eth_type_trans(skb, netdev);
	if (ntohs(skb->protocol) == ETH_P_IPV6) {
		ipv6h = (struct ipv6hdr *)skb->data;
		th = (struct tcphdr *)(skb->data + sizeof(struct ipv6hdr));

		length = (th->doff << 2) + lro_length;
		ipv6h->payload_len = htons(length);
	} else {
		iph = (struct iphdr *)skb->data;
		th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
		length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
		csum_replace2(&iph->check, iph->tot_len, htons(length));
		iph->tot_len = htons(length);
	}

	th->psh = push;
	length = skb->len;

	if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP) {
		gso_size = qlcnic_83xx_get_lro_sts_mss(sts_data[0]);
		skb_shinfo(skb)->gso_size = gso_size;
		if (skb->protocol == htons(ETH_P_IPV6))
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
		else
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
	}

	if (vid != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);

	netif_receive_skb(skb);

	adapter->stats.lro_pkts++;
	adapter->stats.lrobytes += length;
	return buffer;
}
static int qlcnic_83xx_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring,
					int max)
{
	struct qlcnic_host_rds_ring *rds_ring;
	struct qlcnic_adapter *adapter = sds_ring->adapter;
	struct list_head *cur;
	struct status_desc *desc;
	struct qlcnic_rx_buffer *rxbuf = NULL;
	u8 ring;
	u64 sts_data[2];
	int count = 0, opcode;
	u32 consumer = sds_ring->consumer;

	while (count < max) {
		desc = &sds_ring->desc_head[consumer];
		sts_data[1] = le64_to_cpu(desc->status_desc_data[1]);
		opcode = qlcnic_83xx_opcode(sts_data[1]);
		if (!opcode)
			break;
		sts_data[0] = le64_to_cpu(desc->status_desc_data[0]);
		ring = QLCNIC_FETCH_RING_ID(sts_data[0]);

		switch (opcode) {
		case QLC_83XX_REG_DESC:
			rxbuf = qlcnic_83xx_process_rcv(adapter, sds_ring,
							ring, sts_data);
			break;
		case QLC_83XX_LRO_DESC:
			rxbuf = qlcnic_83xx_process_lro(adapter, ring,
							sts_data);
			break;
		default:
			dev_info(&adapter->pdev->dev,
				 "Unknown opcode: 0x%x\n", opcode);
			goto skip;
		}

		if (likely(rxbuf))
			list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
		else
			adapter->stats.null_rxbuf++;
skip:
		desc = &sds_ring->desc_head[consumer];
		/* Reset the descriptor */
		desc->status_desc_data[1] = 0;
		consumer = get_next_index(consumer, sds_ring->num_desc);
		count++;
	}
	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &adapter->recv_ctx->rds_rings[ring];
		if (!list_empty(&sds_ring->free_list[ring])) {
			list_for_each(cur, &sds_ring->free_list[ring]) {
				rxbuf = list_entry(cur, struct qlcnic_rx_buffer,
						   list);
				qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf);
			}
			spin_lock(&rds_ring->lock);
			list_splice_tail_init(&sds_ring->free_list[ring],
					      &rds_ring->free_list);
			spin_unlock(&rds_ring->lock);
		}
		qlcnic_post_rx_buffers_nodb(adapter, rds_ring, ring);
	}
	if (count) {
		sds_ring->consumer = consumer;
		writel(consumer, sds_ring->crb_sts_consumer);
	}
	return count;
}
static int qlcnic_83xx_msix_sriov_vf_poll(struct napi_struct *napi, int budget)
{
	int tx_complete;
	int work_done;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_adapter *adapter;
	struct qlcnic_host_tx_ring *tx_ring;

	sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
	adapter = sds_ring->adapter;
	/* tx ring count = 1 */
	tx_ring = adapter->tx_ring;

	tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
	work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
	if ((work_done < budget) && tx_complete) {
		napi_complete(&sds_ring->napi);
		qlcnic_83xx_enable_intr(adapter, sds_ring);
	}

	return work_done;
}

static int qlcnic_83xx_poll(struct napi_struct *napi, int budget)
{
	int tx_complete;
	int work_done;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_adapter *adapter;
	struct qlcnic_host_tx_ring *tx_ring;

	sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
	adapter = sds_ring->adapter;
	/* tx ring count = 1 */
	tx_ring = adapter->tx_ring;

	tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
	work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
	if ((work_done < budget) && tx_complete) {
		napi_complete(&sds_ring->napi);
		qlcnic_83xx_enable_intr(adapter, sds_ring);
	}

	return work_done;
}

static int qlcnic_83xx_msix_tx_poll(struct napi_struct *napi, int budget)
{
	int work_done;
	struct qlcnic_host_tx_ring *tx_ring;
	struct qlcnic_adapter *adapter;

	budget = QLCNIC_TX_POLL_BUDGET;
	tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi);
	adapter = tx_ring->adapter;
	work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
	if (work_done) {
		napi_complete(&tx_ring->napi);
		if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
			qlcnic_83xx_enable_tx_intr(adapter, tx_ring);
	}

	return work_done;
}

static int qlcnic_83xx_rx_poll(struct napi_struct *napi, int budget)
{
	int work_done;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_adapter *adapter;

	sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
	adapter = sds_ring->adapter;
	work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
	if (work_done < budget) {
		napi_complete(&sds_ring->napi);
		if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
			qlcnic_83xx_enable_intr(adapter, sds_ring);
	}

	return work_done;
}
void qlcnic_83xx_napi_enable(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_host_tx_ring *tx_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		napi_enable(&sds_ring->napi);
		if (adapter->flags & QLCNIC_MSIX_ENABLED)
			qlcnic_83xx_enable_intr(adapter, sds_ring);
	}

	if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
	    !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
		for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
			tx_ring = &adapter->tx_ring[ring];
			napi_enable(&tx_ring->napi);
			qlcnic_83xx_enable_tx_intr(adapter, tx_ring);
		}
	}
}

void qlcnic_83xx_napi_disable(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_host_tx_ring *tx_ring;

	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		if (adapter->flags & QLCNIC_MSIX_ENABLED)
			qlcnic_83xx_disable_intr(adapter, sds_ring);
		napi_synchronize(&sds_ring->napi);
		napi_disable(&sds_ring->napi);
	}

	if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
	    !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
		for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
			tx_ring = &adapter->tx_ring[ring];
			qlcnic_83xx_disable_tx_intr(adapter, tx_ring);
			napi_synchronize(&tx_ring->napi);
			napi_disable(&tx_ring->napi);
		}
	}
}

int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter,
			 struct net_device *netdev)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_host_tx_ring *tx_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
		return -ENOMEM;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		if (adapter->flags & QLCNIC_MSIX_ENABLED) {
			if (!(adapter->flags & QLCNIC_TX_INTR_SHARED))
				netif_napi_add(netdev, &sds_ring->napi,
					       qlcnic_83xx_rx_poll,
					       NAPI_POLL_WEIGHT);
			else
				netif_napi_add(netdev, &sds_ring->napi,
					       qlcnic_83xx_msix_sriov_vf_poll,
					       NAPI_POLL_WEIGHT);

		} else {
			netif_napi_add(netdev, &sds_ring->napi,
				       qlcnic_83xx_poll,
				       NAPI_POLL_WEIGHT);
		}
	}

	if (qlcnic_alloc_tx_rings(adapter, netdev)) {
		qlcnic_free_sds_rings(recv_ctx);
		return -ENOMEM;
	}

	if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
	    !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
		for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
			tx_ring = &adapter->tx_ring[ring];
			netif_napi_add(netdev, &tx_ring->napi,
				       qlcnic_83xx_msix_tx_poll,
				       NAPI_POLL_WEIGHT);
		}
	}

	return 0;
}
void qlcnic_83xx_napi_del(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_host_tx_ring *tx_ring;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		netif_napi_del(&sds_ring->napi);
	}

	qlcnic_free_sds_rings(adapter->recv_ctx);

	if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
	    !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
		for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
			tx_ring = &adapter->tx_ring[ring];
			netif_napi_del(&tx_ring->napi);
		}
	}

	qlcnic_free_tx_rings(adapter);
}

void qlcnic_83xx_process_rcv_diag(struct qlcnic_adapter *adapter,
				  int ring, u64 sts_data[])
{
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct sk_buff *skb;
	struct qlcnic_host_rds_ring *rds_ring;
	int index, length;

	if (unlikely(ring >= adapter->max_rds_rings))
		return;

	rds_ring = &recv_ctx->rds_rings[ring];
	index = qlcnic_83xx_hndl(sts_data[0]);
	if (unlikely(index >= rds_ring->num_desc))
		return;

	length = qlcnic_83xx_pktln(sts_data[0]);

	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
	if (!skb)
		return;

	if (length > rds_ring->skb_size)
		skb_put(skb, rds_ring->skb_size);
	else
		skb_put(skb, length);

	if (!qlcnic_check_loopback_buff(skb->data, adapter->mac_addr))
		adapter->ahw->diag_cnt++;
	else
		dump_skb(skb, adapter);

	dev_kfree_skb_any(skb);
}

void qlcnic_83xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
{
	struct qlcnic_adapter *adapter = sds_ring->adapter;
	struct status_desc *desc;
	u64 sts_data[2];
	int ring, opcode;
	u32 consumer = sds_ring->consumer;

	desc = &sds_ring->desc_head[consumer];
	sts_data[0] = le64_to_cpu(desc->status_desc_data[0]);
	sts_data[1] = le64_to_cpu(desc->status_desc_data[1]);
	opcode = qlcnic_83xx_opcode(sts_data[1]);
	if (!opcode)
		return;

	ring = QLCNIC_FETCH_RING_ID(qlcnic_83xx_hndl(sts_data[0]));
	qlcnic_83xx_process_rcv_diag(adapter, ring, sts_data);
	desc = &sds_ring->desc_head[consumer];
	desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
	consumer = get_next_index(consumer, sds_ring->num_desc);
	sds_ring->consumer = consumer;
	writel(consumer, sds_ring->crb_sts_consumer);
}