2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
18 #include <linux/errno.h>
19 #include <linux/pci.h>
20 #include <linux/skbuff.h>
21 #include <linux/interrupt.h>
22 #include <linux/spinlock.h>
23 #include <linux/if_ether.h>
24 #include <linux/if_vlan.h>
25 #include <linux/workqueue.h>
26 #include <scsi/fc/fc_els.h>
27 #include <scsi/fc/fc_fcoe.h>
28 #include <scsi/fc_frame.h>
29 #include <scsi/libfc.h>
32 #include "cq_enet_desc.h"
33 #include "cq_exch_desc.h"
/* Work queue on which link and incoming-frame events are processed
 * (queued from the RQ completion path, serviced by fnic_handle_link()
 * and fnic_handle_frame()).
 */
struct workqueue_struct *fnic_event_queue;
37 void fnic_handle_link(struct work_struct
*work
)
39 struct fnic
*fnic
= container_of(work
, struct fnic
, link_work
);
42 u32 old_link_down_cnt
;
44 spin_lock_irqsave(&fnic
->fnic_lock
, flags
);
46 if (fnic
->stop_rx_link_events
) {
47 spin_unlock_irqrestore(&fnic
->fnic_lock
, flags
);
51 old_link_down_cnt
= fnic
->link_down_cnt
;
52 old_link_status
= fnic
->link_status
;
53 fnic
->link_status
= vnic_dev_link_status(fnic
->vdev
);
54 fnic
->link_down_cnt
= vnic_dev_link_down_cnt(fnic
->vdev
);
56 if (old_link_status
== fnic
->link_status
) {
57 if (!fnic
->link_status
)
59 spin_unlock_irqrestore(&fnic
->fnic_lock
, flags
);
61 if (old_link_down_cnt
!= fnic
->link_down_cnt
) {
62 /* UP -> DOWN -> UP */
63 fnic
->lport
->host_stats
.link_failure_count
++;
64 spin_unlock_irqrestore(&fnic
->fnic_lock
, flags
);
65 FNIC_FCS_DBG(KERN_DEBUG
, fnic
->lport
->host
,
67 fc_linkdown(fnic
->lport
);
68 FNIC_FCS_DBG(KERN_DEBUG
, fnic
->lport
->host
,
70 fc_linkup(fnic
->lport
);
73 spin_unlock_irqrestore(&fnic
->fnic_lock
, flags
);
75 } else if (fnic
->link_status
) {
77 spin_unlock_irqrestore(&fnic
->fnic_lock
, flags
);
78 FNIC_FCS_DBG(KERN_DEBUG
, fnic
->lport
->host
, "link up\n");
79 fc_linkup(fnic
->lport
);
82 fnic
->lport
->host_stats
.link_failure_count
++;
83 spin_unlock_irqrestore(&fnic
->fnic_lock
, flags
);
84 FNIC_FCS_DBG(KERN_DEBUG
, fnic
->lport
->host
, "link down\n");
85 fc_linkdown(fnic
->lport
);
91 * This function passes incoming fabric frames to libFC
93 void fnic_handle_frame(struct work_struct
*work
)
95 struct fnic
*fnic
= container_of(work
, struct fnic
, frame_work
);
96 struct fc_lport
*lp
= fnic
->lport
;
101 while ((skb
= skb_dequeue(&fnic
->frame_queue
))) {
103 spin_lock_irqsave(&fnic
->fnic_lock
, flags
);
104 if (fnic
->stop_rx_link_events
) {
105 spin_unlock_irqrestore(&fnic
->fnic_lock
, flags
);
109 fp
= (struct fc_frame
*)skb
;
110 /* if Flogi resp frame, register the address */
112 vnic_dev_add_addr(fnic
->vdev
,
113 fnic
->data_src_addr
);
116 spin_unlock_irqrestore(&fnic
->fnic_lock
, flags
);
118 fc_exch_recv(lp
, fp
);
123 static inline void fnic_import_rq_fc_frame(struct sk_buff
*skb
,
124 u32 len
, u8 sof
, u8 eof
)
126 struct fc_frame
*fp
= (struct fc_frame
*)skb
;
134 static inline int fnic_import_rq_eth_pkt(struct sk_buff
*skb
, u32 len
)
138 struct vlan_ethhdr
*vh
;
139 struct fcoe_hdr
*fcoe_hdr
;
140 struct fcoe_crc_eof
*ft
;
141 u32 transport_len
= 0;
143 eh
= (struct ethhdr
*)skb
->data
;
144 vh
= (struct vlan_ethhdr
*)skb
->data
;
145 if (vh
->h_vlan_proto
== htons(ETH_P_8021Q
) &&
146 vh
->h_vlan_encapsulated_proto
== htons(ETH_P_FCOE
)) {
147 skb_pull(skb
, sizeof(struct vlan_ethhdr
));
148 transport_len
+= sizeof(struct vlan_ethhdr
);
149 } else if (eh
->h_proto
== htons(ETH_P_FCOE
)) {
150 transport_len
+= sizeof(struct ethhdr
);
151 skb_pull(skb
, sizeof(struct ethhdr
));
155 fcoe_hdr
= (struct fcoe_hdr
*)skb
->data
;
156 if (FC_FCOE_DECAPS_VER(fcoe_hdr
) != FC_FCOE_VER
)
159 fp
= (struct fc_frame
*)skb
;
161 fr_sof(fp
) = fcoe_hdr
->fcoe_sof
;
162 skb_pull(skb
, sizeof(struct fcoe_hdr
));
163 transport_len
+= sizeof(struct fcoe_hdr
);
165 ft
= (struct fcoe_crc_eof
*)(skb
->data
+ len
-
166 transport_len
- sizeof(*ft
));
167 fr_eof(fp
) = ft
->fcoe_eof
;
168 skb_trim(skb
, len
- transport_len
- sizeof(*ft
));
172 static inline int fnic_handle_flogi_resp(struct fnic
*fnic
,
175 u8 mac
[ETH_ALEN
] = FC_FCOE_FLOGI_MAC
;
176 struct ethhdr
*eth_hdr
;
177 struct fc_frame_header
*fh
;
180 struct fc_frame
*old_flogi_resp
= NULL
;
182 fh
= (struct fc_frame_header
*)fr_hdr(fp
);
184 spin_lock_irqsave(&fnic
->fnic_lock
, flags
);
186 if (fnic
->state
== FNIC_IN_ETH_MODE
) {
189 * Check if oxid matches on taking the lock. A new Flogi
190 * issued by libFC might have changed the fnic cached oxid
192 if (fnic
->flogi_oxid
!= ntohs(fh
->fh_ox_id
)) {
193 FNIC_FCS_DBG(KERN_DEBUG
, fnic
->lport
->host
,
194 "Flogi response oxid not"
195 " matching cached oxid, dropping frame"
198 spin_unlock_irqrestore(&fnic
->fnic_lock
, flags
);
199 dev_kfree_skb_irq(fp_skb(fp
));
200 goto handle_flogi_resp_end
;
203 /* Drop older cached flogi response frame, cache this frame */
204 old_flogi_resp
= fnic
->flogi_resp
;
205 fnic
->flogi_resp
= fp
;
206 fnic
->flogi_oxid
= FC_XID_UNKNOWN
;
209 * this frame is part of flogi get the src mac addr from this
210 * frame if the src mac is fcoui based then we mark the
211 * address mode flag to use fcoui base for dst mac addr
212 * otherwise we have to store the fcoe gateway addr
214 eth_hdr
= (struct ethhdr
*)skb_mac_header(fp_skb(fp
));
215 memcpy(mac
, eth_hdr
->h_source
, ETH_ALEN
);
217 if (ntoh24(mac
) == FC_FCOE_OUI
)
218 fnic
->fcoui_mode
= 1;
220 fnic
->fcoui_mode
= 0;
221 memcpy(fnic
->dest_addr
, mac
, ETH_ALEN
);
225 * Except for Flogi frame, all outbound frames from us have the
226 * Eth Src address as FC_FCOE_OUI"our_sid". Flogi frame uses
227 * the vnic MAC address as the Eth Src address
229 fc_fcoe_set_mac(fnic
->data_src_addr
, fh
->fh_d_id
);
231 /* We get our s_id from the d_id of the flogi resp frame */
232 fnic
->s_id
= ntoh24(fh
->fh_d_id
);
234 /* Change state to reflect transition from Eth to FC mode */
235 fnic
->state
= FNIC_IN_ETH_TRANS_FC_MODE
;
238 FNIC_FCS_DBG(KERN_DEBUG
, fnic
->lport
->host
,
239 "Unexpected fnic state %s while"
240 " processing flogi resp\n",
241 fnic_state_to_str(fnic
->state
));
243 spin_unlock_irqrestore(&fnic
->fnic_lock
, flags
);
244 dev_kfree_skb_irq(fp_skb(fp
));
245 goto handle_flogi_resp_end
;
248 spin_unlock_irqrestore(&fnic
->fnic_lock
, flags
);
250 /* Drop older cached frame */
252 dev_kfree_skb_irq(fp_skb(old_flogi_resp
));
255 * send flogi reg request to firmware, this will put the fnic in
258 ret
= fnic_flogi_reg_handler(fnic
);
262 spin_lock_irqsave(&fnic
->fnic_lock
, flags
);
264 * free the frame is some other thread is not
267 if (fnic
->flogi_resp
!= fp
)
270 fnic
->flogi_resp
= NULL
;
272 if (fnic
->state
== FNIC_IN_ETH_TRANS_FC_MODE
)
273 fnic
->state
= FNIC_IN_ETH_MODE
;
274 spin_unlock_irqrestore(&fnic
->fnic_lock
, flags
);
276 dev_kfree_skb_irq(fp_skb(fp
));
279 handle_flogi_resp_end
:
283 /* Returns 1 for a response that matches cached flogi oxid */
284 static inline int is_matching_flogi_resp_frame(struct fnic
*fnic
,
287 struct fc_frame_header
*fh
;
291 fh
= fc_frame_header_get(fp
);
292 f_ctl
= ntoh24(fh
->fh_f_ctl
);
294 if (fnic
->flogi_oxid
== ntohs(fh
->fh_ox_id
) &&
295 fh
->fh_r_ctl
== FC_RCTL_ELS_REP
&&
296 (f_ctl
& (FC_FC_EX_CTX
| FC_FC_SEQ_CTX
)) == FC_FC_EX_CTX
&&
297 fh
->fh_type
== FC_TYPE_ELS
)
303 static void fnic_rq_cmpl_frame_recv(struct vnic_rq
*rq
, struct cq_desc
304 *cq_desc
, struct vnic_rq_buf
*buf
,
305 int skipped
__attribute__((unused
)),
308 struct fnic
*fnic
= vnic_dev_priv(rq
->vdev
);
311 unsigned int eth_hdrs_stripped
;
312 u8 type
, color
, eop
, sop
, ingress_port
, vlan_stripped
;
313 u8 fcoe
= 0, fcoe_sof
, fcoe_eof
;
314 u8 fcoe_fc_crc_ok
= 1, fcoe_enc_error
= 0;
315 u8 tcp_udp_csum_ok
, udp
, tcp
, ipv4_csum_ok
;
316 u8 ipv6
, ipv4
, ipv4_fragment
, rss_type
, csum_not_calc
;
317 u8 fcs_ok
= 1, packet_error
= 0;
318 u16 q_number
, completed_index
, bytes_written
= 0, vlan
, checksum
;
320 u16 exchange_id
, tmpl
;
323 u32 fcp_bytes_written
= 0;
326 pci_unmap_single(fnic
->pdev
, buf
->dma_addr
, buf
->len
,
331 cq_desc_dec(cq_desc
, &type
, &color
, &q_number
, &completed_index
);
332 if (type
== CQ_DESC_TYPE_RQ_FCP
) {
333 cq_fcp_rq_desc_dec((struct cq_fcp_rq_desc
*)cq_desc
,
334 &type
, &color
, &q_number
, &completed_index
,
335 &eop
, &sop
, &fcoe_fc_crc_ok
, &exchange_id
,
336 &tmpl
, &fcp_bytes_written
, &sof
, &eof
,
337 &ingress_port
, &packet_error
,
338 &fcoe_enc_error
, &fcs_ok
, &vlan_stripped
,
340 eth_hdrs_stripped
= 1;
342 } else if (type
== CQ_DESC_TYPE_RQ_ENET
) {
343 cq_enet_rq_desc_dec((struct cq_enet_rq_desc
*)cq_desc
,
344 &type
, &color
, &q_number
, &completed_index
,
345 &ingress_port
, &fcoe
, &eop
, &sop
,
346 &rss_type
, &csum_not_calc
, &rss_hash
,
347 &bytes_written
, &packet_error
,
348 &vlan_stripped
, &vlan
, &checksum
,
349 &fcoe_sof
, &fcoe_fc_crc_ok
,
350 &fcoe_enc_error
, &fcoe_eof
,
351 &tcp_udp_csum_ok
, &udp
, &tcp
,
352 &ipv4_csum_ok
, &ipv6
, &ipv4
,
353 &ipv4_fragment
, &fcs_ok
);
354 eth_hdrs_stripped
= 0;
358 shost_printk(KERN_ERR
, fnic
->lport
->host
,
359 "fnic rq_cmpl wrong cq type x%x\n", type
);
363 if (!fcs_ok
|| packet_error
|| !fcoe_fc_crc_ok
|| fcoe_enc_error
) {
364 FNIC_FCS_DBG(KERN_DEBUG
, fnic
->lport
->host
,
365 "fnic rq_cmpl fcoe x%x fcsok x%x"
366 " pkterr x%x fcoe_fc_crc_ok x%x, fcoe_enc_err"
368 fcoe
, fcs_ok
, packet_error
,
369 fcoe_fc_crc_ok
, fcoe_enc_error
);
373 if (eth_hdrs_stripped
)
374 fnic_import_rq_fc_frame(skb
, fcp_bytes_written
, sof
, eof
);
375 else if (fnic_import_rq_eth_pkt(skb
, bytes_written
))
378 fp
= (struct fc_frame
*)skb
;
381 * If frame is an ELS response that matches the cached FLOGI OX_ID,
382 * and is accept, issue flogi_reg_request copy wq request to firmware
383 * to register the S_ID and determine whether FC_OUI mode or GW mode.
385 if (is_matching_flogi_resp_frame(fnic
, fp
)) {
386 if (!eth_hdrs_stripped
) {
387 if (fc_frame_payload_op(fp
) == ELS_LS_ACC
) {
388 fnic_handle_flogi_resp(fnic
, fp
);
392 * Recd. Flogi reject. No point registering
393 * with fw, but forward to libFC
399 if (!eth_hdrs_stripped
)
403 spin_lock_irqsave(&fnic
->fnic_lock
, flags
);
404 if (fnic
->stop_rx_link_events
) {
405 spin_unlock_irqrestore(&fnic
->fnic_lock
, flags
);
408 /* Use fr_flags to indicate whether succ. flogi resp or not */
410 fr_dev(fp
) = fnic
->lport
;
411 spin_unlock_irqrestore(&fnic
->fnic_lock
, flags
);
413 skb_queue_tail(&fnic
->frame_queue
, skb
);
414 queue_work(fnic_event_queue
, &fnic
->frame_work
);
418 dev_kfree_skb_irq(skb
);
421 static int fnic_rq_cmpl_handler_cont(struct vnic_dev
*vdev
,
422 struct cq_desc
*cq_desc
, u8 type
,
423 u16 q_number
, u16 completed_index
,
426 struct fnic
*fnic
= vnic_dev_priv(vdev
);
428 vnic_rq_service(&fnic
->rq
[q_number
], cq_desc
, completed_index
,
429 VNIC_RQ_RETURN_DESC
, fnic_rq_cmpl_frame_recv
,
434 int fnic_rq_cmpl_handler(struct fnic
*fnic
, int rq_work_to_do
)
436 unsigned int tot_rq_work_done
= 0, cur_work_done
;
440 for (i
= 0; i
< fnic
->rq_count
; i
++) {
441 cur_work_done
= vnic_cq_service(&fnic
->cq
[i
], rq_work_to_do
,
442 fnic_rq_cmpl_handler_cont
,
445 err
= vnic_rq_fill(&fnic
->rq
[i
], fnic_alloc_rq_frame
);
447 shost_printk(KERN_ERR
, fnic
->lport
->host
,
448 "fnic_alloc_rq_frame cant alloc"
451 tot_rq_work_done
+= cur_work_done
;
454 return tot_rq_work_done
;
458 * This function is called once at init time to allocate and fill RQ
459 * buffers. Subsequently, it is called in the interrupt context after RQ
460 * buffer processing to replenish the buffers in the RQ
462 int fnic_alloc_rq_frame(struct vnic_rq
*rq
)
464 struct fnic
*fnic
= vnic_dev_priv(rq
->vdev
);
469 len
= FC_FRAME_HEADROOM
+ FC_MAX_FRAME
+ FC_FRAME_TAILROOM
;
470 skb
= dev_alloc_skb(len
);
472 FNIC_FCS_DBG(KERN_DEBUG
, fnic
->lport
->host
,
473 "Unable to allocate RQ sk_buff\n");
476 skb_reset_mac_header(skb
);
477 skb_reset_transport_header(skb
);
478 skb_reset_network_header(skb
);
480 pa
= pci_map_single(fnic
->pdev
, skb
->data
, len
, PCI_DMA_FROMDEVICE
);
481 fnic_queue_rq_desc(rq
, skb
, pa
, len
);
485 void fnic_free_rq_buf(struct vnic_rq
*rq
, struct vnic_rq_buf
*buf
)
487 struct fc_frame
*fp
= buf
->os_buf
;
488 struct fnic
*fnic
= vnic_dev_priv(rq
->vdev
);
490 pci_unmap_single(fnic
->pdev
, buf
->dma_addr
, buf
->len
,
493 dev_kfree_skb(fp_skb(fp
));
497 static inline int is_flogi_frame(struct fc_frame_header
*fh
)
499 return fh
->fh_r_ctl
== FC_RCTL_ELS_REQ
&& *(u8
*)(fh
+ 1) == ELS_FLOGI
;
502 int fnic_send_frame(struct fnic
*fnic
, struct fc_frame
*fp
)
504 struct vnic_wq
*wq
= &fnic
->wq
[0];
507 struct ethhdr
*eth_hdr
;
508 struct vlan_ethhdr
*vlan_hdr
;
509 struct fcoe_hdr
*fcoe_hdr
;
510 struct fc_frame_header
*fh
;
511 u32 tot_len
, eth_hdr_len
;
515 fh
= fc_frame_header_get(fp
);
518 if (!fnic
->vlan_hw_insert
) {
519 eth_hdr_len
= sizeof(*vlan_hdr
) + sizeof(*fcoe_hdr
);
520 vlan_hdr
= (struct vlan_ethhdr
*)skb_push(skb
, eth_hdr_len
);
521 eth_hdr
= (struct ethhdr
*)vlan_hdr
;
522 vlan_hdr
->h_vlan_proto
= htons(ETH_P_8021Q
);
523 vlan_hdr
->h_vlan_encapsulated_proto
= htons(ETH_P_FCOE
);
524 vlan_hdr
->h_vlan_TCI
= htons(fnic
->vlan_id
);
525 fcoe_hdr
= (struct fcoe_hdr
*)(vlan_hdr
+ 1);
527 eth_hdr_len
= sizeof(*eth_hdr
) + sizeof(*fcoe_hdr
);
528 eth_hdr
= (struct ethhdr
*)skb_push(skb
, eth_hdr_len
);
529 eth_hdr
->h_proto
= htons(ETH_P_FCOE
);
530 fcoe_hdr
= (struct fcoe_hdr
*)(eth_hdr
+ 1);
533 if (is_flogi_frame(fh
)) {
534 fc_fcoe_set_mac(eth_hdr
->h_dest
, fh
->fh_d_id
);
535 memcpy(eth_hdr
->h_source
, fnic
->mac_addr
, ETH_ALEN
);
537 if (fnic
->fcoui_mode
)
538 fc_fcoe_set_mac(eth_hdr
->h_dest
, fh
->fh_d_id
);
540 memcpy(eth_hdr
->h_dest
, fnic
->dest_addr
, ETH_ALEN
);
541 memcpy(eth_hdr
->h_source
, fnic
->data_src_addr
, ETH_ALEN
);
547 memset(fcoe_hdr
, 0, sizeof(*fcoe_hdr
));
548 fcoe_hdr
->fcoe_sof
= fr_sof(fp
);
550 FC_FCOE_ENCAPS_VER(fcoe_hdr
, FC_FCOE_VER
);
552 pa
= pci_map_single(fnic
->pdev
, eth_hdr
, tot_len
, PCI_DMA_TODEVICE
);
554 spin_lock_irqsave(&fnic
->wq_lock
[0], flags
);
556 if (!vnic_wq_desc_avail(wq
)) {
557 pci_unmap_single(fnic
->pdev
, pa
,
558 tot_len
, PCI_DMA_TODEVICE
);
560 goto fnic_send_frame_end
;
563 fnic_queue_wq_desc(wq
, skb
, pa
, tot_len
, fr_eof(fp
),
564 fnic
->vlan_hw_insert
, fnic
->vlan_id
, 1, 1, 1);
566 spin_unlock_irqrestore(&fnic
->wq_lock
[0], flags
);
569 dev_kfree_skb_any(fp_skb(fp
));
576 * Routine to send a raw frame
578 int fnic_send(struct fc_lport
*lp
, struct fc_frame
*fp
)
580 struct fnic
*fnic
= lport_priv(lp
);
581 struct fc_frame_header
*fh
;
583 enum fnic_state old_state
;
585 struct fc_frame
*old_flogi
= NULL
;
586 struct fc_frame
*old_flogi_resp
= NULL
;
588 if (fnic
->in_remove
) {
589 dev_kfree_skb(fp_skb(fp
));
594 fh
= fc_frame_header_get(fp
);
595 /* if not an Flogi frame, send it out, this is the common case */
596 if (!is_flogi_frame(fh
))
597 return fnic_send_frame(fnic
, fp
);
599 /* Flogi frame, now enter the state machine */
601 spin_lock_irqsave(&fnic
->fnic_lock
, flags
);
603 /* Get any old cached frames, free them after dropping lock */
604 old_flogi
= fnic
->flogi
;
606 old_flogi_resp
= fnic
->flogi_resp
;
607 fnic
->flogi_resp
= NULL
;
609 fnic
->flogi_oxid
= FC_XID_UNKNOWN
;
611 old_state
= fnic
->state
;
613 case FNIC_IN_FC_MODE
:
614 case FNIC_IN_ETH_TRANS_FC_MODE
:
616 fnic
->state
= FNIC_IN_FC_TRANS_ETH_MODE
;
617 vnic_dev_del_addr(fnic
->vdev
, fnic
->data_src_addr
);
618 spin_unlock_irqrestore(&fnic
->fnic_lock
, flags
);
621 dev_kfree_skb(fp_skb(old_flogi
));
624 if (old_flogi_resp
) {
625 dev_kfree_skb(fp_skb(old_flogi_resp
));
626 old_flogi_resp
= NULL
;
629 ret
= fnic_fw_reset_handler(fnic
);
631 spin_lock_irqsave(&fnic
->fnic_lock
, flags
);
632 if (fnic
->state
!= FNIC_IN_FC_TRANS_ETH_MODE
)
635 fnic
->state
= old_state
;
636 spin_unlock_irqrestore(&fnic
->fnic_lock
, flags
);
637 dev_kfree_skb(fp_skb(fp
));
640 old_flogi
= fnic
->flogi
;
642 fnic
->flogi_oxid
= ntohs(fh
->fh_ox_id
);
643 old_flogi_resp
= fnic
->flogi_resp
;
644 fnic
->flogi_resp
= NULL
;
645 spin_unlock_irqrestore(&fnic
->fnic_lock
, flags
);
648 case FNIC_IN_FC_TRANS_ETH_MODE
:
650 * A reset is pending with the firmware. Store the flogi
651 * and its oxid. The transition out of this state happens
652 * only when Firmware completes the reset, either with
653 * success or failed. If success, transition to
654 * FNIC_IN_ETH_MODE, if fail, then transition to
658 fnic
->flogi_oxid
= ntohs(fh
->fh_ox_id
);
659 spin_unlock_irqrestore(&fnic
->fnic_lock
, flags
);
662 case FNIC_IN_ETH_MODE
:
664 * The fw/hw is already in eth mode. Store the oxid,
665 * and send the flogi frame out. The transition out of this
666 * state happens only we receive flogi response from the
667 * network, and the oxid matches the cached oxid when the
668 * flogi frame was sent out. If they match, then we issue
669 * a flogi_reg request and transition to state
670 * FNIC_IN_ETH_TRANS_FC_MODE
672 fnic
->flogi_oxid
= ntohs(fh
->fh_ox_id
);
673 spin_unlock_irqrestore(&fnic
->fnic_lock
, flags
);
674 ret
= fnic_send_frame(fnic
, fp
);
680 dev_kfree_skb(fp_skb(old_flogi
));
682 dev_kfree_skb(fp_skb(old_flogi_resp
));
686 static void fnic_wq_complete_frame_send(struct vnic_wq
*wq
,
687 struct cq_desc
*cq_desc
,
688 struct vnic_wq_buf
*buf
, void *opaque
)
690 struct sk_buff
*skb
= buf
->os_buf
;
691 struct fc_frame
*fp
= (struct fc_frame
*)skb
;
692 struct fnic
*fnic
= vnic_dev_priv(wq
->vdev
);
694 pci_unmap_single(fnic
->pdev
, buf
->dma_addr
,
695 buf
->len
, PCI_DMA_TODEVICE
);
696 dev_kfree_skb_irq(fp_skb(fp
));
700 static int fnic_wq_cmpl_handler_cont(struct vnic_dev
*vdev
,
701 struct cq_desc
*cq_desc
, u8 type
,
702 u16 q_number
, u16 completed_index
,
705 struct fnic
*fnic
= vnic_dev_priv(vdev
);
708 spin_lock_irqsave(&fnic
->wq_lock
[q_number
], flags
);
709 vnic_wq_service(&fnic
->wq
[q_number
], cq_desc
, completed_index
,
710 fnic_wq_complete_frame_send
, NULL
);
711 spin_unlock_irqrestore(&fnic
->wq_lock
[q_number
], flags
);
716 int fnic_wq_cmpl_handler(struct fnic
*fnic
, int work_to_do
)
718 unsigned int wq_work_done
= 0;
721 for (i
= 0; i
< fnic
->raw_wq_count
; i
++) {
722 wq_work_done
+= vnic_cq_service(&fnic
->cq
[fnic
->rq_count
+i
],
724 fnic_wq_cmpl_handler_cont
,
732 void fnic_free_wq_buf(struct vnic_wq
*wq
, struct vnic_wq_buf
*buf
)
734 struct fc_frame
*fp
= buf
->os_buf
;
735 struct fnic
*fnic
= vnic_dev_priv(wq
->vdev
);
737 pci_unmap_single(fnic
->pdev
, buf
->dma_addr
,
738 buf
->len
, PCI_DMA_TODEVICE
);
740 dev_kfree_skb(fp_skb(fp
));