/*
 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/workqueue.h>
#include <scsi/fc/fc_fip.h>
#include <scsi/fc/fc_els.h>
#include <scsi/fc/fc_fcoe.h>
#include <scsi/fc_frame.h>
#include <scsi/libfc.h>
#include "fnic_io.h"
#include "fnic.h"
#include "fnic_fip.h"
#include "cq_enet_desc.h"
#include "cq_exch_desc.h"
/* destination for FIP VLAN discovery requests: the All-FCF-MACs group */
static u8 fcoe_all_fcfs[ETH_ALEN] = FIP_ALL_FCF_MACS;
struct workqueue_struct *fnic_fip_queue;
struct workqueue_struct *fnic_event_queue;
static void fnic_set_eth_mode(struct fnic *);
static void fnic_fcoe_send_vlan_req(struct fnic *fnic);
static void fnic_fcoe_start_fcf_disc(struct fnic *fnic);
static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *);
static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag);
static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb);
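
/*
 * fnic_handle_link() - handle link up/down events from the link work queue.
 *
 * Reads the current link state from the firmware, compares it against the
 * cached state, and notifies the FCoE controller of any transition.  A
 * changed link_down_cnt with an unchanged link state means the link bounced
 * (UP -> DOWN -> UP), which is counted as a link failure.  On link up with a
 * FIP-capable adapter, FCoE VLAN discovery is (re)started instead of
 * signalling link up directly.
 */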
void fnic_handle_link(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, link_work);
	unsigned long flags;
	int old_link_status;
	u32 old_link_down_cnt;

	spin_lock_irqsave(&fnic->fnic_lock, flags);

	if (fnic->stop_rx_link_events) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}

	old_link_down_cnt = fnic->link_down_cnt;
	old_link_status = fnic->link_status;
	fnic->link_status = vnic_dev_link_status(fnic->vdev);
	fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev);

	if (old_link_status == fnic->link_status) {
		if (!fnic->link_status) {
			/* DOWN -> DOWN */
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		} else {
			if (old_link_down_cnt != fnic->link_down_cnt) {
				/* UP -> DOWN -> UP */
				fnic->lport->host_stats.link_failure_count++;
				spin_unlock_irqrestore(&fnic->fnic_lock, flags);
				FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
					     "link down\n");
				fcoe_ctlr_link_down(&fnic->ctlr);
				if (fnic->config.flags & VFCF_FIP_CAPABLE) {
					/* start FCoE VLAN discovery */
					fnic_fcoe_send_vlan_req(fnic);
					return;
				}
				FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
					     "link up\n");
				fcoe_ctlr_link_up(&fnic->ctlr);
			} else {
				/* UP -> UP */
				spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			}
		}
	} else if (fnic->link_status) {
		/* DOWN -> UP */
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		if (fnic->config.flags & VFCF_FIP_CAPABLE) {
			/* start FCoE VLAN discovery */
			fnic_fcoe_send_vlan_req(fnic);
			return;
		}
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n");
		fcoe_ctlr_link_up(&fnic->ctlr);
	} else {
		/* UP -> DOWN */
		fnic->lport->host_stats.link_failure_count++;
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n");
		fcoe_ctlr_link_down(&fnic->ctlr);
	}
}
/*
 * This function passes incoming fabric frames to libFC
 */
void fnic_handle_frame(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, frame_work);
	struct fc_lport *lp = fnic->lport;
	unsigned long flags;
	struct sk_buff *skb;
	struct fc_frame *fp;

	while ((skb = skb_dequeue(&fnic->frame_queue))) {

		spin_lock_irqsave(&fnic->fnic_lock, flags);
		if (fnic->stop_rx_link_events) {
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			dev_kfree_skb(skb);
			return;
		}
		fp = (struct fc_frame *)skb;

		/*
		 * If we're in a transitional state, just re-queue and return.
		 * The queue will be serviced when we get to a stable state.
		 */
		if (fnic->state != FNIC_IN_FC_MODE &&
		    fnic->state != FNIC_IN_ETH_MODE) {
			skb_queue_head(&fnic->frame_queue, skb);
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			return;
		}
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);

		fc_exch_recv(lp, fp);
	}
}
void fnic_fcoe_evlist_free(struct fnic *fnic)
{
	struct fnic_event *fevt = NULL;
	struct fnic_event *next = NULL;
	unsigned long flags;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (list_empty(&fnic->evlist)) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}

	list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
		list_del(&fevt->list);
		kfree(fevt);
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}
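
/*
 * fnic_handle_event() - drain the fnic event list from the event work queue.
 *
 * Events are freed unprocessed once link events stop, deferred while the
 * fnic is in a transitional state, and otherwise dispatched:
 * FNIC_EVT_START_VLAN_DISC sends a FIP VLAN request (with the fnic lock
 * dropped around the send) and FNIC_EVT_START_FCF_DISC kicks off FCF
 * discovery on the selected VLAN.
 */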
void fnic_handle_event(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, event_work);
	struct fnic_event *fevt = NULL;
	struct fnic_event *next = NULL;
	unsigned long flags;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (list_empty(&fnic->evlist)) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}

	list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
		if (fnic->stop_rx_link_events) {
			list_del(&fevt->list);
			kfree(fevt);
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			return;
		}
		/*
		 * If we're in a transitional state, just re-queue and return.
		 * The queue will be serviced when we get to a stable state.
		 */
		if (fnic->state != FNIC_IN_FC_MODE &&
		    fnic->state != FNIC_IN_ETH_MODE) {
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			return;
		}

		list_del(&fevt->list);
		switch (fevt->event) {
		case FNIC_EVT_START_VLAN_DISC:
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			fnic_fcoe_send_vlan_req(fnic);
			spin_lock_irqsave(&fnic->fnic_lock, flags);
			break;
		case FNIC_EVT_START_FCF_DISC:
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				     "Start FCF Discovery\n");
			fnic_fcoe_start_fcf_disc(fnic);
			break;
		default:
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				     "Unknown event 0x%x\n", fevt->event);
			break;
		}
		kfree(fevt);
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}
/**
 * Check if the received FIP FLOGI frame is rejected
 * @fip: The FCoE controller that received the frame
 * @skb: The received FIP frame
 *
 * Returns non-zero if the frame is a FIP-encapsulated FLOGI response
 * carrying an ELS reject (LS_RJT), zero otherwise.
 */
static inline int is_fnic_fip_flogi_reject(struct fcoe_ctlr *fip,
					   struct sk_buff *skb)
{
	struct fc_lport *lport = fip->lp;
	struct fip_header *fiph;
	struct fc_frame_header *fh = NULL;
	struct fip_desc *desc;
	struct fip_encaps *els;
	enum fip_desc_type els_dtype = 0;
	u16 op;
	u8 els_op;
	u8 sub;

	size_t els_len = 0;
	size_t rlen;
	size_t dlen = 0;

	if (skb_linearize(skb))
		return 0;

	if (skb->len < sizeof(*fiph))
		return 0;

	fiph = (struct fip_header *)skb->data;
	op = ntohs(fiph->fip_op);
	sub = fiph->fip_subcode;

	if (op != FIP_OP_LS)
		return 0;

	if (sub != FIP_SC_REP)
		return 0;

	rlen = ntohs(fiph->fip_dl_len) * 4;
	if (rlen + sizeof(*fiph) > skb->len)
		return 0;

	desc = (struct fip_desc *)(fiph + 1);
	dlen = desc->fip_dlen * FIP_BPW;

	if (desc->fip_dtype == FIP_DT_FLOGI) {

		shost_printk(KERN_DEBUG, lport->host,
			     " FIP TYPE FLOGI: fab name:%llx "
			     "vfid:%d map:%x\n",
			     fip->sel_fcf->fabric_name, fip->sel_fcf->vfid,
			     fip->sel_fcf->fc_map);
		if (dlen < sizeof(*els) + sizeof(*fh) + 1)
			return 0;

		els_len = dlen - sizeof(*els);
		els = (struct fip_encaps *)desc;
		fh = (struct fc_frame_header *)(els + 1);
		els_dtype = desc->fip_dtype;

		if (!fh)
			return 0;

		/*
		 * ELS command code, reason and explanation should be = Reject,
		 * unsupported command and insufficient resource
		 */
		els_op = *(u8 *)(fh + 1);
		if (els_op == ELS_LS_RJT) {
			shost_printk(KERN_INFO, lport->host,
				     "Flogi Request Rejected by Switch\n");
			return 1;
		}
		shost_printk(KERN_INFO, lport->host,
			     "Flogi Request Accepted by Switch\n");
	}
	return 0;
}
static void fnic_fcoe_send_vlan_req(struct fnic *fnic)
{
	struct fcoe_ctlr *fip = &fnic->ctlr;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	struct sk_buff *skb;
	char *eth_fr;
	int fr_len;
	u64 vlan_tov;
	struct fip_vlan *vlan;

	fnic_fcoe_reset_vlans(fnic);
	fnic->set_vlan(fnic, 0);
	FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
		     "Sending VLAN request...\n");
	skb = dev_alloc_skb(sizeof(struct fip_vlan));
	if (!skb)
		return;

	fr_len = sizeof(*vlan);
	eth_fr = (char *)skb->data;
	vlan = (struct fip_vlan *)eth_fr;

	memset(vlan, 0, sizeof(*vlan));
	memcpy(vlan->eth.h_source, fip->ctl_src_addr, ETH_ALEN);
	memcpy(vlan->eth.h_dest, fcoe_all_fcfs, ETH_ALEN);
	vlan->eth.h_proto = htons(ETH_P_FIP);

	vlan->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER);
	vlan->fip.fip_op = htons(FIP_OP_VLAN);
	vlan->fip.fip_subcode = FIP_SC_VL_REQ;
	vlan->fip.fip_dl_len = htons(sizeof(vlan->desc) / FIP_BPW);

	vlan->desc.mac.fd_desc.fip_dtype = FIP_DT_MAC;
	vlan->desc.mac.fd_desc.fip_dlen = sizeof(vlan->desc.mac) / FIP_BPW;
	memcpy(&vlan->desc.mac.fd_mac, fip->ctl_src_addr, ETH_ALEN);

	vlan->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME;
	vlan->desc.wwnn.fd_desc.fip_dlen = sizeof(vlan->desc.wwnn) / FIP_BPW;
	put_unaligned_be64(fip->lp->wwnn, &vlan->desc.wwnn.fd_wwn);
	atomic64_inc(&fnic_stats->vlan_stats.vlan_disc_reqs);

	skb_put(skb, sizeof(*vlan));
	skb->protocol = htons(ETH_P_FIP);
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	fip->send(fip, skb);

	/* set a timer so that we can retry if there is no response */
	vlan_tov = jiffies + msecs_to_jiffies(FCOE_CTLR_FIPVLAN_TOV);
	mod_timer(&fnic->fip_timer, round_jiffies(vlan_tov));
}
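
/*
 * fnic_fcoe_process_vlan_resp() - parse a FIP VLAN notification.
 *
 * Walks the descriptor list of the response, caching every VLAN descriptor
 * on fnic->vlans in FIP_VLAN_AVAIL state, then selects the first entry,
 * programs it into the hardware, and starts FCF solicitation on it.  If the
 * response carries no VLAN descriptors, the retry timer path takes over.
 */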
static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb)
{
	struct fcoe_ctlr *fip = &fnic->ctlr;
	struct fip_header *fiph;
	struct fip_desc *desc;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	u16 vid;
	size_t rlen;
	size_t dlen;
	struct fcoe_vlan *vlan;
	u64 sol_time;
	unsigned long flags;

	FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
		     "Received VLAN response...\n");

	fiph = (struct fip_header *)skb->data;

	FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
		     "Received VLAN response... OP 0x%x SUB_OP 0x%x\n",
		     ntohs(fiph->fip_op), fiph->fip_subcode);

	rlen = ntohs(fiph->fip_dl_len) * 4;
	fnic_fcoe_reset_vlans(fnic);
	spin_lock_irqsave(&fnic->vlans_lock, flags);
	desc = (struct fip_desc *)(fiph + 1);
	while (rlen > 0) {
		dlen = desc->fip_dlen * FIP_BPW;
		switch (desc->fip_dtype) {
		case FIP_DT_VLAN:
			vid = ntohs(((struct fip_vlan_desc *)desc)->fd_vlan);
			shost_printk(KERN_INFO, fnic->lport->host,
				     "process_vlan_resp: FIP VLAN %d\n", vid);
			vlan = kmalloc(sizeof(*vlan), GFP_ATOMIC);
			if (!vlan) {
				/* retry from timer */
				spin_unlock_irqrestore(&fnic->vlans_lock,
						       flags);
				goto out;
			}
			memset(vlan, 0, sizeof(struct fcoe_vlan));
			vlan->vid = vid & 0x0fff;
			vlan->state = FIP_VLAN_AVAIL;
			list_add_tail(&vlan->list, &fnic->vlans);
			break;
		}
		desc = (struct fip_desc *)((char *)desc + dlen);
		rlen -= dlen;
	}

	/* any VLAN descriptors present ? */
	if (list_empty(&fnic->vlans)) {
		/* retry from timer */
		atomic64_inc(&fnic_stats->vlan_stats.resp_withno_vlanID);
		FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
			     "No VLAN descriptors in FIP VLAN response\n");
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		goto out;
	}

	vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
	fnic->set_vlan(fnic, vlan->vid);
	vlan->state = FIP_VLAN_SENT; /* sent now */
	vlan->sol_count++;
	spin_unlock_irqrestore(&fnic->vlans_lock, flags);

	/* start the solicitation */
	fcoe_ctlr_link_up(fip);

	sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
	mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
out:
	return;
}
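
/*
 * fnic_fcoe_start_fcf_disc() - restart FCF discovery on the current VLAN.
 *
 * Re-programs the first cached VLAN, marks it sent with a fresh solicitation
 * count, signals link up to the FCoE controller, and re-arms the FIP timer.
 */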
static void fnic_fcoe_start_fcf_disc(struct fnic *fnic)
{
	unsigned long flags;
	struct fcoe_vlan *vlan;
	u64 sol_time;

	spin_lock_irqsave(&fnic->vlans_lock, flags);
	vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
	fnic->set_vlan(fnic, vlan->vid);
	vlan->state = FIP_VLAN_SENT; /* sent now */
	vlan->sol_count = 1;
	spin_unlock_irqrestore(&fnic->vlans_lock, flags);

	/* start the solicitation */
	fcoe_ctlr_link_up(&fnic->ctlr);

	sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
	mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
}
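
/*
 * fnic_fcoe_vlan_check() - validate that an FCF advertisement is usable.
 *
 * Returns 0 when a discovered VLAN is already in use, or when it can be
 * promoted from FIP_VLAN_SENT to FIP_VLAN_USED by this advertisement;
 * -EINVAL when no usable VLAN is cached.
 */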
static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag)
{
	unsigned long flags;
	struct fcoe_vlan *fvlan;

	spin_lock_irqsave(&fnic->vlans_lock, flags);
	if (list_empty(&fnic->vlans)) {
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		return -EINVAL;
	}

	fvlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
	if (fvlan->state == FIP_VLAN_USED) {
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		return 0;
	}

	if (fvlan->state == FIP_VLAN_SENT) {
		fvlan->state = FIP_VLAN_USED;
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		return 0;
	}
	spin_unlock_irqrestore(&fnic->vlans_lock, flags);
	return -EINVAL;
}
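
/*
 * fnic_event_enq() - queue an event for fnic_handle_event().
 *
 * Allocates a fnic_event atomically (callers may hold spinlocks or run in
 * timer context), appends it to fnic->evlist, and schedules the event work.
 * Typical use: fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
 */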
static void fnic_event_enq(struct fnic *fnic, enum fnic_evt ev)
{
	struct fnic_event *fevt;
	unsigned long flags;

	fevt = kmalloc(sizeof(*fevt), GFP_ATOMIC);
	if (!fevt)
		return;

	fevt->fnic = fnic;
	fevt->event = ev;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	list_add_tail(&fevt->list, &fnic->evlist);
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	schedule_work(&fnic->event_work);
}
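
/*
 * fnic_fcoe_handle_fip_frame() - examine a received FIP frame.
 *
 * Caller contract: a return value > 0 means the frame should also be handed
 * to libfcoe via fcoe_ctlr_recv(); <= 0 means it was consumed or rejected
 * here and the caller frees the skb.  Discovery advertisements are checked
 * against the VLAN list, VLAN notifications are absorbed by
 * fnic_fcoe_process_vlan_resp(), and a Clear Virtual Link triggers a fresh
 * VLAN discovery cycle.
 */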
static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb)
{
	struct fip_header *fiph;
	int ret = 1;
	u16 op;
	u8 sub;

	if (!skb || !(skb->data))
		return -1;

	if (skb_linearize(skb))
		goto drop;

	fiph = (struct fip_header *)skb->data;
	op = ntohs(fiph->fip_op);
	sub = fiph->fip_subcode;

	if (FIP_VER_DECAPS(fiph->fip_ver) != FIP_VER)
		goto drop;

	if (ntohs(fiph->fip_dl_len) * FIP_BPW + sizeof(*fiph) > skb->len)
		goto drop;

	if (op == FIP_OP_DISC && sub == FIP_SC_ADV) {
		if (fnic_fcoe_vlan_check(fnic, ntohs(fiph->fip_flags)))
			goto drop;
		/* pass it on to fcoe */
		ret = 1;
	} else if (op == FIP_OP_VLAN && sub == FIP_SC_VL_REP) {
		/* set the vlan as used */
		fnic_fcoe_process_vlan_resp(fnic, skb);
		ret = 0;
	} else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK) {
		/* received CVL request, restart vlan disc */
		fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
		/* pass it on to fcoe */
		ret = 1;
	}
drop:
	return ret;
}
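
/*
 * fnic_handle_fip_frame() - FIP frame work handler.
 *
 * Drains fnic->fip_frame_queue, re-queueing frames while the fnic is in a
 * transitional state.  Frames that survive fnic_fcoe_handle_fip_frame() are
 * additionally scanned for an encapsulated FLOGI reject, which forces a
 * link-down and a fresh VLAN discovery before the frame reaches libfcoe.
 */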
void fnic_handle_fip_frame(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, fip_frame_work);
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	unsigned long flags;
	struct sk_buff *skb;
	struct ethhdr *eh;

	while ((skb = skb_dequeue(&fnic->fip_frame_queue))) {
		spin_lock_irqsave(&fnic->fnic_lock, flags);
		if (fnic->stop_rx_link_events) {
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			dev_kfree_skb(skb);
			return;
		}
		/*
		 * If we're in a transitional state, just re-queue and return.
		 * The queue will be serviced when we get to a stable state.
		 */
		if (fnic->state != FNIC_IN_FC_MODE &&
		    fnic->state != FNIC_IN_ETH_MODE) {
			skb_queue_head(&fnic->fip_frame_queue, skb);
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			return;
		}
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		eh = (struct ethhdr *)skb->data;
		if (eh->h_proto == htons(ETH_P_FIP)) {
			skb_pull(skb, sizeof(*eh));
			if (fnic_fcoe_handle_fip_frame(fnic, skb) <= 0) {
				dev_kfree_skb(skb);
				continue;
			}
			/*
			 * If there's FLOGI rejects - clear all
			 * fcf's & restart from scratch
			 */
			if (is_fnic_fip_flogi_reject(&fnic->ctlr, skb)) {
				atomic64_inc(
					&fnic_stats->vlan_stats.flogi_rejects);
				shost_printk(KERN_INFO, fnic->lport->host,
					     "Trigger a Link down - VLAN Disc\n");
				fcoe_ctlr_link_down(&fnic->ctlr);
				/* start FCoE VLAN discovery */
				fnic_fcoe_send_vlan_req(fnic);
				dev_kfree_skb(skb);
				continue;
			}
			fcoe_ctlr_recv(&fnic->ctlr, skb);
			continue;
		}
	}
}
/**
 * fnic_import_rq_eth_pkt() - handle received FCoE or FIP frame.
 * @fnic:	fnic instance.
 * @skb:	Ethernet Frame.
 */
static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb)
{
	struct fc_frame *fp;
	struct ethhdr *eh;
	struct fcoe_hdr *fcoe_hdr;
	struct fcoe_crc_eof *ft;

	/*
	 * Undo VLAN encapsulation if present.
	 */
	eh = (struct ethhdr *)skb->data;
	if (eh->h_proto == htons(ETH_P_8021Q)) {
		memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2);
		eh = (struct ethhdr *)skb_pull(skb, VLAN_HLEN);
		skb_reset_mac_header(skb);
	}
	if (eh->h_proto == htons(ETH_P_FIP)) {
		if (!(fnic->config.flags & VFCF_FIP_CAPABLE)) {
			printk(KERN_ERR "Dropped FIP frame, as firmware "
					"uses non-FIP mode, Enable FIP "
					"using UCSM\n");
			goto drop;
		}
		skb_queue_tail(&fnic->fip_frame_queue, skb);
		queue_work(fnic_fip_queue, &fnic->fip_frame_work);
		return 1;		/* let caller know packet was used */
	}
	if (eh->h_proto != htons(ETH_P_FCOE))
		goto drop;
	skb_set_network_header(skb, sizeof(*eh));
	skb_pull(skb, sizeof(*eh));

	fcoe_hdr = (struct fcoe_hdr *)skb->data;
	if (FC_FCOE_DECAPS_VER(fcoe_hdr) != FC_FCOE_VER)
		goto drop;

	fp = (struct fc_frame *)skb;
	fc_frame_init(fp);
	fr_sof(fp) = fcoe_hdr->fcoe_sof;
	skb_pull(skb, sizeof(struct fcoe_hdr));
	skb_reset_transport_header(skb);

	ft = (struct fcoe_crc_eof *)(skb->data + skb->len - sizeof(*ft));
	fr_eof(fp) = ft->fcoe_eof;
	skb_trim(skb, skb->len - sizeof(*ft));
	return 0;
drop:
	dev_kfree_skb_irq(skb);
	return -1;
}
/**
 * fnic_update_mac_locked() - set data MAC address and filters.
 * @fnic:	fnic instance.
 * @new:	newly-assigned FCoE MAC address.
 *
 * Called with the fnic lock held.
 */
void fnic_update_mac_locked(struct fnic *fnic, u8 *new)
{
	u8 *ctl = fnic->ctlr.ctl_src_addr;
	u8 *data = fnic->data_src_addr;

	if (is_zero_ether_addr(new))
		new = ctl;
	if (ether_addr_equal(data, new))
		return;
	FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "update_mac %pM\n", new);
	if (!is_zero_ether_addr(data) && !ether_addr_equal(data, ctl))
		vnic_dev_del_addr(fnic->vdev, data);
	memcpy(data, new, ETH_ALEN);
	if (!ether_addr_equal(new, ctl))
		vnic_dev_add_addr(fnic->vdev, new);
}
/**
 * fnic_update_mac() - set data MAC address and filters.
 * @lport:	local port.
 * @new:	newly-assigned FCoE MAC address.
 */
void fnic_update_mac(struct fc_lport *lport, u8 *new)
{
	struct fnic *fnic = lport_priv(lport);

	spin_lock_irq(&fnic->fnic_lock);
	fnic_update_mac_locked(fnic, new);
	spin_unlock_irq(&fnic->fnic_lock);
}
/**
 * fnic_set_port_id() - set the port_ID after successful FLOGI.
 * @lport:	local port.
 * @port_id:	assigned FC_ID.
 * @fp:		received frame containing the FLOGI accept or NULL.
 *
 * This is called from libfc when a new FC_ID has been assigned.
 * This causes us to reset the firmware to FC_MODE and setup the new MAC
 * address and FC_ID.
 *
 * It is also called with FC_ID 0 when we're logged off.
 *
 * If the FC_ID is due to point-to-point, fp may be NULL.
 */
void fnic_set_port_id(struct fc_lport *lport, u32 port_id, struct fc_frame *fp)
{
	struct fnic *fnic = lport_priv(lport);
	u8 *mac;
	int ret;

	FNIC_FCS_DBG(KERN_DEBUG, lport->host, "set port_id %x fp %p\n",
		     port_id, fp);

	/*
	 * If we're clearing the FC_ID, change to use the ctl_src_addr.
	 * Set ethernet mode to send FLOGI.
	 */
	if (!port_id) {
		fnic_update_mac(lport, fnic->ctlr.ctl_src_addr);
		fnic_set_eth_mode(fnic);
		return;
	}

	if (fp) {
		mac = fr_cb(fp)->granted_mac;
		if (is_zero_ether_addr(mac)) {
			/* non-FIP - FLOGI already accepted - ignore return */
			fcoe_ctlr_recv_flogi(&fnic->ctlr, lport, fp);
		}
		fnic_update_mac(lport, mac);
	}

	/* Change state to reflect transition to FC mode */
	spin_lock_irq(&fnic->fnic_lock);
	if (fnic->state == FNIC_IN_ETH_MODE || fnic->state == FNIC_IN_FC_MODE)
		fnic->state = FNIC_IN_ETH_TRANS_FC_MODE;
	else {
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "Unexpected fnic state %s while"
			     " processing flogi resp\n",
			     fnic_state_to_str(fnic->state));
		spin_unlock_irq(&fnic->fnic_lock);
		return;
	}
	spin_unlock_irq(&fnic->fnic_lock);

	/*
	 * Send FLOGI registration to firmware to set up FC mode.
	 * The new address will be set up when registration completes.
	 */
	ret = fnic_flogi_reg_handler(fnic, port_id);

	if (ret < 0) {
		spin_lock_irq(&fnic->fnic_lock);
		if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE)
			fnic->state = FNIC_IN_ETH_MODE;
		spin_unlock_irq(&fnic->fnic_lock);
	}
}
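
/*
 * fnic_rq_cmpl_frame_recv() - per-descriptor receive completion handler.
 *
 * Unmaps the receive buffer and decodes the completion descriptor: FCP-type
 * completions arrive with the Ethernet/FCoE headers already stripped by the
 * firmware, while ENET-type completions carry the full Ethernet frame and
 * are routed through fnic_import_rq_eth_pkt().  Frames with FCS/CRC or
 * encapsulation errors are counted and dropped; good frames are queued for
 * fnic_handle_frame().
 */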
static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
				    *cq_desc, struct vnic_rq_buf *buf,
				    int skipped __attribute__((unused)),
				    void *opaque)
{
	struct fnic *fnic = vnic_dev_priv(rq->vdev);
	struct sk_buff *skb;
	struct fc_frame *fp;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	unsigned int eth_hdrs_stripped;
	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe = 0, fcoe_sof, fcoe_eof;
	u8 fcoe_fc_crc_ok = 1, fcoe_enc_error = 0;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, rss_type, csum_not_calc;
	u8 fcs_ok = 1, packet_error = 0;
	u16 q_number, completed_index, bytes_written = 0, vlan, checksum;
	u32 rss_hash;
	u16 exchange_id, tmpl;
	u8 sof = 0;
	u8 eof = 0;
	u32 fcp_bytes_written = 0;
	unsigned long flags;

	pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
			 PCI_DMA_FROMDEVICE);
	skb = buf->os_buf;
	fp = (struct fc_frame *)skb;
	buf->os_buf = NULL;

	cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index);
	if (type == CQ_DESC_TYPE_RQ_FCP) {
		cq_fcp_rq_desc_dec((struct cq_fcp_rq_desc *)cq_desc,
				   &type, &color, &q_number, &completed_index,
				   &eop, &sop, &fcoe_fc_crc_ok, &exchange_id,
				   &tmpl, &fcp_bytes_written, &sof, &eof,
				   &ingress_port, &packet_error,
				   &fcoe_enc_error, &fcs_ok, &vlan_stripped,
				   &vlan);
		eth_hdrs_stripped = 1;
		skb_trim(skb, fcp_bytes_written);
		fr_sof(fp) = sof;
		fr_eof(fp) = eof;

	} else if (type == CQ_DESC_TYPE_RQ_ENET) {
		cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
				    &type, &color, &q_number, &completed_index,
				    &ingress_port, &fcoe, &eop, &sop,
				    &rss_type, &csum_not_calc, &rss_hash,
				    &bytes_written, &packet_error,
				    &vlan_stripped, &vlan, &checksum,
				    &fcoe_sof, &fcoe_fc_crc_ok,
				    &fcoe_enc_error, &fcoe_eof,
				    &tcp_udp_csum_ok, &udp, &tcp,
				    &ipv4_csum_ok, &ipv6, &ipv4,
				    &ipv4_fragment, &fcs_ok);
		eth_hdrs_stripped = 0;
		skb_trim(skb, bytes_written);
		if (!fcs_ok) {
			atomic64_inc(&fnic_stats->misc_stats.frame_errors);
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				     "fcs error. dropping packet.\n");
			goto drop;
		}
		if (fnic_import_rq_eth_pkt(fnic, skb))
			return;

	} else {
		/* wrong CQ type */
		shost_printk(KERN_ERR, fnic->lport->host,
			     "fnic rq_cmpl wrong cq type x%x\n", type);
		goto drop;
	}

	if (!fcs_ok || packet_error || !fcoe_fc_crc_ok || fcoe_enc_error) {
		atomic64_inc(&fnic_stats->misc_stats.frame_errors);
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "fnic rq_cmpl fcoe x%x fcsok x%x"
			     " pkterr x%x fcoe_fc_crc_ok x%x, fcoe_enc_err"
			     " x%x\n",
			     fcoe, fcs_ok, packet_error,
			     fcoe_fc_crc_ok, fcoe_enc_error);
		goto drop;
	}

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (fnic->stop_rx_link_events) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		goto drop;
	}
	fr_dev(fp) = fnic->lport;
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	skb_queue_tail(&fnic->frame_queue, skb);
	queue_work(fnic_event_queue, &fnic->frame_work);

	return;
drop:
	dev_kfree_skb_irq(skb);
}
static int fnic_rq_cmpl_handler_cont(struct vnic_dev *vdev,
				     struct cq_desc *cq_desc, u8 type,
				     u16 q_number, u16 completed_index,
				     void *opaque)
{
	struct fnic *fnic = vnic_dev_priv(vdev);

	vnic_rq_service(&fnic->rq[q_number], cq_desc, completed_index,
			VNIC_RQ_RETURN_DESC, fnic_rq_cmpl_frame_recv,
			NULL);
	return 0;
}
int fnic_rq_cmpl_handler(struct fnic *fnic, int rq_work_to_do)
{
	unsigned int tot_rq_work_done = 0, cur_work_done;
	unsigned int i;
	int err;

	for (i = 0; i < fnic->rq_count; i++) {
		cur_work_done = vnic_cq_service(&fnic->cq[i], rq_work_to_do,
						fnic_rq_cmpl_handler_cont,
						NULL);
		if (cur_work_done) {
			err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
			if (err)
				shost_printk(KERN_ERR, fnic->lport->host,
					     "fnic_alloc_rq_frame can't alloc"
					     " frame\n");
		}
		tot_rq_work_done += cur_work_done;
	}

	return tot_rq_work_done;
}
/*
 * This function is called once at init time to allocate and fill RQ
 * buffers. Subsequently, it is called in the interrupt context after RQ
 * buffer processing to replenish the buffers in the RQ.
 */
int fnic_alloc_rq_frame(struct vnic_rq *rq)
{
	struct fnic *fnic = vnic_dev_priv(rq->vdev);
	struct sk_buff *skb;
	u16 len;
	dma_addr_t pa;

	len = FC_FRAME_HEADROOM + FC_MAX_FRAME + FC_FRAME_TAILROOM;
	skb = dev_alloc_skb(len);
	if (!skb) {
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "Unable to allocate RQ sk_buff\n");
		return -ENOMEM;
	}
	skb_reset_mac_header(skb);
	skb_reset_transport_header(skb);
	skb_reset_network_header(skb);
	skb_put(skb, len);
	pa = pci_map_single(fnic->pdev, skb->data, len, PCI_DMA_FROMDEVICE);
	fnic_queue_rq_desc(rq, skb, pa, len);
	return 0;
}
void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
	struct fc_frame *fp = buf->os_buf;
	struct fnic *fnic = vnic_dev_priv(rq->vdev);

	pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
			 PCI_DMA_FROMDEVICE);

	dev_kfree_skb(fp_skb(fp));
	buf->os_buf = NULL;
}
/**
 * fnic_eth_send() - Send Ethernet frame.
 * @fip:	fcoe_ctlr instance.
 * @skb:	Ethernet Frame, FIP, without VLAN encapsulation.
 */
void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
{
	struct fnic *fnic = fnic_from_ctlr(fip);
	struct vnic_wq *wq = &fnic->wq[0];
	dma_addr_t pa;
	struct ethhdr *eth_hdr;
	struct vlan_ethhdr *vlan_hdr;
	unsigned long flags;

	if (!fnic->vlan_hw_insert) {
		eth_hdr = (struct ethhdr *)skb_mac_header(skb);
		vlan_hdr = (struct vlan_ethhdr *)skb_push(skb,
				sizeof(*vlan_hdr) - sizeof(*eth_hdr));
		memcpy(vlan_hdr, eth_hdr, 2 * ETH_ALEN);
		vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
		vlan_hdr->h_vlan_encapsulated_proto = eth_hdr->h_proto;
		vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
	}

	pa = pci_map_single(fnic->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);

	spin_lock_irqsave(&fnic->wq_lock[0], flags);
	if (!vnic_wq_desc_avail(wq)) {
		pci_unmap_single(fnic->pdev, pa, skb->len, PCI_DMA_TODEVICE);
		spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
		kfree_skb(skb);
		return;
	}

	fnic_queue_wq_eth_desc(wq, skb, pa, skb->len,
			       0 /* hw inserts cos value */,
			       fnic->vlan_id, 1);
	spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
}
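
/*
 * fnic_send_frame() - encapsulate and transmit a single FC frame.
 *
 * FIP ELS requests are diverted to libfcoe via fcoe_ctlr_els_send().  All
 * other frames get an Ethernet (optionally 802.1Q-tagged) plus FCoE header,
 * a destination MAC chosen from map_dest or the selected FCF, and are posted
 * to work queue 0; the frame is freed on failure.
 */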
static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
{
	struct vnic_wq *wq = &fnic->wq[0];
	struct sk_buff *skb;
	dma_addr_t pa;
	struct ethhdr *eth_hdr;
	struct vlan_ethhdr *vlan_hdr;
	struct fcoe_hdr *fcoe_hdr;
	struct fc_frame_header *fh;
	u32 tot_len, eth_hdr_len;
	int ret = 0;
	unsigned long flags;

	fh = fc_frame_header_get(fp);
	skb = fp_skb(fp);

	if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) &&
	    fcoe_ctlr_els_send(&fnic->ctlr, fnic->lport, skb))
		return 0;

	if (!fnic->vlan_hw_insert) {
		eth_hdr_len = sizeof(*vlan_hdr) + sizeof(*fcoe_hdr);
		vlan_hdr = (struct vlan_ethhdr *)skb_push(skb, eth_hdr_len);
		eth_hdr = (struct ethhdr *)vlan_hdr;
		vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
		vlan_hdr->h_vlan_encapsulated_proto = htons(ETH_P_FCOE);
		vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
		fcoe_hdr = (struct fcoe_hdr *)(vlan_hdr + 1);
	} else {
		eth_hdr_len = sizeof(*eth_hdr) + sizeof(*fcoe_hdr);
		eth_hdr = (struct ethhdr *)skb_push(skb, eth_hdr_len);
		eth_hdr->h_proto = htons(ETH_P_FCOE);
		fcoe_hdr = (struct fcoe_hdr *)(eth_hdr + 1);
	}

	if (fnic->ctlr.map_dest)
		fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id);
	else
		memcpy(eth_hdr->h_dest, fnic->ctlr.dest_addr, ETH_ALEN);
	memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN);

	tot_len = skb->len;
	BUG_ON(tot_len % 4);

	memset(fcoe_hdr, 0, sizeof(*fcoe_hdr));
	fcoe_hdr->fcoe_sof = fr_sof(fp);
	if (FC_FCOE_VER)
		FC_FCOE_ENCAPS_VER(fcoe_hdr, FC_FCOE_VER);

	pa = pci_map_single(fnic->pdev, eth_hdr, tot_len, PCI_DMA_TODEVICE);

	spin_lock_irqsave(&fnic->wq_lock[0], flags);

	if (!vnic_wq_desc_avail(wq)) {
		pci_unmap_single(fnic->pdev, pa,
				 tot_len, PCI_DMA_TODEVICE);
		ret = -1;
		goto fnic_send_frame_end;
	}

	fnic_queue_wq_desc(wq, skb, pa, tot_len, fr_eof(fp),
			   0 /* hw inserts cos value */,
			   fnic->vlan_id, 1, 1, 1);
fnic_send_frame_end:
	spin_unlock_irqrestore(&fnic->wq_lock[0], flags);

	if (ret)
		dev_kfree_skb_any(fp_skb(fp));

	return ret;
}
/*
 * fnic_send
 * Routine to send a raw frame
 */
int fnic_send(struct fc_lport *lp, struct fc_frame *fp)
{
	struct fnic *fnic = lport_priv(lp);
	unsigned long flags;

	if (fnic->in_remove) {
		dev_kfree_skb(fp_skb(fp));
		return -1;
	}

	/*
	 * Queue frame if in a transitional state.
	 * This occurs while registering the Port_ID / MAC address after FLOGI.
	 */
	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (fnic->state != FNIC_IN_FC_MODE && fnic->state != FNIC_IN_ETH_MODE) {
		skb_queue_tail(&fnic->tx_queue, fp_skb(fp));
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return 0;
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	return fnic_send_frame(fnic, fp);
}
/**
 * fnic_flush_tx() - send queued frames.
 * @fnic: fnic device
 *
 * Send frames that were waiting to go out in FC or Ethernet mode.
 * Whenever changing modes we purge queued frames, so these frames should
 * be queued for the stable mode that we're in, either FC or Ethernet.
 *
 * Called without fnic_lock held.
 */
void fnic_flush_tx(struct fnic *fnic)
{
	struct sk_buff *skb;
	struct fc_frame *fp;

	while ((skb = skb_dequeue(&fnic->tx_queue))) {
		fp = (struct fc_frame *)skb;
		fnic_send_frame(fnic, fp);
	}
}
/**
 * fnic_set_eth_mode() - put fnic into ethernet mode.
 * @fnic: fnic device
 *
 * Called without fnic lock held.
 */
static void fnic_set_eth_mode(struct fnic *fnic)
{
	unsigned long flags;
	enum fnic_state old_state;
	int ret;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
again:
	old_state = fnic->state;
	switch (old_state) {
	case FNIC_IN_FC_MODE:
	case FNIC_IN_ETH_TRANS_FC_MODE:
	default:
		fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);

		ret = fnic_fw_reset_handler(fnic);

		spin_lock_irqsave(&fnic->fnic_lock, flags);
		if (fnic->state != FNIC_IN_FC_TRANS_ETH_MODE)
			goto again;
		if (ret)
			fnic->state = old_state;
		break;

	case FNIC_IN_FC_TRANS_ETH_MODE:
	case FNIC_IN_ETH_MODE:
		break;
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}
static void fnic_wq_complete_frame_send(struct vnic_wq *wq,
					struct cq_desc *cq_desc,
					struct vnic_wq_buf *buf, void *opaque)
{
	struct sk_buff *skb = buf->os_buf;
	struct fc_frame *fp = (struct fc_frame *)skb;
	struct fnic *fnic = vnic_dev_priv(wq->vdev);

	pci_unmap_single(fnic->pdev, buf->dma_addr,
			 buf->len, PCI_DMA_TODEVICE);
	dev_kfree_skb_irq(fp_skb(fp));
	buf->os_buf = NULL;
}
static int fnic_wq_cmpl_handler_cont(struct vnic_dev *vdev,
				     struct cq_desc *cq_desc, u8 type,
				     u16 q_number, u16 completed_index,
				     void *opaque)
{
	struct fnic *fnic = vnic_dev_priv(vdev);
	unsigned long flags;

	spin_lock_irqsave(&fnic->wq_lock[q_number], flags);
	vnic_wq_service(&fnic->wq[q_number], cq_desc, completed_index,
			fnic_wq_complete_frame_send, NULL);
	spin_unlock_irqrestore(&fnic->wq_lock[q_number], flags);

	return 0;
}
int fnic_wq_cmpl_handler(struct fnic *fnic, int work_to_do)
{
	unsigned int wq_work_done = 0;
	unsigned int i;

	for (i = 0; i < fnic->raw_wq_count; i++) {
		wq_work_done += vnic_cq_service(&fnic->cq[fnic->rq_count+i],
						work_to_do,
						fnic_wq_cmpl_handler_cont,
						NULL);
	}

	return wq_work_done;
}
void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
	struct fc_frame *fp = buf->os_buf;
	struct fnic *fnic = vnic_dev_priv(wq->vdev);

	pci_unmap_single(fnic->pdev, buf->dma_addr,
			 buf->len, PCI_DMA_TODEVICE);

	dev_kfree_skb(fp_skb(fp));
	buf->os_buf = NULL;
}
void fnic_fcoe_reset_vlans(struct fnic *fnic)
{
	unsigned long flags;
	struct fcoe_vlan *vlan;
	struct fcoe_vlan *next;

	/*
	 * indicate a link down to fcoe so that all fcf's are freed;
	 * might not be required since we did this before sending the vlan
	 * discovery request
	 */
	spin_lock_irqsave(&fnic->vlans_lock, flags);
	if (!list_empty(&fnic->vlans)) {
		list_for_each_entry_safe(vlan, next, &fnic->vlans, list) {
			list_del(&vlan->list);
			kfree(vlan);
		}
	}
	spin_unlock_irqrestore(&fnic->vlans_lock, flags);
}
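
/*
 * fnic_handle_fip_timer() - FIP retry timer expiry handler.
 *
 * Re-triggers VLAN discovery when the VLAN list is empty or the current
 * VLAN has failed.  For a VLAN still in FIP_VLAN_SENT state it either
 * re-solicits (bumping sol_count) or, after FCOE_CTLR_MAX_SOL attempts,
 * drops that VLAN and moves on to the next one in the list.
 */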
void fnic_handle_fip_timer(struct fnic *fnic)
{
	unsigned long flags;
	struct fcoe_vlan *vlan;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	u64 sol_time;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (fnic->stop_rx_link_events) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	if (fnic->ctlr.mode == FIP_ST_NON_FIP)
		return;

	spin_lock_irqsave(&fnic->vlans_lock, flags);
	if (list_empty(&fnic->vlans)) {
		/* no vlans available, try again */
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "Start VLAN Discovery\n");
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
		return;
	}

	vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
	shost_printk(KERN_DEBUG, fnic->lport->host,
		     "fip_timer: vlan %d state %d sol_count %d\n",
		     vlan->vid, vlan->state, vlan->sol_count);
	switch (vlan->state) {
	case FIP_VLAN_USED:
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "FIP VLAN is selected for FC transaction\n");
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		break;
	case FIP_VLAN_FAILED:
		/* if all vlans are in failed state, restart vlan disc */
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "Start VLAN Discovery\n");
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
		break;
	case FIP_VLAN_SENT:
		if (vlan->sol_count >= FCOE_CTLR_MAX_SOL) {
			/*
			 * no response on this vlan, remove from the list.
			 * Try the next vlan
			 */
			shost_printk(KERN_INFO, fnic->lport->host,
				     "Dequeue this VLAN ID %d from list\n",
				     vlan->vid);
			list_del(&vlan->list);
			kfree(vlan);
			vlan = NULL;
			if (list_empty(&fnic->vlans)) {
				/* we exhausted all vlans, restart vlan disc */
				spin_unlock_irqrestore(&fnic->vlans_lock,
						       flags);
				shost_printk(KERN_INFO, fnic->lport->host,
					     "fip_timer: vlan list empty, "
					     "trigger vlan disc\n");
				fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
				return;
			}
			/* check the next vlan */
			vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan,
						list);
			fnic->set_vlan(fnic, vlan->vid);
			vlan->state = FIP_VLAN_SENT; /* sent now */
		}
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		atomic64_inc(&fnic_stats->vlan_stats.sol_expiry_count);
		vlan->sol_count++;
		sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
		mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
		break;
	}
}