/*
 * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/workqueue.h>
#include <scsi/fc/fc_fip.h>
#include <scsi/fc/fc_els.h>
#include <scsi/fc/fc_fcoe.h>
#include <scsi/fc_frame.h>
#include <scsi/libfc.h>
#include "fnic_io.h"
#include "fnic.h"
#include "fnic_fip.h"
#include "cq_enet_desc.h"
#include "cq_exch_desc.h"
static u8 fcoe_all_fcfs[ETH_ALEN] = FIP_ALL_FCF_MACS;
struct workqueue_struct *fnic_fip_queue;
struct workqueue_struct *fnic_event_queue;
static void fnic_set_eth_mode(struct fnic *);
static void fnic_fcoe_send_vlan_req(struct fnic *fnic);
static void fnic_fcoe_start_fcf_disc(struct fnic *fnic);
static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *);
static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag);
static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb);
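
/*
 * Handle a link event from the firmware: refresh the cached link state,
 * link-down counter and port speed, then tell libfcoe about link
 * up/down transitions. On FIP-capable adapters a link-up (or a link
 * bounce) restarts FCoE VLAN discovery instead of signalling link-up
 * directly.
 */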
void fnic_handle_link(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, link_work);
	unsigned long flags;
	int old_link_status;
	u32 old_link_down_cnt;

	spin_lock_irqsave(&fnic->fnic_lock, flags);

	if (fnic->stop_rx_link_events) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}

	old_link_down_cnt = fnic->link_down_cnt;
	old_link_status = fnic->link_status;
	fnic->link_status = vnic_dev_link_status(fnic->vdev);
	fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev);

	switch (vnic_dev_port_speed(fnic->vdev)) {
	case DCEM_PORTSPEED_10G:
		fc_host_speed(fnic->lport->host)   = FC_PORTSPEED_10GBIT;
		fnic->lport->link_supported_speeds = FC_PORTSPEED_10GBIT;
		break;
	case DCEM_PORTSPEED_25G:
		fc_host_speed(fnic->lport->host)   = FC_PORTSPEED_25GBIT;
		fnic->lport->link_supported_speeds = FC_PORTSPEED_25GBIT;
		break;
	case DCEM_PORTSPEED_40G:
	case DCEM_PORTSPEED_4x10G:
		fc_host_speed(fnic->lport->host)   = FC_PORTSPEED_40GBIT;
		fnic->lport->link_supported_speeds = FC_PORTSPEED_40GBIT;
		break;
	case DCEM_PORTSPEED_100G:
		fc_host_speed(fnic->lport->host)   = FC_PORTSPEED_100GBIT;
		fnic->lport->link_supported_speeds = FC_PORTSPEED_100GBIT;
		break;
	default:
		fc_host_speed(fnic->lport->host)   = FC_PORTSPEED_UNKNOWN;
		fnic->lport->link_supported_speeds = FC_PORTSPEED_UNKNOWN;
		break;
	}

	if (old_link_status == fnic->link_status) {
		if (!fnic->link_status) {
			/* DOWN -> DOWN */
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			fnic_fc_trace_set_data(fnic->lport->host->host_no,
				FNIC_FC_LE, "Link Status: DOWN->DOWN",
				strlen("Link Status: DOWN->DOWN"));
		} else {
			if (old_link_down_cnt != fnic->link_down_cnt) {
				/* UP -> DOWN -> UP */
				fnic->lport->host_stats.link_failure_count++;
				spin_unlock_irqrestore(&fnic->fnic_lock, flags);
				fnic_fc_trace_set_data(
					fnic->lport->host->host_no,
					FNIC_FC_LE,
					"Link Status:UP_DOWN_UP",
					strlen("Link_Status:UP_DOWN_UP"));
				FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
					     "link down\n");
				fcoe_ctlr_link_down(&fnic->ctlr);
				if (fnic->config.flags & VFCF_FIP_CAPABLE) {
					/* start FCoE VLAN discovery */
					fnic_fc_trace_set_data(
						fnic->lport->host->host_no,
						FNIC_FC_LE,
						"Link Status: UP_DOWN_UP_VLAN",
						strlen(
						"Link Status: UP_DOWN_UP_VLAN"));
					fnic_fcoe_send_vlan_req(fnic);
					return;
				}
				FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
					     "link up\n");
				fcoe_ctlr_link_up(&fnic->ctlr);
			} else {
				/* UP -> UP */
				spin_unlock_irqrestore(&fnic->fnic_lock, flags);
				fnic_fc_trace_set_data(
					fnic->lport->host->host_no, FNIC_FC_LE,
					"Link Status: UP_UP",
					strlen("Link Status: UP_UP"));
			}
		}
	} else if (fnic->link_status) {
		/* DOWN -> UP */
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		if (fnic->config.flags & VFCF_FIP_CAPABLE) {
			/* start FCoE VLAN discovery */
			fnic_fc_trace_set_data(
				fnic->lport->host->host_no,
				FNIC_FC_LE, "Link Status: DOWN_UP_VLAN",
				strlen("Link Status: DOWN_UP_VLAN"));
			fnic_fcoe_send_vlan_req(fnic);
			return;
		}
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n");
		fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_LE,
			"Link Status: DOWN_UP", strlen("Link Status: DOWN_UP"));
		fcoe_ctlr_link_up(&fnic->ctlr);
	} else {
		/* UP -> DOWN */
		fnic->lport->host_stats.link_failure_count++;
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n");
		fnic_fc_trace_set_data(
			fnic->lport->host->host_no, FNIC_FC_LE,
			"Link Status: UP_DOWN",
			strlen("Link Status: UP_DOWN"));
		if (fnic->config.flags & VFCF_FIP_CAPABLE) {
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				  "deleting fip-timer during link-down\n");
			del_timer_sync(&fnic->fip_timer);
		}
		fcoe_ctlr_link_down(&fnic->ctlr);
	}
}
/*
 * This function passes incoming fabric frames to libFC
 */
void fnic_handle_frame(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, frame_work);
	struct fc_lport *lp = fnic->lport;
	unsigned long flags;
	struct sk_buff *skb;
	struct fc_frame *fp;

	while ((skb = skb_dequeue(&fnic->frame_queue))) {

		spin_lock_irqsave(&fnic->fnic_lock, flags);
		if (fnic->stop_rx_link_events) {
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			dev_kfree_skb(skb);
			return;
		}
		fp = (struct fc_frame *)skb;

		/*
		 * If we're in a transitional state, just re-queue and return.
		 * The queue will be serviced when we get to a stable state.
		 */
		if (fnic->state != FNIC_IN_FC_MODE &&
		    fnic->state != FNIC_IN_ETH_MODE) {
			skb_queue_head(&fnic->frame_queue, skb);
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			return;
		}
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);

		fc_exch_recv(lp, fp);
	}
}
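
/*
 * Free all events still queued on fnic->evlist, e.g. when no further
 * event processing is going to happen.
 */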
void fnic_fcoe_evlist_free(struct fnic *fnic)
{
	struct fnic_event *fevt = NULL;
	struct fnic_event *next = NULL;
	unsigned long flags;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (list_empty(&fnic->evlist)) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}

	list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
		list_del(&fevt->list);
		kfree(fevt);
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}
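
/*
 * Work handler for fnic->event_work: drain fnic->evlist and act on each
 * event (start VLAN discovery or FCF discovery). Processing stops when
 * link events are stopped or the fnic is in a transitional state.
 */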
void fnic_handle_event(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, event_work);
	struct fnic_event *fevt = NULL;
	struct fnic_event *next = NULL;
	unsigned long flags;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (list_empty(&fnic->evlist)) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}

	list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
		if (fnic->stop_rx_link_events) {
			list_del(&fevt->list);
			kfree(fevt);
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			return;
		}
		/*
		 * If we're in a transitional state, just re-queue and return.
		 * The queue will be serviced when we get to a stable state.
		 */
		if (fnic->state != FNIC_IN_FC_MODE &&
		    fnic->state != FNIC_IN_ETH_MODE) {
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			return;
		}

		list_del(&fevt->list);
		switch (fevt->event) {
		case FNIC_EVT_START_VLAN_DISC:
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			fnic_fcoe_send_vlan_req(fnic);
			spin_lock_irqsave(&fnic->fnic_lock, flags);
			break;
		case FNIC_EVT_START_FCF_DISC:
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				  "Start FCF Discovery\n");
			fnic_fcoe_start_fcf_disc(fnic);
			break;
		default:
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				  "Unknown event 0x%x\n", fevt->event);
			break;
		}
		kfree(fevt);
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}
/**
 * Check if the Received FIP FLOGI frame is rejected
 * @fip: The FCoE controller that received the frame
 * @skb: The received FIP frame
 *
 * Returns non-zero if the frame is rejected with unsupported cmd with
 * insufficient resource els explanation.
 */
static inline int is_fnic_fip_flogi_reject(struct fcoe_ctlr *fip,
					   struct sk_buff *skb)
{
	struct fc_lport *lport = fip->lp;
	struct fip_header *fiph;
	struct fc_frame_header *fh = NULL;
	struct fip_desc *desc;
	struct fip_encaps *els;
	enum fip_desc_type els_dtype = 0;
	u16 op;
	u8 els_op;
	u8 sub;

	size_t els_len = 0;
	size_t rlen;
	size_t dlen = 0;

	if (skb_linearize(skb))
		return 0;

	if (skb->len < sizeof(*fiph))
		return 0;

	fiph = (struct fip_header *)skb->data;
	op = ntohs(fiph->fip_op);
	sub = fiph->fip_subcode;

	if (op != FIP_OP_LS)
		return 0;

	if (sub != FIP_SC_REP)
		return 0;

	rlen = ntohs(fiph->fip_dl_len) * 4;
	if (rlen + sizeof(*fiph) > skb->len)
		return 0;

	desc = (struct fip_desc *)(fiph + 1);
	dlen = desc->fip_dlen * FIP_BPW;

	if (desc->fip_dtype == FIP_DT_FLOGI) {

		if (dlen < sizeof(*els) + sizeof(*fh) + 1)
			return 0;

		els_len = dlen - sizeof(*els);
		els = (struct fip_encaps *)desc;
		fh = (struct fc_frame_header *)(els + 1);
		els_dtype = desc->fip_dtype;

		if (!fh)
			return 0;

		/*
		 * ELS command code, reason and explanation should be = Reject,
		 * unsupported command and insufficient resource
		 */
		els_op = *(u8 *)(fh + 1);
		if (els_op == ELS_LS_RJT) {
			shost_printk(KERN_INFO, lport->host,
				  "Flogi Request Rejected by Switch\n");
			return 1;
		}
		shost_printk(KERN_INFO, lport->host,
				"Flogi Request Accepted by Switch\n");
	}
	return 0;
}
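
/*
 * Build and transmit a FIP VLAN discovery request (FIP_OP_VLAN /
 * FIP_SC_VL_REQ) to the ALL-FCF-MACs address, then arm the FIP timer so
 * the request can be retried if no response arrives.
 */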
static void fnic_fcoe_send_vlan_req(struct fnic *fnic)
{
	struct fcoe_ctlr *fip = &fnic->ctlr;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	struct sk_buff *skb;
	char *eth_fr;
	int fr_len;
	struct fip_vlan *vlan;
	u64 vlan_tov;

	fnic_fcoe_reset_vlans(fnic);
	fnic->set_vlan(fnic, 0);

	if (printk_ratelimit())
		FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
			  "Sending VLAN request...\n");

	skb = dev_alloc_skb(sizeof(struct fip_vlan));
	if (!skb)
		return;

	fr_len = sizeof(*vlan);
	eth_fr = (char *)skb->data;
	vlan = (struct fip_vlan *)eth_fr;

	memset(vlan, 0, sizeof(*vlan));
	memcpy(vlan->eth.h_source, fip->ctl_src_addr, ETH_ALEN);
	memcpy(vlan->eth.h_dest, fcoe_all_fcfs, ETH_ALEN);
	vlan->eth.h_proto = htons(ETH_P_FIP);

	vlan->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER);
	vlan->fip.fip_op = htons(FIP_OP_VLAN);
	vlan->fip.fip_subcode = FIP_SC_VL_REQ;
	vlan->fip.fip_dl_len = htons(sizeof(vlan->desc) / FIP_BPW);

	vlan->desc.mac.fd_desc.fip_dtype = FIP_DT_MAC;
	vlan->desc.mac.fd_desc.fip_dlen = sizeof(vlan->desc.mac) / FIP_BPW;
	memcpy(&vlan->desc.mac.fd_mac, fip->ctl_src_addr, ETH_ALEN);

	vlan->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME;
	vlan->desc.wwnn.fd_desc.fip_dlen = sizeof(vlan->desc.wwnn) / FIP_BPW;
	put_unaligned_be64(fip->lp->wwnn, &vlan->desc.wwnn.fd_wwn);
	atomic64_inc(&fnic_stats->vlan_stats.vlan_disc_reqs);

	skb_put(skb, sizeof(*vlan));
	skb->protocol = htons(ETH_P_FIP);
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	fip->send(fip, skb);

	/* set a timer so that we can retry if there no response */
	vlan_tov = jiffies + msecs_to_jiffies(FCOE_CTLR_FIPVLAN_TOV);
	mod_timer(&fnic->fip_timer, round_jiffies(vlan_tov));
}
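
/*
 * Parse a FIP VLAN notification: collect the advertised VLAN IDs into
 * fnic->vlans, program the first one into the hardware, and bring the
 * FCoE controller link up to start FCF solicitation.
 */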
static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb)
{
	struct fcoe_ctlr *fip = &fnic->ctlr;
	struct fip_header *fiph;
	struct fip_desc *desc;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	u16 vid;
	size_t rlen;
	size_t dlen;
	struct fcoe_vlan *vlan;
	u64 sol_time;
	unsigned long flags;

	FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
		  "Received VLAN response...\n");

	fiph = (struct fip_header *) skb->data;

	FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
		  "Received VLAN response... OP 0x%x SUB_OP 0x%x\n",
		  ntohs(fiph->fip_op), fiph->fip_subcode);

	rlen = ntohs(fiph->fip_dl_len) * 4;
	fnic_fcoe_reset_vlans(fnic);
	spin_lock_irqsave(&fnic->vlans_lock, flags);
	desc = (struct fip_desc *)(fiph + 1);
	while (rlen > 0) {
		dlen = desc->fip_dlen * FIP_BPW;
		switch (desc->fip_dtype) {
		case FIP_DT_VLAN:
			vid = ntohs(((struct fip_vlan_desc *)desc)->fd_vlan);
			shost_printk(KERN_INFO, fnic->lport->host,
				  "process_vlan_resp: FIP VLAN %d\n", vid);
			vlan = kmalloc(sizeof(*vlan), GFP_ATOMIC);
			if (!vlan) {
				/* retry from timer */
				spin_unlock_irqrestore(&fnic->vlans_lock,
							flags);
				goto out;
			}
			memset(vlan, 0, sizeof(struct fcoe_vlan));
			vlan->vid = vid & 0x0fff;
			vlan->state = FIP_VLAN_AVAIL;
			list_add_tail(&vlan->list, &fnic->vlans);
			break;
		}
		desc = (struct fip_desc *)((char *)desc + dlen);
		rlen -= dlen;
	}

	/* any VLAN descriptors present ? */
	if (list_empty(&fnic->vlans)) {
		/* retry from timer */
		atomic64_inc(&fnic_stats->vlan_stats.resp_withno_vlanID);
		FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
			  "No VLAN descriptors in FIP VLAN response\n");
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		goto out;
	}

	vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
	fnic->set_vlan(fnic, vlan->vid);
	vlan->state = FIP_VLAN_SENT; /* sent now */
	vlan->sol_count++;
	spin_unlock_irqrestore(&fnic->vlans_lock, flags);

	/* start the solicitation */
	fcoe_ctlr_link_up(fip);

	sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
	mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
out:
	return;
}
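
/*
 * Start FCF discovery on the currently selected VLAN: mark it as sent,
 * signal link-up to libfcoe and arm the FIP timer for the solicitation.
 */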
static void fnic_fcoe_start_fcf_disc(struct fnic *fnic)
{
	unsigned long flags;
	struct fcoe_vlan *vlan;
	u64 sol_time;

	spin_lock_irqsave(&fnic->vlans_lock, flags);
	vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
	fnic->set_vlan(fnic, vlan->vid);
	vlan->state = FIP_VLAN_SENT; /* sent now */
	vlan->sol_count++;
	spin_unlock_irqrestore(&fnic->vlans_lock, flags);

	/* start the solicitation */
	fcoe_ctlr_link_up(&fnic->ctlr);

	sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
	mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
}
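
/*
 * Called when an FCF advertisement is received: if a VLAN request is
 * outstanding, mark the selected VLAN as in use. Returns 0 when the
 * advertisement may be passed on to libfcoe, -EINVAL otherwise.
 */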
static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag)
{
	unsigned long flags;
	struct fcoe_vlan *fvlan;

	spin_lock_irqsave(&fnic->vlans_lock, flags);
	if (list_empty(&fnic->vlans)) {
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		return -EINVAL;
	}

	fvlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
	if (fvlan->state == FIP_VLAN_USED) {
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		return 0;
	}

	if (fvlan->state == FIP_VLAN_SENT) {
		fvlan->state = FIP_VLAN_USED;
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		return 0;
	}
	spin_unlock_irqrestore(&fnic->vlans_lock, flags);
	return -EINVAL;
}
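
/*
 * Queue a discovery event on fnic->evlist and kick the event worker.
 */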
static void fnic_event_enq(struct fnic *fnic, enum fnic_evt ev)
{
	struct fnic_event *fevt;
	unsigned long flags;

	fevt = kmalloc(sizeof(*fevt), GFP_ATOMIC);
	if (!fevt)
		return;

	fevt->fnic = fnic;
	fevt->event = ev;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	list_add_tail(&fevt->list, &fnic->evlist);
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	schedule_work(&fnic->event_work);
}
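
/*
 * Pre-process a received FIP frame before it is handed to libfcoe.
 * Returns a value <= 0 when the caller should free the frame (consumed
 * here or invalid), and > 0 when it should still be passed to
 * fcoe_ctlr_recv().
 */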
static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb)
{
	struct fip_header *fiph;
	int ret = 1;
	u16 op;
	u8 sub;

	if (!skb || !(skb->data))
		return -1;

	if (skb_linearize(skb))
		goto drop;

	fiph = (struct fip_header *)skb->data;
	op = ntohs(fiph->fip_op);
	sub = fiph->fip_subcode;

	if (FIP_VER_DECAPS(fiph->fip_ver) != FIP_VER)
		goto drop;

	if (ntohs(fiph->fip_dl_len) * FIP_BPW + sizeof(*fiph) > skb->len)
		goto drop;

	if (op == FIP_OP_DISC && sub == FIP_SC_ADV) {
		if (fnic_fcoe_vlan_check(fnic, ntohs(fiph->fip_flags)))
			goto drop;
		/* pass it on to fcoe */
		ret = 1;
	} else if (op == FIP_OP_VLAN && sub == FIP_SC_VL_NOTE) {
		/* set the vlan as used */
		fnic_fcoe_process_vlan_resp(fnic, skb);
		ret = 0;
	} else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK) {
		/* received CVL request, restart vlan disc */
		fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
		/* pass it on to fcoe */
		ret = 1;
	}
drop:
	return ret;
}
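
/*
 * Work handler for fnic->fip_frame_work: drain the FIP receive queue,
 * run driver-level FIP processing, and hand remaining frames to
 * libfcoe. A FLOGI reject triggers a link bounce and a fresh VLAN
 * discovery.
 */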
void fnic_handle_fip_frame(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, fip_frame_work);
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	unsigned long flags;
	struct sk_buff *skb;
	struct ethhdr *eh;

	while ((skb = skb_dequeue(&fnic->fip_frame_queue))) {
		spin_lock_irqsave(&fnic->fnic_lock, flags);
		if (fnic->stop_rx_link_events) {
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			dev_kfree_skb(skb);
			return;
		}
		/*
		 * If we're in a transitional state, just re-queue and return.
		 * The queue will be serviced when we get to a stable state.
		 */
		if (fnic->state != FNIC_IN_FC_MODE &&
		    fnic->state != FNIC_IN_ETH_MODE) {
			skb_queue_head(&fnic->fip_frame_queue, skb);
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			return;
		}
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		eh = (struct ethhdr *)skb->data;
		if (eh->h_proto == htons(ETH_P_FIP)) {
			skb_pull(skb, sizeof(*eh));
			if (fnic_fcoe_handle_fip_frame(fnic, skb) <= 0) {
				dev_kfree_skb(skb);
				continue;
			}
			/*
			 * If there's FLOGI rejects - clear all
			 * fcf's & restart from scratch
			 */
			if (is_fnic_fip_flogi_reject(&fnic->ctlr, skb)) {
				atomic64_inc(
					&fnic_stats->vlan_stats.flogi_rejects);
				shost_printk(KERN_INFO, fnic->lport->host,
					  "Trigger a Link down - VLAN Disc\n");
				fcoe_ctlr_link_down(&fnic->ctlr);
				/* start FCoE VLAN discovery */
				fnic_fcoe_send_vlan_req(fnic);
				dev_kfree_skb(skb);
				continue;
			}
			fcoe_ctlr_recv(&fnic->ctlr, skb);
			continue;
		}
	}
}
/**
 * fnic_import_rq_eth_pkt() - handle received FCoE or FIP frame.
 * @fnic:	fnic instance.
 * @skb:	Ethernet Frame.
 */
static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb)
{
	struct fc_frame *fp;
	struct ethhdr *eh;
	struct fcoe_hdr *fcoe_hdr;
	struct fcoe_crc_eof *ft;

	/*
	 * Undo VLAN encapsulation if present.
	 */
	eh = (struct ethhdr *)skb->data;
	if (eh->h_proto == htons(ETH_P_8021Q)) {
		memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2);
		eh = skb_pull(skb, VLAN_HLEN);
		skb_reset_mac_header(skb);
	}
	if (eh->h_proto == htons(ETH_P_FIP)) {
		if (!(fnic->config.flags & VFCF_FIP_CAPABLE)) {
			printk(KERN_ERR "Dropped FIP frame, as firmware "
					"uses non-FIP mode, Enable FIP "
					"using UCSM\n");
			goto drop;
		}
		if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
			FNIC_FC_RECV|0x80, (char *)skb->data, skb->len)) != 0) {
			printk(KERN_ERR "fnic ctlr frame trace error!!!");
		}
		skb_queue_tail(&fnic->fip_frame_queue, skb);
		queue_work(fnic_fip_queue, &fnic->fip_frame_work);
		return 1;		/* let caller know packet was used */
	}
	if (eh->h_proto != htons(ETH_P_FCOE))
		goto drop;
	skb_set_network_header(skb, sizeof(*eh));
	skb_pull(skb, sizeof(*eh));

	fcoe_hdr = (struct fcoe_hdr *)skb->data;
	if (FC_FCOE_DECAPS_VER(fcoe_hdr) != FC_FCOE_VER)
		goto drop;

	fp = (struct fc_frame *)skb;
	fc_frame_init(fp);
	fr_sof(fp) = fcoe_hdr->fcoe_sof;
	skb_pull(skb, sizeof(struct fcoe_hdr));
	skb_reset_transport_header(skb);

	ft = (struct fcoe_crc_eof *)(skb->data + skb->len - sizeof(*ft));
	fr_eof(fp) = ft->fcoe_eof;
	skb_trim(skb, skb->len - sizeof(*ft));
	return 0;
drop:
	dev_kfree_skb_irq(skb);
	return -1;
}
/**
 * fnic_update_mac_locked() - set data MAC address and filters.
 * @fnic:	fnic instance.
 * @new:	newly-assigned FCoE MAC address.
 *
 * Called with the fnic lock held.
 */
void fnic_update_mac_locked(struct fnic *fnic, u8 *new)
{
	u8 *ctl = fnic->ctlr.ctl_src_addr;
	u8 *data = fnic->data_src_addr;

	if (is_zero_ether_addr(new))
		new = ctl;
	if (ether_addr_equal(data, new))
		return;
	FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "update_mac %pM\n", new);
	if (!is_zero_ether_addr(data) && !ether_addr_equal(data, ctl))
		vnic_dev_del_addr(fnic->vdev, data);
	memcpy(data, new, ETH_ALEN);
	if (!ether_addr_equal(new, ctl))
		vnic_dev_add_addr(fnic->vdev, new);
}
/**
 * fnic_update_mac() - set data MAC address and filters.
 * @lport:	local port.
 * @new:	newly-assigned FCoE MAC address.
 */
void fnic_update_mac(struct fc_lport *lport, u8 *new)
{
	struct fnic *fnic = lport_priv(lport);

	spin_lock_irq(&fnic->fnic_lock);
	fnic_update_mac_locked(fnic, new);
	spin_unlock_irq(&fnic->fnic_lock);
}
/**
 * fnic_set_port_id() - set the port_ID after successful FLOGI.
 * @lport:	local port.
 * @port_id:	assigned FC_ID.
 * @fp:		received frame containing the FLOGI accept or NULL.
 *
 * This is called from libfc when a new FC_ID has been assigned.
 * This causes us to reset the firmware to FC_MODE and setup the new MAC
 * address and FC_ID.
 *
 * It is also called with FC_ID 0 when we're logged off.
 *
 * If the FC_ID is due to point-to-point, fp may be NULL.
 */
void fnic_set_port_id(struct fc_lport *lport, u32 port_id, struct fc_frame *fp)
{
	struct fnic *fnic = lport_priv(lport);
	u8 *mac;
	int ret;

	FNIC_FCS_DBG(KERN_DEBUG, lport->host, "set port_id %x fp %p\n",
		     port_id, fp);

	/*
	 * If we're clearing the FC_ID, change to use the ctl_src_addr.
	 * Set ethernet mode to send FLOGI.
	 */
	if (!port_id) {
		fnic_update_mac(lport, fnic->ctlr.ctl_src_addr);
		fnic_set_eth_mode(fnic);
		return;
	}

	if (fp) {
		mac = fr_cb(fp)->granted_mac;
		if (is_zero_ether_addr(mac)) {
			/* non-FIP - FLOGI already accepted - ignore return */
			fcoe_ctlr_recv_flogi(&fnic->ctlr, lport, fp);
		}
		fnic_update_mac(lport, mac);
	}

	/* Change state to reflect transition to FC mode */
	spin_lock_irq(&fnic->fnic_lock);
	if (fnic->state == FNIC_IN_ETH_MODE || fnic->state == FNIC_IN_FC_MODE)
		fnic->state = FNIC_IN_ETH_TRANS_FC_MODE;
	else {
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "Unexpected fnic state %s while"
			     " processing flogi resp\n",
			     fnic_state_to_str(fnic->state));
		spin_unlock_irq(&fnic->fnic_lock);
		return;
	}
	spin_unlock_irq(&fnic->fnic_lock);

	/*
	 * Send FLOGI registration to firmware to set up FC mode.
	 * The new address will be set up when registration completes.
	 */
	ret = fnic_flogi_reg_handler(fnic, port_id);

	if (ret < 0) {
		spin_lock_irq(&fnic->fnic_lock);
		if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE)
			fnic->state = FNIC_IN_ETH_MODE;
		spin_unlock_irq(&fnic->fnic_lock);
	}
}
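
/*
 * Receive-queue completion handler: unmap the buffer, decode the FCP or
 * Ethernet completion descriptor, and either hand the frame to the FIP
 * path or queue it on fnic->frame_queue for fnic_handle_frame().
 */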
static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
				    *cq_desc, struct vnic_rq_buf *buf,
				    int skipped __attribute__((unused)),
				    void *opaque)
{
	struct fnic *fnic = vnic_dev_priv(rq->vdev);
	struct sk_buff *skb;
	struct fc_frame *fp;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	unsigned int eth_hdrs_stripped;
	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe = 0, fcoe_sof, fcoe_eof;
	u8 fcoe_fc_crc_ok = 1, fcoe_enc_error = 0;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, rss_type, csum_not_calc;
	u8 fcs_ok = 1, packet_error = 0;
	u16 q_number, completed_index, bytes_written = 0, vlan, checksum;
	u32 rss_hash;
	u16 exchange_id, tmpl;
	u8 sof = 0;
	u8 eof = 0;
	u32 fcp_bytes_written = 0;
	unsigned long flags;

	pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
			 PCI_DMA_FROMDEVICE);
	skb = buf->os_buf;
	fp = (struct fc_frame *)skb;
	buf->os_buf = NULL;

	cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index);
	if (type == CQ_DESC_TYPE_RQ_FCP) {
		cq_fcp_rq_desc_dec((struct cq_fcp_rq_desc *)cq_desc,
				   &type, &color, &q_number, &completed_index,
				   &eop, &sop, &fcoe_fc_crc_ok, &exchange_id,
				   &tmpl, &fcp_bytes_written, &sof, &eof,
				   &ingress_port, &packet_error,
				   &fcoe_enc_error, &fcs_ok, &vlan_stripped,
				   &vlan);
		eth_hdrs_stripped = 1;
		skb_trim(skb, fcp_bytes_written);
		fr_sof(fp) = sof;
		fr_eof(fp) = eof;

	} else if (type == CQ_DESC_TYPE_RQ_ENET) {
		cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
				    &type, &color, &q_number, &completed_index,
				    &ingress_port, &fcoe, &eop, &sop,
				    &rss_type, &csum_not_calc, &rss_hash,
				    &bytes_written, &packet_error,
				    &vlan_stripped, &vlan, &checksum,
				    &fcoe_sof, &fcoe_fc_crc_ok,
				    &fcoe_enc_error, &fcoe_eof,
				    &tcp_udp_csum_ok, &udp, &tcp,
				    &ipv4_csum_ok, &ipv6, &ipv4,
				    &ipv4_fragment, &fcs_ok);
		eth_hdrs_stripped = 0;
		skb_trim(skb, bytes_written);
		if (!fcs_ok) {
			atomic64_inc(&fnic_stats->misc_stats.frame_errors);
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				     "fcs error. dropping packet.\n");
			goto drop;
		}
		if (fnic_import_rq_eth_pkt(fnic, skb))
			return;

	} else {
		/* wrong CQ type */
		shost_printk(KERN_ERR, fnic->lport->host,
			     "fnic rq_cmpl wrong cq type x%x\n", type);
		goto drop;
	}

	if (!fcs_ok || packet_error || !fcoe_fc_crc_ok || fcoe_enc_error) {
		atomic64_inc(&fnic_stats->misc_stats.frame_errors);
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "fnic rq_cmpl fcoe x%x fcsok x%x"
			     " pkterr x%x fcoe_fc_crc_ok x%x, fcoe_enc_err"
			     " x%x\n",
			     fcoe, fcs_ok, packet_error,
			     fcoe_fc_crc_ok, fcoe_enc_error);
		goto drop;
	}

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (fnic->stop_rx_link_events) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		goto drop;
	}
	fr_dev(fp) = fnic->lport;
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
	if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_RECV,
					(char *)skb->data, skb->len)) != 0) {
		printk(KERN_ERR "fnic ctlr frame trace error!!!");
	}

	skb_queue_tail(&fnic->frame_queue, skb);
	queue_work(fnic_event_queue, &fnic->frame_work);

	return;
drop:
	dev_kfree_skb_irq(skb);
}
static int fnic_rq_cmpl_handler_cont(struct vnic_dev *vdev,
				     struct cq_desc *cq_desc, u8 type,
				     u16 q_number, u16 completed_index,
				     void *opaque)
{
	struct fnic *fnic = vnic_dev_priv(vdev);

	vnic_rq_service(&fnic->rq[q_number], cq_desc, completed_index,
			VNIC_RQ_RETURN_DESC, fnic_rq_cmpl_frame_recv,
			NULL);
	return 0;
}

int fnic_rq_cmpl_handler(struct fnic *fnic, int rq_work_to_do)
{
	unsigned int tot_rq_work_done = 0, cur_work_done;
	unsigned int i;
	int err;

	for (i = 0; i < fnic->rq_count; i++) {
		cur_work_done = vnic_cq_service(&fnic->cq[i], rq_work_to_do,
						fnic_rq_cmpl_handler_cont,
						NULL);
		if (cur_work_done) {
			err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
			if (err)
				shost_printk(KERN_ERR, fnic->lport->host,
					     "fnic_alloc_rq_frame can't alloc"
					     " frame\n");
		}
		tot_rq_work_done += cur_work_done;
	}

	return tot_rq_work_done;
}
/*
 * This function is called once at init time to allocate and fill RQ
 * buffers. Subsequently, it is called in the interrupt context after RQ
 * buffer processing to replenish the buffers in the RQ
 */
int fnic_alloc_rq_frame(struct vnic_rq *rq)
{
	struct fnic *fnic = vnic_dev_priv(rq->vdev);
	struct sk_buff *skb;
	u16 len;
	dma_addr_t pa;
	int r;

	len = FC_FRAME_HEADROOM + FC_MAX_FRAME + FC_FRAME_TAILROOM;
	skb = dev_alloc_skb(len);
	if (!skb) {
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				"Unable to allocate RQ sk_buff\n");
		return -ENOMEM;
	}
	skb_reset_mac_header(skb);
	skb_reset_transport_header(skb);
	skb_reset_network_header(skb);
	skb_put(skb, len);
	pa = pci_map_single(fnic->pdev, skb->data, len, PCI_DMA_FROMDEVICE);

	if (pci_dma_mapping_error(fnic->pdev, pa)) {
		r = -ENOMEM;
		printk(KERN_ERR "PCI mapping failed with error %d\n", r);
		goto free_skb;
	}

	fnic_queue_rq_desc(rq, skb, pa, len);
	return 0;

free_skb:
	kfree_skb(skb);
	return r;
}
void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
	struct fc_frame *fp = buf->os_buf;
	struct fnic *fnic = vnic_dev_priv(rq->vdev);

	pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
			 PCI_DMA_FROMDEVICE);

	dev_kfree_skb(fp_skb(fp));
	buf->os_buf = NULL;
}
/**
 * fnic_eth_send() - Send Ethernet frame.
 * @fip:	fcoe_ctlr instance.
 * @skb:	Ethernet Frame, FIP, without VLAN encapsulation.
 */
void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
{
	struct fnic *fnic = fnic_from_ctlr(fip);
	struct vnic_wq *wq = &fnic->wq[0];
	dma_addr_t pa;
	struct ethhdr *eth_hdr;
	struct vlan_ethhdr *vlan_hdr;
	unsigned long flags;
	int r;

	if (!fnic->vlan_hw_insert) {
		eth_hdr = (struct ethhdr *)skb_mac_header(skb);
		vlan_hdr = skb_push(skb, sizeof(*vlan_hdr) - sizeof(*eth_hdr));
		memcpy(vlan_hdr, eth_hdr, 2 * ETH_ALEN);
		vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
		vlan_hdr->h_vlan_encapsulated_proto = eth_hdr->h_proto;
		vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
		if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
			FNIC_FC_SEND|0x80, (char *)eth_hdr, skb->len)) != 0) {
			printk(KERN_ERR "fnic ctlr frame trace error!!!");
		}
	} else {
		if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
			FNIC_FC_SEND|0x80, (char *)skb->data, skb->len)) != 0) {
			printk(KERN_ERR "fnic ctlr frame trace error!!!");
		}
	}

	pa = pci_map_single(fnic->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);

	r = pci_dma_mapping_error(fnic->pdev, pa);
	if (r) {
		printk(KERN_ERR "PCI mapping failed with error %d\n", r);
		goto free_skb;
	}

	spin_lock_irqsave(&fnic->wq_lock[0], flags);
	if (!vnic_wq_desc_avail(wq))
		goto irq_restore;

	fnic_queue_wq_eth_desc(wq, skb, pa, skb->len,
			       0 /* hw inserts cos value */,
			       fnic->vlan_id, 1);
	spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
	return;

irq_restore:
	spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
	pci_unmap_single(fnic->pdev, pa, skb->len, PCI_DMA_TODEVICE);
free_skb:
	kfree_skb(skb);
}
/*
 * Send FC frame.
 */
static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
{
	struct vnic_wq *wq = &fnic->wq[0];
	struct sk_buff *skb;
	dma_addr_t pa;
	struct ethhdr *eth_hdr;
	struct vlan_ethhdr *vlan_hdr;
	struct fcoe_hdr *fcoe_hdr;
	struct fc_frame_header *fh;
	u32 tot_len, eth_hdr_len;
	int ret = 0;
	unsigned long flags;

	fh = fc_frame_header_get(fp);
	skb = fp_skb(fp);

	if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) &&
	    fcoe_ctlr_els_send(&fnic->ctlr, fnic->lport, skb))
		return 0;

	if (!fnic->vlan_hw_insert) {
		eth_hdr_len = sizeof(*vlan_hdr) + sizeof(*fcoe_hdr);
		vlan_hdr = skb_push(skb, eth_hdr_len);
		eth_hdr = (struct ethhdr *)vlan_hdr;
		vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
		vlan_hdr->h_vlan_encapsulated_proto = htons(ETH_P_FCOE);
		vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
		fcoe_hdr = (struct fcoe_hdr *)(vlan_hdr + 1);
	} else {
		eth_hdr_len = sizeof(*eth_hdr) + sizeof(*fcoe_hdr);
		eth_hdr = skb_push(skb, eth_hdr_len);
		eth_hdr->h_proto = htons(ETH_P_FCOE);
		fcoe_hdr = (struct fcoe_hdr *)(eth_hdr + 1);
	}

	if (fnic->ctlr.map_dest)
		fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id);
	else
		memcpy(eth_hdr->h_dest, fnic->ctlr.dest_addr, ETH_ALEN);
	memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN);

	tot_len = skb->len;
	BUG_ON(tot_len % 4);

	memset(fcoe_hdr, 0, sizeof(*fcoe_hdr));
	fcoe_hdr->fcoe_sof = fr_sof(fp);
	if (FC_FCOE_VER)
		FC_FCOE_ENCAPS_VER(fcoe_hdr, FC_FCOE_VER);

	pa = pci_map_single(fnic->pdev, eth_hdr, tot_len, PCI_DMA_TODEVICE);

	if (pci_dma_mapping_error(fnic->pdev, pa)) {
		ret = -ENOMEM;
		printk(KERN_ERR "DMA map failed with error %d\n", ret);
		goto free_skb_on_err;
	}

	if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_SEND,
				(char *)eth_hdr, tot_len)) != 0) {
		printk(KERN_ERR "fnic ctlr frame trace error!!!");
	}

	spin_lock_irqsave(&fnic->wq_lock[0], flags);

	if (!vnic_wq_desc_avail(wq)) {
		pci_unmap_single(fnic->pdev, pa,
				 tot_len, PCI_DMA_TODEVICE);
		ret = -1;
		goto irq_restore;
	}

	fnic_queue_wq_desc(wq, skb, pa, tot_len, fr_eof(fp),
			   0 /* hw inserts cos value */,
			   fnic->vlan_id, 1, 1, 1);

irq_restore:
	spin_unlock_irqrestore(&fnic->wq_lock[0], flags);

free_skb_on_err:
	if (ret)
		dev_kfree_skb_any(fp_skb(fp));

	return ret;
}
/*
 * fnic_send
 * Routine to send a raw frame
 */
int fnic_send(struct fc_lport *lp, struct fc_frame *fp)
{
	struct fnic *fnic = lport_priv(lp);
	unsigned long flags;

	if (fnic->in_remove) {
		dev_kfree_skb(fp_skb(fp));
		return -1;
	}

	/*
	 * Queue frame if in a transitional state.
	 * This occurs while registering the Port_ID / MAC address after FLOGI.
	 */
	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (fnic->state != FNIC_IN_FC_MODE && fnic->state != FNIC_IN_ETH_MODE) {
		skb_queue_tail(&fnic->tx_queue, fp_skb(fp));
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return 0;
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	return fnic_send_frame(fnic, fp);
}
/**
 * fnic_flush_tx() - send queued frames.
 * @fnic: fnic device
 *
 * Send frames that were waiting to go out in FC or Ethernet mode.
 * Whenever changing modes we purge queued frames, so these frames should
 * be queued for the stable mode that we're in, either FC or Ethernet.
 *
 * Called without fnic_lock held.
 */
void fnic_flush_tx(struct fnic *fnic)
{
	struct sk_buff *skb;
	struct fc_frame *fp;

	while ((skb = skb_dequeue(&fnic->tx_queue))) {
		fp = (struct fc_frame *)skb;
		fnic_send_frame(fnic, fp);
	}
}
/**
 * fnic_set_eth_mode() - put fnic into ethernet mode.
 * @fnic: fnic device
 *
 * Called without fnic lock held.
 */
static void fnic_set_eth_mode(struct fnic *fnic)
{
	unsigned long flags;
	enum fnic_state old_state;
	int ret;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
again:
	old_state = fnic->state;
	switch (old_state) {
	case FNIC_IN_FC_MODE:
	case FNIC_IN_ETH_TRANS_FC_MODE:
	default:
		fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);

		ret = fnic_fw_reset_handler(fnic);

		spin_lock_irqsave(&fnic->fnic_lock, flags);
		if (fnic->state != FNIC_IN_FC_TRANS_ETH_MODE)
			goto again;
		if (ret)
			fnic->state = old_state;
		break;

	case FNIC_IN_FC_TRANS_ETH_MODE:
	case FNIC_IN_ETH_MODE:
		break;
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}
static void fnic_wq_complete_frame_send(struct vnic_wq *wq,
					struct cq_desc *cq_desc,
					struct vnic_wq_buf *buf, void *opaque)
{
	struct sk_buff *skb = buf->os_buf;
	struct fc_frame *fp = (struct fc_frame *)skb;
	struct fnic *fnic = vnic_dev_priv(wq->vdev);

	pci_unmap_single(fnic->pdev, buf->dma_addr,
			 buf->len, PCI_DMA_TODEVICE);
	dev_kfree_skb_irq(fp_skb(fp));
	buf->os_buf = NULL;
}

static int fnic_wq_cmpl_handler_cont(struct vnic_dev *vdev,
				     struct cq_desc *cq_desc, u8 type,
				     u16 q_number, u16 completed_index,
				     void *opaque)
{
	struct fnic *fnic = vnic_dev_priv(vdev);
	unsigned long flags;

	spin_lock_irqsave(&fnic->wq_lock[q_number], flags);
	vnic_wq_service(&fnic->wq[q_number], cq_desc, completed_index,
			fnic_wq_complete_frame_send, NULL);
	spin_unlock_irqrestore(&fnic->wq_lock[q_number], flags);

	return 0;
}
int fnic_wq_cmpl_handler(struct fnic *fnic, int work_to_do)
{
	unsigned int wq_work_done = 0;
	unsigned int i;

	for (i = 0; i < fnic->raw_wq_count; i++) {
		wq_work_done += vnic_cq_service(&fnic->cq[fnic->rq_count+i],
						work_to_do,
						fnic_wq_cmpl_handler_cont,
						NULL);
	}

	return wq_work_done;
}
void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
	struct fc_frame *fp = buf->os_buf;
	struct fnic *fnic = vnic_dev_priv(wq->vdev);

	pci_unmap_single(fnic->pdev, buf->dma_addr,
			 buf->len, PCI_DMA_TODEVICE);

	dev_kfree_skb(fp_skb(fp));
	buf->os_buf = NULL;
}
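
/*
 * Discard every VLAN discovered so far; called before sending a new
 * FIP VLAN discovery request.
 */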
void fnic_fcoe_reset_vlans(struct fnic *fnic)
{
	unsigned long flags;
	struct fcoe_vlan *vlan;
	struct fcoe_vlan *next;

	/*
	 * indicate a link down to fcoe so that all fcf's are free'd
	 * might not be required since we did this before sending vlan
	 * discovery request
	 */
	spin_lock_irqsave(&fnic->vlans_lock, flags);
	if (!list_empty(&fnic->vlans)) {
		list_for_each_entry_safe(vlan, next, &fnic->vlans, list) {
			list_del(&vlan->list);
			kfree(vlan);
		}
	}
	spin_unlock_irqrestore(&fnic->vlans_lock, flags);
}
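
/*
 * FIP timer handler: restart VLAN discovery when no VLANs are known or
 * all have failed, and re-solicit (or move to the next VLAN) when the
 * current one has produced no FCF within FCOE_CTLR_MAX_SOL attempts.
 */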
void fnic_handle_fip_timer(struct fnic *fnic)
{
	unsigned long flags;
	struct fcoe_vlan *vlan;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	u64 sol_time;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (fnic->stop_rx_link_events) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	if (fnic->ctlr.mode == FIP_MODE_NON_FIP)
		return;

	spin_lock_irqsave(&fnic->vlans_lock, flags);
	if (list_empty(&fnic->vlans)) {
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		/* no vlans available, try again */
		if (printk_ratelimit())
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				  "Start VLAN Discovery\n");
		fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
		return;
	}

	vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
	shost_printk(KERN_DEBUG, fnic->lport->host,
		  "fip_timer: vlan %d state %d sol_count %d\n",
		  vlan->vid, vlan->state, vlan->sol_count);
	switch (vlan->state) {
	case FIP_VLAN_USED:
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			  "FIP VLAN is selected for FC transaction\n");
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		break;
	case FIP_VLAN_FAILED:
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		/* if all vlans are in failed state, restart vlan disc */
		if (printk_ratelimit())
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				  "Start VLAN Discovery\n");
		fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
		break;
	case FIP_VLAN_SENT:
		if (vlan->sol_count >= FCOE_CTLR_MAX_SOL) {
			/*
			 * no response on this vlan, remove from the list.
			 * Try the next vlan
			 */
			shost_printk(KERN_INFO, fnic->lport->host,
				  "Dequeue this VLAN ID %d from list\n",
				  vlan->vid);
			list_del(&vlan->list);
			kfree(vlan);
			vlan = NULL;
			if (list_empty(&fnic->vlans)) {
				/* we exhausted all vlans, restart vlan disc */
				spin_unlock_irqrestore(&fnic->vlans_lock,
							flags);
				shost_printk(KERN_INFO, fnic->lport->host,
					  "fip_timer: vlan list empty, "
					  "trigger vlan disc\n");
				fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
				return;
			}
			/* check the next vlan */
			vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan,
							list);
			fnic->set_vlan(fnic, vlan->vid);
			vlan->state = FIP_VLAN_SENT; /* sent now */
		}
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		atomic64_inc(&fnic_stats->vlan_stats.sol_expiry_count);
		vlan->sol_count++;
		sol_time = jiffies + msecs_to_jiffies
					(FCOE_CTLR_START_DELAY);
		mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
		break;
	}
}