/*
 * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/workqueue.h>
#include <scsi/fc/fc_fip.h>
#include <scsi/fc/fc_els.h>
#include <scsi/fc/fc_fcoe.h>
#include <scsi/fc_frame.h>
#include <scsi/libfc.h>
#include "fnic_io.h"
#include "fnic.h"
#include "fnic_fip.h"
#include "cq_enet_desc.h"
#include "cq_exch_desc.h"
static u8 fcoe_all_fcfs[ETH_ALEN] = FIP_ALL_FCF_MACS;
struct workqueue_struct *fnic_fip_queue;
struct workqueue_struct *fnic_event_queue;
static void fnic_set_eth_mode(struct fnic *);
static void fnic_fcoe_send_vlan_req(struct fnic *fnic);
static void fnic_fcoe_start_fcf_disc(struct fnic *fnic);
static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *);
static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag);
static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb);
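/*
 * fnic_handle_link() - worker to process link-state changes.
 * Reads the current link status from the firmware and notifies the FCoE
 * controller of link up/down transitions; on FIP-capable adapters a
 * link-up (re)starts FCoE VLAN discovery.
 */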
void fnic_handle_link(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, link_work);
	unsigned long flags;
	int old_link_status;
	u32 old_link_down_cnt;

	spin_lock_irqsave(&fnic->fnic_lock, flags);

	if (fnic->stop_rx_link_events) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}

	old_link_down_cnt = fnic->link_down_cnt;
	old_link_status = fnic->link_status;
	fnic->link_status = vnic_dev_link_status(fnic->vdev);
	fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev);

	if (old_link_status == fnic->link_status) {
		if (!fnic->link_status)
			/* DOWN -> DOWN */
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		else {
			if (old_link_down_cnt != fnic->link_down_cnt) {
				/* UP -> DOWN -> UP */
				fnic->lport->host_stats.link_failure_count++;
				spin_unlock_irqrestore(&fnic->fnic_lock, flags);
				FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
					     "link down\n");
				fcoe_ctlr_link_down(&fnic->ctlr);
				if (fnic->config.flags & VFCF_FIP_CAPABLE) {
					/* start FCoE VLAN discovery */
					fnic_fcoe_send_vlan_req(fnic);
					return;
				}
				FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
					     "link up\n");
				fcoe_ctlr_link_up(&fnic->ctlr);
			} else
				/* UP -> UP */
				spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		}
	} else if (fnic->link_status) {
		/* DOWN -> UP */
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		if (fnic->config.flags & VFCF_FIP_CAPABLE) {
			/* start FCoE VLAN discovery */
			fnic_fcoe_send_vlan_req(fnic);
			return;
		}
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n");
		fcoe_ctlr_link_up(&fnic->ctlr);
	} else {
		/* UP -> DOWN */
		fnic->lport->host_stats.link_failure_count++;
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n");
		fcoe_ctlr_link_down(&fnic->ctlr);
	}
}
/*
 * This function passes incoming fabric frames to libFC
 */
void fnic_handle_frame(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, frame_work);
	struct fc_lport *lp = fnic->lport;
	unsigned long flags;
	struct sk_buff *skb;
	struct fc_frame *fp;

	while ((skb = skb_dequeue(&fnic->frame_queue))) {

		spin_lock_irqsave(&fnic->fnic_lock, flags);
		if (fnic->stop_rx_link_events) {
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			dev_kfree_skb(skb);
			return;
		}
		fp = (struct fc_frame *)skb;

		/*
		 * If we're in a transitional state, just re-queue and return.
		 * The queue will be serviced when we get to a stable state.
		 */
		if (fnic->state != FNIC_IN_FC_MODE &&
		    fnic->state != FNIC_IN_ETH_MODE) {
			skb_queue_head(&fnic->frame_queue, skb);
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			return;
		}
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);

		fc_exch_recv(lp, fp);
	}
}
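/*
 * fnic_fcoe_evlist_free() - free all pending entries on the fnic
 * event list, under the fnic lock.
 */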
void fnic_fcoe_evlist_free(struct fnic *fnic)
{
	struct fnic_event *fevt = NULL;
	struct fnic_event *next = NULL;
	unsigned long flags;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (list_empty(&fnic->evlist)) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}

	list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
		list_del(&fevt->list);
		kfree(fevt);
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}
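/*
 * fnic_handle_event() - worker that services the fnic event list,
 * dispatching VLAN discovery and FCF discovery requests.
 */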
void fnic_handle_event(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, event_work);
	struct fnic_event *fevt = NULL;
	struct fnic_event *next = NULL;
	unsigned long flags;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (list_empty(&fnic->evlist)) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}

	list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
		if (fnic->stop_rx_link_events) {
			list_del(&fevt->list);
			kfree(fevt);
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			return;
		}
		/*
		 * If we're in a transitional state, just re-queue and return.
		 * The queue will be serviced when we get to a stable state.
		 */
		if (fnic->state != FNIC_IN_FC_MODE &&
		    fnic->state != FNIC_IN_ETH_MODE) {
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			return;
		}

		list_del(&fevt->list);
		switch (fevt->event) {
		case FNIC_EVT_START_VLAN_DISC:
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			fnic_fcoe_send_vlan_req(fnic);
			spin_lock_irqsave(&fnic->fnic_lock, flags);
			break;
		case FNIC_EVT_START_FCF_DISC:
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				     "Start FCF Discovery\n");
			fnic_fcoe_start_fcf_disc(fnic);
			break;
		default:
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				     "Unknown event 0x%x\n", fevt->event);
			break;
		}
		kfree(fevt);
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}
/**
 * Check if the Received FIP FLOGI frame is rejected
 * @fip: The FCoE controller that received the frame
 * @skb: The received FIP frame
 *
 * Returns non-zero if the frame is rejected with unsupported cmd with
 * insufficient resource els explanation.
 */
static inline int is_fnic_fip_flogi_reject(struct fcoe_ctlr *fip,
					   struct sk_buff *skb)
{
	struct fc_lport *lport = fip->lp;
	struct fip_header *fiph;
	struct fc_frame_header *fh = NULL;
	struct fip_desc *desc;
	struct fip_encaps *els;
	enum fip_desc_type els_dtype = 0;
	u16 op;
	u8 els_op;
	u8 sub;

	size_t els_len = 0;
	size_t rlen;
	size_t dlen = 0;

	if (skb_linearize(skb))
		return 0;

	if (skb->len < sizeof(*fiph))
		return 0;

	fiph = (struct fip_header *)skb->data;
	op = ntohs(fiph->fip_op);
	sub = fiph->fip_subcode;

	if (op != FIP_OP_LS)
		return 0;

	if (sub != FIP_SC_REP)
		return 0;

	rlen = ntohs(fiph->fip_dl_len) * 4;
	if (rlen + sizeof(*fiph) > skb->len)
		return 0;

	desc = (struct fip_desc *)(fiph + 1);
	dlen = desc->fip_dlen * FIP_BPW;

	if (desc->fip_dtype == FIP_DT_FLOGI) {

		shost_printk(KERN_DEBUG, lport->host,
			     " FIP TYPE FLOGI: fab name:%llx "
			     "vfid:%d map:%x\n",
			     fip->sel_fcf->fabric_name, fip->sel_fcf->vfid,
			     fip->sel_fcf->fc_map);
		if (dlen < sizeof(*els) + sizeof(*fh) + 1)
			return 0;

		els_len = dlen - sizeof(*els);
		els = (struct fip_encaps *)desc;
		fh = (struct fc_frame_header *)(els + 1);
		els_dtype = desc->fip_dtype;

		if (!fh)
			return 0;

		/*
		 * ELS command code, reason and explanation should be = Reject,
		 * unsupported command and insufficient resource
		 */
		els_op = *(u8 *)(fh + 1);
		if (els_op == ELS_LS_RJT) {
			shost_printk(KERN_INFO, lport->host,
				     "Flogi Request Rejected by Switch\n");
			return 1;
		}
		shost_printk(KERN_INFO, lport->host,
			     "Flogi Request Accepted by Switch\n");
	}
	return 0;
}
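/*
 * fnic_fcoe_send_vlan_req() - build and send a FIP VLAN discovery
 * request to the ALL-FCF-MACs address, then arm the FIP timer so the
 * request is retried if no response arrives.
 */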
static void fnic_fcoe_send_vlan_req(struct fnic *fnic)
{
	struct fcoe_ctlr *fip = &fnic->ctlr;
	struct sk_buff *skb;
	char *eth_fr;
	int fr_len;
	struct fip_vlan *vlan;
	u64 vlan_tov;

	fnic_fcoe_reset_vlans(fnic);
	fnic->set_vlan(fnic, 0);
	FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
		     "Sending VLAN request...\n");
	skb = dev_alloc_skb(sizeof(struct fip_vlan));
	if (!skb)
		return;

	fr_len = sizeof(*vlan);
	eth_fr = (char *)skb->data;
	vlan = (struct fip_vlan *)eth_fr;

	memset(vlan, 0, sizeof(*vlan));
	memcpy(vlan->eth.h_source, fip->ctl_src_addr, ETH_ALEN);
	memcpy(vlan->eth.h_dest, fcoe_all_fcfs, ETH_ALEN);
	vlan->eth.h_proto = htons(ETH_P_FIP);

	vlan->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER);
	vlan->fip.fip_op = htons(FIP_OP_VLAN);
	vlan->fip.fip_subcode = FIP_SC_VL_REQ;
	vlan->fip.fip_dl_len = htons(sizeof(vlan->desc) / FIP_BPW);

	vlan->desc.mac.fd_desc.fip_dtype = FIP_DT_MAC;
	vlan->desc.mac.fd_desc.fip_dlen = sizeof(vlan->desc.mac) / FIP_BPW;
	memcpy(&vlan->desc.mac.fd_mac, fip->ctl_src_addr, ETH_ALEN);

	vlan->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME;
	vlan->desc.wwnn.fd_desc.fip_dlen = sizeof(vlan->desc.wwnn) / FIP_BPW;
	put_unaligned_be64(fip->lp->wwnn, &vlan->desc.wwnn.fd_wwn);

	skb_put(skb, sizeof(*vlan));
	skb->protocol = htons(ETH_P_FIP);
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	fip->send(fip, skb);

	/* set a timer so that we can retry if there no response */
	vlan_tov = jiffies + msecs_to_jiffies(FCOE_CTLR_FIPVLAN_TOV);
	mod_timer(&fnic->fip_timer, round_jiffies(vlan_tov));
}
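/*
 * fnic_fcoe_process_vlan_resp() - parse a FIP VLAN notification,
 * rebuild the driver's candidate VLAN list, select the first entry
 * and kick off FCF solicitation on it.
 */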
static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb)
{
	struct fcoe_ctlr *fip = &fnic->ctlr;
	struct fip_header *fiph;
	struct fip_desc *desc;
	u16 vid;
	size_t rlen;
	size_t dlen;
	struct fcoe_vlan *vlan;
	u64 sol_time;
	unsigned long flags;

	FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
		     "Received VLAN response...\n");

	fiph = (struct fip_header *) skb->data;

	FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
		     "Received VLAN response... OP 0x%x SUB_OP 0x%x\n",
		     ntohs(fiph->fip_op), fiph->fip_subcode);

	rlen = ntohs(fiph->fip_dl_len) * 4;
	fnic_fcoe_reset_vlans(fnic);
	spin_lock_irqsave(&fnic->vlans_lock, flags);
	desc = (struct fip_desc *)(fiph + 1);
	while (rlen > 0) {
		dlen = desc->fip_dlen * FIP_BPW;
		switch (desc->fip_dtype) {
		case FIP_DT_VLAN:
			vid = ntohs(((struct fip_vlan_desc *)desc)->fd_vlan);
			shost_printk(KERN_INFO, fnic->lport->host,
				     "process_vlan_resp: FIP VLAN %d\n", vid);
			vlan = kmalloc(sizeof(*vlan),
				       GFP_ATOMIC);
			if (!vlan) {
				/* retry from timer */
				spin_unlock_irqrestore(&fnic->vlans_lock,
						       flags);
				goto out;
			}
			memset(vlan, 0, sizeof(struct fcoe_vlan));
			vlan->vid = vid & 0x0fff;
			vlan->state = FIP_VLAN_AVAIL;
			list_add_tail(&vlan->list, &fnic->vlans);
			break;
		}
		desc = (struct fip_desc *)((char *)desc + dlen);
		rlen -= dlen;
	}

	/* any VLAN descriptors present ? */
	if (list_empty(&fnic->vlans)) {
		/* retry from timer */
		FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
			     "No VLAN descriptors in FIP VLAN response\n");
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		goto out;
	}

	vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
	fnic->set_vlan(fnic, vlan->vid);
	vlan->state = FIP_VLAN_SENT; /* sent now */
	vlan->sol_count++;
	spin_unlock_irqrestore(&fnic->vlans_lock, flags);

	/* start the solicitation */
	fcoe_ctlr_link_up(fip);

	sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
	mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
out:
	return;
}
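/*
 * fnic_fcoe_start_fcf_disc() - begin FCF discovery on the first
 * discovered VLAN and schedule the solicitation retry timer.
 */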
static void fnic_fcoe_start_fcf_disc(struct fnic *fnic)
{
	unsigned long flags;
	struct fcoe_vlan *vlan;
	u64 sol_time;

	spin_lock_irqsave(&fnic->vlans_lock, flags);
	vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
	fnic->set_vlan(fnic, vlan->vid);
	vlan->state = FIP_VLAN_SENT; /* sent now */
	vlan->sol_count = 1;
	spin_unlock_irqrestore(&fnic->vlans_lock, flags);

	/* start the solicitation */
	fcoe_ctlr_link_up(&fnic->ctlr);

	sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
	mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
}
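/*
 * fnic_fcoe_vlan_check() - called when an FCF advertisement arrives:
 * if a solicitation had been sent on the current VLAN, mark that VLAN
 * as used. Returns 0 if the VLAN is usable, -EINVAL otherwise.
 */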
static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag)
{
	unsigned long flags;
	struct fcoe_vlan *fvlan;

	spin_lock_irqsave(&fnic->vlans_lock, flags);
	if (list_empty(&fnic->vlans)) {
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		return -EINVAL;
	}

	fvlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
	if (fvlan->state == FIP_VLAN_USED) {
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		return 0;
	}

	if (fvlan->state == FIP_VLAN_SENT) {
		fvlan->state = FIP_VLAN_USED;
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		return 0;
	}
	spin_unlock_irqrestore(&fnic->vlans_lock, flags);
	return -EINVAL;
}
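/*
 * fnic_event_enq() - queue an event on the fnic event list and
 * schedule the event worker. May be called in atomic context, hence
 * GFP_ATOMIC.
 */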
static void fnic_event_enq(struct fnic *fnic, enum fnic_evt ev)
{
	struct fnic_event *fevt;
	unsigned long flags;

	fevt = kmalloc(sizeof(*fevt), GFP_ATOMIC);
	if (!fevt)
		return;

	fevt->fnic = fnic;
	fevt->event = ev;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	list_add_tail(&fevt->list, &fnic->evlist);
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	schedule_work(&fnic->event_work);
}
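/*
 * fnic_fcoe_handle_fip_frame() - validate a received FIP frame and
 * handle VLAN discovery replies and clear-virtual-link requests in the
 * driver. The caller passes the frame on to libfcoe only when this
 * returns a positive value.
 */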
static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb)
{
	struct fip_header *fiph;
	int ret = 1;
	u16 op;
	u8 sub;

	if (!skb || !(skb->data))
		return -1;

	if (skb_linearize(skb))
		goto drop;

	fiph = (struct fip_header *)skb->data;
	op = ntohs(fiph->fip_op);
	sub = fiph->fip_subcode;

	if (FIP_VER_DECAPS(fiph->fip_ver) != FIP_VER)
		goto drop;

	if (ntohs(fiph->fip_dl_len) * FIP_BPW + sizeof(*fiph) > skb->len)
		goto drop;

	if (op == FIP_OP_DISC && sub == FIP_SC_ADV) {
		if (fnic_fcoe_vlan_check(fnic, ntohs(fiph->fip_flags)))
			goto drop;
		/* pass it on to fcoe */
		ret = 1;
	} else if (op == FIP_OP_VLAN && sub == FIP_SC_VL_REP) {
		/* set the vlan as used */
		fnic_fcoe_process_vlan_resp(fnic, skb);
		ret = 0;
	} else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK) {
		/* received CVL request, restart vlan disc */
		fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
		/* pass it on to fcoe */
		ret = 1;
	}
drop:
	return ret;
}
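/*
 * fnic_handle_fip_frame() - worker that drains the FIP frame queue,
 * handing each frame to fnic_fcoe_handle_fip_frame() and then on to
 * libfcoe; an FLOGI reject triggers a link bounce and fresh VLAN
 * discovery.
 */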
void fnic_handle_fip_frame(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, fip_frame_work);
	unsigned long flags;
	struct sk_buff *skb;
	struct ethhdr *eh;

	while ((skb = skb_dequeue(&fnic->fip_frame_queue))) {
		spin_lock_irqsave(&fnic->fnic_lock, flags);
		if (fnic->stop_rx_link_events) {
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			dev_kfree_skb(skb);
			return;
		}
		/*
		 * If we're in a transitional state, just re-queue and return.
		 * The queue will be serviced when we get to a stable state.
		 */
		if (fnic->state != FNIC_IN_FC_MODE &&
		    fnic->state != FNIC_IN_ETH_MODE) {
			skb_queue_head(&fnic->fip_frame_queue, skb);
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			return;
		}
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		eh = (struct ethhdr *)skb->data;
		if (eh->h_proto == htons(ETH_P_FIP)) {
			skb_pull(skb, sizeof(*eh));
			if (fnic_fcoe_handle_fip_frame(fnic, skb) <= 0) {
				dev_kfree_skb(skb);
				continue;
			}
			/*
			 * If there's FLOGI rejects - clear all
			 * fcf's & restart from scratch
			 */
			if (is_fnic_fip_flogi_reject(&fnic->ctlr, skb)) {
				shost_printk(KERN_INFO, fnic->lport->host,
					     "Trigger a Link down - VLAN Disc\n");
				fcoe_ctlr_link_down(&fnic->ctlr);
				/* start FCoE VLAN discovery */
				fnic_fcoe_send_vlan_req(fnic);
				dev_kfree_skb(skb);
				continue;
			}
			fcoe_ctlr_recv(&fnic->ctlr, skb);
			continue;
		}
	}
}
/**
 * fnic_import_rq_eth_pkt() - handle received FCoE or FIP frame.
 * @fnic:	fnic instance.
 * @skb:	Ethernet Frame.
 */
static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb)
{
	struct fc_frame *fp;
	struct ethhdr *eh;
	struct fcoe_hdr *fcoe_hdr;
	struct fcoe_crc_eof *ft;

	/*
	 * Undo VLAN encapsulation if present.
	 */
	eh = (struct ethhdr *)skb->data;
	if (eh->h_proto == htons(ETH_P_8021Q)) {
		memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2);
		eh = (struct ethhdr *)skb_pull(skb, VLAN_HLEN);
		skb_reset_mac_header(skb);
	}
	if (eh->h_proto == htons(ETH_P_FIP)) {
		if (!(fnic->config.flags & VFCF_FIP_CAPABLE)) {
			printk(KERN_ERR "Dropped FIP frame, as firmware "
					"uses non-FIP mode, Enable FIP "
					"using UCSM\n");
			goto drop;
		}
		skb_queue_tail(&fnic->fip_frame_queue, skb);
		queue_work(fnic_fip_queue, &fnic->fip_frame_work);
		return 1;		/* let caller know packet was used */
	}
	if (eh->h_proto != htons(ETH_P_FCOE))
		goto drop;
	skb_set_network_header(skb, sizeof(*eh));
	skb_pull(skb, sizeof(*eh));

	fcoe_hdr = (struct fcoe_hdr *)skb->data;
	if (FC_FCOE_DECAPS_VER(fcoe_hdr) != FC_FCOE_VER)
		goto drop;

	fp = (struct fc_frame *)skb;
	fc_frame_init(fp);
	fr_sof(fp) = fcoe_hdr->fcoe_sof;
	skb_pull(skb, sizeof(struct fcoe_hdr));
	skb_reset_transport_header(skb);

	ft = (struct fcoe_crc_eof *)(skb->data + skb->len - sizeof(*ft));
	fr_eof(fp) = ft->fcoe_eof;
	skb_trim(skb, skb->len - sizeof(*ft));
	return 0;
drop:
	dev_kfree_skb_irq(skb);
	return -1;
}
/**
 * fnic_update_mac_locked() - set data MAC address and filters.
 * @fnic:	fnic instance.
 * @new:	newly-assigned FCoE MAC address.
 *
 * Called with the fnic lock held.
 */
void fnic_update_mac_locked(struct fnic *fnic, u8 *new)
{
	u8 *ctl = fnic->ctlr.ctl_src_addr;
	u8 *data = fnic->data_src_addr;

	if (is_zero_ether_addr(new))
		new = ctl;
	if (!compare_ether_addr(data, new))
		return;
	FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "update_mac %pM\n", new);
	if (!is_zero_ether_addr(data) && compare_ether_addr(data, ctl))
		vnic_dev_del_addr(fnic->vdev, data);
	memcpy(data, new, ETH_ALEN);
	if (compare_ether_addr(new, ctl))
		vnic_dev_add_addr(fnic->vdev, new);
}
/**
 * fnic_update_mac() - set data MAC address and filters.
 * @lport:	local port.
 * @new:	newly-assigned FCoE MAC address.
 */
void fnic_update_mac(struct fc_lport *lport, u8 *new)
{
	struct fnic *fnic = lport_priv(lport);

	spin_lock_irq(&fnic->fnic_lock);
	fnic_update_mac_locked(fnic, new);
	spin_unlock_irq(&fnic->fnic_lock);
}
/**
 * fnic_set_port_id() - set the port_ID after successful FLOGI.
 * @lport:	local port.
 * @port_id:	assigned FC_ID.
 * @fp:		received frame containing the FLOGI accept or NULL.
 *
 * This is called from libfc when a new FC_ID has been assigned.
 * This causes us to reset the firmware to FC_MODE and setup the new MAC
 * address and FC_ID.
 *
 * It is also called with FC_ID 0 when we're logged off.
 *
 * If the FC_ID is due to point-to-point, fp may be NULL.
 */
void fnic_set_port_id(struct fc_lport *lport, u32 port_id, struct fc_frame *fp)
{
	struct fnic *fnic = lport_priv(lport);
	u8 *mac;
	int ret;

	FNIC_FCS_DBG(KERN_DEBUG, lport->host, "set port_id %x fp %p\n",
		     port_id, fp);

	/*
	 * If we're clearing the FC_ID, change to use the ctl_src_addr.
	 * Set ethernet mode to send FLOGI.
	 */
	if (!port_id) {
		fnic_update_mac(lport, fnic->ctlr.ctl_src_addr);
		fnic_set_eth_mode(fnic);
		return;
	}

	if (fp) {
		mac = fr_cb(fp)->granted_mac;
		if (is_zero_ether_addr(mac)) {
			/* non-FIP - FLOGI already accepted - ignore return */
			fcoe_ctlr_recv_flogi(&fnic->ctlr, lport, fp);
		}
		fnic_update_mac(lport, mac);
	}

	/* Change state to reflect transition to FC mode */
	spin_lock_irq(&fnic->fnic_lock);
	if (fnic->state == FNIC_IN_ETH_MODE || fnic->state == FNIC_IN_FC_MODE)
		fnic->state = FNIC_IN_ETH_TRANS_FC_MODE;
	else {
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "Unexpected fnic state %s while"
			     " processing flogi resp\n",
			     fnic_state_to_str(fnic->state));
		spin_unlock_irq(&fnic->fnic_lock);
		return;
	}
	spin_unlock_irq(&fnic->fnic_lock);

	/*
	 * Send FLOGI registration to firmware to set up FC mode.
	 * The new address will be set up when registration completes.
	 */
	ret = fnic_flogi_reg_handler(fnic, port_id);

	if (ret < 0) {
		spin_lock_irq(&fnic->fnic_lock);
		if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE)
			fnic->state = FNIC_IN_ETH_MODE;
		spin_unlock_irq(&fnic->fnic_lock);
	}
}
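/*
 * fnic_rq_cmpl_frame_recv() - per-descriptor RQ completion handler:
 * unmaps the receive buffer, decodes the FCP or Ethernet completion,
 * validates the frame and queues good FC frames for fnic_handle_frame().
 */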
static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
				    *cq_desc, struct vnic_rq_buf *buf,
				    int skipped __attribute__((unused)),
				    void *opaque)
{
	struct fnic *fnic = vnic_dev_priv(rq->vdev);
	struct sk_buff *skb;
	struct fc_frame *fp;
	unsigned int eth_hdrs_stripped;
	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe = 0, fcoe_sof, fcoe_eof;
	u8 fcoe_fc_crc_ok = 1, fcoe_enc_error = 0;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, rss_type, csum_not_calc;
	u8 fcs_ok = 1, packet_error = 0;
	u16 q_number, completed_index, bytes_written = 0, vlan, checksum;
	u32 rss_hash;
	u16 exchange_id, tmpl;
	u8 sof = 0;
	u8 eof = 0;
	u32 fcp_bytes_written = 0;
	unsigned long flags;

	pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
			 PCI_DMA_FROMDEVICE);
	skb = buf->os_buf;
	fp = (struct fc_frame *)skb;
	buf->os_buf = NULL;

	cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index);
	if (type == CQ_DESC_TYPE_RQ_FCP) {
		cq_fcp_rq_desc_dec((struct cq_fcp_rq_desc *)cq_desc,
				   &type, &color, &q_number, &completed_index,
				   &eop, &sop, &fcoe_fc_crc_ok, &exchange_id,
				   &tmpl, &fcp_bytes_written, &sof, &eof,
				   &ingress_port, &packet_error,
				   &fcoe_enc_error, &fcs_ok, &vlan_stripped,
				   &vlan);
		eth_hdrs_stripped = 1;
		skb_trim(skb, fcp_bytes_written);
		fr_sof(fp) = sof;
		fr_eof(fp) = eof;

	} else if (type == CQ_DESC_TYPE_RQ_ENET) {
		cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
				    &type, &color, &q_number, &completed_index,
				    &ingress_port, &fcoe, &eop, &sop,
				    &rss_type, &csum_not_calc, &rss_hash,
				    &bytes_written, &packet_error,
				    &vlan_stripped, &vlan, &checksum,
				    &fcoe_sof, &fcoe_fc_crc_ok,
				    &fcoe_enc_error, &fcoe_eof,
				    &tcp_udp_csum_ok, &udp, &tcp,
				    &ipv4_csum_ok, &ipv6, &ipv4,
				    &ipv4_fragment, &fcs_ok);
		eth_hdrs_stripped = 0;
		skb_trim(skb, bytes_written);
		if (!fcs_ok) {
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				     "fcs error.  dropping packet.\n");
			goto drop;
		}
		if (fnic_import_rq_eth_pkt(fnic, skb))
			return;

	} else {
		/* wrong CQ type*/
		shost_printk(KERN_ERR, fnic->lport->host,
			     "fnic rq_cmpl wrong cq type x%x\n", type);
		goto drop;
	}

	if (!fcs_ok || packet_error || !fcoe_fc_crc_ok || fcoe_enc_error) {
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "fnic rq_cmpl fcoe x%x fcsok x%x"
			     " pkterr x%x fcoe_fc_crc_ok x%x, fcoe_enc_err"
			     " x%x\n",
			     fcoe, fcs_ok, packet_error,
			     fcoe_fc_crc_ok, fcoe_enc_error);
		goto drop;
	}

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (fnic->stop_rx_link_events) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		goto drop;
	}
	fr_dev(fp) = fnic->lport;
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	skb_queue_tail(&fnic->frame_queue, skb);
	queue_work(fnic_event_queue, &fnic->frame_work);

	return;
drop:
	dev_kfree_skb_irq(skb);
}
static int fnic_rq_cmpl_handler_cont(struct vnic_dev *vdev,
				     struct cq_desc *cq_desc, u8 type,
				     u16 q_number, u16 completed_index,
				     void *opaque)
{
	struct fnic *fnic = vnic_dev_priv(vdev);

	vnic_rq_service(&fnic->rq[q_number], cq_desc, completed_index,
			VNIC_RQ_RETURN_DESC, fnic_rq_cmpl_frame_recv,
			NULL);
	return 0;
}
int fnic_rq_cmpl_handler(struct fnic *fnic, int rq_work_to_do)
{
	unsigned int tot_rq_work_done = 0, cur_work_done;
	unsigned int i;
	int err;

	for (i = 0; i < fnic->rq_count; i++) {
		cur_work_done = vnic_cq_service(&fnic->cq[i], rq_work_to_do,
						fnic_rq_cmpl_handler_cont,
						NULL);
		if (cur_work_done) {
			err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
			if (err)
				shost_printk(KERN_ERR, fnic->lport->host,
					     "fnic_alloc_rq_frame can't alloc"
					     " frame\n");
		}
		tot_rq_work_done += cur_work_done;
	}

	return tot_rq_work_done;
}
/*
 * This function is called once at init time to allocate and fill RQ
 * buffers. Subsequently, it is called in the interrupt context after RQ
 * buffer processing to replenish the buffers in the RQ
 */
int fnic_alloc_rq_frame(struct vnic_rq *rq)
{
	struct fnic *fnic = vnic_dev_priv(rq->vdev);
	struct sk_buff *skb;
	u16 len;
	dma_addr_t pa;

	len = FC_FRAME_HEADROOM + FC_MAX_FRAME + FC_FRAME_TAILROOM;
	skb = dev_alloc_skb(len);
	if (!skb) {
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "Unable to allocate RQ sk_buff\n");
		return -ENOMEM;
	}
	skb_reset_mac_header(skb);
	skb_reset_transport_header(skb);
	skb_reset_network_header(skb);
	skb_put(skb, len);
	pa = pci_map_single(fnic->pdev, skb->data, len, PCI_DMA_FROMDEVICE);
	fnic_queue_rq_desc(rq, skb, pa, len);
	return 0;
}
void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
	struct fc_frame *fp = buf->os_buf;
	struct fnic *fnic = vnic_dev_priv(rq->vdev);

	pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
			 PCI_DMA_FROMDEVICE);

	dev_kfree_skb(fp_skb(fp));
	buf->os_buf = NULL;
}
/**
 * fnic_eth_send() - Send Ethernet frame.
 * @fip:	fcoe_ctlr instance.
 * @skb:	Ethernet Frame, FIP, without VLAN encapsulation.
 */
void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
{
	struct fnic *fnic = fnic_from_ctlr(fip);
	struct vnic_wq *wq = &fnic->wq[0];
	dma_addr_t pa;
	struct ethhdr *eth_hdr;
	struct vlan_ethhdr *vlan_hdr;
	unsigned long flags;

	if (!fnic->vlan_hw_insert) {
		eth_hdr = (struct ethhdr *)skb_mac_header(skb);
		vlan_hdr = (struct vlan_ethhdr *)skb_push(skb,
				sizeof(*vlan_hdr) - sizeof(*eth_hdr));
		memcpy(vlan_hdr, eth_hdr, 2 * ETH_ALEN);
		vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
		vlan_hdr->h_vlan_encapsulated_proto = eth_hdr->h_proto;
		vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
	}

	pa = pci_map_single(fnic->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);

	spin_lock_irqsave(&fnic->wq_lock[0], flags);
	if (!vnic_wq_desc_avail(wq)) {
		pci_unmap_single(fnic->pdev, pa, skb->len, PCI_DMA_TODEVICE);
		spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
		kfree_skb(skb);
		return;
	}

	fnic_queue_wq_eth_desc(wq, skb, pa, skb->len,
			       0 /* hw inserts cos value */,
			       fnic->vlan_id, 1);
	spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
}
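/*
 * fnic_send_frame() - encapsulate an FC frame in FCoE (and 802.1Q when
 * the hardware does not insert the VLAN tag) and post it on the raw WQ.
 */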
static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
{
	struct vnic_wq *wq = &fnic->wq[0];
	struct sk_buff *skb;
	dma_addr_t pa;
	struct ethhdr *eth_hdr;
	struct vlan_ethhdr *vlan_hdr;
	struct fcoe_hdr *fcoe_hdr;
	struct fc_frame_header *fh;
	u32 tot_len, eth_hdr_len;
	int ret = 0;
	unsigned long flags;

	fh = fc_frame_header_get(fp);
	skb = fp_skb(fp);

	if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) &&
	    fcoe_ctlr_els_send(&fnic->ctlr, fnic->lport, skb))
		return 0;

	if (!fnic->vlan_hw_insert) {
		eth_hdr_len = sizeof(*vlan_hdr) + sizeof(*fcoe_hdr);
		vlan_hdr = (struct vlan_ethhdr *)skb_push(skb, eth_hdr_len);
		eth_hdr = (struct ethhdr *)vlan_hdr;
		vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
		vlan_hdr->h_vlan_encapsulated_proto = htons(ETH_P_FCOE);
		vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
		fcoe_hdr = (struct fcoe_hdr *)(vlan_hdr + 1);
	} else {
		eth_hdr_len = sizeof(*eth_hdr) + sizeof(*fcoe_hdr);
		eth_hdr = (struct ethhdr *)skb_push(skb, eth_hdr_len);
		eth_hdr->h_proto = htons(ETH_P_FCOE);
		fcoe_hdr = (struct fcoe_hdr *)(eth_hdr + 1);
	}

	if (fnic->ctlr.map_dest)
		fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id);
	else
		memcpy(eth_hdr->h_dest, fnic->ctlr.dest_addr, ETH_ALEN);
	memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN);

	tot_len = skb->len;
	BUG_ON(tot_len % 4);

	memset(fcoe_hdr, 0, sizeof(*fcoe_hdr));
	fcoe_hdr->fcoe_sof = fr_sof(fp);
	if (FC_FCOE_VER)
		FC_FCOE_ENCAPS_VER(fcoe_hdr, FC_FCOE_VER);

	pa = pci_map_single(fnic->pdev, eth_hdr, tot_len, PCI_DMA_TODEVICE);

	spin_lock_irqsave(&fnic->wq_lock[0], flags);

	if (!vnic_wq_desc_avail(wq)) {
		pci_unmap_single(fnic->pdev, pa,
				 tot_len, PCI_DMA_TODEVICE);
		ret = -1;
		goto fnic_send_frame_end;
	}

	fnic_queue_wq_desc(wq, skb, pa, tot_len, fr_eof(fp),
			   0 /* hw inserts cos value */,
			   fnic->vlan_id, 1, 1, 1);
fnic_send_frame_end:
	spin_unlock_irqrestore(&fnic->wq_lock[0], flags);

	if (ret)
		dev_kfree_skb_any(fp_skb(fp));

	return ret;
}
/*
 * fnic_send
 * Routine to send a raw frame
 */
int fnic_send(struct fc_lport *lp, struct fc_frame *fp)
{
	struct fnic *fnic = lport_priv(lp);
	unsigned long flags;

	if (fnic->in_remove) {
		dev_kfree_skb(fp_skb(fp));
		return -1;
	}

	/*
	 * Queue frame if in a transitional state.
	 * This occurs while registering the Port_ID / MAC address after FLOGI.
	 */
	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (fnic->state != FNIC_IN_FC_MODE && fnic->state != FNIC_IN_ETH_MODE) {
		skb_queue_tail(&fnic->tx_queue, fp_skb(fp));
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return 0;
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	return fnic_send_frame(fnic, fp);
}
/**
 * fnic_flush_tx() - send queued frames.
 * @fnic: fnic device
 *
 * Send frames that were waiting to go out in FC or Ethernet mode.
 * Whenever changing modes we purge queued frames, so these frames should
 * be queued for the stable mode that we're in, either FC or Ethernet.
 *
 * Called without fnic_lock held.
 */
void fnic_flush_tx(struct fnic *fnic)
{
	struct sk_buff *skb;
	struct fc_frame *fp;

	while ((skb = skb_dequeue(&fnic->tx_queue))) {
		fp = (struct fc_frame *)skb;
		fnic_send_frame(fnic, fp);
	}
}
/**
 * fnic_set_eth_mode() - put fnic into ethernet mode.
 * @fnic: fnic device
 *
 * Called without fnic lock held.
 */
static void fnic_set_eth_mode(struct fnic *fnic)
{
	unsigned long flags;
	enum fnic_state old_state;
	int ret;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
again:
	old_state = fnic->state;
	switch (old_state) {
	case FNIC_IN_FC_MODE:
	case FNIC_IN_ETH_TRANS_FC_MODE:
	default:
		fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);

		ret = fnic_fw_reset_handler(fnic);

		spin_lock_irqsave(&fnic->fnic_lock, flags);
		if (fnic->state != FNIC_IN_FC_TRANS_ETH_MODE)
			goto again;
		if (ret)
			fnic->state = old_state;
		break;

	case FNIC_IN_FC_TRANS_ETH_MODE:
	case FNIC_IN_ETH_MODE:
		break;
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}
static void fnic_wq_complete_frame_send(struct vnic_wq *wq,
					struct cq_desc *cq_desc,
					struct vnic_wq_buf *buf, void *opaque)
{
	struct sk_buff *skb = buf->os_buf;
	struct fc_frame *fp = (struct fc_frame *)skb;
	struct fnic *fnic = vnic_dev_priv(wq->vdev);

	pci_unmap_single(fnic->pdev, buf->dma_addr,
			 buf->len, PCI_DMA_TODEVICE);
	dev_kfree_skb_irq(fp_skb(fp));
	buf->os_buf = NULL;
}
static int fnic_wq_cmpl_handler_cont(struct vnic_dev *vdev,
				     struct cq_desc *cq_desc, u8 type,
				     u16 q_number, u16 completed_index,
				     void *opaque)
{
	struct fnic *fnic = vnic_dev_priv(vdev);
	unsigned long flags;

	spin_lock_irqsave(&fnic->wq_lock[q_number], flags);
	vnic_wq_service(&fnic->wq[q_number], cq_desc, completed_index,
			fnic_wq_complete_frame_send, NULL);
	spin_unlock_irqrestore(&fnic->wq_lock[q_number], flags);

	return 0;
}
int fnic_wq_cmpl_handler(struct fnic *fnic, int work_to_do)
{
	unsigned int wq_work_done = 0;
	unsigned int i;

	for (i = 0; i < fnic->raw_wq_count; i++) {
		wq_work_done += vnic_cq_service(&fnic->cq[fnic->rq_count+i],
						work_to_do,
						fnic_wq_cmpl_handler_cont,
						NULL);
	}

	return wq_work_done;
}
void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
	struct fc_frame *fp = buf->os_buf;
	struct fnic *fnic = vnic_dev_priv(wq->vdev);

	pci_unmap_single(fnic->pdev, buf->dma_addr,
			 buf->len, PCI_DMA_TODEVICE);

	dev_kfree_skb(fp_skb(fp));
	buf->os_buf = NULL;
}
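/*
 * fnic_fcoe_reset_vlans() - drop all entries on the discovered-VLAN
 * list, typically before issuing a fresh VLAN discovery request.
 */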
void fnic_fcoe_reset_vlans(struct fnic *fnic)
{
	unsigned long flags;
	struct fcoe_vlan *vlan;
	struct fcoe_vlan *next;

	/*
	 * indicate a link down to fcoe so that all fcf's are free'd
	 * might not be required since we did this before sending vlan
	 * discovery request
	 */
	spin_lock_irqsave(&fnic->vlans_lock, flags);
	if (!list_empty(&fnic->vlans)) {
		list_for_each_entry_safe(vlan, next, &fnic->vlans, list) {
			list_del(&vlan->list);
			kfree(vlan);
		}
	}
	spin_unlock_irqrestore(&fnic->vlans_lock, flags);
}
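/*
 * fnic_handle_fip_timer() - FIP retry timer: restart VLAN discovery
 * when no VLANs were learned, and re-solicit or fail over to the next
 * VLAN when solicitations go unanswered.
 */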
void fnic_handle_fip_timer(struct fnic *fnic)
{
	unsigned long flags;
	struct fcoe_vlan *vlan;
	u64 sol_time;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (fnic->stop_rx_link_events) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	if (fnic->ctlr.mode == FIP_ST_NON_FIP)
		return;

	spin_lock_irqsave(&fnic->vlans_lock, flags);
	if (list_empty(&fnic->vlans)) {
		/* no vlans available, try again */
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "Start VLAN Discovery\n");
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
		return;
	}

	vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
	shost_printk(KERN_DEBUG, fnic->lport->host,
		     "fip_timer: vlan %d state %d sol_count %d\n",
		     vlan->vid, vlan->state, vlan->sol_count);
	switch (vlan->state) {
	case FIP_VLAN_USED:
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "FIP VLAN is selected for FC transaction\n");
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		break;
	case FIP_VLAN_FAILED:
		/* if all vlans are in failed state, restart vlan disc */
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "Start VLAN Discovery\n");
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
		break;
	case FIP_VLAN_SENT:
		if (vlan->sol_count >= FCOE_CTLR_MAX_SOL) {
			/*
			 * no response on this vlan, remove from the list.
			 * Try the next vlan
			 */
			shost_printk(KERN_INFO, fnic->lport->host,
				     "Dequeue this VLAN ID %d from list\n",
				     vlan->vid);
			list_del(&vlan->list);
			kfree(vlan);
			vlan = NULL;
			if (list_empty(&fnic->vlans)) {
				/* we exhausted all vlans, restart vlan disc */
				spin_unlock_irqrestore(&fnic->vlans_lock,
						       flags);
				shost_printk(KERN_INFO, fnic->lport->host,
					     "fip_timer: vlan list empty, "
					     "trigger vlan disc\n");
				fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
				return;
			}
			/* check the next vlan */
			vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan,
						list);
			fnic->set_vlan(fnic, vlan->vid);
			vlan->state = FIP_VLAN_SENT; /* sent now */
		}
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		vlan->sol_count++;
		sol_time = jiffies + msecs_to_jiffies
					(FCOE_CTLR_START_DELAY);
		mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
		break;
	}
}