/*
 * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Maintained at www.Open-FCoE.org
 */
#include <linux/module.h>
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/kthread.h>
#include <linux/crc32.h>
#include <linux/cpu.h>
#include <linux/sysfs.h>
#include <linux/ctype.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_fc.h>
#include <net/rtnetlink.h>

#include <scsi/fc/fc_encaps.h>

#include <scsi/libfc.h>
#include <scsi/fc_frame.h>
#include <scsi/libfcoe.h>
#include <scsi/fc_transport_fcoe.h>
static int debug_fcoe;

#define FCOE_MAX_QUEUE_DEPTH	256
#define FCOE_LOW_QUEUE_DEPTH	32

/* destination address mode */
#define FCOE_GW_ADDR_MODE	0x00
#define FCOE_FCOUI_ADDR_MODE	0x01

#define FCOE_WORD_TO_BYTE	4

MODULE_AUTHOR("Open-FCoE.org");
MODULE_DESCRIPTION("FCoE");
MODULE_LICENSE("GPL");

LIST_HEAD(fcoe_hostlist);
DEFINE_RWLOCK(fcoe_hostlist_lock);
DEFINE_TIMER(fcoe_timer, NULL, 0, 0);
struct fcoe_percpu_s *fcoe_percpu[NR_CPUS];
/* Function Prototypes */
static int fcoe_check_wait_queue(struct fc_lport *);
static void fcoe_recv_flogi(struct fcoe_softc *, struct fc_frame *, u8 *);
#ifdef CONFIG_HOTPLUG_CPU
static int fcoe_cpu_callback(struct notifier_block *, ulong, void *);
#endif /* CONFIG_HOTPLUG_CPU */
static int fcoe_device_notification(struct notifier_block *, ulong, void *);
static void fcoe_dev_setup(void);
static void fcoe_dev_cleanup(void);

/* notification function from net device */
static struct notifier_block fcoe_notifier = {
	.notifier_call = fcoe_device_notification,
};

#ifdef CONFIG_HOTPLUG_CPU
static struct notifier_block fcoe_cpu_notifier = {
	.notifier_call = fcoe_cpu_callback,
};
/**
 * fcoe_create_percpu_data() - creates the associated cpu data
 * @cpu: index for the cpu where fcoe cpu data will be created
 *
 * create percpu stats block, from cpu add notifier
 */
static void fcoe_create_percpu_data(int cpu)
{
	struct fc_lport *lp;
	struct fcoe_softc *fc;

	write_lock_bh(&fcoe_hostlist_lock);
	list_for_each_entry(fc, &fcoe_hostlist, list) {
		lp = fc->lp;
		if (lp->dev_stats[cpu] == NULL)
			lp->dev_stats[cpu] =
				kzalloc(sizeof(struct fcoe_dev_stats),
					GFP_KERNEL);
	}
	write_unlock_bh(&fcoe_hostlist_lock);
}
/**
 * fcoe_destroy_percpu_data() - destroys the associated cpu data
 * @cpu: index for the cpu where fcoe cpu data will be destroyed
 *
 * destroy percpu stats block called by cpu add/remove notifier
 */
static void fcoe_destroy_percpu_data(int cpu)
{
	struct fc_lport *lp;
	struct fcoe_softc *fc;

	write_lock_bh(&fcoe_hostlist_lock);
	list_for_each_entry(fc, &fcoe_hostlist, list) {
		lp = fc->lp;
		kfree(lp->dev_stats[cpu]);
		lp->dev_stats[cpu] = NULL;
	}
	write_unlock_bh(&fcoe_hostlist_lock);
}
/**
 * fcoe_cpu_callback() - fcoe cpu hotplug event callback
 * @nfb: callback data block
 * @action: event triggering the callback
 * @hcpu: index for the cpu of this event
 *
 * this creates or destroys per cpu data for fcoe
 *
 * Returns NOTIFY_OK always.
 */
static int fcoe_cpu_callback(struct notifier_block *nfb, unsigned long action,
			     void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_ONLINE:
		fcoe_create_percpu_data(cpu);
		break;
	case CPU_DEAD:
		fcoe_destroy_percpu_data(cpu);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}
#endif /* CONFIG_HOTPLUG_CPU */
/**
 * fcoe_rcv() - this is the fcoe receive function called by NET_RX_SOFTIRQ
 * @skb: the receive skb
 * @dev: associated net device
 * @ptype: context
 * @olddev: last device
 *
 * this function will receive the packet and build fc frame and pass it up
 *
 * Returns: 0 for success
 */
int fcoe_rcv(struct sk_buff *skb, struct net_device *dev,
	     struct packet_type *ptype, struct net_device *olddev)
{
	struct fc_lport *lp;
	struct fcoe_rcv_info *fr;
	struct fcoe_softc *fc;
	struct fcoe_dev_stats *stats;
	struct fc_frame_header *fh;
	unsigned short oxid;
	unsigned int cpu_idx;
	struct fcoe_percpu_s *fps;

	fc = container_of(ptype, struct fcoe_softc, fcoe_packet_type);
	lp = fc->lp;
	if (unlikely(lp == NULL)) {
		FC_DBG("cannot find hba structure");
		goto err2;
	}

	if (unlikely(debug_fcoe)) {
		FC_DBG("skb_info: len:%d data_len:%d head:%p data:%p tail:%p "
		       "end:%p sum:%d dev:%s", skb->len, skb->data_len,
		       skb->head, skb->data, skb_tail_pointer(skb),
		       skb_end_pointer(skb), skb->csum,
		       skb->dev ? skb->dev->name : "<NULL>");
	}

	/* check for FCOE packet type */
	if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) {
		FC_DBG("wrong FC type frame");
		goto err;
	}

	/*
	 * Check for minimum frame length, and make sure required FCoE
	 * and FC headers are pulled into the linear data area.
	 */
	if (unlikely((skb->len < FCOE_MIN_FRAME) ||
		     !pskb_may_pull(skb, FCOE_HEADER_LEN)))
		goto err;

	skb_set_transport_header(skb, sizeof(struct fcoe_hdr));
	fh = (struct fc_frame_header *) skb_transport_header(skb);

	oxid = ntohs(fh->fh_ox_id);

	fr = fcoe_dev_from_skb(skb);
	fr->fr_dev = lp;
	fr->ptype = ptype;

	/*
	 * The incoming frame exchange id(oxid) is ANDed with num of online
	 * cpu bits to get cpu_idx and then this cpu_idx is used for selecting
	 * a per cpu kernel thread from fcoe_percpu. In case the cpu is
	 * offline or there is no kernel thread for the derived cpu_idx, then
	 * cpu_idx is initialized to the first online cpu index.
	 */
	cpu_idx = oxid & (num_online_cpus() - 1);
	if (!fcoe_percpu[cpu_idx] || !cpu_online(cpu_idx))
		cpu_idx = first_cpu(cpu_online_map);

	fps = fcoe_percpu[cpu_idx];

	spin_lock_bh(&fps->fcoe_rx_list.lock);
	__skb_queue_tail(&fps->fcoe_rx_list, skb);
	if (fps->fcoe_rx_list.qlen == 1)
		wake_up_process(fps->thread);
	spin_unlock_bh(&fps->fcoe_rx_list.lock);

	return 0;
err:
	stats = lp->dev_stats[smp_processor_id()];
	if (stats == NULL)
		stats = lp->dev_stats[0];
	if (stats)
		stats->ErrorFrames++;
err2:
	kfree_skb(skb);
	return -1;
}
EXPORT_SYMBOL_GPL(fcoe_rcv);
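/*
 * Note (illustrative, not part of the original source): the cpu_idx
 * computation above treats "num_online_cpus() - 1" as a bit mask, so it
 * only spreads exchanges evenly when the online CPU count is a power of
 * two. For example, with 4 CPUs online an exchange id of 0x0016 selects
 * 0x0016 & 0x0003 = CPU 2; with 3 CPUs online the mask is 0x0002, so only
 * CPU 0 or CPU 2 can ever be chosen and the fallback to the first online
 * CPU covers the cases where that slot has no receive thread.
 */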
/**
 * fcoe_start_io() - pass to netdev to start xmit for fcoe
 * @skb: the skb to be xmitted
 *
 * Returns: 0 for success
 */
static inline int fcoe_start_io(struct sk_buff *skb)
{
	int rc;

	skb_get(skb);
	rc = dev_queue_xmit(skb);
	if (rc != 0)
		return rc;
	kfree_skb(skb);
	return 0;
}
/**
 * fcoe_get_paged_crc_eof() - in case we need to alloc a page for crc_eof
 * @skb: the skb to be xmitted
 * @tlen: trailer length
 *
 * Returns: 0 for success
 */
static int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen)
{
	struct fcoe_percpu_s *fps;
	struct page *page;
	unsigned int cpu_idx;

	cpu_idx = get_cpu();
	fps = fcoe_percpu[cpu_idx];
	page = fps->crc_eof_page;
	if (!page) {
		page = alloc_page(GFP_ATOMIC);
		if (!page) {
			put_cpu();
			return -ENOMEM;
		}
		fps->crc_eof_page = page;
		WARN_ON(fps->crc_eof_offset != 0);
	}

	get_page(page);
	skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page,
			   fps->crc_eof_offset, tlen);
	skb->len += tlen;
	skb->data_len += tlen;
	skb->truesize += tlen;
	fps->crc_eof_offset += sizeof(struct fcoe_crc_eof);

	if (fps->crc_eof_offset >= PAGE_SIZE) {
		fps->crc_eof_page = NULL;
		fps->crc_eof_offset = 0;
		put_page(page);
	}
	put_cpu();
	return 0;
}
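/*
 * Note (illustrative, not part of the original source): the per-cpu
 * crc_eof_page is carved into sizeof(struct fcoe_crc_eof) sized slots, so
 * consecutive transmits on a CPU share one page for their trailers. Once
 * crc_eof_offset reaches PAGE_SIZE the page reference is dropped and a
 * fresh page is allocated on the next call.
 */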
/**
 * fcoe_fc_crc() - calculates FC CRC in this fcoe skb
 * @fp: the fc_frame containing data to be checksummed
 *
 * This uses crc32() to calculate the crc for fc frame
 * Return : 32 bit crc
 */
u32 fcoe_fc_crc(struct fc_frame *fp)
{
	struct sk_buff *skb = fp_skb(fp);
	struct skb_frag_struct *frag;
	unsigned char *data;
	unsigned long off, len, clen;
	u32 crc;
	unsigned i;

	crc = crc32(~0, skb->data, skb_headlen(skb));

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		off = frag->page_offset;
		len = frag->size;
		while (len > 0) {
			clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK));
			data = kmap_atomic(frag->page + (off >> PAGE_SHIFT),
					   KM_SKB_DATA_SOFTIRQ);
			crc = crc32(crc, data + (off & ~PAGE_MASK), clen);
			kunmap_atomic(data, KM_SKB_DATA_SOFTIRQ);
			off += clen;
			len -= clen;
		}
	}
	return crc;
}
EXPORT_SYMBOL_GPL(fcoe_fc_crc);
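/*
 * Note (illustrative, not part of the original source): fcoe_fc_crc()
 * returns the running crc32 without the final bit inversion. The transmit
 * path stores cpu_to_le32(~crc) in the trailer, and the receive thread
 * compares against ~crc32(~0, data, len), so both sides apply the
 * complement at the point of use rather than inside this helper.
 */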
/**
 * fcoe_xmit() - FCoE frame transmit function
 * @lp: the associated local port
 * @fp: the fc_frame to be transmitted
 *
 * Return : 0 for success
 */
int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
{
	int wlen, rc = 0;
	u32 crc;
	struct ethhdr *eh;
	struct fcoe_crc_eof *cp;
	struct sk_buff *skb;
	struct fcoe_dev_stats *stats;
	struct fc_frame_header *fh;
	unsigned int hlen;		/* header length implies the version */
	unsigned int tlen;		/* trailer length */
	unsigned int elen;		/* eth header, may include vlan */
	int flogi_in_progress = 0;
	struct fcoe_softc *fc;
	u8 sof, eof;
	struct fcoe_hdr *hp;

	WARN_ON((fr_len(fp) % sizeof(u32)) != 0);

	fc = lport_priv(lp);

	/* if it is a flogi then we need to learn gw-addr */
	fh = fc_frame_header_get(fp);
	if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
		if (fc_frame_payload_op(fp) == ELS_FLOGI) {
			fc->flogi_oxid = ntohs(fh->fh_ox_id);
			fc->address_mode = FCOE_FCOUI_ADDR_MODE;
			fc->flogi_progress = 1;
			flogi_in_progress = 1;
		} else if (fc->flogi_progress && ntoh24(fh->fh_s_id) != 0) {
			/*
			 * Here we must've gotten an SID by accepting an FLOGI
			 * from a point-to-point connection. Switch to using
			 * the source mac based on the SID. The destination
			 * MAC in this case would have been set by receiving
			 * the FLOGI.
			 */
			fc_fcoe_set_mac(fc->data_src_addr, fh->fh_s_id);
			fc->flogi_progress = 0;
		}
	}

	skb = fp_skb(fp);
	sof = fr_sof(fp);
	eof = fr_eof(fp);

	elen = (fc->real_dev->priv_flags & IFF_802_1Q_VLAN) ?
		sizeof(struct vlan_ethhdr) : sizeof(struct ethhdr);
	hlen = sizeof(struct fcoe_hdr);
	tlen = sizeof(struct fcoe_crc_eof);
	wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;

	/* crc offload */
	if (likely(lp->crc_offload)) {
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->csum_start = skb_headroom(skb);
		skb->csum_offset = skb->len;
		crc = 0;
	} else {
		skb->ip_summed = CHECKSUM_NONE;
		crc = fcoe_fc_crc(fp);
	}

	/* copy fc crc and eof to the skb buff */
	if (skb_is_nonlinear(skb)) {
		skb_frag_t *frag;

		if (fcoe_get_paged_crc_eof(skb, tlen)) {
			kfree_skb(skb);
			return -ENOMEM;
		}
		frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
		cp = kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ)
			+ frag->page_offset;
	} else {
		cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
	}

	memset(cp, 0, sizeof(*cp));
	cp->fcoe_eof = eof;
	cp->fcoe_crc32 = cpu_to_le32(~crc);

	if (skb_is_nonlinear(skb)) {
		kunmap_atomic(cp, KM_SKB_DATA_SOFTIRQ);
		cp = NULL;
	}

	/* adjust skb network/transport offsets to match mac/fcoe/fc */
	skb_push(skb, elen + hlen);
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb->mac_len = elen;
	skb->protocol = htons(ETH_P_802_3);
	skb->dev = fc->real_dev;

	/* fill up mac and fcoe headers */
	eh = eth_hdr(skb);
	eh->h_proto = htons(ETH_P_FCOE);
	if (fc->address_mode == FCOE_FCOUI_ADDR_MODE)
		fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id);
	else
		/* insert GW address */
		memcpy(eh->h_dest, fc->dest_addr, ETH_ALEN);

	if (unlikely(flogi_in_progress))
		memcpy(eh->h_source, fc->ctl_src_addr, ETH_ALEN);
	else
		memcpy(eh->h_source, fc->data_src_addr, ETH_ALEN);

	hp = (struct fcoe_hdr *)(eh + 1);
	memset(hp, 0, sizeof(*hp));
	FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER);
	hp->fcoe_sof = sof;

	/* update tx stats: regardless if LLD fails */
	stats = lp->dev_stats[smp_processor_id()];
	if (stats) {
		stats->TxFrames++;
		stats->TxWords += wlen;
	}

	/* send down to lld */
	fr_dev(fp) = lp;
	if (fc->fcoe_pending_queue.qlen)
		rc = fcoe_check_wait_queue(lp);

	if (rc == 0)
		rc = fcoe_start_io(skb);

	if (rc) {
		spin_lock_bh(&fc->fcoe_pending_queue.lock);
		__skb_queue_tail(&fc->fcoe_pending_queue, skb);
		spin_unlock_bh(&fc->fcoe_pending_queue.lock);
		if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
			lp->qfull = 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(fcoe_xmit);
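/*
 * Sketch of the frame layout built by fcoe_xmit() (illustrative, not part
 * of the original source):
 *
 *	[ ethhdr / vlan_ethhdr ][ fcoe_hdr ][ FC header + payload ][ fcoe_crc_eof ]
 *	       elen bytes          hlen           FC frame              tlen
 *
 * The Ethernet and FCoE headers are pushed in front of the FC frame, while
 * the CRC/EOF trailer is appended either into the linear area or onto the
 * shared per-cpu crc_eof page; wlen (32-bit words) feeds the TxWords
 * statistic.
 */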
/**
 * fcoe_percpu_receive_thread() - recv thread per cpu
 * @arg: ptr to the fcoe per cpu struct
 *
 * Return: 0 for success
 */
int fcoe_percpu_receive_thread(void *arg)
{
	struct fcoe_percpu_s *p = arg;
	u32 fr_len;
	struct fc_lport *lp;
	struct fcoe_rcv_info *fr;
	struct fcoe_dev_stats *stats;
	struct fc_frame_header *fh;
	struct sk_buff *skb;
	struct fcoe_crc_eof crc_eof;
	struct fc_frame *fp;
	u8 *mac = NULL;
	struct fcoe_softc *fc;
	struct fcoe_hdr *hp;

	set_user_nice(current, -20);

	while (!kthread_should_stop()) {

		spin_lock_bh(&p->fcoe_rx_list.lock);
		while ((skb = __skb_dequeue(&p->fcoe_rx_list)) == NULL) {
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_bh(&p->fcoe_rx_list.lock);
			schedule();
			set_current_state(TASK_RUNNING);
			if (kthread_should_stop())
				return 0;
			spin_lock_bh(&p->fcoe_rx_list.lock);
		}
		spin_unlock_bh(&p->fcoe_rx_list.lock);

		fr = fcoe_dev_from_skb(skb);
		lp = fr->fr_dev;
		if (unlikely(lp == NULL)) {
			FC_DBG("invalid HBA Structure");
			kfree_skb(skb);
			continue;
		}

		stats = lp->dev_stats[smp_processor_id()];

		if (unlikely(debug_fcoe)) {
			FC_DBG("skb_info: len:%d data_len:%d head:%p data:%p "
			       "tail:%p end:%p sum:%d dev:%s",
			       skb->len, skb->data_len,
			       skb->head, skb->data, skb_tail_pointer(skb),
			       skb_end_pointer(skb), skb->csum,
			       skb->dev ? skb->dev->name : "<NULL>");
		}

		fc = lport_priv(lp);
		/*
		 * Save source MAC address before discarding header.
		 */
		if (unlikely(fc->flogi_progress))
			mac = eth_hdr(skb)->h_source;

		if (skb_is_nonlinear(skb))
			skb_linearize(skb);	/* not ideal */

		/*
		 * Frame length checks and setting up the header pointers
		 * was done in fcoe_rcv already.
		 */
		hp = (struct fcoe_hdr *) skb_network_header(skb);
		fh = (struct fc_frame_header *) skb_transport_header(skb);

		if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
			if (stats) {
				if (stats->ErrorFrames < 5)
					FC_DBG("unknown FCoE version %x",
					       FC_FCOE_DECAPS_VER(hp));
				stats->ErrorFrames++;
			}
			kfree_skb(skb);
			continue;
		}

		skb_pull(skb, sizeof(struct fcoe_hdr));
		fr_len = skb->len - sizeof(struct fcoe_crc_eof);

		if (stats) {
			stats->RxFrames++;
			stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
		}

		fp = (struct fc_frame *)skb;
		fc_frame_init(fp);
		fr_dev(fp) = lp;
		fr_sof(fp) = hp->fcoe_sof;

		/* Copy out the CRC and EOF trailer for access */
		if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) {
			kfree_skb(skb);
			continue;
		}
		fr_eof(fp) = crc_eof.fcoe_eof;
		fr_crc(fp) = crc_eof.fcoe_crc32;
		if (pskb_trim(skb, fr_len)) {
			kfree_skb(skb);
			continue;
		}

		/*
		 * We only check CRC if no offload is available and if it is
		 * solicited data, in which case, the FCP layer would
		 * check it during the copy.
		 */
		if (lp->crc_offload)
			fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
		else
			fr_flags(fp) |= FCPHF_CRC_UNCHECKED;

		fh = fc_frame_header_get(fp);
		if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA &&
		    fh->fh_type == FC_TYPE_FCP) {
			fc_exch_recv(lp, lp->emp, fp);
			continue;
		}
		if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) {
			if (le32_to_cpu(fr_crc(fp)) !=
			    ~crc32(~0, skb->data, fr_len)) {
				if (debug_fcoe || stats->InvalidCRCCount < 5)
					printk(KERN_WARNING "fcoe: dropping "
					       "frame with CRC error\n");
				stats->InvalidCRCCount++;
				stats->ErrorFrames++;
				fc_frame_free(fp);
				continue;
			}
			fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
		}
		/* non flogi and non data exchanges are handled here */
		if (unlikely(fc->flogi_progress))
			fcoe_recv_flogi(fc, fp, mac);
		fc_exch_recv(lp, lp->emp, fp);
	}
	return 0;
}
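/*
 * Design note (illustrative, not part of the original source): one
 * fcoe_percpu_receive_thread runs per online CPU, each draining only its
 * own fcoe_rx_list. fcoe_rcv() wakes a thread only when the queue length
 * transitions from 0 to 1, so a thread that is already busy keeps
 * dequeuing without redundant wakeups.
 */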
/**
 * fcoe_recv_flogi() - flogi receive function
 * @fc: associated fcoe_softc
 * @fp: the received frame
 * @sa: the source address of this flogi
 *
 * This function parses the flogi response and sets the corresponding
 * mac address for the initiator, either OUI based or GW based.
 */
static void fcoe_recv_flogi(struct fcoe_softc *fc, struct fc_frame *fp, u8 *sa)
{
	struct fc_frame_header *fh;
	u8 op;

	fh = fc_frame_header_get(fp);
	if (fh->fh_type != FC_TYPE_ELS)
		return;
	op = fc_frame_payload_op(fp);
	if (op == ELS_LS_ACC && fh->fh_r_ctl == FC_RCTL_ELS_REP &&
	    fc->flogi_oxid == ntohs(fh->fh_ox_id)) {
		/*
		 * If the src mac addr is FC_OUI-based, then we mark the
		 * address_mode flag to use FC_OUI-based Ethernet DA.
		 * Otherwise we use the FCoE gateway addr
		 */
		if (!compare_ether_addr(sa, (u8[6]) FC_FCOE_FLOGI_MAC)) {
			fc->address_mode = FCOE_FCOUI_ADDR_MODE;
		} else {
			memcpy(fc->dest_addr, sa, ETH_ALEN);
			fc->address_mode = FCOE_GW_ADDR_MODE;
		}

		/*
		 * Remove any previously-set unicast MAC filter.
		 * Add secondary FCoE MAC address filter for our OUI.
		 */
		rtnl_lock();
		if (compare_ether_addr(fc->data_src_addr, (u8[6]) { 0 }))
			dev_unicast_delete(fc->real_dev, fc->data_src_addr,
					   ETH_ALEN);
		fc_fcoe_set_mac(fc->data_src_addr, fh->fh_d_id);
		dev_unicast_add(fc->real_dev, fc->data_src_addr, ETH_ALEN);
		rtnl_unlock();

		fc->flogi_progress = 0;
	} else if (op == ELS_FLOGI && fh->fh_r_ctl == FC_RCTL_ELS_REQ && sa) {
		/*
		 * Save source MAC for point-to-point responses.
		 */
		memcpy(fc->dest_addr, sa, ETH_ALEN);
		fc->address_mode = FCOE_GW_ADDR_MODE;
	}
}
/**
 * fcoe_watchdog() - fcoe timer callback
 * @vp: unused
 *
 * This checks the pending queue length for fcoe and sets lport qfull
 * if FCOE_MAX_QUEUE_DEPTH is reached. This is done for all fc_lports on the
 * hostlist.
 */
void fcoe_watchdog(ulong vp)
{
	struct fcoe_softc *fc;

	read_lock(&fcoe_hostlist_lock);
	list_for_each_entry(fc, &fcoe_hostlist, list) {
		if (fc->lp)
			fcoe_check_wait_queue(fc->lp);
	}
	read_unlock(&fcoe_hostlist_lock);

	fcoe_timer.expires = jiffies + (1 * HZ);
	add_timer(&fcoe_timer);
}
/**
 * fcoe_check_wait_queue() - flush the fcoe pending xmit queue
 * @lp: the fc_lport whose pending queue is flushed
 *
 * This empties the wait_queue, dequeuing the head of the wait_queue and
 * calling fcoe_start_io() for each packet. If all skbs have been
 * transmitted it returns the qlen, or -1 if an error occurs; the
 * wait_queue is then restored and tried again later.
 *
 * The wait_queue is used when the skb transmit fails. The skb will go
 * in the wait_queue, which will be emptied by the timer function or
 * by the next skb transmit.
 */
static int fcoe_check_wait_queue(struct fc_lport *lp)
{
	struct fcoe_softc *fc = lport_priv(lp);
	struct sk_buff *skb;
	int rc = -1;

	spin_lock_bh(&fc->fcoe_pending_queue.lock);
	if (fc->fcoe_pending_queue_active)
		goto out;
	fc->fcoe_pending_queue_active = 1;

	while (fc->fcoe_pending_queue.qlen) {
		/* keep qlen > 0 until fcoe_start_io succeeds */
		fc->fcoe_pending_queue.qlen++;
		skb = __skb_dequeue(&fc->fcoe_pending_queue);

		spin_unlock_bh(&fc->fcoe_pending_queue.lock);
		rc = fcoe_start_io(skb);
		spin_lock_bh(&fc->fcoe_pending_queue.lock);

		if (rc) {
			__skb_queue_head(&fc->fcoe_pending_queue, skb);
			/* undo temporary increment above */
			fc->fcoe_pending_queue.qlen--;
			break;
		}

		/* undo temporary increment above */
		fc->fcoe_pending_queue.qlen--;
	}

	if (fc->fcoe_pending_queue.qlen < FCOE_LOW_QUEUE_DEPTH)
		lp->qfull = 0;
	fc->fcoe_pending_queue_active = 0;
	rc = fc->fcoe_pending_queue.qlen;
out:
	spin_unlock_bh(&fc->fcoe_pending_queue.lock);
	return rc;
}
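/*
 * Note (illustrative, not part of the original source): together with
 * fcoe_xmit(), this gives simple backpressure with hysteresis. The transmit
 * path marks the lport qfull once the pending queue exceeds
 * FCOE_MAX_QUEUE_DEPTH (256 skbs), and the flush above clears that state
 * again once the backlog drains below FCOE_LOW_QUEUE_DEPTH (32).
 */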
/**
 * fcoe_dev_setup() - setup link change notification interface
 */
static void fcoe_dev_setup(void)
{
	/* monitor link state changes via the netdevice notifier */
	register_netdevice_notifier(&fcoe_notifier);
}

/**
 * fcoe_dev_cleanup() - cleanup link change notification interface
 */
static void fcoe_dev_cleanup(void)
{
	unregister_netdevice_notifier(&fcoe_notifier);
}
/**
 * fcoe_device_notification() - netdev event notification callback
 * @notifier: context of the notification
 * @event: type of event
 * @ptr: ptr to net_device
 *
 * This function is called by the ethernet driver in case of link change event
 *
 * Returns: 0 for success
 */
static int fcoe_device_notification(struct notifier_block *notifier,
				    ulong event, void *ptr)
{
	struct fc_lport *lp = NULL;
	struct net_device *real_dev = ptr;
	struct fcoe_softc *fc;
	struct fcoe_dev_stats *stats;
	u32 new_link_up;
	u32 mfs;
	int rc = NOTIFY_OK;

	read_lock(&fcoe_hostlist_lock);
	list_for_each_entry(fc, &fcoe_hostlist, list) {
		if (fc->real_dev == real_dev) {
			lp = fc->lp;
			break;
		}
	}
	read_unlock(&fcoe_hostlist_lock);
	if (lp == NULL) {
		rc = NOTIFY_DONE;
		goto out;
	}

	new_link_up = lp->link_up;
	switch (event) {
	case NETDEV_DOWN:
	case NETDEV_GOING_DOWN:
		new_link_up = 0;
		break;
	case NETDEV_UP:
	case NETDEV_CHANGE:
		new_link_up = !fcoe_link_ok(lp);
		break;
	case NETDEV_CHANGEMTU:
		mfs = fc->real_dev->mtu -
			(sizeof(struct fcoe_hdr) +
			 sizeof(struct fcoe_crc_eof));
		if (mfs >= FC_MIN_MAX_FRAME)
			fc_set_mfs(lp, mfs);
		new_link_up = !fcoe_link_ok(lp);
		break;
	case NETDEV_REGISTER:
		break;
	default:
		FC_DBG("unknown event %ld call", event);
	}
	if (lp->link_up != new_link_up) {
		if (new_link_up)
			fc_linkup(lp);
		else {
			stats = lp->dev_stats[smp_processor_id()];
			if (stats)
				stats->LinkFailureCount++;
			fc_linkdown(lp);
			fcoe_clean_pending_queue(lp);
		}
	}
out:
	return rc;
}
/**
 * fcoe_if_to_netdev() - parse a name buffer to get netdev
 * @buffer: incoming buffer to be copied
 *
 * Returns: NULL or ptr to net_device
 */
static struct net_device *fcoe_if_to_netdev(const char *buffer)
{
	char *cp;
	char ifname[IFNAMSIZ + 2];

	strlcpy(ifname, buffer, IFNAMSIZ);
	cp = ifname + strlen(ifname);
	while (--cp >= ifname && *cp == '\n')
		*cp = '\0';
	return dev_get_by_name(&init_net, ifname);
}
/**
 * fcoe_netdev_to_module_owner() - finds the nic driver module of the netdev
 * @netdev: the target netdev
 *
 * Returns: ptr to the struct module, NULL for failure
 */
static struct module *
fcoe_netdev_to_module_owner(const struct net_device *netdev)
{
	struct device *dev;

	if (!netdev)
		return NULL;

	dev = netdev->dev.parent;
	if (!dev)
		return NULL;

	if (!dev->driver)
		return NULL;

	return dev->driver->owner;
}
/**
 * fcoe_ethdrv_get() - Hold the Ethernet driver
 * @netdev: the target netdev
 *
 * Holds the Ethernet driver module by try_module_get() for
 * the corresponding netdev.
 *
 * Returns: 0 for success
 */
static int fcoe_ethdrv_get(const struct net_device *netdev)
{
	struct module *owner;

	owner = fcoe_netdev_to_module_owner(netdev);
	if (owner) {
		printk(KERN_DEBUG "fcoe:hold driver module %s for %s\n",
		       module_name(owner), netdev->name);
		return try_module_get(owner);
	}
	return -ENODEV;
}
/**
 * fcoe_ethdrv_put() - Release the Ethernet driver
 * @netdev: the target netdev
 *
 * Releases the Ethernet driver module by module_put for
 * the corresponding netdev.
 *
 * Returns: 0 for success
 */
static int fcoe_ethdrv_put(const struct net_device *netdev)
{
	struct module *owner;

	owner = fcoe_netdev_to_module_owner(netdev);
	if (owner) {
		printk(KERN_DEBUG "fcoe:release driver module %s for %s\n",
		       module_name(owner), netdev->name);
		module_put(owner);
		return 0;
	}
	return -ENODEV;
}
/**
 * fcoe_destroy() - handles the destroy from sysfs
 * @buffer: expected to be an eth if name
 * @kp: associated kernel param
 *
 * Returns: 0 for success
 */
static int fcoe_destroy(const char *buffer, struct kernel_param *kp)
{
	int rc;
	struct net_device *netdev;

	netdev = fcoe_if_to_netdev(buffer);
	if (!netdev) {
		rc = -ENODEV;
		goto out_nodev;
	}
	/* look for existing lport */
	if (!fcoe_hostlist_lookup(netdev)) {
		rc = -ENODEV;
		goto out_putdev;
	}
	/* pass to transport */
	rc = fcoe_transport_release(netdev);
	if (rc) {
		printk(KERN_ERR "fcoe: fcoe_transport_release(%s) failed\n",
		       netdev->name);
		rc = -EIO;
		goto out_putdev;
	}
	fcoe_ethdrv_put(netdev);
	rc = 0;
out_putdev:
	dev_put(netdev);
out_nodev:
	return rc;
}
/**
 * fcoe_create() - Handles the create call from sysfs
 * @buffer: expected to be an eth if name
 * @kp: associated kernel param
 *
 * Returns: 0 for success
 */
static int fcoe_create(const char *buffer, struct kernel_param *kp)
{
	int rc;
	struct net_device *netdev;

	netdev = fcoe_if_to_netdev(buffer);
	if (!netdev) {
		rc = -ENODEV;
		goto out_nodev;
	}
	/* look for existing lport */
	if (fcoe_hostlist_lookup(netdev)) {
		rc = -EEXIST;
		goto out_putdev;
	}
	fcoe_ethdrv_get(netdev);

	/* pass to transport */
	rc = fcoe_transport_attach(netdev);
	if (rc) {
		printk(KERN_ERR "fcoe: fcoe_transport_attach(%s) failed\n",
		       netdev->name);
		fcoe_ethdrv_put(netdev);
		rc = -EIO;
		goto out_putdev;
	}
	rc = 0;
out_putdev:
	dev_put(netdev);
out_nodev:
	return rc;
}
module_param_call(create, fcoe_create, NULL, NULL, S_IWUSR);
__MODULE_PARM_TYPE(create, "string");
MODULE_PARM_DESC(create, "Create fcoe port using net device passed in.");
module_param_call(destroy, fcoe_destroy, NULL, NULL, S_IWUSR);
__MODULE_PARM_TYPE(destroy, "string");
MODULE_PARM_DESC(destroy, "Destroy fcoe port");
/**
 * fcoe_link_ok() - Check if link is ok for the fc_lport
 * @lp: ptr to the fc_lport
 *
 * Any permanently-disqualifying conditions have been previously checked.
 * This also updates the speed setting, which may change with link for 100/1000.
 *
 * This function should probably be checking for PAUSE support at some point
 * in the future. Currently Per-priority-pause is not determinable using
 * ethtool, so we shouldn't be restrictive until that problem is resolved.
 *
 * Returns: 0 if link is OK for use by FCoE.
 */
int fcoe_link_ok(struct fc_lport *lp)
{
	struct fcoe_softc *fc = lport_priv(lp);
	struct net_device *dev = fc->real_dev;
	struct ethtool_cmd ecmd = { ETHTOOL_GSET };
	int rc = 0;

	if ((dev->flags & IFF_UP) && netif_carrier_ok(dev)) {
		if (dev->ethtool_ops->get_settings) {
			dev->ethtool_ops->get_settings(dev, &ecmd);
			lp->link_supported_speeds &=
				~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
			if (ecmd.supported & (SUPPORTED_1000baseT_Half |
					      SUPPORTED_1000baseT_Full))
				lp->link_supported_speeds |= FC_PORTSPEED_1GBIT;
			if (ecmd.supported & SUPPORTED_10000baseT_Full)
				lp->link_supported_speeds |=
					FC_PORTSPEED_10GBIT;
			if (ecmd.speed == SPEED_1000)
				lp->link_speed = FC_PORTSPEED_1GBIT;
			if (ecmd.speed == SPEED_10000)
				lp->link_speed = FC_PORTSPEED_10GBIT;
		}
	} else
		rc = -1;

	return rc;
}
EXPORT_SYMBOL_GPL(fcoe_link_ok);
/**
 * fcoe_percpu_clean() - Clear the pending skbs for an lport
 * @lp: the fc_lport
 */
void fcoe_percpu_clean(struct fc_lport *lp)
{
	int idx;
	struct fcoe_percpu_s *pp;
	struct fcoe_rcv_info *fr;
	struct sk_buff_head *list;
	struct sk_buff *skb, *next;
	struct sk_buff *head;

	for (idx = 0; idx < NR_CPUS; idx++) {
		if (fcoe_percpu[idx]) {
			pp = fcoe_percpu[idx];
			spin_lock_bh(&pp->fcoe_rx_list.lock);
			list = &pp->fcoe_rx_list;
			head = list->next;
			for (skb = head; skb != (struct sk_buff *)list;
			     skb = next) {
				next = skb->next;
				fr = fcoe_dev_from_skb(skb);
				if (fr->fr_dev == lp) {
					__skb_unlink(skb, list);
					kfree_skb(skb);
				}
			}
			spin_unlock_bh(&pp->fcoe_rx_list.lock);
		}
	}
}
EXPORT_SYMBOL_GPL(fcoe_percpu_clean);
/**
 * fcoe_clean_pending_queue() - Dequeue a skb and free it
 * @lp: the corresponding fc_lport
 */
void fcoe_clean_pending_queue(struct fc_lport *lp)
{
	struct fcoe_softc *fc = lport_priv(lp);
	struct sk_buff *skb;

	spin_lock_bh(&fc->fcoe_pending_queue.lock);
	while ((skb = __skb_dequeue(&fc->fcoe_pending_queue)) != NULL) {
		spin_unlock_bh(&fc->fcoe_pending_queue.lock);
		kfree_skb(skb);
		spin_lock_bh(&fc->fcoe_pending_queue.lock);
	}
	spin_unlock_bh(&fc->fcoe_pending_queue.lock);
}
EXPORT_SYMBOL_GPL(fcoe_clean_pending_queue);
/**
 * libfc_host_alloc() - Allocate a Scsi_Host with room for the fc_lport
 * @sht: ptr to the scsi host template
 * @priv_size: size of private data after fc_lport
 *
 * Returns: ptr to Scsi_Host
 */
static inline struct Scsi_Host *
libfc_host_alloc(struct scsi_host_template *sht, int priv_size)
{
	return scsi_host_alloc(sht, sizeof(struct fc_lport) + priv_size);
}

/**
 * fcoe_host_alloc() - Allocate a Scsi_Host with room for the fcoe_softc
 * @sht: ptr to the scsi host template
 * @priv_size: size of private data after fc_lport
 *
 * Returns: ptr to Scsi_Host
 */
struct Scsi_Host *fcoe_host_alloc(struct scsi_host_template *sht, int priv_size)
{
	return libfc_host_alloc(sht, sizeof(struct fcoe_softc) + priv_size);
}
EXPORT_SYMBOL_GPL(fcoe_host_alloc);
/**
 * fcoe_reset() - Resets the fcoe
 * @shost: shost the reset is from
 */
int fcoe_reset(struct Scsi_Host *shost)
{
	struct fc_lport *lport = shost_priv(shost);
	fc_lport_reset(lport);
	return 0;
}
EXPORT_SYMBOL_GPL(fcoe_reset);
/**
 * fcoe_wwn_from_mac() - Converts 48-bit IEEE MAC address to 64-bit FC WWN.
 * @mac: mac to convert
 * @scheme: naming scheme stored in the high nibble of the WWN
 * @port: port indicator for converting
 *
 * Returns: u64 fc world wide name
 */
u64 fcoe_wwn_from_mac(unsigned char mac[MAX_ADDR_LEN],
		      unsigned int scheme, unsigned int port)
{
	u64 wwn;
	u64 host_mac;

	/* The MAC is in network order; it fills only the low 48 bits */
	host_mac = ((u64) mac[0] << 40) |
		((u64) mac[1] << 32) |
		((u64) mac[2] << 24) |
		((u64) mac[3] << 16) |
		((u64) mac[4] << 8) |
		(u64) mac[5];

	WARN_ON(host_mac >= (1ULL << 48));
	wwn = host_mac | ((u64) scheme << 60);
	switch (scheme) {
	case 1:
		WARN_ON(port != 0);
		break;
	case 2:
		WARN_ON(port >= 0xfff);
		wwn |= (u64) port << 48;
		break;
	default:
		WARN_ON(1);
		break;
	}

	return wwn;
}
EXPORT_SYMBOL_GPL(fcoe_wwn_from_mac);
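/*
 * Worked example (illustrative, not part of the original source): for
 * MAC 00:1b:21:01:02:03 the low 48 bits become 0x001b21010203, so
 * scheme 1 with port 0 yields WWN 0x1000001b21010203, while scheme 2
 * with port 1 adds the port in bits 59:48 and yields 0x2001001b21010203.
 */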
/**
 * fcoe_hostlist_lookup_softc() - find the corresponding lport by a given device
 * @dev: this is currently ptr to net_device
 *
 * Returns: NULL or the located fcoe_softc
 */
static struct fcoe_softc *
fcoe_hostlist_lookup_softc(const struct net_device *dev)
{
	struct fcoe_softc *fc;

	read_lock(&fcoe_hostlist_lock);
	list_for_each_entry(fc, &fcoe_hostlist, list) {
		if (fc->real_dev == dev) {
			read_unlock(&fcoe_hostlist_lock);
			return fc;
		}
	}
	read_unlock(&fcoe_hostlist_lock);
	return NULL;
}

/**
 * fcoe_hostlist_lookup() - Find the corresponding lport by netdev
 * @netdev: ptr to net_device
 *
 * Returns: NULL or the located fc_lport
 */
struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev)
{
	struct fcoe_softc *fc;

	fc = fcoe_hostlist_lookup_softc(netdev);

	return (fc) ? fc->lp : NULL;
}
EXPORT_SYMBOL_GPL(fcoe_hostlist_lookup);
/**
 * fcoe_hostlist_add() - Add a lport to lports list
 * @lp: ptr to the fc_lport to be added
 *
 * Returns: 0 for success
 */
int fcoe_hostlist_add(const struct fc_lport *lp)
{
	struct fcoe_softc *fc;

	fc = fcoe_hostlist_lookup_softc(fcoe_netdev(lp));
	if (!fc) {
		fc = lport_priv(lp);
		write_lock_bh(&fcoe_hostlist_lock);
		list_add_tail(&fc->list, &fcoe_hostlist);
		write_unlock_bh(&fcoe_hostlist_lock);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(fcoe_hostlist_add);

/**
 * fcoe_hostlist_remove() - remove a lport from lports list
 * @lp: ptr to the fc_lport to be removed
 *
 * Returns: 0 for success
 */
int fcoe_hostlist_remove(const struct fc_lport *lp)
{
	struct fcoe_softc *fc;

	fc = fcoe_hostlist_lookup_softc(fcoe_netdev(lp));
	BUG_ON(!fc);
	write_lock_bh(&fcoe_hostlist_lock);
	list_del(&fc->list);
	write_unlock_bh(&fcoe_hostlist_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(fcoe_hostlist_remove);
/**
 * fcoe_libfc_config() - sets up libfc related properties for lport
 * @lp: ptr to the fc_lport
 * @tt: libfc function template
 *
 * Returns : 0 for success
 */
int fcoe_libfc_config(struct fc_lport *lp, struct libfc_function_template *tt)
{
	/* Set the function pointers set by the LLDD */
	memcpy(&lp->tt, tt, sizeof(*tt));
	if (fc_fcp_init(lp))
		return -ENOMEM;
	fc_exch_init(lp);
	fc_elsct_init(lp);
	fc_lport_init(lp);
	fc_rport_init(lp);
	fc_disc_init(lp);

	return 0;
}
EXPORT_SYMBOL_GPL(fcoe_libfc_config);
/**
 * fcoe_init() - fcoe module loading initialization
 *
 * Initialization routine
 * 1. Will create fc transport software structure
 * 2. initialize the link list of port information structure
 *
 * Returns 0 on success, negative on failure
 */
static int __init fcoe_init(void)
{
	int cpu;
	struct fcoe_percpu_s *p;

	INIT_LIST_HEAD(&fcoe_hostlist);
	rwlock_init(&fcoe_hostlist_lock);

#ifdef CONFIG_HOTPLUG_CPU
	register_cpu_notifier(&fcoe_cpu_notifier);
#endif /* CONFIG_HOTPLUG_CPU */

	/*
	 * initialize per CPU interrupt thread
	 */
	for_each_online_cpu(cpu) {
		p = kzalloc(sizeof(struct fcoe_percpu_s), GFP_KERNEL);
		if (p) {
			p->thread = kthread_create(fcoe_percpu_receive_thread,
						   (void *)p,
						   "fcoethread/%d", cpu);

			/*
			 * if there is no error then bind the thread to the cpu
			 * initialize the semaphore and skb queue head
			 */
			if (likely(!IS_ERR(p->thread))) {
				fcoe_percpu[cpu] = p;
				skb_queue_head_init(&p->fcoe_rx_list);
				kthread_bind(p->thread, cpu);
				wake_up_process(p->thread);
			} else {
				fcoe_percpu[cpu] = NULL;
				kfree(p);
			}
		}
	}

	/*
	 * setup link change notification
	 */
	fcoe_dev_setup();

	setup_timer(&fcoe_timer, fcoe_watchdog, 0);

	mod_timer(&fcoe_timer, jiffies + (10 * HZ));

	/* initialize the fcoe transport */
	fcoe_transport_init();

	fcoe_sw_init();

	return 0;
}
module_init(fcoe_init);
/**
 * fcoe_exit() - fcoe module unloading cleanup
 */
static void __exit fcoe_exit(void)
{
	u32 idx;
	struct fcoe_softc *fc, *tmp;
	struct fcoe_percpu_s *p;
	struct sk_buff *skb;

	/*
	 * Stop all call back interfaces
	 */
#ifdef CONFIG_HOTPLUG_CPU
	unregister_cpu_notifier(&fcoe_cpu_notifier);
#endif /* CONFIG_HOTPLUG_CPU */
	fcoe_dev_cleanup();

	del_timer_sync(&fcoe_timer);

	/* releases the associated fcoe transport for each lport */
	list_for_each_entry_safe(fc, tmp, &fcoe_hostlist, list)
		fcoe_transport_release(fc->real_dev);

	for (idx = 0; idx < NR_CPUS; idx++) {
		if (fcoe_percpu[idx]) {
			kthread_stop(fcoe_percpu[idx]->thread);
			p = fcoe_percpu[idx];
			spin_lock_bh(&p->fcoe_rx_list.lock);
			while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
				kfree_skb(skb);
			spin_unlock_bh(&p->fcoe_rx_list.lock);
			if (fcoe_percpu[idx]->crc_eof_page)
				put_page(fcoe_percpu[idx]->crc_eof_page);
			kfree(fcoe_percpu[idx]);
		}
	}

	/* remove sw transport */
	fcoe_sw_exit();

	/* detach the transport */
	fcoe_transport_exit();
}
module_exit(fcoe_exit);