/*
 * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Maintained at www.Open-FCoE.org
 */
#include <linux/module.h>
#include <linux/version.h>
#include <linux/spinlock.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/cpu.h>
#include <linux/sysfs.h>
#include <linux/ctype.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_fc.h>
#include <net/rtnetlink.h>

#include <scsi/fc/fc_encaps.h>
#include <scsi/fc/fc_fip.h>

#include <scsi/libfc.h>
#include <scsi/fc_frame.h>
#include <scsi/libfcoe.h>

#include "fcoe.h"
MODULE_AUTHOR("Open-FCoE.org");
MODULE_DESCRIPTION("FCoE");
MODULE_LICENSE("GPL v2");

/* Performance tuning parameters for fcoe */
static unsigned int fcoe_ddp_min;
module_param_named(ddp_min, fcoe_ddp_min, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ddp_min, "Minimum I/O size in bytes for "	\
		 "Direct Data Placement (DDP).");
DEFINE_MUTEX(fcoe_config_mutex);
/* fcoe_percpu_clean completion.  Waiter protected by fcoe_create_mutex */
static DECLARE_COMPLETION(fcoe_flush_completion);

/* fcoe host list */
/* must only be accessed under the RTNL mutex */
LIST_HEAD(fcoe_hostlist);
DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu);
/* Function Prototypes */
static int fcoe_reset(struct Scsi_Host *shost);
static int fcoe_xmit(struct fc_lport *, struct fc_frame *);
static int fcoe_rcv(struct sk_buff *, struct net_device *,
		    struct packet_type *, struct net_device *);
static int fcoe_percpu_receive_thread(void *arg);
static void fcoe_clean_pending_queue(struct fc_lport *lp);
static void fcoe_percpu_clean(struct fc_lport *lp);
static int fcoe_link_ok(struct fc_lport *lp);

static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *);
static int fcoe_hostlist_add(const struct fc_lport *);

static void fcoe_check_wait_queue(struct fc_lport *, struct sk_buff *);
static int fcoe_device_notification(struct notifier_block *, ulong, void *);
static void fcoe_dev_setup(void);
static void fcoe_dev_cleanup(void);
static struct fcoe_interface *
	fcoe_hostlist_lookup_port(const struct net_device *dev);
/* notification function from net device */
static struct notifier_block fcoe_notifier = {
	.notifier_call = fcoe_device_notification,
};

static struct scsi_transport_template *scsi_transport_fcoe_sw;
struct fc_function_template fcoe_transport_function = {
	.show_host_node_name = 1,
	.show_host_port_name = 1,
	.show_host_supported_classes = 1,
	.show_host_supported_fc4s = 1,
	.show_host_active_fc4s = 1,
	.show_host_maxframe_size = 1,

	.show_host_port_id = 1,
	.show_host_supported_speeds = 1,
	.get_host_speed = fc_get_host_speed,
	.show_host_speed = 1,
	.show_host_port_type = 1,
	.get_host_port_state = fc_get_host_port_state,
	.show_host_port_state = 1,
	.show_host_symbolic_name = 1,

	.dd_fcrport_size = sizeof(struct fc_rport_libfc_priv),
	.show_rport_maxframe_size = 1,
	.show_rport_supported_classes = 1,

	.show_host_fabric_name = 1,
	.show_starget_node_name = 1,
	.show_starget_port_name = 1,
	.show_starget_port_id = 1,
	.set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
	.show_rport_dev_loss_tmo = 1,
	.get_fc_host_stats = fc_get_host_stats,
	.issue_fc_host_lip = fcoe_reset,

	.terminate_rport_io = fc_rport_terminate_io,
};
static struct scsi_host_template fcoe_shost_template = {
	.module = THIS_MODULE,
	.name = "FCoE Driver",
	.proc_name = FCOE_NAME,
	.queuecommand = fc_queuecommand,
	.eh_abort_handler = fc_eh_abort,
	.eh_device_reset_handler = fc_eh_device_reset,
	.eh_host_reset_handler = fc_eh_host_reset,
	.slave_alloc = fc_slave_alloc,
	.change_queue_depth = fc_change_queue_depth,
	.change_queue_type = fc_change_queue_type,
	.can_queue = FCOE_MAX_OUTSTANDING_COMMANDS,
	.use_clustering = ENABLE_CLUSTERING,
	.sg_tablesize = SG_ALL,
	.max_sectors = 0xffff,
};
static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *dev,
			 struct packet_type *ptype,
			 struct net_device *orig_dev);
/**
 * fcoe_interface_setup()
 * @fcoe: new fcoe_interface
 * @netdev: ptr to the associated netdevice struct
 *
 * Returns: 0 for success
 * Locking: must be called with the RTNL mutex held
 */
static int fcoe_interface_setup(struct fcoe_interface *fcoe,
				struct net_device *netdev)
{
	struct fcoe_ctlr *fip = &fcoe->ctlr;
	struct netdev_hw_addr *ha;
	u8 flogi_maddr[ETH_ALEN];

	fcoe->netdev = netdev;

	/* Do not support for bonding device */
	if ((netdev->priv_flags & IFF_MASTER_ALB) ||
	    (netdev->priv_flags & IFF_SLAVE_INACTIVE) ||
	    (netdev->priv_flags & IFF_MASTER_8023AD)) {
		return -EOPNOTSUPP;
	}

	/* look for SAN MAC address, if multiple SAN MACs exist, only
	 * use the first one for SPMA */
	for_each_dev_addr(netdev, ha) {
		if ((ha->type == NETDEV_HW_ADDR_T_SAN) &&
		    (is_valid_ether_addr(fip->ctl_src_addr))) {
			memcpy(fip->ctl_src_addr, ha->addr, ETH_ALEN);
			fip->spma = 1;
			break;
		}
	}

	/* setup Source Mac Address */
	if (!fip->spma)
		memcpy(fip->ctl_src_addr, netdev->dev_addr,
		       netdev->addr_len);

	/*
	 * Add FCoE MAC address as second unicast MAC address
	 * or enter promiscuous mode if not capable of listening
	 * for multiple unicast MACs.
	 */
	memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
	dev_unicast_add(netdev, flogi_maddr);
	if (fip->spma)
		dev_unicast_add(netdev, fip->ctl_src_addr);
	dev_mc_add(netdev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0);

	/*
	 * setup the receive function from ethernet driver
	 * on the ethertype for the given device
	 */
	fcoe->fcoe_packet_type.func = fcoe_rcv;
	fcoe->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE);
	fcoe->fcoe_packet_type.dev = netdev;
	dev_add_pack(&fcoe->fcoe_packet_type);

	fcoe->fip_packet_type.func = fcoe_fip_recv;
	fcoe->fip_packet_type.type = htons(ETH_P_FIP);
	fcoe->fip_packet_type.dev = netdev;
	dev_add_pack(&fcoe->fip_packet_type);

	return 0;
}
static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb);
static void fcoe_update_src_mac(struct fcoe_ctlr *fip, u8 *old, u8 *new);
static void fcoe_destroy_work(struct work_struct *work);
/**
 * fcoe_interface_create()
 * @netdev: network interface
 *
 * Returns: pointer to a struct fcoe_interface or NULL on error
 */
static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev)
{
	struct fcoe_interface *fcoe;

	fcoe = kzalloc(sizeof(*fcoe), GFP_KERNEL);
	if (!fcoe) {
		FCOE_NETDEV_DBG(netdev, "Could not allocate fcoe structure\n");
		return NULL;
	}

	dev_hold(netdev);
	kref_init(&fcoe->kref);

	/*
	 * Initialize FIP.
	 */
	fcoe_ctlr_init(&fcoe->ctlr);
	fcoe->ctlr.send = fcoe_fip_send;
	fcoe->ctlr.update_mac = fcoe_update_src_mac;

	fcoe_interface_setup(fcoe, netdev);

	return fcoe;
}
/**
 * fcoe_interface_cleanup() - clean up netdev configurations
 * @fcoe: the fcoe_interface to clean up
 *
 * Caller must be holding the RTNL mutex
 */
void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
{
	struct net_device *netdev = fcoe->netdev;
	struct fcoe_ctlr *fip = &fcoe->ctlr;
	u8 flogi_maddr[ETH_ALEN];

	/*
	 * Don't listen for Ethernet packets anymore.
	 * synchronize_net() ensures that the packet handlers are not running
	 * on another CPU. dev_remove_pack() would do that, this calls the
	 * unsynchronized version __dev_remove_pack() to avoid multiple delays.
	 */
	__dev_remove_pack(&fcoe->fcoe_packet_type);
	__dev_remove_pack(&fcoe->fip_packet_type);
	synchronize_net();

	/* Delete secondary MAC addresses */
	memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
	dev_unicast_delete(netdev, flogi_maddr);
	if (!is_zero_ether_addr(fip->data_src_addr))
		dev_unicast_delete(netdev, fip->data_src_addr);
	if (fip->spma)
		dev_unicast_delete(netdev, fip->ctl_src_addr);
	dev_mc_delete(netdev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0);
}
/**
 * fcoe_interface_release() - fcoe_port kref release function
 * @kref: embedded reference count in an fcoe_interface struct
 */
static void fcoe_interface_release(struct kref *kref)
{
	struct fcoe_interface *fcoe;
	struct net_device *netdev;

	fcoe = container_of(kref, struct fcoe_interface, kref);
	netdev = fcoe->netdev;
	/* tear-down the FCoE controller */
	fcoe_ctlr_destroy(&fcoe->ctlr);
	kfree(fcoe);
	dev_put(netdev);
}
/**
 * fcoe_interface_get()
 * @fcoe: the fcoe_interface to reference
 */
static inline void fcoe_interface_get(struct fcoe_interface *fcoe)
{
	kref_get(&fcoe->kref);
}

/**
 * fcoe_interface_put()
 * @fcoe: the fcoe_interface to drop a reference on
 */
static inline void fcoe_interface_put(struct fcoe_interface *fcoe)
{
	kref_put(&fcoe->kref, fcoe_interface_release);
}
/**
 * fcoe_fip_recv - handle a received FIP frame.
 * @skb: the receive skb
 * @dev: associated &net_device
 * @ptype: the &packet_type structure which was used to register this handler.
 * @orig_dev: original receive &net_device, in case @dev is a bond.
 *
 * Returns: 0 for success
 */
static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *dev,
			 struct packet_type *ptype,
			 struct net_device *orig_dev)
{
	struct fcoe_interface *fcoe;

	fcoe = container_of(ptype, struct fcoe_interface, fip_packet_type);
	fcoe_ctlr_recv(&fcoe->ctlr, skb);
	return 0;
}
/**
 * fcoe_fip_send() - send an Ethernet-encapsulated FIP frame.
 * @fip: FCoE controller.
 * @skb: FIP frame to send.
 */
static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
{
	skb->dev = fcoe_from_ctlr(fip)->netdev;
	dev_queue_xmit(skb);
}
/**
 * fcoe_update_src_mac() - Update Ethernet MAC filters.
 * @fip: FCoE controller.
 * @old: Unicast MAC address to delete if the MAC is non-zero.
 * @new: Unicast MAC address to add.
 *
 * Remove any previously-set unicast MAC filter.
 * Add secondary FCoE MAC address filter for our OUI.
 */
static void fcoe_update_src_mac(struct fcoe_ctlr *fip, u8 *old, u8 *new)
{
	struct fcoe_interface *fcoe;

	fcoe = fcoe_from_ctlr(fip);
	rtnl_lock();
	if (!is_zero_ether_addr(old))
		dev_unicast_delete(fcoe->netdev, old);
	dev_unicast_add(fcoe->netdev, new);
	rtnl_unlock();
}
/**
 * fcoe_lport_config() - sets up the fc_lport
 * @lp: ptr to the fc_lport
 *
 * Returns: 0 for success
 */
static int fcoe_lport_config(struct fc_lport *lp)
{
	lp->link_up = 0;
	lp->qfull = 0;
	lp->max_retry_count = 3;
	lp->max_rport_retry_count = 3;
	lp->e_d_tov = 2 * 1000;	/* FC-FS default */
	lp->r_a_tov = 2 * 2 * 1000;
	lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
			      FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);

	fc_lport_init_stats(lp);

	/* lport fc_lport related configuration */
	fc_lport_config(lp);

	/* offload related configuration */
	lp->crc_offload = 0;
	lp->seq_offload = 0;
	lp->lro_enabled = 0;
	lp->lro_xid = 0;
	lp->lso_max = 0;

	return 0;
}
/**
 * fcoe_queue_timer() - fcoe queue timer
 * @lp: the fc_lport pointer
 *
 * Calls fcoe_check_wait_queue on timeout
 */
static void fcoe_queue_timer(ulong lp)
{
	fcoe_check_wait_queue((struct fc_lport *)lp, NULL);
}
/**
 * fcoe_netdev_config() - Set up netdev for SW FCoE
 * @lp: ptr to the fc_lport
 * @netdev: ptr to the associated netdevice struct
 *
 * Must be called after fcoe_lport_config() as it will use lport mutex
 *
 * Returns: 0 for success
 */
static int fcoe_netdev_config(struct fc_lport *lp, struct net_device *netdev)
{
	u32 mfs;
	u64 wwnn, wwpn;
	struct fcoe_interface *fcoe;
	struct fcoe_port *port;

	/* Setup lport private data to point to fcoe softc */
	port = lport_priv(lp);
	fcoe = port->fcoe;

	/*
	 * Determine max frame size based on underlying device and optional
	 * user-configured limit.  If the MFS is too low, fcoe_link_ok()
	 * will return 0, so do this first.
	 */
	mfs = netdev->mtu - (sizeof(struct fcoe_hdr) +
			     sizeof(struct fcoe_crc_eof));
	if (fc_set_mfs(lp, mfs))
		return -EINVAL;

	/* offload features support */
	if (netdev->features & NETIF_F_SG)
		lp->sg_supp = 1;

	if (netdev->features & NETIF_F_FCOE_CRC) {
		lp->crc_offload = 1;
		FCOE_NETDEV_DBG(netdev, "Supports FCCRC offload\n");
	}
	if (netdev->features & NETIF_F_FSO) {
		lp->seq_offload = 1;
		lp->lso_max = netdev->gso_max_size;
		FCOE_NETDEV_DBG(netdev, "Supports LSO for max len 0x%x\n",
				lp->lso_max);
	}
	if (netdev->fcoe_ddp_xid) {
		lp->lro_enabled = 1;
		lp->lro_xid = netdev->fcoe_ddp_xid;
		FCOE_NETDEV_DBG(netdev, "Supports LRO for max xid 0x%x\n",
				lp->lro_xid);
	}
	skb_queue_head_init(&port->fcoe_pending_queue);
	port->fcoe_pending_queue_active = 0;
	setup_timer(&port->timer, fcoe_queue_timer, (unsigned long)lp);

	wwnn = fcoe_wwn_from_mac(netdev->dev_addr, 1, 0);
	fc_set_wwnn(lp, wwnn);
	/* XXX - 3rd arg needs to be vlan id */
	wwpn = fcoe_wwn_from_mac(netdev->dev_addr, 2, 0);
	fc_set_wwpn(lp, wwpn);

	return 0;
}
/**
 * fcoe_shost_config() - Sets up fc_lport->host
 * @lp: ptr to the fc_lport
 * @shost: ptr to the associated scsi host
 * @dev: device associated to scsi host
 *
 * Must be called after fcoe_lport_config() and fcoe_netdev_config()
 *
 * Returns: 0 for success
 */
static int fcoe_shost_config(struct fc_lport *lp, struct Scsi_Host *shost,
			     struct device *dev)
{
	int rc = 0;

	/* lport scsi host config */
	lp->host = shost;

	lp->host->max_lun = FCOE_MAX_LUN;
	lp->host->max_id = FCOE_MAX_FCP_TARGET;
	lp->host->max_channel = 0;
	lp->host->transportt = scsi_transport_fcoe_sw;

	/* add the new host to the SCSI-ml */
	rc = scsi_add_host(lp->host, dev);
	if (rc) {
		FCOE_NETDEV_DBG(fcoe_netdev(lp), "fcoe_shost_config: "
				"error on scsi_add_host\n");
		return rc;
	}
	sprintf(fc_host_symbolic_name(lp->host), "%s v%s over %s",
		FCOE_NAME, FCOE_VERSION,
		fcoe_netdev(lp)->name);

	return 0;
}
/**
 * fcoe_oem_match() - match for read types IO
 * @fp: the fc_frame for new IO.
 *
 * Returns: true for read types IO, otherwise returns false.
 */
bool fcoe_oem_match(struct fc_frame *fp)
{
	return fc_fcp_is_read(fr_fsp(fp)) &&
		(fr_fsp(fp)->data_len > fcoe_ddp_min);
}
/**
 * fcoe_em_config() - allocates em for this lport
 * @lp: the fcoe that em is to be allocated for
 *
 * Returns: 0 on success
 */
static inline int fcoe_em_config(struct fc_lport *lp)
{
	struct fcoe_port *port = lport_priv(lp);
	struct fcoe_interface *fcoe = port->fcoe;
	struct fcoe_interface *oldfcoe = NULL;
	struct net_device *old_real_dev, *cur_real_dev;
	u16 min_xid = FCOE_MIN_XID;
	u16 max_xid = FCOE_MAX_XID;

	/*
	 * Check if need to allocate an em instance for
	 * offload exchange ids to be shared across all VN_PORTs/lport.
	 */
	if (!lp->lro_enabled || !lp->lro_xid || (lp->lro_xid >= max_xid)) {
		lp->lro_xid = 0;
		goto skip_oem;
	}

	/*
	 * Reuse existing offload em instance in case
	 * it is already allocated on real eth device
	 */
	if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN)
		cur_real_dev = vlan_dev_real_dev(fcoe->netdev);
	else
		cur_real_dev = fcoe->netdev;

	list_for_each_entry(oldfcoe, &fcoe_hostlist, list) {
		if (oldfcoe->netdev->priv_flags & IFF_802_1Q_VLAN)
			old_real_dev = vlan_dev_real_dev(oldfcoe->netdev);
		else
			old_real_dev = oldfcoe->netdev;

		if (cur_real_dev == old_real_dev) {
			fcoe->oem = oldfcoe->oem;
			break;
		}
	}

	if (fcoe->oem) {
		if (!fc_exch_mgr_add(lp, fcoe->oem, fcoe_oem_match)) {
			printk(KERN_ERR "fcoe_em_config: failed to add "
			       "offload em:%p on interface:%s\n",
			       fcoe->oem, fcoe->netdev->name);
			return -ENOMEM;
		}
	} else {
		fcoe->oem = fc_exch_mgr_alloc(lp, FC_CLASS_3,
					      FCOE_MIN_XID, lp->lro_xid,
					      fcoe_oem_match);
		if (!fcoe->oem) {
			printk(KERN_ERR "fcoe_em_config: failed to allocate "
			       "em for offload exches on interface:%s\n",
			       fcoe->netdev->name);
			return -ENOMEM;
		}
	}

	/*
	 * Exclude offload EM xid range from next EM xid range.
	 */
	min_xid += lp->lro_xid + 1;

skip_oem:
	if (!fc_exch_mgr_alloc(lp, FC_CLASS_3, min_xid, max_xid, NULL)) {
		printk(KERN_ERR "fcoe_em_config: failed to "
		       "allocate em on interface %s\n", fcoe->netdev->name);
		return -ENOMEM;
	}

	return 0;
}
/**
 * fcoe_if_destroy() - FCoE software HBA tear-down function
 * @lport: fc_lport to destroy
 */
static void fcoe_if_destroy(struct fc_lport *lport)
{
	struct fcoe_port *port = lport_priv(lport);
	struct fcoe_interface *fcoe = port->fcoe;
	struct net_device *netdev = fcoe->netdev;

	FCOE_NETDEV_DBG(netdev, "Destroying interface\n");

	/* Logout of the fabric */
	fc_fabric_logoff(lport);

	/* Cleanup the fc_lport */
	fc_lport_destroy(lport);
	fc_fcp_destroy(lport);

	/* Stop the transmit retry timer */
	del_timer_sync(&port->timer);

	/* Free existing transmit skbs */
	fcoe_clean_pending_queue(lport);

	/* receives may not be stopped until after this */
	fcoe_interface_put(fcoe);

	/* Free queued packets for the per-CPU receive threads */
	fcoe_percpu_clean(lport);

	/* Detach from the scsi-ml */
	fc_remove_host(lport->host);
	scsi_remove_host(lport->host);

	/* There are no more rports or I/O, free the EM */
	fc_exch_mgr_free(lport);

	/* Free memory used by statistical counters */
	fc_lport_free_stats(lport);

	/* Release the Scsi_Host */
	scsi_host_put(lport->host);
}
/**
 * fcoe_ddp_setup - calls LLD's ddp_setup through net_device
 * @lp: the corresponding fc_lport
 * @xid: the exchange id for this ddp transfer
 * @sgl: the scatterlist describing this transfer
 * @sgc: number of sg items
 *
 * Returns: 0 for no ddp
 */
static int fcoe_ddp_setup(struct fc_lport *lp, u16 xid,
			  struct scatterlist *sgl, unsigned int sgc)
{
	struct net_device *n = fcoe_netdev(lp);

	if (n->netdev_ops && n->netdev_ops->ndo_fcoe_ddp_setup)
		return n->netdev_ops->ndo_fcoe_ddp_setup(n, xid, sgl, sgc);

	return 0;
}
/**
 * fcoe_ddp_done - calls LLD's ddp_done through net_device
 * @lp: the corresponding fc_lport
 * @xid: the exchange id for this ddp transfer
 *
 * Returns: the length of data that has been completed by ddp
 */
static int fcoe_ddp_done(struct fc_lport *lp, u16 xid)
{
	struct net_device *n = fcoe_netdev(lp);

	if (n->netdev_ops && n->netdev_ops->ndo_fcoe_ddp_done)
		return n->netdev_ops->ndo_fcoe_ddp_done(n, xid);
	return 0;
}
static struct libfc_function_template fcoe_libfc_fcn_templ = {
	.frame_send = fcoe_xmit,
	.ddp_setup = fcoe_ddp_setup,
	.ddp_done = fcoe_ddp_done,
};
/**
 * fcoe_if_create() - this function creates the fcoe port
 * @fcoe: fcoe_interface structure to create an fc_lport instance on
 * @parent: device pointer to be the parent in sysfs for the SCSI host
 *
 * Creates fc_lport struct and scsi_host for lport, configures lport.
 *
 * Returns: The allocated fc_lport or an error pointer
 */
static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
				       struct device *parent)
{
	int rc;
	struct fc_lport *lport = NULL;
	struct fcoe_port *port;
	struct Scsi_Host *shost;
	struct net_device *netdev = fcoe->netdev;

	FCOE_NETDEV_DBG(netdev, "Create Interface\n");

	shost = libfc_host_alloc(&fcoe_shost_template,
				 sizeof(struct fcoe_port));
	if (!shost) {
		FCOE_NETDEV_DBG(netdev, "Could not allocate host structure\n");
		rc = -ENOMEM;
		goto out;
	}
	lport = shost_priv(shost);
	port = lport_priv(lport);
	port->lport = lport;
	port->fcoe = fcoe;
	INIT_WORK(&port->destroy_work, fcoe_destroy_work);

	/* configure fc_lport, e.g., em */
	rc = fcoe_lport_config(lport);
	if (rc) {
		FCOE_NETDEV_DBG(netdev, "Could not configure lport for the "
				"interface\n");
		goto out_host_put;
	}

	/* configure lport network properties */
	rc = fcoe_netdev_config(lport, netdev);
	if (rc) {
		FCOE_NETDEV_DBG(netdev, "Could not configure netdev for the "
				"interface\n");
		goto out_lp_destroy;
	}

	/* configure lport scsi host properties */
	rc = fcoe_shost_config(lport, shost, parent);
	if (rc) {
		FCOE_NETDEV_DBG(netdev, "Could not configure shost for the "
				"interface\n");
		goto out_lp_destroy;
	}

	/* Initialize the library */
	rc = fcoe_libfc_config(lport, &fcoe_libfc_fcn_templ);
	if (rc) {
		FCOE_NETDEV_DBG(netdev, "Could not configure libfc for the "
				"interface\n");
		goto out_lp_destroy;
	}

	/*
	 * fcoe_em_alloc() and fcoe_hostlist_add() both
	 * need to be atomic with respect to other changes to the hostlist
	 * since fcoe_em_alloc() looks for an existing EM
	 * instance on host list updated by fcoe_hostlist_add().
	 *
	 * This is currently handled through the fcoe_config_mutex being held.
	 */

	/* lport exch manager allocation */
	rc = fcoe_em_config(lport);
	if (rc) {
		FCOE_NETDEV_DBG(netdev, "Could not configure the EM for the "
				"interface\n");
		goto out_lp_destroy;
	}

	fcoe_interface_get(fcoe);
	return lport;

out_lp_destroy:
	fc_exch_mgr_free(lport);
out_host_put:
	scsi_host_put(lport->host);
out:
	return ERR_PTR(rc);
}
/**
 * fcoe_if_init() - attach to scsi transport
 *
 * Returns: 0 on success
 */
static int __init fcoe_if_init(void)
{
	/* attach to scsi transport */
	scsi_transport_fcoe_sw =
		fc_attach_transport(&fcoe_transport_function);

	if (!scsi_transport_fcoe_sw) {
		printk(KERN_ERR "fcoe: Failed to attach to the FC transport\n");
		return -ENODEV;
	}

	return 0;
}
/**
 * fcoe_if_exit() - detach from scsi transport
 *
 * Returns: 0 on success
 */
int __exit fcoe_if_exit(void)
{
	fc_release_transport(scsi_transport_fcoe_sw);
	scsi_transport_fcoe_sw = NULL;
	return 0;
}
/**
 * fcoe_percpu_thread_create() - Create a receive thread for an online cpu
 * @cpu: cpu index for the online cpu
 */
static void fcoe_percpu_thread_create(unsigned int cpu)
{
	struct fcoe_percpu_s *p;
	struct task_struct *thread;

	p = &per_cpu(fcoe_percpu, cpu);

	thread = kthread_create(fcoe_percpu_receive_thread,
				(void *)p, "fcoethread/%d", cpu);

	if (likely(!IS_ERR(thread))) {
		kthread_bind(thread, cpu);
		wake_up_process(thread);

		spin_lock_bh(&p->fcoe_rx_list.lock);
		p->thread = thread;
		spin_unlock_bh(&p->fcoe_rx_list.lock);
	}
}
/**
 * fcoe_percpu_thread_destroy() - removes the rx thread for the given cpu
 * @cpu: cpu index the rx thread is to be removed
 *
 * Destroys a per-CPU Rx thread. Any pending skbs are moved to the
 * current CPU's Rx thread. If the thread being destroyed is bound to
 * the CPU processing this context the skbs will be freed.
 */
static void fcoe_percpu_thread_destroy(unsigned int cpu)
{
	struct fcoe_percpu_s *p;
	struct task_struct *thread;
	struct page *crc_eof;
	struct sk_buff *skb;
#ifdef CONFIG_SMP
	struct fcoe_percpu_s *p0;
	unsigned targ_cpu = smp_processor_id();
#endif /* CONFIG_SMP */

	FCOE_DBG("Destroying receive thread for CPU %d\n", cpu);

	/* Prevent any new skbs from being queued for this CPU. */
	p = &per_cpu(fcoe_percpu, cpu);
	spin_lock_bh(&p->fcoe_rx_list.lock);
	thread = p->thread;
	p->thread = NULL;
	crc_eof = p->crc_eof_page;
	p->crc_eof_page = NULL;
	p->crc_eof_offset = 0;
	spin_unlock_bh(&p->fcoe_rx_list.lock);

#ifdef CONFIG_SMP
	/*
	 * Don't bother moving the skb's if this context is running
	 * on the same CPU that is having its thread destroyed. This
	 * can easily happen when the module is removed.
	 */
	if (cpu != targ_cpu) {
		p0 = &per_cpu(fcoe_percpu, targ_cpu);
		spin_lock_bh(&p0->fcoe_rx_list.lock);
		if (p0->thread) {
			FCOE_DBG("Moving frames from CPU %d to CPU %d\n",
				 cpu, targ_cpu);

			while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
				__skb_queue_tail(&p0->fcoe_rx_list, skb);
			spin_unlock_bh(&p0->fcoe_rx_list.lock);
		} else {
			/*
			 * The targeted CPU is not initialized and cannot
			 * accept new skbs. Unlock the targeted CPU and drop
			 * the skbs on the CPU that is going offline.
			 */
			while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
				kfree_skb(skb);
			spin_unlock_bh(&p0->fcoe_rx_list.lock);
		}
	} else {
		/*
		 * This scenario occurs when the module is being removed
		 * and all threads are being destroyed. skbs will continue
		 * to be shifted from the CPU thread that is being removed
		 * to the CPU thread associated with the CPU that is processing
		 * the module removal. Once there is only one CPU Rx thread it
		 * will reach this case and we will drop all skbs and later
		 * stop the thread.
		 */
		spin_lock_bh(&p->fcoe_rx_list.lock);
		while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
			kfree_skb(skb);
		spin_unlock_bh(&p->fcoe_rx_list.lock);
	}
#else
	/*
	 * This is a non-SMP scenario where the singular Rx thread is
	 * being removed. Free all skbs and stop the thread.
	 */
	spin_lock_bh(&p->fcoe_rx_list.lock);
	while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
		kfree_skb(skb);
	spin_unlock_bh(&p->fcoe_rx_list.lock);
#endif

	if (thread)
		kthread_stop(thread);

	if (crc_eof)
		put_page(crc_eof);
}
/**
 * fcoe_cpu_callback() - fcoe cpu hotplug event callback
 * @nfb: callback data block
 * @action: event triggering the callback
 * @hcpu: index for the cpu of this event
 *
 * This creates or destroys per cpu data for fcoe
 *
 * Returns NOTIFY_OK always.
 */
static int fcoe_cpu_callback(struct notifier_block *nfb,
			     unsigned long action, void *hcpu)
{
	unsigned cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		FCOE_DBG("CPU %x online: Create Rx thread\n", cpu);
		fcoe_percpu_thread_create(cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		FCOE_DBG("CPU %x offline: Remove Rx thread\n", cpu);
		fcoe_percpu_thread_destroy(cpu);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block fcoe_cpu_notifier = {
	.notifier_call = fcoe_cpu_callback,
};
/**
 * fcoe_rcv() - this is the fcoe receive function called by NET_RX_SOFTIRQ
 * @skb: the receive skb
 * @dev: associated net device
 * @ptype: the packet_type context
 * @olddev: last device
 *
 * This function receives the packet, builds an FC frame and passes it up.
 *
 * Returns: 0 for success
 */
int fcoe_rcv(struct sk_buff *skb, struct net_device *dev,
	     struct packet_type *ptype, struct net_device *olddev)
{
	struct fc_lport *lp;
	struct fcoe_rcv_info *fr;
	struct fcoe_interface *fcoe;
	struct fc_frame_header *fh;
	struct fcoe_percpu_s *fps;
	unsigned int cpu;

	fcoe = container_of(ptype, struct fcoe_interface, fcoe_packet_type);
	lp = fcoe->ctlr.lp;
	if (unlikely(lp == NULL)) {
		FCOE_NETDEV_DBG(dev, "Cannot find hba structure");
		goto err2;
	}
	if (!lp->link_up)
		goto err2;

	FCOE_NETDEV_DBG(dev, "skb_info: len:%d data_len:%d head:%p "
			"data:%p tail:%p end:%p sum:%d dev:%s",
			skb->len, skb->data_len, skb->head, skb->data,
			skb_tail_pointer(skb), skb_end_pointer(skb),
			skb->csum, skb->dev ? skb->dev->name : "<NULL>");

	/* check for FCOE packet type */
	if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) {
		FCOE_NETDEV_DBG(dev, "Wrong FC type frame");
		goto err;
	}

	/*
	 * Check for minimum frame length, and make sure required FCoE
	 * and FC headers are pulled into the linear data area.
	 */
	if (unlikely((skb->len < FCOE_MIN_FRAME) ||
	    !pskb_may_pull(skb, FCOE_HEADER_LEN)))
		goto err;

	skb_set_transport_header(skb, sizeof(struct fcoe_hdr));
	fh = (struct fc_frame_header *) skb_transport_header(skb);

	fr = fcoe_dev_from_skb(skb);
	fr->fr_dev = lp;
	fr->ptype = ptype;

	/*
	 * In case the incoming frame's exchange is originated from
	 * the initiator, then received frame's exchange id is ANDed
	 * with fc_cpu_mask bits to get the same cpu on which exchange
	 * was originated, otherwise just use the current cpu.
	 */
	if (ntoh24(fh->fh_f_ctl) & FC_FC_EX_CTX)
		cpu = ntohs(fh->fh_ox_id) & fc_cpu_mask;
	else
		cpu = smp_processor_id();

	fps = &per_cpu(fcoe_percpu, cpu);
	spin_lock_bh(&fps->fcoe_rx_list.lock);
	if (unlikely(!fps->thread)) {
		/*
		 * The targeted CPU is not ready, let's target
		 * the first CPU now. For non-SMP systems this
		 * will check the same CPU twice.
		 */
		FCOE_NETDEV_DBG(dev, "CPU is online, but no receive thread "
				"ready for incoming skb- using first online "
				"CPU.\n");

		spin_unlock_bh(&fps->fcoe_rx_list.lock);
		cpu = first_cpu(cpu_online_map);
		fps = &per_cpu(fcoe_percpu, cpu);
		spin_lock_bh(&fps->fcoe_rx_list.lock);
		if (!fps->thread) {
			spin_unlock_bh(&fps->fcoe_rx_list.lock);
			goto err;
		}
	}

	/*
	 * We now have a valid CPU that we're targeting for
	 * this skb. We also have this receive thread locked,
	 * so we're free to queue skbs into its queue.
	 */
	__skb_queue_tail(&fps->fcoe_rx_list, skb);
	if (fps->fcoe_rx_list.qlen == 1)
		wake_up_process(fps->thread);

	spin_unlock_bh(&fps->fcoe_rx_list.lock);

	return 0;
err:
	fc_lport_get_stats(lp)->ErrorFrames++;

err2:
	kfree_skb(skb);
	return -1;
}
/**
 * fcoe_start_io() - pass to netdev to start xmit for fcoe
 * @skb: the skb to be xmitted
 *
 * Returns: 0 for success
 */
static inline int fcoe_start_io(struct sk_buff *skb)
{
	int rc;

	skb_get(skb);
	rc = dev_queue_xmit(skb);
	if (rc != 0)
		return rc;
	kfree_skb(skb);
	return 0;
}
/**
 * fcoe_get_paged_crc_eof() - in case we need to alloc a page for crc_eof
 * @skb: the skb to be xmitted
 * @tlen: total length of the trailer
 *
 * Returns: 0 for success
 */
static int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen)
{
	struct fcoe_percpu_s *fps;
	struct page *page;

	fps = &get_cpu_var(fcoe_percpu);
	page = fps->crc_eof_page;
	if (!page) {
		page = alloc_page(GFP_ATOMIC);
		if (!page) {
			put_cpu_var(fcoe_percpu);
			return -ENOMEM;
		}
		fps->crc_eof_page = page;
		fps->crc_eof_offset = 0;
	}

	get_page(page);
	skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page,
			   fps->crc_eof_offset, tlen);
	skb->len += tlen;
	skb->data_len += tlen;
	skb->truesize += tlen;
	fps->crc_eof_offset += sizeof(struct fcoe_crc_eof);

	if (fps->crc_eof_offset >= PAGE_SIZE) {
		fps->crc_eof_page = NULL;
		fps->crc_eof_offset = 0;
		put_page(page);
	}
	put_cpu_var(fcoe_percpu);
	return 0;
}
/**
 * fcoe_fc_crc() - calculates FC CRC in this fcoe skb
 * @fp: the fc_frame containing data to be checksummed
 *
 * This uses crc32() to calculate the crc for port frame
 * Return: 32 bit crc
 */
u32 fcoe_fc_crc(struct fc_frame *fp)
{
	struct sk_buff *skb = fp_skb(fp);
	struct skb_frag_struct *frag;
	unsigned char *data;
	unsigned long off, len, clen;
	u32 crc;
	unsigned i;

	crc = crc32(~0, skb->data, skb_headlen(skb));

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		off = frag->page_offset;
		len = frag->size;
		while (len > 0) {
			clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK));
			data = kmap_atomic(frag->page + (off >> PAGE_SHIFT),
					   KM_SKB_DATA_SOFTIRQ);
			crc = crc32(crc, data + (off & ~PAGE_MASK), clen);
			kunmap_atomic(data, KM_SKB_DATA_SOFTIRQ);
			off += clen;
			len -= clen;
		}
	}
	return crc;
}
/**
 * fcoe_xmit() - FCoE frame transmit function
 * @lp: the associated local lport
 * @fp: the fc_frame to be transmitted
 *
 * Return: 0 for success
 */
int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
{
	int wlen;
	u32 crc;
	struct ethhdr *eh;
	struct fcoe_crc_eof *cp;
	struct sk_buff *skb;
	struct fcoe_dev_stats *stats;
	struct fc_frame_header *fh;
	unsigned int hlen;		/* header length implies the version */
	unsigned int tlen;		/* trailer length */
	unsigned int elen;		/* eth header, may include vlan */
	struct fcoe_port *port = lport_priv(lp);
	struct fcoe_interface *fcoe = port->fcoe;
	u8 sof, eof;
	struct fcoe_hdr *hp;

	WARN_ON((fr_len(fp) % sizeof(u32)) != 0);

	fh = fc_frame_header_get(fp);
	skb = fp_skb(fp);
	wlen = skb->len / FCOE_WORD_TO_BYTE;

	if (!lp->link_up) {
		kfree_skb(skb);
		return 0;
	}

	if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) &&
	    fcoe_ctlr_els_send(&fcoe->ctlr, skb))
		return 0;

	sof = fr_sof(fp);
	eof = fr_eof(fp);

	elen = sizeof(struct ethhdr);
	hlen = sizeof(struct fcoe_hdr);
	tlen = sizeof(struct fcoe_crc_eof);
	wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;

	/* crc offload */
	if (likely(lp->crc_offload)) {
		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum_start = skb_headroom(skb);
		skb->csum_offset = skb->len;
		crc = 0;
	} else {
		skb->ip_summed = CHECKSUM_NONE;
		crc = fcoe_fc_crc(fp);
	}

	/* copy port crc and eof to the skb buff */
	if (skb_is_nonlinear(skb)) {
		skb_frag_t *frag;
		if (fcoe_get_paged_crc_eof(skb, tlen)) {
			kfree_skb(skb);
			return -ENOMEM;
		}
		frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
		cp = kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ)
			+ frag->page_offset;
	} else {
		cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
	}

	memset(cp, 0, sizeof(*cp));
	cp->fcoe_eof = eof;
	cp->fcoe_crc32 = cpu_to_le32(~crc);

	if (skb_is_nonlinear(skb)) {
		kunmap_atomic(cp, KM_SKB_DATA_SOFTIRQ);
		cp = NULL;
	}

	/* adjust skb network/transport offsets to match mac/fcoe/port */
	skb_push(skb, elen + hlen);
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb->mac_len = elen;
	skb->protocol = htons(ETH_P_FCOE);
	skb->dev = fcoe->netdev;

	/* fill up mac and fcoe headers */
	eh = eth_hdr(skb);
	eh->h_proto = htons(ETH_P_FCOE);
	if (fcoe->ctlr.map_dest)
		fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id);
	else
		/* insert GW address */
		memcpy(eh->h_dest, fcoe->ctlr.dest_addr, ETH_ALEN);

	if (unlikely(fcoe->ctlr.flogi_oxid != FC_XID_UNKNOWN))
		memcpy(eh->h_source, fcoe->ctlr.ctl_src_addr, ETH_ALEN);
	else
		memcpy(eh->h_source, fcoe->ctlr.data_src_addr, ETH_ALEN);

	hp = (struct fcoe_hdr *)(eh + 1);
	memset(hp, 0, sizeof(*hp));
	if (FC_FCOE_VER)
		FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER);
	hp->fcoe_sof = sof;

	/* fcoe lso, mss is in max_payload which is non-zero for FCP data */
	if (lp->seq_offload && fr_max_payload(fp)) {
		skb_shinfo(skb)->gso_type = SKB_GSO_FCOE;
		skb_shinfo(skb)->gso_size = fr_max_payload(fp);
	} else {
		skb_shinfo(skb)->gso_type = 0;
		skb_shinfo(skb)->gso_size = 0;
	}
	/* update tx stats: regardless if LLD fails */
	stats = fc_lport_get_stats(lp);
	stats->TxFrames++;
	stats->TxWords += wlen;

	/* send down to lld */
	fr_dev(fp) = lp;
	if (port->fcoe_pending_queue.qlen)
		fcoe_check_wait_queue(lp, skb);
	else if (fcoe_start_io(skb))
		fcoe_check_wait_queue(lp, skb);

	return 0;
}
/**
 * fcoe_percpu_flush_done() - Indicate percpu queue flush completion.
 * @skb: the skb being completed.
 */
static void fcoe_percpu_flush_done(struct sk_buff *skb)
{
	complete(&fcoe_flush_completion);
}
/**
 * fcoe_percpu_receive_thread() - recv thread per cpu
 * @arg: ptr to the fcoe per cpu struct
 *
 * Return: 0 for success
 */
int fcoe_percpu_receive_thread(void *arg)
{
	struct fcoe_percpu_s *p = arg;
	u32 fr_len;
	struct fc_lport *lp;
	struct fcoe_rcv_info *fr;
	struct fcoe_dev_stats *stats;
	struct fc_frame_header *fh;
	struct sk_buff *skb;
	struct fcoe_crc_eof crc_eof;
	struct fc_frame *fp;
	u8 *mac = NULL;
	struct fcoe_port *port;
	struct fcoe_hdr *hp;

	set_user_nice(current, -20);

	while (!kthread_should_stop()) {

		spin_lock_bh(&p->fcoe_rx_list.lock);
		while ((skb = __skb_dequeue(&p->fcoe_rx_list)) == NULL) {
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_bh(&p->fcoe_rx_list.lock);
			schedule();
			set_current_state(TASK_RUNNING);
			if (kthread_should_stop())
				return 0;
			spin_lock_bh(&p->fcoe_rx_list.lock);
		}
		spin_unlock_bh(&p->fcoe_rx_list.lock);
		fr = fcoe_dev_from_skb(skb);
		lp = fr->fr_dev;
		if (unlikely(lp == NULL)) {
			if (skb->destructor != fcoe_percpu_flush_done)
				FCOE_NETDEV_DBG(skb->dev, "NULL lport in skb");
			kfree_skb(skb);
			continue;
		}

		FCOE_NETDEV_DBG(skb->dev, "skb_info: len:%d data_len:%d "
				"head:%p data:%p tail:%p end:%p sum:%d dev:%s",
				skb->len, skb->data_len,
				skb->head, skb->data, skb_tail_pointer(skb),
				skb_end_pointer(skb), skb->csum,
				skb->dev ? skb->dev->name : "<NULL>");

		/*
		 * Save source MAC address before discarding header.
		 */
		port = lport_priv(lp);
		if (skb_is_nonlinear(skb))
			skb_linearize(skb);	/* not ideal */
		mac = eth_hdr(skb)->h_source;

		/*
		 * Frame length checks and setting up the header pointers
		 * was done in fcoe_rcv already.
		 */
		hp = (struct fcoe_hdr *) skb_network_header(skb);
		fh = (struct fc_frame_header *) skb_transport_header(skb);

		stats = fc_lport_get_stats(lp);
		if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
			if (stats->ErrorFrames < 5)
				printk(KERN_WARNING "fcoe: FCoE version "
				       "mismatch: The frame has "
				       "version %x, but the "
				       "initiator supports version "
				       "%x\n", FC_FCOE_DECAPS_VER(hp),
				       FC_FCOE_VER);
			stats->ErrorFrames++;
			kfree_skb(skb);
			continue;
		}

		skb_pull(skb, sizeof(struct fcoe_hdr));
		fr_len = skb->len - sizeof(struct fcoe_crc_eof);

		stats->RxFrames++;
		stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;

		fp = (struct fc_frame *)skb;
		fc_frame_init(fp);
		fr_dev(fp) = lp;
		fr_sof(fp) = hp->fcoe_sof;

		/* Copy out the CRC and EOF trailer for access */
		if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) {
			kfree_skb(skb);
			continue;
		}
		fr_eof(fp) = crc_eof.fcoe_eof;
		fr_crc(fp) = crc_eof.fcoe_crc32;
		if (pskb_trim(skb, fr_len)) {
			kfree_skb(skb);
			continue;
		}

		/*
		 * Check the CRC here only if it was not validated by
		 * hardware offload; for solicited FCP data the FCP layer
		 * checks it later during the copy instead.
		 */
		if (lp->crc_offload && skb->ip_summed == CHECKSUM_UNNECESSARY)
			fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
		else
			fr_flags(fp) |= FCPHF_CRC_UNCHECKED;

		fh = fc_frame_header_get(fp);
		if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA &&
		    fh->fh_type == FC_TYPE_FCP) {
			fc_exch_recv(lp, fp);
			continue;
		}
		if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) {
			if (le32_to_cpu(fr_crc(fp)) !=
			    ~crc32(~0, skb->data, fr_len)) {
				if (stats->InvalidCRCCount < 5)
					printk(KERN_WARNING "fcoe: dropping "
					       "frame with CRC error\n");
				stats->InvalidCRCCount++;
				stats->ErrorFrames++;
				fc_frame_free(fp);
				continue;
			}
			fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
		}
		if (unlikely(port->fcoe->ctlr.flogi_oxid != FC_XID_UNKNOWN) &&
		    fcoe_ctlr_recv_flogi(&port->fcoe->ctlr, fp, mac)) {
			fc_frame_free(fp);
			continue;
		}
		fc_exch_recv(lp, fp);
	}
	return 0;
}
/**
 * fcoe_check_wait_queue() - attempt to clear the transmit backlog
 * @lp: the fc_lport
 * @skb: an skb to queue before draining, may be NULL
 *
 * This empties the wait_queue: it dequeues the head of the wait_queue and
 * calls fcoe_start_io() for each packet.  If a transmit fails, the skb is
 * put back at the head of the queue and the function tries again later,
 * either from the timer or from the next transmit.
 *
 * The wait_queue is used when the skb transmit fails. skb will go
 * in the wait_queue which will be emptied by the timer function or
 * by the next skb transmit.
 */
static void fcoe_check_wait_queue(struct fc_lport *lp, struct sk_buff *skb)
{
	struct fcoe_port *port = lport_priv(lp);
	int rc;

	spin_lock_bh(&port->fcoe_pending_queue.lock);

	if (skb)
		__skb_queue_tail(&port->fcoe_pending_queue, skb);

	if (port->fcoe_pending_queue_active)
		goto out;
	port->fcoe_pending_queue_active = 1;

	while (port->fcoe_pending_queue.qlen) {
		/* keep qlen > 0 until fcoe_start_io succeeds */
		port->fcoe_pending_queue.qlen++;
		skb = __skb_dequeue(&port->fcoe_pending_queue);

		spin_unlock_bh(&port->fcoe_pending_queue.lock);
		rc = fcoe_start_io(skb);
		spin_lock_bh(&port->fcoe_pending_queue.lock);

		if (rc) {
			__skb_queue_head(&port->fcoe_pending_queue, skb);
			/* undo temporary increment above */
			port->fcoe_pending_queue.qlen--;
			break;
		}
		/* undo temporary increment above */
		port->fcoe_pending_queue.qlen--;
	}

	if (port->fcoe_pending_queue.qlen < FCOE_LOW_QUEUE_DEPTH)
		lp->qfull = 0;
	if (port->fcoe_pending_queue.qlen && !timer_pending(&port->timer))
		mod_timer(&port->timer, jiffies + 2);
	port->fcoe_pending_queue_active = 0;
out:
	if (port->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
		lp->qfull = 1;
	spin_unlock_bh(&port->fcoe_pending_queue.lock);
	return;
}
/**
 * fcoe_dev_setup() - setup link change notification interface
 */
static void fcoe_dev_setup(void)
{
	register_netdevice_notifier(&fcoe_notifier);
}

/**
 * fcoe_dev_cleanup() - cleanup link change notification interface
 */
static void fcoe_dev_cleanup(void)
{
	unregister_netdevice_notifier(&fcoe_notifier);
}
/**
 * fcoe_device_notification() - netdev event notification callback
 * @notifier: context of the notification
 * @event: type of event
 * @ptr: the net_device the event relates to
 *
 * This function is called by the ethernet driver in case of link change event
 *
 * Returns: 0 for success
 */
static int fcoe_device_notification(struct notifier_block *notifier,
				    ulong event, void *ptr)
{
	struct fc_lport *lp = NULL;
	struct net_device *netdev = ptr;
	struct fcoe_interface *fcoe;
	struct fcoe_port *port;
	struct fcoe_dev_stats *stats;
	u32 link_possible = 1;
	u32 mfs;
	int rc = NOTIFY_OK;

	list_for_each_entry(fcoe, &fcoe_hostlist, list) {
		if (fcoe->netdev == netdev) {
			lp = fcoe->ctlr.lp;
			break;
		}
	}
	if (lp == NULL) {
		rc = NOTIFY_DONE;
		goto out;
	}

	switch (event) {
	case NETDEV_DOWN:
	case NETDEV_GOING_DOWN:
		link_possible = 0;
		break;
	case NETDEV_UP:
	case NETDEV_CHANGE:
		break;
	case NETDEV_CHANGEMTU:
		mfs = netdev->mtu - (sizeof(struct fcoe_hdr) +
				     sizeof(struct fcoe_crc_eof));
		if (mfs >= FC_MIN_MAX_FRAME)
			fc_set_mfs(lp, mfs);
		break;
	case NETDEV_REGISTER:
		break;
	case NETDEV_UNREGISTER:
		list_del(&fcoe->list);
		port = lport_priv(fcoe->ctlr.lp);
		fcoe_interface_cleanup(fcoe);
		schedule_work(&port->destroy_work);
		goto out;
	default:
		FCOE_NETDEV_DBG(netdev, "Unknown event %ld "
				"from netdev netlink\n", event);
	}
	if (link_possible && !fcoe_link_ok(lp))
		fcoe_ctlr_link_up(&fcoe->ctlr);
	else if (fcoe_ctlr_link_down(&fcoe->ctlr)) {
		stats = fc_lport_get_stats(lp);
		stats->LinkFailureCount++;
		fcoe_clean_pending_queue(lp);
	}
out:
	return rc;
}
/**
 * fcoe_if_to_netdev() - parse a name buffer to get netdev
 * @buffer: incoming buffer to be copied
 *
 * Returns: NULL or ptr to net_device
 */
static struct net_device *fcoe_if_to_netdev(const char *buffer)
{
	char *cp;
	char ifname[IFNAMSIZ + 2];

	if (buffer) {
		strlcpy(ifname, buffer, IFNAMSIZ);
		cp = ifname + strlen(ifname);
		while (--cp >= ifname && *cp == '\n')
			*cp = '\0';
		return dev_get_by_name(&init_net, ifname);
	}
	return NULL;
}
/**
 * fcoe_destroy() - handles the destroy from sysfs
 * @buffer: expected to be an eth if name
 * @kp: associated kernel param
 *
 * Returns: 0 for success
 */
static int fcoe_destroy(const char *buffer, struct kernel_param *kp)
{
	struct fcoe_interface *fcoe;
	struct net_device *netdev;
	int rc;

	mutex_lock(&fcoe_config_mutex);
#ifdef CONFIG_FCOE_MODULE
	/*
	 * Make sure the module has been initialized, and is not about to be
	 * removed.  Module parameter sysfs files are writable before the
	 * module_init function is called and after module_exit.
	 */
	if (THIS_MODULE->state != MODULE_STATE_LIVE) {
		rc = -ENODEV;
		goto out_nodev;
	}
#endif

	netdev = fcoe_if_to_netdev(buffer);
	if (!netdev) {
		rc = -ENODEV;
		goto out_nodev;
	}

	rtnl_lock();
	fcoe = fcoe_hostlist_lookup_port(netdev);
	if (!fcoe) {
		rtnl_unlock();
		rc = -ENODEV;
		goto out_putdev;
	}
	list_del(&fcoe->list);
	fcoe_interface_cleanup(fcoe);
	rtnl_unlock();
	fcoe_if_destroy(fcoe->ctlr.lp);
	rc = 0;
out_putdev:
	dev_put(netdev);
out_nodev:
	mutex_unlock(&fcoe_config_mutex);
	return rc;
}
static void fcoe_destroy_work(struct work_struct *work)
{
	struct fcoe_port *port;

	port = container_of(work, struct fcoe_port, destroy_work);
	mutex_lock(&fcoe_config_mutex);
	fcoe_if_destroy(port->lport);
	mutex_unlock(&fcoe_config_mutex);
}
/**
 * fcoe_create() - Handles the create call from sysfs
 * @buffer: expected to be an eth if name
 * @kp: associated kernel param
 *
 * Returns: 0 for success
 */
static int fcoe_create(const char *buffer, struct kernel_param *kp)
{
	int rc;
	struct fcoe_interface *fcoe;
	struct fc_lport *lport;
	struct net_device *netdev;

	mutex_lock(&fcoe_config_mutex);
#ifdef CONFIG_FCOE_MODULE
	/*
	 * Make sure the module has been initialized, and is not about to be
	 * removed.  Module parameter sysfs files are writable before the
	 * module_init function is called and after module_exit.
	 */
	if (THIS_MODULE->state != MODULE_STATE_LIVE) {
		rc = -ENODEV;
		goto out_nodev;
	}
#endif

	rtnl_lock();
	netdev = fcoe_if_to_netdev(buffer);
	if (!netdev) {
		rc = -ENODEV;
		goto out_nodev;
	}

	/* look for existing lport */
	if (fcoe_hostlist_lookup(netdev)) {
		rc = -EEXIST;
		goto out_putdev;
	}

	fcoe = fcoe_interface_create(netdev);
	if (!fcoe) {
		rc = -ENOMEM;
		goto out_putdev;
	}

	lport = fcoe_if_create(fcoe, &netdev->dev);
	if (IS_ERR(lport)) {
		printk(KERN_ERR "fcoe: Failed to create interface (%s)\n",
		       netdev->name);
		rc = -EIO;
		fcoe_interface_cleanup(fcoe);
		goto out_free;
	}

	/* Make this the "master" N_Port */
	fcoe->ctlr.lp = lport;

	/* add to lports list */
	fcoe_hostlist_add(lport);

	/* start FIP Discovery and FLOGI */
	lport->boot_time = jiffies;
	fc_fabric_login(lport);
	if (!fcoe_link_ok(lport))
		fcoe_ctlr_link_up(&fcoe->ctlr);

	rc = 0;
out_free:
	/*
	 * Release from init in fcoe_interface_create(), on success lport
	 * should be holding a reference taken in fcoe_if_create().
	 */
	fcoe_interface_put(fcoe);
out_putdev:
	dev_put(netdev);
out_nodev:
	rtnl_unlock();
	mutex_unlock(&fcoe_config_mutex);
	return rc;
}
module_param_call(create, fcoe_create, NULL, NULL, S_IWUSR);
__MODULE_PARM_TYPE(create, "string");
MODULE_PARM_DESC(create, "Create an fcoe instance on the net device passed in.");
module_param_call(destroy, fcoe_destroy, NULL, NULL, S_IWUSR);
__MODULE_PARM_TYPE(destroy, "string");
MODULE_PARM_DESC(destroy, "Destroy an fcoe instance");
/**
 * fcoe_link_ok() - Check if link is ok for the fc_lport
 * @lp: ptr to the fc_lport
 *
 * Any permanently-disqualifying conditions have been previously checked.
 * This also updates the speed setting, which may change with link for 100/1000.
 *
 * This function should probably be checking for PAUSE support at some point
 * in the future. Currently Per-priority-pause is not determinable using
 * ethtool, so we shouldn't be restrictive until that problem is resolved.
 *
 * Returns: 0 if link is OK for use by FCoE.
 */
int fcoe_link_ok(struct fc_lport *lp)
{
	struct fcoe_port *port = lport_priv(lp);
	struct net_device *dev = port->fcoe->netdev;
	struct ethtool_cmd ecmd = { ETHTOOL_GSET };

	if ((dev->flags & IFF_UP) && netif_carrier_ok(dev) &&
	    (!dev_ethtool_get_settings(dev, &ecmd))) {
		lp->link_supported_speeds &=
			~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
		if (ecmd.supported & (SUPPORTED_1000baseT_Half |
				      SUPPORTED_1000baseT_Full))
			lp->link_supported_speeds |= FC_PORTSPEED_1GBIT;
		if (ecmd.supported & SUPPORTED_10000baseT_Full)
			lp->link_supported_speeds |=
				FC_PORTSPEED_10GBIT;
		if (ecmd.speed == SPEED_1000)
			lp->link_speed = FC_PORTSPEED_1GBIT;
		if (ecmd.speed == SPEED_10000)
			lp->link_speed = FC_PORTSPEED_10GBIT;

		return 0;
	}
	return -1;
}
/**
 * fcoe_percpu_clean() - Clear the pending skbs for an lport
 * @lp: the fc_lport
 *
 * Must be called with fcoe_create_mutex held to single-thread completion.
 *
 * This flushes the pending skbs by adding a new skb to each queue and
 * waiting until they are all freed.  This assures us that not only are
 * there no packets that will be handled by the lport, but also that any
 * threads already handling packets have returned.
 */
void fcoe_percpu_clean(struct fc_lport *lp)
{
	struct fcoe_percpu_s *pp;
	struct fcoe_rcv_info *fr;
	struct sk_buff_head *list;
	struct sk_buff *skb, *next;
	struct sk_buff *head;
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		pp = &per_cpu(fcoe_percpu, cpu);
		spin_lock_bh(&pp->fcoe_rx_list.lock);
		list = &pp->fcoe_rx_list;
		head = list->next;
		for (skb = head; skb != (struct sk_buff *)list;
		     skb = next) {
			next = skb->next;
			fr = fcoe_dev_from_skb(skb);
			if (fr->fr_dev == lp) {
				__skb_unlink(skb, list);
				kfree_skb(skb);
			}
		}

		if (!pp->thread || !cpu_online(cpu)) {
			spin_unlock_bh(&pp->fcoe_rx_list.lock);
			continue;
		}

		skb = dev_alloc_skb(0);
		if (!skb) {
			spin_unlock_bh(&pp->fcoe_rx_list.lock);
			continue;
		}
		skb->destructor = fcoe_percpu_flush_done;

		__skb_queue_tail(&pp->fcoe_rx_list, skb);
		if (pp->fcoe_rx_list.qlen == 1)
			wake_up_process(pp->thread);
		spin_unlock_bh(&pp->fcoe_rx_list.lock);

		wait_for_completion(&fcoe_flush_completion);
	}
}
/**
 * fcoe_clean_pending_queue() - Dequeue pending skbs and free them
 * @lp: the corresponding fc_lport
 *
 * Returns: none
 */
void fcoe_clean_pending_queue(struct fc_lport *lp)
{
	struct fcoe_port *port = lport_priv(lp);
	struct sk_buff *skb;

	spin_lock_bh(&port->fcoe_pending_queue.lock);
	while ((skb = __skb_dequeue(&port->fcoe_pending_queue)) != NULL) {
		spin_unlock_bh(&port->fcoe_pending_queue.lock);
		kfree_skb(skb);
		spin_lock_bh(&port->fcoe_pending_queue.lock);
	}
	spin_unlock_bh(&port->fcoe_pending_queue.lock);
}
/**
 * fcoe_reset() - Resets the fcoe
 * @shost: shost the reset is from
 *
 * Returns: always 0
 */
int fcoe_reset(struct Scsi_Host *shost)
{
	struct fc_lport *lport = shost_priv(shost);
	fc_lport_reset(lport);
	return 0;
}
/**
 * fcoe_hostlist_lookup_port() - find the corresponding lport by a given device
 * @dev: this is currently ptr to net_device
 *
 * Returns: NULL or the located fcoe_interface
 * Locking: must be called with the RTNL mutex held
 */
static struct fcoe_interface *
fcoe_hostlist_lookup_port(const struct net_device *dev)
{
	struct fcoe_interface *fcoe;

	list_for_each_entry(fcoe, &fcoe_hostlist, list) {
		if (fcoe->netdev == dev)
			return fcoe;
	}
	return NULL;
}

/**
 * fcoe_hostlist_lookup() - Find the corresponding lport by netdev
 * @netdev: ptr to net_device
 *
 * Returns: NULL or the located fc_lport
 * Locking: must be called with the RTNL mutex held
 */
static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev)
{
	struct fcoe_interface *fcoe;

	fcoe = fcoe_hostlist_lookup_port(netdev);
	return (fcoe) ? fcoe->ctlr.lp : NULL;
}
/**
 * fcoe_hostlist_add() - Add a lport to lports list
 * @lport: ptr to the fc_lport to be added
 *
 * Returns: 0 for success
 * Locking: must be called with the RTNL mutex held
 */
static int fcoe_hostlist_add(const struct fc_lport *lport)
{
	struct fcoe_interface *fcoe;
	struct fcoe_port *port;

	fcoe = fcoe_hostlist_lookup_port(fcoe_netdev(lport));
	if (!fcoe) {
		port = lport_priv(lport);
		fcoe = port->fcoe;
		list_add_tail(&fcoe->list, &fcoe_hostlist);
	}
	return 0;
}
/**
 * fcoe_init() - fcoe module loading initialization
 *
 * Returns: 0 on success, negative on failure
 */
static int __init fcoe_init(void)
{
	unsigned int cpu;
	int rc = 0;
	struct fcoe_percpu_s *p;

	mutex_lock(&fcoe_config_mutex);

	for_each_possible_cpu(cpu) {
		p = &per_cpu(fcoe_percpu, cpu);
		skb_queue_head_init(&p->fcoe_rx_list);
	}

	for_each_online_cpu(cpu)
		fcoe_percpu_thread_create(cpu);

	/* Initialize per CPU interrupt thread */
	rc = register_hotcpu_notifier(&fcoe_cpu_notifier);
	if (rc)
		goto out_free;

	/* Setup link change notification */
	fcoe_dev_setup();

	rc = fcoe_if_init();
	if (rc)
		goto out_free;

	mutex_unlock(&fcoe_config_mutex);
	return 0;

out_free:
	for_each_online_cpu(cpu) {
		fcoe_percpu_thread_destroy(cpu);
	}
	mutex_unlock(&fcoe_config_mutex);
	return rc;
}
module_init(fcoe_init);
/**
 * fcoe_exit() - fcoe module unloading cleanup
 *
 * Returns: none
 */
static void __exit fcoe_exit(void)
{
	unsigned int cpu;
	struct fcoe_interface *fcoe, *tmp;
	struct fcoe_port *port;

	mutex_lock(&fcoe_config_mutex);

	fcoe_dev_cleanup();

	/* releases the associated fcoe hosts */
	rtnl_lock();
	list_for_each_entry_safe(fcoe, tmp, &fcoe_hostlist, list) {
		list_del(&fcoe->list);
		port = lport_priv(fcoe->ctlr.lp);
		fcoe_interface_cleanup(fcoe);
		schedule_work(&port->destroy_work);
	}
	rtnl_unlock();

	unregister_hotcpu_notifier(&fcoe_cpu_notifier);

	for_each_online_cpu(cpu)
		fcoe_percpu_thread_destroy(cpu);

	mutex_unlock(&fcoe_config_mutex);

	/* flush any asynchronous interface destroys,
	 * this should happen after the netdev notifier is unregistered */
	flush_scheduled_work();

	/* detach from scsi transport
	 * must happen after all destroys are done, therefore after the flush */
	fcoe_if_exit();
}
module_exit(fcoe_exit);