[SCSI] fcoe, libfc: fix function declarations to be ANSI-compliant
drivers/scsi/fcoe/fcoe.c
1 /*
2 * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 * Maintained at www.Open-FCoE.org
20 #include <linux/module.h>
21 #include <linux/version.h>
22 #include <linux/spinlock.h>
23 #include <linux/netdevice.h>
24 #include <linux/etherdevice.h>
25 #include <linux/ethtool.h>
26 #include <linux/if_ether.h>
27 #include <linux/if_vlan.h>
28 #include <linux/crc32.h>
29 #include <linux/cpu.h>
30 #include <linux/fs.h>
31 #include <linux/sysfs.h>
32 #include <linux/ctype.h>
33 #include <scsi/scsi_tcq.h>
34 #include <scsi/scsicam.h>
35 #include <scsi/scsi_transport.h>
36 #include <scsi/scsi_transport_fc.h>
37 #include <net/rtnetlink.h>
39 #include <scsi/fc/fc_encaps.h>
40 #include <scsi/fc/fc_fip.h>
42 #include <scsi/libfc.h>
43 #include <scsi/fc_frame.h>
44 #include <scsi/libfcoe.h>
46 #include "fcoe.h"
48 static int debug_fcoe;
50 MODULE_AUTHOR("Open-FCoE.org");
51 MODULE_DESCRIPTION("FCoE");
52 MODULE_LICENSE("GPL v2");
54 /* fcoe host list */
55 LIST_HEAD(fcoe_hostlist);
56 DEFINE_RWLOCK(fcoe_hostlist_lock);
57 DEFINE_TIMER(fcoe_timer, NULL, 0, 0);
58 DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu);
60 /* Function Prototypes */
61 static int fcoe_reset(struct Scsi_Host *shost);
62 static int fcoe_xmit(struct fc_lport *, struct fc_frame *);
63 static int fcoe_rcv(struct sk_buff *, struct net_device *,
64 struct packet_type *, struct net_device *);
65 static int fcoe_percpu_receive_thread(void *arg);
66 static void fcoe_clean_pending_queue(struct fc_lport *lp);
67 static void fcoe_percpu_clean(struct fc_lport *lp);
68 static int fcoe_link_ok(struct fc_lport *lp);
70 static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *);
71 static int fcoe_hostlist_add(const struct fc_lport *);
72 static int fcoe_hostlist_remove(const struct fc_lport *);
74 static int fcoe_check_wait_queue(struct fc_lport *);
75 static int fcoe_device_notification(struct notifier_block *, ulong, void *);
76 static void fcoe_dev_setup(void);
77 static void fcoe_dev_cleanup(void);
79 /* notification function from net device */
80 static struct notifier_block fcoe_notifier = {
81 .notifier_call = fcoe_device_notification,
84 static struct scsi_transport_template *scsi_transport_fcoe_sw;
86 struct fc_function_template fcoe_transport_function = {
87 .show_host_node_name = 1,
88 .show_host_port_name = 1,
89 .show_host_supported_classes = 1,
90 .show_host_supported_fc4s = 1,
91 .show_host_active_fc4s = 1,
92 .show_host_maxframe_size = 1,
94 .show_host_port_id = 1,
95 .show_host_supported_speeds = 1,
96 .get_host_speed = fc_get_host_speed,
97 .show_host_speed = 1,
98 .show_host_port_type = 1,
99 .get_host_port_state = fc_get_host_port_state,
100 .show_host_port_state = 1,
101 .show_host_symbolic_name = 1,
103 .dd_fcrport_size = sizeof(struct fc_rport_libfc_priv),
104 .show_rport_maxframe_size = 1,
105 .show_rport_supported_classes = 1,
107 .show_host_fabric_name = 1,
108 .show_starget_node_name = 1,
109 .show_starget_port_name = 1,
110 .show_starget_port_id = 1,
111 .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
112 .show_rport_dev_loss_tmo = 1,
113 .get_fc_host_stats = fc_get_host_stats,
114 .issue_fc_host_lip = fcoe_reset,
116 .terminate_rport_io = fc_rport_terminate_io,
119 static struct scsi_host_template fcoe_shost_template = {
120 .module = THIS_MODULE,
121 .name = "FCoE Driver",
122 .proc_name = FCOE_NAME,
123 .queuecommand = fc_queuecommand,
124 .eh_abort_handler = fc_eh_abort,
125 .eh_device_reset_handler = fc_eh_device_reset,
126 .eh_host_reset_handler = fc_eh_host_reset,
127 .slave_alloc = fc_slave_alloc,
128 .change_queue_depth = fc_change_queue_depth,
129 .change_queue_type = fc_change_queue_type,
130 .this_id = -1,
131 .cmd_per_lun = 32,
132 .can_queue = FCOE_MAX_OUTSTANDING_COMMANDS,
133 .use_clustering = ENABLE_CLUSTERING,
134 .sg_tablesize = SG_ALL,
135 .max_sectors = 0xffff,
139 * fcoe_lport_config() - sets up the fc_lport
140 * @lp: ptr to the fc_lport
142 * Returns: 0 for success
144 static int fcoe_lport_config(struct fc_lport *lp)
146 lp->link_up = 0;
147 lp->qfull = 0;
148 lp->max_retry_count = 3;
149 lp->e_d_tov = 2 * 1000; /* FC-FS default */
150 lp->r_a_tov = 2 * 2 * 1000;
151 lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
152 FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
154 fc_lport_init_stats(lp);
156 /* lport fc_lport related configuration */
157 fc_lport_config(lp);
159 /* offload related configuration */
160 lp->crc_offload = 0;
161 lp->seq_offload = 0;
162 lp->lro_enabled = 0;
163 lp->lro_xid = 0;
164 lp->lso_max = 0;
166 return 0;
170 * fcoe_netdev_config() - Set up netdev for SW FCoE
171 * @lp : ptr to the fc_lport
172 * @netdev : ptr to the associated netdevice struct
174 * Must be called after fcoe_lport_config() as it will use lport mutex
176 * Returns : 0 for success
178 static int fcoe_netdev_config(struct fc_lport *lp, struct net_device *netdev)
180 u32 mfs;
181 u64 wwnn, wwpn;
182 struct fcoe_softc *fc;
183 u8 flogi_maddr[ETH_ALEN];
185 /* Setup lport private data to point to fcoe softc */
186 fc = lport_priv(lp);
187 fc->ctlr.lp = lp;
188 fc->real_dev = netdev;
189 fc->phys_dev = netdev;
191 /* Require support for get_pauseparam ethtool op. */
192 if (netdev->priv_flags & IFF_802_1Q_VLAN)
193 fc->phys_dev = vlan_dev_real_dev(netdev);
195 /* Do not support bonding devices */
196 if ((fc->real_dev->priv_flags & IFF_MASTER_ALB) ||
197 (fc->real_dev->priv_flags & IFF_SLAVE_INACTIVE) ||
198 (fc->real_dev->priv_flags & IFF_MASTER_8023AD)) {
199 return -EOPNOTSUPP;
203 * Determine max frame size based on underlying device and optional
204 * user-configured limit. If the MFS is too low, fcoe_link_ok()
205 * will return 0, so do this first.
207 mfs = fc->real_dev->mtu - (sizeof(struct fcoe_hdr) +
208 sizeof(struct fcoe_crc_eof));
209 if (fc_set_mfs(lp, mfs))
210 return -EINVAL;
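/*
 * Worked example (assuming the usual 14-byte struct fcoe_hdr and 8-byte
 * struct fcoe_crc_eof): an MTU of 2500 gives mfs = 2500 - (14 + 8) = 2478,
 * enough for a full 2112-byte FC payload plus the 24-byte FC header, while
 * a standard 1500-byte MTU yields mfs = 1478 and limits the usable FC
 * payload accordingly.
 */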
212 /* offload features support */
213 if (fc->real_dev->features & NETIF_F_SG)
214 lp->sg_supp = 1;
216 #ifdef NETIF_F_FCOE_CRC
217 if (netdev->features & NETIF_F_FCOE_CRC) {
218 lp->crc_offload = 1;
219 printk(KERN_DEBUG "fcoe:%s supports FCCRC offload\n",
220 netdev->name);
222 #endif
223 #ifdef NETIF_F_FSO
224 if (netdev->features & NETIF_F_FSO) {
225 lp->seq_offload = 1;
226 lp->lso_max = netdev->gso_max_size;
227 printk(KERN_DEBUG "fcoe:%s supports LSO for max len 0x%x\n",
228 netdev->name, lp->lso_max);
230 #endif
231 if (netdev->fcoe_ddp_xid) {
232 lp->lro_enabled = 1;
233 lp->lro_xid = netdev->fcoe_ddp_xid;
234 printk(KERN_DEBUG "fcoe:%s supports LRO for max xid 0x%x\n",
235 netdev->name, lp->lro_xid);
237 skb_queue_head_init(&fc->fcoe_pending_queue);
238 fc->fcoe_pending_queue_active = 0;
240 /* setup Source Mac Address */
241 memcpy(fc->ctlr.ctl_src_addr, fc->real_dev->dev_addr,
242 fc->real_dev->addr_len);
244 wwnn = fcoe_wwn_from_mac(fc->real_dev->dev_addr, 1, 0);
245 fc_set_wwnn(lp, wwnn);
246 /* XXX - 3rd arg needs to be vlan id */
247 wwpn = fcoe_wwn_from_mac(fc->real_dev->dev_addr, 2, 0);
248 fc_set_wwpn(lp, wwpn);
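/*
 * Example (an assumption about the fcoe_wwn_from_mac() NAA encoding in
 * libfcoe): a MAC of 00:1b:21:01:02:03 would yield
 * WWNN 0x1000001b21010203 (scheme 1) and WWPN 0x2000001b21010203
 * (scheme 2, port 0).
 */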
251 * Add FCoE MAC address as second unicast MAC address
252 * or enter promiscuous mode if not capable of listening
253 * for multiple unicast MACs.
255 rtnl_lock();
256 memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
257 dev_unicast_add(fc->real_dev, flogi_maddr, ETH_ALEN);
258 dev_mc_add(fc->real_dev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0);
259 rtnl_unlock();
262 * setup the receive function from ethernet driver
263 * on the ethertype for the given device
265 fc->fcoe_packet_type.func = fcoe_rcv;
266 fc->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE);
267 fc->fcoe_packet_type.dev = fc->real_dev;
268 dev_add_pack(&fc->fcoe_packet_type);
270 return 0;
274 * fcoe_shost_config() - Sets up fc_lport->host
275 * @lp : ptr to the fc_lport
276 * @shost : ptr to the associated scsi host
277 * @dev : device associated to scsi host
279 * Must be called after fcoe_lport_config() and fcoe_netdev_config()
281 * Returns : 0 for success
283 static int fcoe_shost_config(struct fc_lport *lp, struct Scsi_Host *shost,
284 struct device *dev)
286 int rc = 0;
288 /* lport scsi host config */
289 lp->host = shost;
291 lp->host->max_lun = FCOE_MAX_LUN;
292 lp->host->max_id = FCOE_MAX_FCP_TARGET;
293 lp->host->max_channel = 0;
294 lp->host->transportt = scsi_transport_fcoe_sw;
296 /* add the new host to the SCSI-ml */
297 rc = scsi_add_host(lp->host, dev);
298 if (rc) {
299 FC_DBG("fcoe_shost_config:error on scsi_add_host\n");
300 return rc;
302 sprintf(fc_host_symbolic_name(lp->host), "%s v%s over %s",
303 FCOE_NAME, FCOE_VERSION,
304 fcoe_netdev(lp)->name);
306 return 0;
310 * fcoe_em_config() - allocates em for this lport
311 * @lp: the port that the em is to be allocated for
313 * Returns : 0 on success
315 static inline int fcoe_em_config(struct fc_lport *lp)
317 BUG_ON(lp->emp);
319 lp->emp = fc_exch_mgr_alloc(lp, FC_CLASS_3,
320 FCOE_MIN_XID, FCOE_MAX_XID);
321 if (!lp->emp)
322 return -ENOMEM;
324 return 0;
328 * fcoe_if_destroy() - FCoE software HBA tear-down function
329 * @netdev: ptr to the associated net_device
331 * Returns: 0 on success, or -ENODEV if no FCoE instance exists on @netdev.
333 static int fcoe_if_destroy(struct net_device *netdev)
335 struct fc_lport *lp = NULL;
336 struct fcoe_softc *fc;
337 u8 flogi_maddr[ETH_ALEN];
339 BUG_ON(!netdev);
341 printk(KERN_DEBUG "fcoe_if_destroy:interface on %s\n",
342 netdev->name);
344 lp = fcoe_hostlist_lookup(netdev);
345 if (!lp)
346 return -ENODEV;
348 fc = lport_priv(lp);
350 /* Logout of the fabric */
351 fc_fabric_logoff(lp);
353 /* Remove the instance from fcoe's list */
354 fcoe_hostlist_remove(lp);
356 /* Don't listen for Ethernet packets anymore */
357 dev_remove_pack(&fc->fcoe_packet_type);
358 dev_remove_pack(&fc->fip_packet_type);
359 fcoe_ctlr_destroy(&fc->ctlr);
361 /* Cleanup the fc_lport */
362 fc_lport_destroy(lp);
363 fc_fcp_destroy(lp);
365 /* Detach from the scsi-ml */
366 fc_remove_host(lp->host);
367 scsi_remove_host(lp->host);
369 /* There are no more rports or I/O, free the EM */
370 if (lp->emp)
371 fc_exch_mgr_free(lp->emp);
373 /* Delete secondary MAC addresses */
374 rtnl_lock();
375 memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
376 dev_unicast_delete(fc->real_dev, flogi_maddr, ETH_ALEN);
377 if (!is_zero_ether_addr(fc->ctlr.data_src_addr))
378 dev_unicast_delete(fc->real_dev,
379 fc->ctlr.data_src_addr, ETH_ALEN);
380 dev_mc_delete(fc->real_dev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0);
381 rtnl_unlock();
383 /* Free the per-CPU receive threads */
384 fcoe_percpu_clean(lp);
386 /* Free existing skbs */
387 fcoe_clean_pending_queue(lp);
389 /* Free memory used by statistical counters */
390 fc_lport_free_stats(lp);
392 /* Release the net_device and Scsi_Host */
393 dev_put(fc->real_dev);
394 scsi_host_put(lp->host);
396 return 0;
400 * fcoe_ddp_setup - calls LLD's ddp_setup through net_device
401 * @lp: the corresponding fc_lport
402 * @xid: the exchange id for this ddp transfer
403 * @sgl: the scatterlist describing this transfer
404 * @sgc: number of sg items
406 * Returns : 0 if DDP is not supported by the netdev, otherwise the value returned by the LLD's ndo_fcoe_ddp_setup()
408 static int fcoe_ddp_setup(struct fc_lport *lp, u16 xid,
409 struct scatterlist *sgl, unsigned int sgc)
411 struct net_device *n = fcoe_netdev(lp);
413 if (n->netdev_ops && n->netdev_ops->ndo_fcoe_ddp_setup)
414 return n->netdev_ops->ndo_fcoe_ddp_setup(n, xid, sgl, sgc);
416 return 0;
420 * fcoe_ddp_done - calls LLD's ddp_done through net_device
421 * @lp: the corresponding fc_lport
422 * @xid: the exchange id for this ddp transfer
424 * Returns : the length of data that has been completed by DDP
426 static int fcoe_ddp_done(struct fc_lport *lp, u16 xid)
428 struct net_device *n = fcoe_netdev(lp);
430 if (n->netdev_ops && n->netdev_ops->ndo_fcoe_ddp_done)
431 return n->netdev_ops->ndo_fcoe_ddp_done(n, xid);
432 return 0;
435 static struct libfc_function_template fcoe_libfc_fcn_templ = {
436 .frame_send = fcoe_xmit,
437 .ddp_setup = fcoe_ddp_setup,
438 .ddp_done = fcoe_ddp_done,
442 * fcoe_fip_recv - handle a received FIP frame.
443 * @skb: the receive skb
444 * @dev: associated &net_device
445 * @ptype: the &packet_type structure which was used to register this handler.
446 * @orig_dev: original receive &net_device, in case @dev is a bond.
448 * Returns: 0 for success
450 static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *dev,
451 struct packet_type *ptype,
452 struct net_device *orig_dev)
454 struct fcoe_softc *fc;
456 fc = container_of(ptype, struct fcoe_softc, fip_packet_type);
457 fcoe_ctlr_recv(&fc->ctlr, skb);
458 return 0;
462 * fcoe_fip_send() - send an Ethernet-encapsulated FIP frame.
463 * @fip: FCoE controller.
464 * @skb: FIP Packet.
466 static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
468 skb->dev = fcoe_from_ctlr(fip)->real_dev;
469 dev_queue_xmit(skb);
473 * fcoe_update_src_mac() - Update Ethernet MAC filters.
474 * @fip: FCoE controller.
475 * @old: Unicast MAC address to delete if the MAC is non-zero.
476 * @new: Unicast MAC address to add.
478 * Remove any previously-set unicast MAC filter.
479 * Add secondary FCoE MAC address filter for our OUI.
481 static void fcoe_update_src_mac(struct fcoe_ctlr *fip, u8 *old, u8 *new)
483 struct fcoe_softc *fc;
485 fc = fcoe_from_ctlr(fip);
486 rtnl_lock();
487 if (!is_zero_ether_addr(old))
488 dev_unicast_delete(fc->real_dev, old, ETH_ALEN);
489 dev_unicast_add(fc->real_dev, new, ETH_ALEN);
490 rtnl_unlock();
494 * fcoe_if_create() - this function creates the fcoe interface
495 * @netdev: pointer to the associated netdevice
497 * Creates fc_lport struct and scsi_host for lport, configures lport
498 * and starts fabric login.
500 * Returns : 0 on success
502 static int fcoe_if_create(struct net_device *netdev)
504 int rc;
505 struct fc_lport *lp = NULL;
506 struct fcoe_softc *fc;
507 struct Scsi_Host *shost;
509 BUG_ON(!netdev);
511 printk(KERN_DEBUG "fcoe_if_create:interface on %s\n",
512 netdev->name);
514 lp = fcoe_hostlist_lookup(netdev);
515 if (lp)
516 return -EEXIST;
518 shost = libfc_host_alloc(&fcoe_shost_template,
519 sizeof(struct fcoe_softc));
520 if (!shost) {
521 FC_DBG("Could not allocate host structure\n");
522 return -ENOMEM;
524 lp = shost_priv(shost);
525 fc = lport_priv(lp);
527 /* configure fc_lport, e.g., em */
528 rc = fcoe_lport_config(lp);
529 if (rc) {
530 FC_DBG("Could not configure lport\n");
531 goto out_host_put;
534 /* configure lport network properties */
535 rc = fcoe_netdev_config(lp, netdev);
536 if (rc) {
537 FC_DBG("Could not configure netdev for lport\n");
538 goto out_host_put;
542 * Initialize FIP.
544 fcoe_ctlr_init(&fc->ctlr);
545 fc->ctlr.send = fcoe_fip_send;
546 fc->ctlr.update_mac = fcoe_update_src_mac;
548 fc->fip_packet_type.func = fcoe_fip_recv;
549 fc->fip_packet_type.type = htons(ETH_P_FIP);
550 fc->fip_packet_type.dev = fc->real_dev;
551 dev_add_pack(&fc->fip_packet_type);
553 /* configure lport scsi host properties */
554 rc = fcoe_shost_config(lp, shost, &netdev->dev);
555 if (rc) {
556 FC_DBG("Could not configure shost for lport\n");
557 goto out_host_put;
560 /* lport exch manager allocation */
561 rc = fcoe_em_config(lp);
562 if (rc) {
563 FC_DBG("Could not configure em for lport\n");
564 goto out_host_put;
567 /* Initialize the library */
568 rc = fcoe_libfc_config(lp, &fcoe_libfc_fcn_templ);
569 if (rc) {
570 FC_DBG("Could not configure libfc for lport!\n");
571 goto out_lp_destroy;
574 /* add to lports list */
575 fcoe_hostlist_add(lp);
577 lp->boot_time = jiffies;
579 fc_fabric_login(lp);
581 if (!fcoe_link_ok(lp))
582 fcoe_ctlr_link_up(&fc->ctlr);
584 dev_hold(netdev);
586 return rc;
588 out_lp_destroy:
589 fc_exch_mgr_free(lp->emp); /* Free the EM */
590 out_host_put:
591 scsi_host_put(lp->host);
592 return rc;
596 * fcoe_if_init() - attach to scsi transport
598 * Returns : 0 on success
600 static int __init fcoe_if_init(void)
602 /* attach to scsi transport */
603 scsi_transport_fcoe_sw =
604 fc_attach_transport(&fcoe_transport_function);
606 if (!scsi_transport_fcoe_sw) {
607 printk(KERN_ERR "fcoe_init:fc_attach_transport() failed\n");
608 return -ENODEV;
611 return 0;
615 * fcoe_if_exit() - detach from scsi transport
617 * Returns : 0 on success
619 int __exit fcoe_if_exit(void)
621 fc_release_transport(scsi_transport_fcoe_sw);
622 return 0;
626 * fcoe_percpu_thread_create() - Create a receive thread for an online cpu
627 * @cpu: cpu index for the online cpu
629 static void fcoe_percpu_thread_create(unsigned int cpu)
631 struct fcoe_percpu_s *p;
632 struct task_struct *thread;
634 p = &per_cpu(fcoe_percpu, cpu);
636 thread = kthread_create(fcoe_percpu_receive_thread,
637 (void *)p, "fcoethread/%d", cpu);
639 if (likely(!IS_ERR(thread))) {
640 kthread_bind(thread, cpu);
641 wake_up_process(thread);
643 spin_lock_bh(&p->fcoe_rx_list.lock);
644 p->thread = thread;
645 spin_unlock_bh(&p->fcoe_rx_list.lock);
650 * fcoe_percpu_thread_destroy() - removes the rx thread for the given cpu
651 * @cpu: index of the CPU whose Rx thread is to be removed
653 * Destroys a per-CPU Rx thread. Any pending skbs are moved to the
654 * current CPU's Rx thread. If the thread being destroyed is bound to
655 * the CPU processing this context the skbs will be freed.
657 static void fcoe_percpu_thread_destroy(unsigned int cpu)
659 struct fcoe_percpu_s *p;
660 struct task_struct *thread;
661 struct page *crc_eof;
662 struct sk_buff *skb;
663 #ifdef CONFIG_SMP
664 struct fcoe_percpu_s *p0;
665 unsigned targ_cpu = smp_processor_id();
666 #endif /* CONFIG_SMP */
668 printk(KERN_DEBUG "fcoe: Destroying receive thread for CPU %d\n", cpu);
670 /* Prevent any new skbs from being queued for this CPU. */
671 p = &per_cpu(fcoe_percpu, cpu);
672 spin_lock_bh(&p->fcoe_rx_list.lock);
673 thread = p->thread;
674 p->thread = NULL;
675 crc_eof = p->crc_eof_page;
676 p->crc_eof_page = NULL;
677 p->crc_eof_offset = 0;
678 spin_unlock_bh(&p->fcoe_rx_list.lock);
680 #ifdef CONFIG_SMP
682 * Don't bother moving the skb's if this context is running
683 * on the same CPU that is having its thread destroyed. This
684 * can easily happen when the module is removed.
686 if (cpu != targ_cpu) {
687 p0 = &per_cpu(fcoe_percpu, targ_cpu);
688 spin_lock_bh(&p0->fcoe_rx_list.lock);
689 if (p0->thread) {
690 FC_DBG("Moving frames from CPU %d to CPU %d\n",
691 cpu, targ_cpu);
693 while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
694 __skb_queue_tail(&p0->fcoe_rx_list, skb);
695 spin_unlock_bh(&p0->fcoe_rx_list.lock);
696 } else {
698 * The targeted CPU is not initialized and cannot accept
699 * new skbs. Unlock the targeted CPU and drop the skbs
700 * on the CPU that is going offline.
702 while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
703 kfree_skb(skb);
704 spin_unlock_bh(&p0->fcoe_rx_list.lock);
706 } else {
708 * This scenario occurs when the module is being removed
709 * and all threads are being destroyed. skbs will continue
710 * to be shifted from the CPU thread that is being removed
711 * to the CPU thread associated with the CPU that is processing
712 * the module removal. Once there is only one CPU Rx thread it
713 * will reach this case and we will drop all skbs and later
714 * stop the thread.
716 spin_lock_bh(&p->fcoe_rx_list.lock);
717 while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
718 kfree_skb(skb);
719 spin_unlock_bh(&p->fcoe_rx_list.lock);
721 #else
723 * This is a non-SMP scenario where the single Rx thread is
724 * being removed. Free all skbs and stop the thread.
726 spin_lock_bh(&p->fcoe_rx_list.lock);
727 while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
728 kfree_skb(skb);
729 spin_unlock_bh(&p->fcoe_rx_list.lock);
730 #endif
732 if (thread)
733 kthread_stop(thread);
735 if (crc_eof)
736 put_page(crc_eof);
740 * fcoe_cpu_callback() - fcoe cpu hotplug event callback
741 * @nfb: callback data block
742 * @action: event triggering the callback
743 * @hcpu: index for the cpu of this event
745 * This creates or destroys per cpu data for fcoe
747 * Returns NOTIFY_OK always.
749 static int fcoe_cpu_callback(struct notifier_block *nfb,
750 unsigned long action, void *hcpu)
752 unsigned cpu = (unsigned long)hcpu;
754 switch (action) {
755 case CPU_ONLINE:
756 case CPU_ONLINE_FROZEN:
757 FC_DBG("CPU %x online: Create Rx thread\n", cpu);
758 fcoe_percpu_thread_create(cpu);
759 break;
760 case CPU_DEAD:
761 case CPU_DEAD_FROZEN:
762 FC_DBG("CPU %x offline: Remove Rx thread\n", cpu);
763 fcoe_percpu_thread_destroy(cpu);
764 break;
765 default:
766 break;
768 return NOTIFY_OK;
771 static struct notifier_block fcoe_cpu_notifier = {
772 .notifier_call = fcoe_cpu_callback,
776 * fcoe_rcv() - this is the fcoe receive function called by NET_RX_SOFTIRQ
777 * @skb: the receive skb
778 * @dev: associated net device
779 * @ptype: context
780 * @olddev: last device
782 * This function validates the received packet and queues it to a per-CPU receive thread for FC frame processing
784 * Returns: 0 for success
786 int fcoe_rcv(struct sk_buff *skb, struct net_device *dev,
787 struct packet_type *ptype, struct net_device *olddev)
789 struct fc_lport *lp;
790 struct fcoe_rcv_info *fr;
791 struct fcoe_softc *fc;
792 struct fc_frame_header *fh;
793 struct fcoe_percpu_s *fps;
794 unsigned short oxid;
795 unsigned int cpu = 0;
797 fc = container_of(ptype, struct fcoe_softc, fcoe_packet_type);
798 lp = fc->ctlr.lp;
799 if (unlikely(lp == NULL)) {
800 FC_DBG("cannot find hba structure");
801 goto err2;
803 if (!lp->link_up)
804 goto err2;
806 if (unlikely(debug_fcoe)) {
807 FC_DBG("skb_info: len:%d data_len:%d head:%p data:%p tail:%p "
808 "end:%p sum:%d dev:%s", skb->len, skb->data_len,
809 skb->head, skb->data, skb_tail_pointer(skb),
810 skb_end_pointer(skb), skb->csum,
811 skb->dev ? skb->dev->name : "<NULL>");
815 /* check for FCOE packet type */
816 if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) {
817 FC_DBG("wrong FC type frame");
818 goto err;
822 * Check for minimum frame length, and make sure required FCoE
823 * and FC headers are pulled into the linear data area.
825 if (unlikely((skb->len < FCOE_MIN_FRAME) ||
826 !pskb_may_pull(skb, FCOE_HEADER_LEN)))
827 goto err;
829 skb_set_transport_header(skb, sizeof(struct fcoe_hdr));
830 fh = (struct fc_frame_header *) skb_transport_header(skb);
832 oxid = ntohs(fh->fh_ox_id);
834 fr = fcoe_dev_from_skb(skb);
835 fr->fr_dev = lp;
836 fr->ptype = ptype;
838 #ifdef CONFIG_SMP
840 * The incoming frame exchange id (oxid) is ANDed with the number of
841 * online CPUs minus one to pick a CPU, which is then used to select
842 * a per-CPU kernel thread from fcoe_percpu.
844 cpu = oxid & (num_online_cpus() - 1);
845 #endif
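/*
 * Note: the OXID mask above spreads frames evenly only when
 * num_online_cpus() is a power of two; e.g. with 4 CPUs online,
 * oxid 0x1003 & 3 selects CPU 3, but with 6 CPUs the mask is 5
 * (binary 101), so CPUs 2 and 3 are never selected.
 */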
847 fps = &per_cpu(fcoe_percpu, cpu);
848 spin_lock_bh(&fps->fcoe_rx_list.lock);
849 if (unlikely(!fps->thread)) {
851 * The targeted CPU is not ready, let's target
852 * the first CPU now. For non-SMP systems this
853 * will check the same CPU twice.
855 FC_DBG("CPU is online, but no receive thread ready "
856 "for incoming skb- using first online CPU.\n");
858 spin_unlock_bh(&fps->fcoe_rx_list.lock);
859 cpu = first_cpu(cpu_online_map);
860 fps = &per_cpu(fcoe_percpu, cpu);
861 spin_lock_bh(&fps->fcoe_rx_list.lock);
862 if (!fps->thread) {
863 spin_unlock_bh(&fps->fcoe_rx_list.lock);
864 goto err;
869 * We now have a valid CPU that we're targeting for
870 * this skb. We also have this receive thread locked,
871 * so we're free to queue skbs into its queue.
873 __skb_queue_tail(&fps->fcoe_rx_list, skb);
874 if (fps->fcoe_rx_list.qlen == 1)
875 wake_up_process(fps->thread);
877 spin_unlock_bh(&fps->fcoe_rx_list.lock);
879 return 0;
880 err:
881 fc_lport_get_stats(lp)->ErrorFrames++;
883 err2:
884 kfree_skb(skb);
885 return -1;
889 * fcoe_start_io() - pass to netdev to start xmit for fcoe
890 * @skb: the skb to be xmitted
892 * Returns: 0 for success
894 static inline int fcoe_start_io(struct sk_buff *skb)
896 int rc;
898 skb_get(skb);
899 rc = dev_queue_xmit(skb);
900 if (rc != 0)
901 return rc;
902 kfree_skb(skb);
903 return 0;
907 * fcoe_get_paged_crc_eof() - append a page fragment for the CRC/EOF trailer, allocating the page if needed
908 * @skb: the skb to be xmitted
909 * @tlen: trailer length (sizeof(struct fcoe_crc_eof))
911 * Returns: 0 for success
913 static int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen)
915 struct fcoe_percpu_s *fps;
916 struct page *page;
918 fps = &get_cpu_var(fcoe_percpu);
919 page = fps->crc_eof_page;
920 if (!page) {
921 page = alloc_page(GFP_ATOMIC);
922 if (!page) {
923 put_cpu_var(fcoe_percpu);
924 return -ENOMEM;
926 fps->crc_eof_page = page;
927 fps->crc_eof_offset = 0;
930 get_page(page);
931 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page,
932 fps->crc_eof_offset, tlen);
933 skb->len += tlen;
934 skb->data_len += tlen;
935 skb->truesize += tlen;
936 fps->crc_eof_offset += sizeof(struct fcoe_crc_eof);
938 if (fps->crc_eof_offset >= PAGE_SIZE) {
939 fps->crc_eof_page = NULL;
940 fps->crc_eof_offset = 0;
941 put_page(page);
943 put_cpu_var(fcoe_percpu);
944 return 0;
948 * fcoe_fc_crc() - calculates FC CRC in this fcoe skb
949 * @fp: the fc_frame containing data to be checksummed
951 * This uses crc32() to calculate the crc for fc frame
952 * Return : 32 bit crc
954 u32 fcoe_fc_crc(struct fc_frame *fp)
956 struct sk_buff *skb = fp_skb(fp);
957 struct skb_frag_struct *frag;
958 unsigned char *data;
959 unsigned long off, len, clen;
960 u32 crc;
961 unsigned i;
963 crc = crc32(~0, skb->data, skb_headlen(skb));
965 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
966 frag = &skb_shinfo(skb)->frags[i];
967 off = frag->page_offset;
968 len = frag->size;
969 while (len > 0) {
970 clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK));
971 data = kmap_atomic(frag->page + (off >> PAGE_SHIFT),
972 KM_SKB_DATA_SOFTIRQ);
973 crc = crc32(crc, data + (off & ~PAGE_MASK), clen);
974 kunmap_atomic(data, KM_SKB_DATA_SOFTIRQ);
975 off += clen;
976 len -= clen;
979 return crc;
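/*
 * The CRC computed here is stored complemented in the fcoe_crc_eof
 * trailer (see the cpu_to_le32(~crc) in fcoe_xmit()); the receive path
 * uses the same convention when it compares fr_crc(fp) against
 * ~crc32(~0, skb->data, fr_len) in fcoe_percpu_receive_thread().
 */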
983 * fcoe_xmit() - FCoE frame transmit function
984 * @lp: the associated local port
985 * @fp: the fc_frame to be transmitted
987 * Return : 0 for success
989 int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
991 int wlen, rc = 0;
992 u32 crc;
993 struct ethhdr *eh;
994 struct fcoe_crc_eof *cp;
995 struct sk_buff *skb;
996 struct fcoe_dev_stats *stats;
997 struct fc_frame_header *fh;
998 unsigned int hlen; /* header length implies the version */
999 unsigned int tlen; /* trailer length */
1000 unsigned int elen; /* eth header, may include vlan */
1001 struct fcoe_softc *fc;
1002 u8 sof, eof;
1003 struct fcoe_hdr *hp;
1005 WARN_ON((fr_len(fp) % sizeof(u32)) != 0);
1007 fc = lport_priv(lp);
1008 fh = fc_frame_header_get(fp);
1009 skb = fp_skb(fp);
1010 wlen = skb->len / FCOE_WORD_TO_BYTE;
1012 if (!lp->link_up) {
1013 kfree_skb(skb);
1014 return 0;
1017 if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) &&
1018 fcoe_ctlr_els_send(&fc->ctlr, skb))
1019 return 0;
1021 sof = fr_sof(fp);
1022 eof = fr_eof(fp);
1024 elen = (fc->real_dev->priv_flags & IFF_802_1Q_VLAN) ?
1025 sizeof(struct vlan_ethhdr) : sizeof(struct ethhdr);
1026 hlen = sizeof(struct fcoe_hdr);
1027 tlen = sizeof(struct fcoe_crc_eof);
1028 wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;
1030 /* crc offload */
1031 if (likely(lp->crc_offload)) {
1032 skb->ip_summed = CHECKSUM_PARTIAL;
1033 skb->csum_start = skb_headroom(skb);
1034 skb->csum_offset = skb->len;
1035 crc = 0;
1036 } else {
1037 skb->ip_summed = CHECKSUM_NONE;
1038 crc = fcoe_fc_crc(fp);
1041 /* copy fc crc and eof to the skb buff */
1042 if (skb_is_nonlinear(skb)) {
1043 skb_frag_t *frag;
1044 if (fcoe_get_paged_crc_eof(skb, tlen)) {
1045 kfree_skb(skb);
1046 return -ENOMEM;
1048 frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
1049 cp = kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ)
1050 + frag->page_offset;
1051 } else {
1052 cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
1055 memset(cp, 0, sizeof(*cp));
1056 cp->fcoe_eof = eof;
1057 cp->fcoe_crc32 = cpu_to_le32(~crc);
1059 if (skb_is_nonlinear(skb)) {
1060 kunmap_atomic(cp, KM_SKB_DATA_SOFTIRQ);
1061 cp = NULL;
1064 /* adjust skb network/transport offsets to match mac/fcoe/fc */
1065 skb_push(skb, elen + hlen);
1066 skb_reset_mac_header(skb);
1067 skb_reset_network_header(skb);
1068 skb->mac_len = elen;
1069 skb->protocol = htons(ETH_P_FCOE);
1070 skb->dev = fc->real_dev;
1072 /* fill up mac and fcoe headers */
1073 eh = eth_hdr(skb);
1074 eh->h_proto = htons(ETH_P_FCOE);
1075 if (fc->ctlr.map_dest)
1076 fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id);
1077 else
1078 /* insert GW address */
1079 memcpy(eh->h_dest, fc->ctlr.dest_addr, ETH_ALEN);
1081 if (unlikely(fc->ctlr.flogi_oxid != FC_XID_UNKNOWN))
1082 memcpy(eh->h_source, fc->ctlr.ctl_src_addr, ETH_ALEN);
1083 else
1084 memcpy(eh->h_source, fc->ctlr.data_src_addr, ETH_ALEN);
1086 hp = (struct fcoe_hdr *)(eh + 1);
1087 memset(hp, 0, sizeof(*hp));
1088 if (FC_FCOE_VER)
1089 FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER);
1090 hp->fcoe_sof = sof;
1092 #ifdef NETIF_F_FSO
1093 /* fcoe lso, mss is in max_payload which is non-zero for FCP data */
1094 if (lp->seq_offload && fr_max_payload(fp)) {
1095 skb_shinfo(skb)->gso_type = SKB_GSO_FCOE;
1096 skb_shinfo(skb)->gso_size = fr_max_payload(fp);
1097 } else {
1098 skb_shinfo(skb)->gso_type = 0;
1099 skb_shinfo(skb)->gso_size = 0;
1101 #endif
1102 /* update tx stats regardless of whether the LLD transmit fails */
1103 stats = fc_lport_get_stats(lp);
1104 stats->TxFrames++;
1105 stats->TxWords += wlen;
1107 /* send down to lld */
1108 fr_dev(fp) = lp;
1109 if (fc->fcoe_pending_queue.qlen)
1110 rc = fcoe_check_wait_queue(lp);
1112 if (rc == 0)
1113 rc = fcoe_start_io(skb);
1115 if (rc) {
1116 spin_lock_bh(&fc->fcoe_pending_queue.lock);
1117 __skb_queue_tail(&fc->fcoe_pending_queue, skb);
1118 spin_unlock_bh(&fc->fcoe_pending_queue.lock);
1119 if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
1120 lp->qfull = 1;
1123 return 0;
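/*
 * On-the-wire layout built by fcoe_xmit(), for reference:
 *
 *   [ ethhdr or vlan_ethhdr ][ fcoe_hdr ][ FC header + payload ]
 *   [ fcoe_crc_eof trailer: CRC-32, EOF, padding ]
 *
 * TxWords above is accounted in 32-bit words via wlen.
 */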
1127 * fcoe_percpu_receive_thread() - recv thread per cpu
1128 * @arg: ptr to the fcoe per cpu struct
1130 * Return: 0 for success
1132 int fcoe_percpu_receive_thread(void *arg)
1134 struct fcoe_percpu_s *p = arg;
1135 u32 fr_len;
1136 struct fc_lport *lp;
1137 struct fcoe_rcv_info *fr;
1138 struct fcoe_dev_stats *stats;
1139 struct fc_frame_header *fh;
1140 struct sk_buff *skb;
1141 struct fcoe_crc_eof crc_eof;
1142 struct fc_frame *fp;
1143 u8 *mac = NULL;
1144 struct fcoe_softc *fc;
1145 struct fcoe_hdr *hp;
1147 set_user_nice(current, -20);
1149 while (!kthread_should_stop()) {
1151 spin_lock_bh(&p->fcoe_rx_list.lock);
1152 while ((skb = __skb_dequeue(&p->fcoe_rx_list)) == NULL) {
1153 set_current_state(TASK_INTERRUPTIBLE);
1154 spin_unlock_bh(&p->fcoe_rx_list.lock);
1155 schedule();
1156 set_current_state(TASK_RUNNING);
1157 if (kthread_should_stop())
1158 return 0;
1159 spin_lock_bh(&p->fcoe_rx_list.lock);
1161 spin_unlock_bh(&p->fcoe_rx_list.lock);
1162 fr = fcoe_dev_from_skb(skb);
1163 lp = fr->fr_dev;
1164 if (unlikely(lp == NULL)) {
1165 FC_DBG("invalid HBA Structure");
1166 kfree_skb(skb);
1167 continue;
1170 if (unlikely(debug_fcoe)) {
1171 FC_DBG("skb_info: len:%d data_len:%d head:%p data:%p "
1172 "tail:%p end:%p sum:%d dev:%s",
1173 skb->len, skb->data_len,
1174 skb->head, skb->data, skb_tail_pointer(skb),
1175 skb_end_pointer(skb), skb->csum,
1176 skb->dev ? skb->dev->name : "<NULL>");
1180 * Save source MAC address before discarding header.
1182 fc = lport_priv(lp);
1183 if (skb_is_nonlinear(skb))
1184 skb_linearize(skb); /* not ideal */
1185 mac = eth_hdr(skb)->h_source;
1188 * Frame length checks and setting up the header pointers
1189 * was done in fcoe_rcv already.
1191 hp = (struct fcoe_hdr *) skb_network_header(skb);
1192 fh = (struct fc_frame_header *) skb_transport_header(skb);
1194 stats = fc_lport_get_stats(lp);
1195 if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
1196 if (stats->ErrorFrames < 5)
1197 printk(KERN_WARNING "FCoE version "
1198 "mismatch: The frame has "
1199 "version %x, but the "
1200 "initiator supports version "
1201 "%x\n", FC_FCOE_DECAPS_VER(hp),
1202 FC_FCOE_VER);
1203 stats->ErrorFrames++;
1204 kfree_skb(skb);
1205 continue;
1208 skb_pull(skb, sizeof(struct fcoe_hdr));
1209 fr_len = skb->len - sizeof(struct fcoe_crc_eof);
1211 stats->RxFrames++;
1212 stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
1214 fp = (struct fc_frame *)skb;
1215 fc_frame_init(fp);
1216 fr_dev(fp) = lp;
1217 fr_sof(fp) = hp->fcoe_sof;
1219 /* Copy out the CRC and EOF trailer for access */
1220 if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) {
1221 kfree_skb(skb);
1222 continue;
1224 fr_eof(fp) = crc_eof.fcoe_eof;
1225 fr_crc(fp) = crc_eof.fcoe_crc32;
1226 if (pskb_trim(skb, fr_len)) {
1227 kfree_skb(skb);
1228 continue;
1232 * We only check the CRC here if no offload validated it and the
1233 * frame is not solicited FCP data; for solicited data the FCP
1234 * layer checks the CRC during the copy.
1236 if (lp->crc_offload && skb->ip_summed == CHECKSUM_UNNECESSARY)
1237 fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
1238 else
1239 fr_flags(fp) |= FCPHF_CRC_UNCHECKED;
1241 fh = fc_frame_header_get(fp);
1242 if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA &&
1243 fh->fh_type == FC_TYPE_FCP) {
1244 fc_exch_recv(lp, lp->emp, fp);
1245 continue;
1247 if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) {
1248 if (le32_to_cpu(fr_crc(fp)) !=
1249 ~crc32(~0, skb->data, fr_len)) {
1250 if (debug_fcoe || stats->InvalidCRCCount < 5)
1251 printk(KERN_WARNING "fcoe: dropping "
1252 "frame with CRC error\n");
1253 stats->InvalidCRCCount++;
1254 stats->ErrorFrames++;
1255 fc_frame_free(fp);
1256 continue;
1258 fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
1260 if (unlikely(fc->ctlr.flogi_oxid != FC_XID_UNKNOWN) &&
1261 fcoe_ctlr_recv_flogi(&fc->ctlr, fp, mac)) {
1262 fc_frame_free(fp);
1263 continue;
1265 fc_exch_recv(lp, lp->emp, fp);
1267 return 0;
1271 * fcoe_watchdog() - fcoe timer callback
1272 * @vp: unused timer argument
1274 * This drains the pending transmit queue for every fc_lport on the
1275 * fcoe_hostlist and clears lport->qfull once the backlog drops below
1276 * FCOE_LOW_QUEUE_DEPTH.
1280 void fcoe_watchdog(ulong vp)
1282 struct fcoe_softc *fc;
1284 read_lock(&fcoe_hostlist_lock);
1285 list_for_each_entry(fc, &fcoe_hostlist, list) {
1286 if (fc->ctlr.lp)
1287 fcoe_check_wait_queue(fc->ctlr.lp);
1289 read_unlock(&fcoe_hostlist_lock);
1291 fcoe_timer.expires = jiffies + (1 * HZ);
1292 add_timer(&fcoe_timer);
1297 * fcoe_check_wait_queue() - attempt to clear the transmit backlog
1298 * @lp: the fc_lport
1300 * This attempts to empty the wait queue: it dequeues skbs from the
1301 * head of the queue and calls fcoe_start_io() for each one. If a
1302 * transmit fails, the skb is restored to the head of the queue and
1303 * retried later.
1305 * The wait_queue is used when an skb transmit fails; the skb goes
1306 * into the wait_queue, which is emptied by the timer function or
1307 * by the next skb transmit.
1309 * Returns: the remaining queue length, or -1 if the queue is already being drained
1311 static int fcoe_check_wait_queue(struct fc_lport *lp)
1313 struct fcoe_softc *fc = lport_priv(lp);
1314 struct sk_buff *skb;
1315 int rc = -1;
1317 spin_lock_bh(&fc->fcoe_pending_queue.lock);
1318 if (fc->fcoe_pending_queue_active)
1319 goto out;
1320 fc->fcoe_pending_queue_active = 1;
1322 while (fc->fcoe_pending_queue.qlen) {
1323 /* keep qlen > 0 until fcoe_start_io succeeds */
1324 fc->fcoe_pending_queue.qlen++;
1325 skb = __skb_dequeue(&fc->fcoe_pending_queue);
1327 spin_unlock_bh(&fc->fcoe_pending_queue.lock);
1328 rc = fcoe_start_io(skb);
1329 spin_lock_bh(&fc->fcoe_pending_queue.lock);
1331 if (rc) {
1332 __skb_queue_head(&fc->fcoe_pending_queue, skb);
1333 /* undo temporary increment above */
1334 fc->fcoe_pending_queue.qlen--;
1335 break;
1337 /* undo temporary increment above */
1338 fc->fcoe_pending_queue.qlen--;
1341 if (fc->fcoe_pending_queue.qlen < FCOE_LOW_QUEUE_DEPTH)
1342 lp->qfull = 0;
1343 fc->fcoe_pending_queue_active = 0;
1344 rc = fc->fcoe_pending_queue.qlen;
1345 out:
1346 spin_unlock_bh(&fc->fcoe_pending_queue.lock);
1347 return rc;
1351 * fcoe_dev_setup() - setup link change notification interface
1353 static void fcoe_dev_setup(void)
1355 register_netdevice_notifier(&fcoe_notifier);
1359 * fcoe_dev_cleanup() - cleanup link change notification interface
1361 static void fcoe_dev_cleanup(void)
1363 unregister_netdevice_notifier(&fcoe_notifier);
1367 * fcoe_device_notification() - netdev event notification callback
1368 * @notifier: context of the notification
1369 * @event: type of event
1370 * @ptr: the net_device associated with the event
1372 * This function is called from the network layer on netdevice events such as link changes
1374 * Returns: NOTIFY_OK, or NOTIFY_DONE if the netdev has no FCoE instance
1376 static int fcoe_device_notification(struct notifier_block *notifier,
1377 ulong event, void *ptr)
1379 struct fc_lport *lp = NULL;
1380 struct net_device *real_dev = ptr;
1381 struct fcoe_softc *fc;
1382 struct fcoe_dev_stats *stats;
1383 u32 link_possible = 1;
1384 u32 mfs;
1385 int rc = NOTIFY_OK;
1387 read_lock(&fcoe_hostlist_lock);
1388 list_for_each_entry(fc, &fcoe_hostlist, list) {
1389 if (fc->real_dev == real_dev) {
1390 lp = fc->ctlr.lp;
1391 break;
1394 read_unlock(&fcoe_hostlist_lock);
1395 if (lp == NULL) {
1396 rc = NOTIFY_DONE;
1397 goto out;
1400 switch (event) {
1401 case NETDEV_DOWN:
1402 case NETDEV_GOING_DOWN:
1403 link_possible = 0;
1404 break;
1405 case NETDEV_UP:
1406 case NETDEV_CHANGE:
1407 break;
1408 case NETDEV_CHANGEMTU:
1409 mfs = fc->real_dev->mtu -
1410 (sizeof(struct fcoe_hdr) +
1411 sizeof(struct fcoe_crc_eof));
1412 if (mfs >= FC_MIN_MAX_FRAME)
1413 fc_set_mfs(lp, mfs);
1414 break;
1415 case NETDEV_REGISTER:
1416 break;
1417 default:
1418 FC_DBG("Unknown event %ld from netdev netlink\n", event);
1420 if (link_possible && !fcoe_link_ok(lp))
1421 fcoe_ctlr_link_up(&fc->ctlr);
1422 else if (fcoe_ctlr_link_down(&fc->ctlr)) {
1423 stats = fc_lport_get_stats(lp);
1424 stats->LinkFailureCount++;
1425 fcoe_clean_pending_queue(lp);
1427 out:
1428 return rc;
1432 * fcoe_if_to_netdev() - parse a name buffer to get netdev
1433 * @buffer: the interface name, possibly with a trailing newline
1435 * Returns: NULL or ptr to net_device
1437 static struct net_device *fcoe_if_to_netdev(const char *buffer)
1439 char *cp;
1440 char ifname[IFNAMSIZ + 2];
1442 if (buffer) {
1443 strlcpy(ifname, buffer, IFNAMSIZ);
1444 cp = ifname + strlen(ifname);
1445 while (--cp >= ifname && *cp == '\n')
1446 *cp = '\0';
1447 return dev_get_by_name(&init_net, ifname);
1449 return NULL;
1453 * fcoe_netdev_to_module_owner() - finds out the driver module of the netdev
1454 * @netdev: the target netdev
1456 * Returns: ptr to the struct module, NULL for failure
1458 static struct module *
1459 fcoe_netdev_to_module_owner(const struct net_device *netdev)
1461 struct device *dev;
1463 if (!netdev)
1464 return NULL;
1466 dev = netdev->dev.parent;
1467 if (!dev)
1468 return NULL;
1470 if (!dev->driver)
1471 return NULL;
1473 return dev->driver->owner;
1477 * fcoe_ethdrv_get() - Hold the Ethernet driver
1478 * @netdev: the target netdev
1480 * Holds the Ethernet driver module by try_module_get() for
1481 * the corresponding netdev.
1483 * Returns: 0 for success
1485 static int fcoe_ethdrv_get(const struct net_device *netdev)
1487 struct module *owner;
1489 owner = fcoe_netdev_to_module_owner(netdev);
1490 if (owner) {
1491 printk(KERN_DEBUG "fcoe:hold driver module %s for %s\n",
1492 module_name(owner), netdev->name);
1493 return try_module_get(owner);
1495 return -ENODEV;
1499 * fcoe_ethdrv_put() - Release the Ethernet driver
1500 * @netdev: the target netdev
1502 * Releases the Ethernet driver module by module_put for
1503 * the corresponding netdev.
1505 * Returns: 0 for success
1507 static int fcoe_ethdrv_put(const struct net_device *netdev)
1509 struct module *owner;
1511 owner = fcoe_netdev_to_module_owner(netdev);
1512 if (owner) {
1513 printk(KERN_DEBUG "fcoe:release driver module %s for %s\n",
1514 module_name(owner), netdev->name);
1515 module_put(owner);
1516 return 0;
1518 return -ENODEV;
1522 * fcoe_destroy() - handles the destroy from sysfs
1523 * @buffer: expected to be an eth if name
1524 * @kp: associated kernel param
1526 * Returns: 0 for success
1528 static int fcoe_destroy(const char *buffer, struct kernel_param *kp)
1530 int rc;
1531 struct net_device *netdev;
1533 netdev = fcoe_if_to_netdev(buffer);
1534 if (!netdev) {
1535 rc = -ENODEV;
1536 goto out_nodev;
1538 /* look for existing lport */
1539 if (!fcoe_hostlist_lookup(netdev)) {
1540 rc = -ENODEV;
1541 goto out_putdev;
1543 rc = fcoe_if_destroy(netdev);
1544 if (rc) {
1545 printk(KERN_ERR "fcoe: fcoe_if_destroy(%s) failed\n",
1546 netdev->name);
1547 rc = -EIO;
1548 goto out_putdev;
1550 fcoe_ethdrv_put(netdev);
1551 rc = 0;
1552 out_putdev:
1553 dev_put(netdev);
1554 out_nodev:
1555 return rc;
1559 * fcoe_create() - Handles the create call from sysfs
1560 * @buffer: expected to be an eth if name
1561 * @kp: associated kernel param
1563 * Returns: 0 for success
1565 static int fcoe_create(const char *buffer, struct kernel_param *kp)
1567 int rc;
1568 struct net_device *netdev;
1570 netdev = fcoe_if_to_netdev(buffer);
1571 if (!netdev) {
1572 rc = -ENODEV;
1573 goto out_nodev;
1575 /* look for existing lport */
1576 if (fcoe_hostlist_lookup(netdev)) {
1577 rc = -EEXIST;
1578 goto out_putdev;
1580 fcoe_ethdrv_get(netdev);
1582 rc = fcoe_if_create(netdev);
1583 if (rc) {
1584 printk(KERN_ERR "fcoe: fcoe_if_create(%s) failed\n",
1585 netdev->name);
1586 fcoe_ethdrv_put(netdev);
1587 rc = -EIO;
1588 goto out_putdev;
1590 rc = 0;
1591 out_putdev:
1592 dev_put(netdev);
1593 out_nodev:
1594 return rc;
1597 module_param_call(create, fcoe_create, NULL, NULL, S_IWUSR);
1598 __MODULE_PARM_TYPE(create, "string");
1599 MODULE_PARM_DESC(create, "Create fcoe port using net device passed in.");
1600 module_param_call(destroy, fcoe_destroy, NULL, NULL, S_IWUSR);
1601 __MODULE_PARM_TYPE(destroy, "string");
1602 MODULE_PARM_DESC(destroy, "Destroy fcoe port");
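/*
 * Usage sketch (assumed sysfs paths for the module_param_call hooks above):
 * interfaces are brought up and torn down by writing the netdev name to the
 * module parameters, e.g.:
 *
 *   echo eth2 > /sys/module/fcoe/parameters/create
 *   echo eth2 > /sys/module/fcoe/parameters/destroy
 */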
1605 * fcoe_link_ok() - Check if link is ok for the fc_lport
1606 * @lp: ptr to the fc_lport
1608 * Any permanently-disqualifying conditions have been previously checked.
1609 * This also updates the speed setting, which may change with link for 100/1000.
1611 * This function should probably be checking for PAUSE support at some point
1612 * in the future. Currently Per-priority-pause is not determinable using
1613 * ethtool, so we shouldn't be restrictive until that problem is resolved.
1615 * Returns: 0 if link is OK for use by FCoE.
1618 int fcoe_link_ok(struct fc_lport *lp)
1620 struct fcoe_softc *fc = lport_priv(lp);
1621 struct net_device *dev = fc->real_dev;
1622 struct ethtool_cmd ecmd = { ETHTOOL_GSET };
1623 int rc = 0;
1625 if ((dev->flags & IFF_UP) && netif_carrier_ok(dev)) {
1626 dev = fc->phys_dev;
1627 if (dev->ethtool_ops->get_settings) {
1628 dev->ethtool_ops->get_settings(dev, &ecmd);
1629 lp->link_supported_speeds &=
1630 ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
1631 if (ecmd.supported & (SUPPORTED_1000baseT_Half |
1632 SUPPORTED_1000baseT_Full))
1633 lp->link_supported_speeds |= FC_PORTSPEED_1GBIT;
1634 if (ecmd.supported & SUPPORTED_10000baseT_Full)
1635 lp->link_supported_speeds |=
1636 FC_PORTSPEED_10GBIT;
1637 if (ecmd.speed == SPEED_1000)
1638 lp->link_speed = FC_PORTSPEED_1GBIT;
1639 if (ecmd.speed == SPEED_10000)
1640 lp->link_speed = FC_PORTSPEED_10GBIT;
1642 } else
1643 rc = -1;
1645 return rc;
1649 * fcoe_percpu_clean() - Clear the pending skbs for an lport
1650 * @lp: the fc_lport
1652 void fcoe_percpu_clean(struct fc_lport *lp)
1654 struct fcoe_percpu_s *pp;
1655 struct fcoe_rcv_info *fr;
1656 struct sk_buff_head *list;
1657 struct sk_buff *skb, *next;
1658 struct sk_buff *head;
1659 unsigned int cpu;
1661 for_each_possible_cpu(cpu) {
1662 pp = &per_cpu(fcoe_percpu, cpu);
1663 spin_lock_bh(&pp->fcoe_rx_list.lock);
1664 list = &pp->fcoe_rx_list;
1665 head = list->next;
1666 for (skb = head; skb != (struct sk_buff *)list;
1667 skb = next) {
1668 next = skb->next;
1669 fr = fcoe_dev_from_skb(skb);
1670 if (fr->fr_dev == lp) {
1671 __skb_unlink(skb, list);
1672 kfree_skb(skb);
1675 spin_unlock_bh(&pp->fcoe_rx_list.lock);
1680 * fcoe_clean_pending_queue() - Dequeue and free every pending skb for an lport
1681 * @lp: the corresponding fc_lport
1683 * Returns: none
1685 void fcoe_clean_pending_queue(struct fc_lport *lp)
1687 struct fcoe_softc *fc = lport_priv(lp);
1688 struct sk_buff *skb;
1690 spin_lock_bh(&fc->fcoe_pending_queue.lock);
1691 while ((skb = __skb_dequeue(&fc->fcoe_pending_queue)) != NULL) {
1692 spin_unlock_bh(&fc->fcoe_pending_queue.lock);
1693 kfree_skb(skb);
1694 spin_lock_bh(&fc->fcoe_pending_queue.lock);
1696 spin_unlock_bh(&fc->fcoe_pending_queue.lock);
1700 * fcoe_reset() - Resets the lport associated with the given Scsi_Host
1701 * @shost: shost the reset is from
1703 * Returns: always 0
1705 int fcoe_reset(struct Scsi_Host *shost)
1707 struct fc_lport *lport = shost_priv(shost);
1708 fc_lport_reset(lport);
1709 return 0;
1713 * fcoe_hostlist_lookup_softc() - find the corresponding fcoe_softc for a given device
1714 * @dev: this is currently ptr to net_device
1716 * Returns: NULL or the located fcoe_softc
1718 static struct fcoe_softc *
1719 fcoe_hostlist_lookup_softc(const struct net_device *dev)
1721 struct fcoe_softc *fc;
1723 read_lock(&fcoe_hostlist_lock);
1724 list_for_each_entry(fc, &fcoe_hostlist, list) {
1725 if (fc->real_dev == dev) {
1726 read_unlock(&fcoe_hostlist_lock);
1727 return fc;
1730 read_unlock(&fcoe_hostlist_lock);
1731 return NULL;
1735 * fcoe_hostlist_lookup() - Find the corresponding lport by netdev
1736 * @netdev: ptr to net_device
1738 * Returns: the located fc_lport, or NULL if none is found
1740 struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev)
1742 struct fcoe_softc *fc;
1744 fc = fcoe_hostlist_lookup_softc(netdev);
1746 return (fc) ? fc->ctlr.lp : NULL;
1750 * fcoe_hostlist_add() - Add a lport to lports list
1751 * @lp: ptr to the fc_lport to be added
1753 * Returns: 0 for success
1755 int fcoe_hostlist_add(const struct fc_lport *lp)
1757 struct fcoe_softc *fc;
1759 fc = fcoe_hostlist_lookup_softc(fcoe_netdev(lp));
1760 if (!fc) {
1761 fc = lport_priv(lp);
1762 write_lock_bh(&fcoe_hostlist_lock);
1763 list_add_tail(&fc->list, &fcoe_hostlist);
1764 write_unlock_bh(&fcoe_hostlist_lock);
1766 return 0;
1770 * fcoe_hostlist_remove() - remove a lport from lports list
1771 * @lp: ptr to the fc_lport to be removed
1773 * Returns: 0 for success
1775 int fcoe_hostlist_remove(const struct fc_lport *lp)
1777 struct fcoe_softc *fc;
1779 fc = fcoe_hostlist_lookup_softc(fcoe_netdev(lp));
1780 BUG_ON(!fc);
1781 write_lock_bh(&fcoe_hostlist_lock);
1782 list_del(&fc->list);
1783 write_unlock_bh(&fcoe_hostlist_lock);
1785 return 0;
1789 * fcoe_init() - fcoe module loading initialization
1791 * Returns 0 on success, negative on failure
1793 static int __init fcoe_init(void)
1795 unsigned int cpu;
1796 int rc = 0;
1797 struct fcoe_percpu_s *p;
1799 INIT_LIST_HEAD(&fcoe_hostlist);
1800 rwlock_init(&fcoe_hostlist_lock);
1802 for_each_possible_cpu(cpu) {
1803 p = &per_cpu(fcoe_percpu, cpu);
1804 skb_queue_head_init(&p->fcoe_rx_list);
1807 for_each_online_cpu(cpu)
1808 fcoe_percpu_thread_create(cpu);
1810 /* Initialize per CPU interrupt thread */
1811 rc = register_hotcpu_notifier(&fcoe_cpu_notifier);
1812 if (rc)
1813 goto out_free;
1815 /* Setup link change notification */
1816 fcoe_dev_setup();
1818 setup_timer(&fcoe_timer, fcoe_watchdog, 0);
1820 mod_timer(&fcoe_timer, jiffies + (10 * HZ));
1822 fcoe_if_init();
1824 return 0;
1826 out_free:
1827 for_each_online_cpu(cpu) {
1828 fcoe_percpu_thread_destroy(cpu);
1831 return rc;
1833 module_init(fcoe_init);
1836 * fcoe_exit() - fcoe module unloading cleanup
1840 static void __exit fcoe_exit(void)
1842 unsigned int cpu;
1843 struct fcoe_softc *fc, *tmp;
1845 fcoe_dev_cleanup();
1847 /* Stop the timer */
1848 del_timer_sync(&fcoe_timer);
1850 /* releases the associated fcoe hosts */
1851 list_for_each_entry_safe(fc, tmp, &fcoe_hostlist, list)
1852 fcoe_if_destroy(fc->real_dev);
1854 unregister_hotcpu_notifier(&fcoe_cpu_notifier);
1856 for_each_online_cpu(cpu) {
1857 fcoe_percpu_thread_destroy(cpu);
1860 /* detach from scsi transport */
1861 fcoe_if_exit();
1863 module_exit(fcoe_exit);