// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 */
#include <linux/module.h>
#include <linux/mempool.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/if_ether.h>
#include <linux/blk-mq-pci.h>
#include <scsi/fc/fc_fip.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_tcq.h>
#include <scsi/libfc.h>
#include <scsi/fc_frame.h>

#include "vnic_dev.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "fnic_io.h"
#include "fnic_fip.h"
#include "fnic.h"

#define PCI_DEVICE_ID_CISCO_FNIC	0x0045
/* Timer to poll notification area for events. Used for MSI interrupts */
#define FNIC_NOTIFY_TIMER_PERIOD	(2 * HZ)

static struct kmem_cache *fnic_sgl_cache[FNIC_SGL_NUM_CACHES];
static struct kmem_cache *fnic_io_req_cache;
static LIST_HEAD(fnic_list);
static DEFINE_SPINLOCK(fnic_list_lock);
static DEFINE_IDA(fnic_ida);

/* Supported devices by fnic module */
static struct pci_device_id fnic_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CISCO, PCI_DEVICE_ID_CISCO_FNIC) },
	{ 0, }
};
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Abhijeet Joglekar <abjoglek@cisco.com>, "
	      "Joseph R. Eykholt <jeykholt@cisco.com>");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, fnic_id_table);
unsigned int fnic_log_level;
module_param(fnic_log_level, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(fnic_log_level, "bit mask of fnic logging levels");

unsigned int io_completions = FNIC_DFLT_IO_COMPLETIONS;
module_param(io_completions, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(io_completions, "Max CQ entries to process at a time");

unsigned int fnic_trace_max_pages = 16;
module_param(fnic_trace_max_pages, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(fnic_trace_max_pages, "Total allocated memory pages "
		 "for fnic trace buffer");

unsigned int fnic_fc_trace_max_pages = 64;
module_param(fnic_fc_trace_max_pages, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(fnic_fc_trace_max_pages,
		 "Total allocated memory pages for fc trace buffer");

static unsigned int fnic_max_qdepth = FNIC_DFLT_QUEUE_DEPTH;
module_param(fnic_max_qdepth, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(fnic_max_qdepth, "Queue depth to report for each LUN");
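/*
 * Example (illustrative): these parameters can be set at module load time,
 * e.g. "modprobe fnic fnic_log_level=1 fnic_trace_max_pages=32", or changed
 * later through /sys/module/fnic/parameters/, since they are writable by
 * root (S_IWUSR).
 */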
static struct libfc_function_template fnic_transport_template = {
	.frame_send = fnic_send,
	.lport_set_port_id = fnic_set_port_id,
	.fcp_abort_io = fnic_empty_scsi_cleanup,
	.fcp_cleanup = fnic_empty_scsi_cleanup,
	.exch_mgr_reset = fnic_exch_mgr_reset
};
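/*
 * fnic_slave_alloc() - called by the SCSI midlayer for each new scsi_device;
 * rejects devices whose remote port is not ready and sets the per-LUN queue
 * depth to fnic_max_qdepth.
 */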
static int fnic_slave_alloc(struct scsi_device *sdev)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));

	if (!rport || fc_remote_port_chkready(rport))
		return -ENXIO;

	scsi_change_queue_depth(sdev, fnic_max_qdepth);
	return 0;
}
static const struct scsi_host_template fnic_host_template = {
	.module = THIS_MODULE,
	.queuecommand = fnic_queuecommand,
	.eh_timed_out = fc_eh_timed_out,
	.eh_abort_handler = fnic_abort_cmd,
	.eh_device_reset_handler = fnic_device_reset,
	.eh_host_reset_handler = fnic_host_reset,
	.slave_alloc = fnic_slave_alloc,
	.change_queue_depth = scsi_change_queue_depth,
	.can_queue = FNIC_DFLT_IO_REQ,
	.sg_tablesize = FNIC_MAX_SG_DESC_CNT,
	.max_sectors = 0xffff,
	.shost_groups = fnic_host_groups,
	.track_queue_depth = 1,
	.cmd_size = sizeof(struct fnic_cmd_priv),
	.map_queues = fnic_mq_map_queues_cpus,
};
static void
fnic_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
{
	if (timeout)
		rport->dev_loss_tmo = timeout;
	else
		rport->dev_loss_tmo = 1;
}
static void fnic_get_host_speed(struct Scsi_Host *shost);
static struct scsi_transport_template *fnic_fc_transport;
static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *);
static void fnic_reset_host_stats(struct Scsi_Host *);
static struct fc_function_template fnic_fc_functions = {
	.show_host_node_name = 1,
	.show_host_port_name = 1,
	.show_host_supported_classes = 1,
	.show_host_supported_fc4s = 1,
	.show_host_active_fc4s = 1,
	.show_host_maxframe_size = 1,
	.show_host_port_id = 1,
	.show_host_supported_speeds = 1,
	.get_host_speed = fnic_get_host_speed,
	.show_host_speed = 1,
	.show_host_port_type = 1,
	.get_host_port_state = fc_get_host_port_state,
	.show_host_port_state = 1,
	.show_host_symbolic_name = 1,
	.show_rport_maxframe_size = 1,
	.show_rport_supported_classes = 1,
	.show_host_fabric_name = 1,
	.show_starget_node_name = 1,
	.show_starget_port_name = 1,
	.show_starget_port_id = 1,
	.show_rport_dev_loss_tmo = 1,
	.set_rport_dev_loss_tmo = fnic_set_rport_dev_loss_tmo,
	.issue_fc_host_lip = fnic_reset,
	.get_fc_host_stats = fnic_get_stats,
	.reset_fc_host_stats = fnic_reset_host_stats,
	.dd_fcrport_size = sizeof(struct fc_rport_libfc_priv),
	.terminate_rport_io = fnic_terminate_rport_io,
	.bsg_request = fc_lport_bsg_request,
};
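/*
 * fnic_get_host_speed() - translate the port speed reported by the vNIC
 * firmware (DCEM_PORTSPEED_*) into the FC transport FC_PORTSPEED_* value
 * exposed through fc_host.
 */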
static void fnic_get_host_speed(struct Scsi_Host *shost)
{
	struct fc_lport *lp = shost_priv(shost);
	struct fnic *fnic = lport_priv(lp);
	u32 port_speed = vnic_dev_port_speed(fnic->vdev);

	/* Add in other values as they get defined in fw */
	switch (port_speed) {
	case DCEM_PORTSPEED_10G:
		fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
		break;
	case DCEM_PORTSPEED_20G:
		fc_host_speed(shost) = FC_PORTSPEED_20GBIT;
		break;
	case DCEM_PORTSPEED_25G:
		fc_host_speed(shost) = FC_PORTSPEED_25GBIT;
		break;
	case DCEM_PORTSPEED_40G:
	case DCEM_PORTSPEED_4x10G:
		fc_host_speed(shost) = FC_PORTSPEED_40GBIT;
		break;
	case DCEM_PORTSPEED_100G:
		fc_host_speed(shost) = FC_PORTSPEED_100GBIT;
		break;
	default:
		fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
		break;
	}
}
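/*
 * fnic_get_stats() - fill lport->host_stats from the vNIC hardware counters.
 * Dumps are rate limited to FNIC_STATS_RATE_LIMIT per second; within that
 * window the previously gathered statistics are returned unchanged.
 */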
static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *host)
{
	int ret;
	struct fc_lport *lp = shost_priv(host);
	struct fnic *fnic = lport_priv(lp);
	struct fc_host_statistics *stats = &lp->host_stats;
	struct vnic_stats *vs;
	unsigned long flags;

	if (time_before(jiffies, fnic->stats_time + HZ / FNIC_STATS_RATE_LIMIT))
		return stats;
	fnic->stats_time = jiffies;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	ret = vnic_dev_stats_dump(fnic->vdev, &fnic->stats);
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	if (ret) {
		FNIC_MAIN_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
			      "fnic: Get vnic stats failed"
			      " 0x%x", ret);
		return stats;
	}

	vs = fnic->stats;
	stats->tx_frames = vs->tx.tx_unicast_frames_ok;
	stats->tx_words = vs->tx.tx_unicast_bytes_ok / 4;
	stats->rx_frames = vs->rx.rx_unicast_frames_ok;
	stats->rx_words = vs->rx.rx_unicast_bytes_ok / 4;
	stats->error_frames = vs->tx.tx_errors + vs->rx.rx_errors;
	stats->dumped_frames = vs->tx.tx_drops + vs->rx.rx_drop;
	stats->invalid_crc_count = vs->rx.rx_crc_errors;
	stats->seconds_since_last_reset =
			(jiffies - fnic->stats_reset_time) / HZ;
	stats->fcp_input_megabytes = div_u64(fnic->fcp_input_bytes, 1000000);
	stats->fcp_output_megabytes = div_u64(fnic->fcp_output_bytes, 1000000);

	return stats;
}
/*
 * fnic_dump_fchost_stats
 * note : dumps fc_statistics into system logs
 */
void fnic_dump_fchost_stats(struct Scsi_Host *host,
			    struct fc_host_statistics *stats)
{
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
		       "fnic: seconds since last reset = %llu\n",
		       stats->seconds_since_last_reset);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
		       "fnic: tx frames = %llu\n",
		       stats->tx_frames);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
		       "fnic: tx words = %llu\n",
		       stats->tx_words);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
		       "fnic: rx frames = %llu\n",
		       stats->rx_frames);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
		       "fnic: rx words = %llu\n",
		       stats->rx_words);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
		       "fnic: lip count = %llu\n",
		       stats->lip_count);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
		       "fnic: nos count = %llu\n",
		       stats->nos_count);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
		       "fnic: error frames = %llu\n",
		       stats->error_frames);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
		       "fnic: dumped frames = %llu\n",
		       stats->dumped_frames);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
		       "fnic: link failure count = %llu\n",
		       stats->link_failure_count);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
		       "fnic: loss of sync count = %llu\n",
		       stats->loss_of_sync_count);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
		       "fnic: loss of signal count = %llu\n",
		       stats->loss_of_signal_count);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
		       "fnic: prim seq protocol err count = %llu\n",
		       stats->prim_seq_protocol_err_count);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
		       "fnic: invalid tx word count = %llu\n",
		       stats->invalid_tx_word_count);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
		       "fnic: invalid crc count = %llu\n",
		       stats->invalid_crc_count);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
		       "fnic: fcp input requests = %llu\n",
		       stats->fcp_input_requests);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
		       "fnic: fcp output requests = %llu\n",
		       stats->fcp_output_requests);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
		       "fnic: fcp control requests = %llu\n",
		       stats->fcp_control_requests);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
		       "fnic: fcp input megabytes = %llu\n",
		       stats->fcp_input_megabytes);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
		       "fnic: fcp output megabytes = %llu\n",
		       stats->fcp_output_megabytes);
}
/*
 * fnic_reset_host_stats : clears host stats
 * note : called when reset_statistics set under sysfs dir
 */
static void fnic_reset_host_stats(struct Scsi_Host *host)
{
	int ret;
	struct fc_lport *lp = shost_priv(host);
	struct fnic *fnic = lport_priv(lp);
	struct fc_host_statistics *stats;
	unsigned long flags;

	/* dump current stats, before clearing them */
	stats = fnic_get_stats(host);
	fnic_dump_fchost_stats(host, stats);

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	ret = vnic_dev_stats_clear(fnic->vdev);
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	if (ret) {
		FNIC_MAIN_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
			      "fnic: Reset vnic stats failed"
			      " 0x%x", ret);
		return;
	}

	fnic->stats_reset_time = jiffies;
	memset(stats, 0, sizeof(*stats));
}
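/*
 * fnic_log_q_error() - read the error_status register of every raw WQ, RQ
 * and copy WQ and log any non-zero value.
 */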
void fnic_log_q_error(struct fnic *fnic)
{
	unsigned int i;
	u32 error_status;

	for (i = 0; i < fnic->raw_wq_count; i++) {
		error_status = ioread32(&fnic->wq[i].ctrl->error_status);
		if (error_status)
			shost_printk(KERN_ERR, fnic->lport->host,
				     "WQ[%d] error_status"
				     " %d\n", i, error_status);
	}

	for (i = 0; i < fnic->rq_count; i++) {
		error_status = ioread32(&fnic->rq[i].ctrl->error_status);
		if (error_status)
			shost_printk(KERN_ERR, fnic->lport->host,
				     "RQ[%d] error_status"
				     " %d\n", i, error_status);
	}

	for (i = 0; i < fnic->wq_copy_count; i++) {
		error_status = ioread32(&fnic->hw_copy_wq[i].ctrl->error_status);
		if (error_status)
			shost_printk(KERN_ERR, fnic->lport->host,
				     "CWQ[%d] error_status"
				     " %d\n", i, error_status);
	}
}
void fnic_handle_link_event(struct fnic *fnic)
{
	unsigned long flags;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (fnic->stop_rx_link_events) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	queue_work(fnic_event_queue, &fnic->link_work);
}
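/*
 * fnic_notify_set() - tell the firmware where to post event notifications;
 * the interrupt offset passed down depends on the interrupt mode that has
 * already been configured (INTx, MSI or MSI-X).
 */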
static int fnic_notify_set(struct fnic *fnic)
{
	int err;

	switch (vnic_dev_get_intr_mode(fnic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		err = vnic_dev_notify_set(fnic->vdev, FNIC_INTX_NOTIFY);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		err = vnic_dev_notify_set(fnic->vdev, -1);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		err = vnic_dev_notify_set(fnic->vdev, fnic->wq_copy_count + fnic->copy_wq_base);
		break;
	default:
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Interrupt mode should be set up"
			     " before devcmd notify set %d\n",
			     vnic_dev_get_intr_mode(fnic->vdev));
		err = -1;
		break;
	}

	return err;
}
static void fnic_notify_timer(struct timer_list *t)
{
	struct fnic *fnic = from_timer(fnic, t, notify_timer);

	fnic_handle_link_event(fnic);
	mod_timer(&fnic->notify_timer,
		  round_jiffies(jiffies + FNIC_NOTIFY_TIMER_PERIOD));
}
static void fnic_fip_notify_timer(struct timer_list *t)
{
	struct fnic *fnic = from_timer(fnic, t, fip_timer);

	fnic_handle_fip_timer(fnic);
}
static void fnic_notify_timer_start(struct fnic *fnic)
{
	switch (vnic_dev_get_intr_mode(fnic->vdev)) {
	case VNIC_DEV_INTR_MODE_MSI:
		/*
		 * Schedule first timeout immediately. The driver is
		 * initialized and ready to look for link up notification
		 */
		mod_timer(&fnic->notify_timer, jiffies);
		break;
	default:
		/* Using intr for notification for INTx/MSI-X */
		break;
	}
}
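/*
 * fnic_dev_wait() - issue a devcmd via @start and poll @finished until it
 * reports completion, sleeping 100ms between polls, for roughly two seconds
 * and at least three attempts before giving up with -ETIMEDOUT.
 */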
static int fnic_dev_wait(struct vnic_dev *vdev,
			 int (*start)(struct vnic_dev *, int),
			 int (*finished)(struct vnic_dev *, int *),
			 int arg)
{
	unsigned long time;
	int done;
	int err;
	int count = 0;

	err = start(vdev, arg);
	if (err)
		return err;

	/* Wait for func to complete.
	 * Sometimes schedule_timeout_uninterruptible takes a long time
	 * to wake up, so we do not retry as we are only waiting for
	 * 2 seconds in the while loop. By adding count, we make sure
	 * we try at least three times before returning -ETIMEDOUT
	 */
	time = jiffies + (HZ * 2);
	do {
		err = finished(vdev, &done);
		count++;
		if (err)
			return err;
		if (done)
			return 0;
		schedule_timeout_uninterruptible(HZ / 10);
	} while (time_after(time, jiffies) || (count < 3));

	return -ETIMEDOUT;
}
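/*
 * fnic_cleanup() - quiesce the device: mask interrupts, disable all queues,
 * drain completed descriptors, clean up outstanding WQ/RQ/copy-WQ entries
 * and release the I/O mempools.
 */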
static int fnic_cleanup(struct fnic *fnic)
{
	unsigned int i;
	int err;
	int raw_wq_rq_counts;

	vnic_dev_disable(fnic->vdev);
	for (i = 0; i < fnic->intr_count; i++)
		vnic_intr_mask(&fnic->intr[i]);

	for (i = 0; i < fnic->rq_count; i++) {
		err = vnic_rq_disable(&fnic->rq[i]);
		if (err)
			return err;
	}
	for (i = 0; i < fnic->raw_wq_count; i++) {
		err = vnic_wq_disable(&fnic->wq[i]);
		if (err)
			return err;
	}
	for (i = 0; i < fnic->wq_copy_count; i++) {
		err = vnic_wq_copy_disable(&fnic->hw_copy_wq[i]);
		if (err)
			return err;
		raw_wq_rq_counts = fnic->raw_wq_count + fnic->rq_count;
		fnic_wq_copy_cmpl_handler(fnic, -1, i + raw_wq_rq_counts);
	}

	/* Clean up completed IOs and FCS frames */
	fnic_wq_cmpl_handler(fnic, -1);
	fnic_rq_cmpl_handler(fnic, -1);

	/* Clean up the IOs and FCS frames that have not completed */
	for (i = 0; i < fnic->raw_wq_count; i++)
		vnic_wq_clean(&fnic->wq[i], fnic_free_wq_buf);
	for (i = 0; i < fnic->rq_count; i++)
		vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf);
	for (i = 0; i < fnic->wq_copy_count; i++)
		vnic_wq_copy_clean(&fnic->hw_copy_wq[i],
				   fnic_wq_copy_cleanup_handler);

	for (i = 0; i < fnic->cq_count; i++)
		vnic_cq_clean(&fnic->cq[i]);
	for (i = 0; i < fnic->intr_count; i++)
		vnic_intr_clean(&fnic->intr[i]);

	mempool_destroy(fnic->io_req_pool);
	for (i = 0; i < FNIC_SGL_NUM_CACHES; i++)
		mempool_destroy(fnic->io_sgl_pool[i]);

	return 0;
}
static void fnic_iounmap(struct fnic *fnic)
{
	if (fnic->bar0.vaddr)
		iounmap(fnic->bar0.vaddr);
}
/**
 * fnic_get_mac() - get assigned data MAC address for FIP code.
 * @lport: local port.
 */
static u8 *fnic_get_mac(struct fc_lport *lport)
{
	struct fnic *fnic = lport_priv(lport);

	return fnic->data_src_addr;
}
static void fnic_set_vlan(struct fnic *fnic, u16 vlan_id)
{
	vnic_dev_set_default_vlan(fnic->vdev, vlan_id);
}
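/*
 * fnic_scsi_drv_init() - derive SCSI host limits (can_queue, max_lun,
 * max_id, max_cmd_len, nr_hw_queues) from the vNIC configuration.
 */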
static int fnic_scsi_drv_init(struct fnic *fnic)
{
	struct Scsi_Host *host = fnic->lport->host;

	/* Configure maximum outstanding IO reqs*/
	if (fnic->config.io_throttle_count != FNIC_UCSM_DFLT_THROTTLE_CNT_BLD)
		host->can_queue = min_t(u32, FNIC_MAX_IO_REQ,
					max_t(u32, FNIC_MIN_IO_REQ,
					fnic->config.io_throttle_count));

	fnic->fnic_max_tag_id = host->can_queue;
	host->max_lun = fnic->config.luns_per_tgt;
	host->max_id = FNIC_MAX_FCP_TARGET;
	host->max_cmd_len = FCOE_MAX_CMD_LEN;

	host->nr_hw_queues = fnic->wq_copy_count;

	shost_printk(KERN_INFO, host,
		     "fnic: can_queue: %d max_lun: %llu",
		     host->can_queue, host->max_lun);

	shost_printk(KERN_INFO, host,
		     "fnic: max_id: %d max_cmd_len: %d nr_hw_queues: %d",
		     host->max_id, host->max_cmd_len, host->nr_hw_queues);

	return 0;
}
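/*
 * fnic_mq_map_queues_cpus() - blk-mq map_queues callback; when running in
 * MSI-X mode, map hardware contexts to CPUs based on PCI IRQ affinity via
 * blk_mq_pci_map_queues(). Nothing is done for INTx/MSI.
 */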
void fnic_mq_map_queues_cpus(struct Scsi_Host *host)
{
	struct fc_lport *lp = shost_priv(host);
	struct fnic *fnic = lport_priv(lp);
	struct pci_dev *l_pdev = fnic->pdev;
	int intr_mode = fnic->config.intr_mode;
	struct blk_mq_queue_map *qmap = &host->tag_set.map[HCTX_TYPE_DEFAULT];

	if (intr_mode == VNIC_DEV_INTR_MODE_MSI || intr_mode == VNIC_DEV_INTR_MODE_INTX) {
		FNIC_MAIN_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
			      "intr_mode is not msix\n");
		return;
	}

	FNIC_MAIN_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
		      "qmap->nr_queues: %d\n", qmap->nr_queues);

	if (l_pdev == NULL) {
		FNIC_MAIN_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
			      "l_pdev is null\n");
		return;
	}

	blk_mq_pci_map_queues(qmap, l_pdev, FNIC_PCI_OFFSET);
}
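/*
 * fnic_probe() - bring up one vNIC SCSI host: enable the PCI function, map
 * BAR0, open and configure the vNIC, allocate queues, interrupts and
 * mempools, set up the FIP/FCoE controller and register the libfc local
 * port with the SCSI midlayer.
 */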
static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct Scsi_Host *host;
	struct fc_lport *lp;
	struct fnic *fnic;
	mempool_t *pool;
	int err = 0;
	int fnic_id = 0;
	int i;
	unsigned long flags;
	int hwq;

	/*
	 * Allocate SCSI Host and set up association between host,
	 * local port, and fnic
	 */
	lp = libfc_host_alloc(&fnic_host_template, sizeof(struct fnic));
	if (!lp) {
		printk(KERN_ERR PFX "Unable to alloc libfc local port\n");
		err = -ENOMEM;
		goto err_out;
	}
	host = lp->host;
	fnic = lport_priv(lp);

	fnic_id = ida_alloc(&fnic_ida, GFP_KERNEL);
	if (fnic_id < 0) {
		pr_err("Unable to alloc fnic ID\n");
		err = fnic_id;
		goto err_out_ida_alloc;
	}
	fnic->lport = lp;
	fnic->link_events = 0;
	fnic->pdev = pdev;

	snprintf(fnic->name, sizeof(fnic->name) - 1, "%s%d", DRV_NAME,
		 host->host_no);

	host->transportt = fnic_fc_transport;
	fnic->fnic_num = fnic_id;
	fnic_stats_debugfs_init(fnic);
	err = pci_enable_device(pdev);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Cannot enable PCI device, aborting.\n");
		goto err_out_free_hba;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Cannot enable PCI resources, aborting\n");
		goto err_out_disable_device;
	}

	pci_set_master(pdev);

	/* Query PCI controller on system for DMA addressing
	 * limitation for the device. Try 47-bit first, and
	 * fail to 32-bit. Cisco VIC supports 47 bits only.
	 */
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(47));
	if (err) {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			shost_printk(KERN_ERR, fnic->lport->host,
				     "No usable DMA configuration "
				     "aborting\n");
			goto err_out_release_regions;
		}
	}

	/* Map vNIC resources from BAR0 */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "BAR0 not memory-map'able, aborting.\n");
		err = -ENODEV;
		goto err_out_release_regions;
	}

	fnic->bar0.vaddr = pci_iomap(pdev, 0, 0);
	fnic->bar0.bus_addr = pci_resource_start(pdev, 0);
	fnic->bar0.len = pci_resource_len(pdev, 0);

	if (!fnic->bar0.vaddr) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Cannot memory-map BAR0 res hdr, "
			     "aborting.\n");
		err = -ENODEV;
		goto err_out_release_regions;
	}

	fnic->vdev = vnic_dev_register(NULL, fnic, pdev, &fnic->bar0);
	if (!fnic->vdev) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "vNIC registration failed, "
			     "aborting.\n");
		err = -ENODEV;
		goto err_out_iounmap;
	}
	err = vnic_dev_cmd_init(fnic->vdev);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "vnic_dev_cmd_init() returns %d, aborting\n",
			     err);
		goto err_out_vnic_unregister;
	}

	err = fnic_dev_wait(fnic->vdev, vnic_dev_open,
			    vnic_dev_open_done, CMD_OPENF_RQ_ENABLE_THEN_POST);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "vNIC dev open failed, aborting.\n");
		goto err_out_dev_cmd_deinit;
	}

	err = vnic_dev_init(fnic->vdev, 0);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "vNIC dev init failed, aborting.\n");
		goto err_out_dev_close;
	}

	err = vnic_dev_mac_addr(fnic->vdev, fnic->ctlr.ctl_src_addr);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "vNIC get MAC addr failed\n");
		goto err_out_dev_close;
	}
	/* set data_src for point-to-point mode and to keep it non-zero */
	memcpy(fnic->data_src_addr, fnic->ctlr.ctl_src_addr, ETH_ALEN);

	/* Get vNIC configuration */
	err = fnic_get_vnic_config(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Get vNIC configuration failed, "
			     "aborting.\n");
		goto err_out_dev_close;
	}

	/* Setup PCI resources */
	pci_set_drvdata(pdev, fnic);

	fnic_get_res_counts(fnic);

	err = fnic_set_intr_mode(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Failed to set intr mode, "
			     "aborting.\n");
		goto err_out_dev_close;
	}

	err = fnic_alloc_vnic_resources(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Failed to alloc vNIC resources, "
			     "aborting.\n");
		goto err_out_clear_intr;
	}

	fnic_scsi_drv_init(fnic);
	for (hwq = 0; hwq < fnic->wq_copy_count; hwq++) {
		fnic->sw_copy_wq[hwq].ioreq_table_size = fnic->fnic_max_tag_id;
		fnic->sw_copy_wq[hwq].io_req_table =
			kzalloc((fnic->sw_copy_wq[hwq].ioreq_table_size + 1) *
				sizeof(struct fnic_io_req *), GFP_KERNEL);
	}
	shost_printk(KERN_INFO, fnic->lport->host, "fnic copy wqs: %d, Q0 ioreq table size: %d\n",
		     fnic->wq_copy_count, fnic->sw_copy_wq[0].ioreq_table_size);

	/* initialize all fnic locks */
	spin_lock_init(&fnic->fnic_lock);

	for (i = 0; i < FNIC_WQ_MAX; i++)
		spin_lock_init(&fnic->wq_lock[i]);

	for (i = 0; i < FNIC_WQ_COPY_MAX; i++) {
		spin_lock_init(&fnic->wq_copy_lock[i]);
		fnic->wq_copy_desc_low[i] = DESC_CLEAN_LOW_WATERMARK;
		fnic->fw_ack_recd[i] = 0;
		fnic->fw_ack_index[i] = -1;
	}

	err = -ENOMEM;
	fnic->io_req_pool = mempool_create_slab_pool(2, fnic_io_req_cache);
	if (!fnic->io_req_pool)
		goto err_out_free_resources;

	pool = mempool_create_slab_pool(2, fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
	if (!pool)
		goto err_out_free_ioreq_pool;
	fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT] = pool;

	pool = mempool_create_slab_pool(2, fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
	if (!pool)
		goto err_out_free_dflt_pool;
	fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX] = pool;
	/* setup vlan config, hw inserts vlan header */
	fnic->vlan_hw_insert = 1;

	/* Initialize the FIP fcoe_ctrl struct */
	fnic->ctlr.send = fnic_eth_send;
	fnic->ctlr.update_mac = fnic_update_mac;
	fnic->ctlr.get_src_addr = fnic_get_mac;
	if (fnic->config.flags & VFCF_FIP_CAPABLE) {
		shost_printk(KERN_INFO, fnic->lport->host,
			     "firmware supports FIP\n");
		/* enable directed and multicast */
		vnic_dev_packet_filter(fnic->vdev, 1, 1, 0, 0, 0);
		vnic_dev_add_addr(fnic->vdev, FIP_ALL_ENODE_MACS);
		vnic_dev_add_addr(fnic->vdev, fnic->ctlr.ctl_src_addr);
		fnic->set_vlan = fnic_set_vlan;
		fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_AUTO);
		timer_setup(&fnic->fip_timer, fnic_fip_notify_timer, 0);
		spin_lock_init(&fnic->vlans_lock);
		INIT_WORK(&fnic->fip_frame_work, fnic_handle_fip_frame);
		INIT_WORK(&fnic->event_work, fnic_handle_event);
		skb_queue_head_init(&fnic->fip_frame_queue);
		INIT_LIST_HEAD(&fnic->evlist);
		INIT_LIST_HEAD(&fnic->vlans);
	} else {
		shost_printk(KERN_INFO, fnic->lport->host,
			     "firmware uses non-FIP mode\n");
		fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_NON_FIP);
		fnic->ctlr.state = FIP_ST_NON_FIP;
	}
	fnic->state = FNIC_IN_FC_MODE;

	atomic_set(&fnic->in_flight, 0);
	fnic->state_flags = FNIC_FLAGS_NONE;

	/* Enable hardware stripping of vlan header on ingress */
	fnic_set_nic_config(fnic, 0, 0, 0, 0, 0, 0, 1);
	/* Setup notification buffer area */
	err = fnic_notify_set(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Failed to alloc notify buffer, aborting.\n");
		goto err_out_free_max_pool;
	}

	/* Setup notify timer when using MSI interrupts */
	if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI)
		timer_setup(&fnic->notify_timer, fnic_notify_timer, 0);

	/* allocate RQ buffers and post them to RQ*/
	for (i = 0; i < fnic->rq_count; i++) {
		err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
		if (err) {
			shost_printk(KERN_ERR, fnic->lport->host,
				     "fnic_alloc_rq_frame can't alloc "
				     "frame\n");
			goto err_out_rq_buf;
		}
	}

	/* Enable all queues */
	for (i = 0; i < fnic->raw_wq_count; i++)
		vnic_wq_enable(&fnic->wq[i]);
	for (i = 0; i < fnic->rq_count; i++) {
		if (!ioread32(&fnic->rq[i].ctrl->enable))
			vnic_rq_enable(&fnic->rq[i]);
	}
	for (i = 0; i < fnic->wq_copy_count; i++)
		vnic_wq_copy_enable(&fnic->hw_copy_wq[i]);

	err = fnic_request_intr(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Unable to request irq.\n");
		goto err_out_request_intr;
	}

	/*
	 * Initialization done with PCI system, hardware, firmware.
	 */
	err = scsi_add_host(lp->host, &pdev->dev);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "fnic: scsi_add_host failed...exiting\n");
		goto err_out_scsi_add_host;
	}
	/* Start local port initialization */

	lp->max_retry_count = fnic->config.flogi_retries;
	lp->max_rport_retry_count = fnic->config.plogi_retries;
	lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
			      FCP_SPPF_CONF_COMPL);
	if (fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR)
		lp->service_params |= FCP_SPPF_RETRY;

	lp->boot_time = jiffies;
	lp->e_d_tov = fnic->config.ed_tov;
	lp->r_a_tov = fnic->config.ra_tov;
	lp->link_supported_speeds = FC_PORTSPEED_10GBIT;
	fc_set_wwnn(lp, fnic->config.node_wwn);
	fc_set_wwpn(lp, fnic->config.port_wwn);

	fcoe_libfc_config(lp, &fnic->ctlr, &fnic_transport_template, 0);

	if (!fc_exch_mgr_alloc(lp, FC_CLASS_3, FCPIO_HOST_EXCH_RANGE_START,
			       FCPIO_HOST_EXCH_RANGE_END, NULL)) {
		err = -ENOMEM;
		goto err_out_fc_exch_mgr_alloc;
	}

	fc_lport_init_stats(lp);
	fnic->stats_reset_time = jiffies;

	if (fc_set_mfs(lp, fnic->config.maxdatafieldsize +
		       sizeof(struct fc_frame_header))) {
		err = -EINVAL;
		goto err_out_free_exch_mgr;
	}
	fc_host_maxframe_size(lp->host) = lp->mfs;
	fc_host_dev_loss_tmo(lp->host) = fnic->config.port_down_timeout / 1000;

	sprintf(fc_host_symbolic_name(lp->host),
		DRV_NAME " v" DRV_VERSION " over %s", fnic->name);

	spin_lock_irqsave(&fnic_list_lock, flags);
	list_add_tail(&fnic->list, &fnic_list);
	spin_unlock_irqrestore(&fnic_list_lock, flags);

	INIT_WORK(&fnic->link_work, fnic_handle_link);
	INIT_WORK(&fnic->frame_work, fnic_handle_frame);
	INIT_WORK(&fnic->flush_work, fnic_flush_tx);
	skb_queue_head_init(&fnic->frame_queue);
	skb_queue_head_init(&fnic->tx_queue);

	vnic_dev_enable(fnic->vdev);

	for (i = 0; i < fnic->intr_count; i++)
		vnic_intr_unmask(&fnic->intr[i]);

	fnic_notify_timer_start(fnic);

	return 0;
err_out_free_exch_mgr:
	fc_exch_mgr_free(lp);
err_out_fc_exch_mgr_alloc:
	fc_remove_host(lp->host);
	scsi_remove_host(lp->host);
err_out_scsi_add_host:
	fnic_free_intr(fnic);
err_out_request_intr:
	for (i = 0; i < fnic->rq_count; i++)
		vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf);
err_out_rq_buf:
	vnic_dev_notify_unset(fnic->vdev);
err_out_free_max_pool:
	mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX]);
err_out_free_dflt_pool:
	mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT]);
err_out_free_ioreq_pool:
	mempool_destroy(fnic->io_req_pool);
err_out_free_resources:
	for (hwq = 0; hwq < fnic->wq_copy_count; hwq++)
		kfree(fnic->sw_copy_wq[hwq].io_req_table);
	fnic_free_vnic_resources(fnic);
err_out_clear_intr:
	fnic_clear_intr_mode(fnic);
err_out_dev_close:
	vnic_dev_close(fnic->vdev);
err_out_dev_cmd_deinit:
err_out_vnic_unregister:
	vnic_dev_unregister(fnic->vdev);
err_out_iounmap:
	fnic_iounmap(fnic);
err_out_release_regions:
	pci_release_regions(pdev);
err_out_disable_device:
	pci_disable_device(pdev);
err_out_free_hba:
	fnic_stats_debugfs_remove(fnic);
	ida_free(&fnic_ida, fnic->fnic_num);
err_out_ida_alloc:
	scsi_host_put(lp->host);
err_out:
	return err;
}
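/*
 * fnic_remove() - tear down in roughly the reverse order of probe: stop link
 * events and timers, flush work queues, log off the fabric, quiesce the
 * device and free all transport, vNIC and PCI resources.
 */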
static void fnic_remove(struct pci_dev *pdev)
{
	struct fnic *fnic = pci_get_drvdata(pdev);
	struct fc_lport *lp = fnic->lport;
	unsigned long flags;
	int hwq;

	/*
	 * Mark state so that the workqueue thread stops forwarding
	 * received frames and link events to the local port. ISR and
	 * other threads that can queue work items will also stop
	 * creating work items on the fnic workqueue
	 */
	spin_lock_irqsave(&fnic->fnic_lock, flags);
	fnic->stop_rx_link_events = 1;
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI)
		del_timer_sync(&fnic->notify_timer);

	/*
	 * Flush the fnic event queue. After this call, there should
	 * be no event queued for this fnic device in the workqueue
	 */
	flush_workqueue(fnic_event_queue);
	skb_queue_purge(&fnic->frame_queue);
	skb_queue_purge(&fnic->tx_queue);

	if (fnic->config.flags & VFCF_FIP_CAPABLE) {
		del_timer_sync(&fnic->fip_timer);
		skb_queue_purge(&fnic->fip_frame_queue);
		fnic_fcoe_reset_vlans(fnic);
		fnic_fcoe_evlist_free(fnic);
	}

	/*
	 * Log off the fabric. This stops all remote ports, dns port,
	 * logs off the fabric. This flushes all rport, disc, lport work
	 */
	fc_fabric_logoff(fnic->lport);

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	fnic->in_remove = 1;
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	fcoe_ctlr_destroy(&fnic->ctlr);
	fc_lport_destroy(lp);
	fnic_stats_debugfs_remove(fnic);

	/*
	 * This stops the fnic device, masks all interrupts. Completed
	 * CQ entries are drained. Posted WQ/RQ/Copy-WQ entries are
	 * cleaned up
	 */
	fnic_cleanup(fnic);

	BUG_ON(!skb_queue_empty(&fnic->frame_queue));
	BUG_ON(!skb_queue_empty(&fnic->tx_queue));

	spin_lock_irqsave(&fnic_list_lock, flags);
	list_del(&fnic->list);
	spin_unlock_irqrestore(&fnic_list_lock, flags);

	fc_remove_host(fnic->lport->host);
	scsi_remove_host(fnic->lport->host);
	for (hwq = 0; hwq < fnic->wq_copy_count; hwq++)
		kfree(fnic->sw_copy_wq[hwq].io_req_table);
	fc_exch_mgr_free(fnic->lport);
	vnic_dev_notify_unset(fnic->vdev);
	fnic_free_intr(fnic);
	fnic_free_vnic_resources(fnic);
	fnic_clear_intr_mode(fnic);
	vnic_dev_close(fnic->vdev);
	vnic_dev_unregister(fnic->vdev);
	fnic_iounmap(fnic);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	ida_free(&fnic_ida, fnic->fnic_num);
	scsi_host_put(lp->host);
}
static struct pci_driver fnic_driver = {
	.name = DRV_NAME,
	.id_table = fnic_id_table,
	.probe = fnic_probe,
	.remove = fnic_remove,
};
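/*
 * fnic_init_module() - module entry point: set up debugfs and trace buffers,
 * create the SGL and io_req slab caches and the ordered work queues, attach
 * the FC transport template and finally register the PCI driver.
 */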
static int __init fnic_init_module(void)
{
	size_t len;
	int err = 0;

	printk(KERN_INFO PFX "%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);

	/* Create debugfs entries for fnic */
	err = fnic_debugfs_init();
	if (err < 0) {
		printk(KERN_ERR PFX "Failed to create fnic directory "
		       "for tracing and stats logging\n");
		fnic_debugfs_terminate();
	}

	/* Allocate memory for trace buffer */
	err = fnic_trace_buf_init();
	if (err < 0) {
		printk(KERN_ERR PFX
		       "Trace buffer initialization Failed. "
		       "Fnic Tracing utility is disabled\n");
	}

	/* Allocate memory for fc trace buffer */
	err = fnic_fc_trace_init();
	if (err < 0) {
		printk(KERN_ERR PFX "FC trace buffer initialization Failed "
		       "FC frame tracing utility is disabled\n");
		fnic_fc_trace_free();
	}

	/* Create a cache for allocation of default size sgls */
	len = sizeof(struct fnic_dflt_sgl_list);
	fnic_sgl_cache[FNIC_SGL_CACHE_DFLT] = kmem_cache_create
		("fnic_sgl_dflt", len + FNIC_SG_DESC_ALIGN, FNIC_SG_DESC_ALIGN,
		 SLAB_HWCACHE_ALIGN, NULL);
	if (!fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]) {
		printk(KERN_ERR PFX "failed to create fnic dflt sgl slab\n");
		err = -ENOMEM;
		goto err_create_fnic_sgl_slab_dflt;
	}

	/* Create a cache for allocation of max size sgls*/
	len = sizeof(struct fnic_sgl_list);
	fnic_sgl_cache[FNIC_SGL_CACHE_MAX] = kmem_cache_create
		("fnic_sgl_max", len + FNIC_SG_DESC_ALIGN, FNIC_SG_DESC_ALIGN,
		 SLAB_HWCACHE_ALIGN, NULL);
	if (!fnic_sgl_cache[FNIC_SGL_CACHE_MAX]) {
		printk(KERN_ERR PFX "failed to create fnic max sgl slab\n");
		err = -ENOMEM;
		goto err_create_fnic_sgl_slab_max;
	}

	/* Create a cache of io_req structs for use via mempool */
	fnic_io_req_cache = kmem_cache_create("fnic_io_req",
					      sizeof(struct fnic_io_req),
					      0, SLAB_HWCACHE_ALIGN, NULL);
	if (!fnic_io_req_cache) {
		printk(KERN_ERR PFX "failed to create fnic io_req slab\n");
		err = -ENOMEM;
		goto err_create_fnic_ioreq_slab;
	}

	fnic_event_queue =
		alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, "fnic_event_wq");
	if (!fnic_event_queue) {
		printk(KERN_ERR PFX "fnic work queue create failed\n");
		err = -ENOMEM;
		goto err_create_fnic_workq;
	}

	fnic_fip_queue =
		alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, "fnic_fip_q");
	if (!fnic_fip_queue) {
		printk(KERN_ERR PFX "fnic FIP work queue create failed\n");
		err = -ENOMEM;
		goto err_create_fip_workq;
	}

	fnic_fc_transport = fc_attach_transport(&fnic_fc_functions);
	if (!fnic_fc_transport) {
		printk(KERN_ERR PFX "fc_attach_transport error\n");
		err = -ENOMEM;
		goto err_fc_transport;
	}

	/* register the driver with PCI system */
	err = pci_register_driver(&fnic_driver);
	if (err < 0) {
		printk(KERN_ERR PFX "pci register error\n");
		goto err_pci_register;
	}

	return err;

err_pci_register:
	fc_release_transport(fnic_fc_transport);
err_fc_transport:
	destroy_workqueue(fnic_fip_queue);
err_create_fip_workq:
	destroy_workqueue(fnic_event_queue);
err_create_fnic_workq:
	kmem_cache_destroy(fnic_io_req_cache);
err_create_fnic_ioreq_slab:
	kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
err_create_fnic_sgl_slab_max:
	kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
err_create_fnic_sgl_slab_dflt:
	fnic_fc_trace_free();
	fnic_debugfs_terminate();
	return err;
}
static void __exit fnic_cleanup_module(void)
{
	pci_unregister_driver(&fnic_driver);
	destroy_workqueue(fnic_event_queue);
	if (fnic_fip_queue)
		destroy_workqueue(fnic_fip_queue);
	kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
	kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
	kmem_cache_destroy(fnic_io_req_cache);
	fc_release_transport(fnic_fc_transport);
	fnic_fc_trace_free();
	fnic_debugfs_terminate();
	ida_destroy(&fnic_ida);
}
module_init(fnic_init_module);
module_exit(fnic_cleanup_module);