// SPDX-License-Identifier: GPL-2.0-only
// Copyright 2014 Cisco Systems, Inc.  All rights reserved.

#include <linux/module.h>
#include <linux/mempool.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>

#include "snic.h"
#include "snic_fwint.h"

#define PCI_DEVICE_ID_CISCO_SNIC	0x0046

/* Supported devices by snic module */
static struct pci_device_id snic_id_table[] = {
	{PCI_DEVICE(0x1137, PCI_DEVICE_ID_CISCO_SNIC) },
	{ 0, }	/* end of table */
};

unsigned int snic_log_level = 0x0;
module_param(snic_log_level, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(snic_log_level, "bitmask for snic logging levels");

#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
unsigned int snic_trace_max_pages = 16;
module_param(snic_trace_max_pages, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(snic_trace_max_pages,
		"Total allocated memory pages for snic trace buffer");

#endif

unsigned int snic_max_qdepth = SNIC_DFLT_QUEUE_DEPTH;
module_param(snic_max_qdepth, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(snic_max_qdepth, "Queue depth to report for each LUN");

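/*
 * Note: all three module parameters above are declared with
 * S_IRUGO | S_IWUSR, so they can be given on the modprobe command line
 * (for example "modprobe snic snic_max_qdepth=32") and read or updated
 * later through /sys/module/snic/parameters/<name>. snic_max_qdepth is
 * sampled when a device is configured, so later writes only affect
 * devices configured afterwards.
 */
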
/*
 * snic_slave_alloc : callback function to SCSI Mid Layer, called on
 * scsi device initialization.
 */
static int
snic_slave_alloc(struct scsi_device *sdev)
{
	struct snic_tgt *tgt = starget_to_tgt(scsi_target(sdev));

	if (!tgt || snic_tgt_chkready(tgt))
		return -ENXIO;

	return 0;
}

/*
 * snic_slave_configure : callback function to SCSI Mid Layer, called on
 * scsi device initialization.
 */
static int
snic_slave_configure(struct scsi_device *sdev)
{
	struct snic *snic = shost_priv(sdev->host);
	u32 qdepth = 0, max_ios = 0;
	int tmo = SNIC_DFLT_CMD_TIMEOUT * HZ;

	/* Set Queue Depth */
	max_ios = snic_max_qdepth;
	qdepth = min_t(u32, max_ios, SNIC_MAX_QUEUE_DEPTH);
	scsi_change_queue_depth(sdev, qdepth);

	if (snic->fwinfo.io_tmo > 1)
		tmo = snic->fwinfo.io_tmo * HZ;

	/* FW requires extended timeouts */
	blk_queue_rq_timeout(sdev->request_queue, tmo);

	return 0;
}

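/*
 * The timeouts above are evidently kept in seconds and converted to jiffies
 * by multiplying with HZ before being handed to the block layer; the
 * firmware-supplied fwinfo.io_tmo overrides the driver default
 * SNIC_DFLT_CMD_TIMEOUT whenever it is greater than one second.
 */
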
static int
snic_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	struct snic *snic = shost_priv(sdev->host);
	int qsz = 0;

	qsz = min_t(u32, qdepth, SNIC_MAX_QUEUE_DEPTH);
	if (qsz < sdev->queue_depth)
		atomic64_inc(&snic->s_stats.misc.qsz_rampdown);
	else if (qsz > sdev->queue_depth)
		atomic64_inc(&snic->s_stats.misc.qsz_rampup);

	atomic64_set(&snic->s_stats.misc.last_qsz, sdev->queue_depth);

	scsi_change_queue_depth(sdev, qsz);

	return sdev->queue_depth;
}

static const struct scsi_host_template snic_host_template = {
	.module = THIS_MODULE,
	.name = SNIC_DRV_NAME,
	.queuecommand = snic_queuecommand,
	.eh_abort_handler = snic_abort_cmd,
	.eh_device_reset_handler = snic_device_reset,
	.eh_host_reset_handler = snic_host_reset,
	.slave_alloc = snic_slave_alloc,
	.slave_configure = snic_slave_configure,
	.change_queue_depth = snic_change_queue_depth,
	.this_id = -1,
	.cmd_per_lun = SNIC_DFLT_QUEUE_DEPTH,
	.can_queue = SNIC_MAX_IO_REQ,
	.sg_tablesize = SNIC_MAX_SG_DESC_CNT,
	.max_sectors = 0x800,
	.shost_groups = snic_host_groups,
	.track_queue_depth = 1,
	.cmd_size = sizeof(struct snic_internal_io_state),
	.proc_name = "snic_scsi",
};

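/*
 * .cmd_size asks the SCSI midlayer to reserve
 * sizeof(struct snic_internal_io_state) bytes of driver-private storage
 * behind every struct scsi_cmnd, so no separate per-command allocation is
 * needed in the I/O path; such private areas are typically reached through
 * scsi_cmd_priv().
 */
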
/*
 * snic_handle_link_event : Handles link events such as link up/down/error
 */
void
snic_handle_link_event(struct snic *snic)
{
	unsigned long flags;

	spin_lock_irqsave(&snic->snic_lock, flags);
	if (snic->stop_link_events) {
		spin_unlock_irqrestore(&snic->snic_lock, flags);

		return;
	}
	spin_unlock_irqrestore(&snic->snic_lock, flags);

	queue_work(snic_glob->event_q, &snic->link_work);
} /* end of snic_handle_link_event */

/*
 * snic_notify_set : sets notification area
 * This notification area is to receive events from fw
 * Note: snic supports only MSIX interrupts, in which case we can just call
 * svnic_dev_notify_set directly
 */
static int
snic_notify_set(struct snic *snic)
{
	int ret = 0;
	enum vnic_dev_intr_mode intr_mode;

	intr_mode = svnic_dev_get_intr_mode(snic->vdev);

	if (intr_mode == VNIC_DEV_INTR_MODE_MSIX) {
		ret = svnic_dev_notify_set(snic->vdev, SNIC_MSIX_ERR_NOTIFY);
	} else {
		SNIC_HOST_ERR(snic->shost,
			      "Interrupt mode should be setup before devcmd notify set %d\n",
			      intr_mode);
		ret = -1;
	}

	return ret;
} /* end of snic_notify_set */

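/*
 * snic_probe() selects the interrupt mode with snic_set_intr_mode() well
 * before it calls snic_notify_set(), so the error branch above should only
 * trigger if that earlier setup step was skipped or did not take effect.
 */
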
/*
 * snic_dev_wait : polls vnic open status.
 */
static int
snic_dev_wait(struct vnic_dev *vdev,
	      int (*start)(struct vnic_dev *, int),
	      int (*finished)(struct vnic_dev *, int *),
	      int arg)
{
	unsigned long time;
	int ret, done;
	int retry_cnt = 0;

	ret = start(vdev, arg);
	if (ret)
		return ret;

	/*
	 * Wait for func to complete...2 seconds max.
	 *
	 * Sometimes schedule_timeout_uninterruptible takes a long time to
	 * wake up, which can consume the whole retry window. The retry
	 * counter ensures we poll at least a few times before giving up.
	 */
	time = jiffies + (HZ * 2);
	do {
		ret = finished(vdev, &done);
		if (ret)
			return ret;

		if (done)
			return 0;

		schedule_timeout_uninterruptible(HZ / 10);
		++retry_cnt;
	} while (time_after(time, jiffies) || (retry_cnt < 3));

	return -ETIMEDOUT;
} /* end of snic_dev_wait */

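/*
 * Typical usage, as in snic_probe():
 *
 *	ret = snic_dev_wait(snic->vdev, svnic_dev_open,
 *			    snic_vdev_open_done, 0);
 *
 * i.e. issue the asynchronous devcmd via 'start' and then poll 'finished'
 * until it reports completion or the two-second window expires.
 */
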
/*
 * snic_cleanup: called by snic_remove
 * Stops the snic device, masks all interrupts, completed CQ entries are
 * drained. Posted WQ/RQ/Copy-WQ entries are cleaned up.
 */
static int
snic_cleanup(struct snic *snic)
{
	unsigned int i;
	int ret;

	svnic_dev_disable(snic->vdev);
	for (i = 0; i < snic->intr_count; i++)
		svnic_intr_mask(&snic->intr[i]);

	for (i = 0; i < snic->wq_count; i++) {
		ret = svnic_wq_disable(&snic->wq[i]);
		if (ret)
			return ret;
	}

	/* Clean up completed IOs */
	snic_fwcq_cmpl_handler(snic, -1);

	snic_wq_cmpl_handler(snic, -1);

	/* Clean up the IOs that have not completed */
	for (i = 0; i < snic->wq_count; i++)
		svnic_wq_clean(&snic->wq[i], snic_free_wq_buf);

	for (i = 0; i < snic->cq_count; i++)
		svnic_cq_clean(&snic->cq[i]);

	for (i = 0; i < snic->intr_count; i++)
		svnic_intr_clean(&snic->intr[i]);

	/* Cleanup snic specific requests */
	snic_free_all_untagged_reqs(snic);

	/* Cleanup Pending SCSI commands */
	snic_shutdown_scsi_cleanup(snic);

	for (i = 0; i < SNIC_REQ_MAX_CACHES; i++)
		mempool_destroy(snic->req_pool[i]);

	return 0;
} /* end of snic_cleanup */

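/*
 * The teardown above mirrors the setup order in snic_probe(): quiesce the
 * device, interrupts and work queues first, drain any completions that are
 * already posted, and only then release the outstanding requests and the
 * per-snic mempools.
 */
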
static void
snic_iounmap(struct snic *snic)
{
	if (snic->bar0.vaddr)
		iounmap(snic->bar0.vaddr);
}

/*
 * snic_vdev_open_done : polls for svnic_dev_open cmd completion.
 */
static int
snic_vdev_open_done(struct vnic_dev *vdev, int *done)
{
	struct snic *snic = svnic_dev_priv(vdev);
	int ret;
	int nretries = 5;

	do {
		ret = svnic_dev_open_done(vdev, done);
		if (ret == 0)
			break;

		SNIC_HOST_INFO(snic->shost, "VNIC_DEV_OPEN Timedout.\n");
	} while (nretries--);

	return ret;
} /* end of snic_vdev_open_done */

/*
 * snic_add_host : registers scsi host with ML
 */
static int
snic_add_host(struct Scsi_Host *shost, struct pci_dev *pdev)
{
	int ret = 0;

	ret = scsi_add_host(shost, &pdev->dev);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "snic: scsi_add_host failed. %d\n",
			      ret);

		return ret;
	}

	SNIC_BUG_ON(shost->work_q != NULL);
	shost->work_q = alloc_ordered_workqueue("scsi_wq_%d", WQ_MEM_RECLAIM,
						shost->host_no);
	if (!shost->work_q) {
		SNIC_HOST_ERR(shost, "Failed to Create ScsiHost wq.\n");

		ret = -ENOMEM;
	}

	return ret;
} /* end of snic_add_host */

static void
snic_del_host(struct Scsi_Host *shost)
{
	if (!shost->work_q)
		return;

	destroy_workqueue(shost->work_q);
	shost->work_q = NULL;
	scsi_remove_host(shost);
}

int
snic_get_state(struct snic *snic)
{
	return atomic_read(&snic->state);
}

void
snic_set_state(struct snic *snic, enum snic_state state)
{
	SNIC_HOST_INFO(snic->shost, "snic state change from %s to %s\n",
		       snic_state_to_str(snic_get_state(snic)),
		       snic_state_to_str(state));

	atomic_set(&snic->state, state);
}

/*
 * snic_probe : Initialize the snic interface.
 */
static int
snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct Scsi_Host *shost;
	struct snic *snic;
	mempool_t *pool;
	unsigned long flags;
	u32 max_ios = 0;
	int ret, i;

	/* Device Information */
	SNIC_INFO("snic device %4x:%4x:%4x:%4x: ",
		  pdev->vendor, pdev->device, pdev->subsystem_vendor,
		  pdev->subsystem_device);

	SNIC_INFO("snic device bus %x: slot %x: fn %x\n",
		  pdev->bus->number, PCI_SLOT(pdev->devfn),
		  PCI_FUNC(pdev->devfn));

	/*
	 * Allocate SCSI Host and set up the association between host and snic
	 */
	shost = scsi_host_alloc(&snic_host_template, sizeof(struct snic));
	if (!shost) {
		SNIC_ERR("Unable to alloc scsi_host\n");
		ret = -ENOMEM;

		goto prob_end;
	}
	snic = shost_priv(shost);
	snic->shost = shost;

	snprintf(snic->name, sizeof(snic->name) - 1, "%s%d", SNIC_DRV_NAME,
		 shost->host_no);

	SNIC_HOST_INFO(shost,
		       "snic%d = %p shost = %p device bus %x: slot %x: fn %x\n",
		       shost->host_no, snic, shost, pdev->bus->number,
		       PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
	/* Per snic debugfs init */
	snic_stats_debugfs_init(snic);
#endif

	/* Setup PCI Resources */
	pci_set_drvdata(pdev, snic);
	snic->pdev = pdev;

	ret = pci_enable_device(pdev);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Cannot enable PCI Resources, aborting : %d\n",
			      ret);

		goto err_free_snic;
	}

	ret = pci_request_regions(pdev, SNIC_DRV_NAME);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Cannot obtain PCI Resources, aborting : %d\n",
			      ret);

		goto err_pci_disable;
	}

	pci_set_master(pdev);

	/*
	 * Query PCI Controller on system for DMA addressing
	 * limitation for the device. Try 43-bit first, and
	 * fall back to 32-bit.
	 */
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(43));
	if (ret) {
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret) {
			SNIC_HOST_ERR(shost,
				      "No Usable DMA Configuration, aborting %d\n",
				      ret);
			goto err_rel_regions;
		}
	}

	/* Map vNIC resources from BAR0 */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		SNIC_HOST_ERR(shost, "BAR0 not memory mappable aborting.\n");

		ret = -ENODEV;
		goto err_rel_regions;
	}

	snic->bar0.vaddr = pci_iomap(pdev, 0, 0);
	if (!snic->bar0.vaddr) {
		SNIC_HOST_ERR(shost,
			      "Cannot memory map BAR0 res hdr aborting.\n");

		ret = -ENODEV;
		goto err_rel_regions;
	}

	snic->bar0.bus_addr = pci_resource_start(pdev, 0);
	snic->bar0.len = pci_resource_len(pdev, 0);
	SNIC_BUG_ON(snic->bar0.bus_addr == 0);

	/* Devcmd2 Resource Allocation and Initialization */
	snic->vdev = svnic_dev_alloc_discover(NULL, snic, pdev, &snic->bar0, 1);
	if (!snic->vdev) {
		SNIC_HOST_ERR(shost, "vNIC Resource Discovery Failed.\n");

		ret = -ENODEV;
		goto err_iounmap;
	}

	ret = svnic_dev_cmd_init(snic->vdev, 0);
	if (ret) {
		SNIC_HOST_INFO(shost, "Devcmd2 Init Failed. err = %d\n", ret);

		goto err_vnic_unreg;
	}

	ret = snic_dev_wait(snic->vdev, svnic_dev_open, snic_vdev_open_done, 0);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "vNIC dev open failed, aborting. %d\n",
			      ret);

		goto err_vnic_unreg;
	}

	ret = svnic_dev_init(snic->vdev, 0);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "vNIC dev init failed. aborting. %d\n",
			      ret);

		goto err_dev_close;
	}

	/* Get vNIC information */
	ret = snic_get_vnic_config(snic);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Get vNIC configuration failed, aborting. %d\n",
			      ret);

		goto err_dev_close;
	}

	/* Configure Maximum Outstanding IO reqs */
	max_ios = snic->config.io_throttle_count;
	if (max_ios != SNIC_UCSM_DFLT_THROTTLE_CNT_BLD)
		shost->can_queue = min_t(u32, SNIC_MAX_IO_REQ,
					 max_t(u32, SNIC_MIN_IO_REQ, max_ios));

	snic->max_tag_id = shost->can_queue;

	shost->max_lun = snic->config.luns_per_tgt;
	shost->max_id = SNIC_MAX_TARGET;

	shost->max_cmd_len = MAX_COMMAND_SIZE; /* defined in scsi_cmnd.h */

	snic_get_res_counts(snic);

	/*
	 * Assumption: Only MSIx is supported
	 */
	ret = snic_set_intr_mode(snic);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Failed to set intr mode aborting. %d\n",
			      ret);

		goto err_dev_close;
	}

	ret = snic_alloc_vnic_res(snic);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Failed to alloc vNIC resources aborting. %d\n",
			      ret);

		goto err_clear_intr;
	}

	/* Initialize specific lists */
	INIT_LIST_HEAD(&snic->list);

	/*
	 * spl_cmd_list for maintaining snic specific cmds
	 * such as EXCH_VER_REQ, REPORT_TARGETS etc
	 */
	INIT_LIST_HEAD(&snic->spl_cmd_list);
	spin_lock_init(&snic->spl_cmd_lock);

	/* initialize all snic locks */
	spin_lock_init(&snic->snic_lock);

	for (i = 0; i < SNIC_WQ_MAX; i++)
		spin_lock_init(&snic->wq_lock[i]);

	for (i = 0; i < SNIC_IO_LOCKS; i++)
		spin_lock_init(&snic->io_req_lock[i]);

	pool = mempool_create_slab_pool(2,
				snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL]);
	if (!pool) {
		SNIC_HOST_ERR(shost, "dflt sgl pool creation failed\n");

		ret = -ENOMEM;
		goto err_free_res;
	}

	snic->req_pool[SNIC_REQ_CACHE_DFLT_SGL] = pool;

	pool = mempool_create_slab_pool(2,
				snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL]);
	if (!pool) {
		SNIC_HOST_ERR(shost, "max sgl pool creation failed\n");

		ret = -ENOMEM;
		goto err_free_dflt_sgl_pool;
	}

	snic->req_pool[SNIC_REQ_CACHE_MAX_SGL] = pool;

	pool = mempool_create_slab_pool(2,
				snic_glob->req_cache[SNIC_REQ_TM_CACHE]);
	if (!pool) {
		SNIC_HOST_ERR(shost, "snic tmreq info pool creation failed.\n");

		ret = -ENOMEM;
		goto err_free_max_sgl_pool;
	}

	snic->req_pool[SNIC_REQ_TM_CACHE] = pool;

	/* Initialize snic state */
	atomic_set(&snic->state, SNIC_INIT);

	atomic_set(&snic->ios_inflight, 0);

	/* Setup notification buffer area */
	ret = snic_notify_set(snic);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Failed to alloc notify buffer aborting. %d\n",
			      ret);

		goto err_free_tmreq_pool;
	}

	spin_lock_irqsave(&snic_glob->snic_list_lock, flags);
	list_add_tail(&snic->list, &snic_glob->snic_list);
	spin_unlock_irqrestore(&snic_glob->snic_list_lock, flags);

	snic_disc_init(&snic->disc);
	INIT_WORK(&snic->tgt_work, snic_handle_tgt_disc);
	INIT_WORK(&snic->disc_work, snic_handle_disc);
	INIT_WORK(&snic->link_work, snic_handle_link);

	/* Enable all queues */
	for (i = 0; i < snic->wq_count; i++)
		svnic_wq_enable(&snic->wq[i]);

	ret = svnic_dev_enable_wait(snic->vdev);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "vNIC dev enable failed w/ error %d\n",
			      ret);

		goto err_vdev_enable;
	}

	ret = snic_request_intr(snic);
	if (ret) {
		SNIC_HOST_ERR(shost, "Unable to request irq. %d\n", ret);

		goto err_req_intr;
	}

	for (i = 0; i < snic->intr_count; i++)
		svnic_intr_unmask(&snic->intr[i]);

	/* Get snic params */
	ret = snic_get_conf(snic);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Failed to get snic io config from FW w err %d\n",
			      ret);

		goto err_get_conf;
	}

	/*
	 * Initialization done with PCI system, hardware, firmware.
	 * Add shost to SCSI
	 */
	ret = snic_add_host(shost, pdev);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Adding scsi host Failed ... exiting. %d\n",
			      ret);

		goto err_get_conf;
	}

	snic_set_state(snic, SNIC_ONLINE);

	ret = snic_disc_start(snic);
	if (ret) {
		SNIC_HOST_ERR(shost, "snic_probe:Discovery Failed w err = %d\n",
			      ret);

		goto err_get_conf;
	}

	SNIC_HOST_INFO(shost, "SNIC Device Probe Successful.\n");

	return 0;

err_get_conf:
	snic_free_all_untagged_reqs(snic);

	for (i = 0; i < snic->intr_count; i++)
		svnic_intr_mask(&snic->intr[i]);

	snic_free_intr(snic);

err_req_intr:
	svnic_dev_disable(snic->vdev);

err_vdev_enable:
	svnic_dev_notify_unset(snic->vdev);

	for (i = 0; i < snic->wq_count; i++) {
		int rc = 0;

		rc = svnic_wq_disable(&snic->wq[i]);
		if (rc) {
			SNIC_HOST_ERR(shost,
				      "WQ Disable Failed w/ err = %d\n", rc);

			break;
		}
	}
	snic_del_host(snic->shost);

err_free_tmreq_pool:
	mempool_destroy(snic->req_pool[SNIC_REQ_TM_CACHE]);

err_free_max_sgl_pool:
	mempool_destroy(snic->req_pool[SNIC_REQ_CACHE_MAX_SGL]);

err_free_dflt_sgl_pool:
	mempool_destroy(snic->req_pool[SNIC_REQ_CACHE_DFLT_SGL]);

err_free_res:
	snic_free_vnic_res(snic);

err_clear_intr:
	snic_clear_intr_mode(snic);

err_dev_close:
	svnic_dev_close(snic->vdev);

err_vnic_unreg:
	svnic_dev_unregister(snic->vdev);

err_iounmap:
	snic_iounmap(snic);

err_rel_regions:
	pci_release_regions(pdev);

err_pci_disable:
	pci_disable_device(pdev);

err_free_snic:
#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
	snic_stats_debugfs_remove(snic);
#endif
	scsi_host_put(shost);
	pci_set_drvdata(pdev, NULL);

prob_end:
	SNIC_INFO("sNIC device : bus %d: slot %d: fn %d Registration Failed.\n",
		  pdev->bus->number, PCI_SLOT(pdev->devfn),
		  PCI_FUNC(pdev->devfn));

	return ret;
} /* end of snic_probe */

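/*
 * The error labels in snic_probe() unwind strictly in the reverse order of
 * the setup steps above, so each goto target only undoes work that is known
 * to have completed; when scsi_host_alloc() itself fails, control falls
 * through to prob_end and only the registration failure is logged.
 */
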
/*
 * snic_remove : invoked on unbinding the interface to cleanup the
 * resources allocated in snic_probe on initialization.
 */
static void
snic_remove(struct pci_dev *pdev)
{
	struct snic *snic = pci_get_drvdata(pdev);
	unsigned long flags;

	if (!snic) {
		SNIC_INFO("sNIC dev: bus %d slot %d fn %d snic inst is null.\n",
			  pdev->bus->number, PCI_SLOT(pdev->devfn),
			  PCI_FUNC(pdev->devfn));

		return;
	}

	/*
	 * Mark state so that the workqueue thread stops forwarding
	 * received frames and link events. ISR and other threads
	 * that can queue work items will also stop creating work
	 * items on the snic workqueue
	 */
	snic_set_state(snic, SNIC_OFFLINE);
	spin_lock_irqsave(&snic->snic_lock, flags);
	snic->stop_link_events = 1;
	spin_unlock_irqrestore(&snic->snic_lock, flags);

	flush_workqueue(snic_glob->event_q);
	snic_disc_term(snic);

	spin_lock_irqsave(&snic->snic_lock, flags);
	snic->in_remove = 1;
	spin_unlock_irqrestore(&snic->snic_lock, flags);

	/*
	 * This stops the snic device, masks all interrupts, completed
	 * CQ entries are drained. Posted WQ/RQ/Copy-WQ entries are
	 * cleaned up
	 */
	snic_cleanup(snic);

	spin_lock_irqsave(&snic_glob->snic_list_lock, flags);
	list_del(&snic->list);
	spin_unlock_irqrestore(&snic_glob->snic_list_lock, flags);

	snic_tgt_del_all(snic);
#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
	snic_stats_debugfs_remove(snic);
#endif
	snic_del_host(snic->shost);

	svnic_dev_notify_unset(snic->vdev);
	snic_free_intr(snic);
	snic_free_vnic_res(snic);
	snic_clear_intr_mode(snic);
	svnic_dev_close(snic->vdev);
	svnic_dev_unregister(snic->vdev);
	snic_iounmap(snic);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	/* this frees Scsi_Host and snic memory (contiguous chunk) */
	scsi_host_put(snic->shost);
} /* end of snic_remove */

struct snic_global *snic_glob;

/*
 * snic_global_data_init: Initialize SNIC Global Data
 * Notes: All the global lists and variables should be part of global data;
 * this helps in debugging.
 */
static int
snic_global_data_init(void)
{
	int ret = 0;
	struct kmem_cache *cachep;
	ssize_t len = 0;

	snic_glob = kzalloc(sizeof(*snic_glob), GFP_KERNEL);

	if (!snic_glob) {
		SNIC_ERR("Failed to allocate Global Context.\n");

		ret = -ENOMEM;
		goto gdi_end;
	}

#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
	/* Debugfs related Initialization */
	/* Create debugfs entries for snic */
	snic_debugfs_init();

	/* Trace related Initialization */
	/* Allocate memory for trace buffer */
	ret = snic_trc_init();
	if (ret < 0) {
		SNIC_ERR("Trace buffer init failed, SNIC tracing disabled\n");
		snic_trc_free();
		/* continue even if it fails */
	}
#endif

	INIT_LIST_HEAD(&snic_glob->snic_list);
	spin_lock_init(&snic_glob->snic_list_lock);

	/* Create a cache for allocation of snic_host_req + default size ESGLs */
	len = sizeof(struct snic_req_info);
	len += sizeof(struct snic_host_req) + sizeof(struct snic_dflt_sgl);
	cachep = kmem_cache_create("snic_req_dfltsgl", len, SNIC_SG_DESC_ALIGN,
				   SLAB_HWCACHE_ALIGN, NULL);
	if (!cachep) {
		SNIC_ERR("Failed to create snic default sgl slab\n");
		ret = -ENOMEM;

		goto err_dflt_req_slab;
	}
	snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL] = cachep;

	/* Create a cache for allocation of max size Extended SGLs */
	len = sizeof(struct snic_req_info);
	len += sizeof(struct snic_host_req) + sizeof(struct snic_max_sgl);
	cachep = kmem_cache_create("snic_req_maxsgl", len, SNIC_SG_DESC_ALIGN,
				   SLAB_HWCACHE_ALIGN, NULL);
	if (!cachep) {
		SNIC_ERR("Failed to create snic max sgl slab\n");
		ret = -ENOMEM;

		goto err_max_req_slab;
	}
	snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL] = cachep;

	len = sizeof(struct snic_host_req);
	cachep = kmem_cache_create("snic_req_tm", len, SNIC_SG_DESC_ALIGN,
				   SLAB_HWCACHE_ALIGN, NULL);
	if (!cachep) {
		SNIC_ERR("Failed to create snic tm req slab\n");
		ret = -ENOMEM;

		goto err_tmreq_slab;
	}
	snic_glob->req_cache[SNIC_REQ_TM_CACHE] = cachep;

	/* snic_event queue */
	snic_glob->event_q =
		alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, "snic_event_wq");
	if (!snic_glob->event_q) {
		SNIC_ERR("snic event queue create failed\n");
		ret = -ENOMEM;

		goto err_eventq;
	}

	return ret;

err_eventq:
	kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_TM_CACHE]);

err_tmreq_slab:
	kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL]);

err_max_req_slab:
	kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL]);

err_dflt_req_slab:
#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
	snic_trc_free();
	snic_debugfs_term();
#endif
	kfree(snic_glob);
	snic_glob = NULL;

gdi_end:
	return ret;
} /* end of snic_glob_init */

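/*
 * Each request cache is sized for a request header plus its scatter-gather
 * payload: snic_req_info + snic_host_req + either the default or the
 * maximum SGL, while the task-management cache only needs a bare
 * snic_host_req. The per-host mempools created in snic_probe() draw from
 * these caches.
 */
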
/*
 * snic_global_data_cleanup : Frees SNIC Global Data
 */
static void
snic_global_data_cleanup(void)
{
	SNIC_BUG_ON(snic_glob == NULL);

	destroy_workqueue(snic_glob->event_q);
	kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_TM_CACHE]);
	kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL]);
	kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL]);

#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
	/* Freeing Trace Resources */
	snic_trc_free();

	/* Freeing Debugfs Resources */
	snic_debugfs_term();
#endif

	kfree(snic_glob);
	snic_glob = NULL;
} /* end of snic_glob_cleanup */

static struct pci_driver snic_driver = {
	.name = SNIC_DRV_NAME,
	.id_table = snic_id_table,
	.probe = snic_probe,
	.remove = snic_remove,
};

static int __init
snic_init_module(void)
{
	int ret = 0;

#ifndef __x86_64__
	SNIC_INFO("SNIC Driver is supported only for x86_64 platforms!\n");
	add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
#endif

	SNIC_INFO("%s, ver %s\n", SNIC_DRV_DESCRIPTION, SNIC_DRV_VERSION);

	ret = snic_global_data_init();
	if (ret) {
		SNIC_ERR("Failed to Initialize Global Data.\n");

		return ret;
	}

	ret = pci_register_driver(&snic_driver);
	if (ret < 0) {
		SNIC_ERR("PCI driver register error\n");

		goto err_pci_reg;
	}

	return ret;

err_pci_reg:
	snic_global_data_cleanup();

	return ret;
}

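/*
 * Global data (the request slab caches and the event workqueue) must exist
 * before pci_register_driver() is called, because snic_probe() can run from
 * within that call and immediately dereferences snic_glob; the exit path
 * below tears things down in the opposite order.
 */
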
static void __exit
snic_cleanup_module(void)
{
	pci_unregister_driver(&snic_driver);
	snic_global_data_cleanup();
}

module_init(snic_init_module);
module_exit(snic_cleanup_module);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION(SNIC_DRV_DESCRIPTION);
MODULE_VERSION(SNIC_DRV_VERSION);
MODULE_DEVICE_TABLE(pci, snic_id_table);
MODULE_AUTHOR("Narsimhulu Musini <nmusini@cisco.com>, "
	      "Sesidhar Baddela <sebaddel@cisco.com>");