/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017 Broadcom. All Rights Reserved. The term      *
 * "Broadcom" refers to Broadcom Limited and/or its subsidiaries.  *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/sched/signal.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_version.h"
#include "lpfc_vport.h"
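/**
 * lpfc_vport_set_state - Update the transport and driver state of a vport
 * @vport: The virtual port whose state is being changed.
 * @new_state: The fc_vport_state being set.
 *
 * Records the new state (and the previous one) in the associated fc_vport,
 * when one exists, and maps the fabric error states to LPFC_VPORT_FAILED
 * and FC_VPORT_LINKDOWN to LPFC_VPORT_UNKNOWN in the driver's port_state.
 **/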
inline void lpfc_vport_set_state(struct lpfc_vport *vport,
				 enum fc_vport_state new_state)
{
	struct fc_vport *fc_vport = vport->fc_vport;

	if (fc_vport) {
		/*
		 * When the transport defines fc_vport_set state we will replace
		 * this code with the following line
		 */
		/* fc_vport_set_state(fc_vport, new_state); */
		if (new_state != FC_VPORT_INITIALIZING)
			fc_vport->vport_last_state = fc_vport->vport_state;
		fc_vport->vport_state = new_state;
	}

	/* for all the error states we will set the internal state to FAILED */
	switch (new_state) {
	case FC_VPORT_NO_FABRIC_SUPP:
	case FC_VPORT_NO_FABRIC_RSCS:
	case FC_VPORT_FABRIC_LOGOUT:
	case FC_VPORT_FABRIC_REJ_WWN:
	case FC_VPORT_FAILED:
		vport->port_state = LPFC_VPORT_FAILED;
		break;
	case FC_VPORT_LINKDOWN:
		vport->port_state = LPFC_VPORT_UNKNOWN;
		break;
	default:
		/* do nothing */
		break;
	}
}
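/**
 * lpfc_alloc_vpi - Reserve a free VPI from the HBA's VPI bitmask
 * @phba: Pointer to the HBA context object.
 *
 * Returns the allocated VPI number, or 0 if no VPI is available. VPI zero
 * is reserved for the physical port and is never handed out here.
 **/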
int
lpfc_alloc_vpi(struct lpfc_hba *phba)
{
	unsigned long vpi;

	spin_lock_irq(&phba->hbalock);
	/* Start at bit 1 because vpi zero is reserved for the physical port */
	vpi = find_next_zero_bit(phba->vpi_bmask, (phba->max_vpi + 1), 1);
	if (vpi > phba->max_vpi)
		vpi = 0;
	else
		set_bit(vpi, phba->vpi_bmask);
	if (phba->sli_rev == LPFC_SLI_REV4)
		phba->sli4_hba.max_cfg_param.vpi_used++;
	spin_unlock_irq(&phba->hbalock);
	return vpi;
}
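/**
 * lpfc_free_vpi - Return a VPI to the HBA's VPI bitmask
 * @phba: Pointer to the HBA context object.
 * @vpi: The VPI to release; VPI zero (the physical port) is ignored.
 **/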
static void
lpfc_free_vpi(struct lpfc_hba *phba, int vpi)
{
	if (vpi == 0)
		return;
	spin_lock_irq(&phba->hbalock);
	clear_bit(vpi, phba->vpi_bmask);
	if (phba->sli_rev == LPFC_SLI_REV4)
		phba->sli4_hba.max_cfg_param.vpi_used--;
	spin_unlock_irq(&phba->hbalock);
}
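/**
 * lpfc_vport_sparm - Read the service parameters for a vport
 * @phba: Pointer to the HBA context object.
 * @vport: The virtual port being initialized.
 *
 * Issues a READ_SPARAM mailbox command and copies the returned service
 * parameters, node name and port name into the vport. Returns 0 on success,
 * -EINTR if the mailbox wait was interrupted by a signal, and -ENOMEM or
 * -EIO on other failures.
 **/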
static int
lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		return -ENOMEM;
	}
	mb = &pmb->u.mb;

	rc = lpfc_read_sparam(phba, pmb, vport->vpi);
	if (rc) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ENOMEM;
	}

	/*
	 * Grab buffer pointer and clear context1 so we can use
	 * lpfc_sli_issue_mbox_wait
	 */
	mp = (struct lpfc_dmabuf *) pmb->context1;
	pmb->context1 = NULL;

	pmb->vport = vport;
	rc = lpfc_sli_issue_mbox_wait(phba, pmb, phba->fc_ratov * 2);
	if (rc != MBX_SUCCESS) {
		if (signal_pending(current)) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT | LOG_VPORT,
					 "1830 Signal aborted mbxCmd x%x\n",
					 mb->mbxCommand);
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
			if (rc != MBX_TIMEOUT)
				mempool_free(pmb, phba->mbox_mem_pool);
			return -EINTR;
		} else {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT | LOG_VPORT,
					 "1818 VPort failed init, mbxCmd x%x "
					 "READ_SPARM mbxStatus x%x, rc = x%x\n",
					 mb->mbxCommand, mb->mbxStatus, rc);
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
			if (rc != MBX_TIMEOUT)
				mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}

	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
	memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
	       sizeof (struct lpfc_name));
	memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
	       sizeof (struct lpfc_name));

	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}
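/**
 * lpfc_valid_wwn_format - Validate the format of a world wide name
 * @phba: Pointer to the HBA context object.
 * @wwn: The world wide name to check.
 * @name_type: "WWNN" or "WWPN", used only in the error log message.
 *
 * Returns 1 if the name is acceptable, or 0 (after logging the rejected
 * name) when an IEEE format 1 (NAA = 1) address carries non-zero bits in
 * the fields that the check below expects to be zero.
 **/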
static int
lpfc_valid_wwn_format(struct lpfc_hba *phba, struct lpfc_name *wwn,
		      const char *name_type)
{
	/* ensure that IEEE format 1 addresses
	 * contain zeros in bits 59-48
	 */
	if (!((wwn->u.wwn[0] >> 4) == 1 &&
	      ((wwn->u.wwn[0] & 0xf) != 0 || (wwn->u.wwn[1] & 0xf) != 0)))
		return 1;

	lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
			"1822 Invalid %s: %02x:%02x:%02x:%02x:"
			"%02x:%02x:%02x:%02x\n",
			name_type,
			wwn->u.wwn[0], wwn->u.wwn[1],
			wwn->u.wwn[2], wwn->u.wwn[3],
			wwn->u.wwn[4], wwn->u.wwn[5],
			wwn->u.wwn[6], wwn->u.wwn[7]);
	return 0;
}
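/**
 * lpfc_unique_wwpn - Check that a new vport's WWPN is unique on the HBA
 * @phba: Pointer to the HBA context object.
 * @new_vport: The vport whose port name is being checked.
 *
 * Walks the HBA's port list under hbalock and returns 0 if any other port
 * already uses the same WWPN, or 1 if the name is unique.
 **/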
static int
lpfc_unique_wwpn(struct lpfc_hba *phba, struct lpfc_vport *new_vport)
{
	struct lpfc_vport *vport;
	unsigned long flags;

	spin_lock_irqsave(&phba->hbalock, flags);
	list_for_each_entry(vport, &phba->port_list, listentry) {
		if (vport == new_vport)
			continue;
		/* If they match, return not unique */
		if (memcmp(&vport->fc_sparam.portName,
			   &new_vport->fc_sparam.portName,
			   sizeof(struct lpfc_name)) == 0) {
			spin_unlock_irqrestore(&phba->hbalock, flags);
			return 0;
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);
	return 1;
}
/**
 * lpfc_discovery_wait - Wait for driver discovery to quiesce
 * @vport: The virtual port for which this call is being executed.
 *
 * This driver calls this routine specifically from lpfc_vport_delete
 * to enforce a synchronous execution of vport delete relative to
 * discovery activities.  The lpfc_vport_delete routine should not return
 * until it can reasonably guarantee that discovery has quiesced.
 * Post FDISC LOGO, the driver must wait until its SAN teardown is
 * complete and all resources recovered before allowing cleanup.
 *
 * This routine does not require any locks held.
 **/
static void lpfc_discovery_wait(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	uint32_t wait_flags = 0;
	unsigned long wait_time_max;
	unsigned long start_time;

	wait_flags = FC_RSCN_MODE | FC_RSCN_DISCOVERY | FC_NLP_MORE |
		     FC_RSCN_DEFERRED | FC_NDISC_ACTIVE | FC_DISC_TMO;

	/*
	 * The time constraint on this loop is a balance between the
	 * fabric RA_TOV value and dev_loss tmo.  The driver's
	 * devloss_tmo is 10 giving this loop a 3x multiplier minimally.
	 */
	wait_time_max = msecs_to_jiffies(((phba->fc_ratov * 3) + 3) * 1000);
	wait_time_max += jiffies;
	start_time = jiffies;
	while (time_before(jiffies, wait_time_max)) {
		if ((vport->num_disc_nodes > 0) ||
		    (vport->fc_flag & wait_flags) ||
		    ((vport->port_state > LPFC_VPORT_FAILED) &&
		     (vport->port_state < LPFC_VPORT_READY))) {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT,
					 "1833 Vport discovery quiesce Wait:"
					 " state x%x fc_flags x%x"
					 " num_nodes x%x, waiting 1000 msecs"
					 " total wait msecs x%x\n",
					 vport->port_state, vport->fc_flag,
					 vport->num_disc_nodes,
					 jiffies_to_msecs(jiffies - start_time));
			msleep(1000);
		} else {
			/* Base case.  Wait variants satisfied.  Break out */
			lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT,
					 "1834 Vport discovery quiesced:"
					 " state x%x fc_flags x%x"
					 " wait msecs x%x\n",
					 vport->port_state, vport->fc_flag,
					 jiffies_to_msecs(jiffies
						- start_time));
			break;
		}
	}

	if (time_after(jiffies, wait_time_max))
		lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
				 "1835 Vport discovery quiesce failed:"
				 " state x%x fc_flags x%x wait msecs x%x\n",
				 vport->port_state, vport->fc_flag,
				 jiffies_to_msecs(jiffies - start_time));
}
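/**
 * lpfc_vport_create - Entry point used by the FC transport to create a vport
 * @fc_vport: The fc_vport allocated by the FC transport layer.
 * @disable: True when the vport should be created in the disabled state.
 *
 * Allocates a VPI, creates the SCSI host for the new virtual port, reads and
 * validates its service parameters and WWNs and, when the fabric supports
 * NPIV and the link is up, starts discovery with an initial FDISC. Returns
 * VPORT_OK on success or a VPORT_* error code on failure.
 **/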
int
lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
{
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost = fc_vport->shost;
	struct lpfc_vport *pport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = pport->phba;
	struct lpfc_vport *vport = NULL;
	int instance;
	int vpi;
	int rc = VPORT_ERROR;
	int status;

	if ((phba->sli_rev < 3) || !(phba->cfg_enable_npiv)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
				"1808 Create VPORT failed: "
				"NPIV is not enabled: SLImode:%d\n",
				phba->sli_rev);
		rc = VPORT_INVAL;
		goto error_out;
	}

	/* NPIV is not supported if HBA has NVME enabled */
	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
				"3189 Create VPORT failed: "
				"NPIV is not supported on NVME\n");
		rc = VPORT_INVAL;
		goto error_out;
	}

	vpi = lpfc_alloc_vpi(phba);
	if (vpi == 0) {
		lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
				"1809 Create VPORT failed: "
				"Max VPORTs (%d) exceeded\n",
				phba->max_vpi);
		rc = VPORT_NORESOURCES;
		goto error_out;
	}

	/* Assign an unused board number */
	if ((instance = lpfc_get_instance()) < 0) {
		lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
				"1810 Create VPORT failed: Cannot get "
				"instance number\n");
		lpfc_free_vpi(phba, vpi);
		rc = VPORT_NORESOURCES;
		goto error_out;
	}

	vport = lpfc_create_port(phba, instance, &fc_vport->dev);
	if (!vport) {
		lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
				"1811 Create VPORT failed: vpi x%x\n", vpi);
		lpfc_free_vpi(phba, vpi);
		rc = VPORT_NORESOURCES;
		goto error_out;
	}

	vport->vpi = vpi;
	lpfc_debugfs_initialize(vport);

	if ((status = lpfc_vport_sparm(phba, vport))) {
		if (status == -EINTR) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
					 "1831 Create VPORT Interrupted.\n");
			rc = VPORT_ERROR;
		} else {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
					 "1813 Create VPORT failed. "
					 "Cannot get sparam\n");
			rc = VPORT_NORESOURCES;
		}
		lpfc_free_vpi(phba, vpi);
		destroy_port(vport);
		goto error_out;
	}

	u64_to_wwn(fc_vport->node_name, vport->fc_nodename.u.wwn);
	u64_to_wwn(fc_vport->port_name, vport->fc_portname.u.wwn);

	memcpy(&vport->fc_sparam.portName, vport->fc_portname.u.wwn, 8);
	memcpy(&vport->fc_sparam.nodeName, vport->fc_nodename.u.wwn, 8);

	if (!lpfc_valid_wwn_format(phba, &vport->fc_sparam.nodeName, "WWNN") ||
	    !lpfc_valid_wwn_format(phba, &vport->fc_sparam.portName, "WWPN")) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
				 "1821 Create VPORT failed. "
				 "Invalid WWN format\n");
		lpfc_free_vpi(phba, vpi);
		destroy_port(vport);
		rc = VPORT_INVAL;
		goto error_out;
	}

	if (!lpfc_unique_wwpn(phba, vport)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
				 "1823 Create VPORT failed. "
				 "Duplicate WWN on HBA\n");
		lpfc_free_vpi(phba, vpi);
		destroy_port(vport);
		rc = VPORT_INVAL;
		goto error_out;
	}

	/* Create binary sysfs attribute for vport */
	lpfc_alloc_sysfs_attr(vport);

	/* Set the DFT_LUN_Q_DEPTH accordingly */
	vport->cfg_lun_queue_depth = phba->pport->cfg_lun_queue_depth;

	*(struct lpfc_vport **)fc_vport->dd_data = vport;
	vport->fc_vport = fc_vport;

	/* At this point we are fully registered with SCSI Layer.  */
	vport->load_flag |= FC_ALLOW_FDMI;
	if (phba->cfg_enable_SmartSAN ||
	    (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {
		/* Setup appropriate attribute masks */
		vport->fdmi_hba_mask = phba->pport->fdmi_hba_mask;
		vport->fdmi_port_mask = phba->pport->fdmi_port_mask;
	}

	if ((phba->nvmet_support == 0) &&
	    ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
	     (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME))) {
		/* Create NVME binding with nvme_fc_transport. This
		 * ensures the vport is initialized.
		 */
		rc = lpfc_nvme_create_localport(vport);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"6003 %s status x%x\n",
					"NVME registration failed, ",
					rc);
			goto error_out;
		}
	}

	/*
	 * In SLI4, the vpi must be activated before it can be used
	 * by the port.
	 */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    (pport->fc_flag & FC_VFI_REGISTERED)) {
		rc = lpfc_sli4_init_vpi(vport);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
					"1838 Failed to INIT_VPI on vpi %d "
					"status %d\n", vpi, rc);
			rc = VPORT_NORESOURCES;
			lpfc_free_vpi(phba, vpi);
			goto error_out;
		}
	} else if (phba->sli_rev == LPFC_SLI_REV4) {
		/*
		 * Driver cannot INIT_VPI now. Set the flags to
		 * init_vpi when reg_vfi complete.
		 */
		vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
		lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
		rc = VPORT_OK;
		goto out;
	}

	if ((phba->link_state < LPFC_LINK_UP) ||
	    (pport->port_state < LPFC_FABRIC_CFG_LINK) ||
	    (phba->fc_topology == LPFC_TOPOLOGY_LOOP)) {
		lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
		rc = VPORT_OK;
		goto out;
	}

	if (disable) {
		lpfc_vport_set_state(vport, FC_VPORT_DISABLED);
		rc = VPORT_OK;
		goto out;
	}

	/* Use the Physical nodes Fabric NDLP to determine if the link is
	 * up and ready to FDISC.
	 */
	ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
	if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
	    ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
		if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) {
			lpfc_set_disctmo(vport);
			lpfc_initial_fdisc(vport);
		} else {
			lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
					 "0262 No NPIV Fabric support\n");
		}
	} else {
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	}
	rc = VPORT_OK;

out:
	lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
			 "1825 Vport Created.\n");
	lpfc_host_attrib_init(lpfc_shost_from_vport(vport));
error_out:
	return rc;
}
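/**
 * disable_vport - Take an existing vport offline
 * @fc_vport: The FC transport vport being disabled.
 *
 * Logs out from the fabric when a usable Fabric ndlp exists and the link is
 * up, tears down the vport's remote ports and RPIs, unregisters the VPI and
 * marks the vport FC_VPORT_DISABLED. Always returns VPORT_OK.
 **/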
static int
disable_vport(struct fc_vport *fc_vport)
{
	struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
	long timeout;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (ndlp && NLP_CHK_NODE_ACT(ndlp)
	    && phba->link_state >= LPFC_LINK_UP) {
		vport->unreg_vpi_cmpl = VPORT_INVAL;
		timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
		if (!lpfc_issue_els_npiv_logo(vport, ndlp))
			while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout)
				timeout = schedule_timeout(timeout);
	}

	lpfc_sli_host_down(vport);

	/* Mark all nodes for discovery so we can remove them by
	 * calling lpfc_cleanup_rpis(vport, 1)
	 */
	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp))
			continue;
		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
			continue;
		lpfc_disc_state_machine(vport, ndlp, NULL,
					NLP_EVT_DEVICE_RECOVERY);
	}
	lpfc_cleanup_rpis(vport, 1);

	lpfc_stop_vport_timers(vport);
	lpfc_unreg_all_rpis(vport);
	lpfc_unreg_default_rpis(vport);
	/*
	 * Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi) does the
	 * scsi_host_put() to release the vport.
	 */
	lpfc_mbx_unreg_vpi(vport);
	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
	spin_unlock_irq(shost->host_lock);

	lpfc_vport_set_state(vport, FC_VPORT_DISABLED);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
			 "1826 Vport Disabled.\n");
	return VPORT_OK;
}
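/**
 * enable_vport - Bring a previously disabled vport back online
 * @fc_vport: The FC transport vport being enabled.
 *
 * If the link is down or in loop topology the vport is simply marked
 * FC_VPORT_LINKDOWN. Otherwise the VPI is (re)initialized or re-registered
 * as needed and, when the fabric supports NPIV, discovery is restarted with
 * an FDISC. Always returns VPORT_OK.
 **/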
static int
enable_vport(struct fc_vport *fc_vport)
{
	struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_nodelist *ndlp = NULL;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if ((phba->link_state < LPFC_LINK_UP) ||
	    (phba->fc_topology == LPFC_TOPOLOGY_LOOP)) {
		lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
		return VPORT_OK;
	}

	spin_lock_irq(shost->host_lock);
	vport->load_flag |= FC_LOADING;
	if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI) {
		spin_unlock_irq(shost->host_lock);
		lpfc_issue_init_vpi(vport);
		goto out;
	}

	vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
	spin_unlock_irq(shost->host_lock);

	/* Use the Physical nodes Fabric NDLP to determine if the link is
	 * up and ready to FDISC.
	 */
	ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
	if (ndlp && NLP_CHK_NODE_ACT(ndlp)
	    && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
		if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) {
			lpfc_set_disctmo(vport);
			lpfc_initial_fdisc(vport);
		} else {
			lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
					 "0264 No NPIV Fabric support\n");
		}
	} else {
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	}

out:
	lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
			 "1827 Vport Enabled.\n");
	return VPORT_OK;
}
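/**
 * lpfc_vport_disable - FC transport entry point to enable or disable a vport
 * @fc_vport: The FC transport vport.
 * @disable: True to disable the vport, false to enable it.
 **/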
int
lpfc_vport_disable(struct fc_vport *fc_vport, bool disable)
{
	if (disable)
		return disable_vport(fc_vport);
	else
		return enable_vport(fc_vport);
}
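/**
 * lpfc_vport_delete - Entry point used by the FC transport to delete a vport
 * @fc_vport: The FC transport vport being deleted.
 *
 * Rejects deletion of the physical port and of static vports, waits for any
 * in-progress discovery, removes the SCSI and FC hosts, performs DA_ID and
 * fabric LOGO cleanup when appropriate, unregisters the VPI and releases the
 * vport's resources. Returns VPORT_OK on success, or a VPORT_* code or
 * -EAGAIN otherwise.
 **/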
int
lpfc_vport_delete(struct fc_vport *fc_vport)
{
	struct lpfc_nodelist *ndlp = NULL;
	struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba   *phba = vport->phba;
	long timeout;
	bool ns_ndlp_referenced = false;

	if (vport->port_type == LPFC_PHYSICAL_PORT) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
				 "1812 vport_delete failed: Cannot delete "
				 "physical host\n");
		return VPORT_ERROR;
	}

	/* If the vport is a static vport fail the deletion. */
	if ((vport->vport_flag & STATIC_VPORT) &&
	    !(phba->pport->load_flag & FC_UNLOADING)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
				 "1837 vport_delete failed: Cannot delete "
				 "static vport.\n");
		return VPORT_ERROR;
	}
	spin_lock_irq(&phba->hbalock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(&phba->hbalock);
	/*
	 * If we are not unloading the driver then prevent the vport_delete
	 * from happening until after this vport's discovery is finished.
	 */
	if (!(phba->pport->load_flag & FC_UNLOADING)) {
		int check_count = 0;
		while (check_count < ((phba->fc_ratov * 3) + 3) &&
		       vport->port_state > LPFC_VPORT_FAILED &&
		       vport->port_state < LPFC_VPORT_READY) {
			check_count++;
			msleep(1000);
		}
		if (vport->port_state > LPFC_VPORT_FAILED &&
		    vport->port_state < LPFC_VPORT_READY)
			return -EAGAIN;
	}

	/*
	 * This is a bit of a mess.  We want to ensure the shost doesn't get
	 * torn down until we're done with the embedded lpfc_vport structure.
	 *
	 * Beyond holding a reference for this function, we also need a
	 * reference for outstanding I/O requests we schedule during delete
	 * processing.  But once we scsi_remove_host() we can no longer obtain
	 * a reference through scsi_host_get().
	 *
	 * So we take two references here.  We release one reference at the
	 * bottom of the function -- after delinking the vport.  And we
	 * release the other at the completion of the unreg_vpi that gets
	 * initiated after we've disposed of all other resources associated
	 * with the port.
	 */
	if (!scsi_host_get(shost))
		return VPORT_INVAL;
	if (!scsi_host_get(shost)) {
		scsi_host_put(shost);
		return VPORT_INVAL;
	}
	lpfc_free_sysfs_attr(vport);

	lpfc_debugfs_terminate(vport);

	/*
	 * The call to fc_remove_host might release the NameServer ndlp. Since
	 * we might need to use the ndlp to send the DA_ID CT command,
	 * increment the reference for the NameServer ndlp to prevent it from
	 * being released.
	 */
	ndlp = lpfc_findnode_did(vport, NameServer_DID);
	if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
		lpfc_nlp_get(ndlp);
		ns_ndlp_referenced = true;
	}

	/* Remove FC host and then SCSI host with the vport */
	fc_remove_host(shost);
	scsi_remove_host(shost);

	ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);

	/* In case of driver unload, we shall not perform fabric logo as the
	 * worker thread already stopped at this stage and, in this case, we
	 * can safely skip the fabric logo.
	 */
	if (phba->pport->load_flag & FC_UNLOADING) {
		if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
		    ndlp->nlp_state == NLP_STE_UNMAPPED_NODE &&
		    phba->link_state >= LPFC_LINK_UP) {
			/* First look for the Fabric ndlp */
			ndlp = lpfc_findnode_did(vport, Fabric_DID);
			if (!ndlp)
				goto skip_logo;
			else if (!NLP_CHK_NODE_ACT(ndlp)) {
				ndlp = lpfc_enable_node(vport, ndlp,
							NLP_STE_UNUSED_NODE);
				if (!ndlp)
					goto skip_logo;
			}
			/* Remove ndlp from vport ndlp list */
			lpfc_dequeue_node(vport, ndlp);

			/* Indicate free memory when release */
			spin_lock_irq(&phba->ndlp_lock);
			NLP_SET_FREE_REQ(ndlp);
			spin_unlock_irq(&phba->ndlp_lock);
			/* Kick off release ndlp when it can be safely done */
			lpfc_nlp_put(ndlp);
		}
		goto skip_logo;
	}

	/* Otherwise, we will perform fabric logo as needed */
	if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
	    ndlp->nlp_state == NLP_STE_UNMAPPED_NODE &&
	    phba->link_state >= LPFC_LINK_UP &&
	    phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
		if (vport->cfg_enable_da_id) {
			timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
			if (!lpfc_ns_cmd(vport, SLI_CTNS_DA_ID, 0, 0))
				while (vport->ct_flags && timeout)
					timeout = schedule_timeout(timeout);
			else
				lpfc_printf_log(vport->phba, KERN_WARNING,
						LOG_VPORT,
						"1829 CT command failed to "
						"delete objects on fabric\n");
		}
		/* First look for the Fabric ndlp */
		ndlp = lpfc_findnode_did(vport, Fabric_DID);
		if (!ndlp) {
			/* Cannot find existing Fabric ndlp, allocate one */
			ndlp = lpfc_nlp_init(vport, Fabric_DID);
			if (!ndlp)
				goto skip_logo;
			/* Indicate free memory when release */
			NLP_SET_FREE_REQ(ndlp);
		} else {
			if (!NLP_CHK_NODE_ACT(ndlp)) {
				ndlp = lpfc_enable_node(vport, ndlp,
							NLP_STE_UNUSED_NODE);
				if (!ndlp)
					goto skip_logo;
			}

			/* Remove ndlp from vport list */
			lpfc_dequeue_node(vport, ndlp);
			spin_lock_irq(&phba->ndlp_lock);
			if (!NLP_CHK_FREE_REQ(ndlp))
				/* Indicate free memory when release */
				NLP_SET_FREE_REQ(ndlp);
			else {
				/* Skip this if ndlp is already in free mode */
				spin_unlock_irq(&phba->ndlp_lock);
				goto skip_logo;
			}
			spin_unlock_irq(&phba->ndlp_lock);
		}

		/*
		 * If the vpi is not registered, then a valid FDISC doesn't
		 * exist and there is no need for an ELS LOGO.  Just cleanup
		 * the ndlp.
		 */
		if (!(vport->vpi_state & LPFC_VPI_REGISTERED)) {
			lpfc_nlp_put(ndlp);
			goto skip_logo;
		}

		vport->unreg_vpi_cmpl = VPORT_INVAL;
		timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
		if (!lpfc_issue_els_npiv_logo(vport, ndlp))
			while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout)
				timeout = schedule_timeout(timeout);
	}

	if (!(phba->pport->load_flag & FC_UNLOADING))
		lpfc_discovery_wait(vport);

skip_logo:

	/*
	 * If the NameServer ndlp has been incremented to allow the DA_ID CT
	 * command to be sent, decrement the ndlp now.
	 */
	if (ns_ndlp_referenced) {
		ndlp = lpfc_findnode_did(vport, NameServer_DID);
		lpfc_nlp_put(ndlp);
	}

	lpfc_cleanup(vport);
	lpfc_sli_host_down(vport);

	lpfc_stop_vport_timers(vport);

	if (!(phba->pport->load_flag & FC_UNLOADING)) {
		lpfc_unreg_all_rpis(vport);
		lpfc_unreg_default_rpis(vport);
		/*
		 * Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi)
		 * does the scsi_host_put() to release the vport.
		 */
		if (!(vport->vpi_state & LPFC_VPI_REGISTERED) ||
		    lpfc_mbx_unreg_vpi(vport))
			scsi_host_put(shost);
	} else
		scsi_host_put(shost);

	lpfc_free_vpi(phba, vport->vpi);
	vport->work_port_events = 0;
	spin_lock_irq(&phba->hbalock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
			 "1828 Vport Deleted.\n");
	scsi_host_put(shost);
	return VPORT_OK;
}
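/**
 * lpfc_create_vport_work_array - Build a NULL-terminated array of active vports
 * @phba: Pointer to the HBA context object.
 *
 * Takes a SCSI host reference on every vport that is not unloading and
 * returns the vports in a newly allocated, NULL-terminated array. The caller
 * must release the array with lpfc_destroy_vport_work_array(). Returns NULL
 * if the allocation fails.
 **/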
struct lpfc_vport **
lpfc_create_vport_work_array(struct lpfc_hba *phba)
{
	struct lpfc_vport *port_iterator;
	struct lpfc_vport **vports;
	int index = 0;
	vports = kcalloc(phba->max_vports + 1, sizeof(struct lpfc_vport *),
			 GFP_KERNEL);
	if (vports == NULL)
		return NULL;
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry(port_iterator, &phba->port_list, listentry) {
		if (port_iterator->load_flag & FC_UNLOADING)
			continue;
		if (!scsi_host_get(lpfc_shost_from_vport(port_iterator))) {
			lpfc_printf_vlog(port_iterator, KERN_ERR, LOG_VPORT,
					 "1801 Create vport work array FAILED: "
					 "cannot do scsi_host_get\n");
			continue;
		}
		vports[index++] = port_iterator;
	}
	spin_unlock_irq(&phba->hbalock);
	return vports;
}
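/**
 * lpfc_destroy_vport_work_array - Release a vport work array
 * @phba: Pointer to the HBA context object.
 * @vports: Array returned by lpfc_create_vport_work_array(), may be NULL.
 *
 * Drops the SCSI host reference taken for each entry and frees the array.
 **/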
void
lpfc_destroy_vport_work_array(struct lpfc_hba *phba, struct lpfc_vport **vports)
{
	int i;
	if (vports == NULL)
		return;
	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
		scsi_host_put(lpfc_shost_from_vport(vports[i]));
	kfree(vports);
}
/**
 * lpfc_vport_reset_stat_data - Reset the statistical data for the vport
 * @vport: Pointer to vport object.
 *
 * This function resets the statistical data for the vport. It is called
 * with the host_lock held.
 **/
void
lpfc_vport_reset_stat_data(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;

	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp))
			continue;
		if (ndlp->lat_data)
			memset(ndlp->lat_data, 0, LPFC_MAX_BUCKET_COUNT *
				sizeof(struct lpfc_scsicmd_bkt));
	}
}
/**
 * lpfc_alloc_bucket - Allocate data buffers required for statistical data
 * @vport: Pointer to vport object.
 *
 * This function allocates the data buffers required for all the FC
 * nodes of the vport to collect statistical data.
 **/
void
lpfc_alloc_bucket(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;

	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp))
			continue;

		kfree(ndlp->lat_data);
		ndlp->lat_data = NULL;

		if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
			ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT,
					 sizeof(struct lpfc_scsicmd_bkt),
					 GFP_ATOMIC);

			if (!ndlp->lat_data)
				lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
					"0287 lpfc_alloc_bucket failed to "
					"allocate statistical data buffer DID "
					"0x%x\n", ndlp->nlp_DID);
		}
	}
}
/**
 * lpfc_free_bucket - Free data buffers required for statistical data
 * @vport: Pointer to vport object.
 *
 * This function frees the statistical data buffers of all the FC
 * nodes of the vport.
 **/
void
lpfc_free_bucket(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;

	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp))
			continue;

		kfree(ndlp->lat_data);
		ndlp->lat_data = NULL;
	}
}