drivers/scsi/lpfc/lpfc_els.c
1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
8 * *
9 * This program is free software; you can redistribute it and/or *
10 * modify it under the terms of version 2 of the GNU General *
11 * Public License as published by the Free Software Foundation. *
12 * This program is distributed in the hope that it will be useful. *
13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17 * TO BE LEGALLY INVALID. See the GNU General Public License for *
18 * more details, a copy of which can be found in the file COPYING *
19 * included with this package. *
20 *******************************************************************/
21 /* See Fibre Channel protocol T11 FC-LS for details */
22 #include <linux/blkdev.h>
23 #include <linux/pci.h>
24 #include <linux/interrupt.h>
26 #include <scsi/scsi.h>
27 #include <scsi/scsi_device.h>
28 #include <scsi/scsi_host.h>
29 #include <scsi/scsi_transport_fc.h>
31 #include "lpfc_hw.h"
32 #include "lpfc_sli.h"
33 #include "lpfc_disc.h"
34 #include "lpfc_scsi.h"
35 #include "lpfc.h"
36 #include "lpfc_logmsg.h"
37 #include "lpfc_crtn.h"
38 #include "lpfc_vport.h"
39 #include "lpfc_debugfs.h"
41 static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *,
42 struct lpfc_iocbq *);
43 static void lpfc_cmpl_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *,
44 struct lpfc_iocbq *);
45 static void lpfc_fabric_abort_vport(struct lpfc_vport *vport);
46 static int lpfc_issue_els_fdisc(struct lpfc_vport *vport,
47 struct lpfc_nodelist *ndlp, uint8_t retry);
48 static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba,
49 struct lpfc_iocbq *iocb);
50 static void lpfc_register_new_vport(struct lpfc_hba *phba,
51 struct lpfc_vport *vport,
52 struct lpfc_nodelist *ndlp);
54 static int lpfc_max_els_tries = 3;
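/*
 * lpfc_els_chk_latt - Check for a pending host link attention event.
 * Reads the Host Attention register; if a link attention (HA_LATT) is
 * pending during discovery, mark the vport for discovery abort, issue a
 * CLEAR_LA if one is not already outstanding, and return 1.  Returns 0
 * when no link event needs to interrupt ELS processing.
 */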
56 int
57 lpfc_els_chk_latt(struct lpfc_vport *vport)
59 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
60 struct lpfc_hba *phba = vport->phba;
61 uint32_t ha_copy;
63 if (vport->port_state >= LPFC_VPORT_READY ||
64 phba->link_state == LPFC_LINK_DOWN)
65 return 0;
67 /* Read the HBA Host Attention Register */
68 ha_copy = readl(phba->HAregaddr);
70 if (!(ha_copy & HA_LATT))
71 return 0;
73 /* Pending Link Event during Discovery */
74 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
75 "0237 Pending Link Event during "
76 "Discovery: State x%x\n",
77 phba->pport->port_state);
79 /* CLEAR_LA should re-enable link attention events and
    80  * we should then immediately take a LATT event. The
81 * LATT processing should call lpfc_linkdown() which
82 * will cleanup any left over in-progress discovery
83 * events.
85 spin_lock_irq(shost->host_lock);
86 vport->fc_flag |= FC_ABORT_DISCOVERY;
87 spin_unlock_irq(shost->host_lock);
89 if (phba->link_state != LPFC_CLEAR_LA)
90 lpfc_issue_clear_la(phba, vport);
92 return 1;
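/*
 * lpfc_prep_els_iocb - Allocate and prepare an ELS command or response iocb.
 * Allocates the iocb plus DMA buffers for the command payload, the optional
 * response payload (when expectRsp is set) and the buffer pointer list (BPL),
 * fills in the IOCB fields (BDEs, DID, timeout, class, VPI when NPIV is
 * enabled) and takes a reference on the ndlp.  Returns the prepared iocb,
 * or NULL on any allocation failure.
 */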
95 static struct lpfc_iocbq *
96 lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
97 uint16_t cmdSize, uint8_t retry,
98 struct lpfc_nodelist *ndlp, uint32_t did,
99 uint32_t elscmd)
101 struct lpfc_hba *phba = vport->phba;
102 struct lpfc_iocbq *elsiocb;
103 struct lpfc_dmabuf *pcmd, *prsp, *pbuflist;
104 struct ulp_bde64 *bpl;
105 IOCB_t *icmd;
108 if (!lpfc_is_link_up(phba))
109 return NULL;
111 /* Allocate buffer for command iocb */
112 elsiocb = lpfc_sli_get_iocbq(phba);
114 if (elsiocb == NULL)
115 return NULL;
117 icmd = &elsiocb->iocb;
119 /* fill in BDEs for command */
120 /* Allocate buffer for command payload */
121 pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
122 if (pcmd)
123 pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
124 if (!pcmd || !pcmd->virt)
125 goto els_iocb_free_pcmb_exit;
127 INIT_LIST_HEAD(&pcmd->list);
129 /* Allocate buffer for response payload */
130 if (expectRsp) {
131 prsp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
132 if (prsp)
133 prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
134 &prsp->phys);
135 if (!prsp || !prsp->virt)
136 goto els_iocb_free_prsp_exit;
137 INIT_LIST_HEAD(&prsp->list);
138 } else
139 prsp = NULL;
141 /* Allocate buffer for Buffer ptr list */
142 pbuflist = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
143 if (pbuflist)
144 pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
145 &pbuflist->phys);
146 if (!pbuflist || !pbuflist->virt)
147 goto els_iocb_free_pbuf_exit;
149 INIT_LIST_HEAD(&pbuflist->list);
151 icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
152 icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
153 icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BDL;
154 icmd->un.elsreq64.remoteID = did; /* DID */
155 if (expectRsp) {
156 icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
157 icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
158 icmd->ulpTimeout = phba->fc_ratov * 2;
159 } else {
160 icmd->un.elsreq64.bdl.bdeSize = sizeof(struct ulp_bde64);
161 icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
163 icmd->ulpBdeCount = 1;
164 icmd->ulpLe = 1;
165 icmd->ulpClass = CLASS3;
167 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
168 icmd->un.elsreq64.myID = vport->fc_myDID;
170 /* For ELS_REQUEST64_CR, use the VPI by default */
171 icmd->ulpContext = vport->vpi;
172 icmd->ulpCt_h = 0;
173 icmd->ulpCt_l = 1;
176 bpl = (struct ulp_bde64 *) pbuflist->virt;
177 bpl->addrLow = le32_to_cpu(putPaddrLow(pcmd->phys));
178 bpl->addrHigh = le32_to_cpu(putPaddrHigh(pcmd->phys));
179 bpl->tus.f.bdeSize = cmdSize;
180 bpl->tus.f.bdeFlags = 0;
181 bpl->tus.w = le32_to_cpu(bpl->tus.w);
183 if (expectRsp) {
184 bpl++;
185 bpl->addrLow = le32_to_cpu(putPaddrLow(prsp->phys));
186 bpl->addrHigh = le32_to_cpu(putPaddrHigh(prsp->phys));
187 bpl->tus.f.bdeSize = FCELSSIZE;
188 bpl->tus.f.bdeFlags = BUFF_USE_RCV;
189 bpl->tus.w = le32_to_cpu(bpl->tus.w);
192 /* prevent preparing iocb with NULL ndlp reference */
193 elsiocb->context1 = lpfc_nlp_get(ndlp);
194 if (!elsiocb->context1)
195 goto els_iocb_free_pbuf_exit;
196 elsiocb->context2 = pcmd;
197 elsiocb->context3 = pbuflist;
198 elsiocb->retry = retry;
199 elsiocb->vport = vport;
200 elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT;
202 if (prsp) {
203 list_add(&prsp->list, &pcmd->list);
205 if (expectRsp) {
206 /* Xmit ELS command <elsCmd> to remote NPORT <did> */
207 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
208 "0116 Xmit ELS command x%x to remote "
209 "NPORT x%x I/O tag: x%x, port state: x%x\n",
210 elscmd, did, elsiocb->iotag,
211 vport->port_state);
212 } else {
213 /* Xmit ELS response <elsCmd> to remote NPORT <did> */
214 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
215 "0117 Xmit ELS response x%x to remote "
216 "NPORT x%x I/O tag: x%x, size: x%x\n",
217 elscmd, ndlp->nlp_DID, elsiocb->iotag,
218 cmdSize);
220 return elsiocb;
222 els_iocb_free_pbuf_exit:
223 lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
224 kfree(pbuflist);
226 els_iocb_free_prsp_exit:
227 lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
228 kfree(prsp);
230 els_iocb_free_pcmb_exit:
231 kfree(pcmd);
232 lpfc_sli_release_iocbq(phba, elsiocb);
233 return NULL;
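/*
 * lpfc_issue_fabric_reglogin - Register the fabric login after FLOGI.
 * Issues a CONFIG_LINK mailbox command followed by a REG_LOGIN for the
 * Fabric DID, using the fabric service parameters saved from the FLOGI
 * response.  On any failure the vport is set to FC_VPORT_FAILED and
 * -ENXIO is returned.
 */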
236 static int
237 lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
239 struct lpfc_hba *phba = vport->phba;
240 LPFC_MBOXQ_t *mbox;
241 struct lpfc_dmabuf *mp;
242 struct lpfc_nodelist *ndlp;
243 struct serv_parm *sp;
244 int rc;
245 int err = 0;
247 sp = &phba->fc_fabparam;
248 ndlp = lpfc_findnode_did(vport, Fabric_DID);
249 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
250 err = 1;
251 goto fail;
254 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
255 if (!mbox) {
256 err = 2;
257 goto fail;
260 vport->port_state = LPFC_FABRIC_CFG_LINK;
261 lpfc_config_link(phba, mbox);
262 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
263 mbox->vport = vport;
265 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
266 if (rc == MBX_NOT_FINISHED) {
267 err = 3;
268 goto fail_free_mbox;
271 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
272 if (!mbox) {
273 err = 4;
274 goto fail;
   276          rc = lpfc_reg_login(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox,
   277                              0);
278 if (rc) {
279 err = 5;
280 goto fail_free_mbox;
283 mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
284 mbox->vport = vport;
285 /* increment the reference count on ndlp to hold reference
286 * for the callback routine.
288 mbox->context2 = lpfc_nlp_get(ndlp);
290 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
291 if (rc == MBX_NOT_FINISHED) {
292 err = 6;
293 goto fail_issue_reg_login;
296 return 0;
298 fail_issue_reg_login:
299 /* decrement the reference count on ndlp just incremented
300 * for the failed mbox command.
302 lpfc_nlp_put(ndlp);
303 mp = (struct lpfc_dmabuf *) mbox->context1;
304 lpfc_mbuf_free(phba, mp->virt, mp->phys);
305 kfree(mp);
306 fail_free_mbox:
307 mempool_free(mbox, phba->mbox_mem_pool);
309 fail:
310 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
311 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
312 "0249 Cannot issue Register Fabric login: Err %d\n", err);
313 return -ENXIO;
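/*
 * lpfc_cmpl_els_flogi_fabric - Handle a successful FLOGI to a Fabric port.
 * Records E_D_TOV/R_A_TOV and the assigned N_Port ID, saves the fabric
 * service parameters, notes whether the fabric granted NPIV, unregisters
 * stale RPIs if our NportID changed, and then either registers a new VPI
 * (NPIV case) or issues the fabric REG_LOGIN.
 */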
316 static int
317 lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
318 struct serv_parm *sp, IOCB_t *irsp)
320 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
321 struct lpfc_hba *phba = vport->phba;
322 struct lpfc_nodelist *np;
323 struct lpfc_nodelist *next_np;
325 spin_lock_irq(shost->host_lock);
326 vport->fc_flag |= FC_FABRIC;
327 spin_unlock_irq(shost->host_lock);
329 phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov);
330 if (sp->cmn.edtovResolution) /* E_D_TOV ticks are in nanoseconds */
331 phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000;
333 phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;
335 if (phba->fc_topology == TOPOLOGY_LOOP) {
336 spin_lock_irq(shost->host_lock);
337 vport->fc_flag |= FC_PUBLIC_LOOP;
338 spin_unlock_irq(shost->host_lock);
339 } else {
   341           * If we are an N_Port connected to a Fabric, fix up sparams so
   342           * logins to devices on remote loops work.
344 vport->fc_sparam.cmn.altBbCredit = 1;
347 vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
348 memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name));
349 memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name));
350 ndlp->nlp_class_sup = 0;
351 if (sp->cls1.classValid)
352 ndlp->nlp_class_sup |= FC_COS_CLASS1;
353 if (sp->cls2.classValid)
354 ndlp->nlp_class_sup |= FC_COS_CLASS2;
355 if (sp->cls3.classValid)
356 ndlp->nlp_class_sup |= FC_COS_CLASS3;
357 if (sp->cls4.classValid)
358 ndlp->nlp_class_sup |= FC_COS_CLASS4;
359 ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
360 sp->cmn.bbRcvSizeLsb;
361 memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
363 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
364 if (sp->cmn.response_multiple_NPort) {
365 lpfc_printf_vlog(vport, KERN_WARNING,
366 LOG_ELS | LOG_VPORT,
367 "1816 FLOGI NPIV supported, "
368 "response data 0x%x\n",
369 sp->cmn.response_multiple_NPort);
370 phba->link_flag |= LS_NPIV_FAB_SUPPORTED;
371 } else {
   372                  /* Because we asked f/w for NPIV, it still expects us
   373                     to call reg_vnpid at least for the physical host */
374 lpfc_printf_vlog(vport, KERN_WARNING,
375 LOG_ELS | LOG_VPORT,
376 "1817 Fabric does not support NPIV "
377 "- configuring single port mode.\n");
378 phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
382 if ((vport->fc_prevDID != vport->fc_myDID) &&
383 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
385 /* If our NportID changed, we need to ensure all
386 * remaining NPORTs get unreg_login'ed.
388 list_for_each_entry_safe(np, next_np,
389 &vport->fc_nodes, nlp_listp) {
390 if (!NLP_CHK_NODE_ACT(ndlp))
391 continue;
392 if ((np->nlp_state != NLP_STE_NPR_NODE) ||
393 !(np->nlp_flag & NLP_NPR_ADISC))
394 continue;
395 spin_lock_irq(shost->host_lock);
396 np->nlp_flag &= ~NLP_NPR_ADISC;
397 spin_unlock_irq(shost->host_lock);
398 lpfc_unreg_rpi(vport, np);
400 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
401 lpfc_mbx_unreg_vpi(vport);
402 spin_lock_irq(shost->host_lock);
403 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
404 spin_unlock_irq(shost->host_lock);
408 lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
410 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
411 vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) {
412 lpfc_register_new_vport(phba, vport, ndlp);
413 return 0;
415 lpfc_issue_fabric_reglogin(vport);
416 return 0;
420 * We FLOGIed into an NPort, initiate pt2pt protocol
422 static int
423 lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
424 struct serv_parm *sp)
426 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
427 struct lpfc_hba *phba = vport->phba;
428 LPFC_MBOXQ_t *mbox;
429 int rc;
431 spin_lock_irq(shost->host_lock);
432 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
433 spin_unlock_irq(shost->host_lock);
435 phba->fc_edtov = FF_DEF_EDTOV;
436 phba->fc_ratov = FF_DEF_RATOV;
437 rc = memcmp(&vport->fc_portname, &sp->portName,
438 sizeof(vport->fc_portname));
439 if (rc >= 0) {
440 /* This side will initiate the PLOGI */
441 spin_lock_irq(shost->host_lock);
442 vport->fc_flag |= FC_PT2PT_PLOGI;
443 spin_unlock_irq(shost->host_lock);
   446           * N_Port ID cannot be 0; set ours to LocalID, the other
   447           * side will be RemoteID.
450 /* not equal */
451 if (rc)
452 vport->fc_myDID = PT2PT_LocalID;
454 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
455 if (!mbox)
456 goto fail;
458 lpfc_config_link(phba, mbox);
460 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
461 mbox->vport = vport;
462 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
463 if (rc == MBX_NOT_FINISHED) {
464 mempool_free(mbox, phba->mbox_mem_pool);
465 goto fail;
467 /* Decrement ndlp reference count indicating that ndlp can be
468 * safely released when other references to it are done.
470 lpfc_nlp_put(ndlp);
472 ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID);
473 if (!ndlp) {
475 * Cannot find existing Fabric ndlp, so allocate a
476 * new one
478 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
479 if (!ndlp)
480 goto fail;
481 lpfc_nlp_init(vport, ndlp, PT2PT_RemoteID);
482 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
483 ndlp = lpfc_enable_node(vport, ndlp,
484 NLP_STE_UNUSED_NODE);
485 if(!ndlp)
486 goto fail;
489 memcpy(&ndlp->nlp_portname, &sp->portName,
490 sizeof(struct lpfc_name));
491 memcpy(&ndlp->nlp_nodename, &sp->nodeName,
492 sizeof(struct lpfc_name));
493 /* Set state will put ndlp onto node list if not already done */
494 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
495 spin_lock_irq(shost->host_lock);
496 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
497 spin_unlock_irq(shost->host_lock);
498 } else
499 /* This side will wait for the PLOGI, decrement ndlp reference
500 * count indicating that ndlp can be released when other
501 * references to it are done.
503 lpfc_nlp_put(ndlp);
505 /* If we are pt2pt with another NPort, force NPIV off! */
506 phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
508 spin_lock_irq(shost->host_lock);
509 vport->fc_flag |= FC_PT2PT;
510 spin_unlock_irq(shost->host_lock);
512 /* Start discovery - this should just do CLEAR_LA */
513 lpfc_disc_start(vport);
514 return 0;
515 fail:
516 return -ENXIO;
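/*
 * lpfc_cmpl_els_flogi - Completion handler for an issued FLOGI.
 * On error, retries the FLOGI if appropriate or falls back to loop-map
 * discovery; on success, dispatches to the fabric or point-to-point
 * completion path depending on the F_Port bit in the common service
 * parameters of the response.
 */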
519 static void
520 lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
521 struct lpfc_iocbq *rspiocb)
523 struct lpfc_vport *vport = cmdiocb->vport;
524 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
525 IOCB_t *irsp = &rspiocb->iocb;
526 struct lpfc_nodelist *ndlp = cmdiocb->context1;
527 struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
528 struct serv_parm *sp;
529 int rc;
531 /* Check to see if link went down during discovery */
532 if (lpfc_els_chk_latt(vport)) {
533 /* One additional decrement on node reference count to
534 * trigger the release of the node
536 lpfc_nlp_put(ndlp);
537 goto out;
540 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
541 "FLOGI cmpl: status:x%x/x%x state:x%x",
542 irsp->ulpStatus, irsp->un.ulpWord[4],
543 vport->port_state);
545 if (irsp->ulpStatus) {
546 /* Check for retry */
547 if (lpfc_els_retry(phba, cmdiocb, rspiocb))
548 goto out;
550 /* FLOGI failed, so there is no fabric */
551 spin_lock_irq(shost->host_lock);
552 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
553 spin_unlock_irq(shost->host_lock);
555 /* If private loop, then allow max outstanding els to be
556 * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no
557 * alpa map would take too long otherwise.
559 if (phba->alpa_map[0] == 0) {
560 vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS;
563 /* FLOGI failure */
564 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
565 "0100 FLOGI failure Data: x%x x%x "
566 "x%x\n",
567 irsp->ulpStatus, irsp->un.ulpWord[4],
568 irsp->ulpTimeout);
569 goto flogifail;
573 * The FLogI succeeded. Sync the data for the CPU before
574 * accessing it.
576 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
578 sp = prsp->virt + sizeof(uint32_t);
580 /* FLOGI completes successfully */
581 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
582 "0101 FLOGI completes sucessfully "
583 "Data: x%x x%x x%x x%x\n",
584 irsp->un.ulpWord[4], sp->cmn.e_d_tov,
585 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution);
587 if (vport->port_state == LPFC_FLOGI) {
589 * If Common Service Parameters indicate Nport
590 * we are point to point, if Fport we are Fabric.
592 if (sp->cmn.fPort)
593 rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, irsp);
594 else
595 rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp);
597 if (!rc)
598 goto out;
601 flogifail:
602 lpfc_nlp_put(ndlp);
604 if (!lpfc_error_lost_link(irsp)) {
605 /* FLOGI failed, so just use loop map to make discovery list */
606 lpfc_disc_list_loopmap(vport);
608 /* Start discovery */
609 lpfc_disc_start(vport);
610 } else if (((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
611 ((irsp->un.ulpWord[4] != IOERR_SLI_ABORTED) &&
612 (irsp->un.ulpWord[4] != IOERR_SLI_DOWN))) &&
613 (phba->link_state != LPFC_CLEAR_LA)) {
614 /* If FLOGI failed enable link interrupt. */
615 lpfc_issue_clear_la(phba, vport);
617 out:
618 lpfc_els_free_iocb(phba, cmdiocb);
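/*
 * lpfc_issue_els_flogi - Build and transmit a FLOGI to the fabric controller.
 * The payload is the vport's service parameters with the common service
 * parameters adjusted for fabric login (NPIV request bit, FC-PH levels).
 * Returns 0 if the iocb was queued to the fabric iocb list, 1 on failure.
 */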
621 static int
622 lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
623 uint8_t retry)
625 struct lpfc_hba *phba = vport->phba;
626 struct serv_parm *sp;
627 IOCB_t *icmd;
628 struct lpfc_iocbq *elsiocb;
629 struct lpfc_sli_ring *pring;
630 uint8_t *pcmd;
631 uint16_t cmdsize;
632 uint32_t tmo;
633 int rc;
635 pring = &phba->sli.ring[LPFC_ELS_RING];
637 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
638 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
639 ndlp->nlp_DID, ELS_CMD_FLOGI);
641 if (!elsiocb)
642 return 1;
644 icmd = &elsiocb->iocb;
645 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
647 /* For FLOGI request, remainder of payload is service parameters */
648 *((uint32_t *) (pcmd)) = ELS_CMD_FLOGI;
649 pcmd += sizeof(uint32_t);
650 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
651 sp = (struct serv_parm *) pcmd;
653 /* Setup CSPs accordingly for Fabric */
654 sp->cmn.e_d_tov = 0;
655 sp->cmn.w2.r_a_tov = 0;
656 sp->cls1.classValid = 0;
657 sp->cls2.seqDelivery = 1;
658 sp->cls3.seqDelivery = 1;
659 if (sp->cmn.fcphLow < FC_PH3)
660 sp->cmn.fcphLow = FC_PH3;
661 if (sp->cmn.fcphHigh < FC_PH3)
662 sp->cmn.fcphHigh = FC_PH3;
664 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
665 sp->cmn.request_multiple_Nport = 1;
667 /* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
668 icmd->ulpCt_h = 1;
669 icmd->ulpCt_l = 0;
672 if (phba->fc_topology != TOPOLOGY_LOOP) {
673 icmd->un.elsreq64.myID = 0;
674 icmd->un.elsreq64.fl = 1;
677 tmo = phba->fc_ratov;
678 phba->fc_ratov = LPFC_DISC_FLOGI_TMO;
679 lpfc_set_disctmo(vport);
680 phba->fc_ratov = tmo;
682 phba->fc_stat.elsXmitFLOGI++;
683 elsiocb->iocb_cmpl = lpfc_cmpl_els_flogi;
685 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
686 "Issue FLOGI: opt:x%x",
687 phba->sli3_options, 0, 0);
689 rc = lpfc_issue_fabric_iocb(phba, elsiocb);
690 if (rc == IOCB_ERROR) {
691 lpfc_els_free_iocb(phba, elsiocb);
692 return 1;
694 return 0;
698 lpfc_els_abort_flogi(struct lpfc_hba *phba)
700 struct lpfc_sli_ring *pring;
701 struct lpfc_iocbq *iocb, *next_iocb;
702 struct lpfc_nodelist *ndlp;
703 IOCB_t *icmd;
705 /* Abort outstanding I/O on NPort <nlp_DID> */
706 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
707 "0201 Abort outstanding I/O on NPort x%x\n",
708 Fabric_DID);
710 pring = &phba->sli.ring[LPFC_ELS_RING];
713 * Check the txcmplq for an iocb that matches the nport the driver is
714 * searching for.
716 spin_lock_irq(&phba->hbalock);
717 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
718 icmd = &iocb->iocb;
719 if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR &&
720 icmd->un.elsreq64.bdl.ulpIoTag32) {
721 ndlp = (struct lpfc_nodelist *)(iocb->context1);
722 if (ndlp && (ndlp->nlp_DID == Fabric_DID)) {
723 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
727 spin_unlock_irq(&phba->hbalock);
729 return 0;
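/*
 * lpfc_initial_flogi - Kick off the initial FLOGI for a port.
 * Finds or allocates the Fabric_DID node and issues the FLOGI; the node
 * reference is dropped if the issue fails.  Returns 1 on success, 0 if no
 * node could be set up.
 */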
733 lpfc_initial_flogi(struct lpfc_vport *vport)
735 struct lpfc_hba *phba = vport->phba;
736 struct lpfc_nodelist *ndlp;
738 vport->port_state = LPFC_FLOGI;
739 lpfc_set_disctmo(vport);
741 /* First look for the Fabric ndlp */
742 ndlp = lpfc_findnode_did(vport, Fabric_DID);
743 if (!ndlp) {
744 /* Cannot find existing Fabric ndlp, so allocate a new one */
745 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
746 if (!ndlp)
747 return 0;
748 lpfc_nlp_init(vport, ndlp, Fabric_DID);
749 /* Put ndlp onto node list */
750 lpfc_enqueue_node(vport, ndlp);
751 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
752 /* re-setup ndlp without removing from node list */
753 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
754 if (!ndlp)
755 return 0;
758 if (lpfc_issue_els_flogi(vport, ndlp, 0))
759 /* This decrement of reference count to node shall kick off
760 * the release of the node.
762 lpfc_nlp_put(ndlp);
764 return 1;
768 lpfc_initial_fdisc(struct lpfc_vport *vport)
770 struct lpfc_hba *phba = vport->phba;
771 struct lpfc_nodelist *ndlp;
773 /* First look for the Fabric ndlp */
774 ndlp = lpfc_findnode_did(vport, Fabric_DID);
775 if (!ndlp) {
776 /* Cannot find existing Fabric ndlp, so allocate a new one */
777 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
778 if (!ndlp)
779 return 0;
780 lpfc_nlp_init(vport, ndlp, Fabric_DID);
781 /* Put ndlp onto node list */
782 lpfc_enqueue_node(vport, ndlp);
783 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
784 /* re-setup ndlp without removing from node list */
785 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
786 if (!ndlp)
787 return 0;
790 if (lpfc_issue_els_fdisc(vport, ndlp, 0)) {
791 /* decrement node reference count to trigger the release of
792 * the node.
794 lpfc_nlp_put(ndlp);
795 return 0;
797 return 1;
800 void
801 lpfc_more_plogi(struct lpfc_vport *vport)
803 int sentplogi;
805 if (vport->num_disc_nodes)
806 vport->num_disc_nodes--;
808 /* Continue discovery with <num_disc_nodes> PLOGIs to go */
809 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
810 "0232 Continue discovery with %d PLOGIs to go "
811 "Data: x%x x%x x%x\n",
812 vport->num_disc_nodes, vport->fc_plogi_cnt,
813 vport->fc_flag, vport->port_state);
814 /* Check to see if there are more PLOGIs to be sent */
815 if (vport->fc_flag & FC_NLP_MORE)
816 /* go thru NPR nodes and issue any remaining ELS PLOGIs */
817 sentplogi = lpfc_els_disc_plogi(vport);
819 return;
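/*
 * lpfc_plogi_confirm_nport - Reconcile the PLOGI response WWPN with our ndlp.
 * If the WWPN in the PLOGI accept belongs to a different node than the one we
 * logged into, the node found by WWPN takes over the DID, rport and discovery
 * flags, and the stale node is either dropped or moved back to NPR with a
 * zeroed DID.  Returns the node that should be used from here on.
 */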
822 static struct lpfc_nodelist *
823 lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
824 struct lpfc_nodelist *ndlp)
826 struct lpfc_vport *vport = ndlp->vport;
827 struct lpfc_nodelist *new_ndlp;
828 struct lpfc_rport_data *rdata;
829 struct fc_rport *rport;
830 struct serv_parm *sp;
831 uint8_t name[sizeof(struct lpfc_name)];
832 uint32_t rc;
834 /* Fabric nodes can have the same WWPN so we don't bother searching
835 * by WWPN. Just return the ndlp that was given to us.
837 if (ndlp->nlp_type & NLP_FABRIC)
838 return ndlp;
840 sp = (struct serv_parm *) ((uint8_t *) prsp + sizeof(uint32_t));
841 memset(name, 0, sizeof(struct lpfc_name));
843 /* Now we find out if the NPort we are logging into, matches the WWPN
844 * we have for that ndlp. If not, we have some work to do.
846 new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName);
848 if (new_ndlp == ndlp && NLP_CHK_NODE_ACT(new_ndlp))
849 return ndlp;
851 if (!new_ndlp) {
852 rc = memcmp(&ndlp->nlp_portname, name,
853 sizeof(struct lpfc_name));
854 if (!rc)
855 return ndlp;
856 new_ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC);
857 if (!new_ndlp)
858 return ndlp;
859 lpfc_nlp_init(vport, new_ndlp, ndlp->nlp_DID);
860 } else if (!NLP_CHK_NODE_ACT(new_ndlp)) {
861 new_ndlp = lpfc_enable_node(vport, new_ndlp,
862 NLP_STE_UNUSED_NODE);
863 if (!new_ndlp)
864 return ndlp;
867 lpfc_unreg_rpi(vport, new_ndlp);
868 new_ndlp->nlp_DID = ndlp->nlp_DID;
869 new_ndlp->nlp_prev_state = ndlp->nlp_prev_state;
871 if (ndlp->nlp_flag & NLP_NPR_2B_DISC)
872 new_ndlp->nlp_flag |= NLP_NPR_2B_DISC;
873 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
875 /* Set state will put new_ndlp on to node list if not already done */
876 lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state);
878 /* Move this back to NPR state */
879 if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) {
880 /* The new_ndlp is replacing ndlp totally, so we need
881 * to put ndlp on UNUSED list and try to free it.
884 /* Fix up the rport accordingly */
885 rport = ndlp->rport;
886 if (rport) {
887 rdata = rport->dd_data;
888 if (rdata->pnode == ndlp) {
889 lpfc_nlp_put(ndlp);
890 ndlp->rport = NULL;
891 rdata->pnode = lpfc_nlp_get(new_ndlp);
892 new_ndlp->rport = rport;
894 new_ndlp->nlp_type = ndlp->nlp_type;
897 lpfc_drop_node(vport, ndlp);
899 else {
900 lpfc_unreg_rpi(vport, ndlp);
901 ndlp->nlp_DID = 0; /* Two ndlps cannot have the same did */
902 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
904 return new_ndlp;
907 void
908 lpfc_end_rscn(struct lpfc_vport *vport)
910 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
912 if (vport->fc_flag & FC_RSCN_MODE) {
914 * Check to see if more RSCNs came in while we were
915 * processing this one.
917 if (vport->fc_rscn_id_cnt ||
918 (vport->fc_flag & FC_RSCN_DISCOVERY) != 0)
919 lpfc_els_handle_rscn(vport);
920 else {
921 spin_lock_irq(shost->host_lock);
922 vport->fc_flag &= ~FC_RSCN_MODE;
923 spin_unlock_irq(shost->host_lock);
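/*
 * lpfc_cmpl_els_plogi - Completion handler for an issued PLOGI.
 * Retries on error when possible, otherwise feeds NLP_EVT_CMPL_PLOGI to the
 * discovery state machine; on success the target node is first confirmed via
 * lpfc_plogi_confirm_nport().  Also drives any remaining discovery PLOGIs.
 */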
928 static void
929 lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
930 struct lpfc_iocbq *rspiocb)
932 struct lpfc_vport *vport = cmdiocb->vport;
933 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
934 IOCB_t *irsp;
935 struct lpfc_nodelist *ndlp;
936 struct lpfc_dmabuf *prsp;
937 int disc, rc, did, type;
939 /* we pass cmdiocb to state machine which needs rspiocb as well */
940 cmdiocb->context_un.rsp_iocb = rspiocb;
942 irsp = &rspiocb->iocb;
943 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
944 "PLOGI cmpl: status:x%x/x%x did:x%x",
945 irsp->ulpStatus, irsp->un.ulpWord[4],
946 irsp->un.elsreq64.remoteID);
948 ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID);
949 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
950 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
951 "0136 PLOGI completes to NPort x%x "
952 "with no ndlp. Data: x%x x%x x%x\n",
953 irsp->un.elsreq64.remoteID,
954 irsp->ulpStatus, irsp->un.ulpWord[4],
955 irsp->ulpIoTag);
956 goto out;
959 /* Since ndlp can be freed in the disc state machine, note if this node
960 * is being used during discovery.
962 spin_lock_irq(shost->host_lock);
963 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
964 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
965 spin_unlock_irq(shost->host_lock);
966 rc = 0;
968 /* PLOGI completes to NPort <nlp_DID> */
969 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
970 "0102 PLOGI completes to NPort x%x "
971 "Data: x%x x%x x%x x%x x%x\n",
972 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
973 irsp->ulpTimeout, disc, vport->num_disc_nodes);
974 /* Check to see if link went down during discovery */
975 if (lpfc_els_chk_latt(vport)) {
976 spin_lock_irq(shost->host_lock);
977 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
978 spin_unlock_irq(shost->host_lock);
979 goto out;
982 /* ndlp could be freed in DSM, save these values now */
983 type = ndlp->nlp_type;
984 did = ndlp->nlp_DID;
986 if (irsp->ulpStatus) {
987 /* Check for retry */
988 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
989 /* ELS command is being retried */
990 if (disc) {
991 spin_lock_irq(shost->host_lock);
992 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
993 spin_unlock_irq(shost->host_lock);
995 goto out;
997 /* PLOGI failed */
998 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
999 if (lpfc_error_lost_link(irsp))
1000 rc = NLP_STE_FREED_NODE;
1001 else
1002 rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb,
1003 NLP_EVT_CMPL_PLOGI);
1004 } else {
1005 /* Good status, call state machine */
1006 prsp = list_entry(((struct lpfc_dmabuf *)
1007 cmdiocb->context2)->list.next,
1008 struct lpfc_dmabuf, list);
1009 ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp);
1010 rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb,
1011 NLP_EVT_CMPL_PLOGI);
1014 if (disc && vport->num_disc_nodes) {
1015 /* Check to see if there are more PLOGIs to be sent */
1016 lpfc_more_plogi(vport);
1018 if (vport->num_disc_nodes == 0) {
1019 spin_lock_irq(shost->host_lock);
1020 vport->fc_flag &= ~FC_NDISC_ACTIVE;
1021 spin_unlock_irq(shost->host_lock);
1023 lpfc_can_disctmo(vport);
1024 lpfc_end_rscn(vport);
1028 out:
1029 lpfc_els_free_iocb(phba, cmdiocb);
1030 return;
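/*
 * lpfc_issue_els_plogi - Build and transmit a PLOGI to the given DID.
 * The payload is the vport's service parameters with the FC-PH version
 * fields floored at FC-PH 4.3 / FC-PH3.  Returns 0 on successful issue,
 * 1 if the iocb could not be prepared or sent.
 */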
1034 lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
1036 struct lpfc_hba *phba = vport->phba;
1037 struct serv_parm *sp;
1038 IOCB_t *icmd;
1039 struct lpfc_nodelist *ndlp;
1040 struct lpfc_iocbq *elsiocb;
1041 struct lpfc_sli_ring *pring;
1042 struct lpfc_sli *psli;
1043 uint8_t *pcmd;
1044 uint16_t cmdsize;
1045 int ret;
1047 psli = &phba->sli;
1048 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
1050 ndlp = lpfc_findnode_did(vport, did);
1051 if (ndlp && !NLP_CHK_NODE_ACT(ndlp))
1052 ndlp = NULL;
1054 /* If ndlp is not NULL, we will bump the reference count on it */
1055 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
1056 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
1057 ELS_CMD_PLOGI);
1058 if (!elsiocb)
1059 return 1;
1061 icmd = &elsiocb->iocb;
1062 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1064 /* For PLOGI request, remainder of payload is service parameters */
1065 *((uint32_t *) (pcmd)) = ELS_CMD_PLOGI;
1066 pcmd += sizeof(uint32_t);
1067 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
1068 sp = (struct serv_parm *) pcmd;
1070 if (sp->cmn.fcphLow < FC_PH_4_3)
1071 sp->cmn.fcphLow = FC_PH_4_3;
1073 if (sp->cmn.fcphHigh < FC_PH3)
1074 sp->cmn.fcphHigh = FC_PH3;
1076 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1077 "Issue PLOGI: did:x%x",
1078 did, 0, 0);
1080 phba->fc_stat.elsXmitPLOGI++;
1081 elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi;
1082 ret = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
1084 if (ret == IOCB_ERROR) {
1085 lpfc_els_free_iocb(phba, elsiocb);
1086 return 1;
1088 return 0;
1091 static void
1092 lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1093 struct lpfc_iocbq *rspiocb)
1095 struct lpfc_vport *vport = cmdiocb->vport;
1096 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1097 IOCB_t *irsp;
1098 struct lpfc_sli *psli;
1099 struct lpfc_nodelist *ndlp;
1101 psli = &phba->sli;
1102 /* we pass cmdiocb to state machine which needs rspiocb as well */
1103 cmdiocb->context_un.rsp_iocb = rspiocb;
1105 irsp = &(rspiocb->iocb);
1106 ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
1107 spin_lock_irq(shost->host_lock);
1108 ndlp->nlp_flag &= ~NLP_PRLI_SND;
1109 spin_unlock_irq(shost->host_lock);
1111 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1112 "PRLI cmpl: status:x%x/x%x did:x%x",
1113 irsp->ulpStatus, irsp->un.ulpWord[4],
1114 ndlp->nlp_DID);
1115 /* PRLI completes to NPort <nlp_DID> */
1116 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1117 "0103 PRLI completes to NPort x%x "
1118 "Data: x%x x%x x%x x%x\n",
1119 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
1120 irsp->ulpTimeout, vport->num_disc_nodes);
1122 vport->fc_prli_sent--;
1123 /* Check to see if link went down during discovery */
1124 if (lpfc_els_chk_latt(vport))
1125 goto out;
1127 if (irsp->ulpStatus) {
1128 /* Check for retry */
1129 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
1130 /* ELS command is being retried */
1131 goto out;
1133 /* PRLI failed */
1134 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
1135 if (lpfc_error_lost_link(irsp))
1136 goto out;
1137 else
1138 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
1139 NLP_EVT_CMPL_PRLI);
1140 } else
1141 /* Good status, call state machine */
1142 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
1143 NLP_EVT_CMPL_PRLI);
1144 out:
1145 lpfc_els_free_iocb(phba, cmdiocb);
1146 return;
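/*
 * lpfc_issue_els_prli - Build and transmit a PRLI to an N_Port.
 * Fills the PRLI parameter page for FCP initiator mode, enabling the
 * FC-TAPE bits when the firmware feature level allows it, and marks the
 * node with NLP_PRLI_SND while the command is outstanding.
 */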
1150 lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1151 uint8_t retry)
1153 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1154 struct lpfc_hba *phba = vport->phba;
1155 PRLI *npr;
1156 IOCB_t *icmd;
1157 struct lpfc_iocbq *elsiocb;
1158 struct lpfc_sli_ring *pring;
1159 struct lpfc_sli *psli;
1160 uint8_t *pcmd;
1161 uint16_t cmdsize;
1163 psli = &phba->sli;
1164 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
1166 cmdsize = (sizeof(uint32_t) + sizeof(PRLI));
1167 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
1168 ndlp->nlp_DID, ELS_CMD_PRLI);
1169 if (!elsiocb)
1170 return 1;
1172 icmd = &elsiocb->iocb;
1173 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1175 /* For PRLI request, remainder of payload is service parameters */
1176 memset(pcmd, 0, (sizeof(PRLI) + sizeof(uint32_t)));
1177 *((uint32_t *) (pcmd)) = ELS_CMD_PRLI;
1178 pcmd += sizeof(uint32_t);
1180 /* For PRLI, remainder of payload is PRLI parameter page */
1181 npr = (PRLI *) pcmd;
1183 * If our firmware version is 3.20 or later,
1184 * set the following bits for FC-TAPE support.
1186 if (phba->vpd.rev.feaLevelHigh >= 0x02) {
1187 npr->ConfmComplAllowed = 1;
1188 npr->Retry = 1;
1189 npr->TaskRetryIdReq = 1;
1191 npr->estabImagePair = 1;
1192 npr->readXferRdyDis = 1;
1194 /* For FCP support */
1195 npr->prliType = PRLI_FCP_TYPE;
1196 npr->initiatorFunc = 1;
1198 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1199 "Issue PRLI: did:x%x",
1200 ndlp->nlp_DID, 0, 0);
1202 phba->fc_stat.elsXmitPRLI++;
1203 elsiocb->iocb_cmpl = lpfc_cmpl_els_prli;
1204 spin_lock_irq(shost->host_lock);
1205 ndlp->nlp_flag |= NLP_PRLI_SND;
1206 spin_unlock_irq(shost->host_lock);
1207 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
1208 spin_lock_irq(shost->host_lock);
1209 ndlp->nlp_flag &= ~NLP_PRLI_SND;
1210 spin_unlock_irq(shost->host_lock);
1211 lpfc_els_free_iocb(phba, elsiocb);
1212 return 1;
1214 vport->fc_prli_sent++;
1215 return 0;
1218 void
1219 lpfc_more_adisc(struct lpfc_vport *vport)
1221 int sentadisc;
1223 if (vport->num_disc_nodes)
1224 vport->num_disc_nodes--;
1225 /* Continue discovery with <num_disc_nodes> ADISCs to go */
1226 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1227 "0210 Continue discovery with %d ADISCs to go "
1228 "Data: x%x x%x x%x\n",
1229 vport->num_disc_nodes, vport->fc_adisc_cnt,
1230 vport->fc_flag, vport->port_state);
1231 /* Check to see if there are more ADISCs to be sent */
1232 if (vport->fc_flag & FC_NLP_MORE) {
1233 lpfc_set_disctmo(vport);
1234 /* go thru NPR nodes and issue any remaining ELS ADISCs */
1235 sentadisc = lpfc_els_disc_adisc(vport);
1237 return;
1240 static void
1241 lpfc_rscn_disc(struct lpfc_vport *vport)
1243 lpfc_can_disctmo(vport);
1245 /* RSCN discovery */
1246 /* go thru NPR nodes and issue ELS PLOGIs */
1247 if (vport->fc_npr_cnt)
1248 if (lpfc_els_disc_plogi(vport))
1249 return;
1251 lpfc_end_rscn(vport);
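/*
 * lpfc_cmpl_els_adisc - Completion handler for an issued ADISC.
 * Retries on error when possible, otherwise feeds NLP_EVT_CMPL_ADISC to the
 * discovery state machine; when the last ADISC of the authentication phase
 * completes, discovery continues via REG_VPI, CLEAR_LA or RSCN handling.
 */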
1254 static void
1255 lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1256 struct lpfc_iocbq *rspiocb)
1258 struct lpfc_vport *vport = cmdiocb->vport;
1259 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1260 IOCB_t *irsp;
1261 struct lpfc_nodelist *ndlp;
1262 int disc;
1264 /* we pass cmdiocb to state machine which needs rspiocb as well */
1265 cmdiocb->context_un.rsp_iocb = rspiocb;
1267 irsp = &(rspiocb->iocb);
1268 ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
1270 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1271 "ADISC cmpl: status:x%x/x%x did:x%x",
1272 irsp->ulpStatus, irsp->un.ulpWord[4],
1273 ndlp->nlp_DID);
1275 /* Since ndlp can be freed in the disc state machine, note if this node
1276 * is being used during discovery.
1278 spin_lock_irq(shost->host_lock);
1279 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
1280 ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC);
1281 spin_unlock_irq(shost->host_lock);
1282 /* ADISC completes to NPort <nlp_DID> */
1283 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1284 "0104 ADISC completes to NPort x%x "
1285 "Data: x%x x%x x%x x%x x%x\n",
1286 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
1287 irsp->ulpTimeout, disc, vport->num_disc_nodes);
1288 /* Check to see if link went down during discovery */
1289 if (lpfc_els_chk_latt(vport)) {
1290 spin_lock_irq(shost->host_lock);
1291 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1292 spin_unlock_irq(shost->host_lock);
1293 goto out;
1296 if (irsp->ulpStatus) {
1297 /* Check for retry */
1298 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
1299 /* ELS command is being retried */
1300 if (disc) {
1301 spin_lock_irq(shost->host_lock);
1302 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1303 spin_unlock_irq(shost->host_lock);
1304 lpfc_set_disctmo(vport);
1306 goto out;
1308 /* ADISC failed */
1309 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
1310 if (!lpfc_error_lost_link(irsp))
1311 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
1312 NLP_EVT_CMPL_ADISC);
1313 } else
1314 /* Good status, call state machine */
1315 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
1316 NLP_EVT_CMPL_ADISC);
1318 if (disc && vport->num_disc_nodes) {
1319 /* Check to see if there are more ADISCs to be sent */
1320 lpfc_more_adisc(vport);
1322 /* Check to see if we are done with ADISC authentication */
1323 if (vport->num_disc_nodes == 0) {
1324 /* If we get here, there is nothing left to ADISC */
1326 * For NPIV, cmpl_reg_vpi will set port_state to READY,
1327 * and continue discovery.
1329 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
1330 !(vport->fc_flag & FC_RSCN_MODE)) {
1331 lpfc_issue_reg_vpi(phba, vport);
1332 goto out;
1335 * For SLI2, we need to set port_state to READY
1336 * and continue discovery.
1338 if (vport->port_state < LPFC_VPORT_READY) {
1339 /* If we get here, there is nothing to ADISC */
1340 if (vport->port_type == LPFC_PHYSICAL_PORT)
1341 lpfc_issue_clear_la(phba, vport);
1343 if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
1344 vport->num_disc_nodes = 0;
1345 /* go thru NPR list, issue ELS PLOGIs */
1346 if (vport->fc_npr_cnt)
1347 lpfc_els_disc_plogi(vport);
1349 if (!vport->num_disc_nodes) {
1350 spin_lock_irq(shost->host_lock);
1351 vport->fc_flag &=
1352 ~FC_NDISC_ACTIVE;
1353 spin_unlock_irq(
1354 shost->host_lock);
1355 lpfc_can_disctmo(vport);
1358 vport->port_state = LPFC_VPORT_READY;
1359 } else {
1360 lpfc_rscn_disc(vport);
1364 out:
1365 lpfc_els_free_iocb(phba, cmdiocb);
1366 return;
1370 lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1371 uint8_t retry)
1373 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1374 struct lpfc_hba *phba = vport->phba;
1375 ADISC *ap;
1376 IOCB_t *icmd;
1377 struct lpfc_iocbq *elsiocb;
1378 struct lpfc_sli *psli = &phba->sli;
1379 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
1380 uint8_t *pcmd;
1381 uint16_t cmdsize;
1383 cmdsize = (sizeof(uint32_t) + sizeof(ADISC));
1384 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
1385 ndlp->nlp_DID, ELS_CMD_ADISC);
1386 if (!elsiocb)
1387 return 1;
1389 icmd = &elsiocb->iocb;
1390 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1392 /* For ADISC request, remainder of payload is service parameters */
1393 *((uint32_t *) (pcmd)) = ELS_CMD_ADISC;
1394 pcmd += sizeof(uint32_t);
1396 /* Fill in ADISC payload */
1397 ap = (ADISC *) pcmd;
1398 ap->hardAL_PA = phba->fc_pref_ALPA;
1399 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
1400 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
1401 ap->DID = be32_to_cpu(vport->fc_myDID);
1403 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1404 "Issue ADISC: did:x%x",
1405 ndlp->nlp_DID, 0, 0);
1407 phba->fc_stat.elsXmitADISC++;
1408 elsiocb->iocb_cmpl = lpfc_cmpl_els_adisc;
1409 spin_lock_irq(shost->host_lock);
1410 ndlp->nlp_flag |= NLP_ADISC_SND;
1411 spin_unlock_irq(shost->host_lock);
1412 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
1413 spin_lock_irq(shost->host_lock);
1414 ndlp->nlp_flag &= ~NLP_ADISC_SND;
1415 spin_unlock_irq(shost->host_lock);
1416 lpfc_els_free_iocb(phba, elsiocb);
1417 return 1;
1419 return 0;
1422 static void
1423 lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1424 struct lpfc_iocbq *rspiocb)
1426 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
1427 struct lpfc_vport *vport = ndlp->vport;
1428 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1429 IOCB_t *irsp;
1430 struct lpfc_sli *psli;
1432 psli = &phba->sli;
1433 /* we pass cmdiocb to state machine which needs rspiocb as well */
1434 cmdiocb->context_un.rsp_iocb = rspiocb;
1436 irsp = &(rspiocb->iocb);
1437 spin_lock_irq(shost->host_lock);
1438 ndlp->nlp_flag &= ~NLP_LOGO_SND;
1439 spin_unlock_irq(shost->host_lock);
1441 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1442 "LOGO cmpl: status:x%x/x%x did:x%x",
1443 irsp->ulpStatus, irsp->un.ulpWord[4],
1444 ndlp->nlp_DID);
1445 /* LOGO completes to NPort <nlp_DID> */
1446 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1447 "0105 LOGO completes to NPort x%x "
1448 "Data: x%x x%x x%x x%x\n",
1449 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
1450 irsp->ulpTimeout, vport->num_disc_nodes);
1451 /* Check to see if link went down during discovery */
1452 if (lpfc_els_chk_latt(vport))
1453 goto out;
1455 if (ndlp->nlp_flag & NLP_TARGET_REMOVE) {
1456 /* NLP_EVT_DEVICE_RM should unregister the RPI
1457 * which should abort all outstanding IOs.
1459 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
1460 NLP_EVT_DEVICE_RM);
1461 goto out;
1464 if (irsp->ulpStatus) {
1465 /* Check for retry */
1466 if (lpfc_els_retry(phba, cmdiocb, rspiocb))
1467 /* ELS command is being retried */
1468 goto out;
1469 /* LOGO failed */
1470 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
1471 if (lpfc_error_lost_link(irsp))
1472 goto out;
1473 else
1474 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
1475 NLP_EVT_CMPL_LOGO);
1476 } else
1477 /* Good status, call state machine.
1478 * This will unregister the rpi if needed.
1480 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
1481 NLP_EVT_CMPL_LOGO);
1482 out:
1483 lpfc_els_free_iocb(phba, cmdiocb);
1484 return;
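/*
 * lpfc_issue_els_logo - Build and transmit a LOGO to an N_Port.
 * The payload carries our N_Port ID and port name.  NLP_LOGO_SND is set
 * while the command is outstanding and cleared again if the issue fails;
 * a LOGO already in flight makes this routine a no-op.
 */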
1488 lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1489 uint8_t retry)
1491 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1492 struct lpfc_hba *phba = vport->phba;
1493 IOCB_t *icmd;
1494 struct lpfc_iocbq *elsiocb;
1495 struct lpfc_sli_ring *pring;
1496 struct lpfc_sli *psli;
1497 uint8_t *pcmd;
1498 uint16_t cmdsize;
1499 int rc;
1501 psli = &phba->sli;
1502 pring = &psli->ring[LPFC_ELS_RING];
1504 spin_lock_irq(shost->host_lock);
1505 if (ndlp->nlp_flag & NLP_LOGO_SND) {
1506 spin_unlock_irq(shost->host_lock);
1507 return 0;
1509 spin_unlock_irq(shost->host_lock);
1511 cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name);
1512 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
1513 ndlp->nlp_DID, ELS_CMD_LOGO);
1514 if (!elsiocb)
1515 return 1;
1517 icmd = &elsiocb->iocb;
1518 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1519 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
1520 pcmd += sizeof(uint32_t);
1522 /* Fill in LOGO payload */
1523 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID);
1524 pcmd += sizeof(uint32_t);
1525 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
1527 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1528 "Issue LOGO: did:x%x",
1529 ndlp->nlp_DID, 0, 0);
1531 phba->fc_stat.elsXmitLOGO++;
1532 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo;
1533 spin_lock_irq(shost->host_lock);
1534 ndlp->nlp_flag |= NLP_LOGO_SND;
1535 spin_unlock_irq(shost->host_lock);
1536 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
1538 if (rc == IOCB_ERROR) {
1539 spin_lock_irq(shost->host_lock);
1540 ndlp->nlp_flag &= ~NLP_LOGO_SND;
1541 spin_unlock_irq(shost->host_lock);
1542 lpfc_els_free_iocb(phba, elsiocb);
1543 return 1;
1545 return 0;
1548 static void
1549 lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1550 struct lpfc_iocbq *rspiocb)
1552 struct lpfc_vport *vport = cmdiocb->vport;
1553 IOCB_t *irsp;
1555 irsp = &rspiocb->iocb;
1557 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1558 "ELS cmd cmpl: status:x%x/x%x did:x%x",
1559 irsp->ulpStatus, irsp->un.ulpWord[4],
1560 irsp->un.elsreq64.remoteID);
1561 /* ELS cmd tag <ulpIoTag> completes */
1562 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1563 "0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n",
1564 irsp->ulpIoTag, irsp->ulpStatus,
1565 irsp->un.ulpWord[4], irsp->ulpTimeout);
1566 /* Check to see if link went down during discovery */
1567 lpfc_els_chk_latt(vport);
1568 lpfc_els_free_iocb(phba, cmdiocb);
1569 return;
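/*
 * lpfc_issue_els_scr - Build and transmit a State Change Registration (SCR)
 * to the fabric controller so the port receives RSCNs.  A temporary node is
 * allocated for the nportid if none exists; its reference is dropped once
 * the command is issued (or on failure), letting the completion path free it.
 */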
1573 lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
1575 struct lpfc_hba *phba = vport->phba;
1576 IOCB_t *icmd;
1577 struct lpfc_iocbq *elsiocb;
1578 struct lpfc_sli_ring *pring;
1579 struct lpfc_sli *psli;
1580 uint8_t *pcmd;
1581 uint16_t cmdsize;
1582 struct lpfc_nodelist *ndlp;
1584 psli = &phba->sli;
1585 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
1586 cmdsize = (sizeof(uint32_t) + sizeof(SCR));
1588 ndlp = lpfc_findnode_did(vport, nportid);
1589 if (!ndlp) {
1590 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
1591 if (!ndlp)
1592 return 1;
1593 lpfc_nlp_init(vport, ndlp, nportid);
1594 lpfc_enqueue_node(vport, ndlp);
1595 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
1596 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
1597 if (!ndlp)
1598 return 1;
1601 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
1602 ndlp->nlp_DID, ELS_CMD_SCR);
1604 if (!elsiocb) {
1605 /* This will trigger the release of the node just
1606 * allocated
1608 lpfc_nlp_put(ndlp);
1609 return 1;
1612 icmd = &elsiocb->iocb;
1613 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1615 *((uint32_t *) (pcmd)) = ELS_CMD_SCR;
1616 pcmd += sizeof(uint32_t);
1618 /* For SCR, remainder of payload is SCR parameter page */
1619 memset(pcmd, 0, sizeof(SCR));
1620 ((SCR *) pcmd)->Function = SCR_FUNC_FULL;
1622 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1623 "Issue SCR: did:x%x",
1624 ndlp->nlp_DID, 0, 0);
1626 phba->fc_stat.elsXmitSCR++;
1627 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
1628 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
1629 /* The additional lpfc_nlp_put will cause the following
  1630          * lpfc_els_free_iocb routine to trigger the release of
1631 * the node.
1633 lpfc_nlp_put(ndlp);
1634 lpfc_els_free_iocb(phba, elsiocb);
1635 return 1;
1637 /* This will cause the callback-function lpfc_cmpl_els_cmd to
1638 * trigger the release of node.
1640 lpfc_nlp_put(ndlp);
1641 return 0;
1644 static int
1645 lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
1647 struct lpfc_hba *phba = vport->phba;
1648 IOCB_t *icmd;
1649 struct lpfc_iocbq *elsiocb;
1650 struct lpfc_sli_ring *pring;
1651 struct lpfc_sli *psli;
1652 FARP *fp;
1653 uint8_t *pcmd;
1654 uint32_t *lp;
1655 uint16_t cmdsize;
1656 struct lpfc_nodelist *ondlp;
1657 struct lpfc_nodelist *ndlp;
1659 psli = &phba->sli;
1660 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
1661 cmdsize = (sizeof(uint32_t) + sizeof(FARP));
1663 ndlp = lpfc_findnode_did(vport, nportid);
1664 if (!ndlp) {
1665 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
1666 if (!ndlp)
1667 return 1;
1668 lpfc_nlp_init(vport, ndlp, nportid);
1669 lpfc_enqueue_node(vport, ndlp);
1670 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
1671 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
1672 if (!ndlp)
1673 return 1;
1676 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
1677 ndlp->nlp_DID, ELS_CMD_RNID);
1678 if (!elsiocb) {
1679 /* This will trigger the release of the node just
1680 * allocated
1682 lpfc_nlp_put(ndlp);
1683 return 1;
1686 icmd = &elsiocb->iocb;
1687 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1689 *((uint32_t *) (pcmd)) = ELS_CMD_FARPR;
1690 pcmd += sizeof(uint32_t);
1692 /* Fill in FARPR payload */
1693 fp = (FARP *) (pcmd);
1694 memset(fp, 0, sizeof(FARP));
1695 lp = (uint32_t *) pcmd;
1696 *lp++ = be32_to_cpu(nportid);
1697 *lp++ = be32_to_cpu(vport->fc_myDID);
1698 fp->Rflags = 0;
1699 fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE);
1701 memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name));
1702 memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
1703 ondlp = lpfc_findnode_did(vport, nportid);
1704 if (ondlp && NLP_CHK_NODE_ACT(ondlp)) {
1705 memcpy(&fp->OportName, &ondlp->nlp_portname,
1706 sizeof(struct lpfc_name));
1707 memcpy(&fp->OnodeName, &ondlp->nlp_nodename,
1708 sizeof(struct lpfc_name));
1711 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1712 "Issue FARPR: did:x%x",
1713 ndlp->nlp_DID, 0, 0);
1715 phba->fc_stat.elsXmitFARPR++;
1716 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
1717 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
1718 /* The additional lpfc_nlp_put will cause the following
1719 * lpfc_els_free_iocb routine to trigger the release of
1720 * the node.
1722 lpfc_nlp_put(ndlp);
1723 lpfc_els_free_iocb(phba, elsiocb);
1724 return 1;
1726 /* This will cause the callback-function lpfc_cmpl_els_cmd to
1727 * trigger the release of the node.
1729 lpfc_nlp_put(ndlp);
1730 return 0;
1733 void
1734 lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp)
1736 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1737 struct lpfc_work_evt *evtp;
1739 spin_lock_irq(shost->host_lock);
1740 nlp->nlp_flag &= ~NLP_DELAY_TMO;
1741 spin_unlock_irq(shost->host_lock);
1742 del_timer_sync(&nlp->nlp_delayfunc);
1743 nlp->nlp_last_elscmd = 0;
1745 if (!list_empty(&nlp->els_retry_evt.evt_listp)) {
1746 list_del_init(&nlp->els_retry_evt.evt_listp);
1747 /* Decrement nlp reference count held for the delayed retry */
1748 evtp = &nlp->els_retry_evt;
1749 lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1);
1752 if (nlp->nlp_flag & NLP_NPR_2B_DISC) {
1753 spin_lock_irq(shost->host_lock);
1754 nlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1755 spin_unlock_irq(shost->host_lock);
1756 if (vport->num_disc_nodes) {
1757 /* Check to see if there are more
1758 * PLOGIs to be sent
1760 lpfc_more_plogi(vport);
1762 if (vport->num_disc_nodes == 0) {
1763 spin_lock_irq(shost->host_lock);
1764 vport->fc_flag &= ~FC_NDISC_ACTIVE;
1765 spin_unlock_irq(shost->host_lock);
1766 lpfc_can_disctmo(vport);
1767 lpfc_end_rscn(vport);
1771 return;
1774 void
1775 lpfc_els_retry_delay(unsigned long ptr)
1777 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) ptr;
1778 struct lpfc_vport *vport = ndlp->vport;
1779 struct lpfc_hba *phba = vport->phba;
1780 unsigned long flags;
1781 struct lpfc_work_evt *evtp = &ndlp->els_retry_evt;
1783 ndlp = (struct lpfc_nodelist *) ptr;
1784 phba = ndlp->vport->phba;
1785 evtp = &ndlp->els_retry_evt;
1787 spin_lock_irqsave(&phba->hbalock, flags);
1788 if (!list_empty(&evtp->evt_listp)) {
1789 spin_unlock_irqrestore(&phba->hbalock, flags);
1790 return;
1793 /* We need to hold the node by incrementing the reference
1794 * count until the queued work is done
1796 evtp->evt_arg1 = lpfc_nlp_get(ndlp);
1797 evtp->evt = LPFC_EVT_ELS_RETRY;
1798 list_add_tail(&evtp->evt_listp, &phba->work_list);
1799 if (phba->work_wait)
1800 lpfc_worker_wake_up(phba);
1802 spin_unlock_irqrestore(&phba->hbalock, flags);
1803 return;
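/*
 * lpfc_els_retry_delay_handler - Worker-thread side of the ELS retry timer.
 * Clears NLP_DELAY_TMO and re-issues the last ELS command recorded in
 * nlp_last_elscmd (FLOGI, PLOGI, ADISC, PRLI, LOGO or FDISC), updating the
 * node state for the commands that track an issue state.
 */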
1806 void
1807 lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
1809 struct lpfc_vport *vport = ndlp->vport;
1810 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1811 uint32_t cmd, did, retry;
1813 spin_lock_irq(shost->host_lock);
1814 did = ndlp->nlp_DID;
1815 cmd = ndlp->nlp_last_elscmd;
1816 ndlp->nlp_last_elscmd = 0;
1818 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
1819 spin_unlock_irq(shost->host_lock);
1820 return;
1823 ndlp->nlp_flag &= ~NLP_DELAY_TMO;
1824 spin_unlock_irq(shost->host_lock);
  1826          * If a discovery event re-added nlp_delayfunc after timer
1827 * firing and before processing the timer, cancel the
1828 * nlp_delayfunc.
1830 del_timer_sync(&ndlp->nlp_delayfunc);
1831 retry = ndlp->nlp_retry;
1833 switch (cmd) {
1834 case ELS_CMD_FLOGI:
1835 lpfc_issue_els_flogi(vport, ndlp, retry);
1836 break;
1837 case ELS_CMD_PLOGI:
1838 if (!lpfc_issue_els_plogi(vport, ndlp->nlp_DID, retry)) {
1839 ndlp->nlp_prev_state = ndlp->nlp_state;
1840 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
1842 break;
1843 case ELS_CMD_ADISC:
1844 if (!lpfc_issue_els_adisc(vport, ndlp, retry)) {
1845 ndlp->nlp_prev_state = ndlp->nlp_state;
1846 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
1848 break;
1849 case ELS_CMD_PRLI:
1850 if (!lpfc_issue_els_prli(vport, ndlp, retry)) {
1851 ndlp->nlp_prev_state = ndlp->nlp_state;
1852 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
1854 break;
1855 case ELS_CMD_LOGO:
1856 if (!lpfc_issue_els_logo(vport, ndlp, retry)) {
1857 ndlp->nlp_prev_state = ndlp->nlp_state;
1858 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1860 break;
1861 case ELS_CMD_FDISC:
1862 lpfc_issue_els_fdisc(vport, ndlp, retry);
1863 break;
1865 return;
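/*
 * lpfc_els_retry - Decide whether a failed ELS command should be retried.
 * Examines the completion status (local reject codes, NPort/Fabric busy,
 * LS_RJT reason and explanation codes) to pick a retry limit and an optional
 * delay, then either re-issues the command or arms the per-node delay timer.
 * Returns 1 when a retry has been scheduled, 0 otherwise.
 */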
1868 static int
1869 lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1870 struct lpfc_iocbq *rspiocb)
1872 struct lpfc_vport *vport = cmdiocb->vport;
1873 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1874 IOCB_t *irsp = &rspiocb->iocb;
1875 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
1876 struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
1877 uint32_t *elscmd;
1878 struct ls_rjt stat;
1879 int retry = 0, maxretry = lpfc_max_els_tries, delay = 0;
1880 int logerr = 0;
1881 uint32_t cmd = 0;
1882 uint32_t did;
1885 /* Note: context2 may be 0 for internal driver abort
  1886          * of a delayed ELS command.
1889 if (pcmd && pcmd->virt) {
1890 elscmd = (uint32_t *) (pcmd->virt);
1891 cmd = *elscmd++;
1894 if (ndlp && NLP_CHK_NODE_ACT(ndlp))
1895 did = ndlp->nlp_DID;
1896 else {
1897 /* We should only hit this case for retrying PLOGI */
1898 did = irsp->un.elsreq64.remoteID;
1899 ndlp = lpfc_findnode_did(vport, did);
1900 if ((!ndlp || !NLP_CHK_NODE_ACT(ndlp))
1901 && (cmd != ELS_CMD_PLOGI))
1902 return 1;
1905 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1906 "Retry ELS: wd7:x%x wd4:x%x did:x%x",
1907 *(((uint32_t *) irsp) + 7), irsp->un.ulpWord[4], ndlp->nlp_DID);
1909 switch (irsp->ulpStatus) {
1910 case IOSTAT_FCP_RSP_ERROR:
1911 case IOSTAT_REMOTE_STOP:
1912 break;
1914 case IOSTAT_LOCAL_REJECT:
1915 switch ((irsp->un.ulpWord[4] & 0xff)) {
1916 case IOERR_LOOP_OPEN_FAILURE:
1917 if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0)
1918 delay = 1000;
1919 retry = 1;
1920 break;
1922 case IOERR_ILLEGAL_COMMAND:
1923 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1924 "0124 Retry illegal cmd x%x "
1925 "retry:x%x delay:x%x\n",
1926 cmd, cmdiocb->retry, delay);
1927 retry = 1;
1928 /* All command's retry policy */
1929 maxretry = 8;
1930 if (cmdiocb->retry > 2)
1931 delay = 1000;
1932 break;
1934 case IOERR_NO_RESOURCES:
1935 logerr = 1; /* HBA out of resources */
1936 retry = 1;
1937 if (cmdiocb->retry > 100)
1938 delay = 100;
1939 maxretry = 250;
1940 break;
1942 case IOERR_ILLEGAL_FRAME:
1943 delay = 100;
1944 retry = 1;
1945 break;
1947 case IOERR_SEQUENCE_TIMEOUT:
1948 case IOERR_INVALID_RPI:
1949 retry = 1;
1950 break;
1952 break;
1954 case IOSTAT_NPORT_RJT:
1955 case IOSTAT_FABRIC_RJT:
1956 if (irsp->un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
1957 retry = 1;
1958 break;
1960 break;
1962 case IOSTAT_NPORT_BSY:
1963 case IOSTAT_FABRIC_BSY:
1964 logerr = 1; /* Fabric / Remote NPort out of resources */
1965 retry = 1;
1966 break;
1968 case IOSTAT_LS_RJT:
1969 stat.un.lsRjtError = be32_to_cpu(irsp->un.ulpWord[4]);
1970 /* Added for Vendor specific support
1971 * Just keep retrying for these Rsn / Exp codes
1973 switch (stat.un.b.lsRjtRsnCode) {
1974 case LSRJT_UNABLE_TPC:
1975 if (stat.un.b.lsRjtRsnCodeExp ==
1976 LSEXP_CMD_IN_PROGRESS) {
1977 if (cmd == ELS_CMD_PLOGI) {
1978 delay = 1000;
1979 maxretry = 48;
1981 retry = 1;
1982 break;
1984 if (cmd == ELS_CMD_PLOGI) {
1985 delay = 1000;
1986 maxretry = lpfc_max_els_tries + 1;
1987 retry = 1;
1988 break;
1990 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
1991 (cmd == ELS_CMD_FDISC) &&
1992 (stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){
1993 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1994 "0125 FDISC Failed (x%x). "
1995 "Fabric out of resources\n",
1996 stat.un.lsRjtError);
1997 lpfc_vport_set_state(vport,
1998 FC_VPORT_NO_FABRIC_RSCS);
2000 break;
2002 case LSRJT_LOGICAL_BSY:
2003 if ((cmd == ELS_CMD_PLOGI) ||
2004 (cmd == ELS_CMD_PRLI)) {
2005 delay = 1000;
2006 maxretry = 48;
2007 } else if (cmd == ELS_CMD_FDISC) {
2008 /* FDISC retry policy */
2009 maxretry = 48;
2010 if (cmdiocb->retry >= 32)
2011 delay = 1000;
2013 retry = 1;
2014 break;
2016 case LSRJT_LOGICAL_ERR:
2017 /* There are some cases where switches return this
2018 * error when they are not ready and should be returning
2019 * Logical Busy. We should delay every time.
2021 if (cmd == ELS_CMD_FDISC &&
2022 stat.un.b.lsRjtRsnCodeExp == LSEXP_PORT_LOGIN_REQ) {
2023 maxretry = 3;
2024 delay = 1000;
2025 retry = 1;
2026 break;
2028 case LSRJT_PROTOCOL_ERR:
2029 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
2030 (cmd == ELS_CMD_FDISC) &&
2031 ((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) ||
2032 (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID))
2034 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2035 "0123 FDISC Failed (x%x). "
2036 "Fabric Detected Bad WWN\n",
2037 stat.un.lsRjtError);
2038 lpfc_vport_set_state(vport,
2039 FC_VPORT_FABRIC_REJ_WWN);
2041 break;
2043 break;
2045 case IOSTAT_INTERMED_RSP:
2046 case IOSTAT_BA_RJT:
2047 break;
2049 default:
2050 break;
2053 if (did == FDMI_DID)
2054 retry = 1;
2056 if ((cmd == ELS_CMD_FLOGI) &&
2057 (phba->fc_topology != TOPOLOGY_LOOP) &&
2058 !lpfc_error_lost_link(irsp)) {
2059 /* FLOGI retry policy */
2060 retry = 1;
2061 maxretry = 48;
2062 if (cmdiocb->retry >= 32)
2063 delay = 1000;
2066 if ((++cmdiocb->retry) >= maxretry) {
2067 phba->fc_stat.elsRetryExceeded++;
2068 retry = 0;
2071 if ((vport->load_flag & FC_UNLOADING) != 0)
2072 retry = 0;
2074 if (retry) {
2076 /* Retry ELS command <elsCmd> to remote NPORT <did> */
2077 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2078 "0107 Retry ELS command x%x to remote "
2079 "NPORT x%x Data: x%x x%x\n",
2080 cmd, did, cmdiocb->retry, delay);
2082 if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) &&
2083 ((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
2084 ((irsp->un.ulpWord[4] & 0xff) != IOERR_NO_RESOURCES))) {
2085 /* Don't reset timer for no resources */
2087 /* If discovery / RSCN timer is running, reset it */
2088 if (timer_pending(&vport->fc_disctmo) ||
2089 (vport->fc_flag & FC_RSCN_MODE))
2090 lpfc_set_disctmo(vport);
2093 phba->fc_stat.elsXmitRetry++;
2094 if (ndlp && delay) {
2095 phba->fc_stat.elsDelayRetry++;
2096 ndlp->nlp_retry = cmdiocb->retry;
2098 /* delay is specified in milliseconds */
2099 mod_timer(&ndlp->nlp_delayfunc,
2100 jiffies + msecs_to_jiffies(delay));
2101 spin_lock_irq(shost->host_lock);
2102 ndlp->nlp_flag |= NLP_DELAY_TMO;
2103 spin_unlock_irq(shost->host_lock);
2105 ndlp->nlp_prev_state = ndlp->nlp_state;
2106 if (cmd == ELS_CMD_PRLI)
2107 lpfc_nlp_set_state(vport, ndlp,
2108 NLP_STE_REG_LOGIN_ISSUE);
2109 else
2110 lpfc_nlp_set_state(vport, ndlp,
2111 NLP_STE_NPR_NODE);
2112 ndlp->nlp_last_elscmd = cmd;
2114 return 1;
2116 switch (cmd) {
2117 case ELS_CMD_FLOGI:
2118 lpfc_issue_els_flogi(vport, ndlp, cmdiocb->retry);
2119 return 1;
2120 case ELS_CMD_FDISC:
2121 lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry);
2122 return 1;
2123 case ELS_CMD_PLOGI:
2124 if (ndlp) {
2125 ndlp->nlp_prev_state = ndlp->nlp_state;
2126 lpfc_nlp_set_state(vport, ndlp,
2127 NLP_STE_PLOGI_ISSUE);
2129 lpfc_issue_els_plogi(vport, did, cmdiocb->retry);
2130 return 1;
2131 case ELS_CMD_ADISC:
2132 ndlp->nlp_prev_state = ndlp->nlp_state;
2133 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
2134 lpfc_issue_els_adisc(vport, ndlp, cmdiocb->retry);
2135 return 1;
2136 case ELS_CMD_PRLI:
2137 ndlp->nlp_prev_state = ndlp->nlp_state;
2138 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
2139 lpfc_issue_els_prli(vport, ndlp, cmdiocb->retry);
2140 return 1;
2141 case ELS_CMD_LOGO:
2142 ndlp->nlp_prev_state = ndlp->nlp_state;
2143 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
2144 lpfc_issue_els_logo(vport, ndlp, cmdiocb->retry);
2145 return 1;
2148 /* No retry ELS command <elsCmd> to remote NPORT <did> */
2149 if (logerr) {
2150 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2151 "0137 No retry ELS command x%x to remote "
2152 "NPORT x%x: Out of Resources: Error:x%x/%x\n",
2153 cmd, did, irsp->ulpStatus,
2154 irsp->un.ulpWord[4]);
2156 else {
2157 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2158 "0108 No retry ELS command x%x to remote "
2159 "NPORT x%x Retried:%d Error:x%x/%x\n",
2160 cmd, did, cmdiocb->retry, irsp->ulpStatus,
2161 irsp->un.ulpWord[4]);
2163 return 0;
2166 static int
2167 lpfc_els_free_data(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr1)
2169 struct lpfc_dmabuf *buf_ptr;
2171 /* Free the response before processing the command. */
2172 if (!list_empty(&buf_ptr1->list)) {
2173 list_remove_head(&buf_ptr1->list, buf_ptr,
2174 struct lpfc_dmabuf,
2175 list);
2176 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
2177 kfree(buf_ptr);
2179 lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys);
2180 kfree(buf_ptr1);
2181 return 0;
2184 static int
2185 lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr)
2187 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
2188 kfree(buf_ptr);
2189 return 0;
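/*
 * lpfc_els_free_iocb - Release an ELS IOCB and its associated resources
 *
 * Drops the reference on the ndlp held in context1 (handling the
 * NLP_DEFER_RM case), frees or defers the command/response DMA buffers in
 * context2 (buffers flagged LPFC_DELAY_MEM_FREE are parked on phba->elsbuf
 * until after a heartbeat), frees the BPL in context3, and finally returns
 * the IOCB to the SLI layer.
 */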
2193 lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
2195 struct lpfc_dmabuf *buf_ptr, *buf_ptr1;
2196 struct lpfc_nodelist *ndlp;
2198 ndlp = (struct lpfc_nodelist *)elsiocb->context1;
2199 if (ndlp) {
2200 if (ndlp->nlp_flag & NLP_DEFER_RM) {
2201 lpfc_nlp_put(ndlp);
2203 /* If the ndlp is not being used by another discovery
2204 * thread, free it.
2206 if (!lpfc_nlp_not_used(ndlp)) {
2207 /* If ndlp is being used by another discovery
2208 * thread, just clear NLP_DEFER_RM
2210 ndlp->nlp_flag &= ~NLP_DEFER_RM;
2213 else
2214 lpfc_nlp_put(ndlp);
2215 elsiocb->context1 = NULL;
2217 /* context2 = cmd, context2->next = rsp, context3 = bpl */
2218 if (elsiocb->context2) {
2219 if (elsiocb->iocb_flag & LPFC_DELAY_MEM_FREE) {
2220 /* Firmware could still be in the process of DMAing the
2221 * payload, so don't free the data buffer until after
2222 * a heartbeat.
2224 elsiocb->iocb_flag &= ~LPFC_DELAY_MEM_FREE;
2225 buf_ptr = elsiocb->context2;
2226 elsiocb->context2 = NULL;
2227 if (buf_ptr) {
2228 buf_ptr1 = NULL;
2229 spin_lock_irq(&phba->hbalock);
2230 if (!list_empty(&buf_ptr->list)) {
2231 list_remove_head(&buf_ptr->list,
2232 buf_ptr1, struct lpfc_dmabuf,
2233 list);
2234 INIT_LIST_HEAD(&buf_ptr1->list);
2235 list_add_tail(&buf_ptr1->list,
2236 &phba->elsbuf);
2237 phba->elsbuf_cnt++;
2239 INIT_LIST_HEAD(&buf_ptr->list);
2240 list_add_tail(&buf_ptr->list, &phba->elsbuf);
2241 phba->elsbuf_cnt++;
2242 spin_unlock_irq(&phba->hbalock);
2244 } else {
2245 buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2;
2246 lpfc_els_free_data(phba, buf_ptr1);
2250 if (elsiocb->context3) {
2251 buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3;
2252 lpfc_els_free_bpl(phba, buf_ptr);
2254 lpfc_sli_release_iocbq(phba, elsiocb);
2255 return 0;
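/*
 * lpfc_cmpl_els_logo_acc - Completion handler for an ACC sent to a LOGO
 *
 * If the node is in NPR state, releases it when no other discovery thread
 * is using it; if the node is still in use, just unregisters its RPI.
 * The ELS IOCB is then freed.
 */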
2258 static void
2259 lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2260 struct lpfc_iocbq *rspiocb)
2262 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
2263 struct lpfc_vport *vport = cmdiocb->vport;
2264 IOCB_t *irsp;
2266 irsp = &rspiocb->iocb;
2267 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
2268 "ACC LOGO cmpl: status:x%x/x%x did:x%x",
2269 irsp->ulpStatus, irsp->un.ulpWord[4], ndlp->nlp_DID);
2270 /* ACC to LOGO completes to NPort <nlp_DID> */
2271 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2272 "0109 ACC to LOGO completes to NPort x%x "
2273 "Data: x%x x%x x%x\n",
2274 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
2275 ndlp->nlp_rpi);
2277 if (ndlp->nlp_state == NLP_STE_NPR_NODE) {
2278 /* NPort Recovery mode or node is just allocated */
2279 if (!lpfc_nlp_not_used(ndlp)) {
2280 /* If the ndlp is being used by another discovery
2281 * thread, just unregister the RPI.
2283 lpfc_unreg_rpi(vport, ndlp);
2284 } else {
2285 /* Indicate the node has already been released, so it should
2286 * not be referenced from within lpfc_els_free_iocb.
2288 cmdiocb->context1 = NULL;
2291 lpfc_els_free_iocb(phba, cmdiocb);
2292 return;
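/*
 * lpfc_mbx_cmpl_dflt_rpi - Mailbox completion for a default RPI cleanup
 *
 * Frees the mailbox DMA buffer and the mailbox itself, drops the ndlp
 * reference taken for the mailbox command, and, if no other discovery
 * thread is using the node, releases it.
 */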
2295 void
2296 lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2298 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
2299 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
2301 pmb->context1 = NULL;
2302 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2303 kfree(mp);
2304 mempool_free(pmb, phba->mbox_mem_pool);
2305 if (ndlp) {
2306 lpfc_nlp_put(ndlp);
2307 /* This is the end of the default RPI cleanup logic for this
2308 * ndlp. If no other discovery threads are using this ndlp,
2309 * we should free all resources associated with it.
2311 lpfc_nlp_not_used(ndlp);
2313 return;
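/*
 * lpfc_cmpl_els_rsp - Generic completion handler for ELS responses
 *
 * Handles completion of ACC/LS_RJT responses issued by the driver.  If a
 * REG_LOGIN mailbox was attached to the response it is issued from here
 * (or its resources are freed on failure), the node state is updated, and
 * nodes that are no longer needed (rejects, default RPI cleanup) are
 * released before the ELS IOCB is freed.
 */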
2316 static void
2317 lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2318 struct lpfc_iocbq *rspiocb)
2320 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
2321 struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL;
2322 struct Scsi_Host *shost = vport ? lpfc_shost_from_vport(vport) : NULL;
2323 IOCB_t *irsp;
2324 uint8_t *pcmd;
2325 LPFC_MBOXQ_t *mbox = NULL;
2326 struct lpfc_dmabuf *mp = NULL;
2327 uint32_t ls_rjt = 0;
2329 irsp = &rspiocb->iocb;
2331 if (cmdiocb->context_un.mbox)
2332 mbox = cmdiocb->context_un.mbox;
2334 /* First determine if this is a LS_RJT cmpl. Note, this callback
2335 * function can have cmdiocb->context1 (ndlp) field set to NULL.
2337 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
2338 if (ndlp && (*((uint32_t *) (pcmd)) == ELS_CMD_LS_RJT)) {
2339 /* A LS_RJT associated with Default RPI cleanup has its own
2340 * separate code path.
2342 if (!(ndlp->nlp_flag & NLP_RM_DFLT_RPI))
2343 ls_rjt = 1;
2346 /* Check to see if link went down during discovery */
2347 if (!ndlp || lpfc_els_chk_latt(vport)) {
2348 if (mbox) {
2349 mp = (struct lpfc_dmabuf *) mbox->context1;
2350 if (mp) {
2351 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2352 kfree(mp);
2354 mempool_free(mbox, phba->mbox_mem_pool);
2356 if (ndlp && (ndlp->nlp_flag & NLP_RM_DFLT_RPI))
2357 if (lpfc_nlp_not_used(ndlp)) {
2358 ndlp = NULL;
2359 /* Indicate the node has already been released;
2360 * it should not be referenced from within
2361 * the routine lpfc_els_free_iocb.
2363 cmdiocb->context1 = NULL;
2365 goto out;
2368 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
2369 "ELS rsp cmpl: status:x%x/x%x did:x%x",
2370 irsp->ulpStatus, irsp->un.ulpWord[4],
2371 cmdiocb->iocb.un.elsreq64.remoteID);
2372 /* ELS response tag <ulpIoTag> completes */
2373 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2374 "0110 ELS response tag x%x completes "
2375 "Data: x%x x%x x%x x%x x%x x%x x%x\n",
2376 cmdiocb->iocb.ulpIoTag, rspiocb->iocb.ulpStatus,
2377 rspiocb->iocb.un.ulpWord[4], rspiocb->iocb.ulpTimeout,
2378 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
2379 ndlp->nlp_rpi);
2380 if (mbox) {
2381 if ((rspiocb->iocb.ulpStatus == 0)
2382 && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
2383 lpfc_unreg_rpi(vport, ndlp);
2384 /* Increment reference count to ndlp to hold the
2385 * reference to ndlp for the callback function.
2387 mbox->context2 = lpfc_nlp_get(ndlp);
2388 mbox->vport = vport;
2389 if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) {
2390 mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
2391 mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
2393 else {
2394 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
2395 ndlp->nlp_prev_state = ndlp->nlp_state;
2396 lpfc_nlp_set_state(vport, ndlp,
2397 NLP_STE_REG_LOGIN_ISSUE);
2399 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
2400 != MBX_NOT_FINISHED)
2401 goto out;
2402 else
2403 /* Decrement the ndlp reference count we
2404 * set for this failed mailbox command.
2406 lpfc_nlp_put(ndlp);
2408 /* ELS rsp: Cannot issue reg_login for <NPortid> */
2409 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2410 "0138 ELS rsp: Cannot issue reg_login for x%x "
2411 "Data: x%x x%x x%x\n",
2412 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
2413 ndlp->nlp_rpi);
2415 if (lpfc_nlp_not_used(ndlp)) {
2416 ndlp = NULL;
2417 /* Indicate the node has already been released;
2418 * it should not be referenced from within
2419 * the routine lpfc_els_free_iocb.
2421 cmdiocb->context1 = NULL;
2423 } else {
2424 /* Do not drop node for lpfc_els_abort'ed ELS cmds */
2425 if (!lpfc_error_lost_link(irsp) &&
2426 ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
2427 if (lpfc_nlp_not_used(ndlp)) {
2428 ndlp = NULL;
2429 /* Indicate the node has already been
2430 * released; it should not be referenced
2431 * from within the routine
2432 * lpfc_els_free_iocb.
2434 cmdiocb->context1 = NULL;
2438 mp = (struct lpfc_dmabuf *) mbox->context1;
2439 if (mp) {
2440 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2441 kfree(mp);
2443 mempool_free(mbox, phba->mbox_mem_pool);
2445 out:
2446 if (ndlp) {
2447 spin_lock_irq(shost->host_lock);
2448 ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI);
2449 spin_unlock_irq(shost->host_lock);
2451 /* If the node is not being used by another discovery thread,
2452 * and we are sending a reject, we are done with it.
2453 * Release driver reference count here and free associated
2454 * resources.
2456 if (ls_rjt)
2457 if (lpfc_nlp_not_used(ndlp))
2458 /* Indicate the node has already been released;
2459 * it should not be referenced from within
2460 * the routine lpfc_els_free_iocb.
2462 cmdiocb->context1 = NULL;
2465 lpfc_els_free_iocb(phba, cmdiocb);
2466 return;
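/*
 * lpfc_els_rsp_acc - Build and transmit an ELS ACC response
 *
 * Prepares an ACC for the request identified by the flag argument (plain
 * ACC, a PLOGI ACC carrying our service parameters, or a PRLO ACC), copies
 * the payload, selects the completion handler (a LOGO ACC gets its own
 * handler), and issues the IOCB on the ELS ring.
 *
 * Returns 0 on success, 1 if the response could not be issued.
 */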
2470 lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
2471 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
2472 LPFC_MBOXQ_t *mbox)
2474 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2475 struct lpfc_hba *phba = vport->phba;
2476 IOCB_t *icmd;
2477 IOCB_t *oldcmd;
2478 struct lpfc_iocbq *elsiocb;
2479 struct lpfc_sli_ring *pring;
2480 struct lpfc_sli *psli;
2481 uint8_t *pcmd;
2482 uint16_t cmdsize;
2483 int rc;
2484 ELS_PKT *els_pkt_ptr;
2486 psli = &phba->sli;
2487 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
2488 oldcmd = &oldiocb->iocb;
2490 switch (flag) {
2491 case ELS_CMD_ACC:
2492 cmdsize = sizeof(uint32_t);
2493 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
2494 ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
2495 if (!elsiocb) {
2496 spin_lock_irq(shost->host_lock);
2497 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
2498 spin_unlock_irq(shost->host_lock);
2499 return 1;
2502 icmd = &elsiocb->iocb;
2503 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
2504 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2505 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
2506 pcmd += sizeof(uint32_t);
2508 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
2509 "Issue ACC: did:x%x flg:x%x",
2510 ndlp->nlp_DID, ndlp->nlp_flag, 0);
2511 break;
2512 case ELS_CMD_PLOGI:
2513 cmdsize = (sizeof(struct serv_parm) + sizeof(uint32_t));
2514 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
2515 ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
2516 if (!elsiocb)
2517 return 1;
2519 icmd = &elsiocb->iocb;
2520 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
2521 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2523 if (mbox)
2524 elsiocb->context_un.mbox = mbox;
2526 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
2527 pcmd += sizeof(uint32_t);
2528 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
2530 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
2531 "Issue ACC PLOGI: did:x%x flg:x%x",
2532 ndlp->nlp_DID, ndlp->nlp_flag, 0);
2533 break;
2534 case ELS_CMD_PRLO:
2535 cmdsize = sizeof(uint32_t) + sizeof(PRLO);
2536 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
2537 ndlp, ndlp->nlp_DID, ELS_CMD_PRLO);
2538 if (!elsiocb)
2539 return 1;
2541 icmd = &elsiocb->iocb;
2542 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
2543 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2545 memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt,
2546 sizeof(uint32_t) + sizeof(PRLO));
2547 *((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC;
2548 els_pkt_ptr = (ELS_PKT *) pcmd;
2549 els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED;
2551 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
2552 "Issue ACC PRLO: did:x%x flg:x%x",
2553 ndlp->nlp_DID, ndlp->nlp_flag, 0);
2554 break;
2555 default:
2556 return 1;
2558 /* Xmit ELS ACC response tag <ulpIoTag> */
2559 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2560 "0128 Xmit ELS ACC response tag x%x, XRI: x%x, "
2561 "DID: x%x, nlp_flag: x%x nlp_state: x%x RPI: x%x\n",
2562 elsiocb->iotag, elsiocb->iocb.ulpContext,
2563 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
2564 ndlp->nlp_rpi);
2565 if (ndlp->nlp_flag & NLP_LOGO_ACC) {
2566 spin_lock_irq(shost->host_lock);
2567 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
2568 spin_unlock_irq(shost->host_lock);
2569 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo_acc;
2570 } else {
2571 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
2574 phba->fc_stat.elsXmitACC++;
2575 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
2576 if (rc == IOCB_ERROR) {
2577 lpfc_els_free_iocb(phba, elsiocb);
2578 return 1;
2580 return 0;
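/*
 * lpfc_els_rsp_reject - Build and transmit an LS_RJT response
 *
 * Sends an LS_RJT carrying the supplied reject reason/explanation word for
 * the ELS request in oldiocb.  An optional mailbox may be attached for the
 * completion handler.  Returns 0 on success, 1 on failure.
 */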
2584 lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
2585 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
2586 LPFC_MBOXQ_t *mbox)
2588 struct lpfc_hba *phba = vport->phba;
2589 IOCB_t *icmd;
2590 IOCB_t *oldcmd;
2591 struct lpfc_iocbq *elsiocb;
2592 struct lpfc_sli_ring *pring;
2593 struct lpfc_sli *psli;
2594 uint8_t *pcmd;
2595 uint16_t cmdsize;
2596 int rc;
2598 psli = &phba->sli;
2599 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
2601 cmdsize = 2 * sizeof(uint32_t);
2602 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
2603 ndlp->nlp_DID, ELS_CMD_LS_RJT);
2604 if (!elsiocb)
2605 return 1;
2607 icmd = &elsiocb->iocb;
2608 oldcmd = &oldiocb->iocb;
2609 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
2610 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2612 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
2613 pcmd += sizeof(uint32_t);
2614 *((uint32_t *) (pcmd)) = rejectError;
2616 if (mbox)
2617 elsiocb->context_un.mbox = mbox;
2619 /* Xmit ELS RJT <err> response tag <ulpIoTag> */
2620 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2621 "0129 Xmit ELS RJT x%x response tag x%x "
2622 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
2623 "rpi x%x\n",
2624 rejectError, elsiocb->iotag,
2625 elsiocb->iocb.ulpContext, ndlp->nlp_DID,
2626 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
2627 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
2628 "Issue LS_RJT: did:x%x flg:x%x err:x%x",
2629 ndlp->nlp_DID, ndlp->nlp_flag, rejectError);
2631 phba->fc_stat.elsXmitLSRJT++;
2632 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
2633 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
2635 if (rc == IOCB_ERROR) {
2636 lpfc_els_free_iocb(phba, elsiocb);
2637 return 1;
2639 return 0;
2643 lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
2644 struct lpfc_nodelist *ndlp)
2646 struct lpfc_hba *phba = vport->phba;
2647 struct lpfc_sli *psli = &phba->sli;
2648 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
2649 ADISC *ap;
2650 IOCB_t *icmd, *oldcmd;
2651 struct lpfc_iocbq *elsiocb;
2652 uint8_t *pcmd;
2653 uint16_t cmdsize;
2654 int rc;
2656 cmdsize = sizeof(uint32_t) + sizeof(ADISC);
2657 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
2658 ndlp->nlp_DID, ELS_CMD_ACC);
2659 if (!elsiocb)
2660 return 1;
2662 icmd = &elsiocb->iocb;
2663 oldcmd = &oldiocb->iocb;
2664 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
2666 /* Xmit ADISC ACC response tag <ulpIoTag> */
2667 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2668 "0130 Xmit ADISC ACC response iotag x%x xri: "
2669 "x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n",
2670 elsiocb->iotag, elsiocb->iocb.ulpContext,
2671 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
2672 ndlp->nlp_rpi);
2673 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2675 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
2676 pcmd += sizeof(uint32_t);
2678 ap = (ADISC *) (pcmd);
2679 ap->hardAL_PA = phba->fc_pref_ALPA;
2680 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
2681 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
2682 ap->DID = be32_to_cpu(vport->fc_myDID);
2684 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
2685 "Issue ACC ADISC: did:x%x flg:x%x",
2686 ndlp->nlp_DID, ndlp->nlp_flag, 0);
2688 phba->fc_stat.elsXmitACC++;
2689 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
2690 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
2691 if (rc == IOCB_ERROR) {
2692 lpfc_els_free_iocb(phba, elsiocb);
2693 return 1;
2695 return 0;
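/*
 * lpfc_els_rsp_prli_acc - Build and transmit an ACC for a received PRLI
 *
 * Fills in the PRLI service parameter page (enabling the FC-TAPE related
 * bits when the firmware feature level indicates support) and issues the
 * ACC on the ELS ring.  Returns 0 on success, 1 on failure.
 */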
2699 lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
2700 struct lpfc_nodelist *ndlp)
2702 struct lpfc_hba *phba = vport->phba;
2703 PRLI *npr;
2704 lpfc_vpd_t *vpd;
2705 IOCB_t *icmd;
2706 IOCB_t *oldcmd;
2707 struct lpfc_iocbq *elsiocb;
2708 struct lpfc_sli_ring *pring;
2709 struct lpfc_sli *psli;
2710 uint8_t *pcmd;
2711 uint16_t cmdsize;
2712 int rc;
2714 psli = &phba->sli;
2715 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
2717 cmdsize = sizeof(uint32_t) + sizeof(PRLI);
2718 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
2719 ndlp->nlp_DID, (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK)));
2720 if (!elsiocb)
2721 return 1;
2723 icmd = &elsiocb->iocb;
2724 oldcmd = &oldiocb->iocb;
2725 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
2726 /* Xmit PRLI ACC response tag <ulpIoTag> */
2727 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2728 "0131 Xmit PRLI ACC response tag x%x xri x%x, "
2729 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
2730 elsiocb->iotag, elsiocb->iocb.ulpContext,
2731 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
2732 ndlp->nlp_rpi);
2733 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2735 *((uint32_t *) (pcmd)) = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK));
2736 pcmd += sizeof(uint32_t);
2738 /* For PRLI, remainder of payload is PRLI parameter page */
2739 memset(pcmd, 0, sizeof(PRLI));
2741 npr = (PRLI *) pcmd;
2742 vpd = &phba->vpd;
2744 * If our firmware version is 3.20 or later,
2745 * set the following bits for FC-TAPE support.
2747 if (vpd->rev.feaLevelHigh >= 0x02) {
2748 npr->ConfmComplAllowed = 1;
2749 npr->Retry = 1;
2750 npr->TaskRetryIdReq = 1;
2753 npr->acceptRspCode = PRLI_REQ_EXECUTED;
2754 npr->estabImagePair = 1;
2755 npr->readXferRdyDis = 1;
2756 npr->ConfmComplAllowed = 1;
2758 npr->prliType = PRLI_FCP_TYPE;
2759 npr->initiatorFunc = 1;
2761 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
2762 "Issue ACC PRLI: did:x%x flg:x%x",
2763 ndlp->nlp_DID, ndlp->nlp_flag, 0);
2765 phba->fc_stat.elsXmitACC++;
2766 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
2768 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
2769 if (rc == IOCB_ERROR) {
2770 lpfc_els_free_iocb(phba, elsiocb);
2771 return 1;
2773 return 0;
2776 static int
2777 lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
2778 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
2780 struct lpfc_hba *phba = vport->phba;
2781 RNID *rn;
2782 IOCB_t *icmd, *oldcmd;
2783 struct lpfc_iocbq *elsiocb;
2784 struct lpfc_sli_ring *pring;
2785 struct lpfc_sli *psli;
2786 uint8_t *pcmd;
2787 uint16_t cmdsize;
2788 int rc;
2790 psli = &phba->sli;
2791 pring = &psli->ring[LPFC_ELS_RING];
2793 cmdsize = sizeof(uint32_t) + sizeof(uint32_t)
2794 + (2 * sizeof(struct lpfc_name));
2795 if (format)
2796 cmdsize += sizeof(RNID_TOP_DISC);
2798 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
2799 ndlp->nlp_DID, ELS_CMD_ACC);
2800 if (!elsiocb)
2801 return 1;
2803 icmd = &elsiocb->iocb;
2804 oldcmd = &oldiocb->iocb;
2805 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
2806 /* Xmit RNID ACC response tag <ulpIoTag> */
2807 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2808 "0132 Xmit RNID ACC response tag x%x xri x%x\n",
2809 elsiocb->iotag, elsiocb->iocb.ulpContext);
2810 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2811 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
2812 pcmd += sizeof(uint32_t);
2814 memset(pcmd, 0, sizeof(RNID));
2815 rn = (RNID *) (pcmd);
2816 rn->Format = format;
2817 rn->CommonLen = (2 * sizeof(struct lpfc_name));
2818 memcpy(&rn->portName, &vport->fc_portname, sizeof(struct lpfc_name));
2819 memcpy(&rn->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
2820 switch (format) {
2821 case 0:
2822 rn->SpecificLen = 0;
2823 break;
2824 case RNID_TOPOLOGY_DISC:
2825 rn->SpecificLen = sizeof(RNID_TOP_DISC);
2826 memcpy(&rn->un.topologyDisc.portName,
2827 &vport->fc_portname, sizeof(struct lpfc_name));
2828 rn->un.topologyDisc.unitType = RNID_HBA;
2829 rn->un.topologyDisc.physPort = 0;
2830 rn->un.topologyDisc.attachedNodes = 0;
2831 break;
2832 default:
2833 rn->CommonLen = 0;
2834 rn->SpecificLen = 0;
2835 break;
2838 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
2839 "Issue ACC RNID: did:x%x flg:x%x",
2840 ndlp->nlp_DID, ndlp->nlp_flag, 0);
2842 phba->fc_stat.elsXmitACC++;
2843 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
2844 lpfc_nlp_put(ndlp);
2845 elsiocb->context1 = NULL; /* Don't need ndlp for cmpl,
2846 * it could be freed */
2848 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
2849 if (rc == IOCB_ERROR) {
2850 lpfc_els_free_iocb(phba, elsiocb);
2851 return 1;
2853 return 0;
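/*
 * lpfc_els_disc_adisc - Issue ADISCs to NPR nodes awaiting rediscovery
 *
 * Walks the vport node list and issues an ADISC to each NPR node marked
 * NLP_NPR_2B_DISC and NLP_NPR_ADISC, stopping once the configured number
 * of concurrent discovery threads is reached (FC_NLP_MORE is set so the
 * remainder are picked up later).  Returns the number of ADISCs sent.
 */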
2857 lpfc_els_disc_adisc(struct lpfc_vport *vport)
2859 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2860 struct lpfc_nodelist *ndlp, *next_ndlp;
2861 int sentadisc = 0;
2863 /* go thru NPR nodes and issue any remaining ELS ADISCs */
2864 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
2865 if (!NLP_CHK_NODE_ACT(ndlp))
2866 continue;
2867 if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
2868 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
2869 (ndlp->nlp_flag & NLP_NPR_ADISC) != 0) {
2870 spin_lock_irq(shost->host_lock);
2871 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2872 spin_unlock_irq(shost->host_lock);
2873 ndlp->nlp_prev_state = ndlp->nlp_state;
2874 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
2875 lpfc_issue_els_adisc(vport, ndlp, 0);
2876 sentadisc++;
2877 vport->num_disc_nodes++;
2878 if (vport->num_disc_nodes >=
2879 vport->cfg_discovery_threads) {
2880 spin_lock_irq(shost->host_lock);
2881 vport->fc_flag |= FC_NLP_MORE;
2882 spin_unlock_irq(shost->host_lock);
2883 break;
2887 if (sentadisc == 0) {
2888 spin_lock_irq(shost->host_lock);
2889 vport->fc_flag &= ~FC_NLP_MORE;
2890 spin_unlock_irq(shost->host_lock);
2892 return sentadisc;
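/*
 * lpfc_els_disc_plogi - Issue PLOGIs to NPR nodes awaiting rediscovery
 *
 * Walks the vport node list and issues a PLOGI to each NPR node marked
 * NLP_NPR_2B_DISC that is not using ADISC and has no delay timer pending,
 * up to the configured discovery thread limit.  Returns the number of
 * PLOGIs sent.
 */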
2896 lpfc_els_disc_plogi(struct lpfc_vport *vport)
2898 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2899 struct lpfc_nodelist *ndlp, *next_ndlp;
2900 int sentplogi = 0;
2902 /* go thru NPR nodes and issue any remaining ELS PLOGIs */
2903 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
2904 if (!NLP_CHK_NODE_ACT(ndlp))
2905 continue;
2906 if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
2907 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
2908 (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 &&
2909 (ndlp->nlp_flag & NLP_NPR_ADISC) == 0) {
2910 ndlp->nlp_prev_state = ndlp->nlp_state;
2911 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
2912 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
2913 sentplogi++;
2914 vport->num_disc_nodes++;
2915 if (vport->num_disc_nodes >=
2916 vport->cfg_discovery_threads) {
2917 spin_lock_irq(shost->host_lock);
2918 vport->fc_flag |= FC_NLP_MORE;
2919 spin_unlock_irq(shost->host_lock);
2920 break;
2924 if (sentplogi) {
2925 lpfc_set_disctmo(vport);
2927 else {
2928 spin_lock_irq(shost->host_lock);
2929 vport->fc_flag &= ~FC_NLP_MORE;
2930 spin_unlock_irq(shost->host_lock);
2932 return sentplogi;
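/*
 * lpfc_els_flush_rscn - Discard all pending RSCN payloads on a vport
 *
 * Takes the fc_rscn_flush token, frees every buffer on fc_rscn_id_list,
 * clears the RSCN mode/discovery flags, and calls lpfc_can_disctmo()
 * before releasing the token.
 */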
2935 void
2936 lpfc_els_flush_rscn(struct lpfc_vport *vport)
2938 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2939 struct lpfc_hba *phba = vport->phba;
2940 int i;
2942 spin_lock_irq(shost->host_lock);
2943 if (vport->fc_rscn_flush) {
2944 /* Another thread is walking fc_rscn_id_list on this vport */
2945 spin_unlock_irq(shost->host_lock);
2946 return;
2948 /* Indicate we are walking lpfc_els_flush_rscn on this vport */
2949 vport->fc_rscn_flush = 1;
2950 spin_unlock_irq(shost->host_lock);
2952 for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
2953 lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]);
2954 vport->fc_rscn_id_list[i] = NULL;
2956 spin_lock_irq(shost->host_lock);
2957 vport->fc_rscn_id_cnt = 0;
2958 vport->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY);
2959 spin_unlock_irq(shost->host_lock);
2960 lpfc_can_disctmo(vport);
2961 /* Indicate we are done walking this fc_rscn_id_list */
2962 vport->fc_rscn_flush = 0;
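/*
 * lpfc_rscn_payload_check - Test whether a DID matches a pending RSCN
 *
 * Compares the DID against every entry in the vport's deferred RSCN
 * payloads, honouring the address format (single port, area, domain or
 * full fabric).  Fabric DIDs never match.  Returns the DID on a match (or
 * unconditionally during a full FC_RSCN_DISCOVERY rediscovery), 0
 * otherwise or when another thread already holds fc_rscn_flush.
 */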
2966 lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did)
2968 D_ID ns_did;
2969 D_ID rscn_did;
2970 uint32_t *lp;
2971 uint32_t payload_len, i;
2972 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2974 ns_did.un.word = did;
2976 /* Never match fabric nodes for RSCNs */
2977 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
2978 return 0;
2980 /* If we are doing a FULL RSCN rediscovery, match everything */
2981 if (vport->fc_flag & FC_RSCN_DISCOVERY)
2982 return did;
2984 spin_lock_irq(shost->host_lock);
2985 if (vport->fc_rscn_flush) {
2986 /* Another thread is walking fc_rscn_id_list on this vport */
2987 spin_unlock_irq(shost->host_lock);
2988 return 0;
2990 /* Indicate we are walking fc_rscn_id_list on this vport */
2991 vport->fc_rscn_flush = 1;
2992 spin_unlock_irq(shost->host_lock);
2993 for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
2994 lp = vport->fc_rscn_id_list[i]->virt;
2995 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
2996 payload_len -= sizeof(uint32_t); /* take off word 0 */
2997 while (payload_len) {
2998 rscn_did.un.word = be32_to_cpu(*lp++);
2999 payload_len -= sizeof(uint32_t);
3000 switch (rscn_did.un.b.resv) {
3001 case 0: /* Single N_Port ID affected */
3002 if (ns_did.un.word == rscn_did.un.word)
3003 goto return_did_out;
3004 break;
3005 case 1: /* Whole N_Port Area affected */
3006 if ((ns_did.un.b.domain == rscn_did.un.b.domain)
3007 && (ns_did.un.b.area == rscn_did.un.b.area))
3008 goto return_did_out;
3009 break;
3010 case 2: /* Whole N_Port Domain affected */
3011 if (ns_did.un.b.domain == rscn_did.un.b.domain)
3012 goto return_did_out;
3013 break;
3014 default:
3015 /* Unknown Identifier in RSCN node */
3016 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
3017 "0217 Unknown Identifier in "
3018 "RSCN payload Data: x%x\n",
3019 rscn_did.un.word);
3020 case 3: /* Whole Fabric affected */
3021 goto return_did_out;
3025 /* Indicate we are done with walking fc_rscn_id_list on this vport */
3026 vport->fc_rscn_flush = 0;
3027 return 0;
3028 return_did_out:
3029 /* Indicate we are done with walking fc_rscn_id_list on this vport */
3030 vport->fc_rscn_flush = 0;
3031 return did;
3034 static int
3035 lpfc_rscn_recovery_check(struct lpfc_vport *vport)
3037 struct lpfc_nodelist *ndlp = NULL;
3039 /* Look at all nodes affected by pending RSCNs and move
3040 * them to NPR state.
3043 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
3044 if (!NLP_CHK_NODE_ACT(ndlp) ||
3045 ndlp->nlp_state == NLP_STE_UNUSED_NODE ||
3046 lpfc_rscn_payload_check(vport, ndlp->nlp_DID) == 0)
3047 continue;
3049 lpfc_disc_state_machine(vport, ndlp, NULL,
3050 NLP_EVT_DEVICE_RECOVERY);
3053 * Make sure NLP_DELAY_TMO is NOT running after a device
3054 * recovery event.
3056 if (ndlp->nlp_flag & NLP_DELAY_TMO)
3057 lpfc_cancel_retry_delay_tmo(vport, ndlp);
3060 return 0;
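/*
 * lpfc_els_rcv_rscn - Process a received RSCN ELS command
 *
 * ACCs the RSCN and either ignores it (discovery not yet started, or all
 * affected NPortIDs belong to other vports on this HBA), defers it by
 * queueing the payload on fc_rscn_id_list when RSCN processing is already
 * active, or starts handling it via lpfc_els_handle_rscn().  Nodes that
 * match the RSCN payload are driven through a DEVICE_RECOVERY event.
 */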
3063 static int
3064 lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3065 struct lpfc_nodelist *ndlp)
3067 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3068 struct lpfc_hba *phba = vport->phba;
3069 struct lpfc_dmabuf *pcmd;
3070 uint32_t *lp, *datap;
3071 IOCB_t *icmd;
3072 uint32_t payload_len, length, nportid, *cmd;
3073 int rscn_cnt;
3074 int rscn_id = 0, hba_id = 0;
3075 int i;
3077 icmd = &cmdiocb->iocb;
3078 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
3079 lp = (uint32_t *) pcmd->virt;
3081 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
3082 payload_len -= sizeof(uint32_t); /* take off word 0 */
3083 /* RSCN received */
3084 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
3085 "0214 RSCN received Data: x%x x%x x%x x%x\n",
3086 vport->fc_flag, payload_len, *lp,
3087 vport->fc_rscn_id_cnt);
3088 for (i = 0; i < payload_len/sizeof(uint32_t); i++)
3089 fc_host_post_event(shost, fc_get_event_number(),
3090 FCH_EVT_RSCN, lp[i]);
3092 /* If we are about to begin discovery, just ACC the RSCN.
3093 * Discovery processing will satisfy it.
3095 if (vport->port_state <= LPFC_NS_QRY) {
3096 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
3097 "RCV RSCN ignore: did:x%x/ste:x%x flg:x%x",
3098 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
3100 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
3101 return 0;
3104 /* If this RSCN just contains NPortIDs for other vports on this HBA,
3105 * just ACC and ignore it.
3107 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
3108 !(vport->cfg_peer_port_login)) {
3109 i = payload_len;
3110 datap = lp;
3111 while (i > 0) {
3112 nportid = *datap++;
3113 nportid = ((be32_to_cpu(nportid)) & Mask_DID);
3114 i -= sizeof(uint32_t);
3115 rscn_id++;
3116 if (lpfc_find_vport_by_did(phba, nportid))
3117 hba_id++;
3119 if (rscn_id == hba_id) {
3120 /* ALL NPortIDs in RSCN are on HBA */
3121 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
3122 "0214 Ignore RSCN "
3123 "Data: x%x x%x x%x x%x\n",
3124 vport->fc_flag, payload_len,
3125 *lp, vport->fc_rscn_id_cnt);
3126 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
3127 "RCV RSCN vport: did:x%x/ste:x%x flg:x%x",
3128 ndlp->nlp_DID, vport->port_state,
3129 ndlp->nlp_flag);
3131 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb,
3132 ndlp, NULL);
3133 return 0;
3137 spin_lock_irq(shost->host_lock);
3138 if (vport->fc_rscn_flush) {
3139 /* Another thread is walking fc_rscn_id_list on this vport */
3140 spin_unlock_irq(shost->host_lock);
3141 vport->fc_flag |= FC_RSCN_DISCOVERY;
3142 return 0;
3144 /* Indicate we are walking fc_rscn_id_list on this vport */
3145 vport->fc_rscn_flush = 1;
3146 spin_unlock_irq(shost->host_lock);
3147 /* Get the array count after successfully acquiring the token */
3148 rscn_cnt = vport->fc_rscn_id_cnt;
3149 /* If we are already processing an RSCN, save the received
3150 * RSCN payload buffer, cmdiocb->context2 to process later.
3152 if (vport->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) {
3153 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
3154 "RCV RSCN defer: did:x%x/ste:x%x flg:x%x",
3155 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
3157 spin_lock_irq(shost->host_lock);
3158 vport->fc_flag |= FC_RSCN_DEFERRED;
3159 if ((rscn_cnt < FC_MAX_HOLD_RSCN) &&
3160 !(vport->fc_flag & FC_RSCN_DISCOVERY)) {
3161 vport->fc_flag |= FC_RSCN_MODE;
3162 spin_unlock_irq(shost->host_lock);
3163 if (rscn_cnt) {
3164 cmd = vport->fc_rscn_id_list[rscn_cnt-1]->virt;
3165 length = be32_to_cpu(*cmd & ~ELS_CMD_MASK);
3167 if ((rscn_cnt) &&
3168 (payload_len + length <= LPFC_BPL_SIZE)) {
3169 *cmd &= ELS_CMD_MASK;
3170 *cmd |= cpu_to_be32(payload_len + length);
3171 memcpy(((uint8_t *)cmd) + length, lp,
3172 payload_len);
3173 } else {
3174 vport->fc_rscn_id_list[rscn_cnt] = pcmd;
3175 vport->fc_rscn_id_cnt++;
3176 /* If we zero cmdiocb->context2, the calling
3177 * routine will not try to free it.
3179 cmdiocb->context2 = NULL;
3181 /* Deferred RSCN */
3182 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
3183 "0235 Deferred RSCN "
3184 "Data: x%x x%x x%x\n",
3185 vport->fc_rscn_id_cnt, vport->fc_flag,
3186 vport->port_state);
3187 } else {
3188 vport->fc_flag |= FC_RSCN_DISCOVERY;
3189 spin_unlock_irq(shost->host_lock);
3190 /* ReDiscovery RSCN */
3191 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
3192 "0234 ReDiscovery RSCN "
3193 "Data: x%x x%x x%x\n",
3194 vport->fc_rscn_id_cnt, vport->fc_flag,
3195 vport->port_state);
3197 /* Indicate we are done walking fc_rscn_id_list on this vport */
3198 vport->fc_rscn_flush = 0;
3199 /* Send back ACC */
3200 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
3201 /* send RECOVERY event for ALL nodes that match RSCN payload */
3202 lpfc_rscn_recovery_check(vport);
3203 spin_lock_irq(shost->host_lock);
3204 vport->fc_flag &= ~FC_RSCN_DEFERRED;
3205 spin_unlock_irq(shost->host_lock);
3206 return 0;
3208 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
3209 "RCV RSCN: did:x%x/ste:x%x flg:x%x",
3210 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
3212 spin_lock_irq(shost->host_lock);
3213 vport->fc_flag |= FC_RSCN_MODE;
3214 spin_unlock_irq(shost->host_lock);
3215 vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd;
3216 /* Indicate we are done walking fc_rscn_id_list on this vport */
3217 vport->fc_rscn_flush = 0;
3219 * If we zero cmdiocb->context2, the calling routine will
3220 * not try to free it.
3222 cmdiocb->context2 = NULL;
3223 lpfc_set_disctmo(vport);
3224 /* Send back ACC */
3225 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
3226 /* send RECOVERY event for ALL nodes that match RSCN payload */
3227 lpfc_rscn_recovery_check(vport);
3228 return lpfc_els_handle_rscn(vport);
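/*
 * lpfc_els_handle_rscn - Begin discovery for a pending RSCN
 *
 * Starts the discovery timer and issues a GID_FT NameServer query, or a
 * PLOGI to the NameServer first if no usable login exists.  Returns 1
 * while waiting for the NameServer exchange to complete, 0 if the RSCN
 * was flushed instead.
 */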
3232 lpfc_els_handle_rscn(struct lpfc_vport *vport)
3234 struct lpfc_nodelist *ndlp;
3235 struct lpfc_hba *phba = vport->phba;
3237 /* Ignore RSCN if the port is being torn down. */
3238 if (vport->load_flag & FC_UNLOADING) {
3239 lpfc_els_flush_rscn(vport);
3240 return 0;
3243 /* Start timer for RSCN processing */
3244 lpfc_set_disctmo(vport);
3246 /* RSCN processed */
3247 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
3248 "0215 RSCN processed Data: x%x x%x x%x x%x\n",
3249 vport->fc_flag, 0, vport->fc_rscn_id_cnt,
3250 vport->port_state);
3252 /* To process RSCN, first compare RSCN data with NameServer */
3253 vport->fc_ns_retry = 0;
3254 vport->num_disc_nodes = 0;
3256 ndlp = lpfc_findnode_did(vport, NameServer_DID);
3257 if (ndlp && NLP_CHK_NODE_ACT(ndlp)
3258 && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
3259 /* Good ndlp, issue CT Request to NameServer */
3260 if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0) == 0)
3261 /* Wait for NameServer query cmpl before we can
3262 continue */
3263 return 1;
3264 } else {
3265 /* If login to NameServer does not exist, issue one */
3266 /* Good status, issue PLOGI to NameServer */
3267 ndlp = lpfc_findnode_did(vport, NameServer_DID);
3268 if (ndlp && NLP_CHK_NODE_ACT(ndlp))
3269 /* Wait for NameServer login cmpl before we can
3270 continue */
3271 return 1;
3273 if (ndlp) {
3274 ndlp = lpfc_enable_node(vport, ndlp,
3275 NLP_STE_PLOGI_ISSUE);
3276 if (!ndlp) {
3277 lpfc_els_flush_rscn(vport);
3278 return 0;
3280 ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
3281 } else {
3282 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
3283 if (!ndlp) {
3284 lpfc_els_flush_rscn(vport);
3285 return 0;
3287 lpfc_nlp_init(vport, ndlp, NameServer_DID);
3288 ndlp->nlp_prev_state = ndlp->nlp_state;
3289 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
3291 ndlp->nlp_type |= NLP_FABRIC;
3292 lpfc_issue_els_plogi(vport, NameServer_DID, 0);
3293 /* Wait for NameServer login cmpl before we can
3294 * continue
3296 return 1;
3299 lpfc_els_flush_rscn(vport);
3300 return 0;
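/*
 * lpfc_els_rcv_flogi - Process a received FLOGI (point-to-point login)
 *
 * Ignores FLOGIs received in loop mode and rejects ones with invalid
 * service parameters.  Otherwise port names are compared to decide which
 * side initiates PLOGI: equal names force a link re-init, while a greater
 * local name sets FC_PT2PT_PLOGI.  The port is marked FC_PT2PT and an ACC
 * carrying our service parameters is returned.
 */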
3303 static int
3304 lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3305 struct lpfc_nodelist *ndlp)
3307 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3308 struct lpfc_hba *phba = vport->phba;
3309 struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
3310 uint32_t *lp = (uint32_t *) pcmd->virt;
3311 IOCB_t *icmd = &cmdiocb->iocb;
3312 struct serv_parm *sp;
3313 LPFC_MBOXQ_t *mbox;
3314 struct ls_rjt stat;
3315 uint32_t cmd, did;
3316 int rc;
3318 cmd = *lp++;
3319 sp = (struct serv_parm *) lp;
3321 /* FLOGI received */
3323 lpfc_set_disctmo(vport);
3325 if (phba->fc_topology == TOPOLOGY_LOOP) {
3326 /* We should never receive a FLOGI in loop mode, ignore it */
3327 did = icmd->un.elsreq64.remoteID;
3329 /* An FLOGI ELS command <elsCmd> was received from DID <did> in
3330 Loop Mode */
3331 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
3332 "0113 An FLOGI ELS command x%x was "
3333 "received from DID x%x in Loop Mode\n",
3334 cmd, did);
3335 return 1;
3338 did = Fabric_DID;
3340 if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3))) {
3341 /* For a FLOGI we accept, if our portname is greater
3342 * than the remote portname we initiate Nport login.
3345 rc = memcmp(&vport->fc_portname, &sp->portName,
3346 sizeof(struct lpfc_name));
3348 if (!rc) {
3349 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3350 if (!mbox)
3351 return 1;
3353 lpfc_linkdown(phba);
3354 lpfc_init_link(phba, mbox,
3355 phba->cfg_topology,
3356 phba->cfg_link_speed);
3357 mbox->mb.un.varInitLnk.lipsr_AL_PA = 0;
3358 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
3359 mbox->vport = vport;
3360 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
3361 lpfc_set_loopback_flag(phba);
3362 if (rc == MBX_NOT_FINISHED) {
3363 mempool_free(mbox, phba->mbox_mem_pool);
3365 return 1;
3366 } else if (rc > 0) { /* greater than */
3367 spin_lock_irq(shost->host_lock);
3368 vport->fc_flag |= FC_PT2PT_PLOGI;
3369 spin_unlock_irq(shost->host_lock);
3371 spin_lock_irq(shost->host_lock);
3372 vport->fc_flag |= FC_PT2PT;
3373 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
3374 spin_unlock_irq(shost->host_lock);
3375 } else {
3376 /* Reject this request because of invalid parameters */
3377 stat.un.b.lsRjtRsvd0 = 0;
3378 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
3379 stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
3380 stat.un.b.vendorUnique = 0;
3381 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
3382 NULL);
3383 return 1;
3386 /* Send back ACC */
3387 lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL);
3389 return 0;
3392 static int
3393 lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3394 struct lpfc_nodelist *ndlp)
3396 struct lpfc_dmabuf *pcmd;
3397 uint32_t *lp;
3398 IOCB_t *icmd;
3399 RNID *rn;
3400 struct ls_rjt stat;
3401 uint32_t cmd, did;
3403 icmd = &cmdiocb->iocb;
3404 did = icmd->un.elsreq64.remoteID;
3405 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
3406 lp = (uint32_t *) pcmd->virt;
3408 cmd = *lp++;
3409 rn = (RNID *) lp;
3411 /* RNID received */
3413 switch (rn->Format) {
3414 case 0:
3415 case RNID_TOPOLOGY_DISC:
3416 /* Send back ACC */
3417 lpfc_els_rsp_rnid_acc(vport, rn->Format, cmdiocb, ndlp);
3418 break;
3419 default:
3420 /* Reject this request because the format is not supported */
3421 stat.un.b.lsRjtRsvd0 = 0;
3422 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
3423 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
3424 stat.un.b.vendorUnique = 0;
3425 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
3426 NULL);
3428 return 0;
3431 static int
3432 lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3433 struct lpfc_nodelist *ndlp)
3435 struct ls_rjt stat;
3437 /* For now, unconditionally reject this command */
3438 stat.un.b.lsRjtRsvd0 = 0;
3439 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
3440 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
3441 stat.un.b.vendorUnique = 0;
3442 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
3443 return 0;
3446 static void
3447 lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3449 struct lpfc_sli *psli = &phba->sli;
3450 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
3451 MAILBOX_t *mb;
3452 IOCB_t *icmd;
3453 RPS_RSP *rps_rsp;
3454 uint8_t *pcmd;
3455 struct lpfc_iocbq *elsiocb;
3456 struct lpfc_nodelist *ndlp;
3457 uint16_t xri, status;
3458 uint32_t cmdsize;
3460 mb = &pmb->mb;
3462 ndlp = (struct lpfc_nodelist *) pmb->context2;
3463 xri = (uint16_t) ((unsigned long)(pmb->context1));
3464 pmb->context1 = NULL;
3465 pmb->context2 = NULL;
3467 if (mb->mbxStatus) {
3468 mempool_free(pmb, phba->mbox_mem_pool);
3469 return;
3472 cmdsize = sizeof(RPS_RSP) + sizeof(uint32_t);
3473 mempool_free(pmb, phba->mbox_mem_pool);
3474 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
3475 lpfc_max_els_tries, ndlp,
3476 ndlp->nlp_DID, ELS_CMD_ACC);
3478 /* Decrement the ndlp reference count from previous mbox command */
3479 lpfc_nlp_put(ndlp);
3481 if (!elsiocb)
3482 return;
3484 icmd = &elsiocb->iocb;
3485 icmd->ulpContext = xri;
3487 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3488 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
3489 pcmd += sizeof(uint32_t); /* Skip past command */
3490 rps_rsp = (RPS_RSP *)pcmd;
3492 if (phba->fc_topology != TOPOLOGY_LOOP)
3493 status = 0x10;
3494 else
3495 status = 0x8;
3496 if (phba->pport->fc_flag & FC_FABRIC)
3497 status |= 0x4;
3499 rps_rsp->rsvd1 = 0;
3500 rps_rsp->portStatus = cpu_to_be16(status);
3501 rps_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt);
3502 rps_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt);
3503 rps_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt);
3504 rps_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt);
3505 rps_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord);
3506 rps_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt);
3507 /* Xmit ELS RPS ACC response tag <ulpIoTag> */
3508 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
3509 "0118 Xmit ELS RPS ACC response tag x%x xri x%x, "
3510 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
3511 elsiocb->iotag, elsiocb->iocb.ulpContext,
3512 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
3513 ndlp->nlp_rpi);
3514 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
3515 phba->fc_stat.elsXmitACC++;
3516 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR)
3517 lpfc_els_free_iocb(phba, elsiocb);
3518 return;
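/*
 * lpfc_els_rcv_rps - Process a received RPS (Read Port Status) command
 *
 * Rejects the request if the node is not logged in or the RPS does not
 * address this port; otherwise issues a READ_LNK_STAT mailbox command
 * whose completion handler (lpfc_els_rsp_rps_acc) builds and sends the
 * ACC containing the link statistics.
 */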
3521 static int
3522 lpfc_els_rcv_rps(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3523 struct lpfc_nodelist *ndlp)
3525 struct lpfc_hba *phba = vport->phba;
3526 uint32_t *lp;
3527 uint8_t flag;
3528 LPFC_MBOXQ_t *mbox;
3529 struct lpfc_dmabuf *pcmd;
3530 RPS *rps;
3531 struct ls_rjt stat;
3533 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
3534 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
3535 stat.un.b.lsRjtRsvd0 = 0;
3536 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
3537 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
3538 stat.un.b.vendorUnique = 0;
3539 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
3540 NULL);
3543 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
3544 lp = (uint32_t *) pcmd->virt;
3545 flag = (be32_to_cpu(*lp++) & 0xf);
3546 rps = (RPS *) lp;
3548 if ((flag == 0) ||
3549 ((flag == 1) && (be32_to_cpu(rps->un.portNum) == 0)) ||
3550 ((flag == 2) && (memcmp(&rps->un.portName, &vport->fc_portname,
3551 sizeof(struct lpfc_name)) == 0))) {
3553 printk("Fix me....\n");
3554 dump_stack();
3555 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
3556 if (mbox) {
3557 lpfc_read_lnk_stat(phba, mbox);
3558 mbox->context1 =
3559 (void *)((unsigned long) cmdiocb->iocb.ulpContext);
3560 mbox->context2 = lpfc_nlp_get(ndlp);
3561 mbox->vport = vport;
3562 mbox->mbox_cmpl = lpfc_els_rsp_rps_acc;
3563 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
3564 != MBX_NOT_FINISHED)
3565 /* Mbox completion will send ELS Response */
3566 return 0;
3567 /* Decrement reference count used for the failed mbox
3568 * command.
3570 lpfc_nlp_put(ndlp);
3571 mempool_free(mbox, phba->mbox_mem_pool);
3574 stat.un.b.lsRjtRsvd0 = 0;
3575 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
3576 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
3577 stat.un.b.vendorUnique = 0;
3578 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
3579 return 0;
3582 static int
3583 lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
3584 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
3586 struct lpfc_hba *phba = vport->phba;
3587 IOCB_t *icmd, *oldcmd;
3588 RPL_RSP rpl_rsp;
3589 struct lpfc_iocbq *elsiocb;
3590 struct lpfc_sli *psli = &phba->sli;
3591 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
3592 uint8_t *pcmd;
3594 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
3595 ndlp->nlp_DID, ELS_CMD_ACC);
3597 if (!elsiocb)
3598 return 1;
3600 icmd = &elsiocb->iocb;
3601 oldcmd = &oldiocb->iocb;
3602 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
3604 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3605 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
3606 pcmd += sizeof(uint16_t);
3607 *((uint16_t *)(pcmd)) = be16_to_cpu(cmdsize);
3608 pcmd += sizeof(uint16_t);
3610 /* Setup the RPL ACC payload */
3611 rpl_rsp.listLen = be32_to_cpu(1);
3612 rpl_rsp.index = 0;
3613 rpl_rsp.port_num_blk.portNum = 0;
3614 rpl_rsp.port_num_blk.portID = be32_to_cpu(vport->fc_myDID);
3615 memcpy(&rpl_rsp.port_num_blk.portName, &vport->fc_portname,
3616 sizeof(struct lpfc_name));
3617 memcpy(pcmd, &rpl_rsp, cmdsize - sizeof(uint32_t));
3618 /* Xmit ELS RPL ACC response tag <ulpIoTag> */
3619 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3620 "0120 Xmit ELS RPL ACC response tag x%x "
3621 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
3622 "rpi x%x\n",
3623 elsiocb->iotag, elsiocb->iocb.ulpContext,
3624 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
3625 ndlp->nlp_rpi);
3626 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
3627 phba->fc_stat.elsXmitACC++;
3628 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
3629 lpfc_els_free_iocb(phba, elsiocb);
3630 return 1;
3632 return 0;
3635 static int
3636 lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3637 struct lpfc_nodelist *ndlp)
3639 struct lpfc_dmabuf *pcmd;
3640 uint32_t *lp;
3641 uint32_t maxsize;
3642 uint16_t cmdsize;
3643 RPL *rpl;
3644 struct ls_rjt stat;
3646 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
3647 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
3648 stat.un.b.lsRjtRsvd0 = 0;
3649 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
3650 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
3651 stat.un.b.vendorUnique = 0;
3652 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
3653 NULL);
3656 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
3657 lp = (uint32_t *) pcmd->virt;
3658 rpl = (RPL *) (lp + 1);
3660 maxsize = be32_to_cpu(rpl->maxsize);
3662 /* We support only one port */
3663 if ((rpl->index == 0) &&
3664 ((maxsize == 0) ||
3665 ((maxsize * sizeof(uint32_t)) >= sizeof(RPL_RSP)))) {
3666 cmdsize = sizeof(uint32_t) + sizeof(RPL_RSP);
3667 } else {
3668 cmdsize = sizeof(uint32_t) + maxsize * sizeof(uint32_t);
3670 lpfc_els_rsp_rpl_acc(vport, cmdsize, cmdiocb, ndlp);
3672 return 0;
3675 static int
3676 lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3677 struct lpfc_nodelist *ndlp)
3679 struct lpfc_dmabuf *pcmd;
3680 uint32_t *lp;
3681 IOCB_t *icmd;
3682 FARP *fp;
3683 uint32_t cmd, cnt, did;
3685 icmd = &cmdiocb->iocb;
3686 did = icmd->un.elsreq64.remoteID;
3687 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
3688 lp = (uint32_t *) pcmd->virt;
3690 cmd = *lp++;
3691 fp = (FARP *) lp;
3692 /* FARP-REQ received from DID <did> */
3693 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3694 "0601 FARP-REQ received from DID x%x\n", did);
3695 /* We will only support match on WWPN or WWNN */
3696 if (fp->Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT)) {
3697 return 0;
3700 cnt = 0;
3701 /* If this FARP command is searching for my portname */
3702 if (fp->Mflags & FARP_MATCH_PORT) {
3703 if (memcmp(&fp->RportName, &vport->fc_portname,
3704 sizeof(struct lpfc_name)) == 0)
3705 cnt = 1;
3708 /* If this FARP command is searching for my nodename */
3709 if (fp->Mflags & FARP_MATCH_NODE) {
3710 if (memcmp(&fp->RnodeName, &vport->fc_nodename,
3711 sizeof(struct lpfc_name)) == 0)
3712 cnt = 1;
3715 if (cnt) {
3716 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) ||
3717 (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
3718 /* Log back into the node before sending the FARP. */
3719 if (fp->Rflags & FARP_REQUEST_PLOGI) {
3720 ndlp->nlp_prev_state = ndlp->nlp_state;
3721 lpfc_nlp_set_state(vport, ndlp,
3722 NLP_STE_PLOGI_ISSUE);
3723 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
3726 /* Send a FARP response to that node */
3727 if (fp->Rflags & FARP_REQUEST_FARPR)
3728 lpfc_issue_els_farpr(vport, did, 0);
3731 return 0;
3734 static int
3735 lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3736 struct lpfc_nodelist *ndlp)
3738 struct lpfc_dmabuf *pcmd;
3739 uint32_t *lp;
3740 IOCB_t *icmd;
3741 uint32_t cmd, did;
3743 icmd = &cmdiocb->iocb;
3744 did = icmd->un.elsreq64.remoteID;
3745 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
3746 lp = (uint32_t *) pcmd->virt;
3748 cmd = *lp++;
3749 /* FARP-RSP received from DID <did> */
3750 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3751 "0600 FARP-RSP received from DID x%x\n", did);
3752 /* ACCEPT the Farp resp request */
3753 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
3755 return 0;
3758 static int
3759 lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3760 struct lpfc_nodelist *fan_ndlp)
3762 struct lpfc_dmabuf *pcmd;
3763 uint32_t *lp;
3764 IOCB_t *icmd;
3765 uint32_t cmd, did;
3766 FAN *fp;
3767 struct lpfc_nodelist *ndlp, *next_ndlp;
3768 struct lpfc_hba *phba = vport->phba;
3770 /* FAN received */
3771 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3772 "0265 FAN received\n");
3773 icmd = &cmdiocb->iocb;
3774 did = icmd->un.elsreq64.remoteID;
3775 pcmd = (struct lpfc_dmabuf *)cmdiocb->context2;
3776 lp = (uint32_t *)pcmd->virt;
3778 cmd = *lp++;
3779 fp = (FAN *) lp;
3781 /* FAN received; Fan does not have a reply sequence */
3783 if (phba->pport->port_state == LPFC_LOCAL_CFG_LINK) {
3784 if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName,
3785 sizeof(struct lpfc_name)) != 0) ||
3786 (memcmp(&phba->fc_fabparam.portName, &fp->FportName,
3787 sizeof(struct lpfc_name)) != 0)) {
3789 * This node has switched fabrics. FLOGI is required.
3790 * Clean up the old RPIs.
3793 list_for_each_entry_safe(ndlp, next_ndlp,
3794 &vport->fc_nodes, nlp_listp) {
3795 if (!NLP_CHK_NODE_ACT(ndlp))
3796 continue;
3797 if (ndlp->nlp_state != NLP_STE_NPR_NODE)
3798 continue;
3799 if (ndlp->nlp_type & NLP_FABRIC) {
3801 * Clean up old Fabric, Nameserver and
3802 * other NLP_FABRIC logins
3804 lpfc_drop_node(vport, ndlp);
3806 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
3807 /* Fail outstanding I/O now since this
3808 * device is marked for PLOGI
3810 lpfc_unreg_rpi(vport, ndlp);
3814 lpfc_initial_flogi(vport);
3815 return 0;
3817 /* Discovery not needed,
3818 * move the nodes to their original state.
3820 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
3821 nlp_listp) {
3822 if (!NLP_CHK_NODE_ACT(ndlp))
3823 continue;
3824 if (ndlp->nlp_state != NLP_STE_NPR_NODE)
3825 continue;
3827 switch (ndlp->nlp_prev_state) {
3828 case NLP_STE_UNMAPPED_NODE:
3829 ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
3830 lpfc_nlp_set_state(vport, ndlp,
3831 NLP_STE_UNMAPPED_NODE);
3832 break;
3834 case NLP_STE_MAPPED_NODE:
3835 ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
3836 lpfc_nlp_set_state(vport, ndlp,
3837 NLP_STE_MAPPED_NODE);
3838 break;
3840 default:
3841 break;
3845 /* Start discovery - this should just do CLEAR_LA */
3846 lpfc_disc_start(vport);
3848 return 0;
3851 void
3852 lpfc_els_timeout(unsigned long ptr)
3854 struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
3855 struct lpfc_hba *phba = vport->phba;
3856 unsigned long iflag;
3858 spin_lock_irqsave(&vport->work_port_lock, iflag);
3859 if ((vport->work_port_events & WORKER_ELS_TMO) == 0) {
3860 vport->work_port_events |= WORKER_ELS_TMO;
3861 spin_unlock_irqrestore(&vport->work_port_lock, iflag);
3863 spin_lock_irqsave(&phba->hbalock, iflag);
3864 if (phba->work_wait)
3865 lpfc_worker_wake_up(phba);
3866 spin_unlock_irqrestore(&phba->hbalock, iflag);
3868 else
3869 spin_unlock_irqrestore(&vport->work_port_lock, iflag);
3870 return;
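/*
 * lpfc_els_timeout_handler - Worker-thread handler for the ELS timeout
 *
 * Scans the ELS ring txcmplq for this vport's ELS commands whose driver
 * timeout has expired and aborts them via lpfc_sli_issue_abort_iotag(),
 * then re-arms the ELS timer if commands are still outstanding.
 */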
3873 void
3874 lpfc_els_timeout_handler(struct lpfc_vport *vport)
3876 struct lpfc_hba *phba = vport->phba;
3877 struct lpfc_sli_ring *pring;
3878 struct lpfc_iocbq *tmp_iocb, *piocb;
3879 IOCB_t *cmd = NULL;
3880 struct lpfc_dmabuf *pcmd;
3881 uint32_t els_command = 0;
3882 uint32_t timeout;
3883 uint32_t remote_ID = 0xffffffff;
3885 /* If the timer is already canceled do nothing */
3886 if ((vport->work_port_events & WORKER_ELS_TMO) == 0) {
3887 return;
3889 spin_lock_irq(&phba->hbalock);
3890 timeout = (uint32_t)(phba->fc_ratov << 1);
3892 pring = &phba->sli.ring[LPFC_ELS_RING];
3894 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
3895 cmd = &piocb->iocb;
3897 if ((piocb->iocb_flag & LPFC_IO_LIBDFC) != 0 ||
3898 piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
3899 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
3900 continue;
3902 if (piocb->vport != vport)
3903 continue;
3905 pcmd = (struct lpfc_dmabuf *) piocb->context2;
3906 if (pcmd)
3907 els_command = *(uint32_t *) (pcmd->virt);
3909 if (els_command == ELS_CMD_FARP ||
3910 els_command == ELS_CMD_FARPR ||
3911 els_command == ELS_CMD_FDISC)
3912 continue;
3914 if (vport != piocb->vport)
3915 continue;
3917 if (piocb->drvrTimeout > 0) {
3918 if (piocb->drvrTimeout >= timeout)
3919 piocb->drvrTimeout -= timeout;
3920 else
3921 piocb->drvrTimeout = 0;
3922 continue;
3925 remote_ID = 0xffffffff;
3926 if (cmd->ulpCommand != CMD_GEN_REQUEST64_CR)
3927 remote_ID = cmd->un.elsreq64.remoteID;
3928 else {
3929 struct lpfc_nodelist *ndlp;
3930 ndlp = __lpfc_findnode_rpi(vport, cmd->ulpContext);
3931 if (ndlp)
3932 remote_ID = ndlp->nlp_DID;
3934 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
3935 "0127 ELS timeout Data: x%x x%x x%x "
3936 "x%x\n", els_command,
3937 remote_ID, cmd->ulpCommand, cmd->ulpIoTag);
3938 lpfc_sli_issue_abort_iotag(phba, pring, piocb);
3940 spin_unlock_irq(&phba->hbalock);
3942 if (phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt)
3943 mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
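/*
 * lpfc_els_flush_cmd: Flush all pending ELS commands for a vport. Fabric
 * IOCBs queued for the vport are aborted first; IOCBs still on the ELS ring
 * txq are completed locally with IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED,
 * while IOCBs already on the txcmplq are aborted with
 * lpfc_sli_issue_abort_iotag().
 */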
3946 void
3947 lpfc_els_flush_cmd(struct lpfc_vport *vport)
3949 LIST_HEAD(completions);
3950 struct lpfc_hba *phba = vport->phba;
3951 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
3952 struct lpfc_iocbq *tmp_iocb, *piocb;
3953 IOCB_t *cmd = NULL;
3955 lpfc_fabric_abort_vport(vport);
3957 spin_lock_irq(&phba->hbalock);
3958 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
3959 cmd = &piocb->iocb;
3961 if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
3962 continue;
3965 /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */
3966 if (cmd->ulpCommand == CMD_QUE_RING_BUF_CN ||
3967 cmd->ulpCommand == CMD_QUE_RING_BUF64_CN ||
3968 cmd->ulpCommand == CMD_CLOSE_XRI_CN ||
3969 cmd->ulpCommand == CMD_ABORT_XRI_CN)
3970 continue;
3972 if (piocb->vport != vport)
3973 continue;
3975 list_move_tail(&piocb->list, &completions);
3976 pring->txq_cnt--;
3979 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
3980 if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
3981 continue;
3984 if (piocb->vport != vport)
3985 continue;
3987 lpfc_sli_issue_abort_iotag(phba, pring, piocb);
3989 spin_unlock_irq(&phba->hbalock);
3991 while (!list_empty(&completions)) {
3992 piocb = list_get_first(&completions, struct lpfc_iocbq, list);
3993 cmd = &piocb->iocb;
3994 list_del_init(&piocb->list);
3996 if (!piocb->iocb_cmpl)
3997 lpfc_sli_release_iocbq(phba, piocb);
3998 else {
3999 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
4000 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
4001 (piocb->iocb_cmpl) (phba, piocb, piocb);
4005 return;
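/*
 * lpfc_els_flush_all_cmd: Same as lpfc_els_flush_cmd(), but applied to the
 * whole HBA: ELS IOCBs for every vport are flushed from the txq or aborted
 * on the txcmplq.
 */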
4008 void
4009 lpfc_els_flush_all_cmd(struct lpfc_hba *phba)
4011 LIST_HEAD(completions);
4012 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
4013 struct lpfc_iocbq *tmp_iocb, *piocb;
4014 IOCB_t *cmd = NULL;
4016 lpfc_fabric_abort_hba(phba);
4017 spin_lock_irq(&phba->hbalock);
4018 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
4019 cmd = &piocb->iocb;
4020 if (piocb->iocb_flag & LPFC_IO_LIBDFC)
4021 continue;
4022 /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */
4023 if (cmd->ulpCommand == CMD_QUE_RING_BUF_CN ||
4024 cmd->ulpCommand == CMD_QUE_RING_BUF64_CN ||
4025 cmd->ulpCommand == CMD_CLOSE_XRI_CN ||
4026 cmd->ulpCommand == CMD_ABORT_XRI_CN)
4027 continue;
4028 list_move_tail(&piocb->list, &completions);
4029 pring->txq_cnt--;
4031 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
4032 if (piocb->iocb_flag & LPFC_IO_LIBDFC)
4033 continue;
4034 lpfc_sli_issue_abort_iotag(phba, pring, piocb);
4036 spin_unlock_irq(&phba->hbalock);
4037 while (!list_empty(&completions)) {
4038 piocb = list_get_first(&completions, struct lpfc_iocbq, list);
4039 cmd = &piocb->iocb;
4040 list_del_init(&piocb->list);
4041 if (!piocb->iocb_cmpl)
4042 lpfc_sli_release_iocbq(phba, piocb);
4043 else {
4044 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
4045 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
4046 (piocb->iocb_cmpl) (phba, piocb, piocb);
4049 return;
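/*
 * lpfc_els_unsol_buffer: Decode and dispatch one unsolicited ELS frame. The
 * first word of the payload selects the handler (PLOGI, FLOGI, LOGO, RSCN,
 * etc.). Unknown commands, and several commands received before discovery
 * authentication, are answered with an LS_RJT.
 */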
4052 static void
4053 lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
4054 struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb)
4056 struct Scsi_Host *shost;
4057 struct lpfc_nodelist *ndlp;
4058 struct ls_rjt stat;
4059 uint32_t *payload;
4060 uint32_t cmd, did, newnode, rjt_err = 0;
4061 IOCB_t *icmd = &elsiocb->iocb;
4063 if (!vport || !(elsiocb->context2))
4064 goto dropit;
4066 newnode = 0;
4067 payload = ((struct lpfc_dmabuf *)elsiocb->context2)->virt;
4068 cmd = *payload;
4069 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0)
4070 lpfc_post_buffer(phba, pring, 1, 1);
4072 did = icmd->un.rcvels.remoteID;
4073 if (icmd->ulpStatus) {
4074 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4075 "RCV Unsol ELS: status:x%x/x%x did:x%x",
4076 icmd->ulpStatus, icmd->un.ulpWord[4], did);
4077 goto dropit;
4080 /* Check to see if link went down during discovery */
4081 if (lpfc_els_chk_latt(vport))
4082 goto dropit;
4084 /* Ignore traffic received during vport shutdown. */
4085 if (vport->load_flag & FC_UNLOADING)
4086 goto dropit;
4088 ndlp = lpfc_findnode_did(vport, did);
4089 if (!ndlp) {
4090 /* Cannot find existing Fabric ndlp, so allocate a new one */
4091 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
4092 if (!ndlp)
4093 goto dropit;
4095 lpfc_nlp_init(vport, ndlp, did);
4096 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
4097 newnode = 1;
4098 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
4099 ndlp->nlp_type |= NLP_FABRIC;
4100 } else {
4101 if (!NLP_CHK_NODE_ACT(ndlp)) {
4102 ndlp = lpfc_enable_node(vport, ndlp,
4103 NLP_STE_UNUSED_NODE);
4104 if (!ndlp)
4105 goto dropit;
4107 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
4108 /* This is similar to the new node path */
4109 ndlp = lpfc_nlp_get(ndlp);
4110 if (!ndlp)
4111 goto dropit;
4112 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
4113 newnode = 1;
4117 phba->fc_stat.elsRcvFrame++;
4118 if (elsiocb->context1)
4119 lpfc_nlp_put(elsiocb->context1);
4121 elsiocb->context1 = lpfc_nlp_get(ndlp);
4122 elsiocb->vport = vport;
4124 if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) {
4125 cmd &= ELS_CMD_MASK;
4127 /* ELS command <elsCmd> received from NPORT <did> */
4128 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4129 "0112 ELS command x%x received from NPORT x%x "
4130 "Data: x%x\n", cmd, did, vport->port_state);
4131 switch (cmd) {
4132 case ELS_CMD_PLOGI:
4133 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4134 "RCV PLOGI: did:x%x/ste:x%x flg:x%x",
4135 did, vport->port_state, ndlp->nlp_flag);
4137 phba->fc_stat.elsRcvPLOGI++;
4138 ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp);
4140 if (vport->port_state < LPFC_DISC_AUTH) {
4141 if (!(phba->pport->fc_flag & FC_PT2PT) ||
4142 (phba->pport->fc_flag & FC_PT2PT_PLOGI)) {
4143 rjt_err = LSRJT_UNABLE_TPC;
4144 break;
4146 /* We get here, and drop thru, if we are PT2PT with
4147 * another NPort and the other side has initiated
4148 * the PLOGI before responding to our FLOGI.
4149 */
4152 shost = lpfc_shost_from_vport(vport);
4153 spin_lock_irq(shost->host_lock);
4154 ndlp->nlp_flag &= ~NLP_TARGET_REMOVE;
4155 spin_unlock_irq(shost->host_lock);
4157 lpfc_disc_state_machine(vport, ndlp, elsiocb,
4158 NLP_EVT_RCV_PLOGI);
4160 break;
4161 case ELS_CMD_FLOGI:
4162 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4163 "RCV FLOGI: did:x%x/ste:x%x flg:x%x",
4164 did, vport->port_state, ndlp->nlp_flag);
4166 phba->fc_stat.elsRcvFLOGI++;
4167 lpfc_els_rcv_flogi(vport, elsiocb, ndlp);
4168 if (newnode)
4169 lpfc_nlp_put(ndlp);
4170 break;
4171 case ELS_CMD_LOGO:
4172 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4173 "RCV LOGO: did:x%x/ste:x%x flg:x%x",
4174 did, vport->port_state, ndlp->nlp_flag);
4176 phba->fc_stat.elsRcvLOGO++;
4177 if (vport->port_state < LPFC_DISC_AUTH) {
4178 rjt_err = LSRJT_UNABLE_TPC;
4179 break;
4181 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO);
4182 break;
4183 case ELS_CMD_PRLO:
4184 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4185 "RCV PRLO: did:x%x/ste:x%x flg:x%x",
4186 did, vport->port_state, ndlp->nlp_flag);
4188 phba->fc_stat.elsRcvPRLO++;
4189 if (vport->port_state < LPFC_DISC_AUTH) {
4190 rjt_err = LSRJT_UNABLE_TPC;
4191 break;
4193 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLO);
4194 break;
4195 case ELS_CMD_RSCN:
4196 phba->fc_stat.elsRcvRSCN++;
4197 lpfc_els_rcv_rscn(vport, elsiocb, ndlp);
4198 if (newnode)
4199 lpfc_nlp_put(ndlp);
4200 break;
4201 case ELS_CMD_ADISC:
4202 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4203 "RCV ADISC: did:x%x/ste:x%x flg:x%x",
4204 did, vport->port_state, ndlp->nlp_flag);
4206 phba->fc_stat.elsRcvADISC++;
4207 if (vport->port_state < LPFC_DISC_AUTH) {
4208 rjt_err = LSRJT_UNABLE_TPC;
4209 break;
4211 lpfc_disc_state_machine(vport, ndlp, elsiocb,
4212 NLP_EVT_RCV_ADISC);
4213 break;
4214 case ELS_CMD_PDISC:
4215 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4216 "RCV PDISC: did:x%x/ste:x%x flg:x%x",
4217 did, vport->port_state, ndlp->nlp_flag);
4219 phba->fc_stat.elsRcvPDISC++;
4220 if (vport->port_state < LPFC_DISC_AUTH) {
4221 rjt_err = LSRJT_UNABLE_TPC;
4222 break;
4224 lpfc_disc_state_machine(vport, ndlp, elsiocb,
4225 NLP_EVT_RCV_PDISC);
4226 break;
4227 case ELS_CMD_FARPR:
4228 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4229 "RCV FARPR: did:x%x/ste:x%x flg:x%x",
4230 did, vport->port_state, ndlp->nlp_flag);
4232 phba->fc_stat.elsRcvFARPR++;
4233 lpfc_els_rcv_farpr(vport, elsiocb, ndlp);
4234 break;
4235 case ELS_CMD_FARP:
4236 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4237 "RCV FARP: did:x%x/ste:x%x flg:x%x",
4238 did, vport->port_state, ndlp->nlp_flag);
4240 phba->fc_stat.elsRcvFARP++;
4241 lpfc_els_rcv_farp(vport, elsiocb, ndlp);
4242 break;
4243 case ELS_CMD_FAN:
4244 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4245 "RCV FAN: did:x%x/ste:x%x flg:x%x",
4246 did, vport->port_state, ndlp->nlp_flag);
4248 phba->fc_stat.elsRcvFAN++;
4249 lpfc_els_rcv_fan(vport, elsiocb, ndlp);
4250 break;
4251 case ELS_CMD_PRLI:
4252 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4253 "RCV PRLI: did:x%x/ste:x%x flg:x%x",
4254 did, vport->port_state, ndlp->nlp_flag);
4256 phba->fc_stat.elsRcvPRLI++;
4257 if (vport->port_state < LPFC_DISC_AUTH) {
4258 rjt_err = LSRJT_UNABLE_TPC;
4259 break;
4261 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI);
4262 break;
4263 case ELS_CMD_LIRR:
4264 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4265 "RCV LIRR: did:x%x/ste:x%x flg:x%x",
4266 did, vport->port_state, ndlp->nlp_flag);
4268 phba->fc_stat.elsRcvLIRR++;
4269 lpfc_els_rcv_lirr(vport, elsiocb, ndlp);
4270 if (newnode)
4271 lpfc_nlp_put(ndlp);
4272 break;
4273 case ELS_CMD_RPS:
4274 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4275 "RCV RPS: did:x%x/ste:x%x flg:x%x",
4276 did, vport->port_state, ndlp->nlp_flag);
4278 phba->fc_stat.elsRcvRPS++;
4279 lpfc_els_rcv_rps(vport, elsiocb, ndlp);
4280 if (newnode)
4281 lpfc_nlp_put(ndlp);
4282 break;
4283 case ELS_CMD_RPL:
4284 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4285 "RCV RPL: did:x%x/ste:x%x flg:x%x",
4286 did, vport->port_state, ndlp->nlp_flag);
4288 phba->fc_stat.elsRcvRPL++;
4289 lpfc_els_rcv_rpl(vport, elsiocb, ndlp);
4290 if (newnode)
4291 lpfc_nlp_put(ndlp);
4292 break;
4293 case ELS_CMD_RNID:
4294 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4295 "RCV RNID: did:x%x/ste:x%x flg:x%x",
4296 did, vport->port_state, ndlp->nlp_flag);
4298 phba->fc_stat.elsRcvRNID++;
4299 lpfc_els_rcv_rnid(vport, elsiocb, ndlp);
4300 if (newnode)
4301 lpfc_nlp_put(ndlp);
4302 break;
4303 default:
4304 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4305 "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x",
4306 cmd, did, vport->port_state);
4308 /* Unsupported ELS command, reject */
4309 rjt_err = LSRJT_INVALID_CMD;
4311 /* Unknown ELS command <elsCmd> received from NPORT <did> */
4312 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
4313 "0115 Unknown ELS command x%x "
4314 "received from NPORT x%x\n", cmd, did);
4315 if (newnode)
4316 lpfc_nlp_put(ndlp);
4317 break;
4320 /* Check whether an LS_RJT must be sent for the received ELS cmd */
4321 if (rjt_err) {
4322 memset(&stat, 0, sizeof(stat));
4323 stat.un.b.lsRjtRsnCode = rjt_err;
4324 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
4325 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp,
4326 NULL);
4329 return;
4331 dropit:
4332 if (vport && !(vport->load_flag & FC_UNLOADING))
4333 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
4334 "(%d):0111 Dropping received ELS cmd "
4335 "Data: x%x x%x x%x\n",
4336 vport->vpi, icmd->ulpStatus,
4337 icmd->un.ulpWord[4], icmd->ulpTimeout);
4338 phba->fc_stat.elsRcvDrop++;
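/*
 * lpfc_find_vport_by_vpid: Look up a vport on this HBA by its VPI.
 * Returns the matching vport, or NULL if no vport uses that VPI.
 */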
4341 static struct lpfc_vport *
4342 lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
4344 struct lpfc_vport *vport;
4345 unsigned long flags;
4347 spin_lock_irqsave(&phba->hbalock, flags);
4348 list_for_each_entry(vport, &phba->port_list, listentry) {
4349 if (vport->vpi == vpi) {
4350 spin_unlock_irqrestore(&phba->hbalock, flags);
4351 return vport;
4354 spin_unlock_irqrestore(&phba->hbalock, flags);
4355 return NULL;
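/*
 * lpfc_els_unsol_event: SLI-ring entry point for unsolicited ELS events.
 * Handles receive-buffer shortages, resolves the receiving vport (using the
 * VPI when NPIV is enabled), attaches the receive buffers and hands each BDE
 * to lpfc_els_unsol_buffer().
 */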
4358 void
4359 lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
4360 struct lpfc_iocbq *elsiocb)
4362 struct lpfc_vport *vport = phba->pport;
4363 IOCB_t *icmd = &elsiocb->iocb;
4364 dma_addr_t paddr;
4365 struct lpfc_dmabuf *bdeBuf1 = elsiocb->context2;
4366 struct lpfc_dmabuf *bdeBuf2 = elsiocb->context3;
4368 elsiocb->context2 = NULL;
4369 elsiocb->context3 = NULL;
4371 if (icmd->ulpStatus == IOSTAT_NEED_BUFFER) {
4372 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
4373 } else if (icmd->ulpStatus == IOSTAT_LOCAL_REJECT &&
4374 (icmd->un.ulpWord[4] & 0xff) == IOERR_RCV_BUFFER_WAITING) {
4375 phba->fc_stat.NoRcvBuf++;
4376 /* Not enough posted buffers; Try posting more buffers */
4377 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
4378 lpfc_post_buffer(phba, pring, 0, 1);
4379 return;
4382 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
4383 (icmd->ulpCommand == CMD_IOCB_RCV_ELS64_CX ||
4384 icmd->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
4385 if (icmd->unsli3.rcvsli3.vpi == 0xffff)
4386 vport = phba->pport;
4387 else {
4388 uint16_t vpi = icmd->unsli3.rcvsli3.vpi;
4389 vport = lpfc_find_vport_by_vpid(phba, vpi);
4392 /* If there are no BDEs associated
4393 * with this IOCB, there is nothing to do.
4394 */
4395 if (icmd->ulpBdeCount == 0)
4396 return;
4398 /* type of ELS cmd is first 32bit word
4399 * in packet
4400 */
4401 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
4402 elsiocb->context2 = bdeBuf1;
4403 } else {
4404 paddr = getPaddr(icmd->un.cont64[0].addrHigh,
4405 icmd->un.cont64[0].addrLow);
4406 elsiocb->context2 = lpfc_sli_ringpostbuf_get(phba, pring,
4407 paddr);
4410 lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
4411 /*
4412 * The different unsolicited event handlers would tell us
4413 * if they are done with "mp" by setting context2 to NULL.
4414 */
4415 lpfc_nlp_put(elsiocb->context1);
4416 elsiocb->context1 = NULL;
4417 if (elsiocb->context2) {
4418 lpfc_in_buf_free(phba, (struct lpfc_dmabuf *)elsiocb->context2);
4419 elsiocb->context2 = NULL;
4422 /* RCV_ELS64_CX provide for 2 BDEs - process 2nd if included */
4423 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) &&
4424 icmd->ulpBdeCount == 2) {
4425 elsiocb->context2 = bdeBuf2;
4426 lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
4427 /* free mp if we are done with it */
4428 if (elsiocb->context2) {
4429 lpfc_in_buf_free(phba, elsiocb->context2);
4430 elsiocb->context2 = NULL;
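/*
 * lpfc_do_scr_ns_plogi: Kick off the NameServer PLOGI for a vport,
 * allocating (or re-enabling) the NameServer ndlp as needed. If FDMI is
 * configured, a PLOGI to FDMI_DID is started as well.
 */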
4435 void
4436 lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
4438 struct lpfc_nodelist *ndlp, *ndlp_fdmi;
4440 ndlp = lpfc_findnode_did(vport, NameServer_DID);
4441 if (!ndlp) {
4442 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
4443 if (!ndlp) {
4444 if (phba->fc_topology == TOPOLOGY_LOOP) {
4445 lpfc_disc_start(vport);
4446 return;
4448 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
4449 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
4450 "0251 NameServer login: no memory\n");
4451 return;
4453 lpfc_nlp_init(vport, ndlp, NameServer_DID);
4454 ndlp->nlp_type |= NLP_FABRIC;
4455 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
4456 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
4457 if (!ndlp) {
4458 if (phba->fc_topology == TOPOLOGY_LOOP) {
4459 lpfc_disc_start(vport);
4460 return;
4462 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
4463 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
4464 "0348 NameServer login: node freed\n");
4465 return;
4469 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
4471 if (lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0)) {
4472 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
4473 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
4474 "0252 Cannot issue NameServer login\n");
4475 return;
4478 if (vport->cfg_fdmi_on) {
4479 ndlp_fdmi = mempool_alloc(phba->nlp_mem_pool,
4480 GFP_KERNEL);
4481 if (ndlp_fdmi) {
4482 lpfc_nlp_init(vport, ndlp_fdmi, FDMI_DID);
4483 ndlp_fdmi->nlp_type |= NLP_FABRIC;
4484 ndlp_fdmi->nlp_state =
4485 NLP_STE_PLOGI_ISSUE;
4486 lpfc_issue_els_plogi(vport, ndlp_fdmi->nlp_DID,
4490 return;
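/*
 * lpfc_cmpl_reg_new_vport: Completion handler for the REG_VPI mailbox
 * command. On success the physical port continues with the fabric reglogin
 * while NPIV ports proceed to the NameServer PLOGI; on failure the vport
 * either gives up (unrecoverable status) or retries by re-issuing
 * FLOGI/FDISC.
 */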
4493 static void
4494 lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
4496 struct lpfc_vport *vport = pmb->vport;
4497 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4498 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
4499 MAILBOX_t *mb = &pmb->mb;
4501 spin_lock_irq(shost->host_lock);
4502 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
4503 spin_unlock_irq(shost->host_lock);
4505 if (mb->mbxStatus) {
4506 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
4507 "0915 Register VPI failed: 0x%x\n",
4508 mb->mbxStatus);
4510 switch (mb->mbxStatus) {
4511 case 0x11: /* unsupported feature */
4512 case 0x9603: /* max_vpi exceeded */
4513 case 0x9602: /* Link event since CLEAR_LA */
4514 /* giving up on vport registration */
4515 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
4516 spin_lock_irq(shost->host_lock);
4517 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
4518 spin_unlock_irq(shost->host_lock);
4519 lpfc_can_disctmo(vport);
4520 break;
4521 default:
4522 /* Try to recover from this error */
4523 lpfc_mbx_unreg_vpi(vport);
4524 spin_lock_irq(shost->host_lock);
4525 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
4526 spin_unlock_irq(shost->host_lock);
4527 if (vport->port_type == LPFC_PHYSICAL_PORT)
4528 lpfc_initial_flogi(vport);
4529 else
4530 lpfc_initial_fdisc(vport);
4531 break;
4534 } else {
4535 if (vport == phba->pport)
4536 lpfc_issue_fabric_reglogin(vport);
4537 else
4538 lpfc_do_scr_ns_plogi(phba, vport);
4541 /* Now, we decrement the ndlp reference count held for this
4542 * callback function
4543 */
4544 lpfc_nlp_put(ndlp);
4546 mempool_free(pmb, phba->mbox_mem_pool);
4547 return;
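/*
 * lpfc_register_new_vport: Issue a REG_VPI mailbox command to register the
 * vport's new N_Port ID. On any failure the vport is marked FC_VPORT_FAILED
 * and FC_VPORT_NEEDS_REG_VPI is cleared.
 */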
4550 static void
4551 lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
4552 struct lpfc_nodelist *ndlp)
4554 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4555 LPFC_MBOXQ_t *mbox;
4557 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4558 if (mbox) {
4559 lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, mbox);
4560 mbox->vport = vport;
4561 mbox->context2 = lpfc_nlp_get(ndlp);
4562 mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport;
4563 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
4564 == MBX_NOT_FINISHED) {
4565 /* Mailbox command could not be issued; decrement the ndlp
4566 * reference count taken for this command
4567 */
4568 lpfc_nlp_put(ndlp);
4569 mempool_free(mbox, phba->mbox_mem_pool);
4571 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
4572 "0253 Register VPI: Can't send mbox\n");
4573 goto mbox_err_exit;
4575 } else {
4576 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
4577 "0254 Register VPI: no memory\n");
4578 goto mbox_err_exit;
4580 return;
4582 mbox_err_exit:
4583 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
4584 spin_lock_irq(shost->host_lock);
4585 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
4586 spin_unlock_irq(shost->host_lock);
4587 return;
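/*
 * lpfc_cmpl_els_fdisc: Completion handler for an FDISC issued on behalf of
 * an NPIV vport. The discovery timer is reset for every vport still waiting
 * to send FDISC. On failure the command may be retried; on success the new
 * N_Port ID is recorded and the vport either registers its VPI or goes
 * straight to the NameServer PLOGI.
 */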
4590 static void
4591 lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
4592 struct lpfc_iocbq *rspiocb)
4594 struct lpfc_vport *vport = cmdiocb->vport;
4595 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4596 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
4597 struct lpfc_nodelist *np;
4598 struct lpfc_nodelist *next_np;
4599 IOCB_t *irsp = &rspiocb->iocb;
4600 struct lpfc_iocbq *piocb;
4602 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4603 "0123 FDISC completes. x%x/x%x prevDID: x%x\n",
4604 irsp->ulpStatus, irsp->un.ulpWord[4],
4605 vport->fc_prevDID);
4606 /* Since all FDISCs are being single threaded, we
4607 * must reset the discovery timer for ALL vports
4608 * waiting to send FDISC when one completes.
4609 */
4610 list_for_each_entry(piocb, &phba->fabric_iocb_list, list) {
4611 lpfc_set_disctmo(piocb->vport);
4614 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
4615 "FDISC cmpl: status:x%x/x%x prevdid:x%x",
4616 irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_prevDID);
4618 if (irsp->ulpStatus) {
4619 /* Check for retry */
4620 if (lpfc_els_retry(phba, cmdiocb, rspiocb))
4621 goto out;
4622 /* FDISC failed */
4623 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
4624 "0124 FDISC failed. (%d/%d)\n",
4625 irsp->ulpStatus, irsp->un.ulpWord[4]);
4626 if (vport->fc_vport->vport_state == FC_VPORT_INITIALIZING)
4627 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
4628 lpfc_nlp_put(ndlp);
4629 /* giving up on FDISC. Cancel discovery timer */
4630 lpfc_can_disctmo(vport);
4631 } else {
4632 spin_lock_irq(shost->host_lock);
4633 vport->fc_flag |= FC_FABRIC;
4634 if (vport->phba->fc_topology == TOPOLOGY_LOOP)
4635 vport->fc_flag |= FC_PUBLIC_LOOP;
4636 spin_unlock_irq(shost->host_lock);
4638 vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
4639 lpfc_vport_set_state(vport, FC_VPORT_ACTIVE);
4640 if ((vport->fc_prevDID != vport->fc_myDID) &&
4641 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
4642 /* If our NportID changed, we need to ensure all
4643 * remaining NPORTs get unreg_login'ed so we can
4644 * issue unreg_vpi.
4645 */
4646 list_for_each_entry_safe(np, next_np,
4647 &vport->fc_nodes, nlp_listp) {
4648 if (!NLP_CHK_NODE_ACT(np) ||
4649 (np->nlp_state != NLP_STE_NPR_NODE) ||
4650 !(np->nlp_flag & NLP_NPR_ADISC))
4651 continue;
4652 spin_lock_irq(shost->host_lock);
4653 np->nlp_flag &= ~NLP_NPR_ADISC;
4654 spin_unlock_irq(shost->host_lock);
4655 lpfc_unreg_rpi(vport, np);
4657 lpfc_mbx_unreg_vpi(vport);
4658 spin_lock_irq(shost->host_lock);
4659 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
4660 spin_unlock_irq(shost->host_lock);
4663 if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
4664 lpfc_register_new_vport(phba, vport, ndlp);
4665 else
4666 lpfc_do_scr_ns_plogi(phba, vport);
4668 /* Unconditionally kick off releasing fabric node for vports */
4669 lpfc_nlp_put(ndlp);
4672 out:
4673 lpfc_els_free_iocb(phba, cmdiocb);
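/*
 * lpfc_issue_els_fdisc: Build and send an FDISC ELS command for an NPIV
 * vport. The common service parameters are copied from the physical port
 * and adjusted for fabric login, the vport's port and node names are filled
 * in, and the IOCB is queued through the single-threaded fabric IOCB
 * scheduler.
 */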
4676 static int
4677 lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4678 uint8_t retry)
4680 struct lpfc_hba *phba = vport->phba;
4681 IOCB_t *icmd;
4682 struct lpfc_iocbq *elsiocb;
4683 struct serv_parm *sp;
4684 uint8_t *pcmd;
4685 uint16_t cmdsize;
4686 int did = ndlp->nlp_DID;
4687 int rc;
4689 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
4690 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
4691 ELS_CMD_FDISC);
4692 if (!elsiocb) {
4693 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
4694 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
4695 "0255 Issue FDISC: no IOCB\n");
4696 return 1;
4699 icmd = &elsiocb->iocb;
4700 icmd->un.elsreq64.myID = 0;
4701 icmd->un.elsreq64.fl = 1;
4703 /* For FDISC, Let FDISC rsp set the NPortID for this VPI */
4704 icmd->ulpCt_h = 1;
4705 icmd->ulpCt_l = 0;
4707 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4708 *((uint32_t *) (pcmd)) = ELS_CMD_FDISC;
4709 pcmd += sizeof(uint32_t); /* CSP Word 1 */
4710 memcpy(pcmd, &vport->phba->pport->fc_sparam, sizeof(struct serv_parm));
4711 sp = (struct serv_parm *) pcmd;
4712 /* Setup CSPs accordingly for Fabric */
4713 sp->cmn.e_d_tov = 0;
4714 sp->cmn.w2.r_a_tov = 0;
4715 sp->cls1.classValid = 0;
4716 sp->cls2.seqDelivery = 1;
4717 sp->cls3.seqDelivery = 1;
4719 pcmd += sizeof(uint32_t); /* CSP Word 2 */
4720 pcmd += sizeof(uint32_t); /* CSP Word 3 */
4721 pcmd += sizeof(uint32_t); /* CSP Word 4 */
4722 pcmd += sizeof(uint32_t); /* Port Name */
4723 memcpy(pcmd, &vport->fc_portname, 8);
4724 pcmd += sizeof(uint32_t); /* Node Name */
4725 pcmd += sizeof(uint32_t); /* Node Name */
4726 memcpy(pcmd, &vport->fc_nodename, 8);
4728 lpfc_set_disctmo(vport);
4730 phba->fc_stat.elsXmitFDISC++;
4731 elsiocb->iocb_cmpl = lpfc_cmpl_els_fdisc;
4733 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
4734 "Issue FDISC: did:x%x",
4735 did, 0, 0);
4737 rc = lpfc_issue_fabric_iocb(phba, elsiocb);
4738 if (rc == IOCB_ERROR) {
4739 lpfc_els_free_iocb(phba, elsiocb);
4740 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
4741 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
4742 "0256 Issue FDISC: Cannot send IOCB\n");
4743 return 1;
4745 lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING);
4746 vport->port_state = LPFC_FDISC;
4747 return 0;
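/*
 * lpfc_cmpl_els_npiv_logo: Completion handler for the LOGO sent when an NPIV
 * vport is being torn down; frees the IOCB, flags the unreg_vpi completion
 * as VPORT_ERROR and drops the ndlp reference that triggers release of the
 * fabric node.
 */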
4750 static void
4751 lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
4752 struct lpfc_iocbq *rspiocb)
4754 struct lpfc_vport *vport = cmdiocb->vport;
4755 IOCB_t *irsp;
4756 struct lpfc_nodelist *ndlp;
4757 ndlp = (struct lpfc_nodelist *)cmdiocb->context1;
4759 irsp = &rspiocb->iocb;
4760 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
4761 "LOGO npiv cmpl: status:x%x/x%x did:x%x",
4762 irsp->ulpStatus, irsp->un.ulpWord[4], irsp->un.rcvels.remoteID);
4764 lpfc_els_free_iocb(phba, cmdiocb);
4765 vport->unreg_vpi_cmpl = VPORT_ERROR;
4767 /* Trigger the release of the ndlp after logo */
4768 lpfc_nlp_put(ndlp);
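/*
 * lpfc_issue_els_npiv_logo: Issue a LOGO for an NPIV vport, carrying the
 * vport's N_Port ID and port name in the payload. Returns 0 on success and
 * 1 if the LOGO could not be issued.
 */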
4772 lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4774 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4775 struct lpfc_hba *phba = vport->phba;
4776 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
4777 IOCB_t *icmd;
4778 struct lpfc_iocbq *elsiocb;
4779 uint8_t *pcmd;
4780 uint16_t cmdsize;
4782 cmdsize = 2 * sizeof(uint32_t) + sizeof(struct lpfc_name);
4783 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, ndlp->nlp_DID,
4784 ELS_CMD_LOGO);
4785 if (!elsiocb)
4786 return 1;
4788 icmd = &elsiocb->iocb;
4789 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4790 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
4791 pcmd += sizeof(uint32_t);
4793 /* Fill in LOGO payload */
4794 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID);
4795 pcmd += sizeof(uint32_t);
4796 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
4798 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
4799 "Issue LOGO npiv did:x%x flg:x%x",
4800 ndlp->nlp_DID, ndlp->nlp_flag, 0);
4802 elsiocb->iocb_cmpl = lpfc_cmpl_els_npiv_logo;
4803 spin_lock_irq(shost->host_lock);
4804 ndlp->nlp_flag |= NLP_LOGO_SND;
4805 spin_unlock_irq(shost->host_lock);
4806 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
4807 spin_lock_irq(shost->host_lock);
4808 ndlp->nlp_flag &= ~NLP_LOGO_SND;
4809 spin_unlock_irq(shost->host_lock);
4810 lpfc_els_free_iocb(phba, elsiocb);
4811 return 1;
4813 return 0;
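/*
 * lpfc_fabric_block_timeout: Timer routine run when the fabric-IOCB block
 * window expires. Posts WORKER_FABRIC_BLOCK_TMO and wakes the worker thread
 * so that blocked fabric IOCBs can be resumed.
 */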
4816 void
4817 lpfc_fabric_block_timeout(unsigned long ptr)
4819 struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
4820 unsigned long iflags;
4821 uint32_t tmo_posted;
4822 spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
4823 tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO;
4824 if (!tmo_posted)
4825 phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO;
4826 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
4828 if (!tmo_posted) {
4829 spin_lock_irqsave(&phba->hbalock, iflags);
4830 if (phba->work_wait)
4831 lpfc_worker_wake_up(phba);
4832 spin_unlock_irqrestore(&phba->hbalock, iflags);
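/*
 * lpfc_resume_fabric_iocbs: Issue the next IOCB waiting on the fabric IOCB
 * list, provided no fabric IOCB is currently outstanding. An IOCB that
 * fails to issue is completed locally with IOSTAT_LOCAL_REJECT and the next
 * one is attempted.
 */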
4836 static void
4837 lpfc_resume_fabric_iocbs(struct lpfc_hba *phba)
4839 struct lpfc_iocbq *iocb;
4840 unsigned long iflags;
4841 int ret;
4842 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
4843 IOCB_t *cmd;
4845 repeat:
4846 iocb = NULL;
4847 spin_lock_irqsave(&phba->hbalock, iflags);
4848 /* Post any pending iocb to the SLI layer */
4849 if (atomic_read(&phba->fabric_iocb_count) == 0) {
4850 list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb),
4851 list);
4852 if (iocb)
4853 /* Increment fabric iocb count to hold the position */
4854 atomic_inc(&phba->fabric_iocb_count);
4856 spin_unlock_irqrestore(&phba->hbalock, iflags);
4857 if (iocb) {
4858 iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
4859 iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
4860 iocb->iocb_flag |= LPFC_IO_FABRIC;
4862 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
4863 "Fabric sched1: ste:x%x",
4864 iocb->vport->port_state, 0, 0);
4866 ret = lpfc_sli_issue_iocb(phba, pring, iocb, 0);
4868 if (ret == IOCB_ERROR) {
4869 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
4870 iocb->fabric_iocb_cmpl = NULL;
4871 iocb->iocb_flag &= ~LPFC_IO_FABRIC;
4872 cmd = &iocb->iocb;
4873 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
4874 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
4875 iocb->iocb_cmpl(phba, iocb, iocb);
4877 atomic_dec(&phba->fabric_iocb_count);
4878 goto repeat;
4882 return;
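/*
 * lpfc_unblock_fabric_iocbs: Clear the FABRIC_COMANDS_BLOCKED flag and
 * resume issuing queued fabric IOCBs.
 */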
4885 void
4886 lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba)
4888 clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
4890 lpfc_resume_fabric_iocbs(phba);
4891 return;
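/*
 * lpfc_block_fabric_iocbs: Set FABRIC_COMANDS_BLOCKED and, if it was not
 * already set, start the timer that unblocks fabric IOCBs after 100ms.
 */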
4894 static void
4895 lpfc_block_fabric_iocbs(struct lpfc_hba *phba)
4897 int blocked;
4899 blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
4900 /* Start a timer to unblock fabric iocbs after 100ms */
4901 if (!blocked)
4902 mod_timer(&phba->fabric_block_timer, jiffies + HZ/10 );
4904 return;
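/*
 * lpfc_cmpl_fabric_iocb: Completion wrapper for IOCBs issued through the
 * fabric scheduler. Busy or temporarily-unavailable responses block further
 * fabric IOCBs; the original completion handler is then restored and called,
 * and pending fabric IOCBs are resumed when commands are not blocked.
 */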
4907 static void
4908 lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
4909 struct lpfc_iocbq *rspiocb)
4911 struct ls_rjt stat;
4913 if ((cmdiocb->iocb_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC)
4914 BUG();
4916 switch (rspiocb->iocb.ulpStatus) {
4917 case IOSTAT_NPORT_RJT:
4918 case IOSTAT_FABRIC_RJT:
4919 if (rspiocb->iocb.un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
4920 lpfc_block_fabric_iocbs(phba);
4922 break;
4924 case IOSTAT_NPORT_BSY:
4925 case IOSTAT_FABRIC_BSY:
4926 lpfc_block_fabric_iocbs(phba);
4927 break;
4929 case IOSTAT_LS_RJT:
4930 stat.un.lsRjtError =
4931 be32_to_cpu(rspiocb->iocb.un.ulpWord[4]);
4932 if ((stat.un.b.lsRjtRsnCode == LSRJT_UNABLE_TPC) ||
4933 (stat.un.b.lsRjtRsnCode == LSRJT_LOGICAL_BSY))
4934 lpfc_block_fabric_iocbs(phba);
4935 break;
4938 if (atomic_read(&phba->fabric_iocb_count) == 0)
4939 BUG();
4941 cmdiocb->iocb_cmpl = cmdiocb->fabric_iocb_cmpl;
4942 cmdiocb->fabric_iocb_cmpl = NULL;
4943 cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC;
4944 cmdiocb->iocb_cmpl(phba, cmdiocb, rspiocb);
4946 atomic_dec(&phba->fabric_iocb_count);
4947 if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) {
4948 /* Post any pending iocbs to HBA */
4949 lpfc_resume_fabric_iocbs(phba);
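/*
 * lpfc_issue_fabric_iocb: Single-threaded issue path for fabric ELS IOCBs.
 * If another fabric IOCB is outstanding or fabric commands are blocked, the
 * IOCB is queued on fabric_iocb_list and IOCB_SUCCESS is returned; otherwise
 * it is issued immediately with lpfc_cmpl_fabric_iocb as its completion
 * wrapper.
 */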
4953 static int
4954 lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
4956 unsigned long iflags;
4957 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
4958 int ready;
4959 int ret;
4961 if (atomic_read(&phba->fabric_iocb_count) > 1)
4962 BUG();
4964 spin_lock_irqsave(&phba->hbalock, iflags);
4965 ready = atomic_read(&phba->fabric_iocb_count) == 0 &&
4966 !test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
4968 if (ready)
4969 /* Increment fabric iocb count to hold the position */
4970 atomic_inc(&phba->fabric_iocb_count);
4971 spin_unlock_irqrestore(&phba->hbalock, iflags);
4972 if (ready) {
4973 iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
4974 iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
4975 iocb->iocb_flag |= LPFC_IO_FABRIC;
4977 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
4978 "Fabric sched2: ste:x%x",
4979 iocb->vport->port_state, 0, 0);
4981 ret = lpfc_sli_issue_iocb(phba, pring, iocb, 0);
4983 if (ret == IOCB_ERROR) {
4984 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
4985 iocb->fabric_iocb_cmpl = NULL;
4986 iocb->iocb_flag &= ~LPFC_IO_FABRIC;
4987 atomic_dec(&phba->fabric_iocb_count);
4989 } else {
4990 spin_lock_irqsave(&phba->hbalock, iflags);
4991 list_add_tail(&iocb->list, &phba->fabric_iocb_list);
4992 spin_unlock_irqrestore(&phba->hbalock, iflags);
4993 ret = IOCB_SUCCESS;
4995 return ret;
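/*
 * lpfc_fabric_abort_vport: Complete, with IOSTAT_LOCAL_REJECT /
 * IOERR_SLI_ABORTED, every IOCB on the fabric_iocb_list that belongs to the
 * given vport.
 */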
4999 static void lpfc_fabric_abort_vport(struct lpfc_vport *vport)
5001 LIST_HEAD(completions);
5002 struct lpfc_hba *phba = vport->phba;
5003 struct lpfc_iocbq *tmp_iocb, *piocb;
5004 IOCB_t *cmd;
5006 spin_lock_irq(&phba->hbalock);
5007 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
5008 list) {
5010 if (piocb->vport != vport)
5011 continue;
5013 list_move_tail(&piocb->list, &completions);
5015 spin_unlock_irq(&phba->hbalock);
5017 while (!list_empty(&completions)) {
5018 piocb = list_get_first(&completions, struct lpfc_iocbq, list);
5019 list_del_init(&piocb->list);
5021 cmd = &piocb->iocb;
5022 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
5023 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
5024 (piocb->iocb_cmpl) (phba, piocb, piocb);
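/*
 * lpfc_fabric_abort_nport: Flush the queued fabric IOCBs associated with a
 * particular ndlp (matched via lpfc_check_sli_ndlp), completing them with
 * IOSTAT_LOCAL_REJECT / IOERR_SLI_ABORTED.
 */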
5028 void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp)
5030 LIST_HEAD(completions);
5031 struct lpfc_hba *phba = ndlp->vport->phba;
5032 struct lpfc_iocbq *tmp_iocb, *piocb;
5033 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
5034 IOCB_t *cmd;
5036 spin_lock_irq(&phba->hbalock);
5037 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
5038 list) {
5039 if ((lpfc_check_sli_ndlp(phba, pring, piocb, ndlp))) {
5041 list_move_tail(&piocb->list, &completions);
5044 spin_unlock_irq(&phba->hbalock);
5046 while (!list_empty(&completions)) {
5047 piocb = list_get_first(&completions, struct lpfc_iocbq, list);
5048 list_del_init(&piocb->list);
5050 cmd = &piocb->iocb;
5051 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
5052 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
5053 (piocb->iocb_cmpl) (phba, piocb, piocb);
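/*
 * lpfc_fabric_abort_hba: Flush the entire fabric_iocb_list for the HBA,
 * completing every queued IOCB with IOSTAT_LOCAL_REJECT / IOERR_SLI_ABORTED.
 */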
5057 void lpfc_fabric_abort_hba(struct lpfc_hba *phba)
5059 LIST_HEAD(completions);
5060 struct lpfc_iocbq *piocb;
5061 IOCB_t *cmd;
5063 spin_lock_irq(&phba->hbalock);
5064 list_splice_init(&phba->fabric_iocb_list, &completions);
5065 spin_unlock_irq(&phba->hbalock);
5067 while (!list_empty(&completions)) {
5068 piocb = list_get_first(&completions, struct lpfc_iocbq, list);
5069 list_del_init(&piocb->list);
5071 cmd = &piocb->iocb;
5072 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
5073 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
5074 (piocb->iocb_cmpl) (phba, piocb, piocb);
5079 #if 0
5080 void lpfc_fabric_abort_flogi(struct lpfc_hba *phba)
5082 LIST_HEAD(completions);
5083 struct lpfc_iocbq *tmp_iocb, *piocb;
5084 IOCB_t *cmd;
5085 struct lpfc_nodelist *ndlp;
5087 spin_lock_irq(&phba->hbalock);
5088 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
5089 list) {
5091 cmd = &piocb->iocb;
5092 ndlp = (struct lpfc_nodelist *) piocb->context1;
5093 if (cmd->ulpCommand == CMD_ELS_REQUEST64_CR &&
5094 ndlp != NULL &&
5095 ndlp->nlp_DID == Fabric_DID)
5096 list_move_tail(&piocb->list, &completions);
5098 spin_unlock_irq(&phba->hbalock);
5100 while (!list_empty(&completions)) {
5101 piocb = list_get_first(&completions, struct lpfc_iocbq, list);
5102 list_del_init(&piocb->list);
5104 cmd = &piocb->iocb;
5105 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
5106 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
5107 (piocb->iocb_cmpl) (phba, piocb, piocb);
5110 #endif /* 0 */